edited_code (stringlengths 17–978k) | original_code (stringlengths 17–978k)
---|---
#!/home/zhuqingjie/env/py3_tf_low/bin/python
'''
@Time : 07.26 0026 01:19 PM
@Author : zhuqingjie
@User : zhu
@FileName: control.py
@Software: PyCharm
'''
'''
Overall control logic:
1. `control` exposes only a single port to the outside; external clients send requests to `control`,
   which dispatches them to the other server modules according to `mode`.
2. It also works around the fact that the AI nodes cannot be reached directly from outside: the main
   services run on an AI node while the `control` service runs on the login node, so external
   clients can reach it.
'''
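# Illustrative client call (a sketch only; it assumes the service is started with the
# commented-out `app.run(host='0.0.0.0', port='7006')` line near the bottom of this file,
# and '<login-node>' is a placeholder for the actual login-node hostname):
#
#   import requests
#   response = requests.post('http://<login-node>:7006',
#                            data={'mode': '1', 'userID': 'demo', 'src_path': '/path/to/input.bmp'})
#   print(response.text)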
import json, os, requests, sys, time
from flask import Flask, request
# param
ai01_ip = '10.11.1.81'
ai02_ip = '10.11.1.82'
ai03_ip = '10.11.1.83'
ai04_ip = '10.11.1.84'
ai05_ip = '10.11.1.85'
IP = ai05_ip  # IP address of the main service
app = Flask(__name__)
print_ = lambda x: print(f"--> [{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))}]: {x}")
printc = lambda s: print(f"\033[1;35m{s}\033[0m")
mode_list = ['1', '2', '21', '22', '3', '4', '5', '51', '6']
def do_request(port, body):
    url = f'http://{IP}:{port}'
    printc(url)
    printc(body)
    response = requests.post(url, data=body)
    printc('do_request ok')
    return response.text


@app.route('/', methods=['POST'])
def handle():
    print('\n')
    print('-' * 50)
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))

    # Read the request parameters.
    dic_url = request.form
    print_(f'\n\tparams: {dic_url}')
    error_param = 'error_param'
    mode = dic_url.get('mode', error_param)
    if mode == error_param:
        return json.dumps({
            'status': -1,
            'info': 'param error: not find "mode"!',
            'dst_path': 'null',
        })
    elif mode not in mode_list:
        return json.dumps({
            'status': -1,
            'info': 'param error: "mode" must in 1-6!',
            'dst_path': 'null',
        })
    elif mode == '1':
        return do_request(9001, dic_url)
    elif mode == '2':
        return do_request(9002, dic_url)
    elif mode == '21':
        return do_request(9021, dic_url)
    elif mode == '22':
        return do_request(9022, dic_url)
    elif mode == '3':
        return do_request(9003, dic_url)
    elif mode == '4':
        return do_request(9004, dic_url)
    elif mode == '5':
        return do_request(9005, dic_url)
    elif mode == '51':
        return do_request(9051, dic_url)
    elif mode == '6':
        return do_request(9006, dic_url)
    # elif mode in ['10', '11']:
    #     return do_request(9010, dic_url)
    else:
        return json.dumps({
            'status': 2,
            'info': 'error: An impossible error.',
            'dst_path': 'null',
        })


if __name__ == '__main__':
    # app.run(host='0.0.0.0', port='7006')
    body = {
        'mode': '1',
        'donotsave': '0',
        'userID': 'zhuqingj',
        'src_path': '/home/zhangli_lab/zhuqingjie/prj/tunet/res_test/0x.bmp',
    }
    res = do_request(9001, body)
    print(res)
|
#!/home/zhuqingjie/env/py3_tf_low/bin/python
'''
@Time : 07.26 0026 01:19 PM
@Author : zhuqingjie
@User : zhu
@FileName: control.py
@Software: PyCharm
'''
'''
Overall control logic:
1. `control` exposes only a single port to the outside; external clients send requests to `control`,
   which dispatches them to the other server modules according to `mode`.
2. It also works around the fact that the AI nodes cannot be reached directly from outside: the main
   services run on an AI node while the `control` service runs on the login node, so external
   clients can reach it.
'''
import json, os, requests, sys, time
from flask import Flask, request
# param
ai01_ip = '10.11.1.81'
ai02_ip = '10.11.1.82'
ai03_ip = '10.11.1.83'
ai04_ip = '10.11.1.84'
ai05_ip = '10.11.1.85'
IP = ai05_ip  # IP address of the main service
app = Flask(__name__)
print_ = lambda x: print(f"--> [{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))}]: {x}")
printc = lambda s: print(f"\033[1;35m{s}\033[0m")
mode_list = ['1', '2', '21', '22', '3', '4', '5', '51', '6']
def do_request(port, body):
    url = f'http://{IP}:{port}'
    printc(url)
    printc(body)
    response = requests.post(url, data=body)
    printc('do_request ok')
    return response.text


@app.route('/', methods=['POST'])
def handle():
    print('\n')
    print('-' * 50)
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))

    # Read the request parameters.
    dic_url = request.form
    print_(f'\n\tparams: {dic_url}')
    error_param = 'error_param'
    mode = dic_url.get('mode', error_param)
    if mode == error_param:
        return json.dumps({
            'status': -1,
            'info': 'param error: not find "mode"!',
            'dst_path': 'null',
        })
    elif mode not in mode_list:
        return json.dumps({
            'status': -1,
            'info': 'param error: "mode" must in 1-6!',
            'dst_path': 'null',
        })
    elif mode == '1':
        return do_request(9001, dic_url)
    elif mode == '2':
        return do_request(9002, dic_url)
    elif mode == '21':
        return do_request(9021, dic_url)
    elif mode == '22':
        return do_request(9022, dic_url)
    elif mode == '3':
        return do_request(9003, dic_url)
    elif mode == '4':
        return do_request(9004, dic_url)
    elif mode == '5':
        return do_request(9005, dic_url)
    elif mode == '51':
        return do_request(9051, dic_url)
    elif mode == '6':
        return do_request(9006, dic_url)
    # elif mode in ['10', '11']:
    #     return do_request(9010, dic_url)
    else:
        return json.dumps({
            'status': 2,
            'info': 'error: An impossible error.',
            'dst_path': 'null',
        })


if __name__ == '__main__':
    # app.run(host='0.0.0.0', port='7006')
    body = {
        'mode': '1',
        'donotsave': '0',
        'userID': 'zhuqingj',
        'src_path': '/home/zhangli_lab/zhuqingjie/prj/tunet/res_test/0x.bmp',
    }
    res = do_request(9001, body)
    print(res)
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
"""
*aces-dev* Reference Config Generator
=====================================
Defines various objects related to the generation of the *aces-dev* reference
*OpenColorIO* config:
- :func:`opencolorio_config_aces.generate_config_aces`
"""
import csv
import logging
import re
from collections import defaultdict
from datetime import datetime
from enum import Flag, auto
from pathlib import Path
from opencolorio_config_aces.config.generation import (
ConfigData, colorspace_factory, generate_config, look_factory,
view_transform_factory)
from opencolorio_config_aces.config.reference import (
classify_aces_ctl_transforms, discover_aces_ctl_transforms,
unclassify_ctl_transforms)
from opencolorio_config_aces.utilities import git_describe, required
__author__ = 'OpenColorIO Contributors'
__copyright__ = 'Copyright Contributors to the OpenColorIO Project.'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'OpenColorIO Contributors'
__email__ = 'ocio-dev@lists.aswf.io'
__status__ = 'Production'
__all__ = [
'ACES_CONFIG_REFERENCE_MAPPING_FILE_PATH',
'ACES_CONFIG_REFERENCE_COLORSPACE',
'ACES_CONFIG_OUTPUT_ENCODING_COLORSPACE',
'ACES_CONFIG_COLORSPACE_NAME_SEPARATOR',
'ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR',
'ACES_CONFIG_BUILTIN_TRANSFORM_NAME_SEPARATOR',
'ACES_CONFIG_DISPLAY_FAMILY', 'COLORSPACE_NAME_SUBSTITUTION_PATTERNS',
'LOOK_NAME_SUBSTITUTION_PATTERNS',
'TRANSFORM_FAMILY_SUBSTITUTION_PATTERNS',
'VIEW_TRANSFORM_NAME_SUBSTITUTION_PATTERNS',
'DISPLAY_NAME_SUBSTITUTION_PATTERNS', 'ColorspaceDescriptionStyle',
'beautify_name', 'beautify_colorspace_name', 'beautify_look_name',
'beautify_transform_family', 'beautify_view_transform_name',
'beautify_display_name', 'ctl_transform_to_colorspace_name',
'ctl_transform_to_look_name', 'ctl_transform_to_transform_family',
'ctl_transform_to_description', 'ctl_transform_to_colorspace',
'ctl_transform_to_look', 'create_builtin_transform',
'style_to_view_transform', 'style_to_display_colorspace',
'generate_config_aces'
]
ACES_CONFIG_REFERENCE_MAPPING_FILE_PATH = (
Path(__file__).parents[0] / 'resources' /
'OpenColorIO-ACES-Config Transforms - Reference Config - Mapping.csv')
"""
Path to the *ACES* *CTL* transforms to *OpenColorIO* colorspaces mapping file.
ACES_CONFIG_REFERENCE_MAPPING_FILE_PATH : unicode
"""
ACES_CONFIG_REFERENCE_COLORSPACE = 'ACES2065-1'
"""
*OpenColorIO* config reference colorspace.
ACES_CONFIG_REFERENCE_COLORSPACE : unicode
"""
ACES_CONFIG_OUTPUT_ENCODING_COLORSPACE = 'OCES'
"""
*OpenColorIO* config output encoding colorspace.
ACES_CONFIG_OUTPUT_ENCODING_COLORSPACE : unicode
"""
ACES_CONFIG_COLORSPACE_NAME_SEPARATOR = ' - '
"""
*OpenColorIO* config colorspace name separator.
ACES_CONFIG_COLORSPACE_NAME_SEPARATOR : unicode
"""
ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR = '/'
"""
*OpenColorIO* config colorspace family separator.
ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR : unicode
"""
ACES_CONFIG_BUILTIN_TRANSFORM_NAME_SEPARATOR = '_to_'
"""
*OpenColorIO* config *BuiltinTransform* name separator.
ACES_CONFIG_BUILTIN_TRANSFORM_NAME_SEPARATOR : unicode
"""
ACES_CONFIG_DISPLAY_FAMILY = 'Display'
"""
*OpenColorIO* config display family.
ACES_CONFIG_DISPLAY_FAMILY : unicode
"""
COLORSPACE_NAME_SUBSTITUTION_PATTERNS = {
'ACES_0_1_1': 'ACES 0.1.1',
'ACES_0_2_2': 'ACES 0.2.2',
'ACES_0_7_1': 'ACES 0.7.1',
'_7nits': '',
'_15nits': '',
'_': ' ',
'-raw': '',
'-': ' ',
'\\b(\\w+)limited\\b': '(\\1 Limited)',
'\\b(\\d+)nits\\b': '(\\1 nits)',
'RGBmonitor': 'sRGB',
'Rec709': 'Rec. 709',
'Rec2020': 'Rec. 2020',
}
"""
*OpenColorIO* colorspace name substitution patterns.
Notes
-----
- The substitutions are evaluated in order.
COLORSPACE_NAME_SUBSTITUTION_PATTERNS : dict
"""
COLORSPACE_NAME_SUBSTITUTION_PATTERNS.update({
# Input transforms also use the "family" name and thus need beautifying.
(f'{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}Alexa'
f'{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}v\\d+'
f'{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}.*'):
'',
f'{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}':
ACES_CONFIG_COLORSPACE_NAME_SEPARATOR,
})
LOOK_NAME_SUBSTITUTION_PATTERNS = {
# TODO: Implement support for callable patterns.
# The following one should be a dedicated definition/callable.
'BlueLightArtifactFix': 'Blue Light Artifact Fix'
}
"""
*OpenColorIO* look name substitution patterns.
Notes
-----
- The substitutions are evaluated in order.
LOOK_NAME_SUBSTITUTION_PATTERNS : dict
"""
TRANSFORM_FAMILY_SUBSTITUTION_PATTERNS = {
'\\\\': ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR,
'vendorSupplied[/\\\\]': '',
'arri': 'ARRI',
'alexa': 'Alexa',
'canon': 'Canon',
'panasonic': 'Panasonic',
'red': 'RED',
'sony': 'Sony',
}
"""
*OpenColorIO* transform family substitution patterns.
Notes
-----
- The substitutions are evaluated in order.
TRANSFORM_FAMILY_SUBSTITUTION_PATTERNS : dict
"""
VIEW_TRANSFORM_NAME_SUBSTITUTION_PATTERNS = {
'7.2nit': '&',
'15nit': '&',
'lim': ' lim',
'nit': ' nits',
'sim': ' sim on',
'CINEMA': 'Cinema',
'VIDEO': 'Video',
'REC1886': 'Rec.1886',
'REC709': 'Rec.709',
'REC2020': 'Rec.2020',
'-': ' ',
}
"""
*OpenColorIO* view transform name substitution patterns.
VIEW_TRANSFORM_NAME_SUBSTITUTION_PATTERNS : dict
"""
DISPLAY_NAME_SUBSTITUTION_PATTERNS = {
'G2.6-': '',
'-BFD': '',
'REC.1886': 'Rec.1886',
'REC.709': 'Rec.709 Video',
'REC.2020': 'Rec.2020 Video',
'REC.2100': 'Rec.2100',
'-Rec.': ' / Rec.',
'-1000nit': '',
# Legacy Substitutions
'dcdm': 'DCDM',
'p3': 'P3',
'rec709': 'Rec. 709',
'rec2020': 'Rec. 2020',
}
"""
*OpenColorIO* display name substitution patterns.
Notes
-----
- The substitutions are evaluated in order.
DISPLAY_NAME_SUBSTITUTION_PATTERNS : dict
"""
class ColorspaceDescriptionStyle(Flag):
"""
Enum storing the various *OpenColorIO* colorspace description styles.
"""
NONE = auto()
ACES = auto()
OPENCOLORIO = auto()
SHORT = auto()
LONG = auto()
SHORT_UNION = ACES | OPENCOLORIO | SHORT
LONG_UNION = ACES | OPENCOLORIO | LONG
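# For reference, the composite members above are plain "enum.Flag" unions, e.g.
# (illustrative only):
#
#   >>> ColorspaceDescriptionStyle.SHORT_UNION == (
#   ...     ColorspaceDescriptionStyle.ACES
#   ...     | ColorspaceDescriptionStyle.OPENCOLORIO
#   ...     | ColorspaceDescriptionStyle.SHORT)
#   True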
def beautify_name(name, patterns):
"""
Beautifies given name by applying in succession the given patterns.
Parameters
----------
name : unicode
Name to beautify.
patterns : dict
Dictionary of regular expression patterns and substitution to apply
onto the name.
Returns
-------
unicode
Beautified name.
Examples
--------
>>> beautify_name(
... 'Rec709_100nits_dim',
... COLORSPACE_NAME_SUBSTITUTION_PATTERNS)
'Rec. 709 (100 nits) dim'
"""
for pattern, substitution in patterns.items():
name = re.sub(pattern, substitution, name)
return name.strip()
def beautify_colorspace_name(name):
"""
Beautifies given *OpenColorIO* colorspace name by applying in succession
the relevant patterns.
Parameters
----------
name : unicode
*OpenColorIO* colorspace name to beautify.
Returns
-------
unicode
Beautified *OpenColorIO* colorspace name.
Examples
--------
>>> beautify_colorspace_name('Rec709_100nits_dim')
'Rec. 709 (100 nits) dim'
"""
return beautify_name(name, COLORSPACE_NAME_SUBSTITUTION_PATTERNS)
def beautify_look_name(name):
"""
Beautifies given *OpenColorIO* look name by applying in succession the
relevant patterns.
Parameters
----------
name : unicode
*OpenColorIO* look name to beautify.
Returns
-------
unicode
Beautified *OpenColorIO* look name.
Examples
--------
>>> beautify_look_name('BlueLightArtifactFix')
'Blue Light Artifact Fix'
"""
return beautify_name(name, LOOK_NAME_SUBSTITUTION_PATTERNS)
def beautify_transform_family(name):
"""
Beautifies given *OpenColorIO* colorspace family by applying in succession
the relevant patterns.
Parameters
----------
name : unicode
*OpenColorIO* colorspace family to beautify.
Returns
-------
unicode
Beautified *OpenColorIO* colorspace family.
Examples
--------
>>> beautify_transform_family('vendorSupplied/arri/alexa/v3/EI800')
'ARRI/Alexa/v3/EI800'
"""
return beautify_name(name, TRANSFORM_FAMILY_SUBSTITUTION_PATTERNS)
def beautify_view_transform_name(name):
"""
Beautifies given *OpenColorIO* view transform name by applying in
succession the relevant patterns.
Parameters
----------
name : unicode
*OpenColorIO* view transform name to beautify.
Returns
-------
unicode
Beautified *OpenColorIO* view transform name.
Examples
--------
>>> beautify_view_transform_name(
... 'ACES-OUTPUT - ACES2065-1_to_CIE-XYZ-D65 - SDR-CINEMA_1.0')
'Output - SDR Cinema - ACES 1.0'
"""
basename, version = name.split(ACES_CONFIG_COLORSPACE_NAME_SEPARATOR)[
-1].split('_')
tokens = basename.split('-')
family, genus = (['-'.join(tokens[:2]), '-'.join(tokens[2:])]
if len(tokens) > 2 else [basename, None])
family = beautify_name(family, VIEW_TRANSFORM_NAME_SUBSTITUTION_PATTERNS)
genus = (beautify_name(genus, VIEW_TRANSFORM_NAME_SUBSTITUTION_PATTERNS)
if genus is not None else genus)
return (f'Output - {family} ({genus}) - ACES {version}'
if genus is not None else f'Output - {family} - ACES {version}')
def beautify_display_name(name):
"""
Beautifies given *OpenColorIO* display name by applying in succession the
relevant patterns.
Parameters
----------
name : unicode
*OpenColorIO* display name to beautify.
Returns
-------
unicode
Beautified *OpenColorIO* display name.
Examples
--------
>>> beautify_display_name('DISPLAY - CIE-XYZ-D65_to_sRGB')
'Display - sRGB'
>>> beautify_display_name('rec709')
'Display - Rec. 709'
"""
basename = name.split(ACES_CONFIG_BUILTIN_TRANSFORM_NAME_SEPARATOR)[-1]
name = beautify_name(basename, DISPLAY_NAME_SUBSTITUTION_PATTERNS)
return f'Display - {name}'
def ctl_transform_to_colorspace_name(ctl_transform):
"""
Generates the *OpenColorIO* colorspace name for given *ACES* *CTL*
transform.
Parameters
----------
ctl_transform : CTLTransform
*ACES* *CTL* transform to generate the *OpenColorIO* colorspace name
for.
Returns
-------
unicode
*OpenColorIO* colorspace name.
"""
if ctl_transform.source in (ACES_CONFIG_REFERENCE_COLORSPACE,
ACES_CONFIG_OUTPUT_ENCODING_COLORSPACE):
name = ctl_transform.target
else:
name = ctl_transform.source
return beautify_colorspace_name(name)
def ctl_transform_to_look_name(ctl_transform):
"""
Generates the *OpenColorIO* look name for given *ACES* *CTL*
transform.
Parameters
----------
ctl_transform : CTLTransform
*ACES* *CTL* transform to generate the *OpenColorIO* look name for.
Returns
-------
unicode
*OpenColorIO* look name.
"""
if ctl_transform.source in (ACES_CONFIG_REFERENCE_COLORSPACE,
ACES_CONFIG_OUTPUT_ENCODING_COLORSPACE):
name = ctl_transform.target
else:
name = ctl_transform.source
return beautify_look_name(name)
def ctl_transform_to_transform_family(ctl_transform, analytical=True):
"""
Generates the *OpenColorIO* transform family for given *ACES* *CTL*
transform.
Parameters
----------
ctl_transform : CTLTransform
*ACES* *CTL* transform to generate the *OpenColorIO* transform family
for.
analytical : bool, optional
Whether to generate the *OpenColorIO* transform family that
analytically matches the given *ACES* *CTL* transform, i.e. true to
the *aces-dev* reference but not necessarily user friendly.
Returns
-------
unicode
*OpenColorIO* transform family.
"""
if analytical:
if (ctl_transform.family == 'csc'
and ctl_transform.namespace == 'Academy'):
family = 'CSC'
elif ctl_transform.family == 'input_transform':
family = (f'Input{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}'
f'{ctl_transform.genus}')
elif ctl_transform.family == 'output_transform':
family = 'Output'
elif ctl_transform.family == 'lmt':
family = 'LMT'
else:
if (ctl_transform.family == 'csc'
and ctl_transform.namespace == 'Academy'):
if re.match('ACES|ADX', ctl_transform.name):
family = 'ACES'
else:
family = (f'Input{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}'
f'{ctl_transform.genus}')
elif ctl_transform.family == 'input_transform':
family = (f'Input{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}'
f'{ctl_transform.genus}')
elif ctl_transform.family == 'output_transform':
family = 'Output'
elif ctl_transform.family == 'lmt':
family = 'LMT'
return beautify_transform_family(family)
@required('OpenColorIO')
def ctl_transform_to_description(
ctl_transform,
describe=ColorspaceDescriptionStyle.LONG_UNION,
factory=colorspace_factory,
**kwargs):
"""
Generates the *OpenColorIO* colorspace or look description for given
*ACES* *CTL* transform.
Parameters
----------
ctl_transform : CTLTransform
*ACES* *CTL* transform to generate the *OpenColorIO* colorspace for.
describe : int, optional
Any value from the
:class:`opencolorio_config_aces.ColorspaceDescriptionStyle` enum.
factory : callable, optional
Factory used to adjust the code paths because of slight difference
of signature between the *OpenColorIO* colorspace and look.
Other Parameters
----------------
\\**kwargs : dict, optional
Keywords arguments for the
:func:`opencolorio_config_aces.colorspace_factory` definition.
Returns
-------
unicode
*OpenColorIO* colorspace or look description.
"""
import PyOpenColorIO as ocio
description = None
if describe != ColorspaceDescriptionStyle.NONE:
description = []
if describe in (ColorspaceDescriptionStyle.OPENCOLORIO,
ColorspaceDescriptionStyle.SHORT_UNION,
ColorspaceDescriptionStyle.LONG_UNION):
forward, inverse = ([
'to_reference',
'from_reference',
] if factory is colorspace_factory else [
'forward_transform',
'inverse_transform',
])
transforms = [
transform for transform in (kwargs.get(forward),
kwargs.get(inverse))
if transform is not None
]
transform = next(iter(transforms), None)
if isinstance(transform, ocio.BuiltinTransform):
description.append(transform.getDescription())
if describe in (ColorspaceDescriptionStyle.ACES,
ColorspaceDescriptionStyle.ACES
| ColorspaceDescriptionStyle.SHORT,
ColorspaceDescriptionStyle.SHORT_UNION,
ColorspaceDescriptionStyle.LONG_UNION):
if len(description) > 0:
description.append('')
aces_transform_id = (
ctl_transform.aces_transform_id.aces_transform_id)
if describe in (ColorspaceDescriptionStyle.ACES,
ColorspaceDescriptionStyle.ACES
| ColorspaceDescriptionStyle.SHORT,
ColorspaceDescriptionStyle.SHORT_UNION):
description.append(f'ACEStransformID: {aces_transform_id}')
else:
description.append('CTL Transform')
description.append(f'{'=' * len(description[-1])}\n')
description.append(f'{ctl_transform.description}\n')
description.append(f'ACEStransformID: {aces_transform_id}')
description = '\n'.join(description)
return description
def ctl_transform_to_colorspace(ctl_transform,
describe=ColorspaceDescriptionStyle.LONG_UNION,
analytical=True,
**kwargs):
"""
Generates the *OpenColorIO* colorspace for given *ACES* *CTL* transform.
Parameters
----------
ctl_transform : CTLTransform
*ACES* *CTL* transform to generate the *OpenColorIO* colorspace for.
describe : int, optional
Any value from the
:class:`opencolorio_config_aces.ColorspaceDescriptionStyle` enum.
analytical : bool, optional
Whether to generate the *OpenColorIO* transform family that
analytically matches the given *ACES* *CTL* transform, i.e. true to
the *aces-dev* reference but not necessarily user friendly.
Other Parameters
----------------
\\**kwargs : dict, optional
Keywords arguments for the
:func:`opencolorio_config_aces.colorspace_factory` definition.
Returns
-------
ColorSpace
*OpenColorIO* colorspace.
"""
name = ctl_transform_to_colorspace_name(ctl_transform)
family = ctl_transform_to_transform_family(ctl_transform, analytical)
description = ctl_transform_to_description(ctl_transform, describe,
colorspace_factory, **kwargs)
settings = {
'name': (f'{beautify_colorspace_name(family)}'
f'{ACES_CONFIG_COLORSPACE_NAME_SEPARATOR}'
f'{name}'),
'family':
family,
'description':
description,
}
settings.update(kwargs)
colorspace = colorspace_factory(**settings)
return colorspace
def ctl_transform_to_look(ctl_transform,
describe=ColorspaceDescriptionStyle.LONG_UNION,
analytical=True,
**kwargs):
"""
Generates the *OpenColorIO* look for given *ACES* *CTL* transform.
Parameters
----------
ctl_transform : CTLTransform
*ACES* *CTL* transform to generate the *OpenColorIO* look for.
describe : int, optional
Any value from the
:class:`opencolorio_config_aces.ColorspaceDescriptionStyle` enum.
analytical : bool, optional
Whether to generate the *OpenColorIO* transform family that
analytically matches the given *ACES* *CTL* transform, i.e. true to
the *aces-dev* reference but not necessarily user friendly.
Other Parameters
----------------
\\**kwargs : dict, optional
Keywords arguments for the
:func:`opencolorio_config_aces.look_factory` definition.
Returns
-------
ColorSpace
*OpenColorIO* look.
"""
name = ctl_transform_to_look_name(ctl_transform)
family = ctl_transform_to_transform_family(ctl_transform, analytical)
description = ctl_transform_to_description(ctl_transform, describe,
look_factory, **kwargs)
settings = {
'name': (f'{beautify_colorspace_name(family)}'
f'{ACES_CONFIG_COLORSPACE_NAME_SEPARATOR}'
f'{name}'),
'description':
description,
}
settings.update(kwargs)
look = look_factory(**settings)
return look
@required('OpenColorIO')
def create_builtin_transform(style):
"""
Creates an *OpenColorIO* builtin transform for given style.
If the style does not exist, a placeholder transform is used in place
of the builtin transform.
Parameters
----------
style : unicode
*OpenColorIO* builtin transform style
Returns
-------
BuiltinTransform
*OpenColorIO* builtin transform for given style.
"""
import PyOpenColorIO as ocio
builtin_transform = ocio.BuiltinTransform()
try:
builtin_transform.setStyle(style)
except ocio.Exception:
logging.warning(f'{style} style is not defined, '
f'using a placeholder "FileTransform" instead!')
builtin_transform = ocio.FileTransform()
builtin_transform.setSrc(style)
return builtin_transform
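# Illustrative usage of "create_builtin_transform" (a sketch only; the style name is an
# assumption and must exist in the installed "PyOpenColorIO" builtin transform registry,
# otherwise the placeholder "FileTransform" described above is returned):
#
#   transform = create_builtin_transform('ACEScct_to_ACES2065-1')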
@required('OpenColorIO')
def style_to_view_transform(style,
ctl_transforms,
describe=ColorspaceDescriptionStyle.LONG_UNION):
"""
Creates an *OpenColorIO* view transform for given style.
Parameters
----------
style : unicode
*OpenColorIO* builtin transform style
ctl_transforms : array_like
Array of :class:`opencolorio_config_aces.config.reference.CTLTransform`
class instances corresponding to the given style.
describe : int, optional
Any value from the
:class:`opencolorio_config_aces.ColorspaceDescriptionStyle` enum.
Returns
-------
ViewTransform
*OpenColorIO* view transform for given style.
"""
import PyOpenColorIO as ocio
name = beautify_view_transform_name(style)
builtin_transform = ocio.BuiltinTransform(style)
description = None
if describe != ColorspaceDescriptionStyle.NONE:
description = []
if describe in (ColorspaceDescriptionStyle.OPENCOLORIO,
ColorspaceDescriptionStyle.SHORT_UNION,
ColorspaceDescriptionStyle.LONG_UNION):
description.append(builtin_transform.getDescription())
if describe in (ColorspaceDescriptionStyle.ACES,
ColorspaceDescriptionStyle.ACES
| ColorspaceDescriptionStyle.SHORT,
ColorspaceDescriptionStyle.SHORT_UNION,
ColorspaceDescriptionStyle.LONG_UNION):
aces_transform_ids, aces_descriptions = zip(
*[(ctl_transform.aces_transform_id.aces_transform_id,
ctl_transform.description)
for ctl_transform in ctl_transforms])
if len(description) > 0:
description.append('')
if describe in (ColorspaceDescriptionStyle.ACES
| ColorspaceDescriptionStyle.SHORT,
ColorspaceDescriptionStyle.SHORT_UNION):
description.extend([
f'ACEStransformID: {aces_transform_id}'
for aces_transform_id in aces_transform_ids
])
else:
description.append(
f'CTL Transform'
f'{'s' if len(aces_transform_ids) >= 2 else ''}')
description.append(f'{'=' * len(description[-1])}\n')
description.append(f'\n{'-' * 80}\n\n'.join([
(f'{aces_descriptions[i]}\n\n'
f'ACEStransformID: {aces_transform_id}\n')
for i, aces_transform_id in enumerate(aces_transform_ids)
]))
description = '\n'.join(description)
view_transform = view_transform_factory(
name, from_reference=builtin_transform, description=description)
return view_transform
@required('OpenColorIO')
def style_to_display_colorspace(
style, describe=ColorspaceDescriptionStyle.OPENCOLORIO, **kwargs):
"""
Creates an *OpenColorIO* display colorspace for given style.
Parameters
----------
style : unicode
*OpenColorIO* builtin transform style
describe : int, optional
Any value from the
:class:`opencolorio_config_aces.ColorspaceDescriptionStyle` enum.
Other Parameters
----------------
\\**kwargs : dict, optional
Keywords arguments for the
:func:`opencolorio_config_aces.colorspace_factory` definition.
Returns
-------
ColorSpace
*OpenColorIO* display colorspace for given style.
"""
import PyOpenColorIO as ocio
kwargs.setdefault('family', ACES_CONFIG_DISPLAY_FAMILY)
name = beautify_display_name(style)
builtin_transform = ocio.BuiltinTransform(style)
description = None
if describe != ColorspaceDescriptionStyle.NONE:
description = []
if describe in (ColorspaceDescriptionStyle.OPENCOLORIO,
ColorspaceDescriptionStyle.SHORT_UNION,
ColorspaceDescriptionStyle.LONG_UNION):
description.append(builtin_transform.getDescription())
description = '\n'.join(description)
settings = {
'name': name,
'family': ACES_CONFIG_DISPLAY_FAMILY,
'description': description,
'from_reference': builtin_transform,
'reference_space': ocio.REFERENCE_SPACE_DISPLAY,
}
settings.update(kwargs)
colorspace = colorspace_factory(**settings)
return colorspace
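# Illustrative usage of "style_to_display_colorspace", reusing the style from the
# "beautify_display_name" docstring example above (a sketch only, assuming the style is
# available in the installed "PyOpenColorIO" builtin transform registry):
#
#   colorspace = style_to_display_colorspace('DISPLAY - CIE-XYZ-D65_to_sRGB')
#   colorspace.getName()  # 'Display - sRGB'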
@required('OpenColorIO')
def generate_config_aces(
config_name=None,
validate=True,
describe=ColorspaceDescriptionStyle.SHORT_UNION,
config_mapping_file_path=ACES_CONFIG_REFERENCE_MAPPING_FILE_PATH,
analytical=True,
additional_data=False):
"""
Generates the *aces-dev* reference implementation *OpenColorIO* Config
using the *Mapping* method.
The Config generation is constrained by a *CSV* file exported from the
*Reference Config - Mapping* sheet from a
`Google Sheets file <https://docs.google.com/spreadsheets/d/\
1SXPt-USy3HlV2G2qAvh9zit6ZCINDOlfKT07yXJdWLg>`__. The *Google Sheets* file
was originally authored using the output of the *aces-dev* conversion graph
to support the discussions of the *OpenColorIO* *Working Group* on the
design of the *aces-dev* reference implementation *OpenColorIO* Config.
The resulting mapping is the outcome of those discussions and leverages the
new *OpenColorIO 2* display architecture while factoring many transforms.
Parameters
----------
config_name : unicode, optional
*OpenColorIO* config file name, if given the config will be written to
disk.
validate : bool, optional
Whether to validate the config.
describe : int, optional
Any value from the
:class:`opencolorio_config_aces.ColorspaceDescriptionStyle` enum.
config_mapping_file_path : unicode, optional
Path to the *CSV* mapping file used by the *Mapping* method.
analytical : bool, optional
Whether to generate *OpenColorIO* transform families that analytically
match the given *ACES* *CTL* transform, i.e. true to the *aces-dev*
reference but not necessarily user friendly.
additional_data : bool, optional
Whether to return additional data.
Returns
-------
Config or tuple
*OpenColorIO* config or tuple of *OpenColorIO* config,
:class:`opencolorio_config_aces.ConfigData` class instance and dict of
*OpenColorIO* colorspaces and
:class:`opencolorio_config_aces.config.reference.CTLTransform` class
instances.
"""
import PyOpenColorIO as ocio
ctl_transforms = unclassify_ctl_transforms(
classify_aces_ctl_transforms(discover_aces_ctl_transforms()))
builtin_transforms = [
builtin for builtin in ocio.BuiltinTransformRegistry()
]
config_mapping = defaultdict(list)
with open(config_mapping_file_path) as csv_file:
dict_reader = csv.DictReader(
csv_file,
delimiter=',',
fieldnames=[
'ordering',
'aces_transform_id',
'builtin_transform_style',
'linked_display_colorspace_style',
'interface',
'encoding',
'categories',
])
# Skipping the first header line.
next(dict_reader)
for transform_data in dict_reader:
# Checking whether the "BuiltinTransform" style exists.
style = transform_data['builtin_transform_style']
if style:
assert (style in builtin_transforms), (
f'"{style}" "BuiltinTransform" style does not '
f'exist!')
# Checking whether the linked "DisplayColorspace"
# "BuiltinTransform" style exists.
style = transform_data['linked_display_colorspace_style']
if style:
assert (style in builtin_transforms), (
f'"{style}" "BuiltinTransform" style does not '
f'exist!')
# Finding the "CTLTransform" class instance that matches given
# "ACEStransformID", if it does not exist, there is a critical
# mismatch in the mapping with *aces-dev*.
aces_transform_id = transform_data['aces_transform_id']
filtered_ctl_transforms = [
ctl_transform for ctl_transform in ctl_transforms
if ctl_transform.aces_transform_id.aces_transform_id ==
aces_transform_id
]
ctl_transform = next(iter(filtered_ctl_transforms), None)
assert ctl_transform is not None, (
f'"aces-dev" has no transform with "{aces_transform_id}" '
f'ACEStransformID, please cross-check the '
f'"{config_mapping_file_path}" config mapping file and '
f'the "aces-dev" "CTL" transforms!')
transform_data['ctl_transform'] = ctl_transform
config_mapping[transform_data['builtin_transform_style']].append(
transform_data)
colorspaces = []
looks = []
displays, display_names = [], []
view_transforms, view_transform_names = [], []
shared_views = []
aces_family_prefix = 'CSC' if analytical else 'ACES'
scene_reference_colorspace = colorspace_factory(
f'{aces_family_prefix} - {ACES_CONFIG_REFERENCE_COLORSPACE}',
'ACES',
description=(
'The "Academy Color Encoding System" reference colorspace.'),
encoding='scene-linear')
display_reference_colorspace = colorspace_factory(
'CIE-XYZ-D65',
description='The "CIE XYZ (D65)" display connection colorspace.',
reference_space=ocio.REFERENCE_SPACE_DISPLAY)
raw_colorspace = colorspace_factory(
'Utility - Raw',
'Utility',
description='The utility "Raw" colorspace.',
is_data=True)
colorspaces += [
scene_reference_colorspace,
display_reference_colorspace,
raw_colorspace,
]
for style, transforms_data in config_mapping.items():
if transforms_data[0]['interface'] == 'ViewTransform':
view_transform = style_to_view_transform(style, [
transform_data['ctl_transform']
for transform_data in transforms_data
], describe)
view_transforms.append(view_transform)
view_transform_name = view_transform.getName()
view_transform_names.append(view_transform_name)
for transform_data in transforms_data:
display_style = transform_data[
'linked_display_colorspace_style']
display = style_to_display_colorspace(
display_style,
encoding=transform_data.get('encoding'),
categories=transform_data.get('categories'))
display_name = display.getName()
if display_name not in display_names:
displays.append(display)
display_names.append(display_name)
shared_views.append({
'display': display_name,
'view': view_transform_name,
'view_transform': view_transform_name,
})
else:
for transform_data in transforms_data:
ctl_transform = transform_data['ctl_transform']
if transform_data['interface'] == 'Look':
look = ctl_transform_to_look(
ctl_transform,
describe,
analytical=analytical,
forward_transform=create_builtin_transform(style),
process_space=scene_reference_colorspace.getName(),
)
looks.append(look)
else:
colorspace = ctl_transform_to_colorspace(
ctl_transform,
describe,
analytical=analytical,
to_reference=create_builtin_transform(style),
encoding=transform_data.get('encoding'),
categories=transform_data.get('categories'))
colorspaces.append(colorspace)
untonemapped_view_transform = view_transform_factory(
'Un-tone-mapped',
from_reference=ocio.BuiltinTransform(
'UTILITY - ACES-AP0_to_CIE-XYZ-D65_BFD'),
)
untonemapped_view_transform_name = untonemapped_view_transform.getName()
for display in display_names:
shared_views.append({
'display': display,
'view': untonemapped_view_transform_name,
'view_transform': untonemapped_view_transform_name,
})
data = ConfigData(
description=(
f'The "Academy Color Encoding System" (ACES) "Reference Config".'
f'\n\n'
f'This "OpenColorIO" config is a strict and quasi-analytical '
f'implementation of "aces-dev" and is designed as a reference for '
f'software developers. It is not a replacement for the previous '
f'"ACES" configs nor the "ACES Studio Config".'
f'\n\n'
f'Generated with "OpenColorIO-Config-ACES" {git_describe()} '
f'on the {datetime.now().strftime('%Y/%m/%d at %H:%M')}.'),
roles={
ocio.ROLE_COLOR_TIMING: f'{aces_family_prefix} - ACEScct',
ocio.ROLE_COMPOSITING_LOG: f'{aces_family_prefix} - ACEScct',
ocio.ROLE_DATA: 'Utility - Raw',
ocio.ROLE_DEFAULT: scene_reference_colorspace.getName(),
ocio.ROLE_INTERCHANGE_DISPLAY:
display_reference_colorspace.getName(),
ocio.ROLE_INTERCHANGE_SCENE: scene_reference_colorspace.getName(),
ocio.ROLE_REFERENCE: scene_reference_colorspace.getName(),
ocio.ROLE_RENDERING: f'{aces_family_prefix} - ACEScg',
ocio.ROLE_SCENE_LINEAR: f'{aces_family_prefix} - ACEScg',
},
colorspaces=colorspaces + displays,
looks=looks,
view_transforms=view_transforms + [untonemapped_view_transform],
shared_views=shared_views,
views=shared_views + [{
'display': display,
'view': 'Raw',
'colorspace': 'Utility - Raw'
} for display in display_names],
active_displays=display_names,
active_views=view_transform_names + ['Raw'],
file_rules=[{
'name': 'Default',
'colorspace': scene_reference_colorspace.getName()
}],
inactive_colorspaces=['CIE-XYZ-D65'],
default_view_transform=untonemapped_view_transform.getName(),
profile_version=2)
config = generate_config(data, config_name, validate)
if additional_data:
return config, data
else:
return config
if __name__ == '__main__':
import os
import opencolorio_config_aces
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
build_directory = os.path.join(opencolorio_config_aces.__path__[0], '..',
'build')
if not os.path.exists(build_directory):
os.makedirs(build_directory)
config, data = generate_config_aces(
config_name=os.path.join(build_directory,
'config-aces-reference.ocio'),
analytical=False,
additional_data=True)
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
"""
*aces-dev* Reference Config Generator
=====================================
Defines various objects related to the generation of the *aces-dev* reference
*OpenColorIO* config:
- :func:`opencolorio_config_aces.generate_config_aces`
"""
import csv
import logging
import re
from collections import defaultdict
from datetime import datetime
from enum import Flag, auto
from pathlib import Path
from opencolorio_config_aces.config.generation import (
ConfigData, colorspace_factory, generate_config, look_factory,
view_transform_factory)
from opencolorio_config_aces.config.reference import (
classify_aces_ctl_transforms, discover_aces_ctl_transforms,
unclassify_ctl_transforms)
from opencolorio_config_aces.utilities import git_describe, required
__author__ = 'OpenColorIO Contributors'
__copyright__ = 'Copyright Contributors to the OpenColorIO Project.'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'OpenColorIO Contributors'
__email__ = 'ocio-dev@lists.aswf.io'
__status__ = 'Production'
__all__ = [
'ACES_CONFIG_REFERENCE_MAPPING_FILE_PATH',
'ACES_CONFIG_REFERENCE_COLORSPACE',
'ACES_CONFIG_OUTPUT_ENCODING_COLORSPACE',
'ACES_CONFIG_COLORSPACE_NAME_SEPARATOR',
'ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR',
'ACES_CONFIG_BUILTIN_TRANSFORM_NAME_SEPARATOR',
'ACES_CONFIG_DISPLAY_FAMILY', 'COLORSPACE_NAME_SUBSTITUTION_PATTERNS',
'LOOK_NAME_SUBSTITUTION_PATTERNS',
'TRANSFORM_FAMILY_SUBSTITUTION_PATTERNS',
'VIEW_TRANSFORM_NAME_SUBSTITUTION_PATTERNS',
'DISPLAY_NAME_SUBSTITUTION_PATTERNS', 'ColorspaceDescriptionStyle',
'beautify_name', 'beautify_colorspace_name', 'beautify_look_name',
'beautify_transform_family', 'beautify_view_transform_name',
'beautify_display_name', 'ctl_transform_to_colorspace_name',
'ctl_transform_to_look_name', 'ctl_transform_to_transform_family',
'ctl_transform_to_description', 'ctl_transform_to_colorspace',
'ctl_transform_to_look', 'create_builtin_transform',
'style_to_view_transform', 'style_to_display_colorspace',
'generate_config_aces'
]
ACES_CONFIG_REFERENCE_MAPPING_FILE_PATH = (
Path(__file__).parents[0] / 'resources' /
'OpenColorIO-ACES-Config Transforms - Reference Config - Mapping.csv')
"""
Path to the *ACES* *CTL* transforms to *OpenColorIO* colorspaces mapping file.
ACES_CONFIG_REFERENCE_MAPPING_FILE_PATH : unicode
"""
ACES_CONFIG_REFERENCE_COLORSPACE = 'ACES2065-1'
"""
*OpenColorIO* config reference colorspace.
ACES_CONFIG_REFERENCE_COLORSPACE : unicode
"""
ACES_CONFIG_OUTPUT_ENCODING_COLORSPACE = 'OCES'
"""
*OpenColorIO* config output encoding colorspace.
ACES_CONFIG_OUTPUT_ENCODING_COLORSPACE : unicode
"""
ACES_CONFIG_COLORSPACE_NAME_SEPARATOR = ' - '
"""
*OpenColorIO* config colorspace name separator.
ACES_CONFIG_COLORSPACE_NAME_SEPARATOR : unicode
"""
ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR = '/'
"""
*OpenColorIO* config colorspace family separator.
ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR : unicode
"""
ACES_CONFIG_BUILTIN_TRANSFORM_NAME_SEPARATOR = '_to_'
"""
*OpenColorIO* config *BuiltinTransform* name separator.
ACES_CONFIG_BUILTIN_TRANSFORM_NAME_SEPARATOR : unicode
"""
ACES_CONFIG_DISPLAY_FAMILY = 'Display'
"""
*OpenColorIO* config display family.
ACES_CONFIG_DISPLAY_FAMILY : unicode
"""
COLORSPACE_NAME_SUBSTITUTION_PATTERNS = {
'ACES_0_1_1': 'ACES 0.1.1',
'ACES_0_2_2': 'ACES 0.2.2',
'ACES_0_7_1': 'ACES 0.7.1',
'_7nits': '',
'_15nits': '',
'_': ' ',
'-raw': '',
'-': ' ',
'\\b(\\w+)limited\\b': '(\\1 Limited)',
'\\b(\\d+)nits\\b': '(\\1 nits)',
'RGBmonitor': 'sRGB',
'Rec709': 'Rec. 709',
'Rec2020': 'Rec. 2020',
}
"""
*OpenColorIO* colorspace name substitution patterns.
Notes
-----
- The substitutions are evaluated in order.
COLORSPACE_NAME_SUBSTITUTION_PATTERNS : dict
"""
COLORSPACE_NAME_SUBSTITUTION_PATTERNS.update({
# Input transforms also use the "family" name and thus need beautifying.
(f'{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}Alexa'
f'{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}v\\d+'
f'{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}.*'):
'',
f'{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}':
ACES_CONFIG_COLORSPACE_NAME_SEPARATOR,
})
LOOK_NAME_SUBSTITUTION_PATTERNS = {
# TODO: Implement support for callable patterns.
# The following one should be a dedicated definition/callable.
'BlueLightArtifactFix': 'Blue Light Artifact Fix'
}
"""
*OpenColorIO* look name substitution patterns.
Notes
-----
- The substitutions are evaluated in order.
LOOK_NAME_SUBSTITUTION_PATTERNS : dict
"""
TRANSFORM_FAMILY_SUBSTITUTION_PATTERNS = {
'\\\\': ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR,
'vendorSupplied[/\\\\]': '',
'arri': 'ARRI',
'alexa': 'Alexa',
'canon': 'Canon',
'panasonic': 'Panasonic',
'red': 'RED',
'sony': 'Sony',
}
"""
*OpenColorIO* transform family substitution patterns.
Notes
-----
- The substitutions are evaluated in order.
TRANSFORM_FAMILY_SUBSTITUTION_PATTERNS : dict
"""
VIEW_TRANSFORM_NAME_SUBSTITUTION_PATTERNS = {
'7.2nit': '&',
'15nit': '&',
'lim': ' lim',
'nit': ' nits',
'sim': ' sim on',
'CINEMA': 'Cinema',
'VIDEO': 'Video',
'REC1886': 'Rec.1886',
'REC709': 'Rec.709',
'REC2020': 'Rec.2020',
'-': ' ',
}
"""
*OpenColorIO* view transform name substitution patterns.
VIEW_TRANSFORM_NAME_SUBSTITUTION_PATTERNS : dict
"""
DISPLAY_NAME_SUBSTITUTION_PATTERNS = {
'G2.6-': '',
'-BFD': '',
'REC.1886': 'Rec.1886',
'REC.709': 'Rec.709 Video',
'REC.2020': 'Rec.2020 Video',
'REC.2100': 'Rec.2100',
'-Rec.': ' / Rec.',
'-1000nit': '',
# Legacy Substitutions
'dcdm': 'DCDM',
'p3': 'P3',
'rec709': 'Rec. 709',
'rec2020': 'Rec. 2020',
}
"""
*OpenColorIO* display name substitution patterns.
Notes
-----
- The substitutions are evaluated in order.
DISPLAY_NAME_SUBSTITUTION_PATTERNS : dict
"""
class ColorspaceDescriptionStyle(Flag):
"""
Enum storing the various *OpenColorIO* colorspace description styles.
"""
NONE = auto()
ACES = auto()
OPENCOLORIO = auto()
SHORT = auto()
LONG = auto()
SHORT_UNION = ACES | OPENCOLORIO | SHORT
LONG_UNION = ACES | OPENCOLORIO | LONG
def beautify_name(name, patterns):
"""
Beautifies given name by applying in succession the given patterns.
Parameters
----------
name : unicode
Name to beautify.
patterns : dict
Dictionary of regular expression patterns and substitution to apply
onto the name.
Returns
-------
unicode
Beautified name.
Examples
--------
>>> beautify_name(
... 'Rec709_100nits_dim',
... COLORSPACE_NAME_SUBSTITUTION_PATTERNS)
'Rec. 709 (100 nits) dim'
"""
for pattern, substitution in patterns.items():
name = re.sub(pattern, substitution, name)
return name.strip()
def beautify_colorspace_name(name):
"""
Beautifies given *OpenColorIO* colorspace name by applying in succession
the relevant patterns.
Parameters
----------
name : unicode
*OpenColorIO* colorspace name to beautify.
Returns
-------
unicode
Beautified *OpenColorIO* colorspace name.
Examples
--------
>>> beautify_colorspace_name('Rec709_100nits_dim')
'Rec. 709 (100 nits) dim'
"""
return beautify_name(name, COLORSPACE_NAME_SUBSTITUTION_PATTERNS)
def beautify_look_name(name):
"""
Beautifies given *OpenColorIO* look name by applying in succession the
relevant patterns.
Parameters
----------
name : unicode
*OpenColorIO* look name to beautify.
Returns
-------
unicode
Beautified *OpenColorIO* look name.
Examples
--------
>>> beautify_look_name('BlueLightArtifactFix')
'Blue Light Artifact Fix'
"""
return beautify_name(name, LOOK_NAME_SUBSTITUTION_PATTERNS)
def beautify_transform_family(name):
"""
Beautifies given *OpenColorIO* colorspace family by applying in succession
the relevant patterns.
Parameters
----------
name : unicode
*OpenColorIO* colorspace family to beautify.
Returns
-------
unicode
Beautified *OpenColorIO* colorspace family.
Examples
--------
>>> beautify_transform_family('vendorSupplied/arri/alexa/v3/EI800')
'ARRI/Alexa/v3/EI800'
"""
return beautify_name(name, TRANSFORM_FAMILY_SUBSTITUTION_PATTERNS)
def beautify_view_transform_name(name):
"""
Beautifies given *OpenColorIO* view transform name by applying in
succession the relevant patterns.
Parameters
----------
name : unicode
*OpenColorIO* view transform name to beautify.
Returns
-------
unicode
Beautified *OpenColorIO* view transform name.
Examples
--------
>>> beautify_view_transform_name(
... 'ACES-OUTPUT - ACES2065-1_to_CIE-XYZ-D65 - SDR-CINEMA_1.0')
'Output - SDR Cinema - ACES 1.0'
"""
basename, version = name.split(ACES_CONFIG_COLORSPACE_NAME_SEPARATOR)[
-1].split('_')
tokens = basename.split('-')
family, genus = (['-'.join(tokens[:2]), '-'.join(tokens[2:])]
if len(tokens) > 2 else [basename, None])
family = beautify_name(family, VIEW_TRANSFORM_NAME_SUBSTITUTION_PATTERNS)
genus = (beautify_name(genus, VIEW_TRANSFORM_NAME_SUBSTITUTION_PATTERNS)
if genus is not None else genus)
return (f'Output - {family} ({genus}) - ACES {version}'
if genus is not None else f'Output - {family} - ACES {version}')
def beautify_display_name(name):
"""
Beautifies given *OpenColorIO* display name by applying in succession the
relevant patterns.
Parameters
----------
name : unicode
*OpenColorIO* display name to beautify.
Returns
-------
unicode
Beautified *OpenColorIO* display name.
Examples
--------
>>> beautify_display_name('DISPLAY - CIE-XYZ-D65_to_sRGB')
'Display - sRGB'
>>> beautify_display_name('rec709')
'Display - Rec. 709'
"""
basename = name.split(ACES_CONFIG_BUILTIN_TRANSFORM_NAME_SEPARATOR)[-1]
name = beautify_name(basename, DISPLAY_NAME_SUBSTITUTION_PATTERNS)
return f'Display - {name}'
def ctl_transform_to_colorspace_name(ctl_transform):
"""
Generates the *OpenColorIO* colorspace name for given *ACES* *CTL*
transform.
Parameters
----------
ctl_transform : CTLTransform
*ACES* *CTL* transform to generate the *OpenColorIO* colorspace name
for.
Returns
-------
unicode
*OpenColorIO* colorspace name.
"""
if ctl_transform.source in (ACES_CONFIG_REFERENCE_COLORSPACE,
ACES_CONFIG_OUTPUT_ENCODING_COLORSPACE):
name = ctl_transform.target
else:
name = ctl_transform.source
return beautify_colorspace_name(name)
def ctl_transform_to_look_name(ctl_transform):
"""
Generates the *OpenColorIO* look name for given *ACES* *CTL*
transform.
Parameters
----------
ctl_transform : CTLTransform
*ACES* *CTL* transform to generate the *OpenColorIO* look name for.
Returns
-------
unicode
*OpenColorIO* look name.
"""
if ctl_transform.source in (ACES_CONFIG_REFERENCE_COLORSPACE,
ACES_CONFIG_OUTPUT_ENCODING_COLORSPACE):
name = ctl_transform.target
else:
name = ctl_transform.source
return beautify_look_name(name)
def ctl_transform_to_transform_family(ctl_transform, analytical=True):
"""
Generates the *OpenColorIO* transform family for given *ACES* *CTL*
transform.
Parameters
----------
ctl_transform : CTLTransform
*ACES* *CTL* transform to generate the *OpenColorIO* transform family
for.
analytical : bool, optional
Whether to generate the *OpenColorIO* transform family that
analytically matches the given *ACES* *CTL* transform, i.e. true to
the *aces-dev* reference but not necessarily user friendly.
Returns
-------
unicode
*OpenColorIO* transform family.
"""
if analytical:
if (ctl_transform.family == 'csc'
and ctl_transform.namespace == 'Academy'):
family = 'CSC'
elif ctl_transform.family == 'input_transform':
family = (f'Input{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}'
f'{ctl_transform.genus}')
elif ctl_transform.family == 'output_transform':
family = 'Output'
elif ctl_transform.family == 'lmt':
family = 'LMT'
else:
if (ctl_transform.family == 'csc'
and ctl_transform.namespace == 'Academy'):
if re.match('ACES|ADX', ctl_transform.name):
family = 'ACES'
else:
family = (f'Input{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}'
f'{ctl_transform.genus}')
elif ctl_transform.family == 'input_transform':
family = (f'Input{ACES_CONFIG_COLORSPACE_FAMILY_SEPARATOR}'
f'{ctl_transform.genus}')
elif ctl_transform.family == 'output_transform':
family = 'Output'
elif ctl_transform.family == 'lmt':
family = 'LMT'
return beautify_transform_family(family)
@required('OpenColorIO')
def ctl_transform_to_description(
ctl_transform,
describe=ColorspaceDescriptionStyle.LONG_UNION,
factory=colorspace_factory,
**kwargs):
"""
Generates the *OpenColorIO* colorspace or look description for given
*ACES* *CTL* transform.
Parameters
----------
ctl_transform : CTLTransform
*ACES* *CTL* transform to generate the *OpenColorIO* colorspace for.
describe : int, optional
Any value from the
:class:`opencolorio_config_aces.ColorspaceDescriptionStyle` enum.
factory : callable, optional
Factory used to adjust the code paths because of slight difference
of signature between the *OpenColorIO* colorspace and look.
Other Parameters
----------------
\\**kwargs : dict, optional
Keywords arguments for the
:func:`opencolorio_config_aces.colorspace_factory` definition.
Returns
-------
unicode
*OpenColorIO* colorspace or look description.
"""
import PyOpenColorIO as ocio
description = None
if describe != ColorspaceDescriptionStyle.NONE:
description = []
if describe in (ColorspaceDescriptionStyle.OPENCOLORIO,
ColorspaceDescriptionStyle.SHORT_UNION,
ColorspaceDescriptionStyle.LONG_UNION):
forward, inverse = ([
'to_reference',
'from_reference',
] if factory is colorspace_factory else [
'forward_transform',
'inverse_transform',
])
transforms = [
transform for transform in (kwargs.get(forward),
kwargs.get(inverse))
if transform is not None
]
transform = next(iter(transforms), None)
if isinstance(transform, ocio.BuiltinTransform):
description.append(transform.getDescription())
if describe in (ColorspaceDescriptionStyle.ACES,
ColorspaceDescriptionStyle.ACES
| ColorspaceDescriptionStyle.SHORT,
ColorspaceDescriptionStyle.SHORT_UNION,
ColorspaceDescriptionStyle.LONG_UNION):
if len(description) > 0:
description.append('')
aces_transform_id = (
ctl_transform.aces_transform_id.aces_transform_id)
if describe in (ColorspaceDescriptionStyle.ACES,
ColorspaceDescriptionStyle.ACES
| ColorspaceDescriptionStyle.SHORT,
ColorspaceDescriptionStyle.SHORT_UNION):
description.append(f'ACEStransformID: {aces_transform_id}')
else:
description.append('CTL Transform')
description.append(f'{"=" * len(description[-1])}\n')
description.append(f'{ctl_transform.description}\n')
description.append(f'ACEStransformID: {aces_transform_id}')
description = '\n'.join(description)
return description
def ctl_transform_to_colorspace(ctl_transform,
describe=ColorspaceDescriptionStyle.LONG_UNION,
analytical=True,
**kwargs):
"""
Generates the *OpenColorIO* colorspace for given *ACES* *CTL* transform.
Parameters
----------
ctl_transform : CTLTransform
*ACES* *CTL* transform to generate the *OpenColorIO* colorspace for.
describe : int, optional
Any value from the
:class:`opencolorio_config_aces.ColorspaceDescriptionStyle` enum.
analytical : bool, optional
Whether to generate the *OpenColorIO* transform family that
analytically matches the given *ACES* *CTL* transform, i.e. true to
the *aces-dev* reference but not necessarily user friendly.
Other Parameters
----------------
\\**kwargs : dict, optional
Keywords arguments for the
:func:`opencolorio_config_aces.colorspace_factory` definition.
Returns
-------
ColorSpace
*OpenColorIO* colorspace.
"""
name = ctl_transform_to_colorspace_name(ctl_transform)
family = ctl_transform_to_transform_family(ctl_transform, analytical)
description = ctl_transform_to_description(ctl_transform, describe,
colorspace_factory, **kwargs)
settings = {
'name': (f'{beautify_colorspace_name(family)}'
f'{ACES_CONFIG_COLORSPACE_NAME_SEPARATOR}'
f'{name}'),
'family':
family,
'description':
description,
}
settings.update(kwargs)
colorspace = colorspace_factory(**settings)
return colorspace
def ctl_transform_to_look(ctl_transform,
describe=ColorspaceDescriptionStyle.LONG_UNION,
analytical=True,
**kwargs):
"""
Generates the *OpenColorIO* look for given *ACES* *CTL* transform.
Parameters
----------
ctl_transform : CTLTransform
*ACES* *CTL* transform to generate the *OpenColorIO* look for.
describe : int, optional
Any value from the
:class:`opencolorio_config_aces.ColorspaceDescriptionStyle` enum.
analytical : bool, optional
Whether to generate the *OpenColorIO* transform family that
analytically matches the given *ACES* *CTL* transform, i.e. true to
the *aces-dev* reference but not necessarily user friendly.
Other Parameters
----------------
\\**kwargs : dict, optional
Keywords arguments for the
:func:`opencolorio_config_aces.look_factory` definition.
Returns
-------
ColorSpace
*OpenColorIO* look.
"""
name = ctl_transform_to_look_name(ctl_transform)
family = ctl_transform_to_transform_family(ctl_transform, analytical)
description = ctl_transform_to_description(ctl_transform, describe,
look_factory, **kwargs)
settings = {
'name': (f'{beautify_colorspace_name(family)}'
f'{ACES_CONFIG_COLORSPACE_NAME_SEPARATOR}'
f'{name}'),
'description':
description,
}
settings.update(kwargs)
look = look_factory(**settings)
return look
@required('OpenColorIO')
def create_builtin_transform(style):
"""
Creates an *OpenColorIO* builtin transform for given style.
If the style does not exist, a placeholder transform is used in place
of the builtin transform.
Parameters
----------
style : unicode
*OpenColorIO* builtin transform style
Returns
-------
BuiltinTransform
*OpenColorIO* builtin transform for given style.
"""
import PyOpenColorIO as ocio
builtin_transform = ocio.BuiltinTransform()
try:
builtin_transform.setStyle(style)
except ocio.Exception:
logging.warning(f'{style} style is not defined, '
f'using a placeholder "FileTransform" instead!')
builtin_transform = ocio.FileTransform()
builtin_transform.setSrc(style)
return builtin_transform
@required('OpenColorIO')
def style_to_view_transform(style,
ctl_transforms,
describe=ColorspaceDescriptionStyle.LONG_UNION):
"""
Creates an *OpenColorIO* view transform for given style.
Parameters
----------
style : unicode
*OpenColorIO* builtin transform style
ctl_transforms : array_like
Array of :class:`opencolorio_config_aces.config.reference.CTLTransform`
class instances corresponding to the given style.
describe : int, optional
Any value from the
:class:`opencolorio_config_aces.ColorspaceDescriptionStyle` enum.
Returns
-------
ViewTransform
*OpenColorIO* view transform for given style.
"""
import PyOpenColorIO as ocio
name = beautify_view_transform_name(style)
builtin_transform = ocio.BuiltinTransform(style)
description = None
if describe != ColorspaceDescriptionStyle.NONE:
description = []
if describe in (ColorspaceDescriptionStyle.OPENCOLORIO,
ColorspaceDescriptionStyle.SHORT_UNION,
ColorspaceDescriptionStyle.LONG_UNION):
description.append(builtin_transform.getDescription())
if describe in (ColorspaceDescriptionStyle.ACES,
ColorspaceDescriptionStyle.ACES
| ColorspaceDescriptionStyle.SHORT,
ColorspaceDescriptionStyle.SHORT_UNION,
ColorspaceDescriptionStyle.LONG_UNION):
aces_transform_ids, aces_descriptions = zip(
*[(ctl_transform.aces_transform_id.aces_transform_id,
ctl_transform.description)
for ctl_transform in ctl_transforms])
if len(description) > 0:
description.append('')
if describe in (ColorspaceDescriptionStyle.ACES
| ColorspaceDescriptionStyle.SHORT,
ColorspaceDescriptionStyle.SHORT_UNION):
description.extend([
f'ACEStransformID: {aces_transform_id}'
for aces_transform_id in aces_transform_ids
])
else:
description.append(
f'CTL Transform'
f'{"s" if len(aces_transform_ids) >= 2 else ""}')
description.append(f'{"=" * len(description[-1])}\n')
description.append(f'\n{"-" * 80}\n\n'.join([
(f'{aces_descriptions[i]}\n\n'
f'ACEStransformID: {aces_transform_id}\n')
for i, aces_transform_id in enumerate(aces_transform_ids)
]))
description = '\n'.join(description)
view_transform = view_transform_factory(
name, from_reference=builtin_transform, description=description)
return view_transform
@required('OpenColorIO')
def style_to_display_colorspace(
style, describe=ColorspaceDescriptionStyle.OPENCOLORIO, **kwargs):
"""
Creates an *OpenColorIO* display colorspace for given style.
Parameters
----------
style : unicode
*OpenColorIO* builtin transform style
describe : int, optional
Any value from the
:class:`opencolorio_config_aces.ColorspaceDescriptionStyle` enum.
Other Parameters
----------------
\\**kwargs : dict, optional
Keywords arguments for the
:func:`opencolorio_config_aces.colorspace_factory` definition.
Returns
-------
ColorSpace
*OpenColorIO* display colorspace for given style.
"""
import PyOpenColorIO as ocio
kwargs.setdefault('family', ACES_CONFIG_DISPLAY_FAMILY)
name = beautify_display_name(style)
builtin_transform = ocio.BuiltinTransform(style)
description = None
if describe != ColorspaceDescriptionStyle.NONE:
description = []
if describe in (ColorspaceDescriptionStyle.OPENCOLORIO,
ColorspaceDescriptionStyle.SHORT_UNION,
ColorspaceDescriptionStyle.LONG_UNION):
description.append(builtin_transform.getDescription())
description = '\n'.join(description)
settings = {
'name': name,
'family': ACES_CONFIG_DISPLAY_FAMILY,
'description': description,
'from_reference': builtin_transform,
'reference_space': ocio.REFERENCE_SPACE_DISPLAY,
}
settings.update(kwargs)
colorspace = colorspace_factory(**settings)
return colorspace
@required('OpenColorIO')
def generate_config_aces(
config_name=None,
validate=True,
describe=ColorspaceDescriptionStyle.SHORT_UNION,
config_mapping_file_path=ACES_CONFIG_REFERENCE_MAPPING_FILE_PATH,
analytical=True,
additional_data=False):
"""
Generates the *aces-dev* reference implementation *OpenColorIO* Config
using the *Mapping* method.
The Config generation is constrained by a *CSV* file exported from the
*Reference Config - Mapping* sheet from a
`Google Sheets file <https://docs.google.com/spreadsheets/d/\
1SXPt-USy3HlV2G2qAvh9zit6ZCINDOlfKT07yXJdWLg>`__. The *Google Sheets* file
was originally authored using the output of the *aces-dev* conversion graph
to support the discussions of the *OpenColorIO* *Working Group* on the
design of the *aces-dev* reference implementation *OpenColorIO* Config.
The resulting mapping is the outcome of those discussions and leverages the
new *OpenColorIO 2* display architecture while factoring many transforms.
Parameters
----------
config_name : unicode, optional
*OpenColorIO* config file name, if given the config will be written to
disk.
validate : bool, optional
Whether to validate the config.
describe : int, optional
Any value from the
:class:`opencolorio_config_aces.ColorspaceDescriptionStyle` enum.
config_mapping_file_path : unicode, optional
Path to the *CSV* mapping file used by the *Mapping* method.
analytical : bool, optional
Whether to generate *OpenColorIO* transform families that analytically
match the given *ACES* *CTL* transform, i.e. true to the *aces-dev*
reference but not necessarily user friendly.
additional_data : bool, optional
Whether to return additional data.
Returns
-------
Config or tuple
*OpenColorIO* config or tuple of *OpenColorIO* config,
:class:`opencolorio_config_aces.ConfigData` class instance and dict of
*OpenColorIO* colorspaces and
:class:`opencolorio_config_aces.config.reference.CTLTransform` class
instances.
"""
import PyOpenColorIO as ocio
ctl_transforms = unclassify_ctl_transforms(
classify_aces_ctl_transforms(discover_aces_ctl_transforms()))
builtin_transforms = [
builtin for builtin in ocio.BuiltinTransformRegistry()
]
config_mapping = defaultdict(list)
with open(config_mapping_file_path) as csv_file:
dict_reader = csv.DictReader(
csv_file,
delimiter=',',
fieldnames=[
'ordering',
'aces_transform_id',
'builtin_transform_style',
'linked_display_colorspace_style',
'interface',
'encoding',
'categories',
])
# Skipping the first header line.
next(dict_reader)
for transform_data in dict_reader:
# Checking whether the "BuiltinTransform" style exists.
style = transform_data['builtin_transform_style']
if style:
assert (style in builtin_transforms), (
f'"{style}" "BuiltinTransform" style does not '
f'exist!')
# Checking whether the linked "DisplayColorspace"
# "BuiltinTransform" style exists.
style = transform_data['linked_display_colorspace_style']
if style:
assert (style in builtin_transforms), (
f'"{style}" "BuiltinTransform" style does not '
f'exist!')
# Finding the "CTLTransform" class instance that matches given
# "ACEStransformID", if it does not exist, there is a critical
# mismatch in the mapping with *aces-dev*.
aces_transform_id = transform_data['aces_transform_id']
filtered_ctl_transforms = [
ctl_transform for ctl_transform in ctl_transforms
if ctl_transform.aces_transform_id.aces_transform_id ==
aces_transform_id
]
ctl_transform = next(iter(filtered_ctl_transforms), None)
assert ctl_transform is not None, (
f'"aces-dev" has no transform with "{aces_transform_id}" '
f'ACEStransformID, please cross-check the '
f'"{config_mapping_file_path}" config mapping file and '
f'the "aces-dev" "CTL" transforms!')
transform_data['ctl_transform'] = ctl_transform
config_mapping[transform_data['builtin_transform_style']].append(
transform_data)
colorspaces = []
looks = []
displays, display_names = [], []
view_transforms, view_transform_names = [], []
shared_views = []
aces_family_prefix = 'CSC' if analytical else 'ACES'
scene_reference_colorspace = colorspace_factory(
f'{aces_family_prefix} - {ACES_CONFIG_REFERENCE_COLORSPACE}',
'ACES',
description=(
'The "Academy Color Encoding System" reference colorspace.'),
encoding='scene-linear')
display_reference_colorspace = colorspace_factory(
'CIE-XYZ-D65',
description='The "CIE XYZ (D65)" display connection colorspace.',
reference_space=ocio.REFERENCE_SPACE_DISPLAY)
raw_colorspace = colorspace_factory(
'Utility - Raw',
'Utility',
description='The utility "Raw" colorspace.',
is_data=True)
colorspaces += [
scene_reference_colorspace,
display_reference_colorspace,
raw_colorspace,
]
for style, transforms_data in config_mapping.items():
if transforms_data[0]['interface'] == 'ViewTransform':
view_transform = style_to_view_transform(style, [
transform_data['ctl_transform']
for transform_data in transforms_data
], describe)
view_transforms.append(view_transform)
view_transform_name = view_transform.getName()
view_transform_names.append(view_transform_name)
for transform_data in transforms_data:
display_style = transform_data[
'linked_display_colorspace_style']
display = style_to_display_colorspace(
display_style,
encoding=transform_data.get('encoding'),
categories=transform_data.get('categories'))
display_name = display.getName()
if display_name not in display_names:
displays.append(display)
display_names.append(display_name)
shared_views.append({
'display': display_name,
'view': view_transform_name,
'view_transform': view_transform_name,
})
else:
for transform_data in transforms_data:
ctl_transform = transform_data['ctl_transform']
if transform_data['interface'] == 'Look':
look = ctl_transform_to_look(
ctl_transform,
describe,
analytical=analytical,
forward_transform=create_builtin_transform(style),
process_space=scene_reference_colorspace.getName(),
)
looks.append(look)
else:
colorspace = ctl_transform_to_colorspace(
ctl_transform,
describe,
analytical=analytical,
to_reference=create_builtin_transform(style),
encoding=transform_data.get('encoding'),
categories=transform_data.get('categories'))
colorspaces.append(colorspace)
untonemapped_view_transform = view_transform_factory(
'Un-tone-mapped',
from_reference=ocio.BuiltinTransform(
'UTILITY - ACES-AP0_to_CIE-XYZ-D65_BFD'),
)
untonemapped_view_transform_name = untonemapped_view_transform.getName()
for display in display_names:
shared_views.append({
'display': display,
'view': untonemapped_view_transform_name,
'view_transform': untonemapped_view_transform_name,
})
data = ConfigData(
description=(
f'The "Academy Color Encoding System" (ACES) "Reference Config".'
f'\n\n'
f'This "OpenColorIO" config is a strict and quasi-analytical '
f'implementation of "aces-dev" and is designed as a reference for '
f'software developers. It is not a replacement for the previous '
f'"ACES" configs nor the "ACES Studio Config".'
f'\n\n'
f'Generated with "OpenColorIO-Config-ACES" {git_describe()} '
f'on the {datetime.now().strftime("%Y/%m/%d at %H:%M")}.'),
roles={
ocio.ROLE_COLOR_TIMING: f'{aces_family_prefix} - ACEScct',
ocio.ROLE_COMPOSITING_LOG: f'{aces_family_prefix} - ACEScct',
ocio.ROLE_DATA: 'Utility - Raw',
ocio.ROLE_DEFAULT: scene_reference_colorspace.getName(),
ocio.ROLE_INTERCHANGE_DISPLAY:
display_reference_colorspace.getName(),
ocio.ROLE_INTERCHANGE_SCENE: scene_reference_colorspace.getName(),
ocio.ROLE_REFERENCE: scene_reference_colorspace.getName(),
ocio.ROLE_RENDERING: f'{aces_family_prefix} - ACEScg',
ocio.ROLE_SCENE_LINEAR: f'{aces_family_prefix} - ACEScg',
},
colorspaces=colorspaces + displays,
looks=looks,
view_transforms=view_transforms + [untonemapped_view_transform],
shared_views=shared_views,
views=shared_views + [{
'display': display,
'view': 'Raw',
'colorspace': 'Utility - Raw'
} for display in display_names],
active_displays=display_names,
active_views=view_transform_names + ['Raw'],
file_rules=[{
'name': 'Default',
'colorspace': scene_reference_colorspace.getName()
}],
inactive_colorspaces=['CIE-XYZ-D65'],
default_view_transform=untonemapped_view_transform.getName(),
profile_version=2)
config = generate_config(data, config_name, validate)
if additional_data:
return config, data
else:
return config
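# Hedged sketch of how a single row of the mapping *CSV* file is parsed with
# the fieldnames used above; the row content is a fabricated example, not a
# value taken from the actual "Reference Config - Mapping" sheet.
def _example_parse_mapping_row():  # pragma: no cover
    import csv
    import io
    csv_file = io.StringIO(
        'ordering,aces_transform_id,builtin_transform_style,'
        'linked_display_colorspace_style,interface,encoding,categories\n'
        '0,urn:ampas:aces:transformId:v1.5:'
        'ACEScsc.Academy.ACEScct_to_ACES.a1.0.3,'
        'ACEScct_to_ACES2065-1,,Colorspace,log,file-io\n')
    dict_reader = csv.DictReader(
        csv_file,
        delimiter=',',
        fieldnames=[
            'ordering',
            'aces_transform_id',
            'builtin_transform_style',
            'linked_display_colorspace_style',
            'interface',
            'encoding',
            'categories',
        ])
    # Skipping the first header line, as in "generate_config_aces".
    next(dict_reader)
    return list(dict_reader)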
if __name__ == '__main__':
import os
import opencolorio_config_aces
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
build_directory = os.path.join(opencolorio_config_aces.__path__[0], '..',
'build')
if not os.path.exists(build_directory):
os.makedirs(build_directory)
config, data = generate_config_aces(
config_name=os.path.join(build_directory,
'config-aces-reference.ocio'),
analytical=False,
additional_data=True)
|
import sys
import os
from io import StringIO
from datetime import datetime
import unittest
from unittest.mock import patch
sys.path.append(os.path.abspath("./src/"))
from calendarApp.models import Event, Calendar
class CalendarModelTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.data1 = {
"name": "Test Event 1",
"start_time": "01/01/2000 00:00:00",
"end_time": "01/01/2001 00:00:00"
}
cls.data2 = {
"name": "Test Event 2",
"start_time": "01/01/2001 00:00:00",
"end_time": "01/01/2002 00:00:00"
}
@classmethod
def tearDownClass(cls):
del cls.data1
del cls.data2
def setUp(self):
self.calendar = Calendar("Test")
def tearDown(self):
del self.calendar
def test_event_add(self):
# Test Configuration and Setup
with patch('sys.stdout', StringIO()) as print_output:
# Test Function
self.calendar.add_event(
self.data1["name"], self.data1["start_time"], self.data1["end_time"])
calendar_event = self.calendar.schedule[0]
# Test Assertions
self.assertEqual(
f"[INFO] Event {self.data1["name"]} added", print_output.getvalue().rstrip())
self.assertEqual(self.data1["name"], calendar_event.name)
def test_event_delete(self):
# Test Configuration and Setup
self.calendar.schedule = [
Event(
self.data1["name"], self.data1["start_time"], self.data1["end_time"])
]
calendar_event = self.calendar.schedule[0]
with patch('sys.stdout', StringIO()) as print_output:
# Test Function
self.calendar.delete_event([str(calendar_event.id)])
# Test Assertions
self.assertEqual(
f"[INFO] Event(s) ['{calendar_event.id}'] removed", print_output.getvalue().rstrip())
self.assertFalse(self.calendar.schedule)
def test_event_order(self):
# Test Configuration and Setup
self.calendar.schedule = [
Event(
self.data2["name"], self.data2["start_time"], self.data2["end_time"]),
Event(
self.data1["name"], self.data1["start_time"], self.data1["end_time"])
]
# Test Function
self.calendar.order_events()
# Test Assertions
self.assertLess(
self.calendar.schedule[0].start_time, self.calendar.schedule[1].start_time)
def test_event_print(self):
# Test Configuration and Setup
self.calendar.schedule = [
Event(
self.data1["name"], self.data1["start_time"], self.data1["end_time"]),
Event(
self.data2["name"], self.data2["start_time"], self.data2["end_time"])
]
# Test Assertions
with patch('sys.stdout', StringIO()) as print_output:
self.calendar.print_events()
self.assertTrue(self.data1["name"] in print_output.getvalue())
self.assertTrue(self.data2["name"] in print_output.getvalue())
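# Hedged sketch, illustration only: the Calendar / Event API used below is
# inferred from the assertions in the tests above (add_event, delete_event,
# order_events, print_events, schedule, Event.id) and may not cover the full
# calendarApp.models interface.
def _example_calendar_usage():
    calendar = Calendar("Demo")
    calendar.add_event("Stand-up", "01/01/2000 09:00:00", "01/01/2000 09:15:00")
    calendar.order_events()  # sort the schedule by start_time
    calendar.print_events()  # print the schedule to stdout
    first_event = calendar.schedule[0]
    calendar.delete_event([str(first_event.id)])  # remove by id
    return calendar.schedule  # expected to be empty again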
if __name__ == "__main__":
unittest.main()
|
import sys
import os
from io import StringIO
from datetime import datetime
import unittest
from unittest.mock import patch
sys.path.append(os.path.abspath("./src/"))
from calendarApp.models import Event, Calendar
class CalendarModelTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.data1 = {
"name": "Test Event 1",
"start_time": "01/01/2000 00:00:00",
"end_time": "01/01/2001 00:00:00"
}
cls.data2 = {
"name": "Test Event 2",
"start_time": "01/01/2001 00:00:00",
"end_time": "01/01/2002 00:00:00"
}
@classmethod
def tearDownClass(cls):
del cls.data1
del cls.data2
def setUp(self):
self.calendar = Calendar("Test")
def tearDown(self):
del self.calendar
def test_event_add(self):
# Test Configuration and Setup
with patch('sys.stdout', StringIO()) as print_output:
# Test Function
self.calendar.add_event(
self.data1["name"], self.data1["start_time"], self.data1["end_time"])
calendar_event = self.calendar.schedule[0]
# Test Assertions
self.assertEqual(
f"[INFO] Event {self.data1['name']} added", print_output.getvalue().rstrip())
self.assertEqual(self.data1["name"], calendar_event.name)
def test_event_delete(self):
# Test Configuration and Setup
self.calendar.schedule = [
Event(
self.data1["name"], self.data1["start_time"], self.data1["end_time"])
]
calendar_event = self.calendar.schedule[0]
with patch('sys.stdout', StringIO()) as print_output:
# Test Function
self.calendar.delete_event([str(calendar_event.id)])
# Test Assertions
self.assertEqual(
f"[INFO] Event(s) ['{calendar_event.id}'] removed", print_output.getvalue().rstrip())
self.assertFalse(self.calendar.schedule)
def test_event_order(self):
# Test Configuration and Setup
self.calendar.schedule = [
Event(
self.data2["name"], self.data2["start_time"], self.data2["end_time"]),
Event(
self.data1["name"], self.data1["start_time"], self.data1["end_time"])
]
# Test Function
self.calendar.order_events()
# Test Assertions
self.assertLess(
self.calendar.schedule[0].start_time, self.calendar.schedule[1].start_time)
def test_event_print(self):
# Test Configuration and Setup
self.calendar.schedule = [
Event(
self.data1["name"], self.data1["start_time"], self.data1["end_time"]),
Event(
self.data2["name"], self.data2["start_time"], self.data2["end_time"])
]
# Test Assertions
with patch('sys.stdout', StringIO()) as print_output:
self.calendar.print_events()
self.assertTrue(self.data1["name"] in print_output.getvalue())
self.assertTrue(self.data2["name"] in print_output.getvalue())
if __name__ == "__main__":
unittest.main()
|
from argparse import Namespace
import csv
from logging import Logger
import os
from pprint import pformat
from typing import List
import numpy as np
from tensorboardX import SummaryWriter
import torch
from tqdm import trange
import pickle
from torch.optim.lr_scheduler import ExponentialLR
from .evaluate import evaluate, evaluate_predictions
from .predict import predict, save_predictions
from .train import train
from chemprop.data import StandardScaler
from chemprop.data.utils import flip_data, get_class_sizes, get_data, get_task_names, split_data, split_loocv
from chemprop.models import build_model
from chemprop.nn_utils import param_count
from chemprop.utils import build_optimizer, build_lr_scheduler, get_loss_func, get_metric_func, load_checkpoint,\
makedirs, save_checkpoint
def run_training(args: Namespace, logger: Logger = None) -> List[float]:
"""
Trains a model and returns test scores on the model checkpoint with the highest validation score.
:param args: Arguments.
:param logger: Logger.
:return: A list of ensemble scores for each task.
"""
if logger is not None:
debug, info = logger.debug, logger.info
else:
debug = info = print
# Set GPU
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
# Print args
debug(pformat(vars(args)))
# Get data
debug('Loading data')
args.task_names = get_task_names(args.data_path, args.data_format)
data = get_data(path=args.data_path, args=args, logger=logger)
args.num_tasks = data.num_tasks()
args.features_size = data.features_size()
debug(f'Number of tasks = {args.num_tasks}')
# Split data
debug(f'Splitting data with seed {args.seed}')
if args.separate_test_path:
test_data = get_data(path=args.separate_test_path, args=args, features_path=args.separate_test_features_path, logger=logger)
if args.separate_val_path:
val_data = get_data(path=args.separate_val_path, args=args, features_path=args.separate_val_features_path, logger=logger)
if args.separate_val_path and args.separate_test_path:
train_data = data
elif args.separate_val_path:
train_data, _, test_data = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.0, 0.2), seed=args.seed, args=args, logger=logger)
elif args.separate_test_path:
train_data, val_data, _ = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.2, 0.0), seed=args.seed, args=args, logger=logger)
elif args.split_type == 'loocv':
train_data, val_data, test_data = split_loocv(data=data, args=args, logger=logger)
else:
train_data, val_data, test_data = split_data(data=data, split_type=args.split_type, sizes=args.split_sizes, seed=args.seed, args=args, logger=logger)
if args.dataset_type == 'classification':
class_sizes = get_class_sizes(test_data)
debug('Class sizes in test set')
for i, task_class_sizes in enumerate(class_sizes):
debug(f'{args.task_names[i]} '
                  f'{", ".join(f"{cls}: {size * 100:.2f}%" for cls, size in enumerate(task_class_sizes))}')
if not args.train_all and task_class_sizes == 0: # TODO: only works for just 1 property prediction task
                debug('Moved to next epoch due to homogeneous targets in test set.')
return [float('nan')]
if args.save_smiles_splits:
with open(args.data_path, 'r') as f:
reader = csv.reader(f)
header = next(reader)
lines_by_smiles = {}
indices_by_smiles = {}
for i, line in enumerate(reader):
smiles = (line[0], line[1])
lines_by_smiles[smiles] = line
indices_by_smiles[smiles] = i
all_split_indices = []
for dataset, name in [(train_data, 'train'), (val_data, 'val'), (test_data, 'test')]:
with open(os.path.join(args.save_dir, name + '_smiles.csv'), 'w') as f:
writer = csv.writer(f)
writer.writerow(['smiles'])
for smiles in dataset.smiles():
writer.writerow([smiles])
with open(os.path.join(args.save_dir, name + '_full.csv'), 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for smiles in dataset.smiles():
writer.writerow(lines_by_smiles[smiles])
split_indices = []
for smiles in dataset.smiles():
split_indices.append(indices_by_smiles[smiles])
split_indices = sorted(split_indices)
all_split_indices.append(split_indices)
with open(os.path.join(args.save_dir, 'split_indices.pckl'), 'wb') as f:
pickle.dump(all_split_indices, f)
if args.symmetric:
train_data = flip_data(train_data)
if args.features_scaling:
drug_scaler, cmpd_scaler = train_data.normalize_features(replace_nan_token=0)
val_data.normalize_features(drug_scaler, cmpd_scaler)
test_data.normalize_features(drug_scaler, cmpd_scaler)
else:
drug_scaler, cmpd_scaler = None, None
args.train_data_size = len(train_data)
debug(f'Total size = {len(data):,} | '
f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}')
# Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)
if args.dataset_type == 'regression':
debug('Fitting scaler')
train_smiles, train_targets = train_data.smiles(), train_data.targets()
scaler = StandardScaler().fit(train_targets)
scaled_targets = scaler.transform(train_targets).tolist()
train_data.set_targets(scaled_targets)
else:
scaler = None
# Get loss and metric functions
loss_func = get_loss_func(args)
metric_func = get_metric_func(metric=args.metric)
# Set up test set evaluation
test_smiles, test_targets = test_data.smiles(), test_data.targets()
if args.dataset_type == 'multiclass':
sum_test_preds = np.zeros((len(test_smiles), args.num_tasks, args.multiclass_num_classes))
else:
sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))
# Train ensemble of models
for model_idx in range(args.ensemble_size):
# Tensorboard writer
save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
makedirs(save_dir)
try:
writer = SummaryWriter(log_dir=save_dir)
except:
writer = SummaryWriter(logdir=save_dir)
# Load/build model
if args.checkpoint_paths is not None:
debug(f'Loading model {model_idx} from {args.checkpoint_paths[model_idx]}')
model = load_checkpoint(args.checkpoint_paths[model_idx], current_args=args, logger=logger)
else:
debug(f'Building model {model_idx}')
model = build_model(args)
debug(model)
debug(f'Number of parameters = {param_count(model):,}')
if args.cuda:
debug('Moving model to cuda')
model = model.cuda()
# Ensure that model is saved in correct location for evaluation if 0 epochs
save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, drug_scaler, cmpd_scaler, args)
# Optimizers
optimizer = build_optimizer(model, args)
# Learning rate schedulers
scheduler = build_lr_scheduler(optimizer, args)
# Run training
best_score = float('inf') if args.minimize_score else -float('inf')
best_epoch, n_iter = 0, 0
for epoch in trange(args.epochs):
debug(f'Epoch {epoch}')
n_iter = train(
model=model,
data=train_data,
loss_func=loss_func,
optimizer=optimizer,
scheduler=scheduler,
args=args,
n_iter=n_iter,
logger=logger,
writer=writer
)
if isinstance(scheduler, ExponentialLR):
scheduler.step()
val_scores, val_loss = evaluate(
model=model,
data=val_data,
loss_func=loss_func,
num_tasks=args.num_tasks,
metric_func=metric_func,
batch_size=args.batch_size,
dataset_type=args.dataset_type,
scaler=scaler,
logger=logger
)
# Average validation score
avg_val_score = np.nanmean(val_scores)
debug(f'Validation {args.metric} = {avg_val_score:.6f}')
writer.add_scalar(f'validation_{args.metric}', avg_val_score, n_iter)
debug(f'Validation loss = {val_loss:.6f}')
writer.add_scalar(f'validation_loss', val_loss, n_iter)
if args.show_individual_scores:
# Individual validation scores
for task_name, val_score in zip(args.task_names, val_scores):
debug(f'Validation {task_name} {args.metric} = {val_score:.6f}')
writer.add_scalar(f'validation_{task_name}_{args.metric}', val_score, n_iter)
# Save model checkpoint if improved validation score
if args.minimize_score and avg_val_score < best_score or \
not args.minimize_score and avg_val_score > best_score:
best_score, best_epoch = avg_val_score, epoch
save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, drug_scaler, cmpd_scaler, args)
# Evaluate on test set using model with best validation score
info(f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}')
model = load_checkpoint(os.path.join(save_dir, 'model.pt'), cuda=args.cuda, logger=logger)
test_preds = predict(
model=model,
data=test_data,
batch_size=args.batch_size,
scaler=scaler
)
if args.save_preds:
val_preds = predict(model=model, data=val_data, batch_size=args.batch_size, scaler=scaler)
train_preds = predict(model=model, data=train_data, batch_size=args.batch_size, scaler=scaler)
save_predictions(save_dir, train_data, val_data, test_data, \
train_preds, val_preds, test_preds, args.task_names, scaler)
test_scores = evaluate_predictions(
preds=test_preds,
targets=test_targets,
num_tasks=args.num_tasks,
metric_func=metric_func,
dataset_type=args.dataset_type,
logger=logger
)
if len(test_preds) != 0:
sum_test_preds += np.array(test_preds)
# Average test score
avg_test_score = np.nanmean(test_scores)
info(f'Model {model_idx} test {args.metric} = {avg_test_score:.6f}')
writer.add_scalar(f'test_{args.metric}', avg_test_score, 0)
if args.show_individual_scores:
# Individual test scores
for task_name, test_score in zip(args.task_names, test_scores):
info(f'Model {model_idx} test {task_name} {args.metric} = {test_score:.6f}')
writer.add_scalar(f'test_{task_name}_{args.metric}', test_score, n_iter)
# Evaluate ensemble on test set
avg_test_preds = (sum_test_preds / args.ensemble_size).tolist()
ensemble_scores = evaluate_predictions(
preds=avg_test_preds,
targets=test_targets,
num_tasks=args.num_tasks,
metric_func=metric_func,
dataset_type=args.dataset_type,
logger=logger
)
# Average ensemble score
avg_ensemble_test_score = np.nanmean(ensemble_scores)
info(f'Ensemble test {args.metric} = {avg_ensemble_test_score:.6f}')
writer.add_scalar(f'ensemble_test_{args.metric}', avg_ensemble_test_score, 0)
# Individual ensemble scores
if args.show_individual_scores:
for task_name, ensemble_score in zip(args.task_names, ensemble_scores):
info(f'Ensemble test {task_name} {args.metric} = {ensemble_score:.6f}')
return ensemble_scores
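# Hedged sketch, not part of chemprop: a minimal numpy reproduction of the
# ensembling step above, where per-model test predictions are accumulated into
# "sum_test_preds" and divided by the ensemble size before being re-scored.
def _example_ensemble_average() -> List[List[float]]:
    model_preds = [
        np.array([[0.2], [0.8]]),  # hypothetical predictions from model 0
        np.array([[0.4], [0.6]]),  # hypothetical predictions from model 1
    ]
    sum_preds = np.zeros_like(model_preds[0])
    for preds in model_preds:
        sum_preds += preds
    return (sum_preds / len(model_preds)).tolist()  # [[0.3], [0.7]]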
|
from argparse import Namespace
import csv
from logging import Logger
import os
from pprint import pformat
from typing import List
import numpy as np
from tensorboardX import SummaryWriter
import torch
from tqdm import trange
import pickle
from torch.optim.lr_scheduler import ExponentialLR
from .evaluate import evaluate, evaluate_predictions
from .predict import predict, save_predictions
from .train import train
from chemprop.data import StandardScaler
from chemprop.data.utils import flip_data, get_class_sizes, get_data, get_task_names, split_data, split_loocv
from chemprop.models import build_model
from chemprop.nn_utils import param_count
from chemprop.utils import build_optimizer, build_lr_scheduler, get_loss_func, get_metric_func, load_checkpoint,\
makedirs, save_checkpoint
def run_training(args: Namespace, logger: Logger = None) -> List[float]:
"""
Trains a model and returns test scores on the model checkpoint with the highest validation score.
:param args: Arguments.
:param logger: Logger.
:return: A list of ensemble scores for each task.
"""
if logger is not None:
debug, info = logger.debug, logger.info
else:
debug = info = print
# Set GPU
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
# Print args
debug(pformat(vars(args)))
# Get data
debug('Loading data')
args.task_names = get_task_names(args.data_path, args.data_format)
data = get_data(path=args.data_path, args=args, logger=logger)
args.num_tasks = data.num_tasks()
args.features_size = data.features_size()
debug(f'Number of tasks = {args.num_tasks}')
# Split data
debug(f'Splitting data with seed {args.seed}')
if args.separate_test_path:
test_data = get_data(path=args.separate_test_path, args=args, features_path=args.separate_test_features_path, logger=logger)
if args.separate_val_path:
val_data = get_data(path=args.separate_val_path, args=args, features_path=args.separate_val_features_path, logger=logger)
if args.separate_val_path and args.separate_test_path:
train_data = data
elif args.separate_val_path:
train_data, _, test_data = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.0, 0.2), seed=args.seed, args=args, logger=logger)
elif args.separate_test_path:
train_data, val_data, _ = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.2, 0.0), seed=args.seed, args=args, logger=logger)
elif args.split_type == 'loocv':
train_data, val_data, test_data = split_loocv(data=data, args=args, logger=logger)
else:
train_data, val_data, test_data = split_data(data=data, split_type=args.split_type, sizes=args.split_sizes, seed=args.seed, args=args, logger=logger)
if args.dataset_type == 'classification':
class_sizes = get_class_sizes(test_data)
debug('Class sizes in test set')
for i, task_class_sizes in enumerate(class_sizes):
debug(f'{args.task_names[i]} '
f'{", ".join(f"{cls}: {size * 100:.2f}%" for cls, size in enumerate(task_class_sizes))}')
if not args.train_all and task_class_sizes == 0: # TODO: only works for just 1 property prediction task
                debug('Moved to next epoch due to homogeneous targets in test set.')
return [float('nan')]
if args.save_smiles_splits:
with open(args.data_path, 'r') as f:
reader = csv.reader(f)
header = next(reader)
lines_by_smiles = {}
indices_by_smiles = {}
for i, line in enumerate(reader):
smiles = (line[0], line[1])
lines_by_smiles[smiles] = line
indices_by_smiles[smiles] = i
all_split_indices = []
for dataset, name in [(train_data, 'train'), (val_data, 'val'), (test_data, 'test')]:
with open(os.path.join(args.save_dir, name + '_smiles.csv'), 'w') as f:
writer = csv.writer(f)
writer.writerow(['smiles'])
for smiles in dataset.smiles():
writer.writerow([smiles])
with open(os.path.join(args.save_dir, name + '_full.csv'), 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for smiles in dataset.smiles():
writer.writerow(lines_by_smiles[smiles])
split_indices = []
for smiles in dataset.smiles():
split_indices.append(indices_by_smiles[smiles])
split_indices = sorted(split_indices)
all_split_indices.append(split_indices)
with open(os.path.join(args.save_dir, 'split_indices.pckl'), 'wb') as f:
pickle.dump(all_split_indices, f)
if args.symmetric:
train_data = flip_data(train_data)
if args.features_scaling:
drug_scaler, cmpd_scaler = train_data.normalize_features(replace_nan_token=0)
val_data.normalize_features(drug_scaler, cmpd_scaler)
test_data.normalize_features(drug_scaler, cmpd_scaler)
else:
drug_scaler, cmpd_scaler = None, None
args.train_data_size = len(train_data)
debug(f'Total size = {len(data):,} | '
f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}')
# Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)
if args.dataset_type == 'regression':
debug('Fitting scaler')
train_smiles, train_targets = train_data.smiles(), train_data.targets()
scaler = StandardScaler().fit(train_targets)
scaled_targets = scaler.transform(train_targets).tolist()
train_data.set_targets(scaled_targets)
else:
scaler = None
# Get loss and metric functions
loss_func = get_loss_func(args)
metric_func = get_metric_func(metric=args.metric)
# Set up test set evaluation
test_smiles, test_targets = test_data.smiles(), test_data.targets()
if args.dataset_type == 'multiclass':
sum_test_preds = np.zeros((len(test_smiles), args.num_tasks, args.multiclass_num_classes))
else:
sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))
# Train ensemble of models
for model_idx in range(args.ensemble_size):
# Tensorboard writer
save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
makedirs(save_dir)
try:
writer = SummaryWriter(log_dir=save_dir)
except:
writer = SummaryWriter(logdir=save_dir)
# Load/build model
if args.checkpoint_paths is not None:
debug(f'Loading model {model_idx} from {args.checkpoint_paths[model_idx]}')
model = load_checkpoint(args.checkpoint_paths[model_idx], current_args=args, logger=logger)
else:
debug(f'Building model {model_idx}')
model = build_model(args)
debug(model)
debug(f'Number of parameters = {param_count(model):,}')
if args.cuda:
debug('Moving model to cuda')
model = model.cuda()
# Ensure that model is saved in correct location for evaluation if 0 epochs
save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, drug_scaler, cmpd_scaler, args)
# Optimizers
optimizer = build_optimizer(model, args)
# Learning rate schedulers
scheduler = build_lr_scheduler(optimizer, args)
# Run training
best_score = float('inf') if args.minimize_score else -float('inf')
best_epoch, n_iter = 0, 0
for epoch in trange(args.epochs):
debug(f'Epoch {epoch}')
n_iter = train(
model=model,
data=train_data,
loss_func=loss_func,
optimizer=optimizer,
scheduler=scheduler,
args=args,
n_iter=n_iter,
logger=logger,
writer=writer
)
if isinstance(scheduler, ExponentialLR):
scheduler.step()
val_scores, val_loss = evaluate(
model=model,
data=val_data,
loss_func=loss_func,
num_tasks=args.num_tasks,
metric_func=metric_func,
batch_size=args.batch_size,
dataset_type=args.dataset_type,
scaler=scaler,
logger=logger
)
# Average validation score
avg_val_score = np.nanmean(val_scores)
debug(f'Validation {args.metric} = {avg_val_score:.6f}')
writer.add_scalar(f'validation_{args.metric}', avg_val_score, n_iter)
debug(f'Validation loss = {val_loss:.6f}')
writer.add_scalar(f'validation_loss', val_loss, n_iter)
if args.show_individual_scores:
# Individual validation scores
for task_name, val_score in zip(args.task_names, val_scores):
debug(f'Validation {task_name} {args.metric} = {val_score:.6f}')
writer.add_scalar(f'validation_{task_name}_{args.metric}', val_score, n_iter)
# Save model checkpoint if improved validation score
if args.minimize_score and avg_val_score < best_score or \
not args.minimize_score and avg_val_score > best_score:
best_score, best_epoch = avg_val_score, epoch
save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, drug_scaler, cmpd_scaler, args)
# Evaluate on test set using model with best validation score
info(f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}')
model = load_checkpoint(os.path.join(save_dir, 'model.pt'), cuda=args.cuda, logger=logger)
test_preds = predict(
model=model,
data=test_data,
batch_size=args.batch_size,
scaler=scaler
)
if args.save_preds:
val_preds = predict(model=model, data=val_data, batch_size=args.batch_size, scaler=scaler)
train_preds = predict(model=model, data=train_data, batch_size=args.batch_size, scaler=scaler)
save_predictions(save_dir, train_data, val_data, test_data, \
train_preds, val_preds, test_preds, args.task_names, scaler)
test_scores = evaluate_predictions(
preds=test_preds,
targets=test_targets,
num_tasks=args.num_tasks,
metric_func=metric_func,
dataset_type=args.dataset_type,
logger=logger
)
if len(test_preds) != 0:
sum_test_preds += np.array(test_preds)
# Average test score
avg_test_score = np.nanmean(test_scores)
info(f'Model {model_idx} test {args.metric} = {avg_test_score:.6f}')
writer.add_scalar(f'test_{args.metric}', avg_test_score, 0)
if args.show_individual_scores:
# Individual test scores
for task_name, test_score in zip(args.task_names, test_scores):
info(f'Model {model_idx} test {task_name} {args.metric} = {test_score:.6f}')
writer.add_scalar(f'test_{task_name}_{args.metric}', test_score, n_iter)
# Evaluate ensemble on test set
avg_test_preds = (sum_test_preds / args.ensemble_size).tolist()
ensemble_scores = evaluate_predictions(
preds=avg_test_preds,
targets=test_targets,
num_tasks=args.num_tasks,
metric_func=metric_func,
dataset_type=args.dataset_type,
logger=logger
)
# Average ensemble score
avg_ensemble_test_score = np.nanmean(ensemble_scores)
info(f'Ensemble test {args.metric} = {avg_ensemble_test_score:.6f}')
writer.add_scalar(f'ensemble_test_{args.metric}', avg_ensemble_test_score, 0)
# Individual ensemble scores
if args.show_individual_scores:
for task_name, ensemble_score in zip(args.task_names, ensemble_scores):
info(f'Ensemble test {task_name} {args.metric} = {ensemble_score:.6f}')
return ensemble_scores
|
try:
import time
FirstTime = time.time()
import os
import io
import sys
import time
import glob
import socket
import locale
import hashlib
import tempfile
import datetime
import subprocess
from ctypes import windll
from urllib.request import urlopen
try:
import psutil
importedPsutil = True
except ImportError:
importedPsutil = False
import win32gui
import win32api
import pythoncom
import win32process
import win32com.client
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import pyqtSignal as Signal
from pynput.keyboard import Controller, Key
from pynput.mouse import Controller as MouseController
from external.FramelessWindow import QFramelessDialog
from languages import *
import globals
old_stdout = sys.stdout
sys.stdout = buffer = io.StringIO()
from settings import *
from tools import *
import tools
    from external.WnfReader import isFocusAssistEnabled, getNotificationNumber
except Exception:
    # The module-level "try:" opened above needs a handler to be valid Python;
    # re-raise so any import/startup failure still surfaces unchanged.
    raise
blacklistedProcesses = ["msrdc.exe", "mstsc.exe", "CDViewer.exe", "wfica32.exe", "vmware-view.exe", "vmware.exe"]
blacklistedFullscreenApps = ("", "Program Manager", "NVIDIA GeForce Overlay", "ElenenClock_IgnoreFullscreenEvent") # The "" codes for titleless windows
seconddoubleclick = False
isRDPRunning = False
restartCount = 0
tempDir = ""
timeStr = ""
dateTimeFormat = ""
clocks = []
oldScreens = []
isFocusAssist = False
numOfNotifs = 0
print("---------------------------------------------------------------------------------------------------")
print("")
print(f" ElevenClock's {versionName} (v{version}) log: Select all the text and hit Ctrl+C to copy it")
print(f" All modules loaded successfully and sys.stdout patched correctly, starting main script")
print(f" Translator function set language to \"{langName}\"")
print("")
print("---------------------------------------------------------------------------------------------------")
print("")
print(" Log legend:")
print(" 🔵: Verbose")
print(" 🟢: Information")
print(" 🟡: Warning")
print(" 🟠: Handled unexpected exception")
print(" 🔴: Unhandled unexpected exception")
print(" 🟣: Handled expected exception")
print("")
def _(s) -> str:
return tools._(s)
def checkRDP():
def checkIfElevenClockRunning(processess, blacklistedProcess) -> bool:
for p_name in processess:
if p_name in blacklistedProcess:
print(f"🟡 Blacklisted procName {p_name} detected, hiding...")
return True
return False
global isRDPRunning
print("🔵 Starting RDP thread")
while True:
pythoncom.CoInitialize()
_wmi = win32com.client.GetObject('winmgmts:')
processes = _wmi.ExecQuery('Select Name from win32_process')
procs = [p.Name for p in processes]
isRDPRunning = checkIfElevenClockRunning(procs, blacklistedProcesses)
time.sleep(5)
def getMousePos():
try:
return QPoint(mController.position[0], mController.position[1])
except AttributeError:
print("🟠 Mouse thread returned AttributeError")
except Exception as e:
report(e)
def updateChecker():
updateIfPossible()
time.sleep(60)
while True:
updateIfPossible()
time.sleep(7200)
def updateIfPossible(force = False):
try:
if(not(getSettings("DisableAutoCheckForUpdates")) or force):
print("🔵 Starting update check")
integrityPass = False
dmname = socket.gethostbyname_ex("versions.somepythonthings.tk")[0]
if(dmname == "769432b9-3560-4f94-8f90-01c95844d994.id.repl.co" or getSettings("BypassDomainAuthCheck")): # Check provider IP to prevent exploits
integrityPass = True
try:
response = urlopen("https://versions.somepythonthings.tk/versions/elevenclock.ver" if not getSettings("AlternativeUpdateServerProvider") else "http://www.somepythonthings.tk/versions/elevenclock.ver")
except Exception as e:
report(e)
response = urlopen("http://www.somepythonthings.tk/versions/elevenclock.ver")
integrityPass = True
print("🔵 Version URL:", response.url)
response = response.read().decode("utf8")
new_version_number = response.split("///")[0]
provided_hash = response.split("///")[2].replace("\n", "").lower()
if float(new_version_number) > version:
print("🟢 Updates found!")
if(not(getSettings("DisableAutoInstallUpdates")) or force):
showNotif.infoSignal.emit(("ElevenClock Updater"), ("ElevenClock is downloading updates"))
if(integrityPass):
url = "https://github.com/martinet101/ElevenClock/releases/latest/download/ElevenClock.Installer.exe"
filedata = urlopen(url)
datatowrite = filedata.read()
filename = ""
with open(os.path.join(tempDir, "SomePythonThings-ElevenClock-Updater.exe"), 'wb') as f:
f.write(datatowrite)
filename = f.name
if(hashlib.sha256(datatowrite).hexdigest().lower() == provided_hash):
print("🔵 Hash: ", provided_hash)
print("🟢 Hash ok, starting update")
if(getSettings("EnableSilentUpdates") and not(force)):
mousePos = getMousePos()
time.sleep(5)
while mousePos != getMousePos():
print("🟡 User is using the mouse, waiting")
mousePos = getMousePos()
time.sleep(5)
subprocess.run('start /B "" "{0}" /verysilent'.format(filename), shell=True)
else:
subprocess.run('start /B "" "{0}" /silent'.format(filename), shell=True)
else:
print("🟠 Hash not ok")
print("🟠 File hash: ", hashlib.sha256(datatowrite).hexdigest())
print("🟠 Provided hash: ", provided_hash)
                            showWarn.infoSignal.emit(("Updates found!"), f"ElevenClock Version {new_version_number} is available, but ElevenClock can't verify the authenticity of the package. Please go to ElevenClock's homepage and download the latest version from there.\n\nDo you want to open the download page?")
else:
print("🟠 Can't verify update server authenticity, aborting")
print("🟠 Provided DmName:", dmname)
print("🟠 Expected DmNane: 769432b9-3560-4f94-8f90-01c95844d994.id.repl.co")
                        showWarn.infoSignal.emit(("Updates found!"), f"ElevenClock Version {new_version_number} is available, but ElevenClock can't verify the authenticity of the updates server. Please go to ElevenClock's homepage and download the latest version from there.\n\nDo you want to open the download page?")
else:
showNotif.infoSignal.emit(("Updates found!"), f"ElevenClock Version {new_version_number} is available. Go to ElevenClock's Settings to update")
else:
print("🟢 Updates not found")
else:
print("🟠 Update checking disabled")
#old_stdout.write(buffer.getvalue())
#old_stdout.flush()
except Exception as e:
report(e)
#old_stdout.write(buffer.getvalue())
#old_stdout.flush()
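# Hedged sketch, illustration only: the integrity check in updateIfPossible()
# reduces to comparing the SHA-256 digest of the downloaded installer bytes
# with the hash published alongside the version number; arguments are made up.
def exampleVerifyInstaller(datatowrite: bytes, provided_hash: str) -> bool:
    return hashlib.sha256(datatowrite).hexdigest().lower() == provided_hash.strip().lower()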
def resetRestartCount():
global restartCount
while True:
if(restartCount>0):
print("🔵 Restart loop:", restartCount)
restartCount -= 1
time.sleep(0.3)
def loadClocks():
global clocks, oldScreens, st, restartCount, st
try:
st.kill()
except AttributeError:
pass
ForceClockOnFirstMonitor = getSettings("ForceClockOnFirstMonitor")
HideClockOnSecondaryMonitors = getSettings("HideClockOnSecondaryMonitors")
oldScreens = []
clocks = []
if importedPsutil:
process = psutil.Process(os.getpid())
memOk = (process.memory_info().rss/1048576) <= 150
else:
print("🟠 Psutil couldn't be imported!")
memOk = True
if restartCount<20 and memOk:
restartCount += 1
i = 0
for screen in app.screens():
screen: QScreen
oldScreens.append(getGeometry(screen))
if not screen == QGuiApplication.primaryScreen() or ForceClockOnFirstMonitor: # Check if we are not on the primary screen
if not HideClockOnSecondaryMonitors or screen == QGuiApplication.primaryScreen(): # First monitor is not affected by HideClockOnSecondaryMonitors
clocks.append(Clock(screen.logicalDotsPerInchX()/96, screen.logicalDotsPerInchY()/96, screen, i))
i += 1
else:
print("🟠 This is a secondary screen and is set to be skipped")
            else: # Skip the primary display, as it already has the clock
print("🟡 This is the primary screen and is set to be skipped")
st = KillableThread(target=screenCheckThread, daemon=True, name="Main [loaded]: Screen listener")
st.start()
else:
os.startfile(sys.executable)
print("🔴 Overloading system, killing!")
app.quit()
sys.exit(1)
def getGeometry(screen: QScreen):
"""
Return a tuple containing: (screen_width, screen_height, screen_pos_x, screen_pos_y, screen_DPI, desktopWindowRect)
"""
try:
geometry = screen.geometry()
g = (geometry.width(), geometry.height(), geometry.x(), geometry.y(), screen.logicalDotsPerInch(), win32api.EnumDisplayMonitors())
return g
except Exception as e:
report(e)
geometry = QGuiApplication.primaryScreen().geometry()
g = (geometry.width(), geometry.height(), geometry.x(), geometry.y(), screen.logicalDotsPerInch(), win32api.EnumDisplayMonitors())
return g
def theyMatch(oldscreens, newscreens):
if len(oldscreens) != len(newscreens) or len(app.screens()) != len(win32api.EnumDisplayMonitors()):
return False # The number of displays has changed
# Check that all screen dimensions and dpi are the same as before
return all(old == getGeometry(new) for old, new in zip(oldscreens, newscreens))
def wnfDataThread():
global isFocusAssist, numOfNotifs
while True:
isFocusAssist = isFocusAssistEnabled()
time.sleep(0.25)
if not isFocusAssist:
numOfNotifs = getNotificationNumber()
time.sleep(0.25)
def screenCheckThread():
while theyMatch(oldScreens, app.screens()):
time.sleep(1)
signal.restartSignal.emit()
pass
def closeClocks():
for clock in clocks:
clock.hide()
clock.close()
def showMessage(title: str, body: str, uBtn: bool = True) -> None:
"""
Shows a Windows Notification
"""
lastState = i.isVisible()
i.show()
i.showMessage(title, body)
if uBtn:
sw.updateButton.show()
i.setVisible(lastState)
def restartClocks(caller: str = ""):
global clocks, st, rdpThread
closeClocks()
loadClocks()
loadTimeFormat()
try:
rdpThread.kill()
except AttributeError:
pass
rdpThread = KillableThread(target=checkRDP, daemon=True)
if(getSettings("EnableHideOnRDP")):
rdpThread.start()
def isElevenClockRunningThread():
nowTime = time.time()
name = f"ElevenClockRunning{nowTime}"
setSettings(name, True, False)
while True:
try:
for file in glob.glob(os.path.join(os.path.join(os.path.expanduser("~"), ".elevenclock"), "ElevenClockRunning*")):
if(os.path.join(os.path.join(os.path.expanduser("~"), ".elevenclock"), name) == file):
pass
else:
if(float(file.replace(os.path.join(os.path.join(os.path.expanduser("~"), ".elevenclock"), "ElevenClockRunning"), "")) < nowTime): # If lockfile is older
os.remove(file)
if not(getSettings(name)):
print("🟠 KILLING, NEWER VERSION RUNNING")
killSignal.infoSignal.emit("", "")
except Exception as e:
report(e)
time.sleep(2)
def wanrUserAboutUpdates(a, b):
if(QMessageBox.question(sw, a, b, QMessageBox.Open | QMessageBox.Cancel, QMessageBox.Open) == QMessageBox.Open):
os.startfile("https://github.com/martinet101/ElevenClock/releases/latest")
def checkIfWokeUpThread():
while True:
lastTime = time.time()
time.sleep(3)
if((lastTime+6) < time.time()):
os.startfile(sys.executable)
def loadTimeFormat():
global dateTimeFormat
showSeconds = readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Explorer\Advanced", "ShowSecondsInSystemClock", 0) or getSettings("EnableSeconds")
locale.setlocale(locale.LC_ALL, readRegedit(r"Control Panel\International", "LocaleName", "en_US"))
dateTimeFormat = "%HH:%M\n%A\n(W%W) %d/%m/%Y"
if getSettings("DisableTime"):
dateTimeFormat = dateTimeFormat.replace("%HH:%M\n", "")
if getSettings("DisableDate"):
if("\n" in dateTimeFormat):
dateTimeFormat = dateTimeFormat.replace("\n(W%W) %d/%m/%Y", "")
else:
dateTimeFormat = dateTimeFormat.replace("(W%W) %d/%m/%Y", "")
elif not getSettings("EnableWeekNumber"):
dateTimeFormat = dateTimeFormat.replace("(W%W) ", "")
else:
dateTimeFormat = dateTimeFormat.replace("(W%W) ", f"({_("W")}%W) ")
if not getSettings("EnableWeekDay"):
try:
dateTimeFormat = dateTimeFormat.replace("%A", "").replace("\n\n", "\n")
if dateTimeFormat[-1] == "\n":
dateTimeFormat = dateTimeFormat[0:-1]
if dateTimeFormat[0] == "\n":
dateTimeFormat = dateTimeFormat[1:]
except IndexError as e:
print("🟠 Date/Time string looks to be empty!")
except Exception as e:
report(e)
tDateMode = readRegedit(r"Control Panel\International", "sShortDate", "dd/MM/yyyy")
print("🔵 tDateMode:", tDateMode)
dateMode = ""
for i, ministr in enumerate(tDateMode.split("'")):
if i%2==0:
dateMode += ministr.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%$").replace("d", "%#d").replace("$", "d").replace("MMMM", "%B").replace("MMM", "%b").replace("MM", "%m").replace("M", "%#m").replace("yyyy", "%Y").replace("yy", "%y")
else:
dateMode += ministr
tTimeMode = readRegedit(r"Control Panel\International", "sShortTime", "H:mm")
print("🔵 tTimeMode:", tTimeMode)
timeMode = ""
for i, ministr in enumerate(tTimeMode.split("'")):
if i%2==0:
timeMode += ministr.replace("HH", "%$").replace("H", "%#H").replace("$", "H").replace("hh", "%I").replace("h", "%#I").replace("mm", "%M").replace("m", "%#M").replace("tt", "%p").replace("t", "%p").replace("ss", "%S").replace("s", "%#S")
if not("S" in timeMode) and showSeconds == 1:
for separator in ":.-/_":
if(separator in timeMode):
timeMode += f"{separator}%S"
else:
timeMode += ministr
for separator in ":.-/_":
timeMode = timeMode.replace(f" %p{separator}%S", f"{separator}%S %p")
timeMode = timeMode.replace(f" %p{separator}%#S", f"{separator}%#S %p")
timeMode = timeMode.replace("%S", "%S·").replace("%#S", "%#S·")
dateTimeFormat = dateTimeFormat.replace("%d/%m/%Y", dateMode).replace("%HH:%M", timeMode)
print("🔵 Loaded date time format:", dateTimeFormat)
def timeStrThread():
global timeStr, dateTimeFormat
fixHyphen = getSettings("EnableHyphenFix")
encoding = 'unicode-escape'
while True:
for _ in range(36000):
dateTimeFormatUnicode = dateTimeFormat.encode(encoding).decode()
now = datetime.datetime.now()
timeStr = now.strftime(dateTimeFormatUnicode).encode().decode(encoding)
if fixHyphen:
timeStr = timeStr.replace("t-", "t -")
try:
secs = datetime.datetime.now().strftime("%S")
if secs[-1] == "1":
timeStr = timeStr.replace("·", " \u200e")
else:
timeStr = timeStr.replace("·", "")
except IndexError:
pass
time.sleep(0.1)
class RestartSignal(QObject):
restartSignal = Signal()
def __init__(self) -> None:
super().__init__()
class InfoSignal(QObject):
infoSignal = Signal(str, str)
def __init__(self) -> None:
super().__init__()
class Clock(QWidget):
refresh = Signal()
hideSignal = Signal()
callInMainSignal = Signal(object)
styler = Signal(str)
preferedwidth = 200
preferedHeight = 48
focusassitant = True
lastTheme = 0
clockShouldBeHidden = False
shouldBeVisible = True
isRDPRunning = True
clockOnTheLeft = False
textInputHostHWND = 0
INTLOOPTIME = 2
def __init__(self, dpix: float, dpiy: float, screen: QScreen, index: int):
super().__init__()
if f"_{screen.name()}_" in getSettingsValue("BlacklistedMonitors"):
print("🟠 Monitor blacklisted!")
self.hide()
else:
self.index = index
print(f"🔵 Initializing clock {index}...")
self.callInMainSignal.connect(lambda f: f())
self.styler.connect(self.setStyleSheet)
self.taskbarBackgroundColor = not getSettings("DisableTaskbarBackgroundColor") and not (getSettings("UseCustomBgColor") or getSettings("AccentBackgroundcolor"))
self.transparentBackground = getSettings("DisableTaskbarBackgroundColor") and not (getSettings("UseCustomBgColor") or getSettings("AccentBackgroundcolor"))
if self.taskbarBackgroundColor:
print("🔵 Using taskbar background color")
self.bgcolor = "0, 0, 0, 0"
else:
print("🟡 Not using taskbar background color")
if getSettings("AccentBackgroundcolor"):
self.bgcolor = f"{getColors()[5 if isTaskbarDark() else 1]},100"
else:
self.bgcolor = getSettingsValue("UseCustomBgColor") if getSettingsValue("UseCustomBgColor") else "0, 0, 0, 0"
print("🔵 Using bg color:", self.bgcolor)
self.prefMargins = 0
try:
if readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Explorer\Advanced", "TaskbarSi", 1) == 0 or (not getSettings("DisableTime") and not getSettings("DisableDate") and getSettings("EnableWeekDay")):
self.prefMargins = self.getPx(5)
self.widgetStyleSheet = f"background-color: rgba(bgColor%); margin: {self.getPx(0)}px;margin-top: 0px;margin-bottom: 0px; border-radius: {self.getPx(5)}px;"
if not(not getSettings("DisableTime") and not getSettings("DisableDate") and getSettings("EnableWeekDay")):
print("🟡 Small sized taskbar")
self.preferedHeight = 32
self.preferedwidth = 200
else:
print("🟢 Regular sized taskbar")
self.prefMargins = self.getPx(3)
self.widgetStyleSheet = f"background-color: rgba(bgColor%);margin: {self.getPx(0)}px;border-radius: {self.getPx(5)}px;padding: {self.getPx(2)}px;"
except Exception as e:
print("🟡 Regular sized taskbar")
report(e)
self.prefMargins = self.getPx(3)
self.widgetStyleSheet = f"background-color: rgba(bgColor%);margin: {self.getPx(0)}px;border-radius: {self.getPx(5)}px;;padding: {self.getPx(2)}px;"
self.setStyleSheet(self.widgetStyleSheet.replace("bgColor", self.bgcolor))
if getSettings("ClockFixedHeight"):
print("🟡 Custom height being used!")
try:
self.preferedHeight = int(getSettingsValue("ClockFixedHeight"))
except ValueError as e:
report(e)
self.win32screen = {"Device": None, "Work": (0, 0, 0, 0), "Flags": 0, "Monitor": (0, 0, 0, 0)}
for win32screen in win32api.EnumDisplayMonitors():
try:
if win32api.GetMonitorInfo(win32screen[0].handle)["Device"] == screen.name():
self.win32screen = win32api.GetMonitorInfo(win32screen[0].handle)
except Exception as e:
report(e)
if self.win32screen == {"Device": None, "Work": (0, 0, 0, 0), "Flags": 0, "Monitor": (0, 0, 0, 0)}: #If no display is matching
os.startfile(sys.executable) # Restart elevenclock
app.quit()
self.screenGeometry = QRect(self.win32screen["Monitor"][0], self.win32screen["Monitor"][1], self.win32screen["Monitor"][2]-self.win32screen["Monitor"][0], self.win32screen["Monitor"][3]-self.win32screen["Monitor"][1])
print("🔵 Monitor geometry:", self.screenGeometry)
self.refresh.connect(self.refreshandShow)
self.hideSignal.connect(self.hide)
self.keyboard = Controller()
self.setWindowFlag(Qt.WindowStaysOnTopHint)
self.setWindowFlag(Qt.FramelessWindowHint)
self.setAttribute(Qt.WA_TranslucentBackground)
self.setWindowFlag(Qt.Tool)
hex_blob = b'0\x00\x00\x00\xfe\xff\xff\xffz\xf4\x00\x00\x03\x00\x00\x00T\x00\x00\x000\x00\x00\x00\x00\x00\x00\x00\x08\x04\x00\x00\x80\x07\x00\x008\x04\x00\x00`\x00\x00\x00\x01\x00\x00\x00'
registry_read_result = readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Explorer\StuckRects3", "Settings", hex_blob)
self.autoHide = registry_read_result[8] == 123
if self.autoHide:
print("🟡 ElevenClock set to hide with the taskbar")
self.clockOnTheLeft = getSettings("ClockOnTheLeft")
screenName = screen.name().replace("\\", "_")
if not self.clockOnTheLeft:
if getSettings(f"SpecificClockOnTheLeft{screenName}"):
self.clockOnTheLeft = True
print(f"🟡 Clock {screenName} on the left (forced)")
else:
if getSettings(f"SpecificClockOnTheRight{screenName}"):
self.clockOnTheLeft = False
print(f"🟡 Clock {screenName} on the right (forced)")
try:
if (registry_read_result[12] == 1 and not getSettings("ForceOnBottom")) or getSettings("ForceOnTop"):
h = self.screenGeometry.y()
print("🟢 Taskbar at top")
else:
h = self.screenGeometry.y()+self.screenGeometry.height()-(self.preferedHeight*dpiy)
print("🟡 Taskbar at bottom")
except Exception as e:
report(e)
h = self.screenGeometry.y()+self.screenGeometry.height()-(self.preferedHeight*dpiy)
print("🟡 Taskbar at bottom")
self.label = Label(timeStr, self)
if self.clockOnTheLeft:
print("🟡 Clock on the left")
w = self.screenGeometry.x()+8*dpix
self.label.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)
else:
self.label.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
print("🟢 Clock on the right")
w = self.screenGeometry.x()+self.screenGeometry.width()-((self.preferedwidth)*dpix)
if getSettings("CenterAlignment"):
self.label.setAlignment(Qt.AlignCenter)
xoff = 0
yoff = 0
if getSettings("ClockXOffset"):
print("🟡 X offset being used!")
try:
xoff = int(getSettingsValue("ClockXOffset"))
except ValueError as e:
report(e)
if getSettings("ClockYOffset"):
print("🟡 Y offset being used!")
try:
yoff = int(getSettingsValue("ClockYOffset"))
except ValueError as e:
report(e)
self.w = int(w) + xoff
self.h = int(h) + yoff
self.dpix = dpix
self.dpiy = dpiy
if not(getSettings("EnableWin32API")):
print("🟢 Using qt's default positioning system")
self.move(self.w, self.h)
self.resize(int(self.preferedwidth*dpix), int(self.preferedHeight*dpiy))
else:
print("🟡 Using win32 API positioning system")
self.user32 = windll.user32
self.user32.SetProcessDPIAware() # forces functions to return real pixel numbers instead of scaled values
win32gui.SetWindowPos(self.winId(), 0, int(w), int(h), int(self.preferedwidth*dpix), int(self.preferedHeight*dpiy), False)
print("🔵 Clock geometry:", self.geometry())
self.font: QFont = QFont()
customFont = getSettingsValue("UseCustomFont")
if customFont == "":
if lang == lang_ko:
self.fontfamilies = ["Malgun Gothic", "Segoe UI Variable", "sans-serif"]
elif lang == lang_zh_TW:
self.fontfamilies = ["Microsoft JhengHei UI", "Segoe UI Variable", "sans-serif"]
elif lang == lang_zh_CN:
self.fontfamilies = ["Microsoft YaHei UI", "Segoe UI Variable", "sans-serif"]
else:
self.fontfamilies = ["Segoe UI Variable Display", "sans-serif"]
else:
self.fontfamilies = [customFont]
print(f"🔵 Font families: {self.fontfamilies}")
customSize = getSettingsValue("UseCustomFontSize")
if customSize == "":
self.font.setPointSizeF(9.3)
else:
try:
self.font.setPointSizeF(float(customSize))
except Exception as e:
self.font.setPointSizeF(9.3)
report(e)
print(f"🔵 Font size: {self.font.pointSizeF()}")
self.font.setStyleStrategy(QFont.PreferOutline)
self.font.setLetterSpacing(QFont.PercentageSpacing, 100)
self.font.setHintingPreference(QFont.HintingPreference.PreferNoHinting)
self.label.setFont(self.font)
accColors = getColors()
def make_style_sheet(a, b, c, d, color):
bg = 1 if isTaskbarDark() else 4
fg = 6 if isTaskbarDark() else 1
return f"*{{padding: {a}px;padding-right: {b}px;margin-right: {c}px;padding-left: {d}px; color: {color};}}#notifIndicator{{background-color: rgb({accColors[bg]});color:rgb({accColors[fg]});}}"
if getSettings("UseCustomFontColor"):
print("🟡 Using custom text color:", getSettingsValue('UseCustomFontColor'))
self.lastTheme = -1
                style_sheet_string = make_style_sheet(self.getPx(1), self.getPx(3), self.getPx(12), self.getPx(5), f"rgb({getSettingsValue('UseCustomFontColor')})")
self.label.setStyleSheet(style_sheet_string)
self.label.bgopacity = .1
self.fontfamilies = [element.replace("Segoe UI Variable Display", "Segoe UI Variable Display Semib") for element in self.fontfamilies]
self.font.setFamilies(self.fontfamilies)
if lang == lang_ko:
self.font.setWeight(QFont.Weight.Normal)
elif lang == lang_zh_TW or lang == lang_zh_CN:
self.font.setWeight(QFont.Weight.Normal)
else:
self.font.setWeight(QFont.Weight.DemiBold)
self.label.setFont(self.font)
elif isTaskbarDark():
print("🟢 Using white text (dark mode)")
self.lastTheme = 0
style_sheet_string = make_style_sheet(self.getPx(1), self.getPx(3), self.getPx(12), self.getPx(5), "white")
self.label.setStyleSheet(style_sheet_string)
self.label.bgopacity = .1
self.fontfamilies = [element.replace("Segoe UI Variable Display", "Segoe UI Variable Display Semib") for element in self.fontfamilies]
self.font.setFamilies(self.fontfamilies)
if lang == lang_ko:
self.font.setWeight(QFont.Weight.Normal)
elif lang == lang_zh_TW or lang == lang_zh_CN:
self.font.setWeight(QFont.Weight.Normal)
else:
self.font.setWeight(QFont.Weight.DemiBold)
self.label.setFont(self.font)
else:
print("🟢 Using black text (light mode)")
self.lastTheme = 1
style_sheet_string = make_style_sheet(self.getPx(1), self.getPx(3), self.getPx(12), self.getPx(5), "black")
self.label.setStyleSheet(style_sheet_string)
self.label.bgopacity = .5
self.fontfamilies = [element.replace("Segoe UI Variable Display Semib", "Segoe UI Variable Display") for element in self.fontfamilies]
self.font.setFamilies(self.fontfamilies)
self.font.setWeight(QFont.Weight.ExtraLight)
self.label.setFont(self.font)
self.label.clicked.connect(lambda: self.showCalendar())
self.label.move(0, 0)
self.label.setFixedHeight(self.height())
self.label.resize(self.width()-self.getPx(8), self.height())
self.label.show()
loadTimeFormat()
self.show()
self.raise_()
self.setFocus()
self.full_screen_rect = (self.screenGeometry.x(), self.screenGeometry.y(), self.screenGeometry.x()+self.screenGeometry.width(), self.screenGeometry.y()+self.screenGeometry.height())
print("🔵 Full screen rect: ", self.full_screen_rect)
self.forceDarkTheme = getSettings("ForceDarkTheme")
self.forceLightTheme = getSettings("ForceLightTheme")
self.hideClockWhenClicked = getSettings("HideClockWhenClicked")
self.isLowCpuMode = getSettings("EnableLowCpuMode")
self.primary_screen = QGuiApplication.primaryScreen()
self.oldBgColor = 0
self.user32 = windll.user32
self.user32.SetProcessDPIAware() # optional, makes functions return real pixel numbers instead of scaled values
self.loop0 = KillableThread(target=self.updateTextLoop, daemon=True, name=f"Clock[{index}]: Time updater loop")
self.loop1 = KillableThread(target=self.mainClockLoop, daemon=True, name=f"Clock[{index}]: Main clock loop")
self.loop2 = KillableThread(target=self.backgroundLoop, daemon=True, name=f"Clock[{index}]: Background color loop")
self.loop0.start()
self.loop1.start()
self.loop2.start()
class QHoverButton(QPushButton):
hovered = Signal()
unhovered = Signal()
def __init__(self, text: str = "", parent: QObject = None) -> None:
super().__init__(text=text, parent=parent)
def enterEvent(self, event: QtCore.QEvent) -> None:
self.hovered.emit()
return super().enterEvent(event)
def leaveEvent(self, event: QtCore.QEvent) -> None:
self.unhovered.emit()
return super().leaveEvent(event)
if(readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Explorer\Advanced", "TaskbarSd", 0) == 1) or getSettings("ShowDesktopButton"):
print("🟡 Desktop button enabled")
self.desktopButton = QHoverButton(parent=self)
self.desktopButton.clicked.connect(lambda: self.showDesktop())
self.desktopButton.show()
self.desktopButton.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.desktopButton.move(self.width()-self.getPx(10), 0)
self.desktopButton.resize(self.getPx(10), self.getPx(self.preferedHeight))
self.desktopButton.hovered.connect(lambda: self.desktopButton.setIcon(QIcon(getPath("showdesktop.png"))))
self.desktopButton.unhovered.connect(lambda: self.desktopButton.setIcon(QIcon()))
self.setFixedHeight(self.getPx(self.preferedHeight))
self.desktopButton.setStyleSheet(f"""
QPushButton{{
background-color: rgba(0, 0, 0, 0.01);
margin: 0px;
padding: 0px;
margin-top: 0px;
border-radius: 0px;
margin-bottom: 0px;
border-left: 0px solid rgba(0, 0, 0, 0.05);
border-right: 0px solid rgba(0, 0, 0, 0.05);
}}
QPushButton:hover{{
background-color: rgba(127, 127, 127, 1%);
margin: 0px;
margin-top: 0px;
border-radius: 0px;
margin-bottom: 0px;
border-left: 0px solid rgba(0, 0, 0, 0.05);
border-right: 0px solid rgba(0, 0, 0, 0.05);
}}
QPushButton:pressed{{
background-color: rgba(127, 127, 127, 1%);
margin: 0px;
margin-top: 0px;
border-radius: 0px;
margin-bottom: 0px;
border-left: 0px solid rgba(0, 0, 0, 0.05);
border-right: 0px solid rgba(0, 0, 0, 0.05);
}}
""")
def getPx(self, original) -> int:
return round(original*(self.screen().logicalDotsPerInch()/96))
def backgroundLoop(self):
while True:
try:
if self.taskbarBackgroundColor and not self.isLowCpuMode and not globals.trayIcon.contextMenu().isVisible():
intColor = self.primary_screen.grabWindow(0, self.x()+self.label.x()-1, self.y()+2, 1, 1).toImage().pixel(0, 0)
if intColor != self.oldBgColor:
self.oldBgColor = intColor
color = QColor(intColor)
self.styler.emit(self.widgetStyleSheet.replace("bgColor", f"{color.red()}, {color.green()}, {color.blue()}, 100"))
except AttributeError:
print("🟣 Expected AttributeError on backgroundLoop thread")
time.sleep(0.5)
def theresFullScreenWin(self, clockOnFirstMon, newMethod, legacyMethod):
try:
fullscreen = False
def compareFullScreenRects(window, screen, newMethod):
try:
if(newMethod):
return window[0] <= screen[0] and window[1] <= screen[1] and window[2] >= screen[2] and window[3] >= screen[3]
else:
return window[0] == screen[0] and window[1] == screen[1] and window[2] == screen[2] and window[3] == screen[3]
except Exception as e:
report(e)
def winEnumHandler(hwnd, _):
nonlocal fullscreen
if win32gui.IsWindowVisible(hwnd):
if compareFullScreenRects(win32gui.GetWindowRect(hwnd), self.full_screen_rect, newMethod):
if clockOnFirstMon and self.textInputHostHWND == 0:
pythoncom.CoInitialize()
_, pid = win32process.GetWindowThreadProcessId(hwnd)
_wmi = win32com.client.GetObject('winmgmts:')
# collect all the running processes
processes = _wmi.ExecQuery(f'Select Name from win32_process where ProcessId = {pid}')
for p in processes:
if p.Name != "TextInputHost.exe":
if(win32gui.GetWindowText(hwnd) not in blacklistedFullscreenApps):
print("🟡 Fullscreen window detected!", win32gui.GetWindowText(hwnd), win32gui.GetWindowRect(hwnd), "Fullscreen rect:", self.full_screen_rect)
fullscreen = True
else:
print("🟢 Cached text input host hwnd:", hwnd)
self.textInputHostHWND = hwnd
self.INTLOOPTIME = 2
else:
if win32gui.GetWindowText(hwnd) not in blacklistedFullscreenApps and hwnd != self.textInputHostHWND:
print("🟡 Fullscreen window detected!", win32gui.GetWindowText(hwnd), win32gui.GetWindowRect(hwnd), "Fullscreen rect:", self.full_screen_rect)
fullscreen = True
if not legacyMethod:
win32gui.EnumWindows(winEnumHandler, 0)
else:
hwnd = win32gui.GetForegroundWindow()
if(compareFullScreenRects(win32gui.GetWindowRect(hwnd), self.full_screen_rect, newMethod)):
if(win32gui.GetWindowText(hwnd) not in blacklistedFullscreenApps):
print("🟡 Fullscreen window detected!", win32gui.GetWindowText(hwnd), win32gui.GetWindowRect(hwnd), "Fullscreen rect:", self.full_screen_rect)
fullscreen = True
return fullscreen
except Exception as e:
report(e)
return False
def mainClockLoop(self):
global isRDPRunning, numOfNotifs
EnableHideOnFullScreen = not(getSettings("DisableHideOnFullScreen"))
DisableHideWithTaskbar = getSettings("DisableHideWithTaskbar")
EnableHideOnRDP = getSettings("EnableHideOnRDP")
clockOnFirstMon = getSettings("ForceClockOnFirstMonitor")
newMethod = getSettings("NewFullScreenMethod")
notifs = not getSettings("DisableNotifications")
legacyMethod = getSettings("legacyFullScreenMethod")
oldNotifNumber = 0
print(f"🔵 Show/hide loop started with parameters: HideonFS:{EnableHideOnFullScreen}, NotHideOnTB:{DisableHideWithTaskbar}, HideOnRDP:{EnableHideOnRDP}, ClockOn1Mon:{clockOnFirstMon}, NefWSMethod:{newMethod}, DisableNotifications:{notifs}, legacyFullScreenMethod:{legacyMethod}")
if self.isLowCpuMode or clockOnFirstMon:
self.INTLOOPTIME = 15
else:
self.INTLOOPTIME = 2
while True:
self.isRDPRunning = isRDPRunning
isFullScreen = self.theresFullScreenWin(clockOnFirstMon, newMethod, legacyMethod)
for i in range(self.INTLOOPTIME):
if (not(isFullScreen) or not(EnableHideOnFullScreen)) and not self.clockShouldBeHidden:
if notifs:
if isFocusAssist:
self.callInMainSignal.emit(self.label.enableFocusAssistant)
elif numOfNotifs > 0:
if oldNotifNumber != numOfNotifs:
self.callInMainSignal.emit(self.label.enableNotifDot)
else:
self.callInMainSignal.emit(self.label.disableClockIndicators)
oldNotifNumber = numOfNotifs
if self.autoHide and not(DisableHideWithTaskbar):
mousePos = getMousePos()
if (mousePos.y()+1 == self.screenGeometry.y()+self.screenGeometry.height()) and self.screenGeometry.x() < mousePos.x() and self.screenGeometry.x()+self.screenGeometry.width() > mousePos.x():
self.refresh.emit()
elif (mousePos.y() <= self.screenGeometry.y()+self.screenGeometry.height()-self.preferedHeight):
self.hideSignal.emit()
else:
if(self.isRDPRunning and EnableHideOnRDP):
self.hideSignal.emit()
else:
self.refresh.emit()
else:
self.hideSignal.emit()
time.sleep(0.2)
time.sleep(0.2)
def updateTextLoop(self) -> None:
global timeStr
while True:
self.label.setText(timeStr)
time.sleep(0.1)
def showCalendar(self):
self.keyboard.press(Key.cmd)
self.keyboard.press('n')
self.keyboard.release('n')
self.keyboard.release(Key.cmd)
if self.hideClockWhenClicked:
print("🟡 Hiding clock because clicked!")
self.clockShouldBeHidden = True
def showClockOn10s(self: Clock):
time.sleep(10)
print("🟢 Showing clock because 10s passed!")
self.clockShouldBeHidden = False
KillableThread(target=showClockOn10s, args=(self,), name=f"Temporary: 10s thread").start()
def showDesktop(self):
self.keyboard.press(Key.cmd)
self.keyboard.press('d')
self.keyboard.release('d')
self.keyboard.release(Key.cmd)
def focusOutEvent(self, event: QFocusEvent) -> None:
self.refresh.emit()
def refreshandShow(self):
if(self.shouldBeVisible):
self.show()
self.raise_()
if(self.lastTheme >= 0): # If the color is not customized
theme = readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Themes\Personalize", "SystemUsesLightTheme", 1)
if(theme != self.lastTheme):
if (theme == 0 or self.forceDarkTheme) and not self.forceLightTheme:
self.lastTheme = 0
self.label.setStyleSheet(f"padding: {self.getPx(1)}px;padding-right: {self.getPx(3)}px;margin-right: {self.getPx(12)}px;padding-left: {self.getPx(5)}px; color: white;")#background-color: rgba({self.bgcolor}%)")
self.label.bgopacity = 0.1
self.fontfamilies = [element.replace("Segoe UI Variable Display", "Segoe UI Variable Display Semib") for element in self.fontfamilies]
self.font.setFamilies(self.fontfamilies)
if lang == lang_ko:
self.font.setWeight(QFont.Weight.Normal)
elif lang == lang_zh_TW or lang == lang_zh_CN:
self.font.setWeight(QFont.Weight.Normal)
else:
self.font.setWeight(QFont.Weight.DemiBold)
self.label.setFont(self.font)
else:
self.lastTheme = 1
self.label.setStyleSheet(f"padding: {self.getPx(1)}px;padding-right: {self.getPx(3)}px;margin-right: {self.getPx(12)}px;padding-left: {self.getPx(5)}px; color: black;")#background-color: rgba({self.bgcolor}%)")
self.label.bgopacity = .5
self.fontfamilies = [element.replace("Segoe UI Variable Display Semib", "Segoe UI Variable Display") for element in self.fontfamilies]
self.font.setFamilies(self.fontfamilies)
self.font.setWeight(QFont.Weight.ExtraLight)
self.label.setFont(self.font)
def closeEvent(self, event: QCloseEvent) -> None:
self.shouldBeVisible = False
try:
print(f"🟡 Closing clock on {self.win32screen}")
self.loop0.kill()
self.loop1.kill()
self.loop2.kill()
except AttributeError:
pass
event.accept()
return super().closeEvent(event)
def showEvent(self, event: QShowEvent) -> None:
return super().showEvent(event)
class Label(QLabel):
clicked = Signal()
def __init__(self, text, parent):
super().__init__(text, parent=parent)
self.setMouseTracking(True)
self.backgroundwidget = QWidget(self)
self.color = "255, 255, 255"
self.installEventFilter(self)
self.bgopacity = 0.1
self.backgroundwidget.setContentsMargins(0, self.window().prefMargins, 0, self.window().prefMargins)
self.backgroundwidget.setStyleSheet(f"background-color: rgba(127, 127, 127, 0.01);border-top: {self.getPx(1)}px solid rgba({self.color},0);margin-top: {self.window().prefMargins}px; margin-bottom: {self.window().prefMargins};")
self.backgroundwidget.show()
if self.window().transparentBackground:
colorOffset = .01
else:
colorOffset = 0
self.showBackground = QVariantAnimation()
self.showBackground.setStartValue(0+colorOffset) # Not 0 to prevent white flashing on the border
self.showBackground.setEndValue(self.bgopacity)
self.showBackground.setDuration(100)
self.showBackground.setEasingCurve(QEasingCurve.InOutQuad) # Not strictly required, just for the aesthetics
self.showBackground.valueChanged.connect(lambda opacity: self.backgroundwidget.setStyleSheet(f"background-color: rgba({self.color}, {opacity/2});border-top: {self.getPx(1)}px solid rgba({self.color}, {opacity+colorOffset});margin-top: {self.window().prefMargins}px; margin-bottom: {self.window().prefMargins};"))
self.hideBackground = QVariantAnimation()
self.hideBackground.setStartValue(self.bgopacity)
self.hideBackground.setEndValue(0+colorOffset) # Not 0 to prevent white flashing on the border
self.hideBackground.setDuration(100)
self.hideBackground.setEasingCurve(QEasingCurve.InOutQuad) # Not strictly required, just for the aesthetics
self.hideBackground.valueChanged.connect(lambda opacity: self.backgroundwidget.setStyleSheet(f"background-color: rgba({self.color}, {opacity/2});border-top: {self.getPx(1)}px solid rgba({self.color}, {opacity+colorOffset});margin-top: {self.window().prefMargins}px; margin-bottom: {self.window().prefMargins};"))
self.setAutoFillBackground(True)
self.backgroundwidget.setGeometry(0, 0, self.width(), self.height())
self.opacity=QGraphicsOpacityEffect(self)
self.opacity.setOpacity(1.00)
self.backgroundwidget.setGraphicsEffect(self.opacity)
self.focusassitant = True
self.focusAssitantLabel = QPushButton(self)
self.focusAssitantLabel.move(self.width(), 0)
self.focusAssitantLabel.setAttribute(Qt.WA_TransparentForMouseEvents)
self.focusAssitantLabel.setStyleSheet("background: transparent; margin: none; padding: none;")
self.focusAssitantLabel.resize(self.getPx(30), self.height())
self.focusAssitantLabel.setIcon(QIcon(getPath(f"moon_{getTaskbarIconMode()}.png")))
self.focusAssitantLabel.setIconSize(QSize(self.getPx(16), self.getPx(16)))
accColors = getColors()
self.notifdot = True
self.notifDotLabel = QLabel("", self)
self.notifDotLabel.setAlignment(Qt.AlignVCenter | Qt.AlignHCenter)
self.notifDotLabel.setObjectName("notifIndicator")
self.notifDotLabel.setStyleSheet(f"font-size: 8pt;font-family: \"Segoe UI Variable Display\";border-radius: {self.getPx(8)}px;padding: 0px;padding-bottom: {self.getPx(2)}px;padding-left: {self.getPx(3)}px;padding-right: {self.getPx(2)}px;margin: 0px;border:0px;")
self.disableClockIndicators()
def enableFocusAssistant(self):
if not self.focusassitant:
if self.notifdot:
self.disableClockIndicators()
self.focusassitant = True
self.setContentsMargins(self.getPx(5), self.getPx(2), self.getPx(43), self.getPx(2))
self.focusAssitantLabel.move(self.width()-self.contentsMargins().right(), 0)
self.focusAssitantLabel.setFixedWidth(self.getPx(30))
self.focusAssitantLabel.setFixedHeight(self.height())
self.focusAssitantLabel.setIconSize(QSize(self.getPx(16), self.getPx(16)))
self.focusAssitantLabel.setIcon(QIcon(getPath(f"moon_{getTaskbarIconMode()}.png")))
self.focusAssitantLabel.show()
def enableNotifDot(self):
self.notifDotLabel.setText(str(numOfNotifs))
if not self.notifdot:
self.notifdot = True
self.setContentsMargins(self.getPx(5), self.getPx(2), self.getPx(43), self.getPx(2))
topBottomPadding = (self.height()-self.getPx(16))/2 # top-bottom margin
leftRightPadding = (self.getPx(30)-self.getPx(16))/2 # left-right margin
self.notifDotLabel.move(int(self.width()-self.contentsMargins().right()+leftRightPadding), int(topBottomPadding))
self.notifDotLabel.resize(self.getPx(16), self.getPx(16))
self.notifDotLabel.setStyleSheet(f"font-size: 8pt;font-family: \"Segoe UI Variable Display\";border-radius: {self.getPx(8)}px;padding: 0px;padding-bottom: {self.getPx(2)}px;padding-left: {self.getPx(3)}px;padding-right: {self.getPx(2)}px;margin: 0px;border:0px;")
self.notifDotLabel.show()
def disableClockIndicators(self):
if self.focusassitant:
self.focusassitant = False
self.setContentsMargins(self.getPx(6), self.getPx(2), self.getPx(13), self.getPx(2))
self.focusAssitantLabel.hide()
if self.notifdot:
self.notifdot = False
self.setContentsMargins(self.getPx(6), self.getPx(2), self.getPx(13), self.getPx(2))
self.notifDotLabel.hide()
def getPx(self, i: int) -> int:
return round(i*(self.screen().logicalDotsPerInch()/96))
def enterEvent(self, event: QEvent, r=False) -> None:
geometry: int = self.width()
self.showBackground.setStartValue(.01)
self.showBackground.setEndValue(self.bgopacity) # Not 0 to prevent white flashing on the border
if not self.window().clockOnTheLeft:
self.backgroundwidget.move(0, 2)
self.backgroundwidget.resize(geometry, self.height()-4)
else:
self.backgroundwidget.move(0, 2)
self.backgroundwidget.resize(geometry, self.height()-4)
self.showBackground.start()
if not r:
self.enterEvent(event, r=True)
return super().enterEvent(event)
def leaveEvent(self, event: QEvent) -> None:
self.hideBackground.setStartValue(self.bgopacity)
self.hideBackground.setEndValue(.01) # Not 0 to prevent white flashing on the border
self.hideBackground.start()
return super().leaveEvent(event)
def getTextUsedSpaceRect(self):
text = self.text().strip()
if len(text.split("\n"))>=3:
mult = 0.633333333333333333
elif len(text.split("\n"))==2:
mult = 1
else:
mult = 1.5
return self.fontMetrics().boundingRect(text).width()*mult
def mousePressEvent(self, ev: QMouseEvent) -> None:
self.setWindowOpacity(0.7)
self.setWindowOpacity(0.7)
self.opacity.setOpacity(0.60)
self.backgroundwidget.setGraphicsEffect(self.opacity)
return super().mousePressEvent(ev)
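# A left click emits clicked() (which opens the calendar flyout); a right click opens the tray icon's context menu positioned just above the clock window.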
def mouseReleaseEvent(self, ev: QMouseEvent) -> None:
self.setWindowOpacity(1)
self.setWindowOpacity(1)
self.opacity.setOpacity(1.00)
self.backgroundwidget.setGraphicsEffect(self.opacity)
if(ev.button() == Qt.RightButton):
mousePos = getMousePos()
if(i.contextMenu().height() != 480):
mousePos.setY(self.window().y()-(i.contextMenu().height()+5))
else:
if getSettings("HideTaskManagerButton"):
mousePos.setY(self.window().y()-int(260*(i.contextMenu().screen().logicalDotsPerInchX()/96)))
else:
mousePos.setY(self.window().y()-int(370*(i.contextMenu().screen().logicalDotsPerInchX()/96)))
i.execMenu(mousePos)
else:
self.clicked.emit()
return super().mouseReleaseEvent(ev)
def paintEvent(self, event: QPaintEvent) -> None:
w = self.minimumSizeHint().width()
try:
mw = int(getSettingsValue("ClockFixedWidth"))
if mw > w:
w = mw
except Exception as e:
report(e)
if w<self.window().getPx(self.window().preferedwidth) and not self.window().clockOnTheLeft:
self.move(self.window().getPx(self.window().preferedwidth)-w+self.getPx(2), 0)
self.resize(w, self.height())
else:
self.move(0, 0)
self.resize(w, self.height())
return super().paintEvent(event)
def resizeEvent(self, event: QResizeEvent) -> None:
if self.focusassitant:
self.focusassitant = False
self.enableFocusAssistant()
elif self.notifdot:
self.notifdot = False
self.enableNotifDot()
else:
self.notifdot = True
self.focusassitant = True
self.disableClockIndicators()
return super().resizeEvent(event)
def window(self) -> Clock:
return super().window()
# Start of main script
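# Startup sequence: create the QApplication, spawn the helper threads (restart counter, locale string builder, updater, single-instance guard, sleep and WNF listeners), build the clocks, settings window and tray icon, and relaunch the process whenever a screen is added, removed or the primary screen changes.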
QApplication.setAttribute(Qt.AA_DisableHighDpiScaling)
app = QApplication(sys.argv)
app.setQuitOnLastWindowClosed(False)
mController: MouseController = None
sw: SettingsWindow = None
i: TaskbarIconTray = None
st: KillableThread = None # Will be defined on loadClocks
KillableThread(target=resetRestartCount, daemon=True, name="Main: Restart counter").start()
KillableThread(target=timeStrThread, daemon=True, name="Main: Locale string loader").start()
loadClocks()
print(f"🟢 Loaded clocks in {time.time()-FirstTime}")
tdir = tempfile.TemporaryDirectory()
tempDir = tdir.name
sw = SettingsWindow() # Declare settings window
i = TaskbarIconTray(app)
mController = MouseController()
app.primaryScreenChanged.connect(lambda: os.startfile(sys.executable))
app.screenAdded.connect(lambda: os.startfile(sys.executable))
app.screenRemoved.connect(lambda: os.startfile(sys.executable))
signal = RestartSignal()
showNotif = InfoSignal()
showWarn = InfoSignal()
killSignal = InfoSignal()
showNotif.infoSignal.connect(lambda a, b: showMessage(a, b))
showWarn.infoSignal.connect(lambda a, b: wanrUserAboutUpdates(a, b))
killSignal.infoSignal.connect(lambda: app.quit())
signal.restartSignal.connect(lambda: restartClocks("checkLoop"))
KillableThread(target=updateChecker, daemon=True, name="Main: Updater").start()
KillableThread(target=isElevenClockRunningThread, daemon=True, name="Main: Instance controller").start()
if not getSettings("EnableLowCpuMode"): KillableThread(target=checkIfWokeUpThread, daemon=True, name="Main: Sleep listener").start()
if not getSettings("EnableLowCpuMode"): KillableThread(target=wnfDataThread, daemon=True, name="Main: WNF Data listener").start()
print("🔵 Low cpu mode is set to", str(getSettings("EnableLowCpuMode"))+". DisableNotifications is set to", getSettings("DisableNotifications"))
rdpThread = KillableThread(target=checkRDP, daemon=True, name="Main: Remote desktop controller")
if getSettings("EnableHideOnRDP"):
rdpThread.start()
globals.tempDir = tempDir # Register global variables
globals.old_stdout = old_stdout # Register global variables
globals.buffer = buffer # Register global variables
globals.app = app # Register global variables
globals.sw = sw # Register global variables
globals.trayIcon = i # Register global variables
globals.loadTimeFormat = loadTimeFormat # Register global functions
globals.updateIfPossible = updateIfPossible # Register global functions
globals.restartClocks = restartClocks # Register global functions
globals.closeClocks = closeClocks # Register global functions
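# One-time "what's new" dialog: shown only the first time this version runs and skipped when silent updates are enabled; it also makes the clock on the first monitor the new default.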
if not(getSettings("Updated3.21Already")) and not(getSettings("EnableSilentUpdates")):
setSettings("ForceClockOnFirstMonitor", True)
setSettings("Updated3.21Already", True)
msg = QFramelessDialog(parent=None, closeOnClick=False)
msg.setAutoFillBackground(True)
msg.setStyleSheet(sw.styleSheet())
msg.setAttribute(QtCore.Qt.WA_StyledBackground)
msg.setObjectName("QMessageBox")
msg.setTitle("ElevenClock Updater")
msg.setText(f"""<b>ElevenClock has updated to version {versionName} successfully.</b>
<br><br>This update brings:<br>
<ul><li>The ability to specify a clock minimum width</li>
<li> The ability to search through the settings</li>
<li> Fixed an aesthetic issue with the seconds</li>
<li> Added a button to reset ElevenClock</li>
<li> Fixed an issue where ElevenClock would crash when clicking the right-click menu</li>
<li> Added Nynorsk</li>
<li> Some bug fixes and other improvements</li></ul>""")
msg.addButton("Ok", QDialogButtonBox.ButtonRole.ApplyRole, lambda: msg.close())
msg.addButton("Full changelog", QDialogButtonBox.ButtonRole.ResetRole, lambda: os.startfile("https://github.com/martinet101/ElevenClock/releases"))
def settNClose():
sw.show()
msg.close()
msg.addButton("Settings", QDialogButtonBox.ButtonRole.ActionRole, lambda: settNClose())
msg.setDefaultButtonRole(QDialogButtonBox.ButtonRole.ApplyRole, sw.styleSheet())
msg.setWindowTitle("ElevenClock has updated!")
msg.show()
showSettings = False
if "--settings" in sys.argv or showSettings:
sw.show()
if not getSettings("DefaultPrefsLoaded"):
setSettings("AlreadyInstalled", True)
setSettings("NewFullScreenMethod", True)
setSettings("ForceClockOnFirstMonitor", True)
showMessage("Welcome to ElevenClock", "You can customize Elevenclock from the ElevenClock Settings. You can search them on the start menu or right-clicking on any clock -> ElevenClock Settings", uBtn=False)
print("🟢 Default settings loaded")
setSettings("DefaultPrefsLoaded", True)
showWelcomeWizard = False
if showWelcomeWizard or "--welcome" in sys.argv:
import welcome
ww = welcome.WelcomeWindow()
print(f"🟢 Loaded everything in {time.time()-FirstTime}")
if "--quit-on-loaded" in sys.argv: # This is a testing feature to test if the script can load successfully
sys.exit(0)
app.exec_()
sys.exit(0)
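# Last-resort crash handler: collect OS and app details plus the traceback, open the browser on a pre-filled error report page and exit with a non-zero status.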
except Exception as e:
import webbrowser, traceback, platform
if not "versionName" in locals() and not "versionName" in globals():
versionName = "Unknown"
if not "version" in locals() and not "version" in globals():
version = "Unknown"
os_info = f"" + \
f" OS: {platform.system()}\n"+\
f" Version: {platform.win32_ver()}\n"+\
f" OS Architecture: {platform.machine()}\n"+\
f" APP Architecture: {platform.architecture()[0]}\n"+\
f" APP Version: {versionName}\n"+\
f" APP Version Code: {version}\n"+\
f" Program: ElevenClock"+\
"\n\n-----------------------------------------------------------------------------------------"
traceback_info = "Traceback (most recent call last):\n"
try:
for line in traceback.extract_tb(e.__traceback__).format():
traceback_info += line
traceback_info += f"\n{type(e).__name__}: {str(e)}"
except:
traceback_info += "\nUnable to get traceback"
traceback_info += str(type(e))
traceback_info += ": "
traceback_info += str(e)
webbrowser.open(("https://www.somepythonthings.tk/error-report/?appName=ElevenClock&errorBody="+os_info.replace('\n', '{l}').replace(' ', '{s}')+"{l}{l}{l}{l}ElevenClock Log:{l}"+str("\n\n\n\n"+traceback_info).replace('\n', '{l}').replace(' ', '{s}')).replace("#", "|=|"))
print(traceback_info)
sys.exit(1)
|
try:
import time
FirstTime = time.time()
import os
import io
import sys
import time
import glob
import socket
import locale
import hashlib
import tempfile
import datetime
import subprocess
from ctypes import windll
from urllib.request import urlopen
try:
import psutil
importedPsutil = True
except ImportError:
importedPsutil = False
import win32gui
import win32api
import pythoncom
import win32process
import win32com.client
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import pyqtSignal as Signal
from pynput.keyboard import Controller, Key
from pynput.mouse import Controller as MouseController
from external.FramelessWindow import QFramelessDialog
from languages import *
import globals
old_stdout = sys.stdout
sys.stdout = buffer = io.StringIO()
from settings import *
from tools import *
import tools
from external.WnfReader import isFocusAssistEnabled, getNotificationNumber
blacklistedProcesses = ["msrdc.exe", "mstsc.exe", "CDViewer.exe", "wfica32.exe", "vmware-view.exe", "vmware.exe"]
blacklistedFullscreenApps = ("", "Program Manager", "NVIDIA GeForce Overlay", "ElenenClock_IgnoreFullscreenEvent") # The empty string accounts for titleless windows
seconddoubleclick = False
isRDPRunning = False
restartCount = 0
tempDir = ""
timeStr = ""
dateTimeFormat = ""
clocks = []
oldScreens = []
isFocusAssist = False
numOfNotifs = 0
print("---------------------------------------------------------------------------------------------------")
print("")
print(f" ElevenClock's {versionName} (v{version}) log: Select all the text and hit Ctrl+C to copy it")
print(f" All modules loaded successfully and sys.stdout patched correctly, starting main script")
print(f" Translator function set language to \"{langName}\"")
print("")
print("---------------------------------------------------------------------------------------------------")
print("")
print(" Log legend:")
print(" 🔵: Verbose")
print(" 🟢: Information")
print(" 🟡: Warning")
print(" 🟠: Handled unexpected exception")
print(" 🔴: Unhandled unexpected exception")
print(" 🟣: Handled expected exception")
print("")
def _(s) -> str:
return tools._(s)
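# Polls the WMI process list every 5 seconds and flags isRDPRunning when one of the remote desktop / VDI clients listed in blacklistedProcesses is found; this thread is only started when EnableHideOnRDP is set.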
def checkRDP():
def checkIfElevenClockRunning(processes, blacklistedProcess) -> bool:
for p_name in processes:
if p_name in blacklistedProcess:
print(f"🟡 Blacklisted procName {p_name} detected, hiding...")
return True
return False
global isRDPRunning
print("🔵 Starting RDP thread")
while True:
pythoncom.CoInitialize()
_wmi = win32com.client.GetObject('winmgmts:')
processes = _wmi.ExecQuery('Select Name from win32_process')
procs = [p.Name for p in processes]
isRDPRunning = checkIfElevenClockRunning(procs, blacklistedProcesses)
time.sleep(5)
def getMousePos():
try:
return QPoint(mController.position[0], mController.position[1])
except AttributeError:
print("🟠 Mouse thread returned AttributeError")
except Exception as e:
report(e)
def updateChecker():
updateIfPossible()
time.sleep(60)
while True:
updateIfPossible()
time.sleep(7200)
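# Update check: verify the update server by resolving its canonical DNS name, download the version manifest (version///...///sha256) and compare it against the running version.
# When a newer version exists, the installer is fetched from the GitHub releases page, its SHA-256 is checked against the manifest and it is launched silently (waiting for the mouse to stay still when silent updates are enabled); otherwise the user is only notified or warned.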
def updateIfPossible(force = False):
try:
if(not(getSettings("DisableAutoCheckForUpdates")) or force):
print("🔵 Starting update check")
integrityPass = False
dmname = socket.gethostbyname_ex("versions.somepythonthings.tk")[0]
if(dmname == "769432b9-3560-4f94-8f90-01c95844d994.id.repl.co" or getSettings("BypassDomainAuthCheck")): # Check provider IP to prevent exploits
integrityPass = True
try:
response = urlopen("https://versions.somepythonthings.tk/versions/elevenclock.ver" if not getSettings("AlternativeUpdateServerProvider") else "http://www.somepythonthings.tk/versions/elevenclock.ver")
except Exception as e:
report(e)
response = urlopen("http://www.somepythonthings.tk/versions/elevenclock.ver")
integrityPass = True
print("🔵 Version URL:", response.url)
response = response.read().decode("utf8")
new_version_number = response.split("///")[0]
provided_hash = response.split("///")[2].replace("\n", "").lower()
if float(new_version_number) > version:
print("🟢 Updates found!")
if(not(getSettings("DisableAutoInstallUpdates")) or force):
showNotif.infoSignal.emit(("ElevenClock Updater"), ("ElevenClock is downloading updates"))
if(integrityPass):
url = "https://github.com/martinet101/ElevenClock/releases/latest/download/ElevenClock.Installer.exe"
filedata = urlopen(url)
datatowrite = filedata.read()
filename = ""
with open(os.path.join(tempDir, "SomePythonThings-ElevenClock-Updater.exe"), 'wb') as f:
f.write(datatowrite)
filename = f.name
if(hashlib.sha256(datatowrite).hexdigest().lower() == provided_hash):
print("🔵 Hash: ", provided_hash)
print("🟢 Hash ok, starting update")
if(getSettings("EnableSilentUpdates") and not(force)):
mousePos = getMousePos()
time.sleep(5)
while mousePos != getMousePos():
print("🟡 User is using the mouse, waiting")
mousePos = getMousePos()
time.sleep(5)
subprocess.run('start /B "" "{0}" /verysilent'.format(filename), shell=True)
else:
subprocess.run('start /B "" "{0}" /silent'.format(filename), shell=True)
else:
print("🟠 Hash not ok")
print("🟠 File hash: ", hashlib.sha256(datatowrite).hexdigest())
print("🟠 Provided hash: ", provided_hash)
showWarn.infoSignal.emit(("Updates found!"), f"ElevenClock Version {new_version_number} is available, but ElevenClock can't verify the authenticity of the package. Please go to ElevenClock's homepage and download the latest version from there.\n\nDo you want to open the download page?")
else:
print("🟠 Can't verify update server authenticity, aborting")
print("🟠 Provided DmName:", dmname)
print("🟠 Expected DmNane: 769432b9-3560-4f94-8f90-01c95844d994.id.repl.co")
showWarn.infoSignal.emit(("Updates found!"), f"ElevenClock Version {new_version_number} is available, but ElevenClock can't verify the authenticity of the updates server. Please go to ElevenClock's homepage and download the latest version from there.\n\nDo you want to open the download page?")
else:
showNotif.infoSignal.emit(("Updates found!"), f"ElevenClock Version {new_version_number} is available. Go to ElevenClock's Settings to update")
else:
print("🟢 Updates not found")
else:
print("🟠 Update checking disabled")
#old_stdout.write(buffer.getvalue())
#old_stdout.flush()
except Exception as e:
report(e)
#old_stdout.write(buffer.getvalue())
#old_stdout.flush()
def resetRestartCount():
global restartCount
while True:
if(restartCount>0):
print("🔵 Restart loop:", restartCount)
restartCount -= 1
time.sleep(0.3)
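# Creates one Clock per monitor, skipping the primary screen unless ForceClockOnFirstMonitor is set and skipping secondary screens when HideClockOnSecondaryMonitors is set, then restarts the screen change listener.
# If the process exceeds roughly 150 MB or has restarted 20 times in a short period, the whole executable is relaunched instead.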
def loadClocks():
global clocks, oldScreens, st, restartCount
try:
st.kill()
except AttributeError:
pass
ForceClockOnFirstMonitor = getSettings("ForceClockOnFirstMonitor")
HideClockOnSecondaryMonitors = getSettings("HideClockOnSecondaryMonitors")
oldScreens = []
clocks = []
if importedPsutil:
process = psutil.Process(os.getpid())
memOk = (process.memory_info().rss/1048576) <= 150
else:
print("🟠 Psutil couldn't be imported!")
memOk = True
if restartCount<20 and memOk:
restartCount += 1
i = 0
for screen in app.screens():
screen: QScreen
oldScreens.append(getGeometry(screen))
if not screen == QGuiApplication.primaryScreen() or ForceClockOnFirstMonitor: # Check if we are not on the primary screen
if not HideClockOnSecondaryMonitors or screen == QGuiApplication.primaryScreen(): # First monitor is not affected by HideClockOnSecondaryMonitors
clocks.append(Clock(screen.logicalDotsPerInchX()/96, screen.logicalDotsPerInchY()/96, screen, i))
i += 1
else:
print("🟠 This is a secondary screen and is set to be skipped")
else: # Skip the primary display, as it has already the clock
print("🟡 This is the primary screen and is set to be skipped")
st = KillableThread(target=screenCheckThread, daemon=True, name="Main [loaded]: Screen listener")
st.start()
else:
os.startfile(sys.executable)
print("🔴 Overloading system, killing!")
app.quit()
sys.exit(1)
def getGeometry(screen: QScreen):
"""
Return a tuple containing: (screen_width, screen_height, screen_pos_x, screen_pos_y, screen_DPI, desktopWindowRect)
"""
try:
geometry = screen.geometry()
g = (geometry.width(), geometry.height(), geometry.x(), geometry.y(), screen.logicalDotsPerInch(), win32api.EnumDisplayMonitors())
return g
except Exception as e:
report(e)
geometry = QGuiApplication.primaryScreen().geometry()
g = (geometry.width(), geometry.height(), geometry.x(), geometry.y(), screen.logicalDotsPerInch(), win32api.EnumDisplayMonitors())
return g
def theyMatch(oldscreens, newscreens):
if len(oldscreens) != len(newscreens) or len(app.screens()) != len(win32api.EnumDisplayMonitors()):
return False # The number of displays has changed
# Check that all screen dimensions and dpi are the same as before
return all(old == getGeometry(new) for old, new in zip(oldscreens, newscreens))
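# Polls WNF roughly every half second for the Focus Assist state and, while Focus Assist is off, the number of pending notifications.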
def wnfDataThread():
global isFocusAssist, numOfNotifs
while True:
isFocusAssist = isFocusAssistEnabled()
time.sleep(0.25)
if not isFocusAssist:
numOfNotifs = getNotificationNumber()
time.sleep(0.25)
def screenCheckThread():
while theyMatch(oldScreens, app.screens()):
time.sleep(1)
signal.restartSignal.emit()
pass
def closeClocks():
for clock in clocks:
clock.hide()
clock.close()
def showMessage(title: str, body: str, uBtn: bool = True) -> None:
"""
Shows a Windows Notification
"""
lastState = i.isVisible()
i.show()
i.showMessage(title, body)
if uBtn:
sw.updateButton.show()
i.setVisible(lastState)
def restartClocks(caller: str = ""):
global clocks, st, rdpThread
closeClocks()
loadClocks()
loadTimeFormat()
try:
rdpThread.kill()
except AttributeError:
pass
rdpThread = KillableThread(target=checkRDP, daemon=True)
if(getSettings("EnableHideOnRDP")):
rdpThread.start()
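# Single-instance guard: each instance registers a timestamped ElevenClockRunning marker, deletes markers older than its own, and quits once its own marker has been removed by a newer instance.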
def isElevenClockRunningThread():
nowTime = time.time()
name = f"ElevenClockRunning{nowTime}"
setSettings(name, True, False)
while True:
try:
for file in glob.glob(os.path.join(os.path.join(os.path.expanduser("~"), ".elevenclock"), "ElevenClockRunning*")):
if(os.path.join(os.path.join(os.path.expanduser("~"), ".elevenclock"), name) == file):
pass
else:
if(float(file.replace(os.path.join(os.path.join(os.path.expanduser("~"), ".elevenclock"), "ElevenClockRunning"), "")) < nowTime): # If lockfile is older
os.remove(file)
if not(getSettings(name)):
print("🟠 KILLING, NEWER VERSION RUNNING")
killSignal.infoSignal.emit("", "")
except Exception as e:
report(e)
time.sleep(2)
def wanrUserAboutUpdates(a, b):
if(QMessageBox.question(sw, a, b, QMessageBox.Open | QMessageBox.Cancel, QMessageBox.Open) == QMessageBox.Open):
os.startfile("https://github.com/martinet101/ElevenClock/releases/latest")
def checkIfWokeUpThread():
while True:
lastTime = time.time()
time.sleep(3)
if((lastTime+6) < time.time()):
os.startfile(sys.executable)
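# Builds the strftime pattern from the Windows regional settings in the registry (sShortDate and sShortTime), honouring the time/date/weekday/week-number/seconds preferences; the "·" appended after the seconds is a marker consumed later by timeStrThread.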
def loadTimeFormat():
global dateTimeFormat
showSeconds = readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Explorer\Advanced", "ShowSecondsInSystemClock", 0) or getSettings("EnableSeconds")
locale.setlocale(locale.LC_ALL, readRegedit(r"Control Panel\International", "LocaleName", "en_US"))
dateTimeFormat = "%HH:%M\n%A\n(W%W) %d/%m/%Y"
if getSettings("DisableTime"):
dateTimeFormat = dateTimeFormat.replace("%HH:%M\n", "")
if getSettings("DisableDate"):
if("\n" in dateTimeFormat):
dateTimeFormat = dateTimeFormat.replace("\n(W%W) %d/%m/%Y", "")
else:
dateTimeFormat = dateTimeFormat.replace("(W%W) %d/%m/%Y", "")
elif not getSettings("EnableWeekNumber"):
dateTimeFormat = dateTimeFormat.replace("(W%W) ", "")
else:
dateTimeFormat = dateTimeFormat.replace("(W%W) ", f"({_('W')}%W) ")
if not getSettings("EnableWeekDay"):
try:
dateTimeFormat = dateTimeFormat.replace("%A", "").replace("\n\n", "\n")
if dateTimeFormat[-1] == "\n":
dateTimeFormat = dateTimeFormat[0:-1]
if dateTimeFormat[0] == "\n":
dateTimeFormat = dateTimeFormat[1:]
except IndexError as e:
print("🟠 Date/Time string looks to be empty!")
except Exception as e:
report(e)
tDateMode = readRegedit(r"Control Panel\International", "sShortDate", "dd/MM/yyyy")
print("🔵 tDateMode:", tDateMode)
dateMode = ""
for i, ministr in enumerate(tDateMode.split("'")):
if i%2==0:
dateMode += ministr.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%$").replace("d", "%#d").replace("$", "d").replace("MMMM", "%B").replace("MMM", "%b").replace("MM", "%m").replace("M", "%#m").replace("yyyy", "%Y").replace("yy", "%y")
else:
dateMode += ministr
tTimeMode = readRegedit(r"Control Panel\International", "sShortTime", "H:mm")
print("🔵 tTimeMode:", tTimeMode)
timeMode = ""
for i, ministr in enumerate(tTimeMode.split("'")):
if i%2==0:
timeMode += ministr.replace("HH", "%$").replace("H", "%#H").replace("$", "H").replace("hh", "%I").replace("h", "%#I").replace("mm", "%M").replace("m", "%#M").replace("tt", "%p").replace("t", "%p").replace("ss", "%S").replace("s", "%#S")
if not("S" in timeMode) and showSeconds == 1:
for separator in ":.-/_":
if(separator in timeMode):
timeMode += f"{separator}%S"
else:
timeMode += ministr
for separator in ":.-/_":
timeMode = timeMode.replace(f" %p{separator}%S", f"{separator}%S %p")
timeMode = timeMode.replace(f" %p{separator}%#S", f"{separator}%#S %p")
timeMode = timeMode.replace("%S", "%S·").replace("%#S", "%#S·")
dateTimeFormat = dateTimeFormat.replace("%d/%m/%Y", dateMode).replace("%HH:%M", timeMode)
print("🔵 Loaded date time format:", dateTimeFormat)
def timeStrThread():
global timeStr, dateTimeFormat
fixHyphen = getSettings("EnableHyphenFix")
encoding = 'unicode-escape'
while True:
for _ in range(36000):
dateTimeFormatUnicode = dateTimeFormat.encode(encoding).decode()
now = datetime.datetime.now()
timeStr = now.strftime(dateTimeFormatUnicode).encode().decode(encoding)
if fixHyphen:
timeStr = timeStr.replace("t-", "t -")
try:
secs = datetime.datetime.now().strftime("%S")
if secs[-1] == "1":
timeStr = timeStr.replace("·", " \u200e")
else:
timeStr = timeStr.replace("·", "")
except IndexError:
pass
time.sleep(0.1)
class RestartSignal(QObject):
restartSignal = Signal()
def __init__(self) -> None:
super().__init__()
class InfoSignal(QObject):
infoSignal = Signal(str, str)
def __init__(self) -> None:
super().__init__()
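# One Clock window is created per monitor: a frameless, always-on-top tool window positioned over the taskbar area of its screen that hosts a Label with the time, theme-aware colours and, optionally, a show-desktop button.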
class Clock(QWidget):
refresh = Signal()
hideSignal = Signal()
callInMainSignal = Signal(object)
styler = Signal(str)
preferedwidth = 200
preferedHeight = 48
focusassitant = True
lastTheme = 0
clockShouldBeHidden = False
shouldBeVisible = True
isRDPRunning = True
clockOnTheLeft = False
textInputHostHWND = 0
INTLOOPTIME = 2
def __init__(self, dpix: float, dpiy: float, screen: QScreen, index: int):
super().__init__()
if f"_{screen.name()}_" in getSettingsValue("BlacklistedMonitors"):
print("🟠 Monitor blacklisted!")
self.hide()
else:
self.index = index
print(f"🔵 Initializing clock {index}...")
self.callInMainSignal.connect(lambda f: f())
self.styler.connect(self.setStyleSheet)
self.taskbarBackgroundColor = not getSettings("DisableTaskbarBackgroundColor") and not (getSettings("UseCustomBgColor") or getSettings("AccentBackgroundcolor"))
self.transparentBackground = getSettings("DisableTaskbarBackgroundColor") and not (getSettings("UseCustomBgColor") or getSettings("AccentBackgroundcolor"))
if self.taskbarBackgroundColor:
print("🔵 Using taskbar background color")
self.bgcolor = "0, 0, 0, 0"
else:
print("🟡 Not using taskbar background color")
if getSettings("AccentBackgroundcolor"):
self.bgcolor = f"{getColors()[5 if isTaskbarDark() else 1]},100"
else:
self.bgcolor = getSettingsValue("UseCustomBgColor") if getSettingsValue("UseCustomBgColor") else "0, 0, 0, 0"
print("🔵 Using bg color:", self.bgcolor)
self.prefMargins = 0
try:
if readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Explorer\Advanced", "TaskbarSi", 1) == 0 or (not getSettings("DisableTime") and not getSettings("DisableDate") and getSettings("EnableWeekDay")):
self.prefMargins = self.getPx(5)
self.widgetStyleSheet = f"background-color: rgba(bgColor%); margin: {self.getPx(0)}px;margin-top: 0px;margin-bottom: 0px; border-radius: {self.getPx(5)}px;"
if not(not getSettings("DisableTime") and not getSettings("DisableDate") and getSettings("EnableWeekDay")):
print("🟡 Small sized taskbar")
self.preferedHeight = 32
self.preferedwidth = 200
else:
print("🟢 Regular sized taskbar")
self.prefMargins = self.getPx(3)
self.widgetStyleSheet = f"background-color: rgba(bgColor%);margin: {self.getPx(0)}px;border-radius: {self.getPx(5)}px;padding: {self.getPx(2)}px;"
except Exception as e:
print("🟡 Regular sized taskbar")
report(e)
self.prefMargins = self.getPx(3)
self.widgetStyleSheet = f"background-color: rgba(bgColor%);margin: {self.getPx(0)}px;border-radius: {self.getPx(5)}px;padding: {self.getPx(2)}px;"
self.setStyleSheet(self.widgetStyleSheet.replace("bgColor", self.bgcolor))
if getSettings("ClockFixedHeight"):
print("🟡 Custom height being used!")
try:
self.preferedHeight = int(getSettingsValue("ClockFixedHeight"))
except ValueError as e:
report(e)
self.win32screen = {"Device": None, "Work": (0, 0, 0, 0), "Flags": 0, "Monitor": (0, 0, 0, 0)}
for win32screen in win32api.EnumDisplayMonitors():
try:
if win32api.GetMonitorInfo(win32screen[0].handle)["Device"] == screen.name():
self.win32screen = win32api.GetMonitorInfo(win32screen[0].handle)
except Exception as e:
report(e)
if self.win32screen == {"Device": None, "Work": (0, 0, 0, 0), "Flags": 0, "Monitor": (0, 0, 0, 0)}: #If no display is matching
os.startfile(sys.executable) # Restart elevenclock
app.quit()
self.screenGeometry = QRect(self.win32screen["Monitor"][0], self.win32screen["Monitor"][1], self.win32screen["Monitor"][2]-self.win32screen["Monitor"][0], self.win32screen["Monitor"][3]-self.win32screen["Monitor"][1])
print("🔵 Monitor geometry:", self.screenGeometry)
self.refresh.connect(self.refreshandShow)
self.hideSignal.connect(self.hide)
self.keyboard = Controller()
self.setWindowFlag(Qt.WindowStaysOnTopHint)
self.setWindowFlag(Qt.FramelessWindowHint)
self.setAttribute(Qt.WA_TranslucentBackground)
self.setWindowFlag(Qt.Tool)
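# StuckRects3 stores the Explorer taskbar state; the hex blob below is only the fallback default. The code treats byte 8 == 123 as "taskbar set to auto-hide" and byte 12 == 1 as "taskbar docked at the top edge".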
hex_blob = b'0\x00\x00\x00\xfe\xff\xff\xffz\xf4\x00\x00\x03\x00\x00\x00T\x00\x00\x000\x00\x00\x00\x00\x00\x00\x00\x08\x04\x00\x00\x80\x07\x00\x008\x04\x00\x00`\x00\x00\x00\x01\x00\x00\x00'
registry_read_result = readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Explorer\StuckRects3", "Settings", hex_blob)
self.autoHide = registry_read_result[8] == 123
if self.autoHide:
print("🟡 ElevenClock set to hide with the taskbar")
self.clockOnTheLeft = getSettings("ClockOnTheLeft")
screenName = screen.name().replace("\\", "_")
if not self.clockOnTheLeft:
if getSettings(f"SpecificClockOnTheLeft{screenName}"):
self.clockOnTheLeft = True
print(f"🟡 Clock {screenName} on the left (forced)")
else:
if getSettings(f"SpecificClockOnTheRight{screenName}"):
self.clockOnTheLeft = False
print(f"🟡 Clock {screenName} on the right (forced)")
try:
if (registry_read_result[12] == 1 and not getSettings("ForceOnBottom")) or getSettings("ForceOnTop"):
h = self.screenGeometry.y()
print("🟢 Taskbar at top")
else:
h = self.screenGeometry.y()+self.screenGeometry.height()-(self.preferedHeight*dpiy)
print("🟡 Taskbar at bottom")
except Exception as e:
report(e)
h = self.screenGeometry.y()+self.screenGeometry.height()-(self.preferedHeight*dpiy)
print("🟡 Taskbar at bottom")
self.label = Label(timeStr, self)
if self.clockOnTheLeft:
print("🟡 Clock on the left")
w = self.screenGeometry.x()+8*dpix
self.label.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)
else:
self.label.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
print("🟢 Clock on the right")
w = self.screenGeometry.x()+self.screenGeometry.width()-((self.preferedwidth)*dpix)
if getSettings("CenterAlignment"):
self.label.setAlignment(Qt.AlignCenter)
xoff = 0
yoff = 0
if getSettings("ClockXOffset"):
print("🟡 X offset being used!")
try:
xoff = int(getSettingsValue("ClockXOffset"))
except ValueError as e:
report(e)
if getSettings("ClockYOffset"):
print("🟡 Y offset being used!")
try:
yoff = int(getSettingsValue("ClockYOffset"))
except ValueError as e:
report(e)
self.w = int(w) + xoff
self.h = int(h) + yoff
self.dpix = dpix
self.dpiy = dpiy
if not(getSettings("EnableWin32API")):
print("🟢 Using qt's default positioning system")
self.move(self.w, self.h)
self.resize(int(self.preferedwidth*dpix), int(self.preferedHeight*dpiy))
else:
print("🟡 Using win32 API positioning system")
self.user32 = windll.user32
self.user32.SetProcessDPIAware() # forces functions to return real pixel numbers instead of scaled values
win32gui.SetWindowPos(self.winId(), 0, int(w), int(h), int(self.preferedwidth*dpix), int(self.preferedHeight*dpiy), False)
print("🔵 Clock geometry:", self.geometry())
self.font: QFont = QFont()
customFont = getSettingsValue("UseCustomFont")
if customFont == "":
if lang == lang_ko:
self.fontfamilies = ["Malgun Gothic", "Segoe UI Variable", "sans-serif"]
elif lang == lang_zh_TW:
self.fontfamilies = ["Microsoft JhengHei UI", "Segoe UI Variable", "sans-serif"]
elif lang == lang_zh_CN:
self.fontfamilies = ["Microsoft YaHei UI", "Segoe UI Variable", "sans-serif"]
else:
self.fontfamilies = ["Segoe UI Variable Display", "sans-serif"]
else:
self.fontfamilies = [customFont]
print(f"🔵 Font families: {self.fontfamilies}")
customSize = getSettingsValue("UseCustomFontSize")
if customSize == "":
self.font.setPointSizeF(9.3)
else:
try:
self.font.setPointSizeF(float(customSize))
except Exception as e:
self.font.setPointSizeF(9.3)
report(e)
print(f"🔵 Font size: {self.font.pointSizeF()}")
self.font.setStyleStrategy(QFont.PreferOutline)
self.font.setLetterSpacing(QFont.PercentageSpacing, 100)
self.font.setHintingPreference(QFont.HintingPreference.PreferNoHinting)
self.label.setFont(self.font)
accColors = getColors()
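# make_style_sheet builds the label stylesheet: DPI-scaled padding and margins, the requested text colour, and accent colours for the #notifIndicator badge (different accent shades are picked for dark and light taskbars).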
def make_style_sheet(a, b, c, d, color):
bg = 1 if isTaskbarDark() else 4
fg = 6 if isTaskbarDark() else 1
return f"*{{padding: {a}px;padding-right: {b}px;margin-right: {c}px;padding-left: {d}px; color: {color};}}#notifIndicator{{background-color: rgb({accColors[bg]});color:rgb({accColors[fg]});}}"
if getSettings("UseCustomFontColor"):
print("🟡 Using custom text color:", getSettingsValue('UseCustomFontColor'))
self.lastTheme = -1
style_sheet_string = make_style_sheet(self.getPx(1), self.getPx(3), self.getPx(12), self.getPx(5), f"rgb({getSettingsValue('UseCustomFontColor')})")
self.label.setStyleSheet(style_sheet_string)
self.label.bgopacity = .1
self.fontfamilies = [element.replace("Segoe UI Variable Display", "Segoe UI Variable Display Semib") for element in self.fontfamilies]
self.font.setFamilies(self.fontfamilies)
if lang == lang_ko:
self.font.setWeight(QFont.Weight.Normal)
elif lang == lang_zh_TW or lang == lang_zh_CN:
self.font.setWeight(QFont.Weight.Normal)
else:
self.font.setWeight(QFont.Weight.DemiBold)
self.label.setFont(self.font)
elif isTaskbarDark():
print("🟢 Using white text (dark mode)")
self.lastTheme = 0
style_sheet_string = make_style_sheet(self.getPx(1), self.getPx(3), self.getPx(12), self.getPx(5), "white")
self.label.setStyleSheet(style_sheet_string)
self.label.bgopacity = .1
self.fontfamilies = [element.replace("Segoe UI Variable Display", "Segoe UI Variable Display Semib") for element in self.fontfamilies]
self.font.setFamilies(self.fontfamilies)
if lang == lang_ko:
self.font.setWeight(QFont.Weight.Normal)
elif lang == lang_zh_TW or lang == lang_zh_CN:
self.font.setWeight(QFont.Weight.Normal)
else:
self.font.setWeight(QFont.Weight.DemiBold)
self.label.setFont(self.font)
else:
print("🟢 Using black text (light mode)")
self.lastTheme = 1
style_sheet_string = make_style_sheet(self.getPx(1), self.getPx(3), self.getPx(12), self.getPx(5), "black")
self.label.setStyleSheet(style_sheet_string)
self.label.bgopacity = .5
self.fontfamilies = [element.replace("Segoe UI Variable Display Semib", "Segoe UI Variable Display") for element in self.fontfamilies]
self.font.setFamilies(self.fontfamilies)
self.font.setWeight(QFont.Weight.ExtraLight)
self.label.setFont(self.font)
self.label.clicked.connect(lambda: self.showCalendar())
self.label.move(0, 0)
self.label.setFixedHeight(self.height())
self.label.resize(self.width()-self.getPx(8), self.height())
self.label.show()
loadTimeFormat()
self.show()
self.raise_()
self.setFocus()
self.full_screen_rect = (self.screenGeometry.x(), self.screenGeometry.y(), self.screenGeometry.x()+self.screenGeometry.width(), self.screenGeometry.y()+self.screenGeometry.height())
print("🔵 Full screen rect: ", self.full_screen_rect)
self.forceDarkTheme = getSettings("ForceDarkTheme")
self.forceLightTheme = getSettings("ForceLightTheme")
self.hideClockWhenClicked = getSettings("HideClockWhenClicked")
self.isLowCpuMode = getSettings("EnableLowCpuMode")
self.primary_screen = QGuiApplication.primaryScreen()
self.oldBgColor = 0
self.user32 = windll.user32
self.user32.SetProcessDPIAware() # optional, makes functions return real pixel numbers instead of scaled values
self.loop0 = KillableThread(target=self.updateTextLoop, daemon=True, name=f"Clock[{index}]: Time updater loop")
self.loop1 = KillableThread(target=self.mainClockLoop, daemon=True, name=f"Clock[{index}]: Main clock loop")
self.loop2 = KillableThread(target=self.backgroundLoop, daemon=True, name=f"Clock[{index}]: Background color loop")
self.loop0.start()
self.loop1.start()
self.loop2.start()
class QHoverButton(QPushButton):
hovered = Signal()
unhovered = Signal()
def __init__(self, text: str = "", parent: QObject = None) -> None:
super().__init__(text=text, parent=parent)
def enterEvent(self, event: QtCore.QEvent) -> None:
self.hovered.emit()
return super().enterEvent(event)
def leaveEvent(self, event: QtCore.QEvent) -> None:
self.unhovered.emit()
return super().leaveEvent(event)
if(readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Explorer\Advanced", "TaskbarSd", 0) == 1) or getSettings("ShowDesktopButton"):
print("🟡 Desktop button enabled")
self.desktopButton = QHoverButton(parent=self)
self.desktopButton.clicked.connect(lambda: self.showDesktop())
self.desktopButton.show()
self.desktopButton.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.desktopButton.move(self.width()-self.getPx(10), 0)
self.desktopButton.resize(self.getPx(10), self.getPx(self.preferedHeight))
self.desktopButton.hovered.connect(lambda: self.desktopButton.setIcon(QIcon(getPath("showdesktop.png"))))
self.desktopButton.unhovered.connect(lambda: self.desktopButton.setIcon(QIcon()))
self.setFixedHeight(self.getPx(self.preferedHeight))
self.desktopButton.setStyleSheet(f"""
QPushButton{{
background-color: rgba(0, 0, 0, 0.01);
margin: 0px;
padding: 0px;
margin-top: 0px;
border-radius: 0px;
margin-bottom: 0px;
border-left: 0px solid rgba(0, 0, 0, 0.05);
border-right: 0px solid rgba(0, 0, 0, 0.05);
}}
QPushButton:hover{{
background-color: rgba(127, 127, 127, 1%);
margin: 0px;
margin-top: 0px;
border-radius: 0px;
margin-bottom: 0px;
border-left: 0px solid rgba(0, 0, 0, 0.05);
border-right: 0px solid rgba(0, 0, 0, 0.05);
}}
QPushButton:pressed{{
background-color: rgba(127, 127, 127, 1%);
margin: 0px;
margin-top: 0px;
border-radius: 0px;
margin-bottom: 0px;
border-left: 0px solid rgba(0, 0, 0, 0.05);
border-right: 0px solid rgba(0, 0, 0, 0.05);
}}
""")
def getPx(self, original) -> int:
return round(original*(self.screen().logicalDotsPerInch()/96))
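# Every 0.5 seconds, sample the screen pixel just left of the label and, when its colour changes, re-emit the widget stylesheet with that colour so the clock background matches the taskbar.
# Only runs when the taskbar background colour feature is active (no custom or accent background), outside low CPU mode and while the tray context menu is closed.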
def backgroundLoop(self):
while True:
try:
if self.taskbarBackgroundColor and not self.isLowCpuMode and not globals.trayIcon.contextMenu().isVisible():
intColor = self.primary_screen.grabWindow(0, self.x()+self.label.x()-1, self.y()+2, 1, 1).toImage().pixel(0, 0)
if intColor != self.oldBgColor:
self.oldBgColor = intColor
color = QColor(intColor)
self.styler.emit(self.widgetStyleSheet.replace("bgColor", f"{color.red()}, {color.green()}, {color.blue()}, 100"))
except AttributeError:
print("🟣 Expected AttributeError on backgroundLoop thread")
time.sleep(0.5)
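# Detects whether a window is covering this monitor. With newMethod, any visible window whose rect fully covers the screen rect counts (otherwise an exact match is required); legacyMethod restricts the check to the foreground window instead of enumerating all visible windows.
# When the clock is forced onto the first monitor, TextInputHost.exe is identified once via WMI, cached and ignored afterwards, as are the window titles in blacklistedFullscreenApps.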
def theresFullScreenWin(self, clockOnFirstMon, newMethod, legacyMethod):
try:
fullscreen = False
def compareFullScreenRects(window, screen, newMethod):
try:
if(newMethod):
return window[0] <= screen[0] and window[1] <= screen[1] and window[2] >= screen[2] and window[3] >= screen[3]
else:
return window[0] == screen[0] and window[1] == screen[1] and window[2] == screen[2] and window[3] == screen[3]
except Exception as e:
report(e)
def winEnumHandler(hwnd, _):
nonlocal fullscreen
if win32gui.IsWindowVisible(hwnd):
if compareFullScreenRects(win32gui.GetWindowRect(hwnd), self.full_screen_rect, newMethod):
if clockOnFirstMon and self.textInputHostHWND == 0:
pythoncom.CoInitialize()
_, pid = win32process.GetWindowThreadProcessId(hwnd)
_wmi = win32com.client.GetObject('winmgmts:')
# collect all the running processes
processes = _wmi.ExecQuery(f'Select Name from win32_process where ProcessId = {pid}')
for p in processes:
if p.Name != "TextInputHost.exe":
if(win32gui.GetWindowText(hwnd) not in blacklistedFullscreenApps):
print("🟡 Fullscreen window detected!", win32gui.GetWindowText(hwnd), win32gui.GetWindowRect(hwnd), "Fullscreen rect:", self.full_screen_rect)
fullscreen = True
else:
print("🟢 Cached text input host hwnd:", hwnd)
self.textInputHostHWND = hwnd
self.INTLOOPTIME = 2
else:
if win32gui.GetWindowText(hwnd) not in blacklistedFullscreenApps and hwnd != self.textInputHostHWND:
print("🟡 Fullscreen window detected!", win32gui.GetWindowText(hwnd), win32gui.GetWindowRect(hwnd), "Fullscreen rect:", self.full_screen_rect)
fullscreen = True
if not legacyMethod:
win32gui.EnumWindows(winEnumHandler, 0)
else:
hwnd = win32gui.GetForegroundWindow()
if(compareFullScreenRects(win32gui.GetWindowRect(hwnd), self.full_screen_rect, newMethod)):
if(win32gui.GetWindowText(hwnd) not in blacklistedFullscreenApps):
print("🟡 Fullscreen window detected!", win32gui.GetWindowText(hwnd), win32gui.GetWindowRect(hwnd), "Fullscreen rect:", self.full_screen_rect)
fullscreen = True
return fullscreen
except Exception as e:
report(e)
return False
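# Show/hide state machine: the relatively expensive full-screen check runs once per outer iteration, while the cheaper checks run INTLOOPTIME times at 0.2 second intervals.
# The clock hides for full-screen apps, for RDP sessions when EnableHideOnRDP is set and together with an auto-hidden taskbar, and the Focus Assist / notification indicators are refreshed whenever notifications are enabled.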
def mainClockLoop(self):
global isRDPRunning, numOfNotifs
EnableHideOnFullScreen = not(getSettings("DisableHideOnFullScreen"))
DisableHideWithTaskbar = getSettings("DisableHideWithTaskbar")
EnableHideOnRDP = getSettings("EnableHideOnRDP")
clockOnFirstMon = getSettings("ForceClockOnFirstMonitor")
newMethod = getSettings("NewFullScreenMethod")
notifs = not getSettings("DisableNotifications")
legacyMethod = getSettings("legacyFullScreenMethod")
oldNotifNumber = 0
print(f"🔵 Show/hide loop started with parameters: HideonFS:{EnableHideOnFullScreen}, NotHideOnTB:{DisableHideWithTaskbar}, HideOnRDP:{EnableHideOnRDP}, ClockOn1Mon:{clockOnFirstMon}, NefWSMethod:{newMethod}, DisableNotifications:{notifs}, legacyFullScreenMethod:{legacyMethod}")
if self.isLowCpuMode or clockOnFirstMon:
self.INTLOOPTIME = 15
else:
self.INTLOOPTIME = 2
while True:
self.isRDPRunning = isRDPRunning
isFullScreen = self.theresFullScreenWin(clockOnFirstMon, newMethod, legacyMethod)
for i in range(self.INTLOOPTIME):
if (not(isFullScreen) or not(EnableHideOnFullScreen)) and not self.clockShouldBeHidden:
if notifs:
if isFocusAssist:
self.callInMainSignal.emit(self.label.enableFocusAssistant)
elif numOfNotifs > 0:
if oldNotifNumber != numOfNotifs:
self.callInMainSignal.emit(self.label.enableNotifDot)
else:
self.callInMainSignal.emit(self.label.disableClockIndicators)
oldNotifNumber = numOfNotifs
if self.autoHide and not(DisableHideWithTaskbar):
mousePos = getMousePos()
if (mousePos.y()+1 == self.screenGeometry.y()+self.screenGeometry.height()) and self.screenGeometry.x() < mousePos.x() and self.screenGeometry.x()+self.screenGeometry.width() > mousePos.x():
self.refresh.emit()
elif (mousePos.y() <= self.screenGeometry.y()+self.screenGeometry.height()-self.preferedHeight):
self.hideSignal.emit()
else:
if(self.isRDPRunning and EnableHideOnRDP):
self.hideSignal.emit()
else:
self.refresh.emit()
else:
self.hideSignal.emit()
time.sleep(0.2)
time.sleep(0.2)
def updateTextLoop(self) -> None:
global timeStr
while True:
self.label.setText(timeStr)
time.sleep(0.1)
def showCalendar(self):
self.keyboard.press(Key.cmd)
self.keyboard.press('n')
self.keyboard.release('n')
self.keyboard.release(Key.cmd)
if self.hideClockWhenClicked:
print("🟡 Hiding clock because clicked!")
self.clockShouldBeHidden = True
def showClockOn10s(self: Clock):
time.sleep(10)
print("🟢 Showing clock because 10s passed!")
self.clockShouldBeHidden = False
KillableThread(target=showClockOn10s, args=(self,), name=f"Temporary: 10s thread").start()
def showDesktop(self):
self.keyboard.press(Key.cmd)
self.keyboard.press('d')
self.keyboard.release('d')
self.keyboard.release(Key.cmd)
def focusOutEvent(self, event: QFocusEvent) -> None:
self.refresh.emit()
def refreshandShow(self):
if(self.shouldBeVisible):
self.show()
self.raise_()
if(self.lastTheme >= 0): # If the color is not customized
theme = readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Themes\Personalize", "SystemUsesLightTheme", 1)
if(theme != self.lastTheme):
if (theme == 0 or self.forceDarkTheme) and not self.forceLightTheme:
self.lastTheme = 0
self.label.setStyleSheet(f"padding: {self.getPx(1)}px;padding-right: {self.getPx(3)}px;margin-right: {self.getPx(12)}px;padding-left: {self.getPx(5)}px; color: white;")#background-color: rgba({self.bgcolor}%)")
self.label.bgopacity = 0.1
self.fontfamilies = [element.replace("Segoe UI Variable Display", "Segoe UI Variable Display Semib") for element in self.fontfamilies]
self.font.setFamilies(self.fontfamilies)
if lang == lang_ko:
self.font.setWeight(QFont.Weight.Normal)
elif lang == lang_zh_TW or lang == lang_zh_CN:
self.font.setWeight(QFont.Weight.Normal)
else:
self.font.setWeight(QFont.Weight.DemiBold)
self.label.setFont(self.font)
else:
self.lastTheme = 1
self.label.setStyleSheet(f"padding: {self.getPx(1)}px;padding-right: {self.getPx(3)}px;margin-right: {self.getPx(12)}px;padding-left: {self.getPx(5)}px; color: black;")#background-color: rgba({self.bgcolor}%)")
self.label.bgopacity = .5
self.fontfamilies = [element.replace("Segoe UI Variable Display Semib", "Segoe UI Variable Display") for element in self.fontfamilies]
self.font.setFamilies(self.fontfamilies)
self.font.setWeight(QFont.Weight.ExtraLight)
self.label.setFont(self.font)
def closeEvent(self, event: QCloseEvent) -> None:
self.shouldBeVisible = False
try:
print(f"🟡 Closing clock on {self.win32screen}")
self.loop0.kill()
self.loop1.kill()
self.loop2.kill()
except AttributeError:
pass
event.accept()
return super().closeEvent(event)
def showEvent(self, event: QShowEvent) -> None:
return super().showEvent(event)
class Label(QLabel):
clicked = Signal()
def __init__(self, text, parent):
super().__init__(text, parent=parent)
self.setMouseTracking(True)
self.backgroundwidget = QWidget(self)
self.color = "255, 255, 255"
self.installEventFilter(self)
self.bgopacity = 0.1
self.backgroundwidget.setContentsMargins(0, self.window().prefMargins, 0, self.window().prefMargins)
self.backgroundwidget.setStyleSheet(f"background-color: rgba(127, 127, 127, 0.01);border-top: {self.getPx(1)}px solid rgba({self.color},0);margin-top: {self.window().prefMargins}px; margin-bottom: {self.window().prefMargins};")
self.backgroundwidget.show()
if self.window().transparentBackground:
colorOffset = .01
else:
colorOffset = 0
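# Animate the hover background in and out instead of toggling it instantly; the small colorOffset
# keeps the starting opacity just above zero to avoid a white flash on the border.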
self.showBackground = QVariantAnimation()
self.showBackground.setStartValue(0+colorOffset) # Not 0 to prevent white flashing on the border
self.showBackground.setEndValue(self.bgopacity)
self.showBackground.setDuration(100)
self.showBackground.setEasingCurve(QEasingCurve.InOutQuad) # Not strictly required, just for the aesthetics
self.showBackground.valueChanged.connect(lambda opacity: self.backgroundwidget.setStyleSheet(f"background-color: rgba({self.color}, {opacity/2});border-top: {self.getPx(1)}px solid rgba({self.color}, {opacity+colorOffset});margin-top: {self.window().prefMargins}px; margin-bottom: {self.window().prefMargins};"))
self.hideBackground = QVariantAnimation()
self.hideBackground.setStartValue(self.bgopacity)
self.hideBackground.setEndValue(0+colorOffset) # Not 0 to prevent white flashing on the border
self.hideBackground.setDuration(100)
self.hideBackground.setEasingCurve(QEasingCurve.InOutQuad) # Not strictly required, just for the aesthetics
self.hideBackground.valueChanged.connect(lambda opacity: self.backgroundwidget.setStyleSheet(f"background-color: rgba({self.color}, {opacity/2});border-top: {self.getPx(1)}px solid rgba({self.color}, {opacity+colorOffset});margin-top: {self.window().prefMargins}px; margin-bottom: {self.window().prefMargins};"))
self.setAutoFillBackground(True)
self.backgroundwidget.setGeometry(0, 0, self.width(), self.height())
self.opacity=QGraphicsOpacityEffect(self)
self.opacity.setOpacity(1.00)
self.backgroundwidget.setGraphicsEffect(self.opacity)
self.focusassitant = True
self.focusAssitantLabel = QPushButton(self)
self.focusAssitantLabel.move(self.width(), 0)
self.focusAssitantLabel.setAttribute(Qt.WA_TransparentForMouseEvents)
self.focusAssitantLabel.setStyleSheet("background: transparent; margin: none; padding: none;")
self.focusAssitantLabel.resize(self.getPx(30), self.height())
self.focusAssitantLabel.setIcon(QIcon(getPath(f"moon_{getTaskbarIconMode()}.png")))
self.focusAssitantLabel.setIconSize(QSize(self.getPx(16), self.getPx(16)))
accColors = getColors()
self.notifdot = True
self.notifDotLabel = QLabel("", self)
self.notifDotLabel.setAlignment(Qt.AlignVCenter | Qt.AlignHCenter)
self.notifDotLabel.setObjectName("notifIndicator")
self.notifDotLabel.setStyleSheet(f"font-size: 8pt;font-family: \"Segoe UI Variable Display\";border-radius: {self.getPx(8)}px;padding: 0px;padding-bottom: {self.getPx(2)}px;padding-left: {self.getPx(3)}px;padding-right: {self.getPx(2)}px;margin: 0px;border:0px;")
self.disableClockIndicators()
def enableFocusAssistant(self):
if not self.focusassitant:
if self.notifdot:
self.disableClockIndicators()
self.focusassitant = True
self.setContentsMargins(self.getPx(5), self.getPx(2), self.getPx(43), self.getPx(2))
self.focusAssitantLabel.move(self.width()-self.contentsMargins().right(), 0)
self.focusAssitantLabel.setFixedWidth(self.getPx(30))
self.focusAssitantLabel.setFixedHeight(self.height())
self.focusAssitantLabel.setIconSize(QSize(self.getPx(16), self.getPx(16)))
self.focusAssitantLabel.setIcon(QIcon(getPath(f"moon_{getTaskbarIconMode()}.png")))
self.focusAssitantLabel.show()
def enableNotifDot(self):
self.notifDotLabel.setText(str(numOfNotifs))
if not self.notifdot:
self.notifdot = True
self.setContentsMargins(self.getPx(5), self.getPx(2), self.getPx(43), self.getPx(2))
topBottomPadding = (self.height()-self.getPx(16))/2 # top-bottom margin
leftRightPadding = (self.getPx(30)-self.getPx(16))/2 # left-right margin
self.notifDotLabel.move(int(self.width()-self.contentsMargins().right()+leftRightPadding), int(topBottomPadding))
self.notifDotLabel.resize(self.getPx(16), self.getPx(16))
self.notifDotLabel.setStyleSheet(f"font-size: 8pt;font-family: \"Segoe UI Variable Display\";border-radius: {self.getPx(8)}px;padding: 0px;padding-bottom: {self.getPx(2)}px;padding-left: {self.getPx(3)}px;padding-right: {self.getPx(2)}px;margin: 0px;border:0px;")
self.notifDotLabel.show()
def disableClockIndicators(self):
if self.focusassitant:
self.focusassitant = False
self.setContentsMargins(self.getPx(6), self.getPx(2), self.getPx(13), self.getPx(2))
self.focusAssitantLabel.hide()
if self.notifdot:
self.notifdot = False
self.setContentsMargins(self.getPx(6), self.getPx(2), self.getPx(13), self.getPx(2))
self.notifDotLabel.hide()
def getPx(self, i: int) -> int:
return round(i*(self.screen().logicalDotsPerInch()/96))
def enterEvent(self, event: QEvent, r=False) -> None:
geometry: int = self.width()  # label width in pixels (QLabel.width() returns an int, not a QRect)
self.showBackground.setStartValue(.01)
self.showBackground.setEndValue(self.bgopacity) # Not 0 to prevent white flashing on the border
if not self.window().clockOnTheLeft:
self.backgroundwidget.move(0, 2)
self.backgroundwidget.resize(geometry, self.height()-4)
else:
self.backgroundwidget.move(0, 2)
self.backgroundwidget.resize(geometry, self.height()-4)
self.showBackground.start()
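# The handler re-runs itself once (r guards against infinite recursion) to refresh the background geometry.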
if not r:
self.enterEvent(event, r=True)
return super().enterEvent(event)
def leaveEvent(self, event: QEvent) -> None:
self.hideBackground.setStartValue(self.bgopacity)
self.hideBackground.setEndValue(.01) # Not 0 to prevent white flashing on the border
self.hideBackground.start()
return super().leaveEvent(event)
def getTextUsedSpaceRect(self):
text = self.text().strip()
if len(text.split("\n"))>=3:
mult = 0.633333333333333333
elif len(text.split("\n"))==2:
mult = 1
else:
mult = 1.5
return self.fontMetrics().boundingRect(text).width()*mult
def mousePressEvent(self, ev: QMouseEvent) -> None:
self.setWindowOpacity(0.7)
self.opacity.setOpacity(0.60)
self.backgroundwidget.setGraphicsEffect(self.opacity)
return super().mousePressEvent(ev)
def mouseReleaseEvent(self, ev: QMouseEvent) -> None:
self.setWindowOpacity(1)
self.opacity.setOpacity(1.00)
self.backgroundwidget.setGraphicsEffect(self.opacity)
if(ev.button() == Qt.RightButton):
mousePos = getMousePos()
if(i.contextMenu().height() != 480):
mousePos.setY(self.window().y()-(i.contextMenu().height()+5))
else:
if getSettings("HideTaskManagerButton"):
mousePos.setY(self.window().y()-int(260*(i.contextMenu().screen().logicalDotsPerInchX()/96)))
else:
mousePos.setY(self.window().y()-int(370*(i.contextMenu().screen().logicalDotsPerInchX()/96)))
i.execMenu(mousePos)
else:
self.clicked.emit()
return super().mouseReleaseEvent(ev)
def paintEvent(self, event: QPaintEvent) -> None:
w = self.minimumSizeHint().width()
try:
mw = int(getSettingsValue("ClockFixedWidth"))
if mw > w:
w = mw
except Exception as e:
report(e)
if w<self.window().getPx(self.window().preferedwidth) and not self.window().clockOnTheLeft:
self.move(self.window().getPx(self.window().preferedwidth)-w+self.getPx(2), 0)
self.resize(w, self.height())
else:
self.move(0, 0)
self.resize(w, self.height())
return super().paintEvent(event)
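# On resize, force the active indicator (focus assistant / notification dot) to be re-applied
# so it is repositioned for the new label size.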
def resizeEvent(self, event: QResizeEvent) -> None:
if self.focusassitant:
self.focusassitant = False
self.enableFocusAssistant()
elif self.notifdot:
self.notifdot = False
self.enableNotifDot()
else:
self.notifdot = True
self.focusassitant = True
self.disableClockIndicators()
return super().resizeEvent(event)
def window(self) -> Clock:
return super().window()
# Start of main script
QApplication.setAttribute(Qt.AA_DisableHighDpiScaling)
app = QApplication(sys.argv)
app.setQuitOnLastWindowClosed(False)
mController: MouseController = None
sw: SettingsWindow = None
i: TaskbarIconTray = None
st: KillableThread = None # Will be defined on loadClocks
KillableThread(target=resetRestartCount, daemon=True, name="Main: Restart counter").start()
KillableThread(target=timeStrThread, daemon=True, name="Main: Locale string loader").start()
loadClocks()
print(f"🟢 Loaded clocks in {time.time()-FirstTime}")
tdir = tempfile.TemporaryDirectory()
tempDir = tdir.name
sw = SettingsWindow() # Declare settings window
i = TaskbarIconTray(app)
mController = MouseController()
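# Relaunch ElevenClock whenever the monitor layout changes, so the clocks are re-created on the right screens.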
app.primaryScreenChanged.connect(lambda: os.startfile(sys.executable))
app.screenAdded.connect(lambda: os.startfile(sys.executable))
app.screenRemoved.connect(lambda: os.startfile(sys.executable))
signal = RestartSignal()
showNotif = InfoSignal()
showWarn = InfoSignal()
killSignal = InfoSignal()
showNotif.infoSignal.connect(lambda a, b: showMessage(a, b))
showWarn.infoSignal.connect(lambda a, b: wanrUserAboutUpdates(a, b))
killSignal.infoSignal.connect(lambda: app.quit())
signal.restartSignal.connect(lambda: restartClocks("checkLoop"))
KillableThread(target=updateChecker, daemon=True, name="Main: Updater").start()
KillableThread(target=isElevenClockRunningThread, daemon=True, name="Main: Instance controller").start()
if not getSettings("EnableLowCpuMode"): KillableThread(target=checkIfWokeUpThread, daemon=True, name="Main: Sleep listener").start()
if not getSettings("EnableLowCpuMode"): KillableThread(target=wnfDataThread, daemon=True, name="Main: WNF Data listener").start()
print("🔵 Low cpu mode is set to", str(getSettings("EnableLowCpuMode"))+". DisableNotifications is set to", getSettings("DisableNotifications"))
rdpThread = KillableThread(target=checkRDP, daemon=True, name="Main: Remote desktop controller")
if getSettings("EnableHideOnRDP"):
rdpThread.start()
globals.tempDir = tempDir # Register global variables
globals.old_stdout = old_stdout # Register global variables
globals.buffer = buffer # Register global variables
globals.app = app # Register global variables
globals.sw = sw # Register global variables
globals.trayIcon = i # Register global variables
globals.loadTimeFormat = loadTimeFormat # Register global functions
globals.updateIfPossible = updateIfPossible # Register global functions
globals.restartClocks = restartClocks # Register global functions
globals.closeClocks = closeClocks # Register global functions
if not(getSettings("Updated3.21Already")) and not(getSettings("EnableSilentUpdates")):
setSettings("ForceClockOnFirstMonitor", True)
setSettings("Updated3.21Already", True)
msg = QFramelessDialog(parent=None, closeOnClick=False)
msg.setAutoFillBackground(True)
msg.setStyleSheet(sw.styleSheet())
msg.setAttribute(QtCore.Qt.WA_StyledBackground)
msg.setObjectName("QMessageBox")
msg.setTitle("ElevenClock Updater")
msg.setText(f"""<b>ElevenClock has updated to version {versionName} successfully.</b>
<br><br>This update brings:<br>
<ul><li>The ability to specify a clock minimum width</li>
<li> The ability to search through the settings</li>
<li> Fixed an aesthetic issue with the seconds</li>
<li> Added a button to reset ElevenClock</li>
<li> Fixed an issue where ElevenClock would crash when clicking the right-click menu</li>
<li> Added Nynorsk</li>
<li> Some bugfixing and other improvements</li></ul>""")
msg.addButton("Ok", QDialogButtonBox.ButtonRole.ApplyRole, lambda: msg.close())
msg.addButton("Full changelog", QDialogButtonBox.ButtonRole.ResetRole, lambda: os.startfile("https://github.com/martinet101/ElevenClock/releases"))
def settNClose():
sw.show()
msg.close()
msg.addButton("Settings", QDialogButtonBox.ButtonRole.ActionRole, lambda: settNClose())
msg.setDefaultButtonRole(QDialogButtonBox.ButtonRole.ApplyRole, sw.styleSheet())
msg.setWindowTitle("ElevenClock has updated!")
msg.show()
showSettings = False
if "--settings" in sys.argv or showSettings:
sw.show()
if not getSettings("DefaultPrefsLoaded"):
setSettings("AlreadyInstalled", True)
setSettings("NewFullScreenMethod", True)
setSettings("ForceClockOnFirstMonitor", True)
showMessage("Welcome to ElevenClock", "You can customize Elevenclock from the ElevenClock Settings. You can search them on the start menu or right-clicking on any clock -> ElevenClock Settings", uBtn=False)
print("🟢 Default settings loaded")
setSettings("DefaultPrefsLoaded", True)
showWelcomeWizard = False
if showWelcomeWizard or "--welcome" in sys.argv:
import welcome
ww = welcome.WelcomeWindow()
print(f"🟢 Loaded everything in {time.time()-FirstTime}")
if "--quit-on-loaded" in sys.argv: # This is a testing feature to test if the script can load successfully
sys.exit(0)
app.exec_()
sys.exit(0)
except Exception as e:
import webbrowser, traceback, platform
if not "versionName" in locals() and not "versionName" in globals():
versionName = "Unknown"
if not "version" in locals() and not "version" in globals():
version = "Unknown"
os_info = f"" + \
f" OS: {platform.system()}\n"+\
f" Version: {platform.win32_ver()}\n"+\
f" OS Architecture: {platform.machine()}\n"+\
f" APP Architecture: {platform.architecture()[0]}\n"+\
f" APP Version: {versionName}\n"+\
f" APP Version Code: {version}\n"+\
f" Program: ElevenClock"+\
"\n\n-----------------------------------------------------------------------------------------"
traceback_info = "Traceback (most recent call last):\n"
try:
for line in traceback.extract_tb(e.__traceback__).format():
traceback_info += line
traceback_info += f"\n{type(e).__name__}: {str(e)}"
except:
traceback_info += "\nUnable to get traceback"
traceback_info += str(type(e))
traceback_info += ": "
traceback_info += str(e)
webbrowser.open(("https://www.somepythonthings.tk/error-report/?appName=ElevenClock&errorBody="+os_info.replace('\n', '{l}').replace(' ', '{s}')+"{l}{l}{l}{l}ElevenClock Log:{l}"+str("\n\n\n\n"+traceback_info).replace('\n', '{l}').replace(' ', '{s}')).replace("#", "|=|"))
print(traceback_info)
sys.exit(1)
|
"""`jupytext` as a command line tool"""
import argparse
import glob
import json
import os
import re
import shlex
import subprocess
import sys
import warnings
from copy import copy
from tempfile import NamedTemporaryFile
from .combine import combine_inputs_with_outputs
from .compare import NotebookDifference, compare, test_round_trip_conversion
from .config import load_jupytext_config, notebook_formats
from .formats import (
_BINARY_FORMAT_OPTIONS,
_VALID_FORMAT_OPTIONS,
JUPYTEXT_FORMATS,
check_auto_ext,
check_file_version,
long_form_multiple_formats,
long_form_one_format,
short_form_one_format,
)
from .header import recursive_update
from .jupytext import create_prefix_dir, read, reads, write, writes
from .kernels import find_kernel_specs, get_kernel_spec, kernelspec_from_language
from .languages import _SCRIPT_EXTENSIONS
from .paired_paths import (
InconsistentPath,
base_path,
find_base_path_and_format,
full_path,
paired_paths,
)
from .pairs import latest_inputs_and_outputs, read_pair, write_pair
from .version import __version__
def system(*args, **kwargs):
"""Execute the given bash command"""
kwargs.setdefault("stdout", subprocess.PIPE)
proc = subprocess.Popen(args, **kwargs)
out, _ = proc.communicate()
if proc.returncode:
raise SystemExit(proc.returncode)
return out.decode("utf-8")
def str2bool(value):
"""Parse Yes/No/Default string
https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse"""
if value.lower() in ("yes", "true", "t", "y", "1"):
return True
if value.lower() in ("no", "false", "f", "n", "0"):
return False
if value.lower() in ("d", "default", ""):
return None
raise argparse.ArgumentTypeError("Expected: (Y)es/(T)rue/(N)o/(F)alse/(D)efault")
def parse_jupytext_args(args=None):
"""Command line parser for jupytext"""
parser = argparse.ArgumentParser(
description="Jupyter Notebooks as Markdown Documents, Julia, Python or R Scripts",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
# Input
parser.add_argument(
"notebooks",
help="One or more notebook(s). "
"Notebook is read from stdin when this argument is empty.",
nargs="*",
)
parser.add_argument(
"--from",
dest="input_format",
help="Jupytext format for the input(s). Inferred from the "
"file extension and content when missing.",
)
# Destination format & act on metadata
parser.add_argument(
"--to",
dest="output_format",
help=(
"The destination format: 'ipynb', 'markdown' or 'script', or a file extension: "
"'md', 'Rmd', 'jl', 'py', 'R', ..., 'auto' (script extension matching the notebook language), "
"or a combination of an extension and a format name, e.g. {} ".format(
", ".join(
{
f"md:{fmt.format_name}"
for fmt in JUPYTEXT_FORMATS
if fmt.extension == ".md"
}
)
)
+ " or {}. ".format(
", ".join(
{
f"py:{fmt.format_name}"
for fmt in JUPYTEXT_FORMATS
if fmt.extension == ".py"
}
)
)
+ "The default format for scripts is the 'light' format, "
"which uses few cell markers (none when possible). "
"Alternatively, a format compatible with many editors is the "
"'percent' format, which uses '# %%%%' as cell markers. "
"The main formats (markdown, light, percent) preserve "
"notebooks and text documents in a roundtrip. Use the "
"--test and --test-strict commands to test the roundtrip on your files. "
"Read more about the available formats at "
"https://jupytext.readthedocs.io/en/latest/formats.html"
),
)
# Destination file
parser.add_argument(
"-o",
"--output",
help="Destination file. Defaults to the original file, "
"with prefix/suffix/extension changed according to "
"the destination format. "
"Use '-' to print the notebook on stdout.",
)
parser.add_argument(
"--update",
action="store_true",
help="Preserve the output cells when the destination "
"notebook is an .ipynb file that already exists",
)
parser.add_argument(
"--set-formats",
type=str,
help="Turn the notebook or text document to one or more alternative representations "
"with e.g. '--set-formats ipynb,py:light'. "
"The --set-formats option also triggers the creation/update of all paired files",
)
# Action: convert(default)/version/list paired paths/sync/apply/test
action = parser.add_mutually_exclusive_group()
action.add_argument(
"--sync",
"-s",
help="Synchronize the content of the paired representations of "
"the given notebook. Input cells are taken from the file that "
"was last modified, and outputs are read from the ipynb file, "
"if present.",
action="store_true",
)
action.add_argument(
"--paired-paths",
"-p",
help="List the locations of the alternative representations for this notebook.",
action="store_true",
)
parser.add_argument(
"--format-options",
"--opt",
action="append",
help="Set format options with e.g. "
"'--opt comment_magics=true' or '--opt notebook_metadata_filter=-kernelspec'.",
)
parser.add_argument(
"--update-metadata",
default={},
type=json.loads,
help="Update the notebook metadata with the desired dictionary. "
"Argument must be given in JSON format. For instance, if you "
"want to activate a pairing in the generated file, use e.g. "
"""--update-metadata '{"jupytext":{"formats":"ipynb,py:light"}}' """
"See also the --opt and --set-formats options for other ways "
"to operate on the Jupytext metadata.",
)
parser.add_argument(
"--use-source-timestamp",
help="Set the modification timestamp of the output file(s) equal "
"to that of the source file, and keep the source file and "
"its timestamp unchanged.",
action="store_true",
)
parser.add_argument(
"--warn-only",
"-w",
action="store_true",
help="Only issue a warning and continue processing other notebooks "
"when the conversion of a given notebook fails",
)
action.add_argument(
"--test",
action="store_true",
help="Test that the notebook is stable under a round trip conversion, "
"up to the expected changes",
)
action.add_argument(
"--test-strict",
action="store_true",
help="Test that the notebook is strictly stable under a round trip conversion",
)
parser.add_argument(
"--stop",
"-x",
dest="stop_on_first_error",
action="store_true",
help="In --test mode, stop on first round trip conversion error, and report stack traceback",
)
# Pipe notebook inputs into other commands
parser.add_argument(
"--pipe",
action="append",
help="Pipe the text representation (in format --pipe-fmt) of the notebook into "
"another program, and read the notebook back. For instance, reformat "
"your notebook with: "
"'jupytext notebook.ipynb --pipe black' "
"If you want to reformat it and sync the paired representation, execute: "
"'jupytext notebook.ipynb --sync --pipe black' "
"In case the program that you want to execute does not accept pipes, use {} "
"as a placeholder for a temporary file name into which jupytext will "
"write the text representation of the notebook, e.g.: "
"jupytext notebook.ipynb --pipe 'black {}'",
)
parser.add_argument(
"--diff",
"-d",
action="store_true",
help="Show the differences between (the inputs) of two notebooks",
)
parser.add_argument(
"--diff-format",
help="The text format used to show differences in --diff",
)
parser.add_argument(
"--check",
action="append",
help="Pipe the text representation (in format --pipe-fmt) of the notebook into "
"another program, and check that it exits with a zero return code. For "
"instance, test that your notebook is pep8 compliant with: "
"'jupytext notebook.ipynb --check flake8' "
"or run pytest on your notebook with: "
"'jupytext notebook.ipynb --check pytest' "
"In case the program that you want to execute does not accept pipes, use {} "
"as a placeholder for a temporary file name into which jupytext will "
"write the text representation of the notebook, e.g.: "
"jupytext notebook.ipynb --check 'pytest {}'",
)
parser.add_argument(
"--pipe-fmt",
default="auto:percent",
help="The format in which the notebook should be piped to other programs, "
"when using the --pipe and/or --check commands.",
)
# Execute the notebook
parser.add_argument(
"--set-kernel",
"-k",
type=str,
help="Set the kernel with the given name on the notebook. "
"Use '--set-kernel -' to set a kernel matching the current "
"environment on Python notebooks, and matching the notebook "
"language otherwise (get the list of available kernels with "
"'jupyter kernelspec list')",
)
parser.add_argument(
"--execute",
action="store_true",
help="Execute the notebook with the given kernel. In the "
"--pre-commit-mode, the notebook is executed only if a code "
"cell changed, or if some execution outputs are missing "
"or not ordered.",
)
parser.add_argument(
"--run-path",
type=str,
help="Execute the notebook at the given path (defaults to the notebook parent directory)",
)
parser.add_argument(
"--quiet",
"-q",
action="store_true",
help="Quiet mode: do not comment about files being updated or created",
)
parser.add_argument(
"--show-changes",
action="store_true",
help="Display the diff for each output file",
)
action.add_argument(
"--version",
"-v",
action="store_true",
help="Show jupytext's version number and exit",
)
parser.add_argument(
"--pre-commit",
action="store_true",
help="Ignore the notebook argument, and instead apply Jupytext "
"on the notebooks found in the git index, which have an "
"extension that matches the (optional) --from argument.",
)
parser.add_argument(
"--pre-commit-mode",
action="store_true",
help="This is a mode that is compatible with the pre-commit framework. "
"In this mode, --sync won't use timestamps but instead will "
"determine the source notebook as the element of the pair "
"that is added to the git index. An alert is raised if multiple inconsistent representations are "
"in the index. It also raises an alert after updating the paired files or outputs if those "
"files need to be added to the index. Finally, filepaths that aren't in the source format "
"you are trying to convert from are ignored.",
)
return parser.parse_args(args)
def jupytext(args=None):
"""Entry point for the jupytext script"""
args = parse_jupytext_args(args)
def log(text):
if not args.quiet:
sys.stdout.write(text + "\n")
if args.version:
log(__version__)
return 0
if args.pre_commit:
warnings.warn(
"The --pre-commit argument is deprecated. "
"Please consider switching to the pre-commit.com framework "
"(let us know at https://github.com/mwouts/jupytext/issues "
"if that is an issue for you)",
DeprecationWarning,
)
if args.notebooks:
raise ValueError(
"--pre-commit takes notebooks from the git index. Do not pass any notebook here."
)
args.notebooks = notebooks_in_git_index(args.input_format)
log("[jupytext] Notebooks in git index are:")
for nb_file in args.notebooks:
log(nb_file)
# Read notebook from stdin
if not args.notebooks:
if not args.pre_commit:
args.notebooks = ["-"]
if args.set_formats is not None:
# Replace empty string with None
args.update_metadata = recursive_update(
args.update_metadata, {"jupytext": {"formats": args.set_formats or None}}
)
args.sync = True
if args.paired_paths:
if len(args.notebooks) != 1:
raise ValueError("--paired-paths applies to a single notebook")
print_paired_paths(args.notebooks[0], args.input_format)
return 1
if args.run_path:
args.execute = True
if (
(args.test or args.test_strict)
and not args.output_format
and not args.output
and not args.sync
):
raise ValueError("Please provide one of --to, --output or --sync")
if (
not args.output_format
and not args.output
and not args.sync
and not args.pipe
and not args.diff
and not args.check
and not args.update_metadata
and not args.format_options
and not args.set_kernel
and not args.execute
):
raise ValueError(
"Please provide one of --to, --output, --set-formats, --sync, --pipe, --diff, "
"--check, --update-metadata, --format-options, --set-kernel or --execute"
)
if args.diff:
if (
len(args.notebooks) != 2
or args.output_format
or args.output
or args.sync
or args.pipe
or args.check
or args.update_metadata
or args.format_options
or args.set_kernel
or args.execute
):
raise ValueError(
"Please provide two notebooks after 'jupytext --diff'.\n"
"NB: Use --show-changes if you wish to see the changes in "
"a notebook being updated by Jupytext."
)
nb_file1, nb_file2 = args.notebooks
nb1 = read(nb_file1)
nb2 = read(nb_file2)
def fmt_if_not_ipynb(nb):
fmt = nb.metadata["jupytext"]["text_representation"]
if fmt["extension"] == ".ipynb":
return None
return short_form_one_format(fmt)
diff_fmt = (
args.diff_format or fmt_if_not_ipynb(nb1) or fmt_if_not_ipynb(nb2) or "md"
)
diff = compare(
writes(nb2, diff_fmt),
writes(nb1, diff_fmt),
nb_file2,
nb_file1,
return_diff=True,
)
sys.stdout.write(diff)
return
if args.output and len(args.notebooks) != 1:
raise ValueError("Please input a single notebook when using --output")
# Warn if '--to' is used in place of '--output'
if (
not args.output
and args.output_format
and "." in args.output_format
# a suffix is expected to start with one of these characters #901
and not args.output_format.startswith((".", "-", "_"))
and "//" not in args.output_format
):
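# Emit this warning on a single line, without the usual file/line prefix.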
def single_line(msg, *args, **kwargs):
return f"[warning] {msg}\n"
warnings.formatwarning = single_line
warnings.warn(
"You might have passed a file name to the '--to' option, "
"when a format description was expected. Maybe you want to use the '-o' option instead?"
)
if args.input_format:
args.input_format = long_form_one_format(args.input_format)
if args.output_format:
args.output_format = long_form_one_format(args.output_format)
set_format_options(args.output_format, args.format_options)
# Wildcard extension on Windows #202
notebooks = []
for pattern in args.notebooks:
if "*" in pattern or "?" in pattern:
# Exclude the .jupytext.py configuration file
notebooks.extend(glob.glob(pattern, recursive=True))
else:
notebooks.append(pattern)
# Count how many file have round-trip issues when testing
exit_code = 0
for nb_file in notebooks:
if not args.warn_only:
exit_code += jupytext_single_file(nb_file, args, log)
else:
try:
exit_code += jupytext_single_file(nb_file, args, log)
except Exception as err:
sys.stderr.write(f"[jupytext] Error: {str(err)}\n")
return exit_code
def jupytext_single_file(nb_file, args, log):
"""Apply the jupytext command, with given arguments, to a single file"""
if nb_file == "-" and args.sync:
msg = "Missing notebook path."
if args.set_formats is not None and os.path.isfile(args.set_formats):
msg += f" Maybe you mean 'jupytext --sync {args.set_formats}' ?"
raise ValueError(msg)
nb_dest = None
if args.output:
nb_dest = args.output
elif nb_file == "-":
nb_dest = "-"
else:
try:
bp = base_path(nb_file, args.input_format)
except InconsistentPath:
if args.pre_commit_mode:
log(
"[jupytext] Ignoring unmatched input path {}{}".format(
nb_file,
f" for format {args.input_format}" if args.input_format else "",
)
)
return 0
raise
if args.output_format:
nb_dest = full_path(bp, args.output_format)
config = load_jupytext_config(os.path.abspath(nb_file))
# Just acting on metadata / pipe => save in place
save_in_place = not nb_dest and not args.sync
if save_in_place:
nb_dest = nb_file
if nb_dest == "-":
args.quiet = True
# I. ### Read the notebook ###
fmt = copy(args.input_format) or {}
if not fmt:
ext = os.path.splitext(nb_file)[1]
if ext:
fmt = {"extension": ext}
if fmt:
set_format_options(fmt, args.format_options)
log(
"[jupytext] Reading {}{}".format(
nb_file if nb_file != "-" else "stdin",
f" in format {short_form_one_format(fmt)}" if "extension" in fmt else "",
)
)
notebook = read(nb_file, fmt=fmt, config=config)
if "extension" in fmt and "format_name" not in fmt:
text_representation = notebook.metadata.get("jupytext", {}).get(
"text_representation", {}
)
if text_representation.get("extension") == fmt["extension"]:
fmt["format_name"] = text_representation["format_name"]
# Compute actual extension when using script/auto, and update nb_dest if necessary
dest_fmt = args.output_format
if dest_fmt and dest_fmt["extension"] == ".auto":
dest_fmt = check_auto_ext(dest_fmt, notebook.metadata, "--to")
if not args.output and nb_file != "-":
nb_dest = full_path(base_path(nb_file, args.input_format), dest_fmt)
# Set the kernel
set_kernel = args.set_kernel
if (
(not set_kernel)
and args.execute
and notebook.metadata.get("kernelspec", {}).get("name") is None
):
set_kernel = "-"
if set_kernel:
if set_kernel == "-":
language = (
notebook.metadata.get("jupytext", {}).get("main_language")
or notebook.metadata["kernelspec"]["language"]
)
if not language:
raise ValueError(
"Cannot infer a kernel as notebook language is not defined"
)
kernelspec = kernelspec_from_language(language)
else:
try:
kernelspec = get_kernel_spec(set_kernel)
except KeyError as err:
raise KeyError(
"Please choose a kernel name among {}".format(
find_kernel_specs().keys()
)
) from err
kernelspec = {
"name": args.set_kernel,
"language": kernelspec.language,
"display_name": kernelspec.display_name,
}
log("[jupytext] Setting kernel {}".format(kernelspec.get("name")))
args.update_metadata["kernelspec"] = kernelspec
# Are we updating a text file that has a metadata filter? #212
if args.update_metadata or args.format_options:
if (
notebook.metadata.get("jupytext", {}).get("notebook_metadata_filter")
== "-all"
):
notebook.metadata.get("jupytext", {}).pop("notebook_metadata_filter")
# Update the metadata
if args.update_metadata:
log(
"[jupytext] Updating notebook metadata with '{}'".format(
json.dumps(args.update_metadata)
)
)
if (
"kernelspec" in args.update_metadata
and "main_language" in notebook.metadata.get("jupytext", {})
):
notebook.metadata["jupytext"].pop("main_language")
recursive_update(notebook.metadata, args.update_metadata)
# Read paired notebooks, except if the pair is being created
nb_files = [nb_file, nb_dest]
if args.sync:
formats = notebook_formats(
notebook, config, nb_file, fallback_on_current_fmt=False
)
set_prefix_and_suffix(fmt, formats, nb_file)
if args.set_formats is None:
try:
notebook, inputs_nb_file, outputs_nb_file = load_paired_notebook(
notebook, fmt, config, formats, nb_file, log, args.pre_commit_mode
)
nb_files = [inputs_nb_file, outputs_nb_file]
except NotAPairedNotebook as err:
sys.stderr.write("[jupytext] Warning: " + str(err) + "\n")
return 0
except InconsistentVersions as err:
sys.stderr.write("[jupytext] Error: " + str(err) + "\n")
return 1
else:
nb_files = [nb_file]
# II. ### Apply commands onto the notebook ###
# Pipe the notebook into the desired commands
prefix = None if nb_file == "-" else os.path.splitext(os.path.basename(nb_file))[0]
for cmd in args.pipe or []:
notebook = pipe_notebook(
notebook, cmd, args.pipe_fmt, prefix=prefix, warn_only=args.warn_only
)
# and/or test the desired commands onto the notebook
for cmd in args.check or []:
pipe_notebook(
notebook,
cmd,
args.pipe_fmt,
update=False,
prefix=prefix,
warn_only=args.warn_only,
)
if (
args.execute
and args.pre_commit_mode
and execution_counts_are_in_order(notebook)
and not code_cells_have_changed(notebook, nb_files)
):
log(
f"[jupytext] Execution of {shlex.quote(nb_file)} "
f"skipped as code cells have not changed and outputs are present."
)
args.execute = False
# Execute the notebook
if args.execute:
kernel_name = notebook.metadata.get("kernelspec", {}).get("name")
log(f"[jupytext] Executing notebook with kernel {kernel_name}")
if nb_dest is not None and nb_dest != "-":
nb_path = os.path.dirname(nb_dest)
elif nb_file != "-":
nb_path = os.path.dirname(nb_file)
else:
nb_path = None
run_path = args.run_path or nb_path
if args.run_path and not os.path.isdir(run_path):
# is this a relative directory?
for base_dir in [nb_path, os.getcwd()]:
try_path = os.path.join(base_dir, run_path)
if os.path.isdir(try_path):
run_path = try_path
break
if not os.path.isdir(run_path):
raise ValueError(f"--run-path={args.run_path} is not a valid path")
if run_path:
resources = {"metadata": {"path": run_path}}
else:
resources = {}
try:
from nbconvert.preprocessors import ExecutePreprocessor
exec_proc = ExecutePreprocessor(timeout=None, kernel_name=kernel_name)
exec_proc.preprocess(notebook, resources=resources)
except (ImportError, RuntimeError) as err:
if args.pre_commit_mode:
raise RuntimeError(
"An error occurred while executing the notebook. Please "
"make sure that you have listed 'nbconvert' and 'ipykernel' "
"under 'additional_dependencies' in the jupytext hook."
) from err
raise RuntimeError(
"An error occurred while executing the notebook. Please "
"make sure that 'nbconvert' and 'ipykernel' are installed."
) from err
# III. ### Possible actions ###
# a. Test round trip conversion
if args.test or args.test_strict:
try:
# Round trip from an ipynb document
if fmt["extension"] == ".ipynb":
test_round_trip_conversion(
notebook,
dest_fmt,
update=args.update,
allow_expected_differences=not args.test_strict,
stop_on_first_error=args.stop_on_first_error,
)
# Round trip from a text file
else:
with open(nb_file, encoding="utf-8") as fp:
org_text = fp.read()
# If the destination is not ipynb, we convert to/back that format
if dest_fmt["extension"] != ".ipynb":
dest_text = writes(notebook, fmt=dest_fmt)
notebook = reads(dest_text, fmt=dest_fmt)
text = writes(notebook, fmt=fmt, config=config)
if args.test_strict:
compare(text, org_text)
else:
# we ignore the YAML header in the comparison #414
comment = _SCRIPT_EXTENSIONS.get(fmt["extension"], {}).get(
"comment", ""
)
# white spaces between the comment char and the YAML delimiters are allowed
if comment:
comment = comment + r"\s*"
yaml_header = re.compile(
r"^{comment}---\s*\n.*\n{comment}---\s*\n".format(
comment=comment
),
re.MULTILINE | re.DOTALL,
)
compare(
re.sub(yaml_header, "", text), re.sub(yaml_header, "", org_text)
)
except (NotebookDifference, AssertionError) as err:
sys.stdout.write(f"{nb_file}: {str(err)}")
return 1
return 0
# b. Output to the desired file or format
untracked_files = 0
def lazy_write(path, fmt=None, action=None, update_timestamp_only=False):
"""Write the notebook only if it has changed"""
if path == "-":
write(notebook, "-", fmt=fmt)
return
nonlocal untracked_files
if update_timestamp_only:
modified = False
else:
_, ext = os.path.splitext(path)
fmt = copy(fmt or {})
fmt = long_form_one_format(fmt, update={"extension": ext})
new_content = writes(notebook, fmt=fmt, config=config)
diff = None
if not new_content.endswith("\n"):
new_content += "\n"
if not os.path.isfile(path):
modified = True
else:
with open(path, encoding="utf-8") as fp:
current_content = fp.read()
modified = new_content != current_content
if modified and args.show_changes:
diff = compare(
new_content,
current_content,
"",
"",
return_diff=True,
)
if modified:
# The text representation of the notebook has changed, we write it on disk
if action is None:
message = f"[jupytext] Updating {shlex.quote(path)}"
else:
message = "[jupytext] Writing {path}{format}{action}".format(
path=shlex.quote(path),
format=" in format " + short_form_one_format(fmt)
if fmt and "format_name" in fmt
else "",
action=action,
)
if diff is not None:
message += " with this change:\n" + diff
log(message)
create_prefix_dir(path, fmt)
with open(path, "w", encoding="utf-8") as fp:
fp.write(new_content)
# Otherwise, we only update the timestamp of the text file to make sure
# they remain more recent than the ipynb file, for compatibility with the
# Jupytext contents manager for Jupyter
if args.use_source_timestamp:
log(
f"[jupytext] Setting the timestamp of {shlex.quote(path)} equal to that of {shlex.quote(nb_file)}"
)
os.utime(path, (os.stat(path).st_atime, os.stat(nb_file).st_mtime))
elif not modified and not path.endswith(".ipynb"):
log(f"[jupytext] Updating the timestamp of {shlex.quote(path)}")
os.utime(path, None)
if args.pre_commit:
system("git", "add", path)
if args.pre_commit_mode and is_untracked(path):
log(
f"[jupytext] Error: the git index is outdated.\n"
f"Please add the paired notebook with:\n"
f" git add {shlex.quote(path)}"
)
untracked_files += 1
return
if nb_dest:
if nb_dest == nb_file and not dest_fmt:
dest_fmt = fmt
# Test consistency between dest name and output format
if dest_fmt and nb_dest != "-":
base_path(nb_dest, dest_fmt)
# Describe what jupytext is doing
if save_in_place:
action = ""
elif os.path.isfile(nb_dest) and args.update:
if not nb_dest.endswith(".ipynb"):
raise ValueError("--update is only for ipynb files")
action = " (destination file updated)"
check_file_version(notebook, nb_file, nb_dest)
notebook = combine_inputs_with_outputs(notebook, read(nb_dest), fmt=fmt)
elif os.path.isfile(nb_dest):
suggest_update = (
" [use --update to preserve cell outputs and ids]"
if nb_dest.endswith(".ipynb")
else ""
)
action = f" (destination file replaced{suggest_update})"
else:
action = ""
formats = notebook.metadata.get("jupytext", {}).get("formats")
formats = long_form_multiple_formats(formats)
if formats:
try:
base_path_out, _ = find_base_path_and_format(nb_dest, formats)
except InconsistentPath:
# Drop 'formats' if the destination is not part of the paired notebooks
formats = {}
notebook.metadata.get("jupytext", {}).pop("formats")
lazy_write(nb_dest, fmt=dest_fmt, action=action)
nb_dest_in_pair = formats and any(
os.path.exists(alt_path) and os.path.samefile(nb_dest, alt_path)
for alt_path, _ in paired_paths(nb_file, fmt, formats)
)
if (
nb_dest_in_pair
and os.path.isfile(nb_file)
and not nb_file.endswith(".ipynb")
and os.path.isfile(nb_dest)
and nb_dest.endswith(".ipynb")
):
# If the destination is an ipynb file and is in the pair, then we
# update the original text file timestamp, as required by our Content Manager
# Otherwise Jupyter will refuse to open the paired notebook #335
# NB: An alternative is --use-source-timestamp
lazy_write(nb_file, update_timestamp_only=True)
# c. Synchronize paired notebooks
elif args.sync:
write_pair(nb_file, formats, lazy_write)
return untracked_files
def notebooks_in_git_index(fmt):
"""Return the list of modified and deleted ipynb files in the git index that match the given format"""
git_status = system("git", "status", "--porcelain")
re_modified = re.compile(r"^[AM]+\s+(?P<name>.*)", re.MULTILINE)
modified_files_in_git_index = re_modified.findall(git_status)
files = []
for nb_file in modified_files_in_git_index:
if nb_file.startswith('"') and nb_file.endswith('"'):
nb_file = nb_file[1:-1]
try:
base_path(nb_file, fmt)
files.append(nb_file)
except InconsistentPath:
continue
return files
def is_untracked(filepath):
"""Check whether a file was created or modified and needs to be added to the git index"""
if not filepath:
return False
output = system("git", "ls-files", filepath).strip()
if output == "":
return True
output = system("git", "diff", filepath).strip()
if output != "":
return True
return False
def print_paired_paths(nb_file, fmt):
"""Display the paired paths for this notebook"""
notebook = read(nb_file, fmt=fmt)
formats = notebook.metadata.get("jupytext", {}).get("formats")
if formats:
for path, _ in paired_paths(nb_file, fmt, formats):
if path != nb_file:
sys.stdout.write(path + "\n")
def set_format_options(fmt, format_options):
"""Apply the desired format options to the format description fmt"""
if not format_options:
return
for opt in format_options:
try:
key, value = opt.split("=")
except ValueError as err:
raise ValueError(
"Format options are expected to be of the form key=value, not '{}'".format(
opt
)
) from err
if key not in _VALID_FORMAT_OPTIONS:
raise ValueError(
"'{}' is not a valid format option. Expected one of '{}'".format(
key, "', '".join(_VALID_FORMAT_OPTIONS)
)
)
if key in _BINARY_FORMAT_OPTIONS:
value = str2bool(value)
fmt[key] = value
def set_prefix_and_suffix(fmt, formats, nb_file):
"""Add prefix and suffix information from jupytext.formats if format and path matches"""
for alt_fmt in long_form_multiple_formats(formats):
if alt_fmt["extension"] == fmt["extension"] and fmt.get(
"format_name"
) == alt_fmt.get("format_name"):
try:
base_path(nb_file, alt_fmt)
fmt.update(alt_fmt)
return
except InconsistentPath:
continue
class NotAPairedNotebook(ValueError):
"""An error raised when a notebook is not a paired notebook"""
class InconsistentVersions(ValueError):
"""An error raised when two paired files in the git index contain inconsistent representations"""
def file_in_git_index(path):
if not os.path.isfile(path):
return False
return system("git", "status", "--porcelain", path).strip().startswith(("M", "A"))
def git_timestamp(path):
if not os.path.isfile(path):
return None
# Files that are in the git index are considered most recent
if file_in_git_index(path):
return float("inf")
# Return the commit timestamp
try:
git_ts_str = system("git", "log", "-1", "--pretty=%ct", path).strip()
except SystemExit as err:
if err.code == 128:
# git not initialized
git_ts_str = ""
else:
raise
if git_ts_str:
return float(git_ts_str)
# The file is not in the git index
return get_timestamp(path)
def get_timestamp(path):
if not os.path.isfile(path):
return None
return os.lstat(path).st_mtime
def load_paired_notebook(notebook, fmt, config, formats, nb_file, log, pre_commit_mode):
"""Update the notebook with the inputs and outputs of the most recent paired files"""
if not formats:
raise NotAPairedNotebook(f"{shlex.quote(nb_file)} is not a paired notebook")
formats = long_form_multiple_formats(formats)
_, fmt_with_prefix_suffix = find_base_path_and_format(nb_file, formats)
fmt.update(fmt_with_prefix_suffix)
def read_one_file(path, fmt):
if path == nb_file:
return notebook
log(f"[jupytext] Loading {shlex.quote(path)}")
return read(path, fmt=fmt, config=config)
if pre_commit_mode and file_in_git_index(nb_file):
# We raise an error if two representations of this notebook in the git index are inconsistent
nb_files_in_git_index = sorted(
(
(alt_path, alt_fmt)
for alt_path, alt_fmt in paired_paths(nb_file, fmt, formats)
if file_in_git_index(alt_path)
),
key=lambda x: 0 if x[1]["extension"] != ".ipynb" else 1,
)
if len(nb_files_in_git_index) > 1:
path0, fmt0 = nb_files_in_git_index[0]
with open(path0, encoding="utf-8") as fp:
text0 = fp.read()
for alt_path, alt_fmt in nb_files_in_git_index[1:]:
nb = read(alt_path, fmt=alt_fmt, config=config)
alt_text = writes(nb, fmt=fmt0, config=config)
if alt_text != text0:
diff = compare(alt_text, text0, alt_path, path0, return_diff=True)
raise InconsistentVersions(
f"{shlex.quote(alt_path)} and {shlex.quote(path0)} are inconsistent.\n"
+ diff
+ f"\nPlease revert JUST ONE of the files with EITHER\n"
f" git reset {shlex.quote(alt_path)} && git checkout -- {shlex.quote(alt_path)}\nOR\n"
f" git reset {shlex.quote(path0)} && git checkout -- {shlex.quote(path0)}\n"
)
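# Take the input cells from the most recently modified paired file and the outputs
# from the ipynb file (if any), then merge them into a single notebook.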
inputs, outputs = latest_inputs_and_outputs(
nb_file, fmt, formats, git_timestamp if pre_commit_mode else get_timestamp
)
notebook = read_pair(inputs, outputs, read_one_file)
return notebook, inputs.path, outputs.path
def exec_command(command, input=None, capture=False, warn_only=False):
"""Execute the desired command, and pipe the given input into it"""
assert isinstance(command, list)
sys.stdout.write("[jupytext] Executing {}\n".format(" ".join(command)))
process = subprocess.Popen(
command,
**(
dict(stdout=subprocess.PIPE, stdin=subprocess.PIPE)
if input is not None
else {}
),
)
out, err = process.communicate(input=input)
if out and not capture:
sys.stdout.write(out.decode("utf-8"))
if err:
sys.stderr.write(err.decode("utf-8"))
if process.returncode:
msg = f"The command '{' '.join(command)}' exited with code {process.returncode}"
hint = (
"" if warn_only else " (use --warn-only to turn this error into a warning)"
)
sys.stderr.write(
f"[jupytext] {'Warning' if warn_only else 'Error'}: {msg}{hint}\n"
)
if not warn_only:
raise SystemExit(process.returncode)
return out
def pipe_notebook(
notebook, command, fmt="py:percent", update=True, prefix=None, warn_only=False
):
"""Pipe the notebook, in the desired representation, to the given command. Update the notebook
with the returned content if desired."""
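# Known formatters read the notebook text from stdin ('-'); test runners instead get a
# temporary file name through the '{}' placeholder.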
if command in ["black", "flake8", "autopep8"]:
command = command + " -"
elif command in ["pytest", "unittest"]:
command = command + " {}"
fmt = long_form_one_format(
fmt, notebook.metadata, auto_ext_requires_language_info=False
)
fmt = check_auto_ext(fmt, notebook.metadata, "--pipe-fmt")
text = writes(notebook, fmt)
command = shlex.split(command)
if "{}" in command:
if prefix is not None:
prefix = prefix + (" " if " " in prefix else "_")
tmp_file_args = dict(
mode="w+",
encoding="utf8",
prefix=prefix,
suffix=fmt["extension"],
delete=False,
)
try:
tmp = NamedTemporaryFile(**tmp_file_args)
except TypeError:
# NamedTemporaryFile does not have an 'encoding' argument on pypy
tmp_file_args.pop("encoding")
tmp = NamedTemporaryFile(**tmp_file_args)
try:
tmp.write(text)
tmp.close()
exec_command(
[cmd if cmd != "{}" else tmp.name for cmd in command],
capture=update,
warn_only=warn_only,
)
if not update:
return notebook
piped_notebook = read(tmp.name, fmt=fmt)
finally:
os.remove(tmp.name)
else:
cmd_output = exec_command(
command, text.encode("utf-8"), capture=update, warn_only=warn_only
)
if not update:
return notebook
if not cmd_output:
sys.stderr.write(
"[jupytext] The command '{}' had no output. As a result, the notebook is empty. "
"Is this expected? If not, use --check rather than --pipe for this command.".format(
command
)
)
piped_notebook = reads(cmd_output.decode("utf-8"), fmt)
if fmt["extension"] != ".ipynb":
piped_notebook = combine_inputs_with_outputs(piped_notebook, notebook, fmt)
# Remove jupytext / text_representation entry
if "jupytext" in notebook.metadata:
piped_notebook.metadata["jupytext"] = notebook.metadata["jupytext"]
else:
piped_notebook.metadata.pop("jupytext", None)
return piped_notebook
def execution_counts_are_in_order(notebook):
"""Returns True if all the code cells have an execution count, ordered from 1 to N with no missing number"""
expected_execution_count = 1
for cell in notebook.cells:
if cell.cell_type == "code":
if cell.execution_count != expected_execution_count:
return False
expected_execution_count += 1
return True
def code_cells_have_changed(notebook, nb_files):
"""Return True if the code cell sources differ from those in the given files"""
for nb_file in nb_files:
if not os.path.exists(nb_file):
return True
nb_ref = read(nb_file)
# Are the new code cells equals to those in the file?
ref = [cell.source for cell in nb_ref.cells if cell.cell_type == "code"]
new = [cell.source for cell in notebook.cells if cell.cell_type == "code"]
if ref != new:
return True
return False
|
# Warn if '--to' is used in place of '--output'
if (
not args.output
and args.output_format
and "." in args.output_format
# a suffix is expected to start with one of these characters #901
and not args.output_format.startswith((".", "-", "_"))
and "//" not in args.output_format
):
def single_line(msg, *args, **kwargs):
return f"[warning] {msg}\n"
warnings.formatwarning = single_line
warnings.warn(
"You might have passed a file name to the '--to' option, "
"when a format description was expected. Maybe you want to use the '-o' option instead?"
)
if args.input_format:
args.input_format = long_form_one_format(args.input_format)
if args.output_format:
args.output_format = long_form_one_format(args.output_format)
set_format_options(args.output_format, args.format_options)
# Wildcard extension on Windows #202
notebooks = []
for pattern in args.notebooks:
if "*" in pattern or "?" in pattern:
# Exclude the .jupytext.py configuration file
notebooks.extend(glob.glob(pattern, recursive=True))
else:
notebooks.append(pattern)
# Count how many file have round-trip issues when testing
exit_code = 0
for nb_file in notebooks:
if not args.warn_only:
exit_code += jupytext_single_file(nb_file, args, log)
else:
try:
exit_code += jupytext_single_file(nb_file, args, log)
except Exception as err:
sys.stderr.write(f"[jupytext] Error: {str(err)}\n")
return exit_code
def jupytext_single_file(nb_file, args, log):
"""Apply the jupytext command, with given arguments, to a single file"""
if nb_file == "-" and args.sync:
msg = "Missing notebook path."
if args.set_formats is not None and os.path.isfile(args.set_formats):
msg += f" Maybe you mean 'jupytext --sync {args.set_formats}' ?"
raise ValueError(msg)
nb_dest = None
if args.output:
nb_dest = args.output
elif nb_file == "-":
nb_dest = "-"
else:
try:
bp = base_path(nb_file, args.input_format)
except InconsistentPath:
if args.pre_commit_mode:
log(
"[jupytext] Ignoring unmatched input path {}{}".format(
nb_file,
f" for format {args.input_format}" if args.input_format else "",
)
)
return 0
raise
if args.output_format:
nb_dest = full_path(bp, args.output_format)
config = load_jupytext_config(os.path.abspath(nb_file))
# Just acting on metadata / pipe => save in place
save_in_place = not nb_dest and not args.sync
if save_in_place:
nb_dest = nb_file
if nb_dest == "-":
args.quiet = True
# I. ### Read the notebook ###
fmt = copy(args.input_format) or {}
if not fmt:
ext = os.path.splitext(nb_file)[1]
if ext:
fmt = {"extension": ext}
if fmt:
set_format_options(fmt, args.format_options)
log(
"[jupytext] Reading {}{}".format(
nb_file if nb_file != "-" else "stdin",
f" in format {short_form_one_format(fmt)}" if "extension" in fmt else "",
)
)
notebook = read(nb_file, fmt=fmt, config=config)
if "extension" in fmt and "format_name" not in fmt:
text_representation = notebook.metadata.get("jupytext", {}).get(
"text_representation", {}
)
if text_representation.get("extension") == fmt["extension"]:
fmt["format_name"] = text_representation["format_name"]
# Compute actual extension when using script/auto, and update nb_dest if necessary
dest_fmt = args.output_format
if dest_fmt and dest_fmt["extension"] == ".auto":
dest_fmt = check_auto_ext(dest_fmt, notebook.metadata, "--to")
if not args.output and nb_file != "-":
nb_dest = full_path(base_path(nb_file, args.input_format), dest_fmt)
# Set the kernel
set_kernel = args.set_kernel
if (
(not set_kernel)
and args.execute
and notebook.metadata.get("kernelspec", {}).get("name") is None
):
set_kernel = "-"
if set_kernel:
if set_kernel == "-":
language = (
notebook.metadata.get("jupytext", {}).get("main_language")
or notebook.metadata["kernelspec"]["language"]
)
if not language:
raise ValueError(
"Cannot infer a kernel as notebook language is not defined"
)
kernelspec = kernelspec_from_language(language)
else:
try:
kernelspec = get_kernel_spec(set_kernel)
except KeyError as err:
raise KeyError(
"Please choose a kernel name among {}".format(
find_kernel_specs().keys()
)
) from err
kernelspec = {
"name": args.set_kernel,
"language": kernelspec.language,
"display_name": kernelspec.display_name,
}
log("[jupytext] Setting kernel {}".format(kernelspec.get("name")))
args.update_metadata["kernelspec"] = kernelspec
# Are we updating a text file that has a metadata filter? #212
if args.update_metadata or args.format_options:
if (
notebook.metadata.get("jupytext", {}).get("notebook_metadata_filter")
== "-all"
):
notebook.metadata.get("jupytext", {}).pop("notebook_metadata_filter")
# Update the metadata
if args.update_metadata:
log(
"[jupytext] Updating notebook metadata with '{}'".format(
json.dumps(args.update_metadata)
)
)
if (
"kernelspec" in args.update_metadata
and "main_language" in notebook.metadata.get("jupytext", {})
):
notebook.metadata["jupytext"].pop("main_language")
recursive_update(notebook.metadata, args.update_metadata)
# Read paired notebooks, except if the pair is being created
nb_files = [nb_file, nb_dest]
if args.sync:
formats = notebook_formats(
notebook, config, nb_file, fallback_on_current_fmt=False
)
set_prefix_and_suffix(fmt, formats, nb_file)
if args.set_formats is None:
try:
notebook, inputs_nb_file, outputs_nb_file = load_paired_notebook(
notebook, fmt, config, formats, nb_file, log, args.pre_commit_mode
)
nb_files = [inputs_nb_file, outputs_nb_file]
except NotAPairedNotebook as err:
sys.stderr.write("[jupytext] Warning: " + str(err) + "\n")
return 0
except InconsistentVersions as err:
sys.stderr.write("[jupytext] Error: " + str(err) + "\n")
return 1
else:
nb_files = [nb_file]
# II. ### Apply commands onto the notebook ###
# Pipe the notebook into the desired commands
prefix = None if nb_file == "-" else os.path.splitext(os.path.basename(nb_file))[0]
for cmd in args.pipe or []:
notebook = pipe_notebook(
notebook, cmd, args.pipe_fmt, prefix=prefix, warn_only=args.warn_only
)
# and/or test the desired commands onto the notebook
for cmd in args.check or []:
pipe_notebook(
notebook,
cmd,
args.pipe_fmt,
update=False,
prefix=prefix,
warn_only=args.warn_only,
)
if (
args.execute
and args.pre_commit_mode
and execution_counts_are_in_order(notebook)
and not code_cells_have_changed(notebook, nb_files)
):
log(
f"[jupytext] Execution of {shlex.quote(nb_file)} "
f"skipped as code cells have not changed and outputs are present."
)
args.execute = False
# Execute the notebook
if args.execute:
kernel_name = notebook.metadata.get("kernelspec", {}).get("name")
log(f"[jupytext] Executing notebook with kernel {kernel_name}")
if nb_dest is not None and nb_dest != "-":
nb_path = os.path.dirname(nb_dest)
elif nb_file != "-":
nb_path = os.path.dirname(nb_file)
else:
nb_path = None
run_path = args.run_path or nb_path
if args.run_path and not os.path.isdir(run_path):
# is this a relative directory?
for base_dir in [nb_path, os.getcwd()]:
try_path = os.path.join(base_dir, run_path)
if os.path.isdir(try_path):
run_path = try_path
break
if not os.path.isdir(run_path):
raise ValueError(f"--run-path={args.run_path} is not a valid path")
if run_path:
resources = {"metadata": {"path": run_path}}
else:
resources = {}
try:
from nbconvert.preprocessors import ExecutePreprocessor
exec_proc = ExecutePreprocessor(timeout=None, kernel_name=kernel_name)
exec_proc.preprocess(notebook, resources=resources)
except (ImportError, RuntimeError) as err:
if args.pre_commit_mode:
raise RuntimeError(
"An error occurred while executing the notebook. Please "
"make sure that you have listed 'nbconvert' and 'ipykernel' "
"under 'additional_dependencies' in the jupytext hook."
) from err
raise RuntimeError(
"An error occurred while executing the notebook. Please "
"make sure that 'nbconvert' and 'ipykernel' are installed."
) from err
# III. ### Possible actions ###
# a. Test round trip conversion
if args.test or args.test_strict:
try:
# Round trip from an ipynb document
if fmt["extension"] == ".ipynb":
test_round_trip_conversion(
notebook,
dest_fmt,
update=args.update,
allow_expected_differences=not args.test_strict,
stop_on_first_error=args.stop_on_first_error,
)
# Round trip from a text file
else:
with open(nb_file, encoding="utf-8") as fp:
org_text = fp.read()
# If the destination is not ipynb, we convert to/back that format
if dest_fmt["extension"] != ".ipynb":
dest_text = writes(notebook, fmt=dest_fmt)
notebook = reads(dest_text, fmt=dest_fmt)
text = writes(notebook, fmt=fmt, config=config)
if args.test_strict:
compare(text, org_text)
else:
# we ignore the YAML header in the comparison #414
comment = _SCRIPT_EXTENSIONS.get(fmt["extension"], {}).get(
"comment", ""
)
# white spaces between the comment char and the YAML delimiters are allowed
if comment:
comment = comment + r"\s*"
yaml_header = re.compile(
r"^{comment}---\s*\n.*\n{comment}---\s*\n".format(
comment=comment
),
re.MULTILINE | re.DOTALL,
)
compare(
re.sub(yaml_header, "", text), re.sub(yaml_header, "", org_text)
)
except (NotebookDifference, AssertionError) as err:
sys.stdout.write(f"{nb_file}: {str(err)}")
return 1
return 0
# b. Output to the desired file or format
untracked_files = 0
def lazy_write(path, fmt=None, action=None, update_timestamp_only=False):
"""Write the notebook only if it has changed"""
if path == "-":
write(notebook, "-", fmt=fmt)
return
nonlocal untracked_files
if update_timestamp_only:
modified = False
else:
_, ext = os.path.splitext(path)
fmt = copy(fmt or {})
fmt = long_form_one_format(fmt, update={"extension": ext})
new_content = writes(notebook, fmt=fmt, config=config)
diff = None
if not new_content.endswith("\n"):
new_content += "\n"
if not os.path.isfile(path):
modified = True
else:
with open(path, encoding="utf-8") as fp:
current_content = fp.read()
modified = new_content != current_content
if modified and args.show_changes:
diff = compare(
new_content,
current_content,
"",
"",
return_diff=True,
)
if modified:
# The text representation of the notebook has changed, we write it on disk
if action is None:
message = f"[jupytext] Updating {shlex.quote(path)}"
else:
message = "[jupytext] Writing {path}{format}{action}".format(
path=shlex.quote(path),
format=" in format " + short_form_one_format(fmt)
if fmt and "format_name" in fmt
else "",
action=action,
)
if diff is not None:
message += " with this change:\n" + diff
log(message)
create_prefix_dir(path, fmt)
with open(path, "w", encoding="utf-8") as fp:
fp.write(new_content)
# Otherwise, we only update the timestamp of the text file to make sure
# they remain more recent than the ipynb file, for compatibility with the
# Jupytext contents manager for Jupyter
if args.use_source_timestamp:
log(
f"[jupytext] Setting the timestamp of {shlex.quote(path)} equal to that of {shlex.quote(nb_file)}"
)
os.utime(path, (os.stat(path).st_atime, os.stat(nb_file).st_mtime))
elif not modified and not path.endswith(".ipynb"):
log(f"[jupytext] Updating the timestamp of {shlex.quote(path)}")
os.utime(path, None)
if args.pre_commit:
system("git", "add", path)
if args.pre_commit_mode and is_untracked(path):
log(
f"[jupytext] Error: the git index is outdated.\n"
f"Please add the paired notebook with:\n"
f" git add {shlex.quote(path)}"
)
untracked_files += 1
return
if nb_dest:
if nb_dest == nb_file and not dest_fmt:
dest_fmt = fmt
# Test consistency between dest name and output format
if dest_fmt and nb_dest != "-":
base_path(nb_dest, dest_fmt)
# Describe what jupytext is doing
if save_in_place:
action = ""
elif os.path.isfile(nb_dest) and args.update:
if not nb_dest.endswith(".ipynb"):
raise ValueError("--update is only for ipynb files")
action = " (destination file updated)"
check_file_version(notebook, nb_file, nb_dest)
notebook = combine_inputs_with_outputs(notebook, read(nb_dest), fmt=fmt)
elif os.path.isfile(nb_dest):
suggest_update = (
" [use --update to preserve cell outputs and ids]"
if nb_dest.endswith(".ipynb")
else ""
)
action = f" (destination file replaced{suggest_update})"
else:
action = ""
formats = notebook.metadata.get("jupytext", {}).get("formats")
formats = long_form_multiple_formats(formats)
if formats:
try:
base_path_out, _ = find_base_path_and_format(nb_dest, formats)
except InconsistentPath:
# Drop 'formats' if the destination is not part of the paired notebooks
formats = {}
notebook.metadata.get("jupytext", {}).pop("formats")
lazy_write(nb_dest, fmt=dest_fmt, action=action)
nb_dest_in_pair = formats and any(
os.path.exists(alt_path) and os.path.samefile(nb_dest, alt_path)
for alt_path, _ in paired_paths(nb_file, fmt, formats)
)
if (
nb_dest_in_pair
and os.path.isfile(nb_file)
and not nb_file.endswith(".ipynb")
and os.path.isfile(nb_dest)
and nb_dest.endswith(".ipynb")
):
# If the destination is an ipynb file and is in the pair, then we
# update the original text file timestamp, as required by our Content Manager
# Otherwise Jupyter will refuse to open the paired notebook #335
# NB: An alternative is --use-source-timestamp
lazy_write(nb_file, update_timestamp_only=True)
# c. Synchronize paired notebooks
elif args.sync:
write_pair(nb_file, formats, lazy_write)
return untracked_files
def notebooks_in_git_index(fmt):
"""Return the list of modified and deleted ipynb files in the git index that match the given format"""
git_status = system("git", "status", "--porcelain")
re_modified = re.compile(r"^[AM]+\s+(?P<name>.*)", re.MULTILINE)
modified_files_in_git_index = re_modified.findall(git_status)
files = []
for nb_file in modified_files_in_git_index:
if nb_file.startswith('"') and nb_file.endswith('"'):
nb_file = nb_file[1:-1]
try:
base_path(nb_file, fmt)
files.append(nb_file)
except InconsistentPath:
continue
return files
def is_untracked(filepath):
"""Check whether a file was created or modified and needs to be added to the git index"""
if not filepath:
return False
output = system("git", "ls-files", filepath).strip()
if output == "":
return True
output = system("git", "diff", filepath).strip()
if output != "":
return True
return False
def print_paired_paths(nb_file, fmt):
"""Display the paired paths for this notebook"""
notebook = read(nb_file, fmt=fmt)
formats = notebook.metadata.get("jupytext", {}).get("formats")
if formats:
for path, _ in paired_paths(nb_file, fmt, formats):
if path != nb_file:
sys.stdout.write(path + "\n")
def set_format_options(fmt, format_options):
"""Apply the desired format options to the format description fmt"""
if not format_options:
return
for opt in format_options:
try:
key, value = opt.split("=")
except ValueError as err:
raise ValueError(
"Format options are expected to be of the form key=value, not '{}'".format(
opt
)
) from err
if key not in _VALID_FORMAT_OPTIONS:
raise ValueError(
"'{}' is not a valid format option. Expected one of '{}'".format(
key, "', '".join(_VALID_FORMAT_OPTIONS)
)
)
if key in _BINARY_FORMAT_OPTIONS:
value = str2bool(value)
fmt[key] = value
def set_prefix_and_suffix(fmt, formats, nb_file):
"""Add prefix and suffix information from jupytext.formats if format and path matches"""
for alt_fmt in long_form_multiple_formats(formats):
if alt_fmt["extension"] == fmt["extension"] and fmt.get(
"format_name"
) == alt_fmt.get("format_name"):
try:
base_path(nb_file, alt_fmt)
fmt.update(alt_fmt)
return
except InconsistentPath:
continue
class NotAPairedNotebook(ValueError):
"""An error raised when a notebook is not a paired notebook"""
class InconsistentVersions(ValueError):
"""An error raised when two paired files in the git index contain inconsistent representations"""
def file_in_git_index(path):
if not os.path.isfile(path):
return False
return system("git", "status", "--porcelain", path).strip().startswith(("M", "A"))
def git_timestamp(path):
if not os.path.isfile(path):
return None
# Files that are in the git index are considered most recent
if file_in_git_index(path):
return float("inf")
# Return the commit timestamp
try:
git_ts_str = system("git", "log", "-1", "--pretty=%ct", path).strip()
except SystemExit as err:
if err.code == 128:
# git not initialized
git_ts_str = ""
else:
raise
if git_ts_str:
return float(git_ts_str)
# The file is not in the git index
return get_timestamp(path)
def get_timestamp(path):
if not os.path.isfile(path):
return None
return os.lstat(path).st_mtime
def load_paired_notebook(notebook, fmt, config, formats, nb_file, log, pre_commit_mode):
"""Update the notebook with the inputs and outputs of the most recent paired files"""
if not formats:
raise NotAPairedNotebook(f"{shlex.quote(nb_file)} is not a paired notebook")
formats = long_form_multiple_formats(formats)
_, fmt_with_prefix_suffix = find_base_path_and_format(nb_file, formats)
fmt.update(fmt_with_prefix_suffix)
def read_one_file(path, fmt):
if path == nb_file:
return notebook
log(f"[jupytext] Loading {shlex.quote(path)}")
return read(path, fmt=fmt, config=config)
if pre_commit_mode and file_in_git_index(nb_file):
# We raise an error if two representations of this notebook in the git index are inconsistent
nb_files_in_git_index = sorted(
(
(alt_path, alt_fmt)
for alt_path, alt_fmt in paired_paths(nb_file, fmt, formats)
if file_in_git_index(alt_path)
),
key=lambda x: 0 if x[1]["extension"] != ".ipynb" else 1,
)
if len(nb_files_in_git_index) > 1:
path0, fmt0 = nb_files_in_git_index[0]
with open(path0, encoding="utf-8") as fp:
text0 = fp.read()
for alt_path, alt_fmt in nb_files_in_git_index[1:]:
nb = read(alt_path, fmt=alt_fmt, config=config)
alt_text = writes(nb, fmt=fmt0, config=config)
if alt_text != text0:
diff = compare(alt_text, text0, alt_path, path0, return_diff=True)
raise InconsistentVersions(
f"{shlex.quote(alt_path)} and {shlex.quote(path0)} are inconsistent.\n"
+ diff
+ f"\nPlease revert JUST ONE of the files with EITHER\n"
f" git reset {shlex.quote(alt_path)} && git checkout -- {shlex.quote(alt_path)}\nOR\n"
f" git reset {shlex.quote(path0)} && git checkout -- {shlex.quote(path0)}\n"
)
inputs, outputs = latest_inputs_and_outputs(
nb_file, fmt, formats, git_timestamp if pre_commit_mode else get_timestamp
)
notebook = read_pair(inputs, outputs, read_one_file)
return notebook, inputs.path, outputs.path
def exec_command(command, input=None, capture=False, warn_only=False):
"""Execute the desired command, and pipe the given input into it"""
assert isinstance(command, list)
sys.stdout.write("[jupytext] Executing {}\n".format(" ".join(command)))
process = subprocess.Popen(
command,
**(
dict(stdout=subprocess.PIPE, stdin=subprocess.PIPE)
if input is not None
else {}
),
)
out, err = process.communicate(input=input)
if out and not capture:
sys.stdout.write(out.decode("utf-8"))
if err:
sys.stderr.write(err.decode("utf-8"))
if process.returncode:
msg = f"The command '{' '.join(command)}' exited with code {process.returncode}"
hint = (
"" if warn_only else " (use --warn-only to turn this error into a warning)"
)
sys.stderr.write(
f"[jupytext] {'Warning' if warn_only else 'Error'}: {msg}{hint}\n"
)
if not warn_only:
raise SystemExit(process.returncode)
return out
def pipe_notebook(
notebook, command, fmt="py:percent", update=True, prefix=None, warn_only=False
):
"""Pipe the notebook, in the desired representation, to the given command. Update the notebook
with the returned content if desired."""
if command in ["black", "flake8", "autopep8"]:
command = command + " -"
elif command in ["pytest", "unittest"]:
command = command + " {}"
fmt = long_form_one_format(
fmt, notebook.metadata, auto_ext_requires_language_info=False
)
fmt = check_auto_ext(fmt, notebook.metadata, "--pipe-fmt")
text = writes(notebook, fmt)
command = shlex.split(command)
if "{}" in command:
if prefix is not None:
prefix = prefix + (" " if " " in prefix else "_")
tmp_file_args = dict(
mode="w+",
encoding="utf8",
prefix=prefix,
suffix=fmt["extension"],
delete=False,
)
try:
tmp = NamedTemporaryFile(**tmp_file_args)
except TypeError:
# NamedTemporaryFile does not have an 'encoding' argument on pypy
tmp_file_args.pop("encoding")
tmp = NamedTemporaryFile(**tmp_file_args)
try:
tmp.write(text)
tmp.close()
exec_command(
[cmd if cmd != "{}" else tmp.name for cmd in command],
capture=update,
warn_only=warn_only,
)
if not update:
return notebook
piped_notebook = read(tmp.name, fmt=fmt)
finally:
os.remove(tmp.name)
else:
cmd_output = exec_command(
command, text.encode("utf-8"), capture=update, warn_only=warn_only
)
if not update:
return notebook
if not cmd_output:
sys.stderr.write(
"[jupytext] The command '{}' had no output. As a result, the notebook is empty. "
"Is this expected? If not, use --check rather than --pipe for this command.".format(
command
)
)
piped_notebook = reads(cmd_output.decode("utf-8"), fmt)
if fmt["extension"] != ".ipynb":
piped_notebook = combine_inputs_with_outputs(piped_notebook, notebook, fmt)
# Remove jupytext / text_representation entry
if "jupytext" in notebook.metadata:
piped_notebook.metadata["jupytext"] = notebook.metadata["jupytext"]
else:
piped_notebook.metadata.pop("jupytext", None)
return piped_notebook
def execution_counts_are_in_order(notebook):
"""Returns True if all the code cells have an execution count, ordered from 1 to N with no missing number"""
expected_execution_count = 1
for cell in notebook.cells:
if cell.cell_type == "code":
if cell.execution_count != expected_execution_count:
return False
expected_execution_count += 1
return True
def code_cells_have_changed(notebook, nb_files):
"""The source for the code cells has not changed"""
for nb_file in nb_files:
if not os.path.exists(nb_file):
return True
nb_ref = read(nb_file)
# Are the new code cells equals to those in the file?
ref = [cell.source for cell in nb_ref.cells if cell.cell_type == "code"]
new = [cell.source for cell in notebook.cells if cell.cell_type == "code"]
if ref != new:
return True
return False
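# A minimal usage sketch (assuming the code above is importable as jupytext.cli;
# the notebook name below is only an illustration). The entry point accepts the
# same arguments that the argparse help above documents:
#
#     from jupytext.cli import jupytext
#
#     # convert a notebook to the percent script format
#     jupytext(["notebook.ipynb", "--to", "py:percent"])
#
#     # keep the paired representations in sync and reformat them with black
#     jupytext(["notebook.ipynb", "--sync", "--pipe", "black"])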
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""pytest fixtures for use with the aiida.restapi tests"""
import pytest
@pytest.fixture(scope='function')
def restapi_server():
"""Make REST API server"""
from werkzeug.serving import make_server
from aiida.restapi.common.config import CLI_DEFAULTS
from aiida.restapi.run_api import configure_api
def _restapi_server(restapi=None):
if restapi is None:
flask_restapi = configure_api()
else:
flask_restapi = configure_api(flask_api=restapi)
return make_server(
host=CLI_DEFAULTS['HOST_NAME'],
port=int(CLI_DEFAULTS['PORT']),
app=flask_restapi.app,
threaded=True,
processes=1,
request_handler=None,
passthrough_errors=True,
ssl_context=None,
fd=None
)
return _restapi_server
@pytest.fixture
def server_url():
from aiida.restapi.common.config import CLI_DEFAULTS, API_CONFIG
return f"http://{CLI_DEFAULTS["HOST_NAME"]}:{CLI_DEFAULTS["PORT"]}{API_CONFIG["PREFIX"]}"
@pytest.fixture
def restrict_sqlalchemy_queuepool(aiida_profile):
"""Create special SQLAlchemy engine for use with QueryBuilder - backend-agnostic"""
from aiida.manage.manager import get_manager
backend_manager = get_manager().get_backend_manager()
backend_manager.reset_backend_environment()
backend_manager.load_backend_environment(aiida_profile, pool_timeout=1, max_overflow=0)
@pytest.fixture
def populate_restapi_database(clear_database_before_test):
"""Populates the database with a considerable set of nodes to test the restAPI"""
# pylint: disable=unused-argument
from aiida import orm
struct_forcif = orm.StructureData().store()
orm.StructureData().store()
orm.StructureData().store()
orm.Dict().store()
orm.Dict().store()
orm.CifData(ase=struct_forcif.get_ase()).store()
orm.KpointsData().store()
orm.FolderData().store()
orm.CalcFunctionNode().store()
orm.CalcJobNode().store()
orm.CalcJobNode().store()
orm.WorkFunctionNode().store()
orm.WorkFunctionNode().store()
orm.WorkChainNode().store()
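# A minimal sketch of how these fixtures might be combined in a test. The
# '/nodes' endpoint and the use of `requests` are assumptions made for the
# example; only the fixtures themselves are defined in this file:
#
#     import threading
#     import requests
#
#     def test_nodes_endpoint(restapi_server, server_url, populate_restapi_database):
#         server = restapi_server()
#         thread = threading.Thread(target=server.serve_forever)
#         thread.start()
#         try:
#             response = requests.get(f"{server_url}/nodes", timeout=10)
#             assert response.status_code == 200
#         finally:
#             server.shutdown()
#             thread.join()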
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""pytest fixtures for use with the aiida.restapi tests"""
import pytest
@pytest.fixture(scope='function')
def restapi_server():
"""Make REST API server"""
from werkzeug.serving import make_server
from aiida.restapi.common.config import CLI_DEFAULTS
from aiida.restapi.run_api import configure_api
def _restapi_server(restapi=None):
if restapi is None:
flask_restapi = configure_api()
else:
flask_restapi = configure_api(flask_api=restapi)
return make_server(
host=CLI_DEFAULTS['HOST_NAME'],
port=int(CLI_DEFAULTS['PORT']),
app=flask_restapi.app,
threaded=True,
processes=1,
request_handler=None,
passthrough_errors=True,
ssl_context=None,
fd=None
)
return _restapi_server
@pytest.fixture
def server_url():
from aiida.restapi.common.config import CLI_DEFAULTS, API_CONFIG
return f"http://{CLI_DEFAULTS['HOST_NAME']}:{CLI_DEFAULTS['PORT']}{API_CONFIG['PREFIX']}"
@pytest.fixture
def restrict_sqlalchemy_queuepool(aiida_profile):
"""Create special SQLAlchemy engine for use with QueryBuilder - backend-agnostic"""
from aiida.manage.manager import get_manager
backend_manager = get_manager().get_backend_manager()
backend_manager.reset_backend_environment()
backend_manager.load_backend_environment(aiida_profile, pool_timeout=1, max_overflow=0)
@pytest.fixture
def populate_restapi_database(clear_database_before_test):
"""Populates the database with a considerable set of nodes to test the restAPI"""
# pylint: disable=unused-argument
from aiida import orm
struct_forcif = orm.StructureData().store()
orm.StructureData().store()
orm.StructureData().store()
orm.Dict().store()
orm.Dict().store()
orm.CifData(ase=struct_forcif.get_ase()).store()
orm.KpointsData().store()
orm.FolderData().store()
orm.CalcFunctionNode().store()
orm.CalcJobNode().store()
orm.CalcJobNode().store()
orm.WorkFunctionNode().store()
orm.WorkFunctionNode().store()
orm.WorkChainNode().store()
|
from PIL import Image
from django.conf import settings
from . import forms, recognition
from . import utils
from . import models
from django.shortcuts import render, redirect
from django.contrib import admin
from django.core.mail import send_mail
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
import json
def signup(request):
if request.method == 'POST':
form = forms.UserCreationForm(request.POST)
if form.is_valid():
form.save()
return redirect('../admin/')
else:
form = forms.UserCreationForm()
return render(request, 'admin/logon.html',
{'form': form, 'site_header': admin.site.site_header, 'site_title': admin.site.site_title})
@method_decorator(csrf_exempt, name='dispatch')
def hello(request) -> JsonResponse:
"""hello API endpoint, clients request for access tokens through this api by their device_id"""
data = json.loads(request.body)
try:
device_id = data['device_id']
if (device := models.Device.objects.filter(id=device_id)).count():
device = device[0]
else:
# registering newly connected device (waiting for user to claim)
device = models.Device(id=data['device_id'])
device.save()
if not device.user:
return JsonResponse(data=utils.base_response(ok=False, message='Device is yet to be claimed by a user'))
tokens = models.AccessToken.objects.filter(device=device)
if tokens.count():
# request for new token -> invalidate old token
last_token = tokens.latest('time')
last_token.valid = False
last_token.save()
# create new access token
token = models.AccessToken(
device=device, ip=utils.get_client_ip(request))
token.save()
return JsonResponse(data=utils.base_response(response=dict(token=token.token)))
except KeyError:
return JsonResponse(data=utils.base_response(ok=False, message='No `device_id` specified'))
def authenticate_device(funct):
@method_decorator(csrf_exempt, name='dispatch')
def view_wrapper(request, *args, **kwargs):
if request.POST:
data = dict(request.POST)
file = request.FILES.get('image', None)
else:
data = json.loads(request.body)
file = None
try:
token = data['token']
if isinstance(token, list):
token = token[0]
access_token = models.AccessToken.objects.get(token=token)
if not access_token.is_valid(request):
return JsonResponse(data=utils.base_response(message='This token is no longer valid.', ok=False))
auth_res = dict(user=access_token.device.user,
device=access_token.device)
except KeyError:
return JsonResponse(data=utils.base_response(message='No `token` was specified.', ok=False))
except (models.models.ObjectDoesNotExist, Exception):
return JsonResponse(data=utils.base_response(message='Invalid `token` was specified.', ok=False))
return funct(request, *args, data=data, file=file, auth_res=auth_res, **kwargs)
return view_wrapper
@authenticate_device
def fetch(request, data: dict = None, file=None, auth_res=None):
return JsonResponse(
data=utils.base_response(
response=dict(faces=[
dict(embedding=face.embedding, face_id=face.id) for face in
models.Face.objects.filter(user=auth_res['user'])
],
in_count=auth_res['device'].inside_count(),
)
)
)
@authenticate_device
def introduce(request, data: dict = None, file=None, auth_res=None):
try:
embedding = data['embedding']
embedding = json.loads(embedding if not isinstance(
embedding, list) else embedding[0])
image = Image.open(file).convert('RGB')
face = recognition.find_face(
auth_res['user'], image=image, embedding=embedding)
if isinstance(face, bool):
face = models.Face.save_pil(
user=auth_res['user'], image=image, embedding=embedding)
return JsonResponse(data=utils.base_response(response=dict(face_id=face.id)))
except KeyError:
return JsonResponse(data=utils.base_response(message='Embedding was not mentioned', ok=False))
def mail_message(log):
device = f'{log.device.name if log.device.name else log.device.id}'
face = f'{log.face.name if log.face.name else log.face.id}'
    kind = f'{"enter" if log.kind == "E" else "exit"}'
num_in = log.device.inside_count()
return f'Your device "{device}", saw "{face}" {kind}.\nThere are currently {num_in} people' \
f' inside this property.'
@authenticate_device
def log(request, data: dict = None, file=None, auth_res=None):
try:
face_id = data['face_id'] if not isinstance(
data['face_id'], list) else data['face_id'][0]
face = models.Face.objects.get(id=face_id)
kind = data['kind'] if not isinstance(
data['kind'], list) else data['kind'][0]
device = auth_res['device']
image = Image.open(file).convert('RGB') if file is not None else None
log = models.Log.save_pil(
face=face, device=device, kind=kind, image=image)
if settings.GMAIL:
send_mail(subject='Surveillance Log',
message=mail_message(log),
from_email=settings.GMAIL,
recipient_list=[device.user.email],
fail_silently=True)
return JsonResponse(data=utils.base_response(
ok=True, message='Logged successfully', response=dict(
in_count=log.device.inside_count(), name='Unknown' if not log.face.name else log.face.name)
))
except KeyError:
return JsonResponse(
data=utils.base_response(message='Both `face_id` and `kind` are expected to be specified', ok=False))
except (models.models.ObjectDoesNotExist,):
return JsonResponse(data=utils.base_response(message='Invalid `face_id` is specified', ok=False))
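# A minimal client-side sketch of the token handshake implemented above. The
# URL paths and the exact JSON layout returned by utils.base_response are
# assumptions; they are defined elsewhere (urls.py / utils.py), not in this file:
#
#     import requests
#
#     # 1. exchange the device id for an access token via the `hello` view
#     resp = requests.post("http://localhost:8000/hello/",
#                          json={"device_id": "my-device-id"})
#     token = resp.json()["response"]["token"]
#
#     # 2. use the token to fetch the known faces via the `fetch` view
#     resp = requests.post("http://localhost:8000/fetch/",
#                          json={"token": token})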
|
from PIL import Image
from django.conf import settings
from . import forms, recognition
from . import utils
from . import models
from django.shortcuts import render, redirect
from django.contrib import admin
from django.core.mail import send_mail
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
import json
def signup(request):
if request.method == 'POST':
form = forms.UserCreationForm(request.POST)
if form.is_valid():
form.save()
return redirect('../admin/')
else:
form = forms.UserCreationForm()
return render(request, 'admin/logon.html',
{'form': form, 'site_header': admin.site.site_header, 'site_title': admin.site.site_title})
@method_decorator(csrf_exempt, name='dispatch')
def hello(request) -> JsonResponse:
"""hello API endpoint, clients request for access tokens through this api by their device_id"""
data = json.loads(request.body)
try:
device_id = data['device_id']
if (device := models.Device.objects.filter(id=device_id)).count():
device = device[0]
else:
# registering newly connected device (waiting for user to claim)
device = models.Device(id=data['device_id'])
device.save()
if not device.user:
return JsonResponse(data=utils.base_response(ok=False, message='Device is yet to be claimed by a user'))
tokens = models.AccessToken.objects.filter(device=device)
if tokens.count():
# request for new token -> invalidate old token
last_token = tokens.latest('time')
last_token.valid = False
last_token.save()
# create new access token
token = models.AccessToken(
device=device, ip=utils.get_client_ip(request))
token.save()
return JsonResponse(data=utils.base_response(response=dict(token=token.token)))
except KeyError:
return JsonResponse(data=utils.base_response(ok=False, message='No `device_id` specified'))
def authenticate_device(funct):
@method_decorator(csrf_exempt, name='dispatch')
def view_wrapper(request, *args, **kwargs):
if request.POST:
data = dict(request.POST)
file = request.FILES.get('image', None)
else:
data = json.loads(request.body)
file = None
try:
token = data['token']
if isinstance(token, list):
token = token[0]
access_token = models.AccessToken.objects.get(token=token)
if not access_token.is_valid(request):
return JsonResponse(data=utils.base_response(message='This token is no longer valid.', ok=False))
auth_res = dict(user=access_token.device.user,
device=access_token.device)
except KeyError:
return JsonResponse(data=utils.base_response(message='No `token` was specified.', ok=False))
except (models.models.ObjectDoesNotExist, Exception):
return JsonResponse(data=utils.base_response(message='Invalid `token` was specified.', ok=False))
return funct(request, *args, data=data, file=file, auth_res=auth_res, **kwargs)
return view_wrapper
@authenticate_device
def fetch(request, data: dict = None, file=None, auth_res=None):
return JsonResponse(
data=utils.base_response(
response=dict(faces=[
dict(embedding=face.embedding, face_id=face.id) for face in
models.Face.objects.filter(user=auth_res['user'])
],
in_count=auth_res['device'].inside_count(),
)
)
)
@authenticate_device
def introduce(request, data: dict = None, file=None, auth_res=None):
try:
embedding = data['embedding']
embedding = json.loads(embedding if not isinstance(
embedding, list) else embedding[0])
image = Image.open(file).convert('RGB')
face = recognition.find_face(
auth_res['user'], image=image, embedding=embedding)
if isinstance(face, bool):
face = models.Face.save_pil(
user=auth_res['user'], image=image, embedding=embedding)
return JsonResponse(data=utils.base_response(response=dict(face_id=face.id)))
except KeyError:
return JsonResponse(data=utils.base_response(message='Embedding was not mentioned', ok=False))
def mail_message(log):
device = f'{log.device.name if log.device.name else log.device.id}'
face = f'{log.face.name if log.face.name else log.face.id}'
kind = f'{"enter" if log.kind == "E" else "exit"}'
num_in = log.device.inside_count()
return f'Your device "{device}", saw "{face}" {kind}.\nThere are currently {num_in} people' \
f' inside this property.'
@authenticate_device
def log(request, data: dict = None, file=None, auth_res=None):
try:
face_id = data['face_id'] if not isinstance(
data['face_id'], list) else data['face_id'][0]
face = models.Face.objects.get(id=face_id)
kind = data['kind'] if not isinstance(
data['kind'], list) else data['kind'][0]
device = auth_res['device']
image = Image.open(file).convert('RGB') if file is not None else None
log = models.Log.save_pil(
face=face, device=device, kind=kind, image=image)
if settings.GMAIL:
send_mail(subject='Surveillance Log',
message=mail_message(log),
from_email=settings.GMAIL,
recipient_list=[device.user.email],
fail_silently=True)
return JsonResponse(data=utils.base_response(
ok=True, message='Logged successfully', response=dict(
in_count=log.device.inside_count(), name='Unknown' if not log.face.name else log.face.name)
))
except KeyError:
return JsonResponse(
data=utils.base_response(message='Both `face_id` and `kind` are expected to be specified', ok=False))
except (models.models.ObjectDoesNotExist,):
return JsonResponse(data=utils.base_response(message='Invalid `face_id` is specified', ok=False))
|
"""Support for Sure PetCare Flaps/Pets sensors."""
from __future__ import annotations
from typing import Any, cast
from homeassistant.components.sensor import SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_VOLTAGE,
DEVICE_CLASS_BATTERY,
MASS_GRAMS,
PERCENTAGE,
VOLUME_MILLILITERS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from surepy.entities import SurepyEntity
from surepy.entities.devices import (
Feeder as SureFeeder,
FeederBowl as SureFeederBowl,
Felaqua as SureFelaqua,
Flap as SureFlap,
SurepyDevice,
)
from surepy.enums import EntityType, LockState
# pylint: disable=relative-beyond-top-level
from . import SurePetcareAPI
from .const import ATTR_VOLTAGE_FULL, ATTR_VOLTAGE_LOW, DOMAIN, SPC, SURE_MANUFACTURER
PARALLEL_UPDATES = 2
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigEntry,
async_add_entities: Any,
discovery_info: Any = None,
) -> None:
"""Set up Sure PetCare sensor platform."""
await async_setup_entry(hass, config, async_add_entities)
async def async_setup_entry(
hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: Any
) -> None:
"""Set up config entry Sure PetCare Flaps sensors."""
entities: list[Flap | Felaqua | Feeder | FeederBowl | Battery] = []
spc: SurePetcareAPI = hass.data[DOMAIN][SPC]
for surepy_entity in spc.coordinator.data.values():
if surepy_entity.type in [
EntityType.CAT_FLAP,
EntityType.PET_FLAP,
]:
entities.append(Flap(spc.coordinator, surepy_entity.id, spc))
elif surepy_entity.type == EntityType.FELAQUA:
entities.append(Felaqua(spc.coordinator, surepy_entity.id, spc))
elif surepy_entity.type == EntityType.FEEDER:
for bowl in surepy_entity.bowls.values():
entities.append(
FeederBowl(spc.coordinator, surepy_entity.id, spc, bowl.raw_data())
)
entities.append(Feeder(spc.coordinator, surepy_entity.id, spc))
if surepy_entity.type in [
EntityType.CAT_FLAP,
EntityType.PET_FLAP,
EntityType.FEEDER,
EntityType.FELAQUA,
]:
voltage_batteries_full = cast(
float, config_entry.options.get(ATTR_VOLTAGE_FULL)
)
voltage_batteries_low = cast(
float, config_entry.options.get(ATTR_VOLTAGE_LOW)
)
entities.append(
Battery(
spc.coordinator,
surepy_entity.id,
spc,
voltage_full=voltage_batteries_full,
voltage_low=voltage_batteries_low,
)
)
async_add_entities(entities)
class SurePetcareSensor(CoordinatorEntity, SensorEntity):
"""A binary sensor implementation for Sure Petcare Entities."""
_attr_should_poll = False
def __init__(self, coordinator, _id: int, spc: SurePetcareAPI):
"""Initialize a Sure Petcare sensor."""
super().__init__(coordinator)
self._id = _id
self._spc: SurePetcareAPI = spc
self._coordinator = coordinator
self._surepy_entity: SurepyEntity = self._coordinator.data[_id]
self._state: dict[str, Any] = self._surepy_entity.raw_data()["status"]
self._attr_available = bool(self._state)
self._attr_unique_id = f"{self._surepy_entity.household_id}-{self._id}"
self._attr_extra_state_attributes = (
{**self._surepy_entity.raw_data()} if self._state else {}
)
self._attr_name: str = (
f"{self._surepy_entity.type.name.replace("_", " ").title()} "
f"{self._surepy_entity.name.capitalize()}"
)
@property
def device_info(self):
device = {}
try:
model = f"{self._surepy_entity.type.name.replace("_", " ").title()}"
if serial := self._surepy_entity.raw_data().get("serial_number"):
model = f"{model} ({serial})"
elif mac_address := self._surepy_entity.raw_data().get("mac_address"):
model = f"{model} ({mac_address})"
elif tag_id := self._surepy_entity.raw_data().get("tag_id"):
model = f"{model} ({tag_id})"
device = {
"identifiers": {(DOMAIN, self._id)},
"name": self._surepy_entity.name.capitalize(),
"manufacturer": SURE_MANUFACTURER,
"model": model,
}
if self._state:
versions = self._state.get("version", {})
if dev_fw_version := versions.get("device", {}).get("firmware"):
device["sw_version"] = dev_fw_version
if (lcd_version := versions.get("lcd", {})) and (
rf_version := versions.get("rf", {})
):
device["sw_version"] = (
f"lcd: {lcd_version.get("version", lcd_version)["firmware"]} | "
f"fw: {rf_version.get("version", rf_version)["firmware"]}"
)
except AttributeError:
pass
return device
class Flap(SurePetcareSensor):
"""Sure Petcare Flap."""
def __init__(self, coordinator, _id: int, spc: SurePetcareAPI) -> None:
super().__init__(coordinator, _id, spc)
self._surepy_entity: SureFlap
self._attr_entity_picture = self._surepy_entity.icon
self._attr_unit_of_measurement = None
if self._state:
self._attr_extra_state_attributes = {
"learn_mode": bool(self._state["learn_mode"]),
**self._surepy_entity.raw_data(),
}
if locking := self._state.get("locking"):
self._attr_state = LockState(locking["mode"]).name.casefold()
@property
def state(self) -> str | None:
"""Return battery level in percent."""
if (
state := cast(SureFlap, self._coordinator.data[self._id])
.raw_data()
.get("status")
):
return LockState(state["locking"]["mode"]).name.casefold()
class Felaqua(SurePetcareSensor):
"""Sure Petcare Felaqua."""
def __init__(self, coordinator, _id: int, spc: SurePetcareAPI):
super().__init__(coordinator, _id, spc)
self._surepy_entity: SureFelaqua
self._attr_entity_picture = self._surepy_entity.icon
self._attr_unit_of_measurement = VOLUME_MILLILITERS
@property
def state(self) -> float | None:
"""Return the remaining water."""
if felaqua := cast(SureFelaqua, self._coordinator.data[self._id]):
return int(felaqua.water_remaining) if felaqua.water_remaining else None
class FeederBowl(SurePetcareSensor):
"""Sure Petcare Feeder Bowl."""
def __init__(
self,
coordinator,
_id: int,
spc: SurePetcareAPI,
bowl_data: dict[str, int | str],
):
"""Initialize a Bowl sensor."""
super().__init__(coordinator, _id, spc)
self.feeder_id = _id
self.bowl_id = int(bowl_data["index"])
self._id = int(f"{_id}{str(self.bowl_id)}")
self._spc: SurePetcareAPI = spc
self._surepy_feeder_entity: SurepyEntity = self._coordinator.data[_id]
self._surepy_entity: SureFeederBowl = self._coordinator.data[_id].bowls[
self.bowl_id
]
self._state: dict[str, Any] = bowl_data
# https://github.com/PyCQA/pylint/issues/2062
# pylint: disable=no-member
self._attr_name = (
f"{EntityType.FEEDER.name.replace("_", " ").title()} "
f"{self._surepy_entity.name.capitalize()}"
)
self._attr_icon = "mdi:bowl"
self._attr_state = int(self._surepy_entity.weight)
self._attr_unique_id = (
f"{self._surepy_feeder_entity.household_id}-{self.feeder_id}-{self.bowl_id}"
)
self._attr_unit_of_measurement = MASS_GRAMS
@property
def state(self) -> float | None:
"""Return the remaining water."""
if (feeder := cast(SureFeeder, self._coordinator.data[self.feeder_id])) and (
weight := feeder.bowls[self.bowl_id].weight
):
return int(weight) if weight and weight > 0 else None
class Feeder(SurePetcareSensor):
"""Sure Petcare Feeder."""
def __init__(self, coordinator, _id: int, spc: SurePetcareAPI):
super().__init__(coordinator, _id, spc)
self._surepy_entity: SureFeeder
self._attr_entity_picture = self._surepy_entity.icon
self._attr_unit_of_measurement = MASS_GRAMS
@property
def state(self) -> float | None:
"""Return the total remaining food."""
if feeder := cast(SureFeeder, self._coordinator.data[self._id]):
return int(feeder.total_weight) if feeder.total_weight else None
class Battery(SurePetcareSensor):
"""Sure Petcare Flap."""
def __init__(
self,
coordinator,
_id: int,
spc: SurePetcareAPI,
voltage_full: float,
voltage_low: float,
):
super().__init__(coordinator, _id, spc)
self._surepy_entity: SurepyDevice
self._attr_name = f"{self._attr_name} Battery Level"
self.voltage_low = voltage_low
self.voltage_full = voltage_full
self._attr_unit_of_measurement = PERCENTAGE
self._attr_device_class = DEVICE_CLASS_BATTERY
self._attr_unique_id = (
f"{self._surepy_entity.household_id}-{self._surepy_entity.id}-battery"
)
@property
def state(self) -> int | None:
"""Return battery level in percent."""
if battery := cast(SurepyDevice, self._coordinator.data[self._id]):
self._surepy_entity = battery
battery_level = battery.calculate_battery_level(
voltage_full=self.voltage_full, voltage_low=self.voltage_low
)
            # return battery level between 0 and 100
return battery_level
@property
def extra_state_attributes(self) -> dict[str, Any]:
"""Return the additional attrs."""
attrs = {}
if (device := cast(SurepyDevice, self._coordinator.data[self._id])) and (
state := device.raw_data().get("status")
):
self._surepy_entity = device
voltage = float(state["battery"])
attrs = {
"battery_level": device.battery_level,
ATTR_VOLTAGE: f"{voltage:.2f}",
f"{ATTR_VOLTAGE}_per_battery": f"{voltage / 4:.2f}",
}
return attrs
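# Illustrative shape of the attributes produced by Battery.extra_state_attributes
# (values are invented for the example; ATTR_VOLTAGE is Home Assistant's "voltage"
# constant, and the per-battery figure assumes the 4-cell division used above):
#
#     {
#         "battery_level": 78,
#         "voltage": "5.20",
#         "voltage_per_battery": "1.30",
#     }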
|
"""Support for Sure PetCare Flaps/Pets sensors."""
from __future__ import annotations
from typing import Any, cast
from homeassistant.components.sensor import SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_VOLTAGE,
DEVICE_CLASS_BATTERY,
MASS_GRAMS,
PERCENTAGE,
VOLUME_MILLILITERS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from surepy.entities import SurepyEntity
from surepy.entities.devices import (
Feeder as SureFeeder,
FeederBowl as SureFeederBowl,
Felaqua as SureFelaqua,
Flap as SureFlap,
SurepyDevice,
)
from surepy.enums import EntityType, LockState
# pylint: disable=relative-beyond-top-level
from . import SurePetcareAPI
from .const import ATTR_VOLTAGE_FULL, ATTR_VOLTAGE_LOW, DOMAIN, SPC, SURE_MANUFACTURER
PARALLEL_UPDATES = 2
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigEntry,
async_add_entities: Any,
discovery_info: Any = None,
) -> None:
"""Set up Sure PetCare sensor platform."""
await async_setup_entry(hass, config, async_add_entities)
async def async_setup_entry(
hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: Any
) -> None:
"""Set up config entry Sure PetCare Flaps sensors."""
entities: list[Flap | Felaqua | Feeder | FeederBowl | Battery] = []
spc: SurePetcareAPI = hass.data[DOMAIN][SPC]
for surepy_entity in spc.coordinator.data.values():
if surepy_entity.type in [
EntityType.CAT_FLAP,
EntityType.PET_FLAP,
]:
entities.append(Flap(spc.coordinator, surepy_entity.id, spc))
elif surepy_entity.type == EntityType.FELAQUA:
entities.append(Felaqua(spc.coordinator, surepy_entity.id, spc))
elif surepy_entity.type == EntityType.FEEDER:
for bowl in surepy_entity.bowls.values():
entities.append(
FeederBowl(spc.coordinator, surepy_entity.id, spc, bowl.raw_data())
)
entities.append(Feeder(spc.coordinator, surepy_entity.id, spc))
if surepy_entity.type in [
EntityType.CAT_FLAP,
EntityType.PET_FLAP,
EntityType.FEEDER,
EntityType.FELAQUA,
]:
voltage_batteries_full = cast(
float, config_entry.options.get(ATTR_VOLTAGE_FULL)
)
voltage_batteries_low = cast(
float, config_entry.options.get(ATTR_VOLTAGE_LOW)
)
entities.append(
Battery(
spc.coordinator,
surepy_entity.id,
spc,
voltage_full=voltage_batteries_full,
voltage_low=voltage_batteries_low,
)
)
async_add_entities(entities)
class SurePetcareSensor(CoordinatorEntity, SensorEntity):
"""A binary sensor implementation for Sure Petcare Entities."""
_attr_should_poll = False
def __init__(self, coordinator, _id: int, spc: SurePetcareAPI):
"""Initialize a Sure Petcare sensor."""
super().__init__(coordinator)
self._id = _id
self._spc: SurePetcareAPI = spc
self._coordinator = coordinator
self._surepy_entity: SurepyEntity = self._coordinator.data[_id]
self._state: dict[str, Any] = self._surepy_entity.raw_data()["status"]
self._attr_available = bool(self._state)
self._attr_unique_id = f"{self._surepy_entity.household_id}-{self._id}"
self._attr_extra_state_attributes = (
{**self._surepy_entity.raw_data()} if self._state else {}
)
self._attr_name: str = (
f"{self._surepy_entity.type.name.replace('_', ' ').title()} "
f"{self._surepy_entity.name.capitalize()}"
)
@property
def device_info(self):
device = {}
try:
model = f"{self._surepy_entity.type.name.replace('_', ' ').title()}"
if serial := self._surepy_entity.raw_data().get("serial_number"):
model = f"{model} ({serial})"
elif mac_address := self._surepy_entity.raw_data().get("mac_address"):
model = f"{model} ({mac_address})"
elif tag_id := self._surepy_entity.raw_data().get("tag_id"):
model = f"{model} ({tag_id})"
device = {
"identifiers": {(DOMAIN, self._id)},
"name": self._surepy_entity.name.capitalize(),
"manufacturer": SURE_MANUFACTURER,
"model": model,
}
if self._state:
versions = self._state.get("version", {})
if dev_fw_version := versions.get("device", {}).get("firmware"):
device["sw_version"] = dev_fw_version
if (lcd_version := versions.get("lcd", {})) and (
rf_version := versions.get("rf", {})
):
device["sw_version"] = (
f"lcd: {lcd_version.get('version', lcd_version)['firmware']} | "
f"fw: {rf_version.get('version', rf_version)['firmware']}"
)
except AttributeError:
pass
return device
class Flap(SurePetcareSensor):
"""Sure Petcare Flap."""
def __init__(self, coordinator, _id: int, spc: SurePetcareAPI) -> None:
super().__init__(coordinator, _id, spc)
self._surepy_entity: SureFlap
self._attr_entity_picture = self._surepy_entity.icon
self._attr_unit_of_measurement = None
if self._state:
self._attr_extra_state_attributes = {
"learn_mode": bool(self._state["learn_mode"]),
**self._surepy_entity.raw_data(),
}
if locking := self._state.get("locking"):
self._attr_state = LockState(locking["mode"]).name.casefold()
@property
def state(self) -> str | None:
"""Return battery level in percent."""
if (
state := cast(SureFlap, self._coordinator.data[self._id])
.raw_data()
.get("status")
):
return LockState(state["locking"]["mode"]).name.casefold()
class Felaqua(SurePetcareSensor):
"""Sure Petcare Felaqua."""
def __init__(self, coordinator, _id: int, spc: SurePetcareAPI):
super().__init__(coordinator, _id, spc)
self._surepy_entity: SureFelaqua
self._attr_entity_picture = self._surepy_entity.icon
self._attr_unit_of_measurement = VOLUME_MILLILITERS
@property
def state(self) -> float | None:
"""Return the remaining water."""
if felaqua := cast(SureFelaqua, self._coordinator.data[self._id]):
return int(felaqua.water_remaining) if felaqua.water_remaining else None
class FeederBowl(SurePetcareSensor):
"""Sure Petcare Feeder Bowl."""
def __init__(
self,
coordinator,
_id: int,
spc: SurePetcareAPI,
bowl_data: dict[str, int | str],
):
"""Initialize a Bowl sensor."""
super().__init__(coordinator, _id, spc)
self.feeder_id = _id
self.bowl_id = int(bowl_data["index"])
self._id = int(f"{_id}{str(self.bowl_id)}")
self._spc: SurePetcareAPI = spc
self._surepy_feeder_entity: SurepyEntity = self._coordinator.data[_id]
self._surepy_entity: SureFeederBowl = self._coordinator.data[_id].bowls[
self.bowl_id
]
self._state: dict[str, Any] = bowl_data
# https://github.com/PyCQA/pylint/issues/2062
# pylint: disable=no-member
self._attr_name = (
f"{EntityType.FEEDER.name.replace('_', ' ').title()} "
f"{self._surepy_entity.name.capitalize()}"
)
self._attr_icon = "mdi:bowl"
self._attr_state = int(self._surepy_entity.weight)
self._attr_unique_id = (
f"{self._surepy_feeder_entity.household_id}-{self.feeder_id}-{self.bowl_id}"
)
self._attr_unit_of_measurement = MASS_GRAMS
@property
def state(self) -> float | None:
"""Return the remaining water."""
if (feeder := cast(SureFeeder, self._coordinator.data[self.feeder_id])) and (
weight := feeder.bowls[self.bowl_id].weight
):
return int(weight) if weight and weight > 0 else None
class Feeder(SurePetcareSensor):
"""Sure Petcare Feeder."""
def __init__(self, coordinator, _id: int, spc: SurePetcareAPI):
super().__init__(coordinator, _id, spc)
self._surepy_entity: SureFeeder
self._attr_entity_picture = self._surepy_entity.icon
self._attr_unit_of_measurement = MASS_GRAMS
@property
def state(self) -> float | None:
"""Return the total remaining food."""
if feeder := cast(SureFeeder, self._coordinator.data[self._id]):
return int(feeder.total_weight) if feeder.total_weight else None
class Battery(SurePetcareSensor):
"""Sure Petcare Flap."""
def __init__(
self,
coordinator,
_id: int,
spc: SurePetcareAPI,
voltage_full: float,
voltage_low: float,
):
super().__init__(coordinator, _id, spc)
self._surepy_entity: SurepyDevice
self._attr_name = f"{self._attr_name} Battery Level"
self.voltage_low = voltage_low
self.voltage_full = voltage_full
self._attr_unit_of_measurement = PERCENTAGE
self._attr_device_class = DEVICE_CLASS_BATTERY
self._attr_unique_id = (
f"{self._surepy_entity.household_id}-{self._surepy_entity.id}-battery"
)
@property
def state(self) -> int | None:
"""Return battery level in percent."""
if battery := cast(SurepyDevice, self._coordinator.data[self._id]):
self._surepy_entity = battery
battery_level = battery.calculate_battery_level(
voltage_full=self.voltage_full, voltage_low=self.voltage_low
)
            # return battery level between 0 and 100
return battery_level
@property
def extra_state_attributes(self) -> dict[str, Any]:
"""Return the additional attrs."""
attrs = {}
if (device := cast(SurepyDevice, self._coordinator.data[self._id])) and (
state := device.raw_data().get("status")
):
self._surepy_entity = device
voltage = float(state["battery"])
attrs = {
"battery_level": device.battery_level,
ATTR_VOLTAGE: f"{voltage:.2f}",
f"{ATTR_VOLTAGE}_per_battery": f"{voltage / 4:.2f}",
}
return attrs
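# --- Illustrative sketch (not part of the integration) ----------------------
# Battery.state above delegates the percentage calculation to surepy's
# SurepyDevice.calculate_battery_level(); the helper below is only a rough,
# hypothetical stand-in (clamped linear interpolation between voltage_low and
# voltage_full) meant to illustrate the kind of mapping involved.
def _approx_battery_percent(voltage: float, voltage_low: float, voltage_full: float) -> int:
    """Map a measured battery voltage onto 0-100 % by clamped linear interpolation."""
    span = voltage_full - voltage_low
    if span <= 0:
        return 0
    return max(0, min(100, round((voltage - voltage_low) / span * 100)))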
|
'''
Developed by Abhijith Boppe - linkedin.com/in/abhijith-boppe/
'''
import socket
import ssl
import time
data_maxLength = 65535
fields_maxLength =1024
sock = ''
device_id = ''
device_key = ''
time_stamps = []
def connectionSet(host, port, id_, key, Encrypt=1, cert_path=None):
global sock, device_id, device_key, time_stamps
device_id = id_
device_key = key
time_stamps = []
sock = socket.create_connection((host, port))
if Encrypt == 1:
ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT).load_verify_locations(cert_path)
sock = ssl.wrap_socket(sock, keyfile=None, certfile=None, server_side=False, cert_reqs=ssl.CERT_NONE, ssl_version=ssl.PROTOCOL_SSLv23)
sock.settimeout(1)
def chkTime(server_time, device_time):
"""
Check if the time matches the server time and
    to make sure there are no reused timestamps (no replay attacks)
"""
global time_stamps
    time_drop_max = 3  # packets whose timestamp differs by more than time_drop_max seconds are rejected
device_time = float(device_time)
server_time = float(server_time)
if(server_time in time_stamps):
raise Exception(f"ERROR: Replay attack observer. Time stamps:{time_stamps}, Replayed time: {server_time}")
return False
else:
if len(time_stamps) < 100: # if 100 req in less than 30sec
time_diff = abs(device_time - server_time)
if len(time_stamps) > 1: # to remove old time stamps (to reduce memory usage)
if (abs(time_stamps[-1] - server_time) > time_drop_max):
time_stamps = []
if (time_diff > time_drop_max):
return 0
elif (time_diff < time_drop_max):
time_stamps.append(server_time)
return 1
else:
raise Exception(
"ERROR: DOS attack more than 100 requests from server in 30sec")
def recvData():
time_now = f'{time.time():.4f}'
try:
# 65535 max data (including headers)
data = sock.recv(data_maxLength)
except socket.timeout as _:
data = b''
pass
except Exception as _:
raise Exception("socket closed/refused by server")
data = data.decode()
if not data:
return ''
else:
        data = data.split('|#|')  # split data at delimiter
while '' in data:
data.remove('')
if data[0]: # clear the remaining queue/buffer and read only first element/data
data = data[0]
# split headers and data
fields, data = data.split("\r\n\r\n", 1)
fields, data = fields.strip() if len(
fields) < fields_maxLength else 0, data.strip() if len(data) < (data_maxLength-3000) else ''
headers = {}
for field in fields.split('\r\n'):
# split each line by http field name and value
key, value = field.split(':')
headers[key] = value
if len(headers) > 10:
break
if len(headers) != 5 or len(data) < 5:
raise Exception("ERROR: Header length issue ")
else:
if(headers['IOT'] == '1.1'):
time_chk = chkTime(headers['TIME'], time_now)
if(time_chk):
return data
else:
raise Exception(
f"ERROR: Incorrect time stamp. server time {headers["TIME"]} client time {time_now}")
else:
raise Exception(
f"ERROR: Incorrect IOT version detected {headers["IOT"]}")
def _headers():
time_now = f'{time.time():.4f}'
headers = '''IOT:1.1
DATE:12/12/2019
TIME:{time_now}
DEVICE:{device_id}
KEY:{device_key}
'''.format(time_now=time_now, device_id= device_id, device_key=device_key)
return headers
def sendData(data):
if len(data) > 5 and len(data) < 60000:
try:
headers = _headers()
data = headers.replace('\n','\r\n') + data.replace('|#|','') + '|#|'
sock.send(data.encode())
except socket.timeout as e:
raise Exception("Socket time out")
except Exception as e:
raise Exception("Socket closed by server")
# ConnectionResetError(10054, 'An existing connection was forcibly closed by the remote host', None, 10054, None)
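# --- Illustrative usage only; host, port, device id/key and certificate are placeholders ---
if __name__ == '__main__':
    connectionSet('192.0.2.10', 8443, id_='device01', key='example-key',
                  Encrypt=1, cert_path='server.pem')
    sendData('{"temperature": 21.5}')
    while True:
        reply = recvData()  # returns '' when the 1 second socket timeout expires
        if reply:
            print('server said:', reply)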
|
'''
Developed by Abhijith Boppe - linkedin.com/in/abhijith-boppe/
'''
import socket
import ssl
import time
data_maxLength = 65535
fields_maxLength =1024
sock = ''
device_id = ''
device_key = ''
time_stamps = []
def connectionSet(host, port, id_, key, Encrypt=1, cert_path=None):
global sock, device_id, device_key, time_stamps
device_id = id_
device_key = key
time_stamps = []
sock = socket.create_connection((host, port))
if Encrypt == 1:
ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT).load_verify_locations(cert_path)
sock = ssl.wrap_socket(sock, keyfile=None, certfile=None, server_side=False, cert_reqs=ssl.CERT_NONE, ssl_version=ssl.PROTOCOL_SSLv23)
sock.settimeout(1)
def chkTime(server_time, device_time):
"""
Check if the time matches the server time and
    to make sure there are no reused timestamps (no replay attacks)
"""
global time_stamps
    time_drop_max = 3  # packets whose timestamp differs by more than time_drop_max seconds are rejected
device_time = float(device_time)
server_time = float(server_time)
if(server_time in time_stamps):
raise Exception(f"ERROR: Replay attack observer. Time stamps:{time_stamps}, Replayed time: {server_time}")
return False
else:
if len(time_stamps) < 100: # if 100 req in less than 30sec
time_diff = abs(device_time - server_time)
if len(time_stamps) > 1: # to remove old time stamps (to reduce memory usage)
if (abs(time_stamps[-1] - server_time) > time_drop_max):
time_stamps = []
if (time_diff > time_drop_max):
return 0
elif (time_diff < time_drop_max):
time_stamps.append(server_time)
return 1
else:
raise Exception(
"ERROR: DOS attack more than 100 requests from server in 30sec")
def recvData():
time_now = f'{time.time():.4f}'
try:
# 65535 max data (including headers)
data = sock.recv(data_maxLength)
except socket.timeout as _:
data = b''
pass
except Exception as _:
raise Exception("socket closed/refused by server")
data = data.decode()
if not data:
return ''
else:
        data = data.split('|#|')  # split data at delimiter
while '' in data:
data.remove('')
if data[0]: # clear the remaining queue/buffer and read only first element/data
data = data[0]
# split headers and data
fields, data = data.split("\r\n\r\n", 1)
fields, data = fields.strip() if len(
fields) < fields_maxLength else 0, data.strip() if len(data) < (data_maxLength-3000) else ''
headers = {}
for field in fields.split('\r\n'):
# split each line by http field name and value
key, value = field.split(':')
headers[key] = value
if len(headers) > 10:
break
if len(headers) != 5 or len(data) < 5:
raise Exception("ERROR: Header length issue ")
else:
if(headers['IOT'] == '1.1'):
time_chk = chkTime(headers['TIME'], time_now)
if(time_chk):
return data
else:
raise Exception(
f"ERROR: Incorrect time stamp. server time {headers['TIME']} client time {time_now}")
else:
raise Exception(
f"ERROR: Incorrect IOT version detected {headers['IOT']}")
def _headers():
time_now = f'{time.time():.4f}'
headers = '''IOT:1.1
DATE:12/12/2019
TIME:{time_now}
DEVICE:{device_id}
KEY:{device_key}
'''.format(time_now=time_now, device_id= device_id, device_key=device_key)
return headers
def sendData(data):
if len(data) > 5 and len(data) < 60000:
try:
headers = _headers()
data = headers.replace('\n','\r\n') + data.replace('|#|','') + '|#|'
sock.send(data.encode())
except socket.timeout as e:
raise Exception("Socket time out")
except Exception as e:
raise Exception("Socket closed by server")
# ConnectionResetError(10054, 'An existing connection was forcibly closed by the remote host', None, 10054, None)
|
import logging
import os
import uuid
from distutils import util
from pathlib import Path
import pytest
import test_infra.utils as infra_utils
from test_infra import assisted_service_api, consts, utils
qe_env = False
def is_qe_env():
return os.environ.get('NODE_ENV') == 'QE_VM'
def _get_cluster_name():
cluster_name = utils.get_env('CLUSTER_NAME', f'{consts.CLUSTER_PREFIX}')
if cluster_name == consts.CLUSTER_PREFIX:
cluster_name = cluster_name + '-' + str(uuid.uuid4())[:8]
return cluster_name
# TODO: change it
if is_qe_env():
from test_infra.controllers.node_controllers.qe_vm_controler import \
QeVmController as nodeController
qe_env = True
else:
from test_infra.controllers.node_controllers.terraform_controller import \
TerraformController as nodeController
private_ssh_key_path_default = os.path.join(os.getcwd(), "ssh_key/key") if not qe_env else \
os.path.join(str(Path.home()), ".ssh/id_rsa")
env_variables = {"ssh_public_key": utils.get_env('SSH_PUB_KEY'),
"remote_service_url": utils.get_env('REMOTE_SERVICE_URL'),
"pull_secret": utils.get_env('PULL_SECRET'),
"offline_token": utils.get_env('OFFLINE_TOKEN'),
"openshift_version": utils.get_openshift_version(),
"base_domain": utils.get_env('BASE_DOMAIN', "redhat.com"),
"num_masters": int(utils.get_env('NUM_MASTERS', consts.NUMBER_OF_MASTERS)),
"num_workers": max(2, int(utils.get_env('NUM_WORKERS', 0))),
"vip_dhcp_allocation": bool(util.strtobool(utils.get_env('VIP_DHCP_ALLOCATION'))),
"worker_memory": int(utils.get_env('WORKER_MEMORY', '8892')),
"master_memory": int(utils.get_env('MASTER_MEMORY', '16984')),
"network_mtu": utils.get_env('NETWORK_MTU', '1500'),
"worker_disk": int(utils.get_env('WORKER_DISK', '21474836480')),
"master_disk": int(utils.get_env('MASTER_DISK', '128849018880')),
"storage_pool_path": utils.get_env('STORAGE_POOL_PATH', os.path.join(os.getcwd(), "storage_pool")),
"cluster_name": _get_cluster_name(),
"private_ssh_key_path": utils.get_env('PRIVATE_KEY_PATH', private_ssh_key_path_default),
"kubeconfig_path": utils.get_env('KUBECONFIG', ''),
"log_folder": utils.get_env('LOG_FOLDER', consts.LOG_FOLDER),
"service_cidr": utils.get_env('SERVICE_CIDR', '172.30.0.0/16'),
"cluster_cidr": utils.get_env('CLUSTER_CIDR', '10.128.0.0/14'),
"host_prefix": int(utils.get_env('HOST_PREFIX', '23')),
"iso_image_type": utils.get_env('ISO_IMAGE_TYPE', consts.ImageType.FULL_ISO),
"worker_vcpu": utils.get_env('WORKER_CPU', consts.WORKER_CPU),
"master_vcpu": utils.get_env('MASTER_CPU', consts.MASTER_CPU),
"test_teardown": bool(util.strtobool(utils.get_env('TEST_TEARDOWN', 'true'))),
"namespace": utils.get_env('NAMESPACE', consts.DEFAULT_NAMESPACE),
"olm_operators": utils.get_env('OLM_OPERATORS', []),
}
cluster_mid_name = infra_utils.get_random_name()
# Tests running on terraform in parallel must each have a unique ISO file
if not qe_env:
image = utils.get_env('ISO',
os.path.join(consts.IMAGE_FOLDER, f'{env_variables['cluster_name']}-{cluster_mid_name}-'
f'installer-image.iso')).strip()
env_variables["kubeconfig_path"] = f'/tmp/test_kubeconfig_{cluster_mid_name}'
else:
image = utils.get_env('ISO',
os.path.join(consts.IMAGE_FOLDER, f'{env_variables['cluster_name']}-installer-image.iso')). \
strip()
env_variables["iso_download_path"] = image
env_variables["num_nodes"] = env_variables["num_workers"] + env_variables["num_masters"]
@pytest.fixture(scope="session")
def api_client():
logging.info('--- SETUP --- api_client\n')
yield get_api_client()
def get_api_client(offline_token=env_variables['offline_token'], **kwargs):
url = env_variables['remote_service_url']
if not url:
url = utils.get_local_assisted_service_url(
utils.get_env('PROFILE'), env_variables['namespace'], 'assisted-service', utils.get_env('DEPLOY_TARGET'))
return assisted_service_api.create_client(url, offline_token, **kwargs)
@pytest.fixture(scope="session")
def setup_node_controller():
logging.info('--- SETUP --- node controller\n')
yield nodeController
logging.info('--- TEARDOWN --- node controller\n')
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
outcome = yield
result = outcome.get_result()
setattr(item, "result_" + result.when, result)
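# --- Illustrative only: how a hypothetical test module might consume the fixtures above ---
#
# def test_node_count_matches_env(api_client, setup_node_controller):
#     # `api_client` yields an assisted-service client and `setup_node_controller`
#     # yields the controller class selected above (terraform or QE VM).
#     assert env_variables["num_nodes"] == (
#         env_variables["num_masters"] + env_variables["num_workers"]
#     )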
|
import logging
import os
import uuid
from distutils import util
from pathlib import Path
import pytest
import test_infra.utils as infra_utils
from test_infra import assisted_service_api, consts, utils
qe_env = False
def is_qe_env():
return os.environ.get('NODE_ENV') == 'QE_VM'
def _get_cluster_name():
cluster_name = utils.get_env('CLUSTER_NAME', f'{consts.CLUSTER_PREFIX}')
if cluster_name == consts.CLUSTER_PREFIX:
cluster_name = cluster_name + '-' + str(uuid.uuid4())[:8]
return cluster_name
# TODO: change it
if is_qe_env():
from test_infra.controllers.node_controllers.qe_vm_controler import \
QeVmController as nodeController
qe_env = True
else:
from test_infra.controllers.node_controllers.terraform_controller import \
TerraformController as nodeController
private_ssh_key_path_default = os.path.join(os.getcwd(), "ssh_key/key") if not qe_env else \
os.path.join(str(Path.home()), ".ssh/id_rsa")
env_variables = {"ssh_public_key": utils.get_env('SSH_PUB_KEY'),
"remote_service_url": utils.get_env('REMOTE_SERVICE_URL'),
"pull_secret": utils.get_env('PULL_SECRET'),
"offline_token": utils.get_env('OFFLINE_TOKEN'),
"openshift_version": utils.get_openshift_version(),
"base_domain": utils.get_env('BASE_DOMAIN', "redhat.com"),
"num_masters": int(utils.get_env('NUM_MASTERS', consts.NUMBER_OF_MASTERS)),
"num_workers": max(2, int(utils.get_env('NUM_WORKERS', 0))),
"vip_dhcp_allocation": bool(util.strtobool(utils.get_env('VIP_DHCP_ALLOCATION'))),
"worker_memory": int(utils.get_env('WORKER_MEMORY', '8892')),
"master_memory": int(utils.get_env('MASTER_MEMORY', '16984')),
"network_mtu": utils.get_env('NETWORK_MTU', '1500'),
"worker_disk": int(utils.get_env('WORKER_DISK', '21474836480')),
"master_disk": int(utils.get_env('MASTER_DISK', '128849018880')),
"storage_pool_path": utils.get_env('STORAGE_POOL_PATH', os.path.join(os.getcwd(), "storage_pool")),
"cluster_name": _get_cluster_name(),
"private_ssh_key_path": utils.get_env('PRIVATE_KEY_PATH', private_ssh_key_path_default),
"kubeconfig_path": utils.get_env('KUBECONFIG', ''),
"log_folder": utils.get_env('LOG_FOLDER', consts.LOG_FOLDER),
"service_cidr": utils.get_env('SERVICE_CIDR', '172.30.0.0/16'),
"cluster_cidr": utils.get_env('CLUSTER_CIDR', '10.128.0.0/14'),
"host_prefix": int(utils.get_env('HOST_PREFIX', '23')),
"iso_image_type": utils.get_env('ISO_IMAGE_TYPE', consts.ImageType.FULL_ISO),
"worker_vcpu": utils.get_env('WORKER_CPU', consts.WORKER_CPU),
"master_vcpu": utils.get_env('MASTER_CPU', consts.MASTER_CPU),
"test_teardown": bool(util.strtobool(utils.get_env('TEST_TEARDOWN', 'true'))),
"namespace": utils.get_env('NAMESPACE', consts.DEFAULT_NAMESPACE),
"olm_operators": utils.get_env('OLM_OPERATORS', []),
}
cluster_mid_name = infra_utils.get_random_name()
# Tests running on terraform in parallel must each have a unique ISO file
if not qe_env:
image = utils.get_env('ISO',
os.path.join(consts.IMAGE_FOLDER, f'{env_variables["cluster_name"]}-{cluster_mid_name}-'
f'installer-image.iso')).strip()
env_variables["kubeconfig_path"] = f'/tmp/test_kubeconfig_{cluster_mid_name}'
else:
image = utils.get_env('ISO',
os.path.join(consts.IMAGE_FOLDER, f'{env_variables["cluster_name"]}-installer-image.iso')). \
strip()
env_variables["iso_download_path"] = image
env_variables["num_nodes"] = env_variables["num_workers"] + env_variables["num_masters"]
@pytest.fixture(scope="session")
def api_client():
logging.info('--- SETUP --- api_client\n')
yield get_api_client()
def get_api_client(offline_token=env_variables['offline_token'], **kwargs):
url = env_variables['remote_service_url']
if not url:
url = utils.get_local_assisted_service_url(
utils.get_env('PROFILE'), env_variables['namespace'], 'assisted-service', utils.get_env('DEPLOY_TARGET'))
return assisted_service_api.create_client(url, offline_token, **kwargs)
@pytest.fixture(scope="session")
def setup_node_controller():
logging.info('--- SETUP --- node controller\n')
yield nodeController
logging.info('--- TEARDOWN --- node controller\n')
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
outcome = yield
result = outcome.get_result()
setattr(item, "result_" + result.when, result)
|
#!/usr/bin/env python
# coding: utf-8
# # Loading data
import pandas as pd
import plotly.express as px
from tqdm import tqdm
import functools
import numpy as np
from difflib import SequenceMatcher
from oauthlib.oauth2 import BackendApplicationClient
from requests_oauthlib import OAuth2Session
from datetime import datetime, timedelta
import pprint
import requests
import os
import getpass
import json
from queue import Queue
from threading import Thread
from time import time
import logging
import os
# caching in case of multiple calls.
@functools.lru_cache(maxsize=128)
def get_tiles(municipalityId: int) -> pd.DataFrame:
"""Fetches tile information for a municipality id.
Args:
        municipalityId: id of the municipality as defined by the federal office of statistics,
https://www.bfs.admin.ch/bfs/fr/home/bases-statistiques/repertoire-officiel-communes-suisse.assetdetail.11467406.html
Return:
A dataframe containing the following columns:
        [tileID, ll_lat, ll_lon, ur_lat, ur_lon]
tileID: corresponds to a unique ID as defined in the Swisscom FAQ page.
ll_lon: longitude coordinate of the lower left corner of the tile.
ll_lat: latitude coordinate of the lower left corner of the tile.
ur_lon: longitude coordinate of the upper right corner of the tile.
ur_lat: latitude coordinate of the upper right corner of the tile.
    If municipalityId is invalid, an error message is printed and an empty DataFrame is returned.
"""
api_request = (
BASE_URL
+ f'/grids/municipalities/{municipalityId}'
)
data = oauth.get(api_request, headers=headers).json()
if(data.get('status') == None):
tileID = [t['tileId'] for t in data['tiles']]
ll_lon = [t['ll']['x'] for t in data['tiles']]
ll_lat= [t['ll']['y'] for t in data['tiles']]
ur_lon = [t['ur']['x'] for t in data['tiles']]
ur_lat = [t['ur']['y'] for t in data['tiles']]
else:
print(f'get_tiles: failed with status code {data.get('status')}. {data.get('message')}')
return pd.DataFrame(data={'tileID': [], 'll_lat': [], 'll_lon': [], 'ur_lat': [], 'ur_lon': []})
return pd.DataFrame(data={'tileID': tileID, 'll_lat': ll_lat, 'll_lon': ll_lon, 'ur_lat': ur_lat, 'ur_lon': ur_lon})
def get_municipalityID(name: str) -> np.array(int):
"""Converts a municipality name to ID
Args:
name of municipality.
Returns:
An array containing all the municipality ID's corresponding to the name.
        If the name is invalid, an empty array is returned.
"""
return commune.loc[commune.GDENAME == name].GDENR.to_numpy()
def visualize_coordinates(df: pd.DataFrame, latitude: str, longitude: str) -> None :
"""Visualizes coordinates in dataframe on map
    Retrieves the columns named latitude and longitude and visualizes them on a map.
Args:
df: A dataframe containing the coordinates.
latitude: String key of the column in the dataframe containing the latitude.
longitude: String key of the column in the dataframe containing the longitude.
"""
fig = px.scatter_mapbox(df, lat=latitude, lon=longitude,
color_continuous_scale=px.colors.cyclical.IceFire, size_max=15, zoom=10,
mapbox_style="carto-positron")
fig.show()
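# Illustrative usage (requires valid API credentials and the `commune` table to be
# loaded; "Zürich" is just an example municipality):
#   tiles = get_tiles(get_municipalityID("Zürich")[0])
#   visualize_coordinates(tiles, latitude="ll_lat", longitude="ll_lon")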
def get_all_tiles_switzerland() -> pd.DataFrame:
"""Fetches the tile information for all the tiles in Switzerland.
Returns:
        A DataFrame containing the tile information for every tile in Switzerland.
The format of the DataFrame is the same as the return of get_tiles()
"""
tiles = get_tiles(commune.GDENR.unique()[0])
for c in tqdm(commune.GDENR.unique().tolist()):
tiles = tiles.append(get_tiles(c))
return tiles
def get_daily_demographics(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0) ):
"""Fetches daily demographics
Fetches the daily demographics, age distribution, of the tiles.
Args:
        tiles: Array of tile ids that will be used to query demographic data.
day: date of the data to be fetched.
Returns:
A dataframe containing as a key the tileID and as columns ageDistribution and the maleProportion
+----------+-----------------------+---------------------+
| | ageDistribution | maleProportion |
+----------+-----------------------+---------------------+
| 44554639 | NaN | 0.49828359484672546 |
+----------+-----------------------+---------------------+
| 44271906 | [0.21413850784301758, | 0.493218 |
| | 0.27691012620925903, | |
| | 0.37422287464141846, | |
| | 0.13472850620746613] | |
+----------+-----------------------+---------------------+
In the example above tile 44554639 does not have any age distribution data.
    The data is k-anonymized. Therefore, if some tiles are missing data it
means that the data is not available. To find out more about demographics visit the Heatmap FAQ.
"""
dates = [(day + timedelta(hours=delta)) for delta in range(24)]
date2score = dict()
for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-demographics/daily/{day.isoformat().split('T')[0]}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
data = oauth.get(api_request, headers=headers).json()
for t in data.get("tiles", []):
if date2score.get(t['tileId']) == None:
date2score[t['tileId']] = dict()
date2score[t['tileId']] = {"ageDistribution": t.get("ageDistribution"),"maleProportion": t.get("maleProportion")}
return pd.DataFrame.from_dict(date2score).transpose()
def get_hourly_demographics_dataframe(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0)):
"""Fetches hourly demographics of age categories for 24 hours
Fetches the hourly demographics, age distribution, of the tiles.
Age categories are the following 0 - 19, 20 - 39, 40 - 64, >64
Args:
        tiles: Array of tile ids that will be used to query demographic data.
day: date of the data to be fetched.
Returns:
        DataFrame containing the demographics. The names
        of the columns are:
[age_cat, age_distribution, male_proportion]
+----------+---------------------+---------+------------------+-----------------+
| | | age_cat | age_distribution | male_proportion |
+----------+---------------------+---------+------------------+-----------------+
| tileID | time | | | |
+----------+---------------------+---------+------------------+-----------------+
| 44394309 | 2020-01-27T00:00:00 | NaN | NaN | 0.474876 |
+----------+---------------------+---------+------------------+-----------------+
| | 2020-01-27T01:00:00 | NaN | NaN | 0.483166 |
+----------+---------------------+---------+------------------+-----------------+
| | ... | | | |
+----------+---------------------+---------+------------------+-----------------+
| 44290729 | 2020-01-27T06:00:00 | 0.0 | 0.192352 | 0.497038 |
+----------+---------------------+---------+------------------+-----------------+
| | 2020-01-27T06:00:00 | 1.0 | 0.269984 | 0.497038 |
+----------+---------------------+---------+------------------+-----------------+
| | 2020-01-27T06:00:00 | 2.0 | 0.363481 | 0.497038 |
+----------+---------------------+---------+------------------+-----------------+
| | 2020-01-27T06:00:00 | 3.0 | 0.174183 | 0.497038 |
+----------+---------------------+---------+------------------+-----------------+
    The data is k-anonymized. Therefore, if some tiles are not present in the output dataframe it
means that the data is not available. To find out more about demographics visit the Heatmap FAQ.
"""
def get_hourly_demographics(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0) ):
"""Fetches hourly male proportion and age categories for 24 hours
Args:
        tiles: Array of tile ids that will be used to query demographic data.
day: date of the data to be fetched.
Returns:
Returns a dictionary with as a key the tileID, and as a value an object that is as follows:
{tileID: {dateTime:{ "ageDistribution": [0-19, 20-39, 40-64, 64+], "maleProportion": value},
{dateTime2: ...}}}
26994514: {'2020-01-27T00:00:00': {'ageDistribution': [0.1925136297941208,
0.2758632302284241,
0.362215131521225,
0.16940800845623016],
'maleProportion': 0.4727686941623688},
'2020-01-27T01:00:00': {'ageDistribution': None,
'maleProportion': 0.4896690547466278},
'2020-01-27T02:00:00': {'ageDistribution': None,
'maleProportion': 0.48882684111595154},
        The data is k-anonymized. Therefore, if some values are None it means that no data was available
To find out more about demographics visit the Heatmap FAQ.
"""
dates = [(day + timedelta(hours=delta)) for delta in range(24)]
date2score = dict()
for dt in tqdm(dates, desc="get_hourly_demographics: hours", leave=True):
for tiles_subset in [tiles[i:i + 100] for i in range(0, len(tiles), 100)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-demographics/hourly/{dt.isoformat()}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
data = oauth.get(api_request, headers=headers).json()
for t in data.get("tiles", []):
if date2score.get(t['tileId']) == None:
date2score[t['tileId']] = dict()
date2score.get(t['tileId'])[dt.isoformat()] = {"ageDistribution": t.get("ageDistribution"),"maleProportion": t.get("maleProportion")}
return date2score
data = get_hourly_demographics(tiles, day)
tile_id = []
time_data = []
age_distribution = []
age_cat = []
male_proportion = []
for i in data:
for time in data[i]:
if data[i][time].get("ageDistribution") != None:
for (idx,a) in enumerate(data[i][time].get("ageDistribution", [])):
age_cat.append(idx)
age_distribution.append(a)
tile_id.append(i)
time_data.append(time)
male_proportion.append(data[i][time].get("maleProportion"))
else:
tile_id.append(i)
time_data.append(time)
age_distribution.append(None)
male_proportion.append(data[i][time].get("maleProportion"))
age_cat.append(None)
return pd.DataFrame(data={'tileID': tile_id, "age_cat": age_cat, 'age_distribution':age_distribution, "male_proportion": male_proportion, 'time': time_data}).set_index(['tileID', 'time'])
def get_daily_density(tiles: np.array(int), day=datetime(year=2020, month=1, day=27)) -> pd.DataFrame:
"""Fetches the daily density of tiles.
Fetches the daily density of the tiles and creates a dataframe of the fetched data.
Args:
        tiles: Array of tile ids for which daily density data needs to be fetched.
day: Day to fetch the density data for.
Returns:
        DataFrame containing the tileId and the score. The names of the columns are:
[score]
        The identifier of the row is based on the tileID
+----------+-------+
| | score |
+----------+-------+
| tileID | |
+----------+-------+
| 44394309 | 1351 |
+----------+-------+
| 44394315 | 1103 |
+----------+-------+
| 44460297 | 875 |
+----------+-------+
| 44488589 | 1387 |
+----------+-------+
| 44498028 | 678 |
+----------+-------+
Tile with k-anonymized dwell density score. If tile not present Swisscom is
unable to provide a value due to k-anonymization. To find out more on density
scores read the Heatmap FAQ.
"""
tileID = []
score = []
for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-density/daily/{day.isoformat().split('T')[0]}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
data = oauth.get(api_request, headers=headers).json()
if data.get("tiles") != None:
for t in data["tiles"]:
tileID.append(t['tileId'])
score.append(t["score"])
return pd.DataFrame(data={'tileID': tileID, 'score':score}).set_index("tileID")
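# Illustrative usage (assumes `commune` is loaded and OAuth credentials are configured):
#   bern_tiles = get_tiles(get_municipalityID("Bern")[0])
#   density = get_daily_density(bern_tiles['tileID'].to_numpy(),
#                               day=datetime(year=2020, month=1, day=27))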
def get_hourly_density_dataframe(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0)):
"""Fetches the hourly density of tiles for 24 hours.
Fetches the hourly density of the tiles and creates a dataframe of the fetched data.
Args:
        tiles: Array of tile ids for which daily density data needs to be fetched.
day: Day to fetch the density data for.
Returns:
        DataFrame containing the tileId and the score. The names of the columns are:
[score]
        The identifier of the row is based on the [tileID, time]
+----------+---------------------+-------+
| | | score |
+----------+---------------------+-------+
| tileID | time | |
+----------+---------------------+-------+
| 44394309 | 2020-01-27T00:00:00 | 52 |
| +---------------------+-------+
| | 2020-01-27T01:00:00 | 68 |
| +---------------------+-------+
| | 2020-01-27T02:00:00 | 69 |
| +---------------------+-------+
| | 2020-01-27T03:00:00 | 69 |
| +---------------------+-------+
| | 2020-01-27T04:00:00 | 69 |
+----------+---------------------+-------+
Tile with k-anonymized dwell density score. If tile not present Swisscom is
unable to provide a value due to k-anonymization. To find out more on density
scores read the Heatmap FAQ.
"""
def get_hourly_density(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0)):
dates = [(day + timedelta(hours=delta)) for delta in range(24)]
date2score = dict()
print("getHourlyDensity")
for dt in tqdm(dates, desc="get_hourly_density: hours", leave=True):
for tiles_subset in [tiles[i:i + 100] for i in range(0, len(tiles), 100)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-density/hourly/{dt.isoformat()}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
for t in oauth.get(api_request, headers=headers).json().get("tiles",[]):
if date2score.get(t['tileId']) == None:
date2score[t['tileId']] = dict()
date2score.get(t['tileId'])[dt.isoformat()] = t['score']
return date2score
tiles_data = []
time_data = []
score = []
data = get_hourly_density(tiles, day)
for t in data:
for time in data[t]:
time_data.append(time)
tiles_data.append(t)
score.append(data[t][time])
return pd.DataFrame(data={'tileID': tiles_data, 'score':score, 'time': time_data}).set_index(['tileID', 'time'])
def fetch_data_city(city: str) -> None:
"""Fetches the data for a city if the data is not yet cashed on the computer.
"""
compression = ".xz"
folder = os.path.join(".","data")
def file_path(file_name: str) -> str:
return os.path.join(folder, file_name)
if not(os.path.exists(folder)):
os.mkdir(folder)
tiles_path = file_path(f'{city}Tiles.pkl{compression}')
hourly_dem_path = file_path(f'{city}HourlyDemographics.pkl{compression}')
hourly_density_path = file_path(f'{city}HourlyDensity.pkl{compression}')
daily_density_path = file_path(f'{city}DensityDaily.pkl{compression}')
daily_demographics_path = file_path(f'{city}DemographicsDaily.pkl{compression}')
if not(os.path.isfile(tiles_path)):
tiles = get_tiles(get_municipalityID(city)[0])
tiles.to_pickle(tiles_path)
else:
tiles = pd.read_pickle(tiles_path)
if not(os.path.isfile(hourly_dem_path)):
hourly_dem = get_hourly_demographics_dataframe(tiles['tileID'].to_numpy())
hourly_dem.to_pickle(hourly_dem_path)
if not(os.path.isfile(hourly_density_path)):
hourly_dens = get_hourly_density_dataframe(tiles['tileID'].to_numpy())
hourly_dens.to_pickle(hourly_density_path)
if not(os.path.isfile(daily_density_path)):
get_daily_density(tiles['tileID'].to_numpy()).to_pickle(daily_density_path)
if not(os.path.isfile(daily_demographics_path)):
get_daily_demographics(tiles['tileID'].to_numpy()).to_pickle(daily_demographics_path)
def clean_cities_list(cities: [str]) -> [str]:
"""Cleans the list of cities by removing all the cities that are not found in the
    official list of cities provided by the Federal Statistics Office.
Args:
List of cities to check and clean.
Return:
List containing a subset of the input list such that all elements are valid.
"""
invalid_cities = []
    # validate that the city names are valid
for c in cities:
if len(commune.loc[commune.GDENAME == c].GDENR.to_numpy()) == 0:
city = []
sim_value = []
for f in commune.GDENAME:
r = SequenceMatcher(None, c, f).ratio()
if r > 0.5:
city.append(f)
sim_value.append(r)
d = pd.DataFrame(data={"city": city, "value": sim_value})
potential_cities = d.sort_values("value", ascending=False).head(5).city.to_numpy()
print(f"City nammed: {c} cannot be found in official records. Did you mean: {potential_cities} ? {c} will be ignored.")
invalid_cities.append(c)
return [c for c in cities if not(c in invalid_cities)]
# Multithread fetch implementation
class DownloadWorker(Thread):
def __init__(self, queue):
Thread.__init__(self)
self.queue = queue
def run(self):
while True:
            # Get the next city name from the queue
city = self.queue.get()
if city == -1:
self.queue.put(-1)
break
try:
fetch_data_city(city)
finally:
self.queue.task_done()
def download_commune_excel() -> None:
'''
Downloads the excel spreadsheet from the Swiss Federal Statistical Office that maps the town name to unique ID
'''
print('Beginning commune file download with requests')
folder = os.path.join(".","data")
if not(os.path.exists(folder)):
os.mkdir(folder)
url = 'https://www.bfs.admin.ch/bfsstatic/dam/assets/11467406/master'
r = requests.get(url)
with open(os.path.join(".", "data", 'commune.xlsx'), 'wb') as f:
f.write(r.content)
print("End of commune file download")
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
BASE_URL = "https://api.swisscom.com/layer/heatmaps/demo"
TOKEN_URL = "https://consent.swisscom.com/o/oauth2/token"
MAX_NB_TILES_REQUEST = 100
headers = {"scs-version": "2"}
client_id = "" # customer key in the Swisscom digital market place
client_secret = "" # customer secret in the Swisscom digital market place
if client_id == "":
client_id = os.environ.get("CLIENT_ID", "")
if client_id == "":
client_id = input("Enter MIP Client ID: ")
os.environ["CLIENT_ID"] = client_id
if client_secret == "":
client_secret = os.environ.get("CLIENT_SECRET", "")
if client_secret == "":
client_secret = getpass.getpass('Enter MIP client secret:')
os.environ["CLIENT_SECRET"] = client_secret
# Fetch an access token
client = BackendApplicationClient(client_id=client_id)
oauth = OAuth2Session(client=client)
oauth.fetch_token(token_url=TOKEN_URL, client_id=client_id,
client_secret=client_secret)
def main():
ts = time()
if not(os.path.exists(os.path.join(".", "data", 'commune.xlsx'))):
download_commune_excel()
global commune
commune = pd.read_excel(os.path.join(".", "data", 'commune.xlsx'), sheet_name='GDE')
cities = ["Saas-Fee", "Arosa", "Bulle", "Laax","Belp" ,"Saanen","Adelboden", "Andermatt", "Davos", "Bulle", "Bern", "Genève", "Lausanne", "Zürich", "Neuchâtel", "Sion", "St. Gallen", "Appenzell", "Solothurn", "Zug", "Fribourg", "Luzern", "Ecublens (VD)", "Kloten", "Le Grand-Saconnex", "Nyon", "Zermatt", "Lugano"]
cities = clean_cities_list(cities)
queue = Queue()
for x in range(2):
worker = DownloadWorker(queue)
        worker.daemon = True
worker.start()
for c in cities:
logger.info('Queueing {}'.format(c))
queue.put(c)
queue.join()
queue.put(-1)
logger.info('Took %s', time() - ts)
list_of_cities_path = os.path.join(".", "data","CityList.json")
cityList=[]
if os.path.isfile(list_of_cities_path):
with open(list_of_cities_path, "r") as filehandle:
cityList = json.load(filehandle)
with open(list_of_cities_path, "w") as filehandle:
for city in cities:
if not(city in cityList):
cityList.append(city)
json.dump(cityList, filehandle)
if __name__ == "__main__":
main()
# Other functions not currently used
def get_daily_demographics_male(tiles: np.array(int), day=datetime(year=2020, month=1, day=27)) -> pd.DataFrame:
"""Fetches Daily demographics.
Fetches the daily male proportion of the tiles and creates a dataframe of the fetched data.
Args:
        tiles: Array of tile ids that will be used to query demographic data.
day: date of the data to be fetched.
Returns:
        DataFrame containing the tileId and the proportion of males. The names of the columns are:
[tileID, maleProportion]
    The data is k-anonymized. Therefore, if some tiles are not present in the output dataframe it
means that the data is not available. To find out more about demographics visit the Heatmap FAQ.
"""
tileID = []
maleProportion = []
for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-demographics/daily/{day.isoformat().split('T')[0]}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
data = oauth.get(api_request, headers=headers).json()
if data.get("tiles") != None:
for t in data["tiles"]:
if t.get("maleProportion") != None:
tileID.append(t['tileId'])
maleProportion.append(t["maleProportion"])
return pd.DataFrame(data={'tileID': tileID, 'maleProportion':maleProportion})
def get_daily_demographics_age(tiles: np.array(int), day=datetime(year=2020, month=1, day=27)) -> pd.DataFrame:
"""Fetches daily demographics of age categories
Fetches the daily demographics, age distribution, of the tiles and creates a dataframe of the fetched data.
Args:
        tiles: Array of tile ids that will be used to query demographic data.
day: date of the data to be fetched.
Returns:
        DataFrame containing the tileId and an array of values corresponding to the age distribution. The names
        of the columns are:
[tileID, ageDistribution]
    The data is k-anonymized. Therefore, if some tiles are not present in the output dataframe it
means that the data is not available. To find out more about demographics visit the Heatmap FAQ.
"""
tileID = []
ageDistribution = []
for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-demographics/daily/{day.isoformat().split('T')[0]}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
data = oauth.get(api_request, headers=headers).json()
for t in data.get("tiles", []):
if t.get("ageDistribution") != None:
tileID.append(t['tileId'])
ageDistribution.append(t["ageDistribution"])
return pd.DataFrame(data={'tileID': tileID, 'ageDistribution':ageDistribution})
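# Illustrative follow-up: reading back the files cached by fetch_data_city()
# (file names follow the convention used there; the city name is a placeholder):
#   tiles = pd.read_pickle(os.path.join(".", "data", "BernTiles.pkl.xz"))
#   daily_density = pd.read_pickle(os.path.join(".", "data", "BernDensityDaily.pkl.xz"))
#   visualize_coordinates(tiles, latitude="ll_lat", longitude="ll_lon")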
|
#!/usr/bin/env python
# coding: utf-8
# # Loading data
import pandas as pd
import plotly.express as px
from tqdm import tqdm
import functools
import numpy as np
from difflib import SequenceMatcher
from oauthlib.oauth2 import BackendApplicationClient
from requests_oauthlib import OAuth2Session
from datetime import datetime, timedelta
import pprint
import requests
import os
import getpass
import json
from queue import Queue
from threading import Thread
from time import time
import logging
import os
# caching in case of multiple calls.
@functools.lru_cache(maxsize=128)
def get_tiles(municipalityId: int) -> pd.DataFrame:
"""Fetches tile information for a municipality id.
Args:
municipalityId: id of the municipality as defined in by the federal office of statistics,
https://www.bfs.admin.ch/bfs/fr/home/bases-statistiques/repertoire-officiel-communes-suisse.assetdetail.11467406.html
Return:
A dataframe containing the following columns:
        [tileID, ll_lat, ll_lon, ur_lat, ur_lon]
tileID: corresponds to a unique ID as defined in the Swisscom FAQ page.
ll_lon: longitude coordinate of the lower left corner of the tile.
ll_lat: latitude coordinate of the lower left corner of the tile.
ur_lon: longitude coordinate of the upper right corner of the tile.
ur_lat: latitude coordinate of the upper right corner of the tile.
If municipalityId is invalid will print an error message and return an empty DataFrame
"""
api_request = (
BASE_URL
+ f'/grids/municipalities/{municipalityId}'
)
data = oauth.get(api_request, headers=headers).json()
if(data.get('status') == None):
tileID = [t['tileId'] for t in data['tiles']]
ll_lon = [t['ll']['x'] for t in data['tiles']]
ll_lat= [t['ll']['y'] for t in data['tiles']]
ur_lon = [t['ur']['x'] for t in data['tiles']]
ur_lat = [t['ur']['y'] for t in data['tiles']]
else:
print(f'get_tiles: failed with status code {data.get("status")}. {data.get("message")}')
return pd.DataFrame(data={'tileID': [], 'll_lat': [], 'll_lon': [], 'ur_lat': [], 'ur_lon': []})
return pd.DataFrame(data={'tileID': tileID, 'll_lat': ll_lat, 'll_lon': ll_lon, 'ur_lat': ur_lat, 'ur_lon': ur_lon})
def get_municipalityID(name: str) -> np.array(int):
"""Converts a municipality name to ID
Args:
name of municipality.
Returns:
An array containing all the municipality ID's corresponding to the name.
If the name invalid will return an empty array.
"""
return commune.loc[commune.GDENAME == name].GDENR.to_numpy()
def visualize_coordinates(df: pd.DataFrame, latitude: str, longitude: str) -> None :
"""Visualizes coordinates in dataframe on map
Retrieves columns with name latitude and logitude and visualizes it on a map.
Args:
df: A dataframe containing the coordinates.
latitude: String key of the column in the dataframe containing the latitude.
longitude: String key of the column in the dataframe containing the longitude.
"""
fig = px.scatter_mapbox(df, lat=latitude, lon=longitude,
color_continuous_scale=px.colors.cyclical.IceFire, size_max=15, zoom=10,
mapbox_style="carto-positron")
fig.show()
def get_all_tiles_switzerland() -> pd.DataFrame:
"""Fetches the tile information for all the tiles in Switzerland.
Returns:
A Dataframe containg the tile information for every tile in switzerland.
The format of the DataFrame is the same as the return of get_tiles()
"""
tiles = get_tiles(commune.GDENR.unique()[0])
for c in tqdm(commune.GDENR.unique().tolist()):
tiles = tiles.append(get_tiles(c))
return tiles
def get_daily_demographics(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0) ):
"""Fetches daily demographics
Fetches the daily demographics, age distribution, of the tiles.
Args:
tiles: Array of tile id's, what will be used to querry demographic data.
day: date of the data to be fetched.
Returns:
A dataframe containing as a key the tileID and as columns ageDistribution and the maleProportion
+----------+-----------------------+---------------------+
| | ageDistribution | maleProportion |
+----------+-----------------------+---------------------+
| 44554639 | NaN | 0.49828359484672546 |
+----------+-----------------------+---------------------+
| 44271906 | [0.21413850784301758, | 0.493218 |
| | 0.27691012620925903, | |
| | 0.37422287464141846, | |
| | 0.13472850620746613] | |
+----------+-----------------------+---------------------+
In the example above tile 44554639 does not have any age distribution data.
The data is k-anonymized. Therefor is some tiles are missing data it
means that the data is not available. To find out more about demographics visit the Heatmap FAQ.
"""
dates = [(day + timedelta(hours=delta)) for delta in range(24)]
date2score = dict()
for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-demographics/daily/{day.isoformat().split("T")[0]}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
data = oauth.get(api_request, headers=headers).json()
for t in data.get("tiles", []):
if date2score.get(t['tileId']) == None:
date2score[t['tileId']] = dict()
date2score[t['tileId']] = {"ageDistribution": t.get("ageDistribution"),"maleProportion": t.get("maleProportion")}
return pd.DataFrame.from_dict(date2score).transpose()
def get_hourly_demographics_dataframe(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0)):
"""Fetches hourly demographics of age categories for 24 hours
Fetches the hourly demographics, age distribution, of the tiles.
Age categories are the following 0 - 19, 20 - 39, 40 - 64, >64
Args:
tiles: Array of tile id's, what will be used to querry demographic data.
day: date of the data to be fetched.
Returns:
DataFrame containing the demographics. The name
of the collumns are:
[age_cat, age_distribution, male_proportion]
+----------+---------------------+---------+------------------+-----------------+
| | | age_cat | age_distribution | male_proportion |
+----------+---------------------+---------+------------------+-----------------+
| tileID | time | | | |
+----------+---------------------+---------+------------------+-----------------+
| 44394309 | 2020-01-27T00:00:00 | NaN | NaN | 0.474876 |
+----------+---------------------+---------+------------------+-----------------+
| | 2020-01-27T01:00:00 | NaN | NaN | 0.483166 |
+----------+---------------------+---------+------------------+-----------------+
| | ... | | | |
+----------+---------------------+---------+------------------+-----------------+
| 44290729 | 2020-01-27T06:00:00 | 0.0 | 0.192352 | 0.497038 |
+----------+---------------------+---------+------------------+-----------------+
| | 2020-01-27T06:00:00 | 1.0 | 0.269984 | 0.497038 |
+----------+---------------------+---------+------------------+-----------------+
| | 2020-01-27T06:00:00 | 2.0 | 0.363481 | 0.497038 |
+----------+---------------------+---------+------------------+-----------------+
| | 2020-01-27T06:00:00 | 3.0 | 0.174183 | 0.497038 |
+----------+---------------------+---------+------------------+-----------------+
The data is k-anonymized. Therefor is some tiles are not present in the output dataframe it
means that the data is not available. To find out more about demographics visit the Heatmap FAQ.
"""
def get_hourly_demographics(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0) ):
"""Fetches hourly male proportion and age categories for 24 hours
Args:
tiles: Array of tile id's, what will be used to querry demographic data.
day: date of the data to be fetched.
Returns:
Returns a dictionary with as a key the tileID, and as a value an object that is as follows:
{tileID: {dateTime:{ "ageDistribution": [0-19, 20-39, 40-64, 64+], "maleProportion": value},
{dateTime2: ...}}}
26994514: {'2020-01-27T00:00:00': {'ageDistribution': [0.1925136297941208,
0.2758632302284241,
0.362215131521225,
0.16940800845623016],
'maleProportion': 0.4727686941623688},
'2020-01-27T01:00:00': {'ageDistribution': None,
'maleProportion': 0.4896690547466278},
'2020-01-27T02:00:00': {'ageDistribution': None,
'maleProportion': 0.48882684111595154},
The data is k-anonymized. Therefor is some values are None it means that no data was available
To find out more about demographics visit the Heatmap FAQ.
"""
dates = [(day + timedelta(hours=delta)) for delta in range(24)]
date2score = dict()
for dt in tqdm(dates, desc="get_hourly_demographics: hours", leave=True):
for tiles_subset in [tiles[i:i + 100] for i in range(0, len(tiles), 100)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-demographics/hourly/{dt.isoformat()}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
data = oauth.get(api_request, headers=headers).json()
for t in data.get("tiles", []):
if date2score.get(t['tileId']) == None:
date2score[t['tileId']] = dict()
date2score.get(t['tileId'])[dt.isoformat()] = {"ageDistribution": t.get("ageDistribution"),"maleProportion": t.get("maleProportion")}
return date2score
data = get_hourly_demographics(tiles, day)
tile_id = []
time_data = []
age_distribution = []
age_cat = []
male_proportion = []
for i in data:
for time in data[i]:
if data[i][time].get("ageDistribution") != None:
for (idx,a) in enumerate(data[i][time].get("ageDistribution", [])):
age_cat.append(idx)
age_distribution.append(a)
tile_id.append(i)
time_data.append(time)
male_proportion.append(data[i][time].get("maleProportion"))
else:
tile_id.append(i)
time_data.append(time)
age_distribution.append(None)
male_proportion.append(data[i][time].get("maleProportion"))
age_cat.append(None)
return pd.DataFrame(data={'tileID': tile_id, "age_cat": age_cat, 'age_distribution':age_distribution, "male_proportion": male_proportion, 'time': time_data}).set_index(['tileID', 'time'])
def get_daily_density(tiles: np.array(int), day=datetime(year=2020, month=1, day=27)) -> pd.DataFrame:
"""Fetches the daily density of tiles.
Fetches the daily density of the tiles and creates a dataframe of the fetched data.
Args:
tiles: Array of tile id's that daily density data needs to be fetched.
day: Day to fetch the density data for.
Returns:
DataFrame containg the tileId and the score. The name of the collumns are:
[score]
The identifier of the row is bassed on the tileID
+----------+-------+
| | score |
+----------+-------+
| tileID | |
+----------+-------+
| 44394309 | 1351 |
+----------+-------+
| 44394315 | 1103 |
+----------+-------+
| 44460297 | 875 |
+----------+-------+
| 44488589 | 1387 |
+----------+-------+
| 44498028 | 678 |
+----------+-------+
Tile with k-anonymized dwell density score. If tile not present Swisscom is
unable to provide a value due to k-anonymization. To find out more on density
scores read the Heatmap FAQ.
"""
tileID = []
score = []
for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-density/daily/{day.isoformat().split("T")[0]}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
data = oauth.get(api_request, headers=headers).json()
if data.get("tiles") != None:
for t in data["tiles"]:
tileID.append(t['tileId'])
score.append(t["score"])
return pd.DataFrame(data={'tileID': tileID, 'score':score}).set_index("tileID")
def get_hourly_density_dataframe(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0)):
"""Fetches the hourly density of tiles for 24 hours.
Fetches the hourly density of the tiles and creates a dataframe of the fetched data.
Args:
tiles: Array of tile id's that daily density data needs to be fetched.
day: Day to fetch the density data for.
Returns:
DataFrame containg the tileId and the score. The name of the collumns are:
[score]
The identifier of the row is bassed on the [tileID, time]
+----------+---------------------+-------+
| | | score |
+----------+---------------------+-------+
| tileID | time | |
+----------+---------------------+-------+
| 44394309 | 2020-01-27T00:00:00 | 52 |
| +---------------------+-------+
| | 2020-01-27T01:00:00 | 68 |
| +---------------------+-------+
| | 2020-01-27T02:00:00 | 69 |
| +---------------------+-------+
| | 2020-01-27T03:00:00 | 69 |
| +---------------------+-------+
| | 2020-01-27T04:00:00 | 69 |
+----------+---------------------+-------+
Tile with k-anonymized dwell density score. If tile not present Swisscom is
unable to provide a value due to k-anonymization. To find out more on density
scores read the Heatmap FAQ.
"""
def get_hourly_density(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0)):
dates = [(day + timedelta(hours=delta)) for delta in range(24)]
date2score = dict()
print("getHourlyDensity")
for dt in tqdm(dates, desc="get_hourly_density: hours", leave=True):
for tiles_subset in [tiles[i:i + 100] for i in range(0, len(tiles), 100)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-density/hourly/{dt.isoformat()}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
for t in oauth.get(api_request, headers=headers).json().get("tiles",[]):
if date2score.get(t['tileId']) == None:
date2score[t['tileId']] = dict()
date2score.get(t['tileId'])[dt.isoformat()] = t['score']
return date2score
tiles_data = []
time_data = []
score = []
data = get_hourly_density(tiles, day)
for t in data:
for time in data[t]:
time_data.append(time)
tiles_data.append(t)
score.append(data[t][time])
return pd.DataFrame(data={'tileID': tiles_data, 'score':score, 'time': time_data}).set_index(['tileID', 'time'])
def fetch_data_city(city: str) -> None:
"""Fetches the data for a city if the data is not yet cashed on the computer.
"""
compression = ".xz"
folder = os.path.join(".","data")
def file_path(file_name: str) -> str:
return os.path.join(folder, file_name)
if not(os.path.exists(folder)):
os.mkdir(folder)
tiles_path = file_path(f'{city}Tiles.pkl{compression}')
hourly_dem_path = file_path(f'{city}HourlyDemographics.pkl{compression}')
hourly_density_path = file_path(f'{city}HourlyDensity.pkl{compression}')
daily_density_path = file_path(f'{city}DensityDaily.pkl{compression}')
daily_demographics_path = file_path(f'{city}DemographicsDaily.pkl{compression}')
if not(os.path.isfile(tiles_path)):
tiles = get_tiles(get_municipalityID(city)[0])
tiles.to_pickle(tiles_path)
else:
tiles = pd.read_pickle(tiles_path)
if not(os.path.isfile(hourly_dem_path)):
hourly_dem = get_hourly_demographics_dataframe(tiles['tileID'].to_numpy())
hourly_dem.to_pickle(hourly_dem_path)
if not(os.path.isfile(hourly_density_path)):
hourly_dens = get_hourly_density_dataframe(tiles['tileID'].to_numpy())
hourly_dens.to_pickle(hourly_density_path)
if not(os.path.isfile(daily_density_path)):
get_daily_density(tiles['tileID'].to_numpy()).to_pickle(daily_density_path)
if not(os.path.isfile(daily_demographics_path)):
get_daily_demographics(tiles['tileID'].to_numpy()).to_pickle(daily_demographics_path)
def clean_cities_list(cities: [str]) -> [str]:
"""Cleans the list of cities by removing all the cities that are not found in the
    official list of cities provided by the Federal Statistics Office.
Args:
List of cities to check and clean.
Return:
List containing a subset of the input list such that all elements are valid.
"""
invalid_cities = []
#validation that the cities names are valid
for c in cities:
if len(commune.loc[commune.GDENAME == c].GDENR.to_numpy()) == 0:
city = []
sim_value = []
for f in commune.GDENAME:
r = SequenceMatcher(None, c, f).ratio()
if r > 0.5:
city.append(f)
sim_value.append(r)
d = pd.DataFrame(data={"city": city, "value": sim_value})
potential_cities = d.sort_values("value", ascending=False).head(5).city.to_numpy()
print(f"City nammed: {c} cannot be found in official records. Did you mean: {potential_cities} ? {c} will be ignored.")
invalid_cities.append(c)
return [c for c in cities if not(c in invalid_cities)]
# Multithread fetch implementation
class DownloadWorker(Thread):
def __init__(self, queue):
Thread.__init__(self)
self.queue = queue
def run(self):
while True:
# Get the work from the queue and expand the tuple
city = self.queue.get()
if city == -1:
self.queue.put(-1)
break
try:
fetch_data_city(city)
finally:
self.queue.task_done()
def download_commune_excel() -> None:
'''
Downloads the excel spreadsheet from the Swiss Federal Statistical Office that maps the town name to unique ID
'''
print('Beginning commune file download with requests')
folder = os.path.join(".","data")
if not(os.path.exists(folder)):
os.mkdir(folder)
url = 'https://www.bfs.admin.ch/bfsstatic/dam/assets/11467406/master'
r = requests.get(url)
with open(os.path.join(".", "data", 'commune.xlsx'), 'wb') as f:
f.write(r.content)
print("End of commune file download")
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
BASE_URL = "https://api.swisscom.com/layer/heatmaps/demo"
TOKEN_URL = "https://consent.swisscom.com/o/oauth2/token"
MAX_NB_TILES_REQUEST = 100
headers = {"scs-version": "2"}
client_id = "" # customer key in the Swisscom digital market place
client_secret = "" # customer secret in the Swisscom digital market place
if client_id == "":
client_id = os.environ.get("CLIENT_ID", "")
if client_id == "":
client_id = input("Enter MIP Client ID: ")
os.environ["CLIENT_ID"] = client_id
if client_secret == "":
client_secret = os.environ.get("CLIENT_SECRET", "")
if client_secret == "":
client_secret = getpass.getpass('Enter MIP client secret:')
os.environ["CLIENT_SECRET"] = client_secret
# Fetch an access token
client = BackendApplicationClient(client_id=client_id)
oauth = OAuth2Session(client=client)
oauth.fetch_token(token_url=TOKEN_URL, client_id=client_id,
client_secret=client_secret)
def main():
ts = time()
if not(os.path.exists(os.path.join(".", "data", 'commune.xlsx'))):
download_commune_excel()
global commune
commune = pd.read_excel(os.path.join(".", "data", 'commune.xlsx'), sheet_name='GDE')
cities = ["Saas-Fee", "Arosa", "Bulle", "Laax","Belp" ,"Saanen","Adelboden", "Andermatt", "Davos", "Bulle", "Bern", "Genève", "Lausanne", "Zürich", "Neuchâtel", "Sion", "St. Gallen", "Appenzell", "Solothurn", "Zug", "Fribourg", "Luzern", "Ecublens (VD)", "Kloten", "Le Grand-Saconnex", "Nyon", "Zermatt", "Lugano"]
cities = clean_cities_list(cities)
queue = Queue()
for x in range(2):
worker = DownloadWorker(queue)
        worker.daemon = True
worker.start()
for c in cities:
logger.info('Queueing {}'.format(c))
queue.put(c)
queue.join()
queue.put(-1)
logger.info('Took %s', time() - ts)
list_of_cities_path = os.path.join(".", "data","CityList.json")
cityList=[]
if os.path.isfile(list_of_cities_path):
with open(list_of_cities_path, "r") as filehandle:
cityList = json.load(filehandle)
with open(list_of_cities_path, "w") as filehandle:
for city in cities:
if not(city in cityList):
cityList.append(city)
json.dump(cityList, filehandle)
if __name__ == "__main__":
main()
# Other functions not currently used
def get_daily_demographics_male(tiles: np.array(int), day=datetime(year=2020, month=1, day=27)) -> pd.DataFrame:
"""Fetches Daily demographics.
Fetches the daily male proportion of the tiles and creates a dataframe of the fetched data.
Args:
tiles: Array of tile id's, what will be used to querry demographic data.
day: date of the data to be fetched.
Returns:
DataFrame containing the tileId and the proportion of male. The name of the collumns are:
[tileID, maleProportion]
The data is k-anonymized. Therefor is some tiles are not present in the output dataframe it
means that the data is not available. To find out more about demographics visit the Heatmap FAQ.
"""
tileID = []
maleProportion = []
for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-demographics/daily/{day.isoformat().split("T")[0]}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
data = oauth.get(api_request, headers=headers).json()
if data.get("tiles") != None:
for t in data["tiles"]:
if t.get("maleProportion") != None:
tileID.append(t['tileId'])
maleProportion.append(t["maleProportion"])
return pd.DataFrame(data={'tileID': tileID, 'maleProportion':maleProportion})
def get_daily_demographics_age(tiles: np.ndarray, day=datetime(year=2020, month=1, day=27)) -> pd.DataFrame:
"""Fetches daily demographics of age categories.
Fetches the daily demographics, age distribution, of the tiles and creates a dataframe of the fetched data.
Args:
tiles: Array of tile ids that will be used to query demographic data.
day: Date of the data to be fetched.
Returns:
DataFrame containing the tileId and an array of values corresponding to the age distribution. The
column names are:
[tileID, ageDistribution]
The data is k-anonymized. Therefore, if some tiles are not present in the output dataframe it
means that the data is not available. To find out more about demographics visit the Heatmap FAQ.
"""
tileID = []
ageDistribution = []
for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-demographics/daily/{day.isoformat().split("T")[0]}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
data = oauth.get(api_request, headers=headers).json()
for t in data.get("tiles", []):
if t.get("ageDistribution") != None:
tileID.append(t['tileId'])
ageDistribution.append(t["ageDistribution"])
return pd.DataFrame(data={'tileID': tileID, 'ageDistribution':ageDistribution})
|
import asyncio
import discord
import logging
from random import randint
from random import choice as randchoice
from redbot.core import bank, checks, commands, Config
from redbot.core.errors import BalanceTooHigh
from redbot.core.utils.chat_formatting import box, humanize_list, pagify
from .phrases import FRIENDS, SNACKBURR_PHRASES
log = logging.getLogger("red.aikaterna.snacktime")
class Snacktime(commands.Cog):
"""Snackburr's passing out pb jars!"""
async def red_delete_data_for_user(self, **kwargs):
""" Nothing to delete """
return
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, 2712291001, force_registration=True)
self.snackSchedule = {}
self.snacktimePrediction = {}
self.previousSpeaker = {}
self.snackInProgress = {}
self.acceptInput = {}
self.alreadySnacked = {}
self.msgsPassed = {}
self.startLock = {}
self.snacktimeCheckLock = {}
self.lockRequests = {}
self.channel_persona = {}
default_guild = {
"DELIVER_CHANNELS": [],
"FRIENDS": False,
"EVENT_START_DELAY": 1800,
"EVENT_START_DELAY_VARIANCE": 900,
"SNACK_DURATION": 240,
"SNACK_DURATION_VARIANCE": 120,
"MSGS_BEFORE_EVENT": 8,
"SNACK_AMOUNT": 200,
}
default_channel = {"repeatMissedSnacktimes": 0}
self.config.register_guild(**default_guild)
self.config.register_channel(**default_channel)
async def persona_choice(self, ctx: None, message: None):
if ctx:
invite_friends = await self.config.guild(ctx.guild).FRIENDS()
else:
invite_friends = await self.config.guild(message.guild).FRIENDS()
personas = dict(FRIENDS)  # copy so removing "Snackburr" below does not mutate the imported FRIENDS mapping
if not invite_friends:
return "Snackburr" if message else "ʕ •ᴥ•ʔ <"
elif invite_friends is True:
try:
del personas["Snackburr"]
except KeyError:
pass
if message:
return randchoice(list(personas.keys()))
else:
return randchoice(list(personas.values()))
async def get_response(self, msg, phrase_type):
scid = f"{msg.guild.id}-{msg.channel.id}"
persona = self.channel_persona[scid]
persona_phrase = FRIENDS.get(persona)
phrase = randchoice(SNACKBURR_PHRASES[phrase_type])
return f"`{persona_phrase} {phrase}`"
@commands.cooldown(1, 1, commands.BucketType.channel)
@commands.guild_only()
@commands.command()
async def eat(self, ctx, amount: int):
"""
all this talk about pb is makin me hungry.
how bout you guys?
"""
persona = await self.persona_choice(ctx=ctx, message=None)
if amount < 0:
return await ctx.send(f"`{persona} Woah slow down!`")
if amount > await bank.get_balance(ctx.author):
return await ctx.send(f"`{persona} You don't got that much pb!.. don't look at me..`")
await bank.withdraw_credits(ctx.author, amount)
first_phrase = randchoice(SNACKBURR_PHRASES["EAT_BEFORE"])
second_phrase = randchoice(SNACKBURR_PHRASES["EAT_AFTER"])
await ctx.send(f"`{persona} {ctx.author.display_name} {first_phrase} {second_phrase} {amount} whole pb jars!`")
@commands.guild_only()
@commands.group()
@checks.mod_or_permissions(manage_guild=True)
async def snackset(self, ctx):
"""snack stuff"""
if ctx.invoked_subcommand is None:
guild_data = await self.config.guild(ctx.guild).all()
channel_names = []
if guild_data["DELIVER_CHANNELS"]:
for channel_id in guild_data["DELIVER_CHANNELS"]:
channel_obj = self.bot.get_channel(channel_id)
if channel_obj:
channel_names.append(channel_obj.name)
if len(channel_names) == 0:
channel_names = ["No channels set."]
if guild_data["FRIENDS"] is True:
invite_friends = "Friends only"
elif guild_data["FRIENDS"] is False:
invite_friends = "Snackburr only"
else:
invite_friends = "Everyone's invited!"
msg = f"[Delivering in]: {humanize_list(channel_names)}\n"
msg += f"[Event start delay]: {guild_data["EVENT_START_DELAY"]} seconds\n"
msg += f"[Event start variance]: {guild_data["EVENT_START_DELAY_VARIANCE"]} seconds\n"
msg += f"[Friends status]: {invite_friends}\n"
msg += f"[Messages before event]: {guild_data["MSGS_BEFORE_EVENT"]}\n"
msg += f"[Snack amount limit]: {guild_data["SNACK_AMOUNT"]} pb\n"
msg += f"[Snack duration]: {guild_data["SNACK_DURATION"]} seconds\n"
msg += f"[Snack duration variance]: {guild_data["SNACK_DURATION_VARIANCE"]} seconds\n"
for page in pagify(msg, delims=["\n"]):
await ctx.send(box(page, lang="ini"))
@snackset.command()
async def errandtime(self, ctx, seconds: int):
"""How long snackburr needs to be out doin errands.. more or less."""
event_start_delay_variance = await self.config.guild(ctx.guild).EVENT_START_DELAY_VARIANCE()
if seconds <= event_start_delay_variance:
await ctx.send("errandtime must be greater than errandvariance!")
elif seconds <= 0:
await ctx.send("errandtime must be greater than 0")
else:
await self.config.guild(ctx.guild).EVENT_START_DELAY.set(seconds)
await ctx.send(f"snackburr's errands will now take around {round(seconds/60, 2)} minutes!")
@snackset.command()
async def errandvariance(self, ctx, seconds: int):
"""How early or late snackburr might be to snacktime"""
event_start_delay = await self.config.guild(ctx.guild).EVENT_START_DELAY()
if seconds >= event_start_delay:
await ctx.send("errandvariance must be less than errandtime!")
elif seconds < 0:
await ctx.send("errandvariance must be 0 or greater!")
else:
await self.config.guild(ctx.guild).EVENT_START_DELAY_VARIANCE.set(seconds)
await ctx.send(f"snackburr now might be {round(seconds/60, 2)} minutes early or late to snacktime")
@snackset.command(name="snacktime")
async def snacktimetime(self, ctx, seconds: int):
"""How long snackburr will hang out giving out snacks!.. more or less."""
snack_duration_variance = await self.config.guild(ctx.guild).SNACK_DURATION_VARIANCE()
if seconds <= snack_duration_variance:
await ctx.send("snacktime must be greater than snackvariance!")
elif seconds <= 0:
await ctx.send("snacktime must be greater than 0")
else:
await self.config.guild(ctx.guild).SNACK_DURATION.set(seconds)
await ctx.send(f"snacktimes will now last around {round(seconds/60, 2)} minutes!")
@snackset.command(name="snackvariance")
async def snacktimevariance(self, ctx, seconds: int):
"""How early or late snackburr might have to leave for errands"""
snack_duration = await self.config.guild(ctx.guild).SNACK_DURATION()
if seconds >= snack_duration:
await ctx.send("snackvariance must be less than snacktime!")
elif seconds < 0:
await ctx.send("snackvariance must be 0 or greater!")
else:
await self.config.guild(ctx.guild).SNACK_DURATION_VARIANCE.set(seconds)
await ctx.send(f"snackburr now may have to leave snacktime {round(seconds/60, 2)} minutes early or late")
@snackset.command()
async def msgsneeded(self, ctx, amt: int):
"""How many messages must pass in a conversation before a snacktime can start"""
if amt <= 0:
await ctx.send("msgsneeded must be greater than 0")
else:
await self.config.guild(ctx.guild).MSGS_BEFORE_EVENT.set(amt)
await ctx.send(f"snackburr will now wait until {amt} messages pass until he comes with snacks")
@snackset.command()
async def amount(self, ctx, amt: int):
"""How much pb max snackburr should give out to each person per snacktime"""
if amt <= 0:
await ctx.send("amount must be greater than 0")
else:
await self.config.guild(ctx.guild).SNACK_AMOUNT.set(amt)
await ctx.send(f"snackburr will now give out {amt} pb max per person per snacktime.")
@snackset.command(name="friends")
async def snackset_friends(self, ctx, choice: int):
"""snackburr's friends wanna know what all the hub-bub's about!
Do you want to
1: invite them to the party,
2: only allow snackburr to chillax with you guys, or
3: kick snackburr out on the curb in favor of his obviously cooler friends?
"""
if choice not in (1, 2, 3):
return await ctx.send_help()
choices = {
1: ("both", "Everybody's invited!"),
2: (False, "You chose to not invite snackburr's friends."),
3: (True, "You kick snackburr out in favor of his friends! Ouch. Harsh..."),
}
choice = choices[choice]
await self.config.guild(ctx.guild).FRIENDS.set(choice[0])
await ctx.send(choice[1])
@snackset.command()
async def deliver(self, ctx):
"""Asks snackburr to start delivering to this channel"""
deliver_channels = await self.config.guild(ctx.guild).DELIVER_CHANNELS()
if not deliver_channels:
deliver_channels = []
if ctx.channel.id not in deliver_channels:
deliver_channels.append(ctx.channel.id)
await self.config.guild(ctx.guild).DELIVER_CHANNELS.set(deliver_channels)
await ctx.send("snackburr will start delivering here!")
else:
deliver_channels.remove(ctx.channel.id)
await self.config.guild(ctx.guild).DELIVER_CHANNELS.set(deliver_channels)
await ctx.send("snackburr will stop delivering here!")
@commands.guild_only()
@commands.command()
async def snacktime(self, ctx):
"""Man i'm hungry! When's snackburr gonna get back with more snacks?"""
scid = f"{ctx.message.guild.id}-{ctx.message.channel.id}"
if self.snacktimePrediction.get(scid, None) == None:
if self.acceptInput.get(scid, False):
return
else:
phrases = [
r"Don't look at me. I donno where snackburr's at ¯\_(ツ)_/¯",
"I hear snackburr likes parties. *wink wink",
"I hear snackburr is attracted to channels with active conversations",
"If you party, snackburr will come! 〈( ^o^)ノ",
]
await ctx.send(randchoice(phrases))
return
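# seconds is the time left until the predicted snacktime; a negative value means snackburr is already overdue.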
seconds = self.snacktimePrediction[scid] - self.bot.loop.time()
if self.snacktimeCheckLock.get(scid, False):
if randint(1, 4) == 4:
await ctx.send("Hey, snackburr's on errands. I ain't his keeper Kappa")
return
self.snacktimeCheckLock[scid] = True
if seconds < 0:
await ctx.send(f"I'm not sure where snackburr is.. He's already {round(abs(seconds/60), 2)} minutes late!")
else:
await ctx.send(f"snackburr's out on errands! I think he'll be back in {round(seconds/60, 2)} minutes")
await asyncio.sleep(40)
self.snacktimeCheckLock[scid] = False
async def startSnack(self, message):
scid = f"{message.guild.id}-{message.channel.id}"
if self.acceptInput.get(scid, False):
return
self.channel_persona[scid] = await self.persona_choice(ctx=None, message=message)
await message.channel.send(await self.get_response(message, "SNACKTIME"))
self.acceptInput[scid] = True
self.alreadySnacked[scid] = []
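# acceptInput marks this channel as mid-snacktime; alreadySnacked tracks members who were already served this round.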
guild_data = await self.config.guild(message.guild).all()
duration = guild_data["SNACK_DURATION"] + randint(
-guild_data["SNACK_DURATION_VARIANCE"], guild_data["SNACK_DURATION_VARIANCE"]
)
await asyncio.sleep(duration)
# sometimes fails sending messages and stops all future snacktimes. Hopefully this fixes it.
try:
# list isn't empty
if self.alreadySnacked.get(scid, False):
await message.channel.send(await self.get_response(message, "OUT"))
await self.config.channel(message.channel).repeatMissedSnacktimes.set(0)
else:
await message.channel.send(await self.get_response(message, "NO_TAKERS"))
repeat_missed_snacktimes = await self.config.channel(message.channel).repeatMissedSnacktimes()
await self.config.channel(message.channel).repeatMissedSnacktimes.set(repeat_missed_snacktimes + 1)
await asyncio.sleep(2)
if (repeat_missed_snacktimes + 1) > 9: # move to a setting
await message.channel.send(await self.get_response(message, "LONELY"))
deliver_channels = await self.config.guild(message.guild).DELIVER_CHANNELS()
deliver_channels.remove(message.channel.id)
await self.config.guild(message.guild).DELIVER_CHANNELS.set(deliver_channels)
await self.config.channel(message.channel).repeatMissedSnacktimes.set(0)
except:
log.error("Snacktime: Failed to send message in startSnack")
self.acceptInput[scid] = False
self.snackInProgress[scid] = False
@commands.Cog.listener()
async def on_message(self, message):
if not message.guild:
return
if message.author.bot:
return
if not message.channel.permissions_for(message.guild.me).send_messages:
return
deliver_channels = await self.config.guild(message.guild).DELIVER_CHANNELS()
if not deliver_channels:
return
if message.channel.id not in deliver_channels:
return
scid = f"{message.guild.id}-{message.channel.id}"
if message.author.id != self.bot.user.id:
# if nobody has said anything since start
if self.previousSpeaker.get(scid, None) == None:
self.previousSpeaker[scid] = message.author.id
# if new speaker
elif self.previousSpeaker[scid] != message.author.id:
self.previousSpeaker[scid] = message.author.id
msgTime = self.bot.loop.time()
# if there's a scheduled snack
if self.snackSchedule.get(scid, None) != None:
# if it's time for a snack
if msgTime > self.snackSchedule[scid]:
# 1 schedule at a time, so remove schedule
self.snackSchedule[scid] = None
self.snackInProgress[scid] = True
# wait to make it more natural
naturalWait = randint(30, 240)
log.debug(f"Snacktime: snack trigger msg: {message.content}")
log.debug(f"Snacktime: Waiting {str(naturalWait)} seconds")
await asyncio.sleep(naturalWait)
# start snacktime
await self.startSnack(message)
# if no snack coming, schedule one
elif self.snackInProgress.get(scid, False) == False and not self.startLock.get(scid, False):
self.msgsPassed[scid] = self.msgsPassed.get(scid, 0) + 1
# check for collisions
msgs_before_event = await self.config.guild(message.guild).MSGS_BEFORE_EVENT()
if self.msgsPassed[scid] > msgs_before_event:
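# Several messages can cross the threshold at nearly the same time; the soft lock below lets only the first queued request schedule the snack while the others back off.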
self.startLock[scid] = True
if self.lockRequests.get(scid, None) == None:
self.lockRequests[scid] = []
self.lockRequests[scid].append(message)
await asyncio.sleep(1)
log.debug(
f"Snacktime: :-+-|||||-+-: Lock request: {str(self.lockRequests[scid][0] == message)}"
)
if self.lockRequests[scid][0] == message:
await asyncio.sleep(5)
log.debug(f"Snacktime: {message.author.name} - I got the Lock")
self.lockRequests[scid] = []
# someone got through already
if self.msgsPassed[scid] < msgs_before_event or self.snackInProgress.get(scid, False):
log.debug("Snacktime: Lock: someone got through already.")
return
else:
log.debug(
"Snacktime: Lock: looks like i'm in the clear. lifting lock. If someone comes now, they should get the lock"
)
self.msgsPassed[scid] = msgs_before_event
self.startLock[scid] = False
else:
log.debug(f"Snacktime: {message.author.name} Failed lock")
return
if self.msgsPassed[scid] == msgs_before_event:
# schedule a snack
log.debug(f"Snacktime: activity: {message.content}")
guild_data = await self.config.guild(message.guild).all()
timeTillSnack = guild_data["EVENT_START_DELAY"] + randint(
-guild_data["EVENT_START_DELAY_VARIANCE"], guild_data["EVENT_START_DELAY_VARIANCE"],
)
log.debug(f"Snacktime: {str(timeTillSnack)} seconds till snacktime")
self.snacktimePrediction[scid] = msgTime + guild_data["EVENT_START_DELAY"]
self.snackSchedule[scid] = msgTime + timeTillSnack
self.msgsPassed[scid] = 0
# it's snacktime! who wants snacks?
if self.acceptInput.get(scid, False):
if message.author.id not in self.alreadySnacked.get(scid, []):
agree_phrases = [
"holds out hand",
"im ready",
"i'm ready",
"hit me up",
"hand over",
"hand me",
"kindly",
"i want",
"i'll have",
"ill have",
"yes",
"pls",
"plz",
"please",
"por favor",
"can i",
"i'd like",
"i would",
"may i",
"in my mouth",
"in my belly",
"snack me",
"gimme",
"give me",
"i'll take",
"ill take",
"i am",
"about me",
"me too",
"of course",
]
userWants = False
for agreePhrase in agree_phrases:
# no one word answers
if agreePhrase in message.content.lower() and len(message.content.split()) > 1:
userWants = True
break
if userWants:
if self.alreadySnacked.get(scid, None) == None:
self.alreadySnacked[scid] = []
self.alreadySnacked[scid].append(message.author.id)
# If the user is blacklisted, don't give them anything.
# They are still added to alreadySnacked above so this check isn't repeated for their later messages.
if await self.bot.allowed_by_whitelist_blacklist(
who=message.author
) is False:
return
await asyncio.sleep(randint(1, 6))
snack_amount = await self.config.guild(message.guild).SNACK_AMOUNT()
snackAmt = randint(1, snack_amount)
try:
if self.acceptInput.get(scid, False):
resp = await self.get_response(message, "GIVE")
resp = resp.format(message.author.name, snackAmt)
await message.channel.send(resp)
else:
resp = await self.get_response(message, "LAST_SECOND")
resp = resp.format(message.author.name, snackAmt)
await message.channel.send(resp)
try:
await bank.deposit_credits(message.author, snackAmt)
except BalanceTooHigh as b:
await bank.set_balance(message.author, b.max_balance)
except Exception as e:
log.info(
f"Failed to send pb message. {message.author.name} didn't get pb\n", exc_info=True,
)
else:
more_phrases = [
"more pl",
"i have some more",
"i want more",
"i have another",
"i have more",
"more snack",
]
userWants = False
for morePhrase in more_phrases:
if morePhrase in message.content.lower():
userWants = True
break
if userWants:
if await self.bot.allowed_by_whitelist_blacklist(
who=message.author
) is False:
return
await asyncio.sleep(randint(1, 6))
if self.acceptInput.get(scid, False):
resp = await self.get_response(message, "GREEDY")
await message.channel.send(resp.format(message.author.name))
|
import asyncio
import discord
import logging
from random import randint
from random import choice as randchoice
from redbot.core import bank, checks, commands, Config
from redbot.core.errors import BalanceTooHigh
from redbot.core.utils.chat_formatting import box, humanize_list, pagify
from .phrases import FRIENDS, SNACKBURR_PHRASES
log = logging.getLogger("red.aikaterna.snacktime")
class Snacktime(commands.Cog):
"""Snackburr's passing out pb jars!"""
async def red_delete_data_for_user(self, **kwargs):
""" Nothing to delete """
return
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, 2712291001, force_registration=True)
self.snackSchedule = {}
self.snacktimePrediction = {}
self.previousSpeaker = {}
self.snackInProgress = {}
self.acceptInput = {}
self.alreadySnacked = {}
self.msgsPassed = {}
self.startLock = {}
self.snacktimeCheckLock = {}
self.lockRequests = {}
self.channel_persona = {}
default_guild = {
"DELIVER_CHANNELS": [],
"FRIENDS": False,
"EVENT_START_DELAY": 1800,
"EVENT_START_DELAY_VARIANCE": 900,
"SNACK_DURATION": 240,
"SNACK_DURATION_VARIANCE": 120,
"MSGS_BEFORE_EVENT": 8,
"SNACK_AMOUNT": 200,
}
default_channel = {"repeatMissedSnacktimes": 0}
self.config.register_guild(**default_guild)
self.config.register_channel(**default_channel)
async def persona_choice(self, ctx: None, message: None):
if ctx:
invite_friends = await self.config.guild(ctx.guild).FRIENDS()
else:
invite_friends = await self.config.guild(message.guild).FRIENDS()
personas = dict(FRIENDS)  # copy so removing "Snackburr" below does not mutate the imported FRIENDS mapping
if not invite_friends:
return "Snackburr" if message else "ʕ •ᴥ•ʔ <"
elif invite_friends is True:
try:
del personas["Snackburr"]
except KeyError:
pass
if message:
return randchoice(list(personas.keys()))
else:
return randchoice(list(personas.values()))
async def get_response(self, msg, phrase_type):
scid = f"{msg.guild.id}-{msg.channel.id}"
persona = self.channel_persona[scid]
persona_phrase = FRIENDS.get(persona)
phrase = randchoice(SNACKBURR_PHRASES[phrase_type])
return f"`{persona_phrase} {phrase}`"
@commands.cooldown(1, 1, commands.BucketType.channel)
@commands.guild_only()
@commands.command()
async def eat(self, ctx, amount: int):
"""
all this talk about pb is makin me hungry.
how bout you guys?
"""
persona = await self.persona_choice(ctx=ctx, message=None)
if amount < 0:
return await ctx.send(f"`{persona} Woah slow down!`")
if amount > await bank.get_balance(ctx.author):
return await ctx.send(f"`{persona} You don't got that much pb!.. don't look at me..`")
await bank.withdraw_credits(ctx.author, amount)
first_phrase = randchoice(SNACKBURR_PHRASES["EAT_BEFORE"])
second_phrase = randchoice(SNACKBURR_PHRASES["EAT_AFTER"])
await ctx.send(f"`{persona} {ctx.author.display_name} {first_phrase} {second_phrase} {amount} whole pb jars!`")
@commands.guild_only()
@commands.group()
@checks.mod_or_permissions(manage_guild=True)
async def snackset(self, ctx):
"""snack stuff"""
if ctx.invoked_subcommand is None:
guild_data = await self.config.guild(ctx.guild).all()
channel_names = []
if guild_data["DELIVER_CHANNELS"]:
for channel_id in guild_data["DELIVER_CHANNELS"]:
channel_obj = self.bot.get_channel(channel_id)
if channel_obj:
channel_names.append(channel_obj.name)
if len(channel_names) == 0:
channel_names = ["No channels set."]
if guild_data["FRIENDS"] is True:
invite_friends = "Friends only"
elif guild_data["FRIENDS"] is False:
invite_friends = "Snackburr only"
else:
invite_friends = "Everyone's invited!"
msg = f"[Delivering in]: {humanize_list(channel_names)}\n"
msg += f"[Event start delay]: {guild_data['EVENT_START_DELAY']} seconds\n"
msg += f"[Event start variance]: {guild_data['EVENT_START_DELAY_VARIANCE']} seconds\n"
msg += f"[Friends status]: {invite_friends}\n"
msg += f"[Messages before event]: {guild_data['MSGS_BEFORE_EVENT']}\n"
msg += f"[Snack amount limit]: {guild_data['SNACK_AMOUNT']} pb\n"
msg += f"[Snack duration]: {guild_data['SNACK_DURATION']} seconds\n"
msg += f"[Snack duration variance]: {guild_data['SNACK_DURATION_VARIANCE']} seconds\n"
for page in pagify(msg, delims=["\n"]):
await ctx.send(box(page, lang="ini"))
@snackset.command()
async def errandtime(self, ctx, seconds: int):
"""How long snackburr needs to be out doin errands.. more or less."""
event_start_delay_variance = await self.config.guild(ctx.guild).EVENT_START_DELAY_VARIANCE()
if seconds <= event_start_delay_variance:
await ctx.send("errandtime must be greater than errandvariance!")
elif seconds <= 0:
await ctx.send("errandtime must be greater than 0")
else:
await self.config.guild(ctx.guild).EVENT_START_DELAY.set(seconds)
await ctx.send(f"snackburr's errands will now take around {round(seconds/60, 2)} minutes!")
@snackset.command()
async def errandvariance(self, ctx, seconds: int):
"""How early or late snackburr might be to snacktime"""
event_start_delay = await self.config.guild(ctx.guild).EVENT_START_DELAY()
if seconds >= event_start_delay:
await ctx.send("errandvariance must be less than errandtime!")
elif seconds < 0:
await ctx.send("errandvariance must be 0 or greater!")
else:
await self.config.guild(ctx.guild).EVENT_START_DELAY_VARIANCE.set(seconds)
await ctx.send(f"snackburr now might be {round(seconds/60, 2)} minutes early or late to snacktime")
@snackset.command(name="snacktime")
async def snacktimetime(self, ctx, seconds: int):
"""How long snackburr will hang out giving out snacks!.. more or less."""
snack_duration_variance = await self.config.guild(ctx.guild).SNACK_DURATION_VARIANCE()
if seconds <= snack_duration_variance:
await ctx.send("snacktime must be greater than snackvariance!")
elif seconds <= 0:
await ctx.send("snacktime must be greater than 0")
else:
await self.config.guild(ctx.guild).SNACK_DURATION.set(seconds)
await ctx.send(f"snacktimes will now last around {round(seconds/60, 2)} minutes!")
@snackset.command(name="snackvariance")
async def snacktimevariance(self, ctx, seconds: int):
"""How early or late snackburr might have to leave for errands"""
snack_duration = await self.config.guild(ctx.guild).SNACK_DURATION()
if seconds >= snack_duration:
await ctx.send("snackvariance must be less than snacktime!")
elif seconds < 0:
await ctx.send("snackvariance must be 0 or greater!")
else:
await self.config.guild(ctx.guild).SNACK_DURATION_VARIANCE.set(seconds)
await ctx.send(f"snackburr now may have to leave snacktime {round(seconds/60, 2)} minutes early or late")
@snackset.command()
async def msgsneeded(self, ctx, amt: int):
"""How many messages must pass in a conversation before a snacktime can start"""
if amt <= 0:
await ctx.send("msgsneeded must be greater than 0")
else:
await self.config.guild(ctx.guild).MSGS_BEFORE_EVENT.set(amt)
await ctx.send(f"snackburr will now wait until {amt} messages pass until he comes with snacks")
@snackset.command()
async def amount(self, ctx, amt: int):
"""How much pb max snackburr should give out to each person per snacktime"""
if amt <= 0:
await ctx.send("amount must be greater than 0")
else:
await self.config.guild(ctx.guild).SNACK_AMOUNT.set(amt)
await ctx.send(f"snackburr will now give out {amt} pb max per person per snacktime.")
@snackset.command(name="friends")
async def snackset_friends(self, ctx, choice: int):
"""snackburr's friends wanna know what all the hub-bub's about!
Do you want to
1: invite them to the party,
2: only allow snackburr to chillax with you guys, or
3: kick snackburr out on the curb in favor of his obviously cooler friends?
"""
if choice not in (1, 2, 3):
return await ctx.send_help()
choices = {
1: ("both", "Everybody's invited!"),
2: (False, "You chose to not invite snackburr's friends."),
3: (True, "You kick snackburr out in favor of his friends! Ouch. Harsh..."),
}
choice = choices[choice]
await self.config.guild(ctx.guild).FRIENDS.set(choice[0])
await ctx.send(choice[1])
@snackset.command()
async def deliver(self, ctx):
"""Asks snackburr to start delivering to this channel"""
deliver_channels = await self.config.guild(ctx.guild).DELIVER_CHANNELS()
if not deliver_channels:
deliver_channels = []
if ctx.channel.id not in deliver_channels:
deliver_channels.append(ctx.channel.id)
await self.config.guild(ctx.guild).DELIVER_CHANNELS.set(deliver_channels)
await ctx.send("snackburr will start delivering here!")
else:
deliver_channels.remove(ctx.channel.id)
await self.config.guild(ctx.guild).DELIVER_CHANNELS.set(deliver_channels)
await ctx.send("snackburr will stop delivering here!")
@commands.guild_only()
@commands.command()
async def snacktime(self, ctx):
"""Man i'm hungry! When's snackburr gonna get back with more snacks?"""
scid = f"{ctx.message.guild.id}-{ctx.message.channel.id}"
if self.snacktimePrediction.get(scid, None) == None:
if self.acceptInput.get(scid, False):
return
else:
phrases = [
r"Don't look at me. I donno where snackburr's at ¯\_(ツ)_/¯",
"I hear snackburr likes parties. *wink wink",
"I hear snackburr is attracted to channels with active conversations",
"If you party, snackburr will come! 〈( ^o^)ノ",
]
await ctx.send(randchoice(phrases))
return
seconds = self.snacktimePrediction[scid] - self.bot.loop.time()
if self.snacktimeCheckLock.get(scid, False):
if randint(1, 4) == 4:
await ctx.send("Hey, snackburr's on errands. I ain't his keeper Kappa")
return
self.snacktimeCheckLock[scid] = True
if seconds < 0:
await ctx.send(f"I'm not sure where snackburr is.. He's already {round(abs(seconds/60), 2)} minutes late!")
else:
await ctx.send(f"snackburr's out on errands! I think he'll be back in {round(seconds/60, 2)} minutes")
await asyncio.sleep(40)
self.snacktimeCheckLock[scid] = False
async def startSnack(self, message):
scid = f"{message.guild.id}-{message.channel.id}"
if self.acceptInput.get(scid, False):
return
self.channel_persona[scid] = await self.persona_choice(ctx=None, message=message)
await message.channel.send(await self.get_response(message, "SNACKTIME"))
self.acceptInput[scid] = True
self.alreadySnacked[scid] = []
guild_data = await self.config.guild(message.guild).all()
duration = guild_data["SNACK_DURATION"] + randint(
-guild_data["SNACK_DURATION_VARIANCE"], guild_data["SNACK_DURATION_VARIANCE"]
)
await asyncio.sleep(duration)
# sometimes fails sending messages and stops all future snacktimes. Hopefully this fixes it.
try:
# list isn't empty
if self.alreadySnacked.get(scid, False):
await message.channel.send(await self.get_response(message, "OUT"))
await self.config.channel(message.channel).repeatMissedSnacktimes.set(0)
else:
await message.channel.send(await self.get_response(message, "NO_TAKERS"))
repeat_missed_snacktimes = await self.config.channel(message.channel).repeatMissedSnacktimes()
await self.config.channel(message.channel).repeatMissedSnacktimes.set(repeat_missed_snacktimes + 1)
await asyncio.sleep(2)
if (repeat_missed_snacktimes + 1) > 9: # move to a setting
await message.channel.send(await self.get_response(message, "LONELY"))
deliver_channels = await self.config.guild(message.guild).DELIVER_CHANNELS()
deliver_channels.remove(message.channel.id)
await self.config.guild(message.guild).DELIVER_CHANNELS.set(deliver_channels)
await self.config.channel(message.channel).repeatMissedSnacktimes.set(0)
except:
log.error("Snacktime: Failed to send message in startSnack")
self.acceptInput[scid] = False
self.snackInProgress[scid] = False
@commands.Cog.listener()
async def on_message(self, message):
if not message.guild:
return
if message.author.bot:
return
if not message.channel.permissions_for(message.guild.me).send_messages:
return
deliver_channels = await self.config.guild(message.guild).DELIVER_CHANNELS()
if not deliver_channels:
return
if message.channel.id not in deliver_channels:
return
scid = f"{message.guild.id}-{message.channel.id}"
if message.author.id != self.bot.user.id:
# if nobody has said anything since start
if self.previousSpeaker.get(scid, None) == None:
self.previousSpeaker[scid] = message.author.id
# if new speaker
elif self.previousSpeaker[scid] != message.author.id:
self.previousSpeaker[scid] = message.author.id
msgTime = self.bot.loop.time()
# if there's a scheduled snack
if self.snackSchedule.get(scid, None) != None:
# if it's time for a snack
if msgTime > self.snackSchedule[scid]:
# 1 schedule at a time, so remove schedule
self.snackSchedule[scid] = None
self.snackInProgress[scid] = True
# wait to make it more natural
naturalWait = randint(30, 240)
log.debug(f"Snacktime: snack trigger msg: {message.content}")
log.debug(f"Snacktime: Waiting {str(naturalWait)} seconds")
await asyncio.sleep(naturalWait)
# start snacktime
await self.startSnack(message)
# if no snack coming, schedule one
elif self.snackInProgress.get(scid, False) == False and not self.startLock.get(scid, False):
self.msgsPassed[scid] = self.msgsPassed.get(scid, 0) + 1
# check for collisions
msgs_before_event = await self.config.guild(message.guild).MSGS_BEFORE_EVENT()
if self.msgsPassed[scid] > msgs_before_event:
self.startLock[scid] = True
if self.lockRequests.get(scid, None) == None:
self.lockRequests[scid] = []
self.lockRequests[scid].append(message)
await asyncio.sleep(1)
log.debug(
f"Snacktime: :-+-|||||-+-: Lock request: {str(self.lockRequests[scid][0] == message)}"
)
if self.lockRequests[scid][0] == message:
await asyncio.sleep(5)
log.debug(f"Snacktime: {message.author.name} - I got the Lock")
self.lockRequests[scid] = []
# someone got through already
if self.msgsPassed[scid] < msgs_before_event or self.snackInProgress.get(scid, False):
log.debug("Snacktime: Lock: someone got through already.")
return
else:
log.debug(
"Snacktime: Lock: looks like i'm in the clear. lifting lock. If someone comes now, they should get the lock"
)
self.msgsPassed[scid] = msgs_before_event
self.startLock[scid] = False
else:
log.debug(f"Snacktime: {message.author.name} Failed lock")
return
if self.msgsPassed[scid] == msgs_before_event:
# schedule a snack
log.debug(f"Snacktime: activity: {message.content}")
guild_data = await self.config.guild(message.guild).all()
timeTillSnack = guild_data["EVENT_START_DELAY"] + randint(
-guild_data["EVENT_START_DELAY_VARIANCE"], guild_data["EVENT_START_DELAY_VARIANCE"],
)
log.debug(f"Snacktime: {str(timeTillSnack)} seconds till snacktime")
self.snacktimePrediction[scid] = msgTime + guild_data["EVENT_START_DELAY"]
self.snackSchedule[scid] = msgTime + timeTillSnack
self.msgsPassed[scid] = 0
# it's snacktime! who wants snacks?
if self.acceptInput.get(scid, False):
if message.author.id not in self.alreadySnacked.get(scid, []):
agree_phrases = [
"holds out hand",
"im ready",
"i'm ready",
"hit me up",
"hand over",
"hand me",
"kindly",
"i want",
"i'll have",
"ill have",
"yes",
"pls",
"plz",
"please",
"por favor",
"can i",
"i'd like",
"i would",
"may i",
"in my mouth",
"in my belly",
"snack me",
"gimme",
"give me",
"i'll take",
"ill take",
"i am",
"about me",
"me too",
"of course",
]
userWants = False
for agreePhrase in agree_phrases:
# no one word answers
if agreePhrase in message.content.lower() and len(message.content.split()) > 1:
userWants = True
break
if userWants:
if self.alreadySnacked.get(scid, None) == None:
self.alreadySnacked[scid] = []
self.alreadySnacked[scid].append(message.author.id)
# If the user is blacklisted, don't give them anything.
# They are still added to alreadySnacked above so this check isn't repeated for their later messages.
if await self.bot.allowed_by_whitelist_blacklist(
who=message.author
) is False:
return
await asyncio.sleep(randint(1, 6))
snack_amount = await self.config.guild(message.guild).SNACK_AMOUNT()
snackAmt = randint(1, snack_amount)
try:
if self.acceptInput.get(scid, False):
resp = await self.get_response(message, "GIVE")
resp = resp.format(message.author.name, snackAmt)
await message.channel.send(resp)
else:
resp = await self.get_response(message, "LAST_SECOND")
resp = resp.format(message.author.name, snackAmt)
await message.channel.send(resp)
try:
await bank.deposit_credits(message.author, snackAmt)
except BalanceTooHigh as b:
await bank.set_balance(message.author, b.max_balance)
except Exception as e:
log.info(
f"Failed to send pb message. {message.author.name} didn't get pb\n", exc_info=True,
)
else:
more_phrases = [
"more pl",
"i have some more",
"i want more",
"i have another",
"i have more",
"more snack",
]
userWants = False
for morePhrase in more_phrases:
if morePhrase in message.content.lower():
userWants = True
break
if userWants:
if await self.bot.allowed_by_whitelist_blacklist(
who=message.author
) is False:
return
await asyncio.sleep(randint(1, 6))
if self.acceptInput.get(scid, False):
resp = await self.get_response(message, "GREEDY")
await message.channel.send(resp.format(message.author.name))
|
import argparse
import time
from pathlib import Path
import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import check_img_size, check_requirements, non_max_suppression, apply_classifier, scale_coords
from utils.general import xyxy2xywh, strip_optimizer, set_logging, increment_path
from utils.plots import plot_one_box
from utils.torch_utils import select_device, load_classifier, time_synchronized
from utils.draw_name import draw_name
def detect(save_img=False):
source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
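# A numeric source (camera index), a .txt list of streams, or an rtsp/rtmp/http URL is treated as a live stream below.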
webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
('rtsp://', 'rtmp://', 'http://'))
# Directories
save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Initialize
set_logging()
device = select_device(opt.device)
half = device.type != 'cpu' # half precision only supported on CUDA
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
if half:
model.half() # to FP16
# Second-stage classifier
classify = False
if classify:
modelc = load_classifier(name='resnet101', n=2) # initialize
modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval()
# Set Dataloader
vid_path, vid_writer = None, None
if webcam:
view_img = True
cudnn.benchmark = True # set True to speed up constant image size inference
dataset = LoadStreams(source, img_size=imgsz)
else:
save_img = True
dataset = LoadImages(source, img_size=imgsz)
# Get names and colors
names = model.module.names if hasattr(model, 'module') else model.names
colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
# Run inference
t0 = time.time()
img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
_ = model(img.half() if half else img) if device.type != 'cpu' else None # run once
for path, img, im0s, vid_cap in dataset:
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
# Inference
t1 = time_synchronized()
pred = model(img, augment=opt.augment)[0]
# Apply NMS
pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
t2 = time_synchronized()
# Apply Classifier
if classify:
pred = apply_classifier(pred, modelc, img, im0s)
# Process detections
for i, det in enumerate(pred): # detections per image
if webcam: # batch_size >= 1
p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
else:
p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
p = Path(p) # to Path
save_path = str(save_dir / p.name) # img.jpg
txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt
s += '%gx%g ' % img.shape[2:] # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
if len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
# Print results
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum() # detections per class
s += f'{n} {names[int(c)]}s, ' # add to string
# Write results
for *xyxy, conf, cls in reversed(det):
if save_txt: # Write to file
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format
with open(txt_path + '.txt', 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
if save_img or view_img: # Add bbox to image
cv2.imwrite("img.jpg", im0)
im0 = draw_name(im0, colors[int(cls)])  # overlay the person's name on the frame
label = f'{names[int(cls)]} {conf:.2f}'
plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)
# Print time (inference + NMS)
print(f'{s}Done. ({t2 - t1:.3f}s)')
# Stream results
if view_img:
cv2.imshow('Masks detect', im0)
# Save results (image with detections)
if save_img:
if dataset.mode == 'image':
cv2.imwrite(save_path, im0)
else: # 'video'
if vid_path != save_path: # new video
vid_path = save_path
if isinstance(vid_writer, cv2.VideoWriter):
vid_writer.release() # release previous video writer
fourcc = 'mp4v' # output video codec
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
vid_writer.write(im0)
if save_txt or save_img:
s = f"\n{len(list(save_dir.glob("labels/*.txt")))} labels saved to {save_dir / "labels"}" if save_txt else ''
print(f"Results saved to {save_dir}{s}")
print(f'Done. ({time.time() - t0:.3f}s)')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--weights', nargs='+', type=str, default='runs/train/exp/weights/best.pt',
help='model.pt path(s)')
parser.add_argument('--source', type=str, default='0', help='source') # file/folder, 0 for webcam
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--view-img', action='store_true', help='display results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--update', action='store_true', help='update all models')
parser.add_argument('--project', default='runs/detect', help='save results to project/name')
parser.add_argument('--name', default='exp', help='save results to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
global opt
opt = parser.parse_args()
print(opt)
check_requirements()
with torch.no_grad():
if opt.update: # update all models (to fix SourceChangeWarning)
for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
detect()
strip_optimizer(opt.weights)
else:
detect()
|
import argparse
import time
from pathlib import Path
import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import check_img_size, check_requirements, non_max_suppression, apply_classifier, scale_coords
from utils.general import xyxy2xywh, strip_optimizer, set_logging, increment_path
from utils.plots import plot_one_box
from utils.torch_utils import select_device, load_classifier, time_synchronized
from utils.draw_name import draw_name
def detect(save_img=False):
source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
('rtsp://', 'rtmp://', 'http://'))
# Directories
save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Initialize
set_logging()
device = select_device(opt.device)
half = device.type != 'cpu' # half precision only supported on CUDA
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
if half:
model.half() # to FP16
# Second-stage classifier
classify = False
if classify:
modelc = load_classifier(name='resnet101', n=2) # initialize
modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval()
# Set Dataloader
vid_path, vid_writer = None, None
if webcam:
view_img = True
cudnn.benchmark = True # set True to speed up constant image size inference
dataset = LoadStreams(source, img_size=imgsz)
else:
save_img = True
dataset = LoadImages(source, img_size=imgsz)
# Get names and colors
names = model.module.names if hasattr(model, 'module') else model.names
colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
# Run inference
t0 = time.time()
img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
_ = model(img.half() if half else img) if device.type != 'cpu' else None # run once
for path, img, im0s, vid_cap in dataset:
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
# Inference
t1 = time_synchronized()
pred = model(img, augment=opt.augment)[0]
# Apply NMS
pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
t2 = time_synchronized()
# Apply Classifier
if classify:
pred = apply_classifier(pred, modelc, img, im0s)
# Process detections
for i, det in enumerate(pred): # detections per image
if webcam: # batch_size >= 1
p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
else:
p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
p = Path(p) # to Path
save_path = str(save_dir / p.name) # img.jpg
txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt
s += '%gx%g ' % img.shape[2:] # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
if len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
# Print results
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum() # detections per class
s += f'{n} {names[int(c)]}s, ' # add to string
# Write results
for *xyxy, conf, cls in reversed(det):
if save_txt: # Write to file
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format
with open(txt_path + '.txt', 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
if save_img or view_img: # Add bbox to image
cv2.imwrite("img.jpg", im0)
im0 = draw_name(im0, colors[int(cls)])  # overlay the person's name on the frame
label = f'{names[int(cls)]} {conf:.2f}'
plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)
# Print time (inference + NMS)
print(f'{s}Done. ({t2 - t1:.3f}s)')
# Stream results
if view_img:
cv2.imshow('Masks detect', im0)
# Save results (image with detections)
if save_img:
if dataset.mode == 'image':
cv2.imwrite(save_path, im0)
else: # 'video'
if vid_path != save_path: # new video
vid_path = save_path
if isinstance(vid_writer, cv2.VideoWriter):
vid_writer.release() # release previous video writer
fourcc = 'mp4v' # output video codec
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
vid_writer.write(im0)
if save_txt or save_img:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
print(f"Results saved to {save_dir}{s}")
print(f'Done. ({time.time() - t0:.3f}s)')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--weights', nargs='+', type=str, default='runs/train/exp/weights/best.pt',
help='model.pt path(s)')
parser.add_argument('--source', type=str, default='0', help='source') # file/folder, 0 for webcam
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--view-img', action='store_true', help='display results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--update', action='store_true', help='update all models')
parser.add_argument('--project', default='runs/detect', help='save results to project/name')
parser.add_argument('--name', default='exp', help='save results to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
global opt
opt = parser.parse_args()
print(opt)
check_requirements()
with torch.no_grad():
if opt.update: # update all models (to fix SourceChangeWarning)
for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
detect()
strip_optimizer(opt.weights)
else:
detect()
|
"""Script to train the Hamiltonian Generative Network
"""
import ast
import argparse
import copy
import pprint
import os
import warnings
import yaml
import numpy as np
import torch
import tqdm
from utilities.integrator import Integrator
from utilities.training_logger import TrainingLogger
from utilities import loader
from utilities.loader import load_hgn, get_online_dataloaders, get_offline_dataloaders
from utilities.losses import reconstruction_loss, kld_loss, geco_constraint
from utilities.statistics import mean_confidence_interval
def _avoid_overwriting(experiment_id):
# This function throws an error if the given experiment data already exists in runs/
logdir = os.path.join('runs', experiment_id)
if os.path.exists(logdir):
assert len(os.listdir(logdir)) == 0,\
f'Experiment id {experiment_id} already exists in runs/. Remove it or change the name ' \
f'in the yaml file.'
class HgnTrainer:
def __init__(self, params, resume=False):
"""Instantiate and train the Hamiltonian Generative Network.
Args:
params (dict): Experiment parameters (see experiment_params folder).
"""
self.params = params
self.resume = resume
if not resume: # Fail if experiment_id already exist in runs/
_avoid_overwriting(params["experiment_id"])
# Set device
self.device = params["device"]
if "cuda" in self.device and not torch.cuda.is_available():
warnings.warn(
"Warning! Set to train in GPU but cuda is not available. Device is set to CPU.")
self.device = "cpu"
# Get dtype, will raise a 'module 'torch' has no attribute' if there is a typo
self.dtype = torch.__getattribute__(params["networks"]["dtype"])
# Load the HGN from the parameters onto the target device
self.hgn = load_hgn(params=self.params,
device=self.device,
dtype=self.dtype)
if 'load_path' in self.params:
self.load_and_reset(self.params, self.device, self.dtype)
# Either generate data on-the-fly or load the data from disk
if "train_data" in self.params["dataset"]:
print("Training with OFFLINE data...")
self.train_data_loader, self.test_data_loader = get_offline_dataloaders(self.params)
else:
print("Training with ONLINE data...")
self.train_data_loader, self.test_data_loader = get_online_dataloaders(self.params)
# Initialize training logger
self.training_logger = TrainingLogger(
hyper_params=self.params,
loss_freq=100,
rollout_freq=1000,
model_freq=10000
)
# Initialize tensorboard writer
self.model_save_file = os.path.join(
self.params["model_save_dir"],
self.params["experiment_id"]
)
# Define optimization modules
optim_params = [
{
'params': self.hgn.encoder.parameters(),
'lr': params["optimization"]["encoder_lr"]
},
{
'params': self.hgn.transformer.parameters(),
'lr': params["optimization"]["transformer_lr"]
},
{
'params': self.hgn.hnn.parameters(),
'lr': params["optimization"]["hnn_lr"]
},
{
'params': self.hgn.decoder.parameters(),
'lr': params["optimization"]["decoder_lr"]
},
]
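# One Adam parameter group per sub-network so the encoder, transformer, HNN and decoder can use separate learning rates.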
self.optimizer = torch.optim.Adam(optim_params)
def load_and_reset(self, params, device, dtype):
"""Load the HGN from the path specified in params['load_path'] and reset the networks in
params['reset'].
Args:
params (dict): Dictionary with all the necessary parameters to load the networks.
device (str): 'gpu:N' or 'cpu'
dtype (torch.dtype): Data type to be used in computations.
"""
self.hgn.load(params['load_path'])
if 'reset' in params:
if isinstance(params['reset'], list):
for net in params['reset']:
assert net in ['encoder', 'decoder', 'hamiltonian', 'transformer']
else:
assert params['reset'] in ['encoder', 'decoder', 'hamiltonian', 'transformer']
if 'encoder' in params['reset']:
self.hgn.encoder = loader.instantiate_encoder(params, device, dtype)
if 'decoder' in params['reset']:
self.hgn.decoder = loader.instantiate_decoder(params, device, dtype)
if 'transformer' in params['reset']:
self.hgn.transformer = loader.instantiate_transformer(params, device, dtype)
if 'hamiltonian' in params['reset']:
self.hgn.hnn = loader.instantiate_hamiltonian(params, device, dtype)
def training_step(self, rollouts):
"""Perform a training step with the given rollouts batch.
Args:
rollouts (torch.Tensor): Tensor of shape (batch_size, seq_len, channels, height, width)
corresponding to a batch of sampled rollouts.
Returns:
A dictionary of losses and the model's prediction of the rollout. The reconstruction loss and
KL divergence are floats and prediction is the HGNResult object with data of the forward pass.
"""
self.optimizer.zero_grad()
rollout_len = rollouts.shape[1]
input_frames = self.params['optimization']['input_frames']
assert input_frames <= rollout_len  # optimization.input_frames must be smaller than (or equal to) the rollout sequence length
roll = rollouts[:, :input_frames]
hgn_output = self.hgn.forward(rollout_batch=roll, n_steps=rollout_len - input_frames)
target = rollouts[:, input_frames-1:] # Fit first input_frames and try to predict the last + the next (rollout_len - input_frames)
prediction = hgn_output.reconstructed_rollout
if self.params["networks"]["variational"]:
tol = self.params["geco"]["tol"]
alpha = self.params["geco"]["alpha"]
lagrange_mult_param = self.params["geco"]["lagrange_multiplier_param"]
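# GECO-style constrained optimization: the reconstruction error enters through the constraint term C returned by geco_constraint (with tolerance tol), and the KL term is minimized while the Lagrange multiplier on C is adapted multiplicatively below using a moving average of C.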
C, rec_loss = geco_constraint(target, prediction, tol) # C has gradient
# Compute moving average of constraint C (without gradient)
if self.C_ma is None:
self.C_ma = C.detach()
else:
self.C_ma = alpha * self.C_ma + (1 - alpha) * C.detach()
C_curr = C.detach().item() # keep track for logging
C = C + (self.C_ma - C.detach()) # Move C without affecting its gradient
# Compute KL divergence
mu = hgn_output.z_mean
logvar = hgn_output.z_logvar
kld = kld_loss(mu=mu, logvar=logvar)
# normalize by number of frames, channels and pixels per frame
kld_normalizer = prediction.flatten(1).size(1)
kld = kld / kld_normalizer
# Compute losses
train_loss = kld + self.langrange_multiplier * C
# clamping the langrange multiplier to avoid inf values
self.langrange_multiplier = self.langrange_multiplier * torch.exp(
lagrange_mult_param * C.detach())
self.langrange_multiplier = torch.clamp(self.langrange_multiplier, 1e-10, 1e10)
losses = {
'loss/train': train_loss.item(),
'loss/kld': kld.item(),
'loss/C': C_curr,
'loss/C_ma': self.C_ma.item(),
'loss/rec': rec_loss.item(),
'other/langrange_mult': self.langrange_multiplier.item()
}
else: # not variational
# Compute frame reconstruction error
train_loss = reconstruction_loss(
target=target,
prediction=prediction)
losses = {'loss/train': train_loss.item()}
train_loss.backward()
self.optimizer.step()
return losses, hgn_output
def fit(self):
"""The trainer fits an HGN.
Returns:
(HGN) An HGN model that has been fitted to the data
"""
# Initial values for geco algorithm
if self.params["networks"]["variational"]:
self.langrange_multiplier = self.params["geco"]["initial_lagrange_multiplier"]
self.C_ma = None
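# C_ma is the exponential moving average of the GECO constraint; it is initialized on the first training step.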
# TRAIN
for ep in range(self.params["optimization"]["epochs"]):
print("Epoch %s / %s" % (str(ep + 1), str(self.params["optimization"]["epochs"])))
pbar = tqdm.tqdm(self.train_data_loader)
for batch_idx, rollout_batch in enumerate(pbar):
# Move to device and change dtype
rollout_batch = rollout_batch.to(self.device).type(self.dtype)
# Do an optimization step
losses, prediction = self.training_step(rollouts=rollout_batch)
# Log progress
self.training_logger.step(losses=losses,
rollout_batch=rollout_batch,
prediction=prediction,
model=self.hgn)
# Progress-bar msg
msg = ", ".join([
f"{k}: {v:.2e}" for k, v in losses.items() if v is not None
])
pbar.set_description(msg)
# Save model
self.hgn.save(self.model_save_file)
self.test()
return self.hgn
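# Usage sketch: given an experiment params dict (see the experiment_params folder),
#   trainer = HgnTrainer(params)
#   trained_hgn = trainer.fit()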
def compute_reconst_kld_errors(self, dataloader):
"""Computes reconstruction error and KL divergence.
Args:
dataloader (torch.utils.data.DataLoader): DataLoader to retrieve errors from.
Returns:
(reconst_error_mean, reconst_error_h), (kld_mean, kld_h): Tuples containing the mean and 95%
confidence interval.
"""
first = True
pbar = tqdm.tqdm(dataloader)
for _, rollout_batch in enumerate(pbar):
# Move to device and change dtype
rollout_batch = rollout_batch.to(self.device).type(self.dtype)
rollout_len = rollout_batch.shape[1]
input_frames = self.params['optimization']['input_frames']
assert input_frames <= rollout_len  # optimization.input_frames must be smaller than (or equal to) the rollout sequence length
roll = rollout_batch[:, :input_frames]
hgn_output = self.hgn.forward(rollout_batch=roll, n_steps=rollout_len - input_frames)
target = rollout_batch[:, input_frames-1:] # Fit first input_frames and try to predict the last + the next (rollout_len - input_frames)
prediction = hgn_output.reconstructed_rollout
error = reconstruction_loss(
target=target,
prediction=prediction, mean_reduction=False).detach().cpu(
).numpy()
if self.params["networks"]["variational"]:
kld = kld_loss(mu=hgn_output.z_mean, logvar=hgn_output.z_logvar, mean_reduction=False).detach().cpu(
).numpy()
# normalize by number of frames, channels and pixels per frame
kld_normalizer = prediction.flatten(1).size(1)
kld = kld / kld_normalizer
if first:
first = False
set_errors = error
if self.params["networks"]["variational"]:
set_klds = kld
else:
set_errors = np.concatenate((set_errors, error))
if self.params["networks"]["variational"]:
set_klds = np.concatenate((set_klds, kld))
err_mean, err_h = mean_confidence_interval(set_errors)
if self.params["networks"]["variational"]:
kld_mean, kld_h = mean_confidence_interval(set_klds)
return (err_mean, err_h), (kld_mean, kld_h)
else:
return (err_mean, err_h), None
def test(self):
"""Test after the training is finished and logs result to tensorboard.
"""
print("Calculating final training error...")
(err_mean, err_h), kld = self.compute_reconst_kld_errors(self.train_data_loader)
self.training_logger.log_error("Train reconstruction error", err_mean, err_h)
if kld is not None:
kld_mean, kld_h = kld
self.training_logger.log_error("Train KL divergence", kld_mean, kld_h)
print("Calculating final test error...")
(err_mean, err_h), kld = self.compute_reconst_kld_errors(self.test_data_loader)
self.training_logger.log_error("Test reconstruction error", err_mean, err_h)
if kld is not None:
kld_mean, kld_h = kld
self.training_logger.log_error("Test KL divergence", kld_mean, kld_h)
def _overwrite_config_with_cmd_arguments(config, args):
if args.name is not None:
config['experiment_id'] = args.name[0]
if args.epochs is not None:
config['optimization']['epochs'] = args.epochs[0]
if args.dataset_path is not None:
# Read the parameters.yaml file in the given dataset path
        dataset_config = _read_config(os.path.join(args.dataset_path[0], 'parameters.yaml'))
for key, value in dataset_config.items():
config[key] = value
if args.env is not None:
if 'train_data' in config['dataset']:
raise ValueError(
f'--env was given but configuration is set for offline training: '
                f'train_data={config["dataset"]["train_data"]}'
)
env_params = _read_config(DEFAULT_ENVIRONMENTS_PATH + args.env[0] + '.yaml')
config['environment'] = env_params['environment']
if args.params is not None:
for p in args.params:
key, value = p.split('=')
ptr = config
keys = key.split('.')
for i, k in enumerate(keys):
if i == len(keys) - 1:
ptr[k] = ast.literal_eval(value)
else:
ptr = ptr[k]
if args.load is not None:
config['load_path'] = args.load[0]
if args.reset is not None:
config['reset'] = args.reset
def _read_config(config_file):
with open(config_file, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
return config
def _merge_configs(train_config, dataset_config):
config = copy.deepcopy(train_config)
for key, value in dataset_config.items():
config[key] = value
# If the config specifies a dataset path, we take the rollout from the configuration file
# in the given dataset
if 'dataset' in config and 'train_data' in config['dataset']:
dataset_config = _read_config( # Read parameters.yaml in root of given dataset
os.path.join(os.path.dirname(config['dataset']['train_data']), 'parameters.yaml'))
config['dataset']['rollout'] = dataset_config['dataset']['rollout']
return config
def _ask_confirmation(config):
printer = pprint.PrettyPrinter(indent=4)
print(f'The training will be run with the following configuration:')
    printed_config = copy.deepcopy(config)
printed_config.pop('networks')
printer.pprint(printed_config)
print('Proceed? (y/n):')
if input() != 'y':
print('Abort.')
exit()
if __name__ == "__main__":
DEFAULT_TRAIN_CONFIG_FILE = "experiment_params/train_config_default.yaml"
DEFAULT_DATASET_CONFIG_FILE = "experiment_params/dataset_online_default.yaml"
DEFAULT_ENVIRONMENTS_PATH = "experiment_params/default_environments/"
DEFAULT_SAVE_MODELS_DIR = "saved_models/"
parser = argparse.ArgumentParser()
parser.add_argument(
'--train-config', action='store', nargs=1, type=str, required=True,
help=f'Path to the training configuration yaml file.'
)
parser.add_argument(
'--dataset-config', action='store', nargs=1, type=str, required=False,
help=f'Path to the dataset configuration yaml file.'
)
parser.add_argument(
'--name', action='store', nargs=1, required=False,
help='If specified, this name will be used instead of experiment_id of the yaml file.'
)
parser.add_argument(
'--epochs', action='store', nargs=1, type=int, required=False,
help='The number of training epochs. If not specified, optimization.epochs of the '
'training configuration will be used.'
)
parser.add_argument(
'--env', action='store', nargs=1, type=str, required=False,
help='The environment to use (for online training only). Possible values are '
'\'pendulum\', \'spring\', \'two_bodies\', \'three_bodies\', corresponding to '
'environment configurations in experiment_params/default_environments/. If not '
'specified, the environment specified in the given --dataset-config will be used.'
)
parser.add_argument(
'--dataset-path', action='store', nargs=1, type=str, required=False,
help='Path to a stored dataset to use for training. For offline training only. In this '
'case no dataset configuration file will be loaded.'
)
parser.add_argument(
'--params', action='store', nargs='+', required=False,
help='Override one or more parameters in the config. The format of an argument is '
'param_name=param_value. Nested parameters are accessible by using a dot, '
             'e.g. --params dataset.img_size=32. IMPORTANT: lists must be enclosed in double '
             'quotes, e.g. --params environment.mass="[0.5, 0.5]".'
)
parser.add_argument(
        '-y', action='store_true', default=False, required=False,
help='Whether to skip asking for user confirmation before starting the training.'
)
parser.add_argument(
'--resume', action='store', required=False, nargs='?', default=None,
help='NOT IMPLEMENTED YET. Resume the training from a saved model. If a path is provided, '
'the training will be resumed from the given checkpoint. Otherwise, the last '
'checkpoint will be taken from saved_models/<experiment_id>.'
)
parser.add_argument(
'--load', action='store', type=str, required=False, nargs=1,
help='Path from which to load the HGN.'
)
parser.add_argument(
'--reset', action='store', nargs='+', required=False,
        help='Use only in combination with --load, tells the trainer to reinstantiate the given '
'networks. Values: \'encoder\', \'transformer\', \'decoder\', \'hamiltonian\'.'
)
_args = parser.parse_args()
# Read configurations
_train_config = _read_config(_args.train_config[0])
if _args.dataset_path is None: # Will use the dataset config file (or default if not given)
_dataset_config_file = DEFAULT_DATASET_CONFIG_FILE if _args.dataset_config is None else \
_args.dataset_config[0]
_dataset_config = _read_config(_dataset_config_file)
_config = _merge_configs(_train_config, _dataset_config)
else: # Will use the dataset given in the command line arguments
assert _args.dataset_config is None, 'Both --dataset-path and --dataset-config were given.'
_config = _train_config
# Overwrite configuration with command line arguments
_overwrite_config_with_cmd_arguments(_config, _args)
# Show configuration and ask user for confirmation
if not _args.y:
_ask_confirmation(_config)
# Train HGN network
trainer = HgnTrainer(_config)
hgn = trainer.fit()
|
"""Script to train the Hamiltonian Generative Network
"""
import ast
import argparse
import copy
import pprint
import os
import warnings
import yaml
import numpy as np
import torch
import tqdm
from utilities.integrator import Integrator
from utilities.training_logger import TrainingLogger
from utilities import loader
from utilities.loader import load_hgn, get_online_dataloaders, get_offline_dataloaders
from utilities.losses import reconstruction_loss, kld_loss, geco_constraint
from utilities.statistics import mean_confidence_interval
def _avoid_overwriting(experiment_id):
# This function throws an error if the given experiment data already exists in runs/
logdir = os.path.join('runs', experiment_id)
if os.path.exists(logdir):
assert len(os.listdir(logdir)) == 0,\
        f'Experiment id {experiment_id} already exists in runs/. Remove it, or change the name ' \
f'in the yaml file.'
class HgnTrainer:
def __init__(self, params, resume=False):
"""Instantiate and train the Hamiltonian Generative Network.
Args:
params (dict): Experiment parameters (see experiment_params folder).
"""
self.params = params
self.resume = resume
if not resume: # Fail if experiment_id already exist in runs/
_avoid_overwriting(params["experiment_id"])
# Set device
self.device = params["device"]
if "cuda" in self.device and not torch.cuda.is_available():
warnings.warn(
"Warning! Set to train in GPU but cuda is not available. Device is set to CPU.")
self.device = "cpu"
# Get dtype, will raise a 'module 'torch' has no attribute' if there is a typo
self.dtype = torch.__getattribute__(params["networks"]["dtype"])
        # Load the HGN from parameters onto the device
self.hgn = load_hgn(params=self.params,
device=self.device,
dtype=self.dtype)
if 'load_path' in self.params:
self.load_and_reset(self.params, self.device, self.dtype)
# Either generate data on-the-fly or load the data from disk
if "train_data" in self.params["dataset"]:
print("Training with OFFLINE data...")
self.train_data_loader, self.test_data_loader = get_offline_dataloaders(self.params)
else:
print("Training with ONLINE data...")
self.train_data_loader, self.test_data_loader = get_online_dataloaders(self.params)
# Initialize training logger
self.training_logger = TrainingLogger(
hyper_params=self.params,
loss_freq=100,
rollout_freq=1000,
model_freq=10000
)
        # Path where the trained model will be saved
self.model_save_file = os.path.join(
self.params["model_save_dir"],
self.params["experiment_id"]
)
# Define optimization modules
optim_params = [
{
'params': self.hgn.encoder.parameters(),
'lr': params["optimization"]["encoder_lr"]
},
{
'params': self.hgn.transformer.parameters(),
'lr': params["optimization"]["transformer_lr"]
},
{
'params': self.hgn.hnn.parameters(),
'lr': params["optimization"]["hnn_lr"]
},
{
'params': self.hgn.decoder.parameters(),
'lr': params["optimization"]["decoder_lr"]
},
]
self.optimizer = torch.optim.Adam(optim_params)
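        # Clarifying note (added): Adam receives one parameter group per sub-network,
        # so each module can be given its own learning rate in the YAML, e.g.
        # (illustrative values only, not taken from the repository defaults):
        #   optimization:
        #     encoder_lr: 1.5e-4
        #     transformer_lr: 1.5e-4
        #     hnn_lr: 1.5e-4
        #     decoder_lr: 1.5e-4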
def load_and_reset(self, params, device, dtype):
"""Load the HGN from the path specified in params['load_path'] and reset the networks in
params['reset'].
Args:
params (dict): Dictionary with all the necessary parameters to load the networks.
device (str): 'gpu:N' or 'cpu'
dtype (torch.dtype): Data type to be used in computations.
"""
self.hgn.load(params['load_path'])
if 'reset' in params:
if isinstance(params['reset'], list):
for net in params['reset']:
assert net in ['encoder', 'decoder', 'hamiltonian', 'transformer']
else:
assert params['reset'] in ['encoder', 'decoder', 'hamiltonian', 'transformer']
if 'encoder' in params['reset']:
self.hgn.encoder = loader.instantiate_encoder(params, device, dtype)
if 'decoder' in params['reset']:
self.hgn.decoder = loader.instantiate_decoder(params, device, dtype)
if 'transformer' in params['reset']:
self.hgn.transformer = loader.instantiate_transformer(params, device, dtype)
if 'hamiltonian' in params['reset']:
self.hgn.hnn = loader.instantiate_hamiltonian(params, device, dtype)
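    # Illustrative usage of load_and_reset (assumed path, shown only as a sketch):
    # loading a trained model while re-initialising just the decoder would correspond
    # to a config such as
    #   params = {'load_path': 'saved_models/pendulum', 'reset': ['decoder'], ...}
    # which __init__ picks up automatically when 'load_path' is present; every network
    # except the decoder then keeps its loaded weights.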
def training_step(self, rollouts):
"""Perform a training step with the given rollouts batch.
Args:
rollouts (torch.Tensor): Tensor of shape (batch_size, seq_len, channels, height, width)
corresponding to a batch of sampled rollouts.
Returns:
A dictionary of losses and the model's prediction of the rollout. The reconstruction loss and
KL divergence are floats and prediction is the HGNResult object with data of the forward pass.
"""
self.optimizer.zero_grad()
rollout_len = rollouts.shape[1]
input_frames = self.params['optimization']['input_frames']
        assert(input_frames <= rollout_len)  # optimization.input_frames must be smaller than (or equal to) rollout.sequence_length
roll = rollouts[:, :input_frames]
hgn_output = self.hgn.forward(rollout_batch=roll, n_steps=rollout_len - input_frames)
target = rollouts[:, input_frames-1:] # Fit first input_frames and try to predict the last + the next (rollout_len - input_frames)
prediction = hgn_output.reconstructed_rollout
if self.params["networks"]["variational"]:
tol = self.params["geco"]["tol"]
alpha = self.params["geco"]["alpha"]
lagrange_mult_param = self.params["geco"]["lagrange_multiplier_param"]
C, rec_loss = geco_constraint(target, prediction, tol) # C has gradient
# Compute moving average of constraint C (without gradient)
if self.C_ma is None:
self.C_ma = C.detach()
else:
self.C_ma = alpha * self.C_ma + (1 - alpha) * C.detach()
C_curr = C.detach().item() # keep track for logging
C = C + (self.C_ma - C.detach()) # Move C without affecting its gradient
# Compute KL divergence
mu = hgn_output.z_mean
logvar = hgn_output.z_logvar
kld = kld_loss(mu=mu, logvar=logvar)
# normalize by number of frames, channels and pixels per frame
kld_normalizer = prediction.flatten(1).size(1)
kld = kld / kld_normalizer
# Compute losses
train_loss = kld + self.langrange_multiplier * C
            # clamp the Lagrange multiplier to keep it from exploding or vanishing
self.langrange_multiplier = self.langrange_multiplier * torch.exp(
lagrange_mult_param * C.detach())
self.langrange_multiplier = torch.clamp(self.langrange_multiplier, 1e-10, 1e10)
losses = {
'loss/train': train_loss.item(),
'loss/kld': kld.item(),
'loss/C': C_curr,
'loss/C_ma': self.C_ma.item(),
'loss/rec': rec_loss.item(),
'other/langrange_mult': self.langrange_multiplier.item()
}
else: # not variational
# Compute frame reconstruction error
train_loss = reconstruction_loss(
target=target,
prediction=prediction)
losses = {'loss/train': train_loss.item()}
train_loss.backward()
self.optimizer.step()
return losses, hgn_output
def fit(self):
"""The trainer fits an HGN.
Returns:
(HGN) An HGN model that has been fitted to the data
"""
# Initial values for geco algorithm
if self.params["networks"]["variational"]:
self.langrange_multiplier = self.params["geco"]["initial_lagrange_multiplier"]
self.C_ma = None
# TRAIN
for ep in range(self.params["optimization"]["epochs"]):
print("Epoch %s / %s" % (str(ep + 1), str(self.params["optimization"]["epochs"])))
pbar = tqdm.tqdm(self.train_data_loader)
for batch_idx, rollout_batch in enumerate(pbar):
# Move to device and change dtype
rollout_batch = rollout_batch.to(self.device).type(self.dtype)
# Do an optimization step
losses, prediction = self.training_step(rollouts=rollout_batch)
# Log progress
self.training_logger.step(losses=losses,
rollout_batch=rollout_batch,
prediction=prediction,
model=self.hgn)
# Progress-bar msg
msg = ", ".join([
f"{k}: {v:.2e}" for k, v in losses.items() if v is not None
])
pbar.set_description(msg)
# Save model
self.hgn.save(self.model_save_file)
self.test()
return self.hgn
def compute_reconst_kld_errors(self, dataloader):
"""Computes reconstruction error and KL divergence.
Args:
dataloader (torch.utils.data.DataLoader): DataLoader to retrieve errors from.
Returns:
(reconst_error_mean, reconst_error_h), (kld_mean, kld_h): Tuples where the mean and 95%
            confidence interval are given.
"""
first = True
pbar = tqdm.tqdm(dataloader)
for _, rollout_batch in enumerate(pbar):
# Move to device and change dtype
rollout_batch = rollout_batch.to(self.device).type(self.dtype)
rollout_len = rollout_batch.shape[1]
input_frames = self.params['optimization']['input_frames']
            assert(input_frames <= rollout_len)  # optimization.input_frames must be smaller than (or equal to) rollout.sequence_length
roll = rollout_batch[:, :input_frames]
hgn_output = self.hgn.forward(rollout_batch=roll, n_steps=rollout_len - input_frames)
target = rollout_batch[:, input_frames-1:] # Fit first input_frames and try to predict the last + the next (rollout_len - input_frames)
prediction = hgn_output.reconstructed_rollout
error = reconstruction_loss(
target=target,
prediction=prediction, mean_reduction=False).detach().cpu(
).numpy()
if self.params["networks"]["variational"]:
kld = kld_loss(mu=hgn_output.z_mean, logvar=hgn_output.z_logvar, mean_reduction=False).detach().cpu(
).numpy()
# normalize by number of frames, channels and pixels per frame
kld_normalizer = prediction.flatten(1).size(1)
kld = kld / kld_normalizer
if first:
first = False
set_errors = error
if self.params["networks"]["variational"]:
set_klds = kld
else:
set_errors = np.concatenate((set_errors, error))
if self.params["networks"]["variational"]:
set_klds = np.concatenate((set_klds, kld))
err_mean, err_h = mean_confidence_interval(set_errors)
if self.params["networks"]["variational"]:
kld_mean, kld_h = mean_confidence_interval(set_klds)
return (err_mean, err_h), (kld_mean, kld_h)
else:
return (err_mean, err_h), None
def test(self):
"""Test after the training is finished and logs result to tensorboard.
"""
print("Calculating final training error...")
(err_mean, err_h), kld = self.compute_reconst_kld_errors(self.train_data_loader)
self.training_logger.log_error("Train reconstruction error", err_mean, err_h)
if kld is not None:
kld_mean, kld_h = kld
self.training_logger.log_error("Train KL divergence", kld_mean, kld_h)
print("Calculating final test error...")
(err_mean, err_h), kld = self.compute_reconst_kld_errors(self.test_data_loader)
self.training_logger.log_error("Test reconstruction error", err_mean, err_h)
if kld is not None:
kld_mean, kld_h = kld
self.training_logger.log_error("Test KL divergence", kld_mean, kld_h)
def _overwrite_config_with_cmd_arguments(config, args):
if args.name is not None:
config['experiment_id'] = args.name[0]
if args.epochs is not None:
config['optimization']['epochs'] = args.epochs[0]
if args.dataset_path is not None:
# Read the parameters.yaml file in the given dataset path
        dataset_config = _read_config(os.path.join(args.dataset_path[0], 'parameters.yaml'))
for key, value in dataset_config.items():
config[key] = value
if args.env is not None:
if 'train_data' in config['dataset']:
raise ValueError(
f'--env was given but configuration is set for offline training: '
f'train_data={config["dataset"]["train_data"]}'
)
env_params = _read_config(DEFAULT_ENVIRONMENTS_PATH + args.env[0] + '.yaml')
config['environment'] = env_params['environment']
if args.params is not None:
for p in args.params:
key, value = p.split('=')
ptr = config
keys = key.split('.')
for i, k in enumerate(keys):
if i == len(keys) - 1:
ptr[k] = ast.literal_eval(value)
else:
ptr = ptr[k]
if args.load is not None:
config['load_path'] = args.load[0]
if args.reset is not None:
config['reset'] = args.reset
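# Illustrative example (assumed values) of the --params override handled above:
#   --params dataset.img_size=32 geco.tol=1e-4
# walks the nested config along the dotted keys and assigns the literal-evaluated
# values, i.e. config['dataset']['img_size'] = 32 and config['geco']['tol'] = 1e-4.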
def _read_config(config_file):
with open(config_file, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
return config
def _merge_configs(train_config, dataset_config):
config = copy.deepcopy(train_config)
for key, value in dataset_config.items():
config[key] = value
# If the config specifies a dataset path, we take the rollout from the configuration file
# in the given dataset
if 'dataset' in config and 'train_data' in config['dataset']:
dataset_config = _read_config( # Read parameters.yaml in root of given dataset
os.path.join(os.path.dirname(config['dataset']['train_data']), 'parameters.yaml'))
config['dataset']['rollout'] = dataset_config['dataset']['rollout']
return config
def _ask_confirmation(config):
printer = pprint.PrettyPrinter(indent=4)
print(f'The training will be run with the following configuration:')
    printed_config = copy.deepcopy(config)
printed_config.pop('networks')
printer.pprint(printed_config)
print('Proceed? (y/n):')
if input() != 'y':
print('Abort.')
exit()
if __name__ == "__main__":
DEFAULT_TRAIN_CONFIG_FILE = "experiment_params/train_config_default.yaml"
DEFAULT_DATASET_CONFIG_FILE = "experiment_params/dataset_online_default.yaml"
DEFAULT_ENVIRONMENTS_PATH = "experiment_params/default_environments/"
DEFAULT_SAVE_MODELS_DIR = "saved_models/"
parser = argparse.ArgumentParser()
parser.add_argument(
'--train-config', action='store', nargs=1, type=str, required=True,
help=f'Path to the training configuration yaml file.'
)
parser.add_argument(
'--dataset-config', action='store', nargs=1, type=str, required=False,
help=f'Path to the dataset configuration yaml file.'
)
parser.add_argument(
'--name', action='store', nargs=1, required=False,
help='If specified, this name will be used instead of experiment_id of the yaml file.'
)
parser.add_argument(
'--epochs', action='store', nargs=1, type=int, required=False,
help='The number of training epochs. If not specified, optimization.epochs of the '
'training configuration will be used.'
)
parser.add_argument(
'--env', action='store', nargs=1, type=str, required=False,
help='The environment to use (for online training only). Possible values are '
'\'pendulum\', \'spring\', \'two_bodies\', \'three_bodies\', corresponding to '
'environment configurations in experiment_params/default_environments/. If not '
'specified, the environment specified in the given --dataset-config will be used.'
)
parser.add_argument(
'--dataset-path', action='store', nargs=1, type=str, required=False,
help='Path to a stored dataset to use for training. For offline training only. In this '
'case no dataset configuration file will be loaded.'
)
parser.add_argument(
'--params', action='store', nargs='+', required=False,
help='Override one or more parameters in the config. The format of an argument is '
'param_name=param_value. Nested parameters are accessible by using a dot, '
             'e.g. --params dataset.img_size=32. IMPORTANT: lists must be enclosed in double '
             'quotes, e.g. --params environment.mass="[0.5, 0.5]".'
)
parser.add_argument(
        '-y', action='store_true', default=False, required=False,
help='Whether to skip asking for user confirmation before starting the training.'
)
parser.add_argument(
'--resume', action='store', required=False, nargs='?', default=None,
help='NOT IMPLEMENTED YET. Resume the training from a saved model. If a path is provided, '
'the training will be resumed from the given checkpoint. Otherwise, the last '
'checkpoint will be taken from saved_models/<experiment_id>.'
)
parser.add_argument(
'--load', action='store', type=str, required=False, nargs=1,
help='Path from which to load the HGN.'
)
parser.add_argument(
'--reset', action='store', nargs='+', required=False,
        help='Use only in combination with --load, tells the trainer to reinstantiate the given '
'networks. Values: \'encoder\', \'transformer\', \'decoder\', \'hamiltonian\'.'
)
_args = parser.parse_args()
# Read configurations
_train_config = _read_config(_args.train_config[0])
if _args.dataset_path is None: # Will use the dataset config file (or default if not given)
_dataset_config_file = DEFAULT_DATASET_CONFIG_FILE if _args.dataset_config is None else \
_args.dataset_config[0]
_dataset_config = _read_config(_dataset_config_file)
_config = _merge_configs(_train_config, _dataset_config)
else: # Will use the dataset given in the command line arguments
assert _args.dataset_config is None, 'Both --dataset-path and --dataset-config were given.'
_config = _train_config
# Overwrite configuration with command line arguments
_overwrite_config_with_cmd_arguments(_config, _args)
# Show configuration and ask user for confirmation
if not _args.y:
_ask_confirmation(_config)
# Train HGN network
trainer = HgnTrainer(_config)
hgn = trainer.fit()
|
"""Demo unanimous voting: multiparty matching without embarrassments.
Unanimous voting between parties P[0],...,P[t] is implemented by securely
evaluating the product of their votes (using 1s and 0s to encode "yes"
and "no" votes, respectively) and revealing only whether the product
equals 1 (unanimous agreement) or 0 (someone disagrees). The MPyC method
mpc.all() modeled after Python's built-in function all() can be used
for this purpose.
The secure computation is designed to be maximally private, meaning
that any t parties colluding against the remaining party should be
unsuccessful: the colluding parties should not be able to find out
the vote of the remaining party in case they do not agree (and no
unanimous agreement will be reached anyway; if the colluding parties
all vote "yes", then there remains nothing to hide).
To achieve maximal privacy, we add t parties P[t+1],...,P[2t] to the
secure computation. These additional parties provide no input to the
secure computation and are trusted not to collude with any of the
parties P[0],...,P[t]. This way we have m=2t+1 parties in total, and we
can tolerate the desired maximum of t corrupt parties among P[0],...,P[t].
Moreover, the trusted parties do not receive any output, so they do
not learn anything at all, as their number does not exceed t.
Unanimous voting is a generalization of "matching without embarrassments",
where Alice and Bob like to find out if they want to go on a second date.
They might simply tell each other if they are interested, and in case they
are both interested, this simple solution is satisfactory. However, if for
example Alice is not interested in Bob but Bob is interested in Alice, Bob
may feel embarrassed afterwards because he expressed interest in her; if he
would have known beforehand that Alice was not interested anyway, he might
have told Alice that he was not interested either. See also this YouTube
video https://youtu.be/JnmESTrsQbg by TNO.
Matching without embarrassments corresponds to unanimous voting with t=1.
Alice acts as party P[0], Bob as party P[1], and P[2] is the trusted (third)
party. We run a 3-party secure computation tolerating 1 corrupt party.
Alice and Bob provide bits x and y as their respective private inputs;
the trusted party P[2] provides no input. Of course, Alice and Bob do not
collude with each other, and P[2] is assumed to collude with neither of them.
Therefore, Alice and Bob do not learn anything beyond what they can deduce
from their output bit x*y and their respective input bits x and y; the
trusted party P[2] receives no output, hence learns nothing at all.
"""
import sys
from mpyc.runtime import mpc
m = len(mpc.parties)
if m%2 == 0:
print('Odd number of parties required.')
sys.exit()
t = m//2
voters = list(range(t+1)) # parties P[0],...,P[t]
if mpc.pid in voters:
vote = int(sys.argv[1]) if sys.argv[1:] else 1 # default "yes"
else:
vote = None # no input
secbit = mpc.SecInt(1) # 1-bit integers suffice
mpc.run(mpc.start())
votes = mpc.input(secbit(vote), senders=voters)
result = mpc.run(mpc.output(mpc.all(votes), receivers=voters))
mpc.run(mpc.shutdown())
if result is None: # no output
print('Thanks for serving as oblivious matchmaker;)')
elif result:
    print(f'Match: unanimous agreement between {t+1} part{"ies" if t else "y"}!')
else:
    print(f'No match: someone disagrees among {t+1} part{"ies" if t else "y"}?')
|
"""Demo unanimous voting: multiparty matching without embarrassments.
Unanimous voting between parties P[0],...,P[t] is implemented by securely
evaluating the product of their votes (using 1s and 0s to encode "yes"
and "no" votes, respectively) and revealing only whether the product
equals 1 (unanimous agreement) or 0 (someone disagrees). The MPyC method
mpc.all() modeled after Python's built-in function all() can be used
for this purpose.
The secure computation is designed to be maximally private, meaning
that any t parties colluding against the remaining party should be
unsuccessful: the colluding parties should not be able to find out
the vote of the remaining party in case they do not agree (and no
unanimous agreement will be reached anyway; if the colluding parties
all vote "yes", then there remains nothing to hide).
To achieve maximal privacy, we add t parties P[t+1],...,P[2t] to the
secure computation. These additional parties provide no input to the
secure computation and are trusted not to collude with any of the
parties P[0],...,P[t]. This way we have m=2t+1 parties in total, and we
can tolerate the desired maximum of t corrupt parties among P[0],...,P[t].
Moreover, the trusted parties do not receive any output, so they do
not learn anything at all, as their number does not exceed t.
Unanimous voting is a generalization of "matching without embarrassments",
where Alice and Bob like to find out if they want to go on a second date.
They might simply tell each other if they are interested, and in case they
are both interested, this simple solution is satisfactory. However, if for
example Alice is not interested in Bob but Bob is interested in Alice, Bob
may feel embarrassed afterwards because he expressed interest in her; if he
would have known beforehand that Alice was not interested anyway, he might
have told Alice that he was not interested either. See also this YouTube
video https://youtu.be/JnmESTrsQbg by TNO.
Matching without embarrassments corresponds to unanimous voting with t=1.
Alice acts as party P[0], Bob as party P[1], and P[2] is the trusted (third)
party. We run a 3-party secure computation tolerating 1 corrupt party.
Alice and Bob provide bits x and y as their respective private inputs;
the trusted party P[2] provides no input. Of course, Alice and Bob do not
collude with each other, and P[2] is assumed to collude with neither of them.
Therefore, Alice and Bob do not learn anything beyond what they can deduce
from their output bit x*y and their respective input bits x and y; the
trusted party P[2] receives no output, hence learns nothing at all.
"""
import sys
from mpyc.runtime import mpc
m = len(mpc.parties)
if m%2 == 0:
print('Odd number of parties required.')
sys.exit()
t = m//2
voters = list(range(t+1)) # parties P[0],...,P[t]
if mpc.pid in voters:
vote = int(sys.argv[1]) if sys.argv[1:] else 1 # default "yes"
else:
vote = None # no input
secbit = mpc.SecInt(1) # 1-bit integers suffice
mpc.run(mpc.start())
votes = mpc.input(secbit(vote), senders=voters)
result = mpc.run(mpc.output(mpc.all(votes), receivers=voters))
mpc.run(mpc.shutdown())
if result is None: # no output
print('Thanks for serving as oblivious matchmaker;)')
elif result:
print(f'Match: unanimous agreement between {t+1} part{"ies" if t else "y"}!')
else:
print(f'No match: someone disagrees among {t+1} part{"ies" if t else "y"}?')
|
from utils import stringifySong
from datetime import datetime
import logging
logger = logging.getLogger(__name__)
class Migrater(object):
"""Migrater"""
def __init__(self, migrateFrom, migrateTo, mock=False):
# Store clients
self.source = migrateFrom
self.target = migrateTo
self.isMocked = mock
logger.info(f"MOCK MIGRATION? {mock}")
def migratePlaylists(self, interactive=True):
# Wrapper to gather input from the user
def should(text):
value = input(text)
return value.strip().lower() == 'y'
# Core to migrate each playlist
def migratePlaylist(playlist):
# Create a new playlist in the target server with a unique name
if not self.isMocked:
                targetPlaylist = self.target.createPlaylist(f"{playlist['name']} at {int(datetime.now().timestamp())}")
logger.info(f"Target playlist: '{targetPlaylist["name"]}'")
# Add each song in this playlist to the new targetPlaylist
playlist = self.source.getPlaylist(playlist["id"])
for song in playlist['entry']:
try:
matchSong = self.target.findClosestMatchToSong(song)
if matchSong:
logger.info(f"Migrating {stringifySong(song)} as {stringifySong(matchSong)}")
if not self.isMocked:
self.target.addSongToPlaylist(targetPlaylist['id'], matchSong['id'])
else:
logger.warning(f"No match to {stringifySong(song)}")
except Exception as e:
logger.exception(f"Unable to migrate song {stringifySong(song)}")
        # Perform playlists migration
try:
# Get all playlists
playlists = self.source.getPlaylists()
for playlist in playlists:
# Ask if this playlist should be migrated only if running on interactive mode
                if not interactive or should(f"Migrate '{playlist['name']}' with {playlist['songCount']} songs Y/[N]? "):
                    logger.info(f"Migrating playlist '{playlist['name']} ({playlist['songCount']})'")
try:
migratePlaylist(playlist)
except Exception as e:
logger.exception(f"Unable to migrate playlist '{playlist["name"]} ({playlist["songCount"]})'")
except Exception as e:
logger.exception("Unable to migrate playlists")
def migrateStarred(self):
# TODO: support albums and artists too
try:
songs = self.source.getStarredSongs()
# Migrate each song
logger.info(f"{len(songs)} starred songs to migrate")
for song in songs:
try:
matchSong = self.target.findClosestMatchToSong(song)
if matchSong:
logger.info(f"Migrating {stringifySong(song)} as {stringifySong(matchSong)}")
if not self.isMocked:
self.target.starSong(matchSong['id'])
else:
logger.warning(f"No match to {stringifySong(song)}")
except Exception as e:
logger.exception(f"Unable to star song {stringifySong(song)}")
except Exception as e:
logger.exception("Unable to migrate starred songs")
|
from utils import stringifySong
from datetime import datetime
import logging
logger = logging.getLogger(__name__)
class Migrater(object):
"""Migrater"""
def __init__(self, migrateFrom, migrateTo, mock=False):
# Store clients
self.source = migrateFrom
self.target = migrateTo
self.isMocked = mock
logger.info(f"MOCK MIGRATION? {mock}")
def migratePlaylists(self, interactive=True):
# Wrapper to gather input from the user
def should(text):
value = input(text)
return value.strip().lower() == 'y'
# Core to migrate each playlist
def migratePlaylist(playlist):
# Create a new playlist in the target server with a unique name
if not self.isMocked:
targetPlaylist = self.target.createPlaylist(f"{playlist['name']} at {int(datetime.now().timestamp())}")
logger.info(f"Target playlist: '{targetPlaylist['name']}'")
# Add each song in this playlist to the new targetPlaylist
playlist = self.source.getPlaylist(playlist["id"])
for song in playlist['entry']:
try:
matchSong = self.target.findClosestMatchToSong(song)
if matchSong:
logger.info(f"Migrating {stringifySong(song)} as {stringifySong(matchSong)}")
if not self.isMocked:
self.target.addSongToPlaylist(targetPlaylist['id'], matchSong['id'])
else:
logger.warning(f"No match to {stringifySong(song)}")
except Exception as e:
logger.exception(f"Unable to migrate song {stringifySong(song)}")
        # Perform playlists migration
try:
# Get all playlists
playlists = self.source.getPlaylists()
for playlist in playlists:
# Ask if this playlist should be migrated only if running on interactive mode
if not interactive or should(f"Migrate '{playlist['name']}' with {playlist['songCount']} songs Y/[N]? "):
logger.info(f"Migrating playlist '{playlist['name']} ({playlist['songCount']})'")
try:
migratePlaylist(playlist)
except Exception as e:
logger.exception(f"Unable to migrate playlist '{playlist['name']} ({playlist['songCount']})'")
except Exception as e:
logger.exception("Unable to migrate playlists")
def migrateStarred(self):
# TODO: support albums and artists too
try:
songs = self.source.getStarredSongs()
# Migrate each song
logger.info(f"{len(songs)} starred songs to migrate")
for song in songs:
try:
matchSong = self.target.findClosestMatchToSong(song)
if matchSong:
logger.info(f"Migrating {stringifySong(song)} as {stringifySong(matchSong)}")
if not self.isMocked:
self.target.starSong(matchSong['id'])
else:
logger.warning(f"No match to {stringifySong(song)}")
except Exception as e:
logger.exception(f"Unable to star song {stringifySong(song)}")
except Exception as e:
logger.exception("Unable to migrate starred songs")
|
import os
win32_defines = ["#define _GLFW_WIN32 1",
"#ifdef _MSC_VER\n#define _CRT_SECURE_NO_WARNINGS\n#endif",
"#define LSH_GLFW_USE_HYBRID_HPG",
"#ifdef LSH_GLFW_USE_HYBRID_HPG\n#define _GLFW_USE_HYBRID_HPG 1\n#endif",
"#define _UNICODE",
"#ifdef MINGW\n#define UNICODE\n#define WINVER 0x0501\n#endif", ]
win32_headers = [ "internal.h", "mappings.h", "win32_platform.h", "win32_joystick.h", "wgl_context.h", "egl_context.h", "osmesa_context.h", ]
win32_sources = [ "win32_init.c", "win32_joystick.c", "win32_monitor.c", "win32_time.c", "win32_thread.c", "win32_window.c", "wgl_context.c", "egl_context.c", "osmesa_context.c", ]
osmesa_headers = [ "internal.h", "mappings.h","null_platform.h", "null_joystick.h", "posix_time.h", "posix_thread.h", "osmesa_context.h", ]
osmesa_sources = [ "null_init.c", "null_monitor.c", "null_window.c", "null_joystick.c", "posix_time.c", "posix_thread.c", "osmesa_context.c", ]
x11_headers = [ "internal.h", "mappings.h", "x11_platform.h", "xkb_unicode.h", "posix_time.h", "posix_thread.h", "glx_context.h", "egl_context.h", "osmesa_context.h", "linux_joystick.h", ]
x11_sources = [ "x11_init.c", "x11_monitor.c", "x11_window.c", "xkb_unicode.c", "posix_time.c", "posix_thread.c", "glx_context.c", "egl_context.c", "osmesa_context.c", "linux_joystick.c", ]
wayland_headers = [ "internal.h", "mappings.h", "wl_platform.h", "posix_time.h", "posix_thread.h", "xkb_unicode.h", "egl_context.h", "osmesa_context.h", "linux_joystick.h", ]
wayland_sources = [ "wl_init.c", "wl_monitor.c", "wl_window.c", "posix_time.c", "posix_thread.c", "xkb_unicode.c", "egl_context.c", "osmesa_context.c", "linux_joystick.c", ]
cocoa_headers = [ "internal.h", "mappings.h", "cocoa_platform.h", "cocoa_joystick.h", "posix_thread.h", "nsgl_context.h", "egl_context.h", "osmesa_context.h", ]
cocoa_sources = [ "cocoa_init.m", "cocoa_joystick.m", "cocoa_monitor.m", "cocoa_window.m", "cocoa_time.c", "posix_thread.c", "nsgl_context.m", "egl_context.c", "osmesa_context.c", ]
all_headers = list(set(win32_headers + osmesa_headers + x11_headers + wayland_headers + cocoa_headers))
shared_sources = [ "context.c", "init.c", "input.c", "monitor.c", "vulkan.c", "window.c", ]
# Get the file using this function since it might be cached
files_cache = {}
def lsh_get_file(it: str) -> str:
global files_cache
if it in files_cache.keys():
return files_cache[it]
guard = f"HEADER_GUARD_{it.replace(".", "_").upper()}"
code = open(f"./glfw/src/{it}").read()
files_cache[it] = f"\n#ifndef {guard}\n#define {guard}\n{code}\n#endif\n"
return files_cache[it]
# Include the headers into a source
def include_headers(headers, source: str) -> str:
if len(headers) == 0:
return source
for it in headers:
if source.find(f"#include \"{it}\"") != -1:
h = include_headers([i for i in headers if i != it], lsh_get_file(it))
source = source.replace(f"#include \"{it}\"", f"\n{h}\n")
return source
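# Clarifying note (added): include_headers recursively inlines every '#include "x.h"'
# that names a known GLFW header, removing the header just inlined from the candidate
# list so mutual includes cannot recurse forever; lsh_get_file wraps each file in a
# synthetic HEADER_GUARD_* guard, so inlining the same header from several source
# files stays harmless after preprocessing.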
# Add shared code
shared_source_result = ""
for it in shared_sources:
shared_source_result += include_headers(all_headers, lsh_get_file(it))
# Add win32 code
win32_source_result = "\n#if defined _WIN32 || defined LSH_GLFW_WIN32\n"
for it in win32_defines:
win32_source_result += "\n" + it + "\n"
for it in win32_sources:
win32_source_result += include_headers(all_headers, lsh_get_file(it))
win32_source_result += "\n#endif\n"
# Add osmesa code
osmesa_source_result = "\n#ifdef LSH_GLFW_OSMESA\n"
for it in osmesa_sources:
osmesa_source_result += include_headers(all_headers, lsh_get_file(it))
osmesa_source_result += "\n#endif\n"
# Add x11 code
x11_source_result = "\n#ifdef LSH_GLFW_X11\n"
for it in x11_sources:
x11_source_result += include_headers(all_headers, lsh_get_file(it))
x11_source_result += "\n#endif\n"
# Add wayland code
wayland_source_result = "\n#ifdef LSH_GLFW_WAYLAND\n"
for it in wayland_sources:
wayland_source_result += include_headers(all_headers, lsh_get_file(it))
wayland_source_result += "\n#endif\n"
# Add cocoa code
cocoa_source_result = "\n#if defined LSH_GLFW_COCOA || defined __APPLE__\n"
for it in cocoa_sources:
cocoa_source_result += include_headers(all_headers, lsh_get_file(it))
cocoa_source_result += "\n#endif\n"
# Get the glfw headers
headers_result = open("./glfw/include/GLFW/glfw3.h").read() + "\n" + open("./glfw/include/GLFW/glfw3native.h").read() + "\n"
# Add single header
source_result = "\n#ifdef LSH_GLFW_IMPLEMENTATION\n"
source_result += win32_source_result + osmesa_source_result + x11_source_result + wayland_source_result + cocoa_source_result + shared_source_result
source_result += "\n#endif\n"
# Comment out options macro error
source_result = source_result.replace("#error \"You must not define any header option macros when compiling GLFW\"",
"//#error \"You must not define any header option macros when compiling GLFW\"")
# for it in win32_headers + osmesa_headers + x11_headers + wayland_headers + cocoa_headers:
# source_result = source_result.replace(f"#include \"{it}\"", f"//#include \"{it}\"")
source_result = source_result.replace("#include \"../include/GLFW/glfw3.h\"", "//#include \"../include/GLFW/glfw3.h\"")
# Make dirs
if not os.path.exists("./generated-single-header"):
os.makedirs("./generated-single-header")
if not os.path.exists("./generated-single-header-and-source"):
os.makedirs("./generated-single-header-and-source")
# Make single header
open("./generated-single-header/glfw.h", "w+").write(headers_result + source_result)
# Make single header + single source
open("./generated-single-header-and-source/glfw.h", "w+").write(headers_result)
open("./generated-single-header-and-source/glfw.c", "w+").write(
headers_result + "\n#define LSH_GLFW_IMPLEMENTATION\n" + source_result)
|
import os
win32_defines = ["#define _GLFW_WIN32 1",
"#ifdef _MSC_VER\n#define _CRT_SECURE_NO_WARNINGS\n#endif",
"#define LSH_GLFW_USE_HYBRID_HPG",
"#ifdef LSH_GLFW_USE_HYBRID_HPG\n#define _GLFW_USE_HYBRID_HPG 1\n#endif",
"#define _UNICODE",
"#ifdef MINGW\n#define UNICODE\n#define WINVER 0x0501\n#endif", ]
win32_headers = [ "internal.h", "mappings.h", "win32_platform.h", "win32_joystick.h", "wgl_context.h", "egl_context.h", "osmesa_context.h", ]
win32_sources = [ "win32_init.c", "win32_joystick.c", "win32_monitor.c", "win32_time.c", "win32_thread.c", "win32_window.c", "wgl_context.c", "egl_context.c", "osmesa_context.c", ]
osmesa_headers = [ "internal.h", "mappings.h","null_platform.h", "null_joystick.h", "posix_time.h", "posix_thread.h", "osmesa_context.h", ]
osmesa_sources = [ "null_init.c", "null_monitor.c", "null_window.c", "null_joystick.c", "posix_time.c", "posix_thread.c", "osmesa_context.c", ]
x11_headers = [ "internal.h", "mappings.h", "x11_platform.h", "xkb_unicode.h", "posix_time.h", "posix_thread.h", "glx_context.h", "egl_context.h", "osmesa_context.h", "linux_joystick.h", ]
x11_sources = [ "x11_init.c", "x11_monitor.c", "x11_window.c", "xkb_unicode.c", "posix_time.c", "posix_thread.c", "glx_context.c", "egl_context.c", "osmesa_context.c", "linux_joystick.c", ]
wayland_headers = [ "internal.h", "mappings.h", "wl_platform.h", "posix_time.h", "posix_thread.h", "xkb_unicode.h", "egl_context.h", "osmesa_context.h", "linux_joystick.h", ]
wayland_sources = [ "wl_init.c", "wl_monitor.c", "wl_window.c", "posix_time.c", "posix_thread.c", "xkb_unicode.c", "egl_context.c", "osmesa_context.c", "linux_joystick.c", ]
cocoa_headers = [ "internal.h", "mappings.h", "cocoa_platform.h", "cocoa_joystick.h", "posix_thread.h", "nsgl_context.h", "egl_context.h", "osmesa_context.h", ]
cocoa_sources = [ "cocoa_init.m", "cocoa_joystick.m", "cocoa_monitor.m", "cocoa_window.m", "cocoa_time.c", "posix_thread.c", "nsgl_context.m", "egl_context.c", "osmesa_context.c", ]
all_headers = list(set(win32_headers + osmesa_headers + x11_headers + wayland_headers + cocoa_headers))
shared_sources = [ "context.c", "init.c", "input.c", "monitor.c", "vulkan.c", "window.c", ]
# Get the file using this function since it might be cached
files_cache = {}
def lsh_get_file(it: str) -> str:
global files_cache
if it in files_cache.keys():
return files_cache[it]
guard = f"HEADER_GUARD_{it.replace('.', '_').upper()}"
code = open(f"./glfw/src/{it}").read()
files_cache[it] = f"\n#ifndef {guard}\n#define {guard}\n{code}\n#endif\n"
return files_cache[it]
# Include the headers into a source
def include_headers(headers, source: str) -> str:
if len(headers) == 0:
return source
for it in headers:
if source.find(f"#include \"{it}\"") != -1:
h = include_headers([i for i in headers if i != it], lsh_get_file(it))
source = source.replace(f"#include \"{it}\"", f"\n{h}\n")
return source
# Add shared code
shared_source_result = ""
for it in shared_sources:
shared_source_result += include_headers(all_headers, lsh_get_file(it))
# Add win32 code
win32_source_result = "\n#if defined _WIN32 || defined LSH_GLFW_WIN32\n"
for it in win32_defines:
win32_source_result += "\n" + it + "\n"
for it in win32_sources:
win32_source_result += include_headers(all_headers, lsh_get_file(it))
win32_source_result += "\n#endif\n"
# Add osmesa code
osmesa_source_result = "\n#ifdef LSH_GLFW_OSMESA\n"
for it in osmesa_sources:
osmesa_source_result += include_headers(all_headers, lsh_get_file(it))
osmesa_source_result += "\n#endif\n"
# Add x11 code
x11_source_result = "\n#ifdef LSH_GLFW_X11\n"
for it in x11_sources:
x11_source_result += include_headers(all_headers, lsh_get_file(it))
x11_source_result += "\n#endif\n"
# Add wayland code
wayland_source_result = "\n#ifdef LSH_GLFW_WAYLAND\n"
for it in wayland_sources:
wayland_source_result += include_headers(all_headers, lsh_get_file(it))
wayland_source_result += "\n#endif\n"
# Add cocoa code
cocoa_source_result = "\n#if defined LSH_GLFW_COCOA || defined __APPLE__\n"
for it in cocoa_sources:
cocoa_source_result += include_headers(all_headers, lsh_get_file(it))
cocoa_source_result += "\n#endif\n"
# Get the glfw headers
headers_result = open("./glfw/include/GLFW/glfw3.h").read() + "\n" + open("./glfw/include/GLFW/glfw3native.h").read() + "\n"
# Add single header
source_result = "\n#ifdef LSH_GLFW_IMPLEMENTATION\n"
source_result += win32_source_result + osmesa_source_result + x11_source_result + wayland_source_result + cocoa_source_result + shared_source_result
source_result += "\n#endif\n"
# Comment out options macro error
source_result = source_result.replace("#error \"You must not define any header option macros when compiling GLFW\"",
"//#error \"You must not define any header option macros when compiling GLFW\"")
# for it in win32_headers + osmesa_headers + x11_headers + wayland_headers + cocoa_headers:
# source_result = source_result.replace(f"#include \"{it}\"", f"//#include \"{it}\"")
source_result = source_result.replace("#include \"../include/GLFW/glfw3.h\"", "//#include \"../include/GLFW/glfw3.h\"")
# Make dirs
if not os.path.exists("./generated-single-header"):
os.makedirs("./generated-single-header")
if not os.path.exists("./generated-single-header-and-source"):
os.makedirs("./generated-single-header-and-source")
# Make single header
open("./generated-single-header/glfw.h", "w+").write(headers_result + source_result)
# Make single header + single source
open("./generated-single-header-and-source/glfw.h", "w+").write(headers_result)
open("./generated-single-header-and-source/glfw.c", "w+").write(
headers_result + "\n#define LSH_GLFW_IMPLEMENTATION\n" + source_result)
|
from inspect import signature
from typing import Callable, Any, List
import re
import copy
from .type import Type
class Function(Type):
    def __init__(self, fn: Callable[..., Any], name: str = "anonymous") -> None:
self.name = name
self.vars = list(signature(fn).parameters)
self.expr = "[built-in]"
self.fn = fn
self.varnum = len(signature(fn).parameters)
def __call__(self, *args, **kwds):
return self.fn(*args, **kwds)
def __str__(self) -> str:
return f"{self.name}({",".join(self.vars)})={self.expr}"
class ListFunction(Function):
pattern = r"[a-zA-Z]+\(.+\)"
    def __init__(self, expr: str, vars: List[str], name: str = "anonymous") -> None:
self.name = name
self.expr = expr
self.vars = vars
self.varnum = len(vars)
from ..expression import infix_to_rpnlist
rpn_list = infix_to_rpnlist(expr)
for i in range(len(rpn_list)):
if (rpn_list[i] in vars):
rpn_list[i] = str(vars.index(rpn_list[i]))
self.rpn_list = rpn_list
def __call__(self, *args, **kwds):
res = copy.deepcopy(self.rpn_list)
for i in range(len(self.rpn_list)):
if isinstance(res[i], str) and res[i].isdigit():
res[i] = args[int(res[i])]
from ..expression import eval_rpn
return eval_rpn(res)
def subvars(self):
        # a function to replace variables with their values
def f(m: re.Match):
from ..ft_global import user_vars
word = m.group().lower()
if word in user_vars and not isinstance(user_vars[word], Function):
return(str(user_vars[word]))
else:
return(m.group())
result = re.sub(r"[a-zA-Z]+", f, self.expr)
return result.strip()
def __str__(self) -> str:
result = self.subvars()
return f"{self.name}({",".join(self.vars)}) = {result}"
|
from inspect import signature
from typing import Callable, Any, List
import re
import copy
from .type import Type
class Function(Type):
    def __init__(self, fn: Callable[..., Any], name: str = "anonymous") -> None:
self.name = name
self.vars = list(signature(fn).parameters)
self.expr = "[built-in]"
self.fn = fn
self.varnum = len(signature(fn).parameters)
def __call__(self, *args, **kwds):
return self.fn(*args, **kwds)
def __str__(self) -> str:
return f"{self.name}({','.join(self.vars)})={self.expr}"
class ListFunction(Function):
pattern = r"[a-zA-Z]+\(.+\)"
    def __init__(self, expr: str, vars: List[str], name: str = "anonymous") -> None:
self.name = name
self.expr = expr
self.vars = vars
self.varnum = len(vars)
from ..expression import infix_to_rpnlist
rpn_list = infix_to_rpnlist(expr)
for i in range(len(rpn_list)):
if (rpn_list[i] in vars):
rpn_list[i] = str(vars.index(rpn_list[i]))
self.rpn_list = rpn_list
def __call__(self, *args, **kwds):
res = copy.deepcopy(self.rpn_list)
for i in range(len(self.rpn_list)):
if isinstance(res[i], str) and res[i].isdigit():
res[i] = args[int(res[i])]
from ..expression import eval_rpn
return eval_rpn(res)
def subvars(self):
# a function to replace variables with there values
def f(m: re.Match):
from ..ft_global import user_vars
word = m.group().lower()
if word in user_vars and not isinstance(user_vars[word], Function):
return(str(user_vars[word]))
else:
return(m.group())
result = re.sub(r"[a-zA-Z]+", f, self.expr)
return result.strip()
def __str__(self) -> str:
result = self.subvars()
return f"{self.name}({','.join(self.vars)}) = {result}"
|
import logging
from collections import Counter, defaultdict
import aiogram
from aiogram import Bot, types
from aiogram.utils.emoji import emojize
from detector import Detector
from gwevents import Events, time_ago
from keyboard import InlineKeyboard
from permanentset import PermanentSet
class GraceBot(Bot):
def __init__(self, token: str):
super().__init__(token=token)
self.events: Events = Events()
self.events.update_all()
self.event_keyboards: dict = defaultdict(InlineKeyboard)
self.new_event_messages_send: PermanentSet = PermanentSet(
"new_event_messages_send.txt", str
)
self.subscribers: PermanentSet = PermanentSet("subscribers.txt", int)
self.event_types: dict = {
# Probability that the source is a binary black hole merger (both
# objects heavier than 5 solar masses)
"BBH": "binary black hole merger",
# Probability that the source is a binary neutron star merger
# (both objects lighter than 3 solar masses)
"BNS": "binary neutron star merger",
# Probability that the source is a neutron star-black hole merger
# (primary heavier than 5 solar masses, secondary lighter than 3
# solar masses)
"NSBH": "neutron star black hole merger",
            # Probability that the source is terrestrial (i.e., a background
# noise fluctuation or a glitch)
"Terrestrial": "terrestrial",
# Probability that the source has at least one object between 3 and
# 5 solar masses
"MassGap": "mass gap",
}
async def send_preliminary(self, message):
event_id = event_id_from_message(message)
logging.info(f"Event to update from preliminary message: {event_id}")
if event_id in self.new_event_messages_send.data:
return
else:
self.events.update_events_last_week()
self.new_event_messages_send.add(event_id)
text = f"A new event has been measured!\n\n"
await self._send_event_info_to_all_users(event_id, text)
async def send_update(self, message):
event_id = event_id_from_message(message)
self.events.update_single(event_id)
text = f"Event {event_id} has been updated.\n\n"
await self._send_event_info_to_all_users(event_id, text)
async def send_retraction(self, message):
event_id = event_id_from_message(message)
text = f"Event {event_id} has been retracted. The event details were:\n\n"
await self._send_event_info_to_all_users(event_id, text)
self.events.update_all()
async def _send_event_info_to_all_users(self, event_id: str, pre_text: str) -> None:
for user_id in self.subscribers.data:
try:
await self.send_event_info(user_id, event_id, pre_text)
except aiogram.utils.exceptions.BotBlocked:
logging.info(f"User {user_id} has blocked the bot.")
continue
async def send_event_info(
self, chat_id: str, event_id: str, pre_text: str = ""
) -> None:
"""
Send information of a specific event to the user.
Parameters
----------
chat_id : str
Where to send the message to.
event_id : str
The event to send the information about.
pre_text : str
Will be added to the beginning of the message.
Returns
-------
None
"""
try:
event = self.events.data[event_id]
except KeyError:
logging.error(f"Warning couldn't find event with id {event_id}")
return
link = f"https://gracedb.ligo.org/superevents/{event_id}/view/"
text = (
pre_text + f"*{event_id.upper()}*\n" + f"{time_ago(event["created"])}\n\n"
)
try:
event_type = self.events.get_likely_event_type(event_id)
confidence = self.events.data[event_id]["event_types"][event_type]
text += (
f"Unconfirmed {self.event_types[event_type]} ({confidence:.2%}) event."
)
distance_mean = round(event["distance_mean_Mly"] / 1000, 2)
distance_std = round(event["distance_std_Mly"] / 1000, 2)
text = (
text[:-1] + f" at {distance_mean} ± {distance_std} billion light years."
)
instruments = self.events.data[event_id]["instruments_long"]
text += f" The event was measured by {inline_list(instruments)}."
except KeyError:
pass
text += f"\n\n[Event page]({link})"
await self.send_message(chat_id, text, parse_mode="markdown")
try:
with open(self.events.picture(event_id), "rb") as picture:
await self.send_photo(chat_id, picture)
except FileNotFoundError:
logging.error("Couldn't find the event image")
return None
async def send_welcome_message(self, message: types.Message) -> None:
"""
Send a welcome message to the user.
Parameters
----------
message : aiogram.types.Message
            The message sent by the user.
Returns
-------
None.
"""
text = (
"Stay up-to-date on LIGO/Virgo gravitational wave events!\n"
"\n"
"You can /subscribe to automatically receive a message whenever a new event is "
"measured, or an existing event is updated. Use /unsubscribe to stop receiving "
"messages.\n"
"\n"
"Furthermore you can check out the /latest event, or select a past /event. "
"Use /stats to see and overview of all O3 events or view the live detector /status."
)
await self.send_message(message.chat.id, text)
async def send_latest(self, message: types.Message) -> None:
"""
Send some details of the most recent gravitational wave event.
Parameters
----------
message : aiogram.types.Message
            The message sent by the user.
Returns
-------
None.
"""
event_id = list(self.events.latest)[0]
await self.send_event_info(message.chat.id, event_id)
@property
def event_keys(self) -> list:
return [f"{id}_{info["most_likely"]}" for id, info in self.events.data.items()]
async def send_event_selector(self, message: types.Message) -> None:
"""
User can select any event from the O3 run and get a message with the details.
Parameters
----------
message : types.Message
Returns
-------
None
"""
self.event_keyboards[message.chat.id] = InlineKeyboard(
self.event_keys, rows=4, columns=2
)
await self.send_message(
chat_id=message.chat.id,
text="Select the event you want to see the details of.",
reply_markup=self.event_keyboards[message.chat.id],
)
async def event_selector_callback_handler(self, query: types.CallbackQuery) -> None:
"""
This is called when the user presses a button to select an event.
Parameters
----------
query : types.CallbackQuery
Callback query which contains info on which message the InlineKeyboard is
attached to.
Returns
-------
None
"""
        await query.answer()  # answer the callback query to stop the button's loading animation
answer_data = query.data
logging.debug(f"answer_data={answer_data}")
user_id = query.from_user.id
valid_event_ids = self.event_keyboards[user_id].visible_keys
if answer_data in valid_event_ids:
event_id, _ = answer_data.split("_")
await self.send_event_info(user_id, event_id)
else:
await self.event_keyboards[user_id].update(query)
async def send_o3_stats(self, message: types.Message) -> None:
"""
Send some statistics of observational run 3 (O3).
Parameters
----------
message : aiogram.types.Message
            The message sent by the user.
Returns
-------
None.
"""
# TODO take confirmed from other source since it will not be updated
# in graceDB if they are confirmed. For that use:
# https://www.gw-openscience.org/catalog/GWTC-1-confident/html/
event_counter = Counter(
[info["most_likely"] for info in self.events.data.values()]
)
unconfirmed_bbh = event_counter["BBH"]
unconfirmed_bns = event_counter["BNS"]
unconfirmed_nsbh = event_counter["NSBH"]
unconfirmed_mg = event_counter["MassGap"]
terrestrial = event_counter["Terrestrial"]
text = (
f"Observational run 3 has detected *{len(self.events.data)}* "
"events since April 1st 2019.\n\n"
""
"*Event types*\n"
f"Binary black hole mergers: *{unconfirmed_bbh}*.\n"
f"Binary neutron star mergers: *{unconfirmed_bns}*.\n"
f"Neutron star black hole mergers: *{unconfirmed_nsbh}*\n"
f"At least one object between 3 and 5 solar masses: *{unconfirmed_mg}*.\n"
f"Likely terrestrial (false alarm): *{terrestrial}*.\n"
)
await self.send_message(message.chat.id, text, parse_mode="markdown")
async def send_detector_status(self, message: types.Message) -> None:
"""
Send status of all three detectors to the user.
Parameters
----------
message : types.Message
            The message sent by the user.
Returns
-------
None
"""
detectors = [Detector("Hanford"), Detector("Livingston"), Detector("Virgo")]
detector_status = []
for detector in detectors:
hours = detector.status_duration.days * 24 + (
detector.status_duration.seconds // 3600
)
minutes = (detector.status_duration.seconds % 3600) // 60
detector_status.append(
f"{emojize(detector.status_icon)} {detector.name}: "
f"{detector.status} {hours}h {minutes}m"
)
text = "\n".join(detector_status)
await self.send_message(message.chat.id, text)
async def add_subscriber(self, message: types.Message) -> None:
"""
Add the user from the message to the subscriber list.
Parameters
----------
message : aiogram.types.Message
            The message sent by the user.
Returns
-------
None.
"""
user_id = message.chat.id
if self.subscribers.is_in_list(user_id):
await self.send_message(user_id, "You are already subscribed.")
else:
self.subscribers.add(message.chat.id)
await self.send_message(
user_id, "You will now receive the latest event updates."
)
async def remove_subscriber(self, message: types.Message) -> None:
"""
Remove the user from the message from the subscriber list.
Parameters
----------
message : aiogram.types.Message
            The message sent by the user.
Returns
-------
None.
"""
user_id = message.chat.id
if not self.subscribers.is_in_list(user_id):
await self.send_message(user_id, "You are not subscribed.")
else:
self.subscribers.remove(message.chat.id)
await self.send_message(
user_id, "You will no longer receive the latest event updates."
)
def event_id_from_message(message: types.Message) -> str:
"""
Return the event id which is assumed to come right after the command.
Parameters
----------
message : aiogram.types.Message
            The message sent by the user.
Returns
-------
The event id.
"""
try:
event_id = message.text.split(" ")[-1]
    except AttributeError:  # message.text can be None; str.split never raises KeyError
event_id = None
return event_id
def inline_list(items):
if len(items) == 0:
return ""
elif len(items) == 1:
return items[0]
else:
return ", ".join(items[:-1]) + f" and {items[-1]}"
|
import logging
from collections import Counter, defaultdict
import aiogram
from aiogram import Bot, types
from aiogram.utils.emoji import emojize
from detector import Detector
from gwevents import Events, time_ago
from keyboard import InlineKeyboard
from permanentset import PermanentSet
class GraceBot(Bot):
def __init__(self, token: str):
super().__init__(token=token)
self.events: Events = Events()
self.events.update_all()
self.event_keyboards: dict = defaultdict(InlineKeyboard)
self.new_event_messages_send: PermanentSet = PermanentSet(
"new_event_messages_send.txt", str
)
self.subscribers: PermanentSet = PermanentSet("subscribers.txt", int)
self.event_types: dict = {
# Probability that the source is a binary black hole merger (both
# objects heavier than 5 solar masses)
"BBH": "binary black hole merger",
# Probability that the source is a binary neutron star merger
# (both objects lighter than 3 solar masses)
"BNS": "binary neutron star merger",
# Probability that the source is a neutron star-black hole merger
# (primary heavier than 5 solar masses, secondary lighter than 3
# solar masses)
"NSBH": "neutron star black hole merger",
# Probability that the source is terrestrial(i.e., a background
# noise fluctuation or a glitch)
"Terrestrial": "terrestrial",
# Probability that the source has at least one object between 3 and
# 5 solar masses
"MassGap": "mass gap",
}
async def send_preliminary(self, message):
event_id = event_id_from_message(message)
logging.info(f"Event to update from preliminary message: {event_id}")
if event_id in self.new_event_messages_send.data:
return
else:
self.events.update_events_last_week()
self.new_event_messages_send.add(event_id)
text = f"A new event has been measured!\n\n"
await self._send_event_info_to_all_users(event_id, text)
async def send_update(self, message):
event_id = event_id_from_message(message)
self.events.update_single(event_id)
text = f"Event {event_id} has been updated.\n\n"
await self._send_event_info_to_all_users(event_id, text)
async def send_retraction(self, message):
event_id = event_id_from_message(message)
text = f"Event {event_id} has been retracted. The event details were:\n\n"
await self._send_event_info_to_all_users(event_id, text)
self.events.update_all()
async def _send_event_info_to_all_users(self, event_id: str, pre_text: str) -> None:
for user_id in self.subscribers.data:
try:
await self.send_event_info(user_id, event_id, pre_text)
except aiogram.utils.exceptions.BotBlocked:
logging.info(f"User {user_id} has blocked the bot.")
continue
async def send_event_info(
self, chat_id: str, event_id: str, pre_text: str = ""
) -> None:
"""
Send information of a specific event to the user.
Parameters
----------
chat_id : str
Where to send the message to.
event_id : str
The event to send the information about.
pre_text : str
Will be added to the beginning of the message.
Returns
-------
None
"""
try:
event = self.events.data[event_id]
except KeyError:
logging.error(f"Warning couldn't find event with id {event_id}")
return
link = f"https://gracedb.ligo.org/superevents/{event_id}/view/"
text = (
pre_text + f"*{event_id.upper()}*\n" + f"{time_ago(event['created'])}\n\n"
)
try:
event_type = self.events.get_likely_event_type(event_id)
confidence = self.events.data[event_id]["event_types"][event_type]
text += (
f"Unconfirmed {self.event_types[event_type]} ({confidence:.2%}) event."
)
distance_mean = round(event["distance_mean_Mly"] / 1000, 2)
distance_std = round(event["distance_std_Mly"] / 1000, 2)
text = (
text[:-1] + f" at {distance_mean} ± {distance_std} billion light years."
)
instruments = self.events.data[event_id]["instruments_long"]
text += f" The event was measured by {inline_list(instruments)}."
except KeyError:
pass
text += f"\n\n[Event page]({link})"
await self.send_message(chat_id, text, parse_mode="markdown")
try:
with open(self.events.picture(event_id), "rb") as picture:
await self.send_photo(chat_id, picture)
except FileNotFoundError:
logging.error("Couldn't find the event image")
return None
async def send_welcome_message(self, message: types.Message) -> None:
"""
Send a welcome message to the user.
Parameters
----------
message : aiogram.types.Message
            The message sent by the user.
Returns
-------
None.
"""
text = (
"Stay up-to-date on LIGO/Virgo gravitational wave events!\n"
"\n"
"You can /subscribe to automatically receive a message whenever a new event is "
"measured, or an existing event is updated. Use /unsubscribe to stop receiving "
"messages.\n"
"\n"
"Furthermore you can check out the /latest event, or select a past /event. "
"Use /stats to see and overview of all O3 events or view the live detector /status."
)
await self.send_message(message.chat.id, text)
async def send_latest(self, message: types.Message) -> None:
"""
Send some details of the most recent gravitational wave event.
Parameters
----------
message : aiogram.types.Message
            The message sent by the user.
Returns
-------
None.
"""
event_id = list(self.events.latest)[0]
await self.send_event_info(message.chat.id, event_id)
@property
def event_keys(self) -> list:
return [f"{id}_{info['most_likely']}" for id, info in self.events.data.items()]
async def send_event_selector(self, message: types.Message) -> None:
"""
User can select any event from the O3 run and get a message with the details.
Parameters
----------
message : types.Message
Returns
-------
None
"""
self.event_keyboards[message.chat.id] = InlineKeyboard(
self.event_keys, rows=4, columns=2
)
await self.send_message(
chat_id=message.chat.id,
text="Select the event you want to see the details of.",
reply_markup=self.event_keyboards[message.chat.id],
)
async def event_selector_callback_handler(self, query: types.CallbackQuery) -> None:
"""
This is called when the user presses a button to select an event.
Parameters
----------
query : types.CallbackQuery
Callback query which contains info on which message the InlineKeyboard is
attached to.
Returns
-------
None
"""
        await query.answer()  # answer the callback query so Telegram stops the button's loading indicator
answer_data = query.data
logging.debug(f"answer_data={answer_data}")
user_id = query.from_user.id
valid_event_ids = self.event_keyboards[user_id].visible_keys
if answer_data in valid_event_ids:
event_id, _ = answer_data.split("_")
await self.send_event_info(user_id, event_id)
else:
await self.event_keyboards[user_id].update(query)
async def send_o3_stats(self, message: types.Message) -> None:
"""
Send some statistics of observational run 3 (O3).
Parameters
----------
message : aiogram.types.Message
            The message sent by the user.
Returns
-------
None.
"""
# TODO take confirmed from other source since it will not be updated
# in graceDB if they are confirmed. For that use:
# https://www.gw-openscience.org/catalog/GWTC-1-confident/html/
event_counter = Counter(
[info["most_likely"] for info in self.events.data.values()]
)
unconfirmed_bbh = event_counter["BBH"]
unconfirmed_bns = event_counter["BNS"]
unconfirmed_nsbh = event_counter["NSBH"]
unconfirmed_mg = event_counter["MassGap"]
terrestrial = event_counter["Terrestrial"]
text = (
f"Observational run 3 has detected *{len(self.events.data)}* "
"events since April 1st 2019.\n\n"
""
"*Event types*\n"
f"Binary black hole mergers: *{unconfirmed_bbh}*.\n"
f"Binary neutron star mergers: *{unconfirmed_bns}*.\n"
f"Neutron star black hole mergers: *{unconfirmed_nsbh}*\n"
f"At least one object between 3 and 5 solar masses: *{unconfirmed_mg}*.\n"
f"Likely terrestrial (false alarm): *{terrestrial}*.\n"
)
await self.send_message(message.chat.id, text, parse_mode="markdown")
async def send_detector_status(self, message: types.Message) -> None:
"""
Send status of all three detectors to the user.
Parameters
----------
message : types.Message
            The message sent by the user.
Returns
-------
None
"""
detectors = [Detector("Hanford"), Detector("Livingston"), Detector("Virgo")]
detector_status = []
for detector in detectors:
hours = detector.status_duration.days * 24 + (
detector.status_duration.seconds // 3600
)
minutes = (detector.status_duration.seconds % 3600) // 60
detector_status.append(
f"{emojize(detector.status_icon)} {detector.name}: "
f"{detector.status} {hours}h {minutes}m"
)
text = "\n".join(detector_status)
await self.send_message(message.chat.id, text)
async def add_subscriber(self, message: types.Message) -> None:
"""
Add the user from the message to the subscriber list.
Parameters
----------
message : aiogram.types.Message
            The message sent by the user.
Returns
-------
None.
"""
user_id = message.chat.id
if self.subscribers.is_in_list(user_id):
await self.send_message(user_id, "You are already subscribed.")
else:
self.subscribers.add(message.chat.id)
await self.send_message(
user_id, "You will now receive the latest event updates."
)
async def remove_subscriber(self, message: types.Message) -> None:
"""
Remove the user from the message from the subscriber list.
Parameters
----------
message : aiogram.types.Message
            The message sent by the user.
Returns
-------
None.
"""
user_id = message.chat.id
if not self.subscribers.is_in_list(user_id):
await self.send_message(user_id, "You are not subscribed.")
else:
self.subscribers.remove(message.chat.id)
await self.send_message(
user_id, "You will no longer receive the latest event updates."
)
def event_id_from_message(message: types.Message) -> str:
"""
Return the event id which is assumed to come right after the command.
Parameters
----------
message : aiogram.types.Message
            The message sent by the user.
Returns
-------
The event id.
"""
try:
event_id = message.text.split(" ")[-1]
    except AttributeError:  # message.text can be None; str.split never raises KeyError
event_id = None
return event_id
def inline_list(items):
if len(items) == 0:
return ""
elif len(items) == 1:
return items[0]
else:
return ", ".join(items[:-1]) + f" and {items[-1]}"
|
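# Illustrative sketch (annotation, not part of the dataset rows): a self-contained
# restatement of the two module-level helpers from the GraceBot cells above, with
# asserts showing the behaviour they are written to produce. The sample detector
# names and the "/update S190425z" command text are made up for the example.
def inline_list(items):
    if len(items) == 0:
        return ""
    elif len(items) == 1:
        return items[0]
    else:
        return ", ".join(items[:-1]) + f" and {items[-1]}"

assert inline_list([]) == ""
assert inline_list(["Hanford"]) == "Hanford"
assert inline_list(["Hanford", "Livingston", "Virgo"]) == "Hanford, Livingston and Virgo"

# event_id_from_message keeps only the last whitespace-separated token, so a
# command like "/update S190425z" yields "S190425z".
assert "/update S190425z".split(" ")[-1] == "S190425z"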
def area(c, la):
print(f'A area de um terreno {c :.2f}m x {la :.2f}m é de {c * la :.2f}m².')
# Programa principal
print(f'{'Controle de Terrenos' :^30}\n'
f'{'-' * 30}')
comp = float(input('Comprimento (m): '))
larg = float(input('Largura (m): '))
area(comp, larg)
|
def area(c, la):
print(f'A area de um terreno {c :.2f}m x {la :.2f}m é de {c * la :.2f}m².')
# Programa principal
print(f'{"Controle de Terrenos" :^30}\n'
f'{"-" * 30}')
comp = float(input('Comprimento (m): '))
larg = float(input('Largura (m): '))
area(comp, larg)
|
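# Illustrative sketch (annotation, not part of the dataset rows): the format specs the
# "Controle de Terrenos" cells rely on -- ":^30" centres text in a 30-character field
# and ":.2f" prints a float with two decimals. The numbers are made up.
titulo = 'Controle de Terrenos'
print(f'{titulo:^30}')          # centred in 30 columns
print(f'{"-" * 30}')            # a 30-character rule
print(f'{7.5 * 12.0:.2f}m²')    # -> 90.00m²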
from typing import List, Optional
import aiosqlite
from shamrock.types.blockchain_format.coin import Coin
from shamrock.types.blockchain_format.sized_bytes import bytes32
from shamrock.types.coin_record import CoinRecord
from shamrock.types.full_block import FullBlock
from shamrock.util.db_wrapper import DBWrapper
from shamrock.util.ints import uint32, uint64
from shamrock.util.lru_cache import LRUCache
class CoinStore:
"""
This object handles CoinRecords in DB.
A cache is maintained for quicker access to recent coins.
"""
coin_record_db: aiosqlite.Connection
coin_record_cache: LRUCache
cache_size: uint32
db_wrapper: DBWrapper
@classmethod
async def create(cls, db_wrapper: DBWrapper, cache_size: uint32 = uint32(60000)):
self = cls()
self.cache_size = cache_size
self.db_wrapper = db_wrapper
self.coin_record_db = db_wrapper.db
await self.coin_record_db.execute("pragma journal_mode=wal")
await self.coin_record_db.execute("pragma synchronous=2")
await self.coin_record_db.execute(
(
"CREATE TABLE IF NOT EXISTS coin_record("
"coin_name text PRIMARY KEY,"
" confirmed_index bigint,"
" spent_index bigint,"
" spent int,"
" coinbase int,"
" puzzle_hash text,"
" coin_parent text,"
" amount blob,"
" timestamp bigint)"
)
)
# Useful for reorg lookups
await self.coin_record_db.execute(
"CREATE INDEX IF NOT EXISTS coin_confirmed_index on coin_record(confirmed_index)"
)
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_spent_index on coin_record(spent_index)")
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_spent on coin_record(spent)")
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_puzzle_hash on coin_record(puzzle_hash)")
await self.coin_record_db.commit()
self.coin_record_cache = LRUCache(cache_size)
return self
async def new_block(self, block: FullBlock, tx_additions: List[Coin], tx_removals: List[bytes32]):
"""
        Only called for transaction blocks (which therefore carry rewards and transactions)
"""
if block.is_transaction_block() is False:
return None
assert block.foliage_transaction_block is not None
for coin in tx_additions:
record: CoinRecord = CoinRecord(
coin,
block.height,
uint32(0),
False,
False,
block.foliage_transaction_block.timestamp,
)
await self._add_coin_record(record, False)
included_reward_coins = block.get_included_reward_coins()
if block.height == 0:
assert len(included_reward_coins) == 0
else:
assert len(included_reward_coins) >= 2
for coin in included_reward_coins:
reward_coin_r: CoinRecord = CoinRecord(
coin,
block.height,
uint32(0),
False,
True,
block.foliage_transaction_block.timestamp,
)
await self._add_coin_record(reward_coin_r, False)
total_amount_spent: int = 0
for coin_name in tx_removals:
total_amount_spent += await self._set_spent(coin_name, block.height)
# Sanity check, already checked in block_body_validation
assert sum([a.amount for a in tx_additions]) <= total_amount_spent
# Checks DB and DiffStores for CoinRecord with coin_name and returns it
async def get_coin_record(self, coin_name: bytes32) -> Optional[CoinRecord]:
cached = self.coin_record_cache.get(coin_name)
if cached is not None:
return cached
cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE coin_name=?", (coin_name.hex(),))
row = await cursor.fetchone()
await cursor.close()
if row is not None:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
record = CoinRecord(coin, row[1], row[2], row[3], row[4], row[8])
self.coin_record_cache.put(record.coin.name(), record)
return record
return None
async def get_coins_added_at_height(self, height: uint32) -> List[CoinRecord]:
cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE confirmed_index=?", (height,))
rows = await cursor.fetchall()
await cursor.close()
coins = []
for row in rows:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
coins.append(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return coins
async def get_coins_removed_at_height(self, height: uint32) -> List[CoinRecord]:
cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE spent_index=?", (height,))
rows = await cursor.fetchall()
await cursor.close()
coins = []
for row in rows:
spent: bool = bool(row[3])
if spent:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
coin_record = CoinRecord(coin, row[1], row[2], spent, row[4], row[8])
coins.append(coin_record)
return coins
# Checks DB and DiffStores for CoinRecords with puzzle_hash and returns them
async def get_coin_records_by_puzzle_hash(
self,
include_spent_coins: bool,
puzzle_hash: bytes32,
start_height: uint32 = uint32(0),
end_height: uint32 = uint32((2 ** 32) - 1),
) -> List[CoinRecord]:
coins = set()
cursor = await self.coin_record_db.execute(
f"SELECT * from coin_record WHERE puzzle_hash=? AND confirmed_index>=? AND confirmed_index<? "
f"{"" if include_spent_coins else "AND spent=0"}",
(puzzle_hash.hex(), start_height, end_height),
)
rows = await cursor.fetchall()
await cursor.close()
for row in rows:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
coins.add(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return list(coins)
async def get_coin_records_by_puzzle_hashes(
self,
include_spent_coins: bool,
puzzle_hashes: List[bytes32],
start_height: uint32 = uint32(0),
end_height: uint32 = uint32((2 ** 32) - 1),
) -> List[CoinRecord]:
if len(puzzle_hashes) == 0:
return []
coins = set()
puzzle_hashes_db = tuple([ph.hex() for ph in puzzle_hashes])
cursor = await self.coin_record_db.execute(
f'SELECT * from coin_record WHERE puzzle_hash in ({'?,' * (len(puzzle_hashes_db) - 1)}?) '
f"AND confirmed_index>=? AND confirmed_index<? "
f"{"" if include_spent_coins else "AND spent=0"}",
puzzle_hashes_db + (start_height, end_height),
)
rows = await cursor.fetchall()
await cursor.close()
for row in rows:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
coins.add(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return list(coins)
async def rollback_to_block(self, block_index: int):
"""
Note that block_index can be negative, in which case everything is rolled back
"""
# Update memory cache
        delete_queue: List[bytes32] = []
for coin_name, coin_record in list(self.coin_record_cache.cache.items()):
if int(coin_record.spent_block_index) > block_index:
new_record = CoinRecord(
coin_record.coin,
coin_record.confirmed_block_index,
uint32(0),
False,
coin_record.coinbase,
coin_record.timestamp,
)
self.coin_record_cache.put(coin_record.coin.name(), new_record)
if int(coin_record.confirmed_block_index) > block_index:
delete_queue.append(coin_name)
for coin_name in delete_queue:
self.coin_record_cache.remove(coin_name)
# Delete from storage
c1 = await self.coin_record_db.execute("DELETE FROM coin_record WHERE confirmed_index>?", (block_index,))
await c1.close()
c2 = await self.coin_record_db.execute(
"UPDATE coin_record SET spent_index = 0, spent = 0 WHERE spent_index>?",
(block_index,),
)
await c2.close()
# Store CoinRecord in DB and ram cache
async def _add_coin_record(self, record: CoinRecord, allow_replace: bool) -> None:
if self.coin_record_cache.get(record.coin.name()) is not None:
self.coin_record_cache.remove(record.coin.name())
cursor = await self.coin_record_db.execute(
f"INSERT {"OR REPLACE " if allow_replace else ""}INTO coin_record VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)",
(
record.coin.name().hex(),
record.confirmed_block_index,
record.spent_block_index,
int(record.spent),
int(record.coinbase),
str(record.coin.puzzle_hash.hex()),
str(record.coin.parent_coin_info.hex()),
bytes(record.coin.amount),
record.timestamp,
),
)
await cursor.close()
# Update coin_record to be spent in DB
async def _set_spent(self, coin_name: bytes32, index: uint32) -> uint64:
current: Optional[CoinRecord] = await self.get_coin_record(coin_name)
if current is None:
raise ValueError(f"Cannot spend a coin that does not exist in db: {coin_name}")
assert not current.spent # Redundant sanity check, already checked in block_body_validation
spent: CoinRecord = CoinRecord(
current.coin,
current.confirmed_block_index,
index,
True,
current.coinbase,
current.timestamp,
) # type: ignore # noqa
await self._add_coin_record(spent, True)
return current.coin.amount
|
from typing import List, Optional
import aiosqlite
from shamrock.types.blockchain_format.coin import Coin
from shamrock.types.blockchain_format.sized_bytes import bytes32
from shamrock.types.coin_record import CoinRecord
from shamrock.types.full_block import FullBlock
from shamrock.util.db_wrapper import DBWrapper
from shamrock.util.ints import uint32, uint64
from shamrock.util.lru_cache import LRUCache
class CoinStore:
"""
This object handles CoinRecords in DB.
A cache is maintained for quicker access to recent coins.
"""
coin_record_db: aiosqlite.Connection
coin_record_cache: LRUCache
cache_size: uint32
db_wrapper: DBWrapper
@classmethod
async def create(cls, db_wrapper: DBWrapper, cache_size: uint32 = uint32(60000)):
self = cls()
self.cache_size = cache_size
self.db_wrapper = db_wrapper
self.coin_record_db = db_wrapper.db
await self.coin_record_db.execute("pragma journal_mode=wal")
await self.coin_record_db.execute("pragma synchronous=2")
await self.coin_record_db.execute(
(
"CREATE TABLE IF NOT EXISTS coin_record("
"coin_name text PRIMARY KEY,"
" confirmed_index bigint,"
" spent_index bigint,"
" spent int,"
" coinbase int,"
" puzzle_hash text,"
" coin_parent text,"
" amount blob,"
" timestamp bigint)"
)
)
# Useful for reorg lookups
await self.coin_record_db.execute(
"CREATE INDEX IF NOT EXISTS coin_confirmed_index on coin_record(confirmed_index)"
)
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_spent_index on coin_record(spent_index)")
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_spent on coin_record(spent)")
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_puzzle_hash on coin_record(puzzle_hash)")
await self.coin_record_db.commit()
self.coin_record_cache = LRUCache(cache_size)
return self
async def new_block(self, block: FullBlock, tx_additions: List[Coin], tx_removals: List[bytes32]):
"""
        Only called for transaction blocks (which therefore carry rewards and transactions)
"""
if block.is_transaction_block() is False:
return None
assert block.foliage_transaction_block is not None
for coin in tx_additions:
record: CoinRecord = CoinRecord(
coin,
block.height,
uint32(0),
False,
False,
block.foliage_transaction_block.timestamp,
)
await self._add_coin_record(record, False)
included_reward_coins = block.get_included_reward_coins()
if block.height == 0:
assert len(included_reward_coins) == 0
else:
assert len(included_reward_coins) >= 2
for coin in included_reward_coins:
reward_coin_r: CoinRecord = CoinRecord(
coin,
block.height,
uint32(0),
False,
True,
block.foliage_transaction_block.timestamp,
)
await self._add_coin_record(reward_coin_r, False)
total_amount_spent: int = 0
for coin_name in tx_removals:
total_amount_spent += await self._set_spent(coin_name, block.height)
# Sanity check, already checked in block_body_validation
assert sum([a.amount for a in tx_additions]) <= total_amount_spent
# Checks DB and DiffStores for CoinRecord with coin_name and returns it
async def get_coin_record(self, coin_name: bytes32) -> Optional[CoinRecord]:
cached = self.coin_record_cache.get(coin_name)
if cached is not None:
return cached
cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE coin_name=?", (coin_name.hex(),))
row = await cursor.fetchone()
await cursor.close()
if row is not None:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
record = CoinRecord(coin, row[1], row[2], row[3], row[4], row[8])
self.coin_record_cache.put(record.coin.name(), record)
return record
return None
async def get_coins_added_at_height(self, height: uint32) -> List[CoinRecord]:
cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE confirmed_index=?", (height,))
rows = await cursor.fetchall()
await cursor.close()
coins = []
for row in rows:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
coins.append(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return coins
async def get_coins_removed_at_height(self, height: uint32) -> List[CoinRecord]:
cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE spent_index=?", (height,))
rows = await cursor.fetchall()
await cursor.close()
coins = []
for row in rows:
spent: bool = bool(row[3])
if spent:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
coin_record = CoinRecord(coin, row[1], row[2], spent, row[4], row[8])
coins.append(coin_record)
return coins
# Checks DB and DiffStores for CoinRecords with puzzle_hash and returns them
async def get_coin_records_by_puzzle_hash(
self,
include_spent_coins: bool,
puzzle_hash: bytes32,
start_height: uint32 = uint32(0),
end_height: uint32 = uint32((2 ** 32) - 1),
) -> List[CoinRecord]:
coins = set()
cursor = await self.coin_record_db.execute(
f"SELECT * from coin_record WHERE puzzle_hash=? AND confirmed_index>=? AND confirmed_index<? "
f"{'' if include_spent_coins else 'AND spent=0'}",
(puzzle_hash.hex(), start_height, end_height),
)
rows = await cursor.fetchall()
await cursor.close()
for row in rows:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
coins.add(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return list(coins)
async def get_coin_records_by_puzzle_hashes(
self,
include_spent_coins: bool,
puzzle_hashes: List[bytes32],
start_height: uint32 = uint32(0),
end_height: uint32 = uint32((2 ** 32) - 1),
) -> List[CoinRecord]:
if len(puzzle_hashes) == 0:
return []
coins = set()
puzzle_hashes_db = tuple([ph.hex() for ph in puzzle_hashes])
cursor = await self.coin_record_db.execute(
f'SELECT * from coin_record WHERE puzzle_hash in ({"?," * (len(puzzle_hashes_db) - 1)}?) '
f"AND confirmed_index>=? AND confirmed_index<? "
f"{'' if include_spent_coins else 'AND spent=0'}",
puzzle_hashes_db + (start_height, end_height),
)
rows = await cursor.fetchall()
await cursor.close()
for row in rows:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
coins.add(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return list(coins)
async def rollback_to_block(self, block_index: int):
"""
Note that block_index can be negative, in which case everything is rolled back
"""
# Update memory cache
        delete_queue: List[bytes32] = []
for coin_name, coin_record in list(self.coin_record_cache.cache.items()):
if int(coin_record.spent_block_index) > block_index:
new_record = CoinRecord(
coin_record.coin,
coin_record.confirmed_block_index,
uint32(0),
False,
coin_record.coinbase,
coin_record.timestamp,
)
self.coin_record_cache.put(coin_record.coin.name(), new_record)
if int(coin_record.confirmed_block_index) > block_index:
delete_queue.append(coin_name)
for coin_name in delete_queue:
self.coin_record_cache.remove(coin_name)
# Delete from storage
c1 = await self.coin_record_db.execute("DELETE FROM coin_record WHERE confirmed_index>?", (block_index,))
await c1.close()
c2 = await self.coin_record_db.execute(
"UPDATE coin_record SET spent_index = 0, spent = 0 WHERE spent_index>?",
(block_index,),
)
await c2.close()
# Store CoinRecord in DB and ram cache
async def _add_coin_record(self, record: CoinRecord, allow_replace: bool) -> None:
if self.coin_record_cache.get(record.coin.name()) is not None:
self.coin_record_cache.remove(record.coin.name())
cursor = await self.coin_record_db.execute(
f"INSERT {'OR REPLACE ' if allow_replace else ''}INTO coin_record VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)",
(
record.coin.name().hex(),
record.confirmed_block_index,
record.spent_block_index,
int(record.spent),
int(record.coinbase),
str(record.coin.puzzle_hash.hex()),
str(record.coin.parent_coin_info.hex()),
bytes(record.coin.amount),
record.timestamp,
),
)
await cursor.close()
# Update coin_record to be spent in DB
async def _set_spent(self, coin_name: bytes32, index: uint32) -> uint64:
current: Optional[CoinRecord] = await self.get_coin_record(coin_name)
if current is None:
raise ValueError(f"Cannot spend a coin that does not exist in db: {coin_name}")
assert not current.spent # Redundant sanity check, already checked in block_body_validation
spent: CoinRecord = CoinRecord(
current.coin,
current.confirmed_block_index,
index,
True,
current.coinbase,
current.timestamp,
) # type: ignore # noqa
await self._add_coin_record(spent, True)
return current.coin.amount
|
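# Illustrative sketch (annotation, not part of the dataset rows): the parameterised
# IN-clause trick from get_coin_records_by_puzzle_hashes above -- build "?,?,...,?"
# from the number of values and pass the values as the parameter tuple. Shown with
# the stdlib sqlite3 module instead of aiosqlite; the table and values are made up.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE coin_record(coin_name text, puzzle_hash text)")
conn.executemany(
    "INSERT INTO coin_record VALUES(?, ?)",
    [("c1", "aa"), ("c2", "bb"), ("c3", "cc")],
)
puzzle_hashes = ("aa", "cc")
placeholders = "?," * (len(puzzle_hashes) - 1) + "?"   # -> "?,?"
rows = conn.execute(
    f"SELECT coin_name FROM coin_record WHERE puzzle_hash IN ({placeholders}) ORDER BY coin_name",
    puzzle_hashes,
).fetchall()
assert rows == [("c1",), ("c3",)]
conn.close()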
import streamlit as st
import plotly.express as px
import pandas as pd
import pickle
import os
import base64
from io import BytesIO
from datetime import datetime
def to_excel(df):
output = BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
df.to_excel(writer, index=False, sheet_name='Sheet1')
writer.save()
processed_data = output.getvalue()
return processed_data
def get_table_download_link(df):
val = to_excel(df)
b64 = base64.b64encode(val)
date_now = datetime.utcnow()
file_name = f'data_resultado-{date_now.strftime('%Y%m%d%H%M%S')}.xlsx'
link_download = f""" <a href="data:application/octet-stream;base64,{b64.decode()}" download="{file_name}">Download xlsx file</a> """
return link_download
def plot_graph(df_graph):
fig = px.bar(
df_graph,
x='Labels',
y='Descrição',
# text='Text test',
title='Test',
labels={
"Labels": "Labels",
"Descrição": 'Número de coisas'
},
# width=1400,
height=500
)
return fig
def main(classificador):
st.title('Model')
process_file = st.file_uploader(
"Faça o upload do arquivo no campo abaixo.",
type=["csv", "xlsx"],
accept_multiple_files=False
)
print(process_file)
print(os.environ.get('TOKEN'))
    if process_file is not None:
if process_file.name.endswith('.csv'):
df = pd.read_csv(
process_file, header=0, skip_blank_lines=True, skipinitialspace=True, encoding='latin-1')
elif process_file.name.endswith('.xlsx'):
df = pd.read_excel(
process_file, engine="openpyxl")
with st.empty():
st.write('Fazendo as predições ...')
df['Labels'] = classificador.predict(
df["Descrição"].astype("unicode"))
st.write('Predições feitas com sucesso !!!')
st.dataframe(df.head(20))
df_graph = df.groupby(['Labels'], as_index=False)['Descrição'].count()
df_graph.sort_values(by=['Descrição'], inplace=True, ascending=False)
print(df_graph)
st.plotly_chart(plot_graph(df_graph), use_container_width=True)
st.text('Gerando link para download ...')
st.markdown(get_table_download_link(df), unsafe_allow_html=True)
st.success('Link gerado com sucesso.')
if __name__ == '__main__':
classificador = pickle.load(open("modelo_final.pkl", "rb"))
main(classificador)
|
import streamlit as st
import plotly.express as px
import pandas as pd
import pickle
import os
import base64
from io import BytesIO
from datetime import datetime
def to_excel(df):
output = BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
df.to_excel(writer, index=False, sheet_name='Sheet1')
writer.save()
processed_data = output.getvalue()
return processed_data
def get_table_download_link(df):
val = to_excel(df)
b64 = base64.b64encode(val)
date_now = datetime.utcnow()
file_name = f'data_resultado-{date_now.strftime("%Y%m%d%H%M%S")}.xlsx'
link_download = f""" <a href="data:application/octet-stream;base64,{b64.decode()}" download="{file_name}">Download xlsx file</a> """
return link_download
def plot_graph(df_graph):
fig = px.bar(
df_graph,
x='Labels',
y='Descrição',
# text='Text test',
title='Test',
labels={
"Labels": "Labels",
"Descrição": 'Número de coisas'
},
# width=1400,
height=500
)
return fig
def main(classificador):
st.title('Model')
process_file = st.file_uploader(
"Faça o upload do arquivo no campo abaixo.",
type=["csv", "xlsx"],
accept_multiple_files=False
)
print(process_file)
print(os.environ.get('TOKEN'))
    if process_file is not None:
if process_file.name.endswith('.csv'):
df = pd.read_csv(
process_file, header=0, skip_blank_lines=True, skipinitialspace=True, encoding='latin-1')
elif process_file.name.endswith('.xlsx'):
df = pd.read_excel(
process_file, engine="openpyxl")
with st.empty():
st.write('Fazendo as predições ...')
df['Labels'] = classificador.predict(
df["Descrição"].astype("unicode"))
st.write('Predições feitas com sucesso !!!')
st.dataframe(df.head(20))
df_graph = df.groupby(['Labels'], as_index=False)['Descrição'].count()
df_graph.sort_values(by=['Descrição'], inplace=True, ascending=False)
print(df_graph)
st.plotly_chart(plot_graph(df_graph), use_container_width=True)
st.text('Gerando link para download ...')
st.markdown(get_table_download_link(df), unsafe_allow_html=True)
st.success('Link gerado com sucesso.')
if __name__ == '__main__':
classificador = pickle.load(open("modelo_final.pkl", "rb"))
main(classificador)
|
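# Illustrative sketch (annotation, not part of the dataset rows): the download-link
# technique from get_table_download_link above -- base64-encode the file bytes and
# embed them in a data: URI inside an <a> tag. Plain CSV bytes are used so the sketch
# needs no xlsxwriter/openpyxl; the filename is made up.
import base64
from io import BytesIO

buffer = BytesIO()
buffer.write("Labels,Descrição\nA,3\nB,1\n".encode("utf-8"))
b64 = base64.b64encode(buffer.getvalue()).decode()
file_name = "data_resultado-example.csv"
link = f'<a href="data:application/octet-stream;base64,{b64}" download="{file_name}">Download file</a>'
print(link)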
#! /usr/bin/env python3
'''
CBC Radio streams player/downloader
'''
from datetime import datetime
from argparse import ArgumentParser, OPTIONAL
from collections import namedtuple
import subprocess
import readline
import requests
from lxml import html
_STREAM_SNAPSHOT = [
("Radio One", "BC", "Kamloops",
"http://cbc_r1_kam.akacast.akamaistream.net/7/440/451661/v1/rc.akacast.akamaistream.net/cbc_r1_kam"),
("Radio One", "BC", "Kelowna",
"http://cbc_r1_kel.akacast.akamaistream.net/7/229/451661/v1/rc.akacast.akamaistream.net/cbc_r1_kel"),
("Radio One", "BC", "Prince George",
"http://cbc_r1_prg.akacast.akamaistream.net/7/966/451661/v1/rc.akacast.akamaistream.net/cbc_r1_prg"),
("Radio One", "BC", "Vancouver",
"http://cbc_r1_vcr.akacast.akamaistream.net/7/723/451661/v1/rc.akacast.akamaistream.net/cbc_r1_vcr"),
("Radio One", "BC", "Victoria",
"http://cbc_r1_vic.akacast.akamaistream.net/7/728/451661/v1/rc.akacast.akamaistream.net/cbc_r1_vic"),
("Radio One", "Yukon", "Whitehorse",
"http://cbc_r1_whs.akacast.akamaistream.net/7/319/451661/v1/rc.akacast.akamaistream.net/cbc_r1_whs"),
("Radio One", "Alberta", "Calgary",
"http://cbc_r1_cgy.akacast.akamaistream.net/7/298/451661/v1/rc.akacast.akamaistream.net/cbc_r1_cgy"),
("Radio One", "Alberta", "Edmonton",
"http://cbc_r1_edm.akacast.akamaistream.net/7/904/451661/v1/rc.akacast.akamaistream.net/cbc_r1_edm"),
("Radio One", "Saskatchewan", "Regina",
"http://cbc_r1_reg.akacast.akamaistream.net/7/666/451661/v1/rc.akacast.akamaistream.net/cbc_r1_reg"),
("Radio One", "Saskatchewan", "Saskatoon",
"http://cbc_r1_ssk.akacast.akamaistream.net/7/842/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ssk"),
("Radio One", "Manitoba", "Winnipeg",
"http://cbc_r1_wpg.akacast.akamaistream.net/7/831/451661/v1/rc.akacast.akamaistream.net/cbc_r1_wpg"),
("Radio One", "Nunavut", "Iqaluit",
"http://cbc_r1_iqa.akacast.akamaistream.net/7/325/451661/v1/rc.akacast.akamaistream.net/cbc_r1_iqa"),
("Radio One", "Ontario", "Kitchener-Waterloo",
"http://cbc_r1_ekw.akacast.akamaistream.net/7/63/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ekw"),
("Radio One", "Ontario", "London",
"http://cbc_r1_ldn.akacast.akamaistream.net/7/104/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ldn"),
("Radio One", "Ontario", "Ottawa",
"http://cbc_r1_ott.akacast.akamaistream.net/7/613/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ott"),
("Radio One", "Ontario", "Sudbury",
"http://cbc_r1_sud.akacast.akamaistream.net/7/380/451661/v1/rc.akacast.akamaistream.net/cbc_r1_sud"),
("Radio One", "Ontario", "Thunder Bay",
"http://cbc_r1_tba.akacast.akamaistream.net/7/245/451661/v1/rc.akacast.akamaistream.net/cbc_r1_tba"),
("Radio One", "Ontario", "Toronto",
"http://cbc_r1_tor.akacast.akamaistream.net/7/632/451661/v1/rc.akacast.akamaistream.net/cbc_r1_tor"),
("Radio One", "Ontario", "Windsor",
"http://cbc_r1_wdr.akacast.akamaistream.net/7/813/451661/v1/rc.akacast.akamaistream.net/cbc_r1_wdr"),
("Radio One", "Quebec", "Montreal",
"http://cbc_r1_mtl.akacast.akamaistream.net/7/35/451661/v1/rc.akacast.akamaistream.net/cbc_r1_mtl"),
("Radio One", "Quebec", "Nord Quebec",
"http://cbc_r1_n_mtl.akacast.akamaistream.net/7/823/451661/v1/rc.akacast.akamaistream.net/cbc_r1_n_mtl"),
("Radio One", "Quebec", "Quebec City",
"http://cbc_r1_qqu.akacast.akamaistream.net/7/29/451661/v1/rc.akacast.akamaistream.net/cbc_r1_qqu"),
("Radio One", "New Brunswick", "Fredericton",
"http://cbc_r1_frd.akacast.akamaistream.net/7/553/451661/v1/rc.akacast.akamaistream.net/cbc_r1_frd"),
("Radio One", "New Brunswick", "Moncton",
"http://cbc_r1_mct.akacast.akamaistream.net/7/383/451661/v1/rc.akacast.akamaistream.net/cbc_r1_mct"),
("Radio One", "New Brunswick", "Saint John",
"http://cbc_r1_snb.akacast.akamaistream.net/7/754/451661/v1/rc.akacast.akamaistream.net/cbc_r1_snb"),
("Radio One", "Prince Edward Island", "Charlottetown",
"http://cbc_r1_chr.akacast.akamaistream.net/7/169/451661/v1/rc.akacast.akamaistream.net/cbc_r1_chr"),
("Radio One", "Nova Scotia", "Cape Breton",
"http://cbc_r1_syd.akacast.akamaistream.net/7/897/451661/v1/rc.akacast.akamaistream.net/cbc_r1_syd"),
("Radio One", "Nova Scotia", "Halifax",
"http://cbc_r1_hfx.akacast.akamaistream.net/7/981/451661/v1/rc.akacast.akamaistream.net/cbc_r1_hfx"),
("Radio One", "Newfoundland & Labrador", "Corner Brook",
"http://cbc_r2_cor.akacast.akamaistream.net/7/550/451661/v1/rc.akacast.akamaistream.net/cbc_r1_cor"),
("Radio One", "Newfoundland & Labrador", "Grand Falls/Gander",
"http://cbc_r1_gfa.akacast.akamaistream.net/7/492/451661/v1/rc.akacast.akamaistream.net/cbc_r1_gfa"),
("Radio One", "Newfoundland & Labrador", "Labrador",
"http://cbc_r1_gba.akacast.akamaistream.net/7/274/451661/v1/rc.akacast.akamaistream.net/cbc_r1_gba"),
("Radio One", "Newfoundland & Labrador", "St. John's",
"http://cbc_r1_snf.akacast.akamaistream.net/7/750/451661/v1/rc.akacast.akamaistream.net/cbc_r1_snf"),
("Radio One", "Northwest Territories", "Inuvik",
"http://cbc_r1_ink.akacast.akamaistream.net/7/967/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ink"),
("Radio One", "Northwest Territories", "Yellowknife",
"http://cbc_r1_ykn.akacast.akamaistream.net/7/369/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ykn"),
("Radio Two", "Atlantic", "Halifax",
"http://cbc_r2_hfx.akacast.akamaistream.net/7/917/451661/v1/rc.akacast.akamaistream.net/cbc_r2_hfx"),
("Radio Two", "Eastern", "Toronto",
"http://cbc_r2_tor.akacast.akamaistream.net/7/364/451661/v1/rc.akacast.akamaistream.net/cbc_r2_tor"),
("Radio Two", "Central", "Winnipeg",
"http://cbc_r2_wpg.akacast.akamaistream.net/7/233/451661/v1/rc.akacast.akamaistream.net/cbc_r2_wpg"),
("Radio Two", "Mountain", "Edmonton",
"http://cbc_r2_edm.akacast.akamaistream.net/7/40/451661/v1/rc.akacast.akamaistream.net/cbc_r2_edm"),
("Radio Two", "Pacific", "Vancouver",
"http://cbc_r2_vcr.akacast.akamaistream.net/7/773/451661/v1/rc.akacast.akamaistream.net/cbc_r2_vcr"),
("Radio Two", "International", "Pacific",
"http://cbc_r2_ipt.akacast.akamaistream.net/7/669/451661/v1/rc.akacast.akamaistream.net/cbc_r2_ipt"),
("Radio Two", "International", "Eastern",
"http://cbc_r2_iet.akacast.akamaistream.net/7/50/451661/v1/rc.akacast.akamaistream.net/cbc_r2_iet"),
]
# CBC Music stream list page
_STREAMS = 'http://www.cbc.ca/radio/includes/streams.html'
# CBC Radio 2 Eastern (Toronto) stream URL
CBC_RADIO_2 = 'http://cbc_r2_tor.akacast.akamaistream.net' \
'/7/364/451661/v1/rc.akacast.akamaistream.net/cbc_r2_tor'
# CBC Radio 1 Ottawa stream URL
CBC_RADIO_1 = 'http://cbc_r1_ott.akacast.akamaistream.net' \
'/7/613/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ott'
argument_parser = ArgumentParser(description=__doc__)
argument_parser.add_argument('-l', '--list', action='store_true')
argument_parser.add_argument('-t', '--tee', action='store_true')
mutex_group = argument_parser.add_mutually_exclusive_group(required=False)
# Yuck, wish it was multiple arguments,
# but argparse doesn't support anything but OPTIONAL.
mutex_group.add_argument('stream', nargs=OPTIONAL, type=str.split,
help='Name of stream to play/record')
mutex_group.add_argument('-1', '--one', action='store_const', const=CBC_RADIO_1,
dest='url', help='CBC Radio One Eastern')
mutex_group.add_argument('-2', '--two', action='store_const', const=CBC_RADIO_2,
dest='url', help='CBC Radio Two Eastern')
PlaylistItem = namedtuple('PlaylistItem', ['radio', 'province', 'city', 'url'])
_COMPLETION_INDEX = {' '.join((radio, region, city)): url
for radio, region, city, url
in _STREAM_SNAPSHOT}
def get_streams():
'''
Get CBC Radio music streams as {name: stream_url}.
'''
r = requests.get(_STREAMS)
r.raise_for_status()
h = html.fromstring(r.content, base_url=r.url) # noqa
radio_one, radio_two = h.cssselect('table')
for row in radio_one.cssselect('tbody td'):
raise NotImplementedError()
for row in radio_two.cssselect('tbody td'):
raise NotImplementedError()
class Completer:
def __init__(self, streams):
self.streams = streams
self.previous_prefix = None
def complete(self, text, state):
if text != self.previous_prefix:
#print('!' * 200)
self.completions = [stream
for stream
in self.streams
if readline.get_line_buffer().strip() in stream]
self.previous_prefix = text
try:
return self.completions[state]
except IndexError:
return None
def mpv_cmdline(input_url):
'''
Return an mpv command-line to play BUT NOT record input_url.
'''
return ['mpv', '--vo=null', input_url]
def ffmpeg_cmdline(input_url, tee):
'''
Return a ffmpeg command to play and maybe record input_url.
:param tee: if True, also save to disk.
'''
return ['ffmpeg',
'-hide_banner',
'-nostdin',
'-i', f'async:{input_url}',
*([] if not tee else
['-f', 'mpegts',
'-c', 'copy',
f'''./{datetime.now()
.replace(microsecond=0)
.isoformat()}.m2ts''']),
'-f', 'alsa',
'default']
def play(input_url, tee=False):
'''
Play input_url, optionally also saving to disk.
'''
subprocess.check_call(ffmpeg_cmdline(input_url, tee=tee))
def print_streams(streams):
'''
Pretty print streams.
'''
print(*sorted(streams), sep='\n')
def autocomplete(streams):
'''
List choices, and prompt with autocompletion one item from streams.
'''
print_streams(streams)
# readline API doesn't make this undoable
readline.parse_and_bind('tab: complete')
try:
old_delims = readline.get_completer_delims()
readline.set_completer_delims('')
try:
old_completer = readline.get_completer()
readline.set_completer(Completer(streams).complete)
return streams[input('Playlist: ')]
finally:
readline.set_completer(old_completer)
finally:
readline.set_completer_delims(old_delims)
if __name__ == '__main__':
from sys import exit
args = argument_parser.parse_args()
#streams = get_streams()
streams = _COMPLETION_INDEX
if args.list:
print_streams(streams)
exit()
if args.url is not None:
stream_url = args.url
elif args.stream is None:
try:
stream_url = autocomplete(streams)
except (KeyboardInterrupt, EOFError):
exit(1)
else:
matches = {stream: url
for stream, url
in streams.items()
if all(map(stream.__contains__,
args.stream))}
if not matches:
exit(f'Not a valid stream: {' '.join(args.stream)}')
elif len(matches) > 1:
try:
stream_url = autocomplete(matches)
except (KeyboardInterrupt, EOFError):
exit(1)
else:
stream_url = next(iter(matches.values()))
play(stream_url, tee=args.tee)
|
#! /usr/bin/env python3
'''
CBC Radio streams player/downloader
'''
from datetime import datetime
from argparse import ArgumentParser, OPTIONAL
from collections import namedtuple
import subprocess
import readline
import requests
from lxml import html
_STREAM_SNAPSHOT = [
("Radio One", "BC", "Kamloops",
"http://cbc_r1_kam.akacast.akamaistream.net/7/440/451661/v1/rc.akacast.akamaistream.net/cbc_r1_kam"),
("Radio One", "BC", "Kelowna",
"http://cbc_r1_kel.akacast.akamaistream.net/7/229/451661/v1/rc.akacast.akamaistream.net/cbc_r1_kel"),
("Radio One", "BC", "Prince George",
"http://cbc_r1_prg.akacast.akamaistream.net/7/966/451661/v1/rc.akacast.akamaistream.net/cbc_r1_prg"),
("Radio One", "BC", "Vancouver",
"http://cbc_r1_vcr.akacast.akamaistream.net/7/723/451661/v1/rc.akacast.akamaistream.net/cbc_r1_vcr"),
("Radio One", "BC", "Victoria",
"http://cbc_r1_vic.akacast.akamaistream.net/7/728/451661/v1/rc.akacast.akamaistream.net/cbc_r1_vic"),
("Radio One", "Yukon", "Whitehorse",
"http://cbc_r1_whs.akacast.akamaistream.net/7/319/451661/v1/rc.akacast.akamaistream.net/cbc_r1_whs"),
("Radio One", "Alberta", "Calgary",
"http://cbc_r1_cgy.akacast.akamaistream.net/7/298/451661/v1/rc.akacast.akamaistream.net/cbc_r1_cgy"),
("Radio One", "Alberta", "Edmonton",
"http://cbc_r1_edm.akacast.akamaistream.net/7/904/451661/v1/rc.akacast.akamaistream.net/cbc_r1_edm"),
("Radio One", "Saskatchewan", "Regina",
"http://cbc_r1_reg.akacast.akamaistream.net/7/666/451661/v1/rc.akacast.akamaistream.net/cbc_r1_reg"),
("Radio One", "Saskatchewan", "Saskatoon",
"http://cbc_r1_ssk.akacast.akamaistream.net/7/842/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ssk"),
("Radio One", "Manitoba", "Winnipeg",
"http://cbc_r1_wpg.akacast.akamaistream.net/7/831/451661/v1/rc.akacast.akamaistream.net/cbc_r1_wpg"),
("Radio One", "Nunavut", "Iqaluit",
"http://cbc_r1_iqa.akacast.akamaistream.net/7/325/451661/v1/rc.akacast.akamaistream.net/cbc_r1_iqa"),
("Radio One", "Ontario", "Kitchener-Waterloo",
"http://cbc_r1_ekw.akacast.akamaistream.net/7/63/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ekw"),
("Radio One", "Ontario", "London",
"http://cbc_r1_ldn.akacast.akamaistream.net/7/104/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ldn"),
("Radio One", "Ontario", "Ottawa",
"http://cbc_r1_ott.akacast.akamaistream.net/7/613/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ott"),
("Radio One", "Ontario", "Sudbury",
"http://cbc_r1_sud.akacast.akamaistream.net/7/380/451661/v1/rc.akacast.akamaistream.net/cbc_r1_sud"),
("Radio One", "Ontario", "Thunder Bay",
"http://cbc_r1_tba.akacast.akamaistream.net/7/245/451661/v1/rc.akacast.akamaistream.net/cbc_r1_tba"),
("Radio One", "Ontario", "Toronto",
"http://cbc_r1_tor.akacast.akamaistream.net/7/632/451661/v1/rc.akacast.akamaistream.net/cbc_r1_tor"),
("Radio One", "Ontario", "Windsor",
"http://cbc_r1_wdr.akacast.akamaistream.net/7/813/451661/v1/rc.akacast.akamaistream.net/cbc_r1_wdr"),
("Radio One", "Quebec", "Montreal",
"http://cbc_r1_mtl.akacast.akamaistream.net/7/35/451661/v1/rc.akacast.akamaistream.net/cbc_r1_mtl"),
("Radio One", "Quebec", "Nord Quebec",
"http://cbc_r1_n_mtl.akacast.akamaistream.net/7/823/451661/v1/rc.akacast.akamaistream.net/cbc_r1_n_mtl"),
("Radio One", "Quebec", "Quebec City",
"http://cbc_r1_qqu.akacast.akamaistream.net/7/29/451661/v1/rc.akacast.akamaistream.net/cbc_r1_qqu"),
("Radio One", "New Brunswick", "Fredericton",
"http://cbc_r1_frd.akacast.akamaistream.net/7/553/451661/v1/rc.akacast.akamaistream.net/cbc_r1_frd"),
("Radio One", "New Brunswick", "Moncton",
"http://cbc_r1_mct.akacast.akamaistream.net/7/383/451661/v1/rc.akacast.akamaistream.net/cbc_r1_mct"),
("Radio One", "New Brunswick", "Saint John",
"http://cbc_r1_snb.akacast.akamaistream.net/7/754/451661/v1/rc.akacast.akamaistream.net/cbc_r1_snb"),
("Radio One", "Prince Edward Island", "Charlottetown",
"http://cbc_r1_chr.akacast.akamaistream.net/7/169/451661/v1/rc.akacast.akamaistream.net/cbc_r1_chr"),
("Radio One", "Nova Scotia", "Cape Breton",
"http://cbc_r1_syd.akacast.akamaistream.net/7/897/451661/v1/rc.akacast.akamaistream.net/cbc_r1_syd"),
("Radio One", "Nova Scotia", "Halifax",
"http://cbc_r1_hfx.akacast.akamaistream.net/7/981/451661/v1/rc.akacast.akamaistream.net/cbc_r1_hfx"),
("Radio One", "Newfoundland & Labrador", "Corner Brook",
"http://cbc_r2_cor.akacast.akamaistream.net/7/550/451661/v1/rc.akacast.akamaistream.net/cbc_r1_cor"),
("Radio One", "Newfoundland & Labrador", "Grand Falls/Gander",
"http://cbc_r1_gfa.akacast.akamaistream.net/7/492/451661/v1/rc.akacast.akamaistream.net/cbc_r1_gfa"),
("Radio One", "Newfoundland & Labrador", "Labrador",
"http://cbc_r1_gba.akacast.akamaistream.net/7/274/451661/v1/rc.akacast.akamaistream.net/cbc_r1_gba"),
("Radio One", "Newfoundland & Labrador", "St. John's",
"http://cbc_r1_snf.akacast.akamaistream.net/7/750/451661/v1/rc.akacast.akamaistream.net/cbc_r1_snf"),
("Radio One", "Northwest Territories", "Inuvik",
"http://cbc_r1_ink.akacast.akamaistream.net/7/967/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ink"),
("Radio One", "Northwest Territories", "Yellowknife",
"http://cbc_r1_ykn.akacast.akamaistream.net/7/369/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ykn"),
("Radio Two", "Atlantic", "Halifax",
"http://cbc_r2_hfx.akacast.akamaistream.net/7/917/451661/v1/rc.akacast.akamaistream.net/cbc_r2_hfx"),
("Radio Two", "Eastern", "Toronto",
"http://cbc_r2_tor.akacast.akamaistream.net/7/364/451661/v1/rc.akacast.akamaistream.net/cbc_r2_tor"),
("Radio Two", "Central", "Winnipeg",
"http://cbc_r2_wpg.akacast.akamaistream.net/7/233/451661/v1/rc.akacast.akamaistream.net/cbc_r2_wpg"),
("Radio Two", "Mountain", "Edmonton",
"http://cbc_r2_edm.akacast.akamaistream.net/7/40/451661/v1/rc.akacast.akamaistream.net/cbc_r2_edm"),
("Radio Two", "Pacific", "Vancouver",
"http://cbc_r2_vcr.akacast.akamaistream.net/7/773/451661/v1/rc.akacast.akamaistream.net/cbc_r2_vcr"),
("Radio Two", "International", "Pacific",
"http://cbc_r2_ipt.akacast.akamaistream.net/7/669/451661/v1/rc.akacast.akamaistream.net/cbc_r2_ipt"),
("Radio Two", "International", "Eastern",
"http://cbc_r2_iet.akacast.akamaistream.net/7/50/451661/v1/rc.akacast.akamaistream.net/cbc_r2_iet"),
]
# CBC Music stream list page
_STREAMS = 'http://www.cbc.ca/radio/includes/streams.html'
# CBC Radio 2 Eastern (Toronto) stream URL
CBC_RADIO_2 = 'http://cbc_r2_tor.akacast.akamaistream.net' \
'/7/364/451661/v1/rc.akacast.akamaistream.net/cbc_r2_tor'
# CBC Radio 1 Ottawa stream URL
CBC_RADIO_1 = 'http://cbc_r1_ott.akacast.akamaistream.net' \
'/7/613/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ott'
argument_parser = ArgumentParser(description=__doc__)
argument_parser.add_argument('-l', '--list', action='store_true')
argument_parser.add_argument('-t', '--tee', action='store_true')
mutex_group = argument_parser.add_mutually_exclusive_group(required=False)
# Yuck, wish it was multiple arguments,
# but argparse doesn't support anything but OPTIONAL.
mutex_group.add_argument('stream', nargs=OPTIONAL, type=str.split,
help='Name of stream to play/record')
mutex_group.add_argument('-1', '--one', action='store_const', const=CBC_RADIO_1,
dest='url', help='CBC Radio One Eastern')
mutex_group.add_argument('-2', '--two', action='store_const', const=CBC_RADIO_2,
dest='url', help='CBC Radio Two Eastern')
PlaylistItem = namedtuple('PlaylistItem', ['radio', 'province', 'city', 'url'])
_COMPLETION_INDEX = {' '.join((radio, region, city)): url
for radio, region, city, url
in _STREAM_SNAPSHOT}
def get_streams():
'''
Get CBC Radio music streams as {name: stream_url}.
'''
r = requests.get(_STREAMS)
r.raise_for_status()
h = html.fromstring(r.content, base_url=r.url) # noqa
radio_one, radio_two = h.cssselect('table')
for row in radio_one.cssselect('tbody td'):
raise NotImplementedError()
for row in radio_two.cssselect('tbody td'):
raise NotImplementedError()
class Completer:
def __init__(self, streams):
self.streams = streams
self.previous_prefix = None
def complete(self, text, state):
if text != self.previous_prefix:
#print('!' * 200)
self.completions = [stream
for stream
in self.streams
if readline.get_line_buffer().strip() in stream]
self.previous_prefix = text
try:
return self.completions[state]
except IndexError:
return None
def mpv_cmdline(input_url):
'''
Return an mpv command-line to play BUT NOT record input_url.
'''
return ['mpv', '--vo=null', input_url]
def ffmpeg_cmdline(input_url, tee):
'''
Return a ffmpeg command to play and maybe record input_url.
:param tee: if True, also save to disk.
'''
return ['ffmpeg',
'-hide_banner',
'-nostdin',
'-i', f'async:{input_url}',
*([] if not tee else
['-f', 'mpegts',
'-c', 'copy',
f'''./{datetime.now()
.replace(microsecond=0)
.isoformat()}.m2ts''']),
'-f', 'alsa',
'default']
def play(input_url, tee=False):
'''
Play input_url, optionally also saving to disk.
'''
subprocess.check_call(ffmpeg_cmdline(input_url, tee=tee))
def print_streams(streams):
'''
Pretty print streams.
'''
print(*sorted(streams), sep='\n')
def autocomplete(streams):
'''
List choices, and prompt with autocompletion one item from streams.
'''
print_streams(streams)
# readline API doesn't make this undoable
readline.parse_and_bind('tab: complete')
try:
old_delims = readline.get_completer_delims()
readline.set_completer_delims('')
try:
old_completer = readline.get_completer()
readline.set_completer(Completer(streams).complete)
return streams[input('Playlist: ')]
finally:
readline.set_completer(old_completer)
finally:
readline.set_completer_delims(old_delims)
if __name__ == '__main__':
from sys import exit
args = argument_parser.parse_args()
#streams = get_streams()
streams = _COMPLETION_INDEX
if args.list:
print_streams(streams)
exit()
if args.url is not None:
stream_url = args.url
elif args.stream is None:
try:
stream_url = autocomplete(streams)
except (KeyboardInterrupt, EOFError):
exit(1)
else:
matches = {stream: url
for stream, url
in streams.items()
if all(map(stream.__contains__,
args.stream))}
if not matches:
exit(f'Not a valid stream: {" ".join(args.stream)}')
elif len(matches) > 1:
try:
stream_url = autocomplete(matches)
except (KeyboardInterrupt, EOFError):
exit(1)
else:
stream_url = next(iter(matches.values()))
play(stream_url, tee=args.tee)
|
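# Illustrative sketch (annotation, not part of the dataset rows): the word-matching rule
# the CBC script applies to the "stream" argument above -- keep a stream when every word
# typed is a substring of its display name. The two entries are taken from the snapshot;
# the URLs are shortened and the query is made up.
streams = {
    "Radio One Ontario Ottawa": "http://cbc_r1_ott.akacast.akamaistream.net/",
    "Radio Two Eastern Toronto": "http://cbc_r2_tor.akacast.akamaistream.net/",
}
words = ["Two", "Toronto"]
matches = {name: url for name, url in streams.items()
           if all(map(name.__contains__, words))}
assert list(matches) == ["Radio Two Eastern Toronto"]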
"""
Main script: annotates a directory of recipe XML files.
"""
import glob
import re
import os
from oper_utils import xml_to_recipe_annotated
from Ner_classifieur_annote import load_crf_model, predict_text, transform_to_xml_annote
from NER_ingredient_detector import get_content_from_xmlfile
from ComplexCalculator import ComplexCalculator
modelpath = "../ml_models/model-20210515.pkl"
ner_clf = load_crf_model(modelpath)
def annote_with_crf(filename, ner_clf):
"""
    Annotate the file with the CRF model; returns the recipe as an annotated string.
"""
ingredients, text_recette = get_content_from_xmlfile(filename)
liste = predict_text(text_recette,ner_clf)
text_after = transform_to_xml_annote(liste)
return text_after
def transform_doc_to_xml(doc):
text_after = []
for token in doc:
if token.ent_iob_ == "O":
text_after.append(token.text)
elif token.ent_iob_ == "B" and token.i == doc[-1].i:
text_after.append(f'<{token.ent_type_} id="{token.ent_kb_id_ + token.ent_id_}">' + token.text + f"</{token.ent_type_}>")
elif token.ent_iob_ == "B" and doc[token.i+1].ent_iob_ == "I":
text_after.append(f'<{token.ent_type_} id="{token.ent_kb_id_ + token.ent_id_}">' + token.text)
elif token.ent_iob_ == "B" and doc[token.i+1].ent_iob_ != "I":
text_after.append(f'<{token.ent_type_} id="{token.ent_kb_id_ + token.ent_id_}">' + token.text + f"</{token.ent_type_}>")
elif token.ent_iob_ == "I" and token.i == doc[-1].i:
text_after.append(token.text + f"</{token.ent_type_}>")
elif token.ent_iob_ == "I" and doc[token.i+1].ent_iob_ == "I":
text_after.append(token.text)
elif token.ent_iob_ == "I" and doc[token.i+1].ent_iob_ != "I":
text_after.append(token.text + f"</{token.ent_type_}>")
text_after = " ".join(text_after)
text_after = re.sub("' ", "'", text_after)
text_after = re.sub(r" (,|\.)", "\\1", text_after)
return text_after
def parcours_corpus_annote(corpus_path, output_dir, liste=False):
if not liste:
        fics = glob.glob(f"{corpus_path}\\*.xml")
# fics = glob.glob(f"{corpus_path}{os.sep}*.xml")
else:
fics = corpus_path
for fic in fics:
try:
fic_name = fic.split(f'{os.sep}')[-1]
recette_annote_crf = annote_with_crf(fic, ner_clf)
recette_doc_spacy, dico_ingreds, dico_opers = xml_to_recipe_annotated(fic)
recette_annote_rules = transform_doc_to_xml(recette_doc_spacy)
calculator = ComplexCalculator(dico_ingreds, dico_opers)
complex_temps = calculator.get_O_temps()
complex_espace = calculator.O_espace_f()
ingreds = dico_ingreds_to_xml(dico_ingreds)
opers = dico_opers_to_xml(dico_opers)
## add to xmlfile
with open(fic,encoding="utf8") as f:
xml_text = f.read()
recette_xml_rules = '\n <annotation methode="symbolique">\n '+ recette_annote_rules + '\n </annotation>'
recette_xml_crf = '\n <annotation methode="crf">\n '+ recette_annote_crf + '\n </annotation>'
complexite_t = '\n <complexite>\n <temps>' + complex_temps + '</temps>\n <complexite>'
complexite_e = '\n <complexite>\n <espace>' + complex_espace + '</espace>\n <complexite>'
xml_text = re.sub("(</preparation>)", "\\1" + recette_xml_rules + recette_xml_crf + complexite_t + complexite_e + ingreds + opers, xml_text)
with open(output_dir + os.sep + fic_name, "w", encoding="utf8") as f:
f.write(xml_text)
        except Exception as err:
            print(f"Problem encountered with {fic}: {err}")
def dico_ingreds_to_xml(dico_ingreds):
liste = []
for ingred in dico_ingreds.values():
        formate = f'ingredient:{ingred["ingredient"]}\t id:{ingred["id"]}\t quantité:{ingred["quantite"]}\t unité:{ingred["unit"]}\t denombrable:{ingred["denombrable"]}\t recipient:{ingred["recipient"]}\n'
liste.append(formate)
liste = "".join(liste)
liste = "\n<ingredients_trouve>\n<![CDATA[\n" + liste + "]]>\n</ingredients_trouve>"
return liste
def dico_opers_to_xml(dico_opers):
liste = []
for oper_id,oper in dico_opers.items():
        formate = f'operation:{oper["action"]}\t id:{oper_id}\t ingrédients_ralatifs:{oper["ingreds"]}\t nombre_opération_atomique:{oper["nb_oper"]}\t temps:{oper["temps"]}\t recipient:{oper["recipient"]}\n'
liste.append(formate)
liste = "".join(liste)
liste = "\n<operation_trouve>\n<![CDATA[\n" + liste + "]]>\n</operation_trouve>"
return liste
if __name__ == "__main__":
corpus_path = "../corpus_recettes/corpus_for_final"
output = "../corpus_recettes/out_put"
parcours_corpus_annote(corpus_path, output)
|
"""
Main script: annotates a directory of recipe XML files.
"""
import glob
import re
import os
from oper_utils import xml_to_recipe_annotated
from Ner_classifieur_annote import load_crf_model, predict_text, transform_to_xml_annote
from NER_ingredient_detector import get_content_from_xmlfile
from ComplexCalculator import ComplexCalculator
modelpath = "../ml_models/model-20210515.pkl"
ner_clf = load_crf_model(modelpath)
def annote_with_crf(filename, ner_clf):
"""
    Annotate the file with the CRF model; returns the recipe text as an annotated string.
"""
ingredients, text_recette = get_content_from_xmlfile(filename)
liste = predict_text(text_recette,ner_clf)
text_after = transform_to_xml_annote(liste)
return text_after
def transform_doc_to_xml(doc):
text_after = []
for token in doc:
if token.ent_iob_ == "O":
text_after.append(token.text)
elif token.ent_iob_ == "B" and token.i == doc[-1].i:
text_after.append(f'<{token.ent_type_} id="{token.ent_kb_id_ + token.ent_id_}">' + token.text + f"</{token.ent_type_}>")
elif token.ent_iob_ == "B" and doc[token.i+1].ent_iob_ == "I":
text_after.append(f'<{token.ent_type_} id="{token.ent_kb_id_ + token.ent_id_}">' + token.text)
elif token.ent_iob_ == "B" and doc[token.i+1].ent_iob_ != "I":
text_after.append(f'<{token.ent_type_} id="{token.ent_kb_id_ + token.ent_id_}">' + token.text + f"</{token.ent_type_}>")
elif token.ent_iob_ == "I" and token.i == doc[-1].i:
text_after.append(token.text + f"</{token.ent_type_}>")
elif token.ent_iob_ == "I" and doc[token.i+1].ent_iob_ == "I":
text_after.append(token.text)
elif token.ent_iob_ == "I" and doc[token.i+1].ent_iob_ != "I":
text_after.append(token.text + f"</{token.ent_type_}>")
text_after = " ".join(text_after)
text_after = re.sub("' ", "'", text_after)
text_after = re.sub(r" (,|\.)", "\\1", text_after)
return text_after
def parcours_corpus_annote(corpus_path, output_dir, liste=False):
if not liste:
fics = glob.glob(f"{corpus_path}\*.xml")
# fics = glob.glob(f"{corpus_path}{os.sep}*.xml")
else:
fics = corpus_path
for fic in fics:
try:
fic_name = fic.split(f'{os.sep}')[-1]
recette_annote_crf = annote_with_crf(fic, ner_clf)
recette_doc_spacy, dico_ingreds, dico_opers = xml_to_recipe_annotated(fic)
recette_annote_rules = transform_doc_to_xml(recette_doc_spacy)
calculator = ComplexCalculator(dico_ingreds, dico_opers)
complex_temps = calculator.get_O_temps()
complex_espace = calculator.O_espace_f()
ingreds = dico_ingreds_to_xml(dico_ingreds)
opers = dico_opers_to_xml(dico_opers)
## add to xmlfile
with open(fic,encoding="utf8") as f:
xml_text = f.read()
recette_xml_rules = '\n <annotation methode="symbolique">\n '+ recette_annote_rules + '\n </annotation>'
recette_xml_crf = '\n <annotation methode="crf">\n '+ recette_annote_crf + '\n </annotation>'
            complexite_t = '\n    <complexite>\n        <temps>' + complex_temps + '</temps>\n    </complexite>'
            complexite_e = '\n    <complexite>\n        <espace>' + complex_espace + '</espace>\n    </complexite>'
xml_text = re.sub("(</preparation>)", "\\1" + recette_xml_rules + recette_xml_crf + complexite_t + complexite_e + ingreds + opers, xml_text)
with open(output_dir + os.sep + fic_name, "w", encoding="utf8") as f:
f.write(xml_text)
        except Exception as err:
            print(f"Problem encountered with {fic}: {err}")
def dico_ingreds_to_xml(dico_ingreds):
liste = []
for ingred in dico_ingreds.values():
formate = f'ingredient:{ingred["ingredient"]}\t id:{ingred["id"]}\t quantité:{ingred["quantite"]}\t unité:{ingred["unit"]}\t denombrable:{ingred["denombrable"]}\t recipient:{ingred["recipient"]}\n'
liste.append(formate)
liste = "".join(liste)
liste = "\n<ingredients_trouve>\n<![CDATA[\n" + liste + "]]>\n</ingredients_trouve>"
return liste
def dico_opers_to_xml(dico_opers):
liste = []
for oper_id,oper in dico_opers.items():
formate = f'operation:{oper["action"]}\t id:{oper_id}\t ingrédients_ralatifs:{oper["ingreds"]}\t nombre_opération_atomique:{oper["nb_oper"]}\t temps:{oper["temps"]}\t recipient:{oper["recipient"]}\n'
liste.append(formate)
liste = "".join(liste)
liste = "\n<operation_trouve>\n<![CDATA[\n" + liste + "]]>\n</operation_trouve>"
return liste
if __name__ == "__main__":
corpus_path = "../corpus_recettes/corpus_for_final"
output = "../corpus_recettes/out_put"
parcours_corpus_annote(corpus_path, output)
|
#!/usr/bin/python3
mice = {"number": 2, "names": [{"name": "Pinky", "tag": "the real genius"},{"name": "The Brain", "tag": "insane one"}], "world_domination_status": "pending"}
## print following
## Pinky is the real genius, and The Brain is the insane one
print(f'{mice["names"][0]["name"]} is {mice["names"][0]["tag"]}, and {mice["names"][1]["name"]} is the {mice["names"][1]["tag"]}.')
|
#!/usr/bin/python3
mice = {"number": 2, "names": [{"name": "Pinky", "tag": "the real genius"},{"name": "The Brain", "tag": "insane one"}], "world_domination_status": "pending"}
## print following
## Pinky is the real genius, and The Brain is the insane one
print(f'{mice["names"][0]["name"]} is {mice["names"][0]["tag"]}, and {mice["names"][1]["name"]} is the {mice["names"][1]["tag"]}.')
|
from sqlalchemy.orm import Session
from datetime import datetime
from fastapi import HTTPException
from fastapi import status
from fastapi.encoders import jsonable_encoder
from literature.schemas import CrossReferenceSchema
from literature.schemas import CrossReferenceSchemaUpdate
from literature.models import CrossReferenceModel
from literature.models import ReferenceModel
from literature.models import ResourceModel
from literature.models import ResourceDescriptorModel
from literature.crud.reference_resource import create_obj, add_reference_resource
def create(db: Session, cross_reference: CrossReferenceSchema) -> str:
cross_reference_data = jsonable_encoder(cross_reference)
if db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == cross_reference_data['curie']).first():
raise HTTPException(status_code=status.HTTP_409_CONFLICT,
detail=f"CrossReference with curie {cross_reference_data["curie"]} already exists")
db_obj = create_obj(db, CrossReferenceModel, cross_reference_data)
db.add(db_obj)
db.commit()
return "created"
def destroy(db: Session, curie: str) -> None:
cross_reference = db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == curie).first()
if not cross_reference:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"Cross Reference with curie {curie} not found")
db.delete(cross_reference)
db.commit()
return None
def patch(db: Session, curie: str, cross_reference_update: CrossReferenceSchemaUpdate) -> dict:
cross_reference_data = jsonable_encoder(cross_reference_update)
cross_reference_db_obj = db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == curie).first()
if not cross_reference_db_obj:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"Cross Reference with curie {curie} not found")
add_reference_resource(db, cross_reference_db_obj, cross_reference_update, non_fatal=True)
for field, value in cross_reference_data.items():
setattr(cross_reference_db_obj, field, value)
cross_reference_db_obj.date_updated = datetime.utcnow()
db.commit()
return {"message": "updated"}
def show(db: Session, curie: str, indirect=True) -> dict:
cross_reference = db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == curie).first()
if not cross_reference:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"CrossReference with the curie {curie} is not available")
cross_reference_data = jsonable_encoder(cross_reference)
if cross_reference_data['resource_id']:
cross_reference_data['resource_curie'] = db.query(ResourceModel.curie).filter(ResourceModel.resource_id == cross_reference_data['resource_id']).first().curie
del cross_reference_data['resource_id']
if cross_reference_data['reference_id']:
cross_reference_data['reference_curie'] = db.query(ReferenceModel.curie).filter(ReferenceModel.reference_id == cross_reference_data['reference_id']).first().curie
del cross_reference_data['reference_id']
author_ids = []
editor_ids = []
if not indirect:
for author in cross_reference.authors:
author_ids.append(author.author_id)
for editor in cross_reference.editors:
editor_ids.append(editor.editor_id)
cross_reference_data['author_ids'] = author_ids
cross_reference_data['editor_ids'] = editor_ids
[db_prefix, local_id] = curie.split(":", 1)
resource_descriptor = db.query(ResourceDescriptorModel).filter(ResourceDescriptorModel.db_prefix == db_prefix).first()
if resource_descriptor:
default_url = resource_descriptor.default_url.replace("[%s]", local_id)
cross_reference_data['url'] = default_url
if cross_reference_data['pages']:
pages_data = []
for cr_page in cross_reference_data['pages']:
page_url = ""
for rd_page in resource_descriptor.pages:
if rd_page.name == cr_page:
page_url = rd_page.url
break
pages_data.append({"name": cr_page,
"url": page_url.replace("[%s]", local_id)})
cross_reference_data['pages'] = pages_data
elif cross_reference_data['pages']:
pages_data = []
for cr_page in cross_reference_data['pages']:
pages_data.append({"name": cr_page})
cross_reference_data['pages'] = pages_data
return cross_reference_data
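# Editor's note (hedged example): show() assumes default_url on ResourceDescriptorModel
# contains a "[%s]" placeholder that is filled with the local part of the curie. With a
# hypothetical descriptor row db_prefix="PMID", default_url="https://pubmed.ncbi.nlm.nih.gov/[%s]",
# show(db, "PMID:12345") would return url="https://pubmed.ncbi.nlm.nih.gov/12345", and each
# matching page template is expanded the same way.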
def show_changesets(db: Session, curie: str):
cross_reference = db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == curie).first()
if not cross_reference:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"Cross Reference with curie {curie} is not available")
history = []
for version in cross_reference.versions:
tx = version.transaction
history.append({'transaction': {'id': tx.id,
'issued_at': tx.issued_at,
'user_id': tx.user_id},
'changeset': version.changeset})
return history
|
from sqlalchemy.orm import Session
from datetime import datetime
from fastapi import HTTPException
from fastapi import status
from fastapi.encoders import jsonable_encoder
from literature.schemas import CrossReferenceSchema
from literature.schemas import CrossReferenceSchemaUpdate
from literature.models import CrossReferenceModel
from literature.models import ReferenceModel
from literature.models import ResourceModel
from literature.models import ResourceDescriptorModel
from literature.crud.reference_resource import create_obj, add_reference_resource
def create(db: Session, cross_reference: CrossReferenceSchema) -> str:
cross_reference_data = jsonable_encoder(cross_reference)
if db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == cross_reference_data['curie']).first():
raise HTTPException(status_code=status.HTTP_409_CONFLICT,
detail=f"CrossReference with curie {cross_reference_data['curie']} already exists")
db_obj = create_obj(db, CrossReferenceModel, cross_reference_data)
db.add(db_obj)
db.commit()
return "created"
def destroy(db: Session, curie: str) -> None:
cross_reference = db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == curie).first()
if not cross_reference:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"Cross Reference with curie {curie} not found")
db.delete(cross_reference)
db.commit()
return None
def patch(db: Session, curie: str, cross_reference_update: CrossReferenceSchemaUpdate) -> dict:
cross_reference_data = jsonable_encoder(cross_reference_update)
cross_reference_db_obj = db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == curie).first()
if not cross_reference_db_obj:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"Cross Reference with curie {curie} not found")
add_reference_resource(db, cross_reference_db_obj, cross_reference_update, non_fatal=True)
for field, value in cross_reference_data.items():
setattr(cross_reference_db_obj, field, value)
cross_reference_db_obj.date_updated = datetime.utcnow()
db.commit()
return {"message": "updated"}
def show(db: Session, curie: str, indirect=True) -> dict:
cross_reference = db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == curie).first()
if not cross_reference:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"CrossReference with the curie {curie} is not available")
cross_reference_data = jsonable_encoder(cross_reference)
if cross_reference_data['resource_id']:
cross_reference_data['resource_curie'] = db.query(ResourceModel.curie).filter(ResourceModel.resource_id == cross_reference_data['resource_id']).first().curie
del cross_reference_data['resource_id']
if cross_reference_data['reference_id']:
cross_reference_data['reference_curie'] = db.query(ReferenceModel.curie).filter(ReferenceModel.reference_id == cross_reference_data['reference_id']).first().curie
del cross_reference_data['reference_id']
author_ids = []
editor_ids = []
if not indirect:
for author in cross_reference.authors:
author_ids.append(author.author_id)
for editor in cross_reference.editors:
editor_ids.append(editor.editor_id)
cross_reference_data['author_ids'] = author_ids
cross_reference_data['editor_ids'] = editor_ids
[db_prefix, local_id] = curie.split(":", 1)
resource_descriptor = db.query(ResourceDescriptorModel).filter(ResourceDescriptorModel.db_prefix == db_prefix).first()
if resource_descriptor:
default_url = resource_descriptor.default_url.replace("[%s]", local_id)
cross_reference_data['url'] = default_url
if cross_reference_data['pages']:
pages_data = []
for cr_page in cross_reference_data['pages']:
page_url = ""
for rd_page in resource_descriptor.pages:
if rd_page.name == cr_page:
page_url = rd_page.url
break
pages_data.append({"name": cr_page,
"url": page_url.replace("[%s]", local_id)})
cross_reference_data['pages'] = pages_data
elif cross_reference_data['pages']:
pages_data = []
for cr_page in cross_reference_data['pages']:
pages_data.append({"name": cr_page})
cross_reference_data['pages'] = pages_data
return cross_reference_data
def show_changesets(db: Session, curie: str):
cross_reference = db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == curie).first()
if not cross_reference:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"Cross Reference with curie {curie} is not available")
history = []
for version in cross_reference.versions:
tx = version.transaction
history.append({'transaction': {'id': tx.id,
'issued_at': tx.issued_at,
'user_id': tx.user_id},
'changeset': version.changeset})
return history
|
import setuptools
import urllib.request
DESCRIPTION = 'A standardized collection of python libs and tools'
try:
with open('README.md', 'r') as f:
LONG_DESCRIPTION = f.read()
except FileNotFoundError:
LONG_DESCRIPTION = DESCRIPTION
try:
with open('VERSION', 'r') as f:
VERSION = f.read()
except FileNotFoundError:
VERSION = 'test'
# TODO: re-enable whenever PyPI allows direct references for dependencies
# deps = [
# {
# 'name': 'aiocheck',
# 'url': 'https://github.com/kruserr/aiocheck',
# 'tag': '',
# },
# ]
# for i in range(len(deps)):
# try:
# if (deps[i]['tag'] is None) or (len(deps[i]['tag']) == 0):
# raise KeyError()
# except KeyError:
#     request = urllib.request.urlopen(f"{deps[i]['url']}/releases/latest").geturl()
# deps[i]['tag'] = request.split('/')[::-1][0]
# deps[i] = f"{deps[i]["name"]} @ git+{deps[i]["url"]}@{deps[i]["tag"]}"
setuptools.setup(
name='i6',
version=VERSION,
author='kruserr',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
url='https://github.com/kruserr/i6',
keywords='i6 toolchain collection libs tools',
project_urls={
'Documentation': 'https://github.com/kruserr/i6/wiki',
'Source': 'https://github.com/kruserr/i6',
},
packages=setuptools.find_packages(
where='src',
exclude=['tests*'],
),
package_dir={
'': 'src',
},
install_requires=[
'docker',
'pyftpdlib',
'SQLAlchemy',
'marshmallow',
'cryptography',
],
entry_points = {
'console_scripts': ['i6=i6.__main__:main'],
},
zip_safe=True,
classifiers=[
'Topic :: Software Development',
'Development Status :: 1 - Planning',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
python_requires='>=3.7',
)
|
import setuptools
import urllib.request
DESCRIPTION = 'A standardized collection of python libs and tools'
try:
with open('README.md', 'r') as f:
LONG_DESCRIPTION = f.read()
except FileNotFoundError:
LONG_DESCRIPTION = DESCRIPTION
try:
with open('VERSION', 'r') as f:
VERSION = f.read()
except FileNotFoundError:
VERSION = 'test'
# TODO: re-enable whenever PyPI allows direct references for dependencies
# deps = [
# {
# 'name': 'aiocheck',
# 'url': 'https://github.com/kruserr/aiocheck',
# 'tag': '',
# },
# ]
# for i in range(len(deps)):
# try:
# if (deps[i]['tag'] is None) or (len(deps[i]['tag']) == 0):
# raise KeyError()
# except KeyError:
# request = urllib.request.urlopen(f"{deps[i]['url']}/releases/latest").geturl()
# deps[i]['tag'] = request.split('/')[::-1][0]
# deps[i] = f"{deps[i]['name']} @ git+{deps[i]['url']}@{deps[i]['tag']}"
setuptools.setup(
name='i6',
version=VERSION,
author='kruserr',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
url='https://github.com/kruserr/i6',
keywords='i6 toolchain collection libs tools',
project_urls={
'Documentation': 'https://github.com/kruserr/i6/wiki',
'Source': 'https://github.com/kruserr/i6',
},
packages=setuptools.find_packages(
where='src',
exclude=['tests*'],
),
package_dir={
'': 'src',
},
install_requires=[
'docker',
'pyftpdlib',
'SQLAlchemy',
'marshmallow',
'cryptography',
],
entry_points = {
'console_scripts': ['i6=i6.__main__:main'],
},
zip_safe=True,
classifiers=[
'Topic :: Software Development',
'Development Status :: 1 - Planning',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
python_requires='>=3.7',
)
|
"""
Train a new model.
"""
import sys
import argparse
import h5py
import datetime
import subprocess as sp
import numpy as np
import pandas as pd
import gzip as gz
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import IterableDataset, DataLoader
from sklearn.metrics import average_precision_score as average_precision
import dscript
from dscript.utils import PairedDataset, collate_paired_sequences
from dscript.models.embedding import (
IdentityEmbed,
FullyConnectedEmbed,
)
from dscript.models.contact import ContactCNN
from dscript.models.interaction import ModelInteraction
def add_args(parser):
"""
Create parser for command line utility.
:meta private:
"""
data_grp = parser.add_argument_group("Data")
proj_grp = parser.add_argument_group("Projection Module")
contact_grp = parser.add_argument_group("Contact Module")
inter_grp = parser.add_argument_group("Interaction Module")
train_grp = parser.add_argument_group("Training")
misc_grp = parser.add_argument_group("Output and Device")
# Data
data_grp.add_argument("--train", help="Training data", required=True)
data_grp.add_argument("--val", help="Validation data", required=True)
data_grp.add_argument("--embedding", help="h5 file with embedded sequences", required=True)
data_grp.add_argument(
"--no-augment",
action="store_false",
dest='augment',
help="Set flag to not augment data by adding (B A) for all pairs (A B)",
)
# Embedding model
proj_grp.add_argument(
"--projection-dim",
type=int,
default=100,
help="Dimension of embedding projection layer (default: 100)",
)
proj_grp.add_argument(
"--dropout-p",
type=float,
default=0.5,
help="Parameter p for embedding dropout layer (default: 0.5)",
)
# Contact model
contact_grp.add_argument(
"--hidden-dim",
type=int,
default=50,
help="Number of hidden units for comparison layer in contact prediction (default: 50)",
)
contact_grp.add_argument(
"--kernel-width",
type=int,
default=7,
help="Width of convolutional filter for contact prediction (default: 7)",
)
# Interaction Model
inter_grp.add_argument(
"--no-w",
action="store_false",
dest='use_w',
help="Don't use weight matrix in interaction prediction model",
)
inter_grp.add_argument(
"--pool-width",
type=int,
default=9,
help="Size of max-pool in interaction model (default: 9)",
)
# Training
train_grp.add_argument(
"--negative-ratio",
type=int,
default=10,
help="Number of negative training samples for each positive training sample (default: 10)",
)
train_grp.add_argument(
"--epoch-scale",
type=int,
default=1,
help="Report heldout performance every this many epochs (default: 1)",
)
train_grp.add_argument("--num-epochs", type=int, default=10, help="Number of epochs (default: 10)")
train_grp.add_argument("--batch-size", type=int, default=25, help="Minibatch size (default: 25)")
train_grp.add_argument("--weight-decay", type=float, default=0, help="L2 regularization (default: 0)")
train_grp.add_argument("--lr", type=float, default=0.001, help="Learning rate (default: 0.001)")
train_grp.add_argument(
"--lambda",
dest="lambda_",
type=float,
default=0.35,
help="Weight on the similarity objective (default: 0.35)",
)
# Output
misc_grp.add_argument("-o", "--outfile", help="Output file path (default: stdout)")
misc_grp.add_argument("--save-prefix", help="Path prefix for saving models")
misc_grp.add_argument("-d", "--device", type=int, default=-1, help="Compute device to use")
misc_grp.add_argument("--checkpoint", help="Checkpoint model to start training from")
return parser
def predict_interaction(model, n0, n1, tensors, use_cuda):
"""
Predict whether a list of protein pairs will interact.
:param model: Model to be trained
:type model: dscript.models.interaction.ModelInteraction
:param n0: First protein names
:type n0: list[str]
:param n1: Second protein names
:type n1: list[str]
:param tensors: Dictionary of protein names to embeddings
:type tensors: dict[str, torch.Tensor]
:param use_cuda: Whether to use GPU
:type use_cuda: bool
"""
b = len(n0)
p_hat = []
for i in range(b):
z_a = tensors[n0[i]]
z_b = tensors[n1[i]]
if use_cuda:
z_a = z_a.cuda()
z_b = z_b.cuda()
p_hat.append(model.predict(z_a, z_b))
p_hat = torch.stack(p_hat, 0)
return p_hat
def predict_cmap_interaction(model, n0, n1, tensors, use_cuda):
"""
Predict whether a list of protein pairs will interact, as well as their contact map.
:param model: Model to be trained
:type model: dscript.models.interaction.ModelInteraction
:param n0: First protein names
:type n0: list[str]
:param n1: Second protein names
:type n1: list[str]
:param tensors: Dictionary of protein names to embeddings
:type tensors: dict[str, torch.Tensor]
:param use_cuda: Whether to use GPU
:type use_cuda: bool
"""
b = len(n0)
p_hat = []
c_map_mag = []
for i in range(b):
z_a = tensors[n0[i]]
z_b = tensors[n1[i]]
if use_cuda:
z_a = z_a.cuda()
z_b = z_b.cuda()
cm, ph = model.map_predict(z_a, z_b)
p_hat.append(ph)
c_map_mag.append(torch.mean(cm))
p_hat = torch.stack(p_hat, 0)
c_map_mag = torch.stack(c_map_mag, 0)
return c_map_mag, p_hat
def interaction_grad(model, n0, n1, y, tensors, use_cuda, weight=0.35):
"""
Compute gradient and backpropagate loss for a batch.
:param model: Model to be trained
:type model: dscript.models.interaction.ModelInteraction
:param n0: First protein names
:type n0: list[str]
:param n1: Second protein names
:type n1: list[str]
:param y: Interaction labels
:type y: torch.Tensor
:param tensors: Dictionary of protein names to embeddings
:type tensors: dict[str, torch.Tensor]
:param use_cuda: Whether to use GPU
:type use_cuda: bool
:param weight: Weight on the contact map magnitude objective. BCE loss is :math:`1 - \\text{weight}`.
:type weight: float
:return: (Loss, number correct, mean square error, batch size)
:rtype: (torch.Tensor, int, torch.Tensor, int)
"""
c_map_mag, p_hat = predict_cmap_interaction(model, n0, n1, tensors, use_cuda)
if use_cuda:
y = y.cuda()
y = Variable(y)
bce_loss = F.binary_cross_entropy(p_hat.float(), y.float())
cmap_loss = torch.mean(c_map_mag)
loss = (weight * bce_loss) + ((1 - weight) * cmap_loss)
b = len(p_hat)
# backprop loss
loss.backward()
if use_cuda:
y = y.cpu()
p_hat = p_hat.cpu()
with torch.no_grad():
guess_cutoff = 0.5
p_hat = p_hat.float()
p_guess = (guess_cutoff * torch.ones(b) < p_hat).float()
y = y.float()
correct = torch.sum(p_guess == y).item()
mse = torch.mean((y.float() - p_hat) ** 2).item()
return loss, correct, mse, b
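# Editor's sketch (hedged, values illustrative): interaction_grad backpropagates a convex
# combination of the interaction BCE and the mean contact-map magnitude,
#   loss = weight * BCE(p_hat, y) + (1 - weight) * mean(c_map_mag)
# so with the default weight=0.35, a batch with BCE=0.6 and mean contact magnitude 0.1
# yields loss = 0.35 * 0.6 + 0.65 * 0.1 = 0.275.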
def interaction_eval(model, test_iterator, tensors, use_cuda):
"""
Evaluate test data set performance.
:param model: Model to be trained
:type model: dscript.models.interaction.ModelInteraction
:param test_iterator: Test data iterator
:type test_iterator: torch.utils.data.DataLoader
:param tensors: Dictionary of protein names to embeddings
:type tensors: dict[str, torch.Tensor]
:param use_cuda: Whether to use GPU
:type use_cuda: bool
:return: (Loss, number correct, mean square error, precision, recall, F1 Score, AUPR)
:rtype: (torch.Tensor, int, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor)
"""
p_hat = []
true_y = []
for n0, n1, y in test_iterator:
p_hat.append(predict_interaction(model, n0, n1, tensors, use_cuda))
true_y.append(y)
y = torch.cat(true_y, 0)
p_hat = torch.cat(p_hat, 0)
if use_cuda:
y.cuda()
p_hat = torch.Tensor([x.cuda() for x in p_hat])
p_hat.cuda()
loss = F.binary_cross_entropy(p_hat.float(), y.float()).item()
b = len(y)
with torch.no_grad():
guess_cutoff = torch.Tensor([0.5]).float()
p_hat = p_hat.float()
y = y.float()
p_guess = (guess_cutoff * torch.ones(b) < p_hat).float()
correct = torch.sum(p_guess == y).item()
mse = torch.mean((y.float() - p_hat) ** 2).item()
tp = torch.sum(y * p_hat).item()
pr = tp / torch.sum(p_hat).item()
re = tp / torch.sum(y).item()
f1 = 2 * pr * re / (pr + re)
y = y.cpu().numpy()
p_hat = p_hat.data.cpu().numpy()
aupr = average_precision(y, p_hat)
return loss, correct, mse, pr, re, f1, aupr
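# Editor's note (hedged): the precision/recall above are computed on the raw probabilities
# (tp = sum(y * p_hat)) rather than on thresholded predictions, so they are "soft" scores;
# e.g. y = [1, 0], p_hat = [0.8, 0.4] gives tp = 0.8, precision = 0.8 / 1.2 ≈ 0.67 and
# recall = 0.8 / 1.0 = 0.8.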
def main(args):
"""
Run training from arguments.
:meta private:
"""
output = args.outfile
if output is None:
output = sys.stdout
else:
output = open(output, "w")
    print(f'# Called as: {" ".join(sys.argv)}', file=output)
    if output is not sys.stdout:
        print(f'Called as: {" ".join(sys.argv)}')
# Set device
device = args.device
use_cuda = (device >= 0) and torch.cuda.is_available()
if use_cuda:
torch.cuda.set_device(device)
print(
f"# Using CUDA device {device} - {torch.cuda.get_device_name(device)}",
file=output,
)
else:
print("# Using CPU", file=output)
device = "cpu"
batch_size = args.batch_size
train_fi = args.train
test_fi = args.val
augment = args.augment
embedding_h5 = args.embedding
h5fi = h5py.File(embedding_h5, "r")
print(f"# Loading training pairs from {train_fi}...", file=output)
output.flush()
train_df = pd.read_csv(train_fi, sep="\t", header=None)
if augment:
train_n0 = pd.concat((train_df[0], train_df[1]), axis=0).reset_index(drop=True)
train_n1 = pd.concat((train_df[1], train_df[0]), axis=0).reset_index(drop=True)
train_y = torch.from_numpy(pd.concat((train_df[2], train_df[2])).values)
else:
train_n0, train_n1 = train_df[0], train_df[1]
train_y = torch.from_numpy(train_df[2].values)
print(f"# Loading testing pairs from {test_fi}...", file=output)
output.flush()
test_df = pd.read_csv(test_fi, sep="\t", header=None)
test_n0, test_n1 = test_df[0], test_df[1]
test_y = torch.from_numpy(test_df[2].values)
output.flush()
train_pairs = PairedDataset(train_n0, train_n1, train_y)
pairs_train_iterator = torch.utils.data.DataLoader(
train_pairs,
batch_size=batch_size,
collate_fn=collate_paired_sequences,
shuffle=True,
)
test_pairs = PairedDataset(test_n0, test_n1, test_y)
pairs_test_iterator = torch.utils.data.DataLoader(
test_pairs,
batch_size=batch_size,
collate_fn=collate_paired_sequences,
shuffle=True,
)
output.flush()
print(f"# Loading embeddings", file=output)
tensors = {}
all_proteins = set(train_n0).union(set(train_n1)).union(set(test_n0)).union(set(test_n1))
for prot_name in tqdm(all_proteins):
tensors[prot_name] = torch.from_numpy(h5fi[prot_name][:, :])
use_cuda = (args.device > -1) and torch.cuda.is_available()
if args.checkpoint is None:
projection_dim = args.projection_dim
dropout_p = args.dropout_p
embedding = FullyConnectedEmbed(6165, projection_dim, dropout=dropout_p)
print("# Initializing embedding model with:", file=output)
print(f"\tprojection_dim: {projection_dim}", file=output)
print(f"\tdropout_p: {dropout_p}", file=output)
# Create contact model
hidden_dim = args.hidden_dim
kernel_width = args.kernel_width
print("# Initializing contact model with:", file=output)
print(f"\thidden_dim: {hidden_dim}", file=output)
print(f"\tkernel_width: {kernel_width}", file=output)
contact = ContactCNN(projection_dim, hidden_dim, kernel_width)
# Create the full model
use_W = args.use_w
pool_width = args.pool_width
print("# Initializing interaction model with:", file=output)
print(f"\tpool_width: {pool_width}", file=output)
print(f"\tuse_w: {use_W}", file=output)
model = ModelInteraction(embedding, contact, use_W=use_W, pool_size=pool_width)
print(model, file=output)
else:
print("# Loading model from checkpoint {}".format(args.checkpoint), file=output)
model = torch.load(args.checkpoint)
model.use_cuda = use_cuda
if use_cuda:
model = model.cuda()
# Train the model
lr = args.lr
wd = args.weight_decay
num_epochs = args.num_epochs
batch_size = args.batch_size
report_steps = args.epoch_scale
inter_weight = args.lambda_
cmap_weight = 1 - inter_weight
digits = int(np.floor(np.log10(num_epochs))) + 1
save_prefix = args.save_prefix
if save_prefix is None:
save_prefix = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M")
params = [p for p in model.parameters() if p.requires_grad]
optim = torch.optim.Adam(params, lr=lr, weight_decay=wd)
print(f'# Using save prefix "{save_prefix}"', file=output)
print(f"# Training with Adam: lr={lr}, weight_decay={wd}", file=output)
print(f"\tnum_epochs: {num_epochs}", file=output)
print(f"\tepoch_scale: {report_steps}", file=output)
print(f"\tbatch_size: {batch_size}", file=output)
print(f"\tinteraction weight: {inter_weight}", file=output)
print(f"\tcontact map weight: {cmap_weight}", file=output)
output.flush()
batch_report_fmt = "# [{}/{}] training {:.1%}: Loss={:.6}, Accuracy={:.3%}, MSE={:.6}"
epoch_report_fmt = "# Finished Epoch {}/{}: Loss={:.6}, Accuracy={:.3%}, MSE={:.6}, Precision={:.6}, Recall={:.6}, F1={:.6}, AUPR={:.6}"
N = len(pairs_train_iterator) * batch_size
for epoch in range(num_epochs):
model.train()
n = 0
loss_accum = 0
acc_accum = 0
mse_accum = 0
# Train batches
for (z0, z1, y) in tqdm(pairs_train_iterator, desc=f"Epoch {epoch+1}/{num_epochs}",total=len(pairs_train_iterator)):
loss, correct, mse, b = interaction_grad(model, z0, z1, y, tensors, use_cuda, weight=inter_weight)
n += b
delta = b * (loss - loss_accum)
loss_accum += delta / n
delta = correct - b * acc_accum
acc_accum += delta / n
delta = b * (mse - mse_accum)
mse_accum += delta / n
report = (n - b) // 100 < n // 100
optim.step()
optim.zero_grad()
model.clip()
if report:
tokens = [
epoch + 1,
num_epochs,
n / N,
loss_accum,
acc_accum,
mse_accum,
]
if output is not sys.stdout:
print(batch_report_fmt.format(*tokens), file=output)
output.flush()
if (epoch + 1) % report_steps == 0:
model.eval()
with torch.no_grad():
(
inter_loss,
inter_correct,
inter_mse,
inter_pr,
inter_re,
inter_f1,
inter_aupr,
) = interaction_eval(model, pairs_test_iterator, tensors, use_cuda)
tokens = [
epoch + 1,
num_epochs,
inter_loss,
inter_correct / (len(pairs_test_iterator) * batch_size),
inter_mse,
inter_pr,
inter_re,
inter_f1,
inter_aupr,
]
print(epoch_report_fmt.format(*tokens), file=output)
output.flush()
# Save the model
if save_prefix is not None:
save_path = save_prefix + "_epoch" + str(epoch + 1).zfill(digits) + ".sav"
print(f"# Saving model to {save_path}", file=output)
model.cpu()
torch.save(model, save_path)
if use_cuda:
model.cuda()
output.flush()
if save_prefix is not None:
save_path = save_prefix + "_final.sav"
print(f"# Saving final model to {save_path}", file=output)
model.cpu()
torch.save(model, save_path)
if use_cuda:
model.cuda()
output.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
add_args(parser)
main(parser.parse_args())
|
"""
Train a new model.
"""
import sys
import argparse
import h5py
import datetime
import subprocess as sp
import numpy as np
import pandas as pd
import gzip as gz
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import IterableDataset, DataLoader
from sklearn.metrics import average_precision_score as average_precision
import dscript
from dscript.utils import PairedDataset, collate_paired_sequences
from dscript.models.embedding import (
IdentityEmbed,
FullyConnectedEmbed,
)
from dscript.models.contact import ContactCNN
from dscript.models.interaction import ModelInteraction
def add_args(parser):
"""
Create parser for command line utility.
:meta private:
"""
data_grp = parser.add_argument_group("Data")
proj_grp = parser.add_argument_group("Projection Module")
contact_grp = parser.add_argument_group("Contact Module")
inter_grp = parser.add_argument_group("Interaction Module")
train_grp = parser.add_argument_group("Training")
misc_grp = parser.add_argument_group("Output and Device")
# Data
data_grp.add_argument("--train", help="Training data", required=True)
data_grp.add_argument("--val", help="Validation data", required=True)
data_grp.add_argument("--embedding", help="h5 file with embedded sequences", required=True)
data_grp.add_argument(
"--no-augment",
action="store_false",
dest='augment',
help="Set flag to not augment data by adding (B A) for all pairs (A B)",
)
# Embedding model
proj_grp.add_argument(
"--projection-dim",
type=int,
default=100,
help="Dimension of embedding projection layer (default: 100)",
)
proj_grp.add_argument(
"--dropout-p",
type=float,
default=0.5,
help="Parameter p for embedding dropout layer (default: 0.5)",
)
# Contact model
contact_grp.add_argument(
"--hidden-dim",
type=int,
default=50,
help="Number of hidden units for comparison layer in contact prediction (default: 50)",
)
contact_grp.add_argument(
"--kernel-width",
type=int,
default=7,
help="Width of convolutional filter for contact prediction (default: 7)",
)
# Interaction Model
inter_grp.add_argument(
"--no-w",
action="store_false",
dest='use_w',
help="Don't use weight matrix in interaction prediction model",
)
inter_grp.add_argument(
"--pool-width",
type=int,
default=9,
help="Size of max-pool in interaction model (default: 9)",
)
# Training
train_grp.add_argument(
"--negative-ratio",
type=int,
default=10,
help="Number of negative training samples for each positive training sample (default: 10)",
)
train_grp.add_argument(
"--epoch-scale",
type=int,
default=1,
help="Report heldout performance every this many epochs (default: 1)",
)
train_grp.add_argument("--num-epochs", type=int, default=10, help="Number of epochs (default: 10)")
train_grp.add_argument("--batch-size", type=int, default=25, help="Minibatch size (default: 25)")
train_grp.add_argument("--weight-decay", type=float, default=0, help="L2 regularization (default: 0)")
train_grp.add_argument("--lr", type=float, default=0.001, help="Learning rate (default: 0.001)")
train_grp.add_argument(
"--lambda",
dest="lambda_",
type=float,
default=0.35,
help="Weight on the similarity objective (default: 0.35)",
)
# Output
misc_grp.add_argument("-o", "--outfile", help="Output file path (default: stdout)")
misc_grp.add_argument("--save-prefix", help="Path prefix for saving models")
misc_grp.add_argument("-d", "--device", type=int, default=-1, help="Compute device to use")
misc_grp.add_argument("--checkpoint", help="Checkpoint model to start training from")
return parser
def predict_interaction(model, n0, n1, tensors, use_cuda):
"""
Predict whether a list of protein pairs will interact.
:param model: Model to be trained
:type model: dscript.models.interaction.ModelInteraction
:param n0: First protein names
:type n0: list[str]
:param n1: Second protein names
:type n1: list[str]
:param tensors: Dictionary of protein names to embeddings
:type tensors: dict[str, torch.Tensor]
:param use_cuda: Whether to use GPU
:type use_cuda: bool
"""
b = len(n0)
p_hat = []
for i in range(b):
z_a = tensors[n0[i]]
z_b = tensors[n1[i]]
if use_cuda:
z_a = z_a.cuda()
z_b = z_b.cuda()
p_hat.append(model.predict(z_a, z_b))
p_hat = torch.stack(p_hat, 0)
return p_hat
def predict_cmap_interaction(model, n0, n1, tensors, use_cuda):
"""
Predict whether a list of protein pairs will interact, as well as their contact map.
:param model: Model to be trained
:type model: dscript.models.interaction.ModelInteraction
:param n0: First protein names
:type n0: list[str]
:param n1: Second protein names
:type n1: list[str]
:param tensors: Dictionary of protein names to embeddings
:type tensors: dict[str, torch.Tensor]
:param use_cuda: Whether to use GPU
:type use_cuda: bool
"""
b = len(n0)
p_hat = []
c_map_mag = []
for i in range(b):
z_a = tensors[n0[i]]
z_b = tensors[n1[i]]
if use_cuda:
z_a = z_a.cuda()
z_b = z_b.cuda()
cm, ph = model.map_predict(z_a, z_b)
p_hat.append(ph)
c_map_mag.append(torch.mean(cm))
p_hat = torch.stack(p_hat, 0)
c_map_mag = torch.stack(c_map_mag, 0)
return c_map_mag, p_hat
def interaction_grad(model, n0, n1, y, tensors, use_cuda, weight=0.35):
"""
Compute gradient and backpropagate loss for a batch.
:param model: Model to be trained
:type model: dscript.models.interaction.ModelInteraction
:param n0: First protein names
:type n0: list[str]
:param n1: Second protein names
:type n1: list[str]
:param y: Interaction labels
:type y: torch.Tensor
:param tensors: Dictionary of protein names to embeddings
:type tensors: dict[str, torch.Tensor]
:param use_cuda: Whether to use GPU
:type use_cuda: bool
:param weight: Weight on the contact map magnitude objective. BCE loss is :math:`1 - \\text{weight}`.
:type weight: float
:return: (Loss, number correct, mean square error, batch size)
:rtype: (torch.Tensor, int, torch.Tensor, int)
"""
c_map_mag, p_hat = predict_cmap_interaction(model, n0, n1, tensors, use_cuda)
if use_cuda:
y = y.cuda()
y = Variable(y)
bce_loss = F.binary_cross_entropy(p_hat.float(), y.float())
cmap_loss = torch.mean(c_map_mag)
loss = (weight * bce_loss) + ((1 - weight) * cmap_loss)
b = len(p_hat)
# backprop loss
loss.backward()
if use_cuda:
y = y.cpu()
p_hat = p_hat.cpu()
with torch.no_grad():
guess_cutoff = 0.5
p_hat = p_hat.float()
p_guess = (guess_cutoff * torch.ones(b) < p_hat).float()
y = y.float()
correct = torch.sum(p_guess == y).item()
mse = torch.mean((y.float() - p_hat) ** 2).item()
return loss, correct, mse, b
def interaction_eval(model, test_iterator, tensors, use_cuda):
"""
Evaluate test data set performance.
:param model: Model to be trained
:type model: dscript.models.interaction.ModelInteraction
:param test_iterator: Test data iterator
:type test_iterator: torch.utils.data.DataLoader
:param tensors: Dictionary of protein names to embeddings
:type tensors: dict[str, torch.Tensor]
:param use_cuda: Whether to use GPU
:type use_cuda: bool
:return: (Loss, number correct, mean square error, precision, recall, F1 Score, AUPR)
:rtype: (torch.Tensor, int, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor)
"""
p_hat = []
true_y = []
for n0, n1, y in test_iterator:
p_hat.append(predict_interaction(model, n0, n1, tensors, use_cuda))
true_y.append(y)
y = torch.cat(true_y, 0)
p_hat = torch.cat(p_hat, 0)
if use_cuda:
y.cuda()
p_hat = torch.Tensor([x.cuda() for x in p_hat])
p_hat.cuda()
loss = F.binary_cross_entropy(p_hat.float(), y.float()).item()
b = len(y)
with torch.no_grad():
guess_cutoff = torch.Tensor([0.5]).float()
p_hat = p_hat.float()
y = y.float()
p_guess = (guess_cutoff * torch.ones(b) < p_hat).float()
correct = torch.sum(p_guess == y).item()
mse = torch.mean((y.float() - p_hat) ** 2).item()
tp = torch.sum(y * p_hat).item()
pr = tp / torch.sum(p_hat).item()
re = tp / torch.sum(y).item()
f1 = 2 * pr * re / (pr + re)
y = y.cpu().numpy()
p_hat = p_hat.data.cpu().numpy()
aupr = average_precision(y, p_hat)
return loss, correct, mse, pr, re, f1, aupr
def main(args):
"""
Run training from arguments.
:meta private:
"""
output = args.outfile
if output is None:
output = sys.stdout
else:
output = open(output, "w")
print(f'# Called as: {" ".join(sys.argv)}', file=output)
if output is not sys.stdout:
print(f'Called as: {" ".join(sys.argv)}')
# Set device
device = args.device
use_cuda = (device >= 0) and torch.cuda.is_available()
if use_cuda:
torch.cuda.set_device(device)
print(
f"# Using CUDA device {device} - {torch.cuda.get_device_name(device)}",
file=output,
)
else:
print("# Using CPU", file=output)
device = "cpu"
batch_size = args.batch_size
train_fi = args.train
test_fi = args.val
augment = args.augment
embedding_h5 = args.embedding
h5fi = h5py.File(embedding_h5, "r")
print(f"# Loading training pairs from {train_fi}...", file=output)
output.flush()
train_df = pd.read_csv(train_fi, sep="\t", header=None)
if augment:
train_n0 = pd.concat((train_df[0], train_df[1]), axis=0).reset_index(drop=True)
train_n1 = pd.concat((train_df[1], train_df[0]), axis=0).reset_index(drop=True)
train_y = torch.from_numpy(pd.concat((train_df[2], train_df[2])).values)
else:
train_n0, train_n1 = train_df[0], train_df[1]
train_y = torch.from_numpy(train_df[2].values)
print(f"# Loading testing pairs from {test_fi}...", file=output)
output.flush()
test_df = pd.read_csv(test_fi, sep="\t", header=None)
test_n0, test_n1 = test_df[0], test_df[1]
test_y = torch.from_numpy(test_df[2].values)
output.flush()
train_pairs = PairedDataset(train_n0, train_n1, train_y)
pairs_train_iterator = torch.utils.data.DataLoader(
train_pairs,
batch_size=batch_size,
collate_fn=collate_paired_sequences,
shuffle=True,
)
test_pairs = PairedDataset(test_n0, test_n1, test_y)
pairs_test_iterator = torch.utils.data.DataLoader(
test_pairs,
batch_size=batch_size,
collate_fn=collate_paired_sequences,
shuffle=True,
)
output.flush()
print(f"# Loading embeddings", file=output)
tensors = {}
all_proteins = set(train_n0).union(set(train_n1)).union(set(test_n0)).union(set(test_n1))
for prot_name in tqdm(all_proteins):
tensors[prot_name] = torch.from_numpy(h5fi[prot_name][:, :])
use_cuda = (args.device > -1) and torch.cuda.is_available()
if args.checkpoint is None:
projection_dim = args.projection_dim
dropout_p = args.dropout_p
embedding = FullyConnectedEmbed(6165, projection_dim, dropout=dropout_p)
print("# Initializing embedding model with:", file=output)
print(f"\tprojection_dim: {projection_dim}", file=output)
print(f"\tdropout_p: {dropout_p}", file=output)
# Create contact model
hidden_dim = args.hidden_dim
kernel_width = args.kernel_width
print("# Initializing contact model with:", file=output)
print(f"\thidden_dim: {hidden_dim}", file=output)
print(f"\tkernel_width: {kernel_width}", file=output)
contact = ContactCNN(projection_dim, hidden_dim, kernel_width)
# Create the full model
use_W = args.use_w
pool_width = args.pool_width
print("# Initializing interaction model with:", file=output)
print(f"\tpool_width: {pool_width}", file=output)
print(f"\tuse_w: {use_W}", file=output)
model = ModelInteraction(embedding, contact, use_W=use_W, pool_size=pool_width)
print(model, file=output)
else:
print("# Loading model from checkpoint {}".format(args.checkpoint), file=output)
model = torch.load(args.checkpoint)
model.use_cuda = use_cuda
if use_cuda:
model = model.cuda()
# Train the model
lr = args.lr
wd = args.weight_decay
num_epochs = args.num_epochs
batch_size = args.batch_size
report_steps = args.epoch_scale
inter_weight = args.lambda_
cmap_weight = 1 - inter_weight
digits = int(np.floor(np.log10(num_epochs))) + 1
save_prefix = args.save_prefix
if save_prefix is None:
save_prefix = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M")
params = [p for p in model.parameters() if p.requires_grad]
optim = torch.optim.Adam(params, lr=lr, weight_decay=wd)
print(f'# Using save prefix "{save_prefix}"', file=output)
print(f"# Training with Adam: lr={lr}, weight_decay={wd}", file=output)
print(f"\tnum_epochs: {num_epochs}", file=output)
print(f"\tepoch_scale: {report_steps}", file=output)
print(f"\tbatch_size: {batch_size}", file=output)
print(f"\tinteraction weight: {inter_weight}", file=output)
print(f"\tcontact map weight: {cmap_weight}", file=output)
output.flush()
batch_report_fmt = "# [{}/{}] training {:.1%}: Loss={:.6}, Accuracy={:.3%}, MSE={:.6}"
epoch_report_fmt = "# Finished Epoch {}/{}: Loss={:.6}, Accuracy={:.3%}, MSE={:.6}, Precision={:.6}, Recall={:.6}, F1={:.6}, AUPR={:.6}"
N = len(pairs_train_iterator) * batch_size
for epoch in range(num_epochs):
model.train()
n = 0
loss_accum = 0
acc_accum = 0
mse_accum = 0
# Train batches
for (z0, z1, y) in tqdm(pairs_train_iterator, desc=f"Epoch {epoch+1}/{num_epochs}",total=len(pairs_train_iterator)):
loss, correct, mse, b = interaction_grad(model, z0, z1, y, tensors, use_cuda, weight=inter_weight)
n += b
delta = b * (loss - loss_accum)
loss_accum += delta / n
delta = correct - b * acc_accum
acc_accum += delta / n
delta = b * (mse - mse_accum)
mse_accum += delta / n
report = (n - b) // 100 < n // 100
optim.step()
optim.zero_grad()
model.clip()
if report:
tokens = [
epoch + 1,
num_epochs,
n / N,
loss_accum,
acc_accum,
mse_accum,
]
if output is not sys.stdout:
print(batch_report_fmt.format(*tokens), file=output)
output.flush()
if (epoch + 1) % report_steps == 0:
model.eval()
with torch.no_grad():
(
inter_loss,
inter_correct,
inter_mse,
inter_pr,
inter_re,
inter_f1,
inter_aupr,
) = interaction_eval(model, pairs_test_iterator, tensors, use_cuda)
tokens = [
epoch + 1,
num_epochs,
inter_loss,
inter_correct / (len(pairs_test_iterator) * batch_size),
inter_mse,
inter_pr,
inter_re,
inter_f1,
inter_aupr,
]
print(epoch_report_fmt.format(*tokens), file=output)
output.flush()
# Save the model
if save_prefix is not None:
save_path = save_prefix + "_epoch" + str(epoch + 1).zfill(digits) + ".sav"
print(f"# Saving model to {save_path}", file=output)
model.cpu()
torch.save(model, save_path)
if use_cuda:
model.cuda()
output.flush()
if save_prefix is not None:
save_path = save_prefix + "_final.sav"
print(f"# Saving final model to {save_path}", file=output)
model.cpu()
torch.save(model, save_path)
if use_cuda:
model.cuda()
output.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
add_args(parser)
main(parser.parse_args())
|
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Megatron arguments."""
import argparse
import collections
import os
import re
import time
import torch
import deepspeed
from megatron.enums import PositionEmbeddingType
import megatron
from megatron.logging import log_levels
def parse_args(extra_args_provider=None, defaults={},
ignore_unknown_args=False):
"""Parse all arguments."""
parser = argparse.ArgumentParser(description='Megatron-LM Arguments',
allow_abbrev=False)
# Standard arguments.
parser = _add_network_size_args(parser)
parser = _add_regularization_args(parser)
parser = _add_training_args(parser)
parser = _add_initialization_args(parser)
parser = _add_learning_rate_args(parser)
parser = _add_checkpointing_args(parser)
parser = _add_mixed_precision_args(parser)
parser = _add_distributed_args(parser)
parser = _add_validation_args(parser)
parser = _add_data_args(parser)
parser = _add_autoresume_args(parser)
parser = _add_biencoder_args(parser)
parser = _add_vit_args(parser)
parser = _add_logging_args(parser)
parser = _add_zero_args(parser)
parser = _add_memoryopt_args(parser)
parser = _add_activation_checkpoint_args(parser)
# Custom arguments.
if extra_args_provider is not None:
parser = extra_args_provider(parser)
parser = deepspeed.add_config_arguments(parser)
# Parse.
if ignore_unknown_args:
args, _ = parser.parse_known_args()
else:
args = parser.parse_args()
# Distributed args.
args.rank = int(os.getenv('RANK', '0'))
args.world_size = int(os.getenv("WORLD_SIZE", '1'))
# Tensor model parallel size.
args.tensor_model_parallel_size = min(
args.tensor_model_parallel_size, args.world_size)
assert args.world_size % args.tensor_model_parallel_size == 0, 'world size'\
' ({}) is not divisible by tensor model parallel size ({})'.format(
args.world_size, args.tensor_model_parallel_size)
# Pipeline model parallel size.
args.pipeline_model_parallel_size = min(
args.pipeline_model_parallel_size,
(args.world_size // args.tensor_model_parallel_size))
# Checks.
model_parallel_size = args.pipeline_model_parallel_size * \
args.tensor_model_parallel_size
assert args.world_size % model_parallel_size == 0, 'world size is not'\
' divisible by tensor parallel size ({}) times pipeline parallel ' \
'size ({})'.format(args.world_size, args.tensor_model_parallel_size,
args.pipeline_model_parallel_size)
args.data_parallel_size = args.world_size // model_parallel_size
if args.rank == 0:
print('using world size: {}, data-parallel-size: {}, '
'tensor-model-parallel size: {}, '
'pipeline-model-parallel size: {} '.format(
args.world_size, args.data_parallel_size,
args.tensor_model_parallel_size,
args.pipeline_model_parallel_size), flush=True)
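    # Worked example (editor's note, values illustrative): with WORLD_SIZE=16,
    # --tensor-model-parallel-size 2 and --pipeline-model-parallel-size 2, the model-parallel
    # group spans 2 * 2 = 4 ranks, so data_parallel_size = 16 // 4 = 4.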
# --data-path and --train-weighted-splits-paths
message = "Data loading Mode 1: --data-path and --split "\
"and Mode 2: --(train|valid|test)-weighted-split-paths"\
"are mutually exclusive i.e. cannot be set together."
if args.data_path:
assert args.train_weighted_split_paths is None, message
setattr(args, "valid_weighted_split_names", None)
setattr(args, "valid_weighted_split_weights", None)
setattr(args, "valid_weighted_split_splits", None)
setattr(args, "test_weighted_split_names", None)
setattr(args, "test_weighted_split_weights", None)
setattr(args, "test_weighted_split_splits", None)
# args.split default value in the args is None it is set here in order
# to check that it does not to overlap with the 2nd mode of data loading
if args.split is None:
args.split = "969, 30, 1"
if args.train_weighted_split_paths or args.valid_weighted_split_paths or \
args.test_weighted_split_paths:
assert args.data_path is None and args.split is None, message
# Deprecated arguments
assert args.batch_size is None, '--batch-size argument is no longer ' \
'valid, use --micro-batch-size instead'
del args.batch_size
assert args.warmup is None, '--warmup argument is no longer valid, use ' \
'--lr-warmup-fraction instead'
del args.warmup
assert args.model_parallel_size is None, '--model-parallel-size is no ' \
'longer valid, use --tensor-model-parallel-size instead'
del args.model_parallel_size
# Set input defaults.
for key in defaults:
# For default to be valid, it should not be provided in the
# arguments that are passed to the program. We check this by
# ensuring the arg is set to None.
if getattr(args, key) is not None:
if args.rank == 0:
print('WARNING: overriding default arguments for {key}:{v} \
with {key}:{v2}'.format(key=key, v=defaults[key],
v2=getattr(args, key)),
flush=True)
else:
setattr(args, key, defaults[key])
# Batch size.
assert args.micro_batch_size is not None
assert args.micro_batch_size > 0
if args.global_batch_size is None:
args.global_batch_size = args.micro_batch_size * args.data_parallel_size
if args.rank == 0:
print('setting global batch size to {}'.format(
args.global_batch_size), flush=True)
assert args.global_batch_size > 0
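    # Editor's note (hedged example): when --global-batch-size is omitted it defaults to
    # micro_batch_size * data_parallel_size, e.g. --micro-batch-size 4 with a data-parallel
    # size of 8 gives a global batch size of 32.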
if args.num_layers_per_virtual_pipeline_stage is not None:
assert args.pipeline_model_parallel_size > 2, \
'pipeline-model-parallel size should be greater than 2 with ' \
'interleaved schedule'
assert args.num_layers % args.num_layers_per_virtual_pipeline_stage == 0, \
'number of layers is not divisible by number of layers per virtual ' \
'pipeline stage'
args.virtual_pipeline_model_parallel_size = \
(args.num_layers // args.pipeline_model_parallel_size) // \
args.num_layers_per_virtual_pipeline_stage
else:
args.virtual_pipeline_model_parallel_size = None
# Parameters dtype.
args.params_dtype = torch.float
if args.fp16:
assert not args.bf16
args.params_dtype = torch.half
if args.bf16:
assert not args.fp16
args.params_dtype = torch.bfloat16
# bfloat16 requires gradient accumulation and all-reduce to
# be done in fp32.
if not args.accumulate_allreduce_grads_in_fp32:
args.accumulate_allreduce_grads_in_fp32 = True
if args.rank == 0:
print('accumulate and all-reduce gradients in fp32 for '
'bfloat16 data type.', flush=True)
if args.rank == 0:
print('using {} for parameters ...'.format(args.params_dtype),
flush=True)
# If we do accumulation and all-reduces in fp32, we need to have
# local DDP and we should set the use-contiguous-buffers-in-ddp.
if args.accumulate_allreduce_grads_in_fp32:
assert args.DDP_impl == 'local'
args.use_contiguous_buffers_in_ddp = True
if args.dataloader_type is None:
args.dataloader_type = 'single'
# Consumed tokens.
args.consumed_train_samples = 0
args.consumed_valid_samples = 0
args.consumed_train_tokens = 0
args.gigaflos_no_embeds = 0
# Iteration-based training.
if args.train_iters:
# If we use iteration-based training, make sure the
# sample-based options are off.
assert args.train_samples is None, \
'expected iteration-based training'
assert args.lr_decay_samples is None, \
'expected iteration-based learning rate decay'
assert args.lr_warmup_samples == 0, \
'expected iteration-based learning rate warmup'
assert args.rampup_batch_size is None, \
'expected no batch-size rampup for iteration-based training'
if args.lr_warmup_fraction is not None:
assert args.lr_warmup_iters == 0, \
'can only specify one of lr-warmup-fraction and lr-warmup-iters'
# Sample-based training.
if args.train_samples:
# If we use sample-based training, make sure the
# iteration-based options are off.
assert args.train_iters is None, \
'expected sample-based training'
assert args.lr_decay_iters is None, \
'expected sample-based learning rate decay'
assert args.lr_warmup_iters == 0, \
            'expected sample-based learning rate warmup'
if args.lr_warmup_fraction is not None:
assert args.lr_warmup_samples == 0, \
'can only specify one of lr-warmup-fraction ' \
'and lr-warmup-samples'
# Check required arguments.
required_args = ['num_layers', 'hidden_size', 'num_attention_heads']
for req_arg in required_args:
_check_arg_is_not_none(args, req_arg)
# Checks.
if args.ffn_hidden_size is None:
args.ffn_hidden_size = 4 * args.hidden_size
if args.kv_channels is None:
assert args.hidden_size % args.num_attention_heads == 0
args.kv_channels = args.hidden_size // args.num_attention_heads
if args.seq_length is not None:
assert args.encoder_seq_length is None
args.encoder_seq_length = args.seq_length
else:
assert args.encoder_seq_length is not None
args.seq_length = args.encoder_seq_length
if args.position_embedding_type == PositionEmbeddingType.absolute or args.position_embedding_type == PositionEmbeddingType.alibi:
assert args.max_position_embeddings is not None
if args.seq_length is not None:
assert args.max_position_embeddings >= args.seq_length
if args.decoder_seq_length is not None:
assert args.max_position_embeddings >= args.decoder_seq_length
else:
assert args.max_position_embeddings is None
if args.lr is not None:
assert args.min_lr <= args.lr
if args.save is not None:
assert args.save_interval is not None
# Mixed precision checks.
if args.fp16_lm_cross_entropy:
assert args.fp16, 'lm cross entropy in fp16 only support in fp16 mode.'
if args.fp32_residual_connection:
assert args.fp16 or args.bf16, \
'residual connection in fp32 only supported when using fp16 or bf16.'
# Activation checkpointing.
if args.distribute_checkpointed_activations:
assert args.checkpoint_activations, \
'for distribute-checkpointed-activations to work you '\
'need to enable checkpoint-activations'
args.curriculum_learning = False
# Activation function
if args.glu_activation is not None and args.bias_gelu_fusion:
raise ValueError("if glu-activation is used, please set --no-bias-gelu-fusion")
# Skip train iterations
if args.skip_train_iteration_range is not None:
args.skip_train_iteration_range = [
list(map(int, range_.split("-"))) for range_ in args.skip_train_iteration_range
]
args.skip_train_iteration_range.sort()
skip_train_iteration_range = collections.deque()
for range_ in args.skip_train_iteration_range:
if len(range_) == 2:
start, end = range_
assert end >= start, \
"end of skip range cannot be smaller than start of skip range"
# merge overlapping intervals (e.g. 1-5 2-6 -> 1-6)
if not skip_train_iteration_range:
skip_train_iteration_range.append([start, end])
elif skip_train_iteration_range[-1][1] >= start:
skip_train_iteration_range[-1][1] = max(end, skip_train_iteration_range[-1][1])
else:
skip_train_iteration_range.append([start, end])
else:
raise ValueError(
"skip train iterations should be specified as two numbers, i.e. start-end"
)
args.skip_train_iteration_range = skip_train_iteration_range
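# Added illustrative walk-through (not from the original source): passing
# "--skip-train-iteration-range 100-200 150-300 500-500" yields
# [[100, 200], [150, 300], [500, 500]] after the split/sort above, and the merge
# loop then collapses the overlapping ranges into deque([[100, 300], [500, 500]]).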
if args.use_bnb_optimizer:
try:
import bitsandbytes as bnb
except ModuleNotFoundError:
raise ModuleNotFoundError("Please install bitsandbytes from https://github.com/facebookresearch/bitsandbytes.")
_print_args(args)
return args
def _print_args(args):
"""Print arguments."""
if args.rank == 0:
print('------------------------ arguments ------------------------',
flush=True)
str_list = []
for arg in vars(args):
dots = '.' * (48 - len(arg))
str_list.append(' {} {} {}'.format(arg, dots, getattr(args, arg)))
if args.log_path is not None:
with open(os.path.join(args.log_path,f'args_{time.strftime("%Y-%m-%dT%H:%M:%S")}.txt'), 'w') as f:
for arg in sorted(str_list, key=lambda x: x.lower()):
f.write(arg+"\n")
print(arg, flush=True)
else:
for arg in sorted(str_list, key=lambda x: x.lower()):
print(arg, flush=True)
print('-------------------- end of arguments ---------------------',
flush=True)
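# Added example of the padded output produced above (the value 4 is hypothetical):
#   micro_batch_size ................................ 4
# Each name is prefixed with a space and followed by 48 - len(name) dots so the
# values line up in a single column.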
def _check_arg_is_not_none(args, arg):
assert getattr(args, arg) is not None, '{} argument is None'.format(arg)
def _add_network_size_args(parser):
group = parser.add_argument_group(title='network size')
group.add_argument('--num-layers', type=int, default=None,
help='Number of transformer layers.')
group.add_argument('--hidden-size', type=int, default=None,
help='Transformer hidden size.')
group.add_argument('--ffn-hidden-size', type=int, default=None,
help='Transformer Feed-Forward Network hidden size. '
'This is set to 4*hidden-size if not provided')
group.add_argument('--num-attention-heads', type=int, default=None,
help='Number of transformer attention heads.')
group.add_argument('--kv-channels', type=int, default=None,
help='Projection weights dimension in multi-head '
'attention. This is set to '
' args.hidden_size // args.num_attention_heads '
'if not provided.')
group.add_argument('--max-position-embeddings', type=int, default=None,
help='Maximum number of position embeddings to use. '
'This is the size of position embedding.')
group.add_argument('--make-vocab-size-divisible-by', type=int, default=128,
help='Pad the vocab size to be divisible by this value. '
'This is added for computational efficiency reasons.')
group.add_argument('--layernorm-epsilon', type=float, default=1e-5,
help='Layer norm epsilon.')
group.add_argument('--apply-residual-connection-post-layernorm',
action='store_true',
help='If set, use original BERT residual connection '
'ordering.')
group.add_argument('--embed-layernorm', action='store_true',
help='use layernorm for embedding')
group.add_argument('--openai-gelu', action='store_true',
help="Use OpenAI's GeLU implementation. This option "
'should not be used except for backward compatibility '
'reasons.')
group.add_argument('--onnx-safe', type=bool, required=False,
help='Use workarounds for known problems with '
'Torch ONNX exporter')
group.add_argument('--bert-no-binary-head', action='store_false',
help='Disable BERT binary head.',
dest='bert_binary_head')
group.add_argument('--position-embedding-type', type=lambda x: PositionEmbeddingType[x],
choices=list(PositionEmbeddingType),
default=PositionEmbeddingType.absolute,
help='Define position embedding type ("absolute" | "rotary" | "alibi"). "absolute" by default.'
)
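# Added note: the `type=lambda x: PositionEmbeddingType[x]` above looks the value up by
# enum member name, so e.g. `--position-embedding-type rotary` yields
# PositionEmbeddingType.rotary, while an unrecognized name raises a plain KeyError.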
group.add_argument('--glu-activation', type=str,
choices=megatron.model.glu_activations.GLU_ACTIVATIONS.keys(),
help='GLU activations to use.'
)
group.add_argument('--kill-switch-path', type=str,
help='path to look for a kill switch, which if found will automatically exit the program'
)
group.add_argument('--log-level', type=str, choices=list(log_levels.keys()),
help="Logger log level to use on the main process. Possible choices are the log levels as strings: 'debug', "
"'info', 'warning', 'error' and 'critical', plus a 'passive' level which doesn't set anything and lets the "
"application set the level."
)
group.add_argument('--log-level-replica', type=str, choices=list(log_levels.keys()),
help="Logger log level to use on replicas. Same choices as ``log_level``"
)
return parser
def _add_logging_args(parser):
group = parser.add_argument_group(title='logging')
group.add_argument('--log-params-norm', action='store_true',
help='If set, calculate and log parameters norm.')
group.add_argument('--log-num-zeros-in-grad', action='store_true',
help='If set, calculate and log the number of zeros in gradient.')
group.add_argument('--tensorboard-log-interval', type=int, default=1,
help='Report to tensorboard interval.')
group.add_argument('--tensorboard-queue-size', type=int, default=1000,
help='Size of the tensorboard queue for pending events '
'and summaries before one of the ‘add’ calls forces a '
'flush to disk.')
group.add_argument('--log-timers-to-tensorboard', action='store_true',
help='If set, write timers to tensorboard.')
group.add_argument('--log-batch-size-to-tensorboard', action='store_true',
help='If set, write batch-size to tensorboard.')
group.add_argument('--no-log-learnig-rate-to-tensorboard',
action='store_false',
help='Disable learning rate logging to tensorboard.',
dest='log_learning_rate_to_tensorboard')
group.add_argument('--no-log-loss-scale-to-tensorboard',
action='store_false',
help='Disable loss-scale logging to tensorboard.',
dest='log_loss_scale_to_tensorboard')
group.add_argument('--log-validation-ppl-to-tensorboard',
action='store_true',
help='If set, write validation perplexity to '
'tensorboard.')
return parser
def _add_regularization_args(parser):
group = parser.add_argument_group(title='regularization')
group.add_argument('--attention-dropout', type=float, default=0.1,
help='Post attention dropout probability.')
group.add_argument('--hidden-dropout', type=float, default=0.1,
help='Dropout probability for hidden state transformer.')
group.add_argument('--weight-decay', type=float, default=0.01,
help='Weight decay coefficient for L2 regularization.')
group.add_argument('--clip-grad', type=float, default=1.0,
help='Gradient clipping based on global L2 norm.')
group.add_argument('--adam-beta1', type=float, default=0.9,
help='First coefficient for computing running averages '
'of gradient and its square')
group.add_argument('--adam-beta2', type=float, default=0.999,
help='Second coefficient for computing running averages '
'of gradient and its square')
group.add_argument('--adam-eps', type=float, default=1e-08,
help='Term added to the denominator to improve '
'numerical stability')
group.add_argument('--sgd-momentum', type=float, default=0.9,
help='Momentum factor for sgd')
return parser
def _add_training_args(parser):
group = parser.add_argument_group(title='training')
group.add_argument('--micro-batch-size', type=int, default=None,
help='Batch size per model instance (local batch size). '
'Global batch size is local batch size times data '
'parallel size times number of micro batches.')
group.add_argument('--batch-size', type=int, default=None,
help='Old batch size parameter, do not use. '
'Use --micro-batch-size instead')
group.add_argument('--global-batch-size', type=int, default=None,
help='Training batch size. If set, it should be a '
'multiple of micro-batch-size times data-parallel-size. '
'If this value is None, then '
'use micro-batch-size * data-parallel-size as the '
'global batch size. This choice will result in 1 for '
'number of micro-batches.')
group.add_argument('--rampup-batch-size', nargs='*', default=None,
help='Batch size ramp up with the following values:'
' --rampup-batch-size <start batch size> '
' <batch size increment> '
' <ramp-up samples> '
'For example: '
' --rampup-batch-size 16 8 300000 '
' --global-batch-size 1024 '
'will start with global batch size 16 and over '
' (1024 - 16) / 8 = 126 intervals will increase '
'the batch size linearly to 1024. In each interval '
'we will use approximately 300000 / 126 = 2380 samples.')
group.add_argument('--checkpoint-activations', action='store_true',
help='Checkpoint activation to allow for training '
'with larger models, sequences, and batch sizes.')
group.add_argument('--distribute-checkpointed-activations',
action='store_true',
help='If set, distribute checkpointed activations '
'across model parallel group.')
group.add_argument('--checkpoint-num-layers', type=int, default=1,
help='chunk size (number of layers) for checkpointing.')
group.add_argument('--train-iters', type=int, default=None,
help='Total number of iterations to train over all '
'training runs. Note that either train-iters or '
'train-samples should be provided.')
group.add_argument('--train-samples', type=int, default=None,
help='Total number of samples to train over all '
'training runs. Note that either train-iters or '
'train-samples should be provided.')
group.add_argument('--train-tokens', type=int, default=None,
help='Total number of tokens to train over all '
'training runs.')
group.add_argument('--log-interval', type=int, default=100,
help='Report loss and timing interval.')
group.add_argument('--exit-interval', type=int, default=None,
help='Exit the program after the iteration is divisible '
'by this value.')
group.add_argument('--exit-duration-in-mins', type=int, default=None,
help='Exit the program after this many minutes.')
group.add_argument('--tensorboard-dir', type=str, default=None,
help='Write TensorBoard logs to this directory.')
group.add_argument('--no-masked-softmax-fusion',
action='store_false',
help='Disable fusion of query_key_value scaling, '
'masking, and softmax.',
dest='masked_softmax_fusion')
group.add_argument('--no-bias-gelu-fusion', action='store_false',
help='Disable bias and gelu fusion.',
dest='bias_gelu_fusion')
group.add_argument('--no-bias-dropout-fusion', action='store_false',
help='Disable bias and dropout fusion.',
dest='bias_dropout_fusion')
group.add_argument('--optimizer', type=str, default='adam',
choices=['adam', 'sgd'],
help='Optimizer function')
group.add_argument('--use-bnb-optimizer', action='store_true',
help='Use bitsandbytes optimizer for efficient training, '
'please refer to https://github.com/facebookresearch/bitsandbytes.',
dest='use_bnb_optimizer')
group.add_argument('--dataloader-type', type=str, default=None,
choices=['single', 'cyclic'],
help='Single pass vs multiple pass data loader')
group.add_argument('--cpu-optimizer', action='store_true',
help='Run optimizer on CPU')
group.add_argument('--cpu_torch_adam', action='store_true',
help='Use Torch Adam as optimizer on CPU.')
group.add_argument('--codecarbon-dir', type=str, default=None,
help='Write CodeCarbon logs to this directory.')
group.add_argument('--eval-only', type=bool, required=False,
help='If set to True, no train step will be performed, '
'and only the evaluation on the `valid` and `test` sets '
'will be performed.' )
group.add_argument('--skip-train-iteration-range', type=str, nargs='+', default=None,
help='Iteration ranges to skip. The values are one or more dash-separated ranges. e.g., 101-200 251-300.')
group.add_argument('--abort-on-unmet-fused-kernel-constraints', action='store_true',
help="If set to True, the program will abort if the constraints for loading a fused kernel aren't met")
return parser
def _add_initialization_args(parser):
group = parser.add_argument_group(title='initialization')
group.add_argument('--seed', type=int, default=1234,
help='Random seed used for python, numpy, '
'pytorch, and cuda.')
group.add_argument('--init-method-std', type=float, default=0.02,
help='Standard deviation of the zero mean normal '
'distribution used for weight initialization.')
group.add_argument('--init-method-xavier-uniform', action='store_true',
help='Enable Xavier uniform parameter initialization')
return parser
def _add_learning_rate_args(parser):
group = parser.add_argument_group(title='learning rate')
group.add_argument('--lr', type=float, default=None,
help='Initial learning rate. Depending on decay style '
'and initial warmup, the learning rate at each '
'iteration would be different.')
group.add_argument('--lr-decay-style', type=str, default='linear',
choices=['constant', 'linear', 'cosine'],
help='Learning rate decay function.')
group.add_argument('--lr-decay-iters', type=int, default=None,
help='number of iterations to decay learning rate over,'
' If None defaults to `--train-iters`')
group.add_argument('--lr-decay-samples', type=int, default=None,
help='number of samples to decay learning rate over,'
' If None defaults to `--train-samples`')
group.add_argument('--lr-decay-tokens', type=int, default=None,
help='number of tokens to decay learning rate over,'
' If not None will override iter/sample-based decay')
group.add_argument('--lr-warmup-fraction', type=float, default=None,
help='fraction of lr-warmup-(iters/samples) to use '
'for warmup (as a float)')
group.add_argument('--lr-warmup-iters', type=int, default=0,
help='number of iterations to linearly warmup '
'learning rate over.')
group.add_argument('--lr-warmup-samples', type=int, default=0,
help='number of samples to linearly warmup '
'learning rate over.')
group.add_argument('--warmup', type=int, default=None,
help='Old lr warmup argument, do not use. Use one of the '
'--lr-warmup-* arguments above')
group.add_argument('--min-lr', type=float, default=0.0,
help='Minimum value for learning rate. The scheduler '
'clips values below this threshold.')
group.add_argument('--override-lr-scheduler', action='store_true',
help='Reset the values of the scheduler (learning rate, '
'warmup iterations, minimum learning rate, maximum '
'number of iterations, and decay style) from input '
'arguments and ignore values from checkpoints. Note '
'that all the above values will be reset.')
group.add_argument('--use-checkpoint-lr-scheduler', action='store_true',
help='Use checkpoint to set the values of the scheduler '
'(learning rate, warmup iterations, minimum learning '
'rate, maximum number of iterations, and decay style '
'from checkpoint and ignore input arguments.')
return parser
def _add_checkpointing_args(parser):
group = parser.add_argument_group(title='checkpointing')
group.add_argument('--save', type=str, default=None,
help='Output directory to save checkpoints to.')
group.add_argument('--save-interval', type=int, default=None,
help='Number of iterations between checkpoint saves.')
group.add_argument('--no-save-optim', action='store_true', default=None,
help='Do not save current optimizer.')
group.add_argument('--no-save-rng', action='store_true', default=None,
help='Do not save current rng state.')
group.add_argument('--load', type=str, default=None,
help='Directory containing a model checkpoint.')
group.add_argument('--no-load-optim', action='store_true', default=None,
help='Do not load optimizer when loading checkpoint.')
group.add_argument('--no-load-rng', action='store_true', default=None,
help='Do not load rng state when loading checkpoint.')
group.add_argument('--finetune', action='store_true',
help='Load model for finetuning. Do not load optimizer '
'or rng state from checkpoint and set iteration to 0. '
'Assumed when loading a release checkpoint.')
return parser
def _add_mixed_precision_args(parser):
group = parser.add_argument_group(title='mixed precision')
group.add_argument('--fp16', action='store_true',
help='Run model in fp16 mode.')
group.add_argument('--bf16', action='store_true',
help='Run model in bfloat16 mode.')
group.add_argument('--loss-scale', type=float, default=None,
help='Static loss scaling, positive power of 2 '
'values can improve fp16 convergence. If None, dynamic '
'loss scaling is used.')
group.add_argument('--initial-loss-scale', type=float, default=2**32,
help='Initial loss-scale for dynamic loss scaling.')
group.add_argument('--min-loss-scale', type=float, default=1.0,
help='Minimum loss scale for dynamic loss scale.')
group.add_argument('--loss-scale-window', type=float, default=1000,
help='Window over which to raise/lower dynamic scale.')
group.add_argument('--hysteresis', type=int, default=2,
help='hysteresis for dynamic loss scaling')
group.add_argument('--fp32-residual-connection', action='store_true',
help='Move residual connections to fp32.')
group.add_argument('--no-query-key-layer-scaling', action='store_false',
help='Do not scale Q * K^T by 1 / layer-number.',
dest='apply_query_key_layer_scaling')
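# Added note: '--no-query-key-layer-scaling' follows the store_false/dest pattern used by
# the other '--no-*' flags in this file: omitting the flag leaves
# args.apply_query_key_layer_scaling at its implicit default of True, and passing it
# sets the attribute to False.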
group.add_argument('--attention-softmax-in-fp32', action='store_true',
help='Run attention masking and softmax in fp32. '
'This flag is ignored unless '
'--no-query-key-layer-scaling is specified.')
group.add_argument('--accumulate-allreduce-grads-in-fp32',
action='store_true',
help='Gradient accumulation and all-reduce in fp32.')
group.add_argument('--fp16-lm-cross-entropy', action='store_true',
help='Move the cross entropy unreduced loss calculation '
'for lm head to fp16.')
return parser
def _add_distributed_args(parser):
group = parser.add_argument_group(title='distributed')
group.add_argument('--tensor-model-parallel-size', type=int, default=1,
help='Degree of tensor model parallelism.')
group.add_argument('--pipeline-model-parallel-size', type=int, default=1,
help='Degree of pipeline model parallelism.')
group.add_argument('--model-parallel-size', type=int, default=None,
help='Old model parallel argument, do not use. Use '
'--tensor-model-parallel-size instead.')
group.add_argument('--num-layers-per-virtual-pipeline-stage', type=int, default=None,
help='Number of layers per virtual pipeline stage')
group.add_argument('--distributed-backend', default='nccl',
choices=['nccl', 'gloo'],
help='Which backend to use for distributed training.')
group.add_argument('--DDP-impl', default='local',
choices=['local', 'torch'],
help='which DistributedDataParallel implementation '
'to use.')
group.add_argument('--use-contiguous-buffers-in-ddp', action='store_true',
help='If set, use contiguous buffer in DDP. Note that '
'this option only works with local DDP.' )
group.add_argument('--no-scatter-gather-tensors-in-pipeline', action='store_false',
help='Use scatter/gather to optimize communication of tensors in pipeline',
dest='scatter_gather_tensors_in_pipeline')
group.add_argument('--local_rank', type=int, default=None,
help='local rank passed from distributed launcher.')
group.add_argument('--lazy-mpu-init', type=bool, required=False,
help='If set to True, initialize_megatron() '
'skips DDP initialization and returns function to '
'complete it instead. Also turns on '
'--use-cpu-initialization flag. This is for '
'external DDP manager.' )
group.add_argument('--use-cpu-initialization', action='store_true',
default=None, help='If set, affine parallel weights '
'initialization uses CPU' )
return parser
def _add_validation_args(parser):
group = parser.add_argument_group(title='validation')
group.add_argument('--eval-iters', type=int, default=100,
help='Number of iterations to run for evaluation on the '
'validation/test set.')
group.add_argument('--eval-interval', type=int, default=1000,
help='Interval between running evaluation on '
'validation set.')
return parser
def _add_data_args(parser):
group = parser.add_argument_group(title='data and dataloader')
# option 1 for data loading (mutually exclusive with option2)
group.add_argument('--data-path', nargs='*', default=None,
help='Path to the training dataset. Accepted format: '
'1) a single data path, 2) multiple datasets in the '
'form: dataset1-weight dataset1-path dataset2-weight '
'dataset2-path ...')
group.add_argument('--split', type=str, default=None,
help='Comma-separated list of proportions for training,'
' validation, and test split. For example the split '
'`90,5,5` will use 90%% of data for training, 5%% for '
'validation and 5%% for test.')
# option 2 for data loading (mutually exclusive with option1)
# helper class to parse the --xxx-weighted-split-paths
# note here two args are set: extra valid dataset paths and names
class parse_data_paths(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if option_string == "--train-weighted-split-paths":
assert len(values) == 1, ('Only 1 dataset group is allowed to '
'be passed for the argument --train-weighted-split-paths')
# make sure string given in the correct format
err_message = ('Each data group should be given in the following format: '
'"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2" '
'where START < END')
for v in values:
# each prefix consists several datasets separated by commas
prefix = ":".join(v.split(":")[1:]) # remove GIVEN_NAME
datasets = prefix.split(",")
# check if each dataset is formatted like `WEIGHT START:END PATH`
for d in datasets:
assert len(d.split()) == 3, err_message
start, end = d.split()[1].split(":")
assert float(start) < float(end), err_message
names = [v.split(":")[0] for v in values]
prefixes = [":".join(v.split(":")[1:]).strip() for v in values]
weights = [[d.split()[0] for d in p.split(",")] for p in prefixes]
splits = [[d.split()[1] for d in p.split(",")] for p in prefixes]
paths = [[d.split()[2] for d in p.split(",")] for p in prefixes]
# # to keep consistency with Option 1 of data loading (through --data-path)
# # paths will contain strings on the following form
# # "WEIGHTS1 PATH1 WEIGHTS2 PATH2 WEIGHTS3 PATH3" for each dataset group
# # while data will be parsed in additional arguments below
# paths_option1_style = []
# for p, w in zip(paths, weights):
# paths_option1_style.append(" ".join([f"{w_i} {p_i}" for p_i, w_i in zip(p,w)]))
# setattr(args, self.dest, paths_option1_style)
setattr(args, self.dest, paths)
setattr(args, self.dest.replace("paths", "weights"), weights)
setattr(args, self.dest.replace("paths", "splits"), splits)
setattr(args, self.dest.replace("paths","names"), names)
group.add_argument('--train-weighted-split-paths', nargs='*', default=None,
help='Weights, splits and paths to groups of datasets. '
'Accepted format: ONE dataset group could be '
'submitted in the following form between double quotes: '
'"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2" '
'e.g.: "NAME_ABC: 0.6 0:0.6 A, 0.3 0:1 B, 0.1 0:1 C" '
'WEIGHT is used to up and down sample each dataset A,B,C in the group. '
'START:END indicates the split portion of the dataset',
action=parse_data_paths)
group.add_argument('--valid-weighted-split-paths', nargs='*', default=None,
help='Weights, splits and paths to groups of datasets. '
'Accepted format: one or many dataset groups could be '
'submitted in the following form, each between double quotes: '
'"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2" '
'e.g.: "NAME_ABC: 0.6 0.6:0.8 A, 0.3 0:1 B, 0.1 0:1 C" '
'"NAME_CDE: 0.6 0.6:0.8 C, 0.3 0:1 D, 0.1 0:1 E" '
'validation will be run on each of those groups independently',
action=parse_data_paths)
group.add_argument('--test-weighted-split-paths', nargs='*', default=None,
help='Weights, splits and paths to groups of datasets. '
'Accepted format: one or many dataset groups could be '
'submitted in the following form, each between double quotes: '
'"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2" '
'e.g.: "NAME_ABC: 0.6 0.6:0.8 A, 0.3 0:1 B, 0.1 0:1 C" '
'"NAME_CDE: 0.6 0.6:0.8 C, 0.3 0:1 D, 0.1 0:1 E" '
'test will be run on each of those groups independently',
action=parse_data_paths)
group.add_argument('--log-path', type=str, default=None,
help='Directory in which to save the arguments file.')
group.add_argument('--vocab-file', type=str, default=None,
help='Path to the vocab file.')
group.add_argument('--merge-file', type=str, default=None,
help='Path to the BPE merge file.')
group.add_argument('--vocab-extra-ids', type=int, default=0,
help='Number of additional vocabulary tokens. '
'They are used for span masking in the T5 model')
group.add_argument('--seq-length', type=int, default=None,
help='Maximum sequence length to process.')
group.add_argument('--encoder-seq-length', type=int, default=None,
help='Maximum encoder sequence length to process. '
'This should be exclusive of --seq-length')
group.add_argument('--decoder-seq-length', type=int, default=None,
help="Maximum decoder sequence length to process.")
group.add_argument('--retriever-seq-length', type=int, default=256,
help='Maximum sequence length for the biencoder model '
' for retriever')
group.add_argument('--sample-rate', type=float, default=1.0,
help='sample rate for training data. Supposed to be 0 '
' < sample_rate < 1')
group.add_argument('--mask-prob', type=float, default=0.15,
help='Probability of replacing a token with mask.')
group.add_argument('--short-seq-prob', type=float, default=0.1,
help='Probability of producing a short sequence.')
group.add_argument('--mmap-warmup', action='store_true',
help='Warm up mmap files.')
group.add_argument('--num-workers', type=int, default=2,
help="Dataloader number of workers.")
group.add_argument('--tokenizer-type', type=str,
default=None,
choices=['BertWordPieceLowerCase',
'BertWordPieceCase',
'GPT2BPETokenizer',
'PretrainedFromHF'],
help='What type of tokenizer to use.')
group.add_argument("--tokenizer-name-or-path", type=str, default=None,
help="Name or path of the huggingface tokenizer.")
group.add_argument('--data-impl', type=str, default='infer',
choices=['lazy', 'cached', 'mmap', 'infer'],
help='Implementation of indexed datasets.')
group.add_argument('--reset-position-ids', action='store_true',
help='Reset position ids after end-of-document token.')
group.add_argument('--reset-attention-mask', action='store_true',
help='Reset self attention mask after '
'end-of-document token. Attention between tokens from different documents is null.')
group.add_argument('--eod-mask-loss', action='store_true',
help='Mask loss for the end of document tokens.')
group.add_argument('--loss-on-targets-only', action='store_true',
help='Mask loss on input sequence.')
group.add_argument('--reweight-loss-based-on-position-frequency', action="store_true",
help='Some objectives require us to sample loss_mask. This might introduce bias towards '
'specific positions. This option tries to un-bias the loss by reweighting loss on specific '
'positions based on how frequently we train on that position. '
'This is mostly used for prefix_lm training')
return parser
def _add_autoresume_args(parser):
group = parser.add_argument_group(title='autoresume')
group.add_argument('--adlr-autoresume', action='store_true',
help='Enable autoresume on adlr cluster.')
group.add_argument('--adlr-autoresume-interval', type=int, default=1000,
help='Interval over which to check for the autoresume '
'termination signal')
return parser
def _add_biencoder_args(parser):
group = parser.add_argument_group(title='biencoder')
# network size
group.add_argument('--ict-head-size', type=int, default=None,
help='Size of block embeddings to be used in ICT and '
'REALM (paper default: 128)')
group.add_argument('--biencoder-projection-dim', type=int, default=0,
help='Size of projection head used in biencoder (paper'
' default: 128)')
group.add_argument('--biencoder-shared-query-context-model', action='store_true',
help='Whether to share the parameters of the query '
'and context models or not')
# checkpointing
group.add_argument('--ict-load', type=str, default=None,
help='Directory containing an ICTBertModel checkpoint')
group.add_argument('--bert-load', type=str, default=None,
help='Directory containing a BertModel checkpoint '
'(needed to start ICT and REALM)')
# data
group.add_argument('--titles-data-path', type=str, default=None,
help='Path to titles dataset used for ICT')
group.add_argument('--query-in-block-prob', type=float, default=0.1,
help='Probability of keeping query in block for '
'ICT dataset')
group.add_argument('--use-one-sent-docs', action='store_true',
help='Whether to use one sentence documents in ICT')
group.add_argument('--evidence-data-path', type=str, default=None,
help='Path to Wikipedia Evidence from the DPR paper')
# training
group.add_argument('--retriever-report-topk-accuracies', nargs='+', type=int,
default=[], help="Which top-k accuracies to report "
"(e.g. '1 5 20')")
group.add_argument('--retriever-score-scaling', action='store_true',
help='Whether to scale retriever scores by inverse '
'square root of hidden size')
# faiss index
group.add_argument('--block-data-path', type=str, default=None,
help='Where to save/load BlockData to/from')
group.add_argument('--embedding-path', type=str, default=None,
help='Where to save/load Open-Retrieval Embedding'
' data to/from')
# indexer
group.add_argument('--indexer-batch-size', type=int, default=128,
help='How large of batches to use when doing indexing '
'jobs')
group.add_argument('--indexer-log-interval', type=int, default=1000,
help='After how many batches should the indexer '
'report progress')
return parser
def _add_vit_args(parser):
group = parser.add_argument_group(title="vit")
group.add_argument('--num-classes', type=int, default=1000,
help='Number of classes in the vision classification task')
group.add_argument('--img-dim', type=int, default=224,
help='Image size for vision classification task')
group.add_argument('--num-channels', type=int, default=3,
help='Number of channels in input image data')
group.add_argument('--patch-dim', type=int, default=16,
help='patch dimension used in vit')
return parser
def _add_zero_args(parser):
"""Text generate arguments."""
group = parser.add_argument_group('ZeRO configurations', 'configurations')
group.add_argument("--zero-stage", type=int, default=1.0)
group.add_argument('--zero-reduce-scatter', action='store_true',
help='Use reduce scatter if specified')
group.add_argument('--zero-contigious-gradients', action='store_true',
help='Use contiguous memory optimization if specified')
group.add_argument("--zero-reduce-bucket-size", type=int, default=0.0)
group.add_argument("--zero-allgather-bucket-size", type=int, default=0.0)
group.add_argument('--remote-device', type=str, default='none', choices=['none', 'cpu', 'nvme'],
help='Remote device for ZeRO-3 initialized parameters.')
group.add_argument('--use-pin-memory', action='store_true',
help='Use pinned CPU memory for ZeRO-3 initialized model parameters.')
return parser
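# Added illustrative usage of the ZeRO flags above (the bucket sizes are made-up example
# values, not defaults taken from this file):
#   --zero-stage 1 --zero-reduce-scatter \
#   --zero-reduce-bucket-size 50000000 --zero-allgather-bucket-size 500000000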
def _add_memoryopt_args(parser):
"""Memory optimization arguments."""
group = parser.add_argument_group('Memory optimizations', 'configurations')
group.add_argument("--scattered-embeddings", action='store_true',
help='Save memory by scattering embedding activations. '
'Introduces dropout differences across MP configurations.')
group.add_argument("--split-transformers", action='store_true',
help='Save memory by splitting transformer layers into two parts, '
'allowing for more frequent activation checkpoint savings.')
group.add_argument("--memory-centric-tiled-linear", action="store_true",
help='Save memory by tiling with deepspeed.zero.TiledLinear.')
group.add_argument("--tile-factor", type=int, default=1,
help='Make all linear layers the same size of [hidden/tile_factor, hidden/tile_factor]. '
'Must be enabled with --memory-centric-tiled-linear. '
'Example A: if tile_factor=1, the qkv layer [hidden, 3* hidden] would be converted into [1,3] tiles of size [hidden,hidden]. '
'Example B: if tile_factor=2, the intermediate layer [4*hidden, hidden] will be converted into [8, 2] tiles of size [hidden/2, hidden/2]. '
'Default is 1.')
return parser
def _add_activation_checkpoint_args(parser):
group = parser.add_argument_group('Activation Checkpointing',
'Checkpointing Configurations')
group.add_argument('--deepspeed-activation-checkpointing', action='store_true',
help='Uses activation checkpointing from DeepSpeed.')
group.add_argument('--partition-activations', action='store_true',
help='Partition activations across GPUs before checkpointing.')
group.add_argument('--contigious-checkpointing', action='store_true',
help='Contiguous memory checkpointing for activations.')
group.add_argument('--checkpoint-in-cpu', action='store_true',
help='Move the activation checkpoints to CPU.')
group.add_argument('--synchronize-each-layer', action='store_true',
help='Performs a synchronize at the beginning and end of each checkpointed layer.')
group.add_argument('--profile-backward', action='store_true',
help='Enables backward pass profiling for checkpointed layers.')
return parser
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Megatron arguments."""
import argparse
import collections
import os
import re
import time
import torch
import deepspeed
from megatron.enums import PositionEmbeddingType
import megatron
from megatron.logging import log_levels
def parse_args(extra_args_provider=None, defaults={},
ignore_unknown_args=False):
"""Parse all arguments."""
parser = argparse.ArgumentParser(description='Megatron-LM Arguments',
allow_abbrev=False)
# Standard arguments.
parser = _add_network_size_args(parser)
parser = _add_regularization_args(parser)
parser = _add_training_args(parser)
parser = _add_initialization_args(parser)
parser = _add_learning_rate_args(parser)
parser = _add_checkpointing_args(parser)
parser = _add_mixed_precision_args(parser)
parser = _add_distributed_args(parser)
parser = _add_validation_args(parser)
parser = _add_data_args(parser)
parser = _add_autoresume_args(parser)
parser = _add_biencoder_args(parser)
parser = _add_vit_args(parser)
parser = _add_logging_args(parser)
parser = _add_zero_args(parser)
parser = _add_memoryopt_args(parser)
parser = _add_activation_checkpoint_args(parser)
# Custom arguments.
if extra_args_provider is not None:
parser = extra_args_provider(parser)
parser = deepspeed.add_config_arguments(parser)
# Parse.
if ignore_unknown_args:
args, _ = parser.parse_known_args()
else:
args = parser.parse_args()
# Distributed args.
args.rank = int(os.getenv('RANK', '0'))
args.world_size = int(os.getenv("WORLD_SIZE", '1'))
# Tensor model parallel size.
args.tensor_model_parallel_size = min(
args.tensor_model_parallel_size, args.world_size)
assert args.world_size % args.tensor_model_parallel_size == 0, 'world size'\
' ({}) is not divisible by tensor model parallel size ({})'.format(
args.world_size, args.tensor_model_parallel_size)
# Pipeline model parallel size.
args.pipeline_model_parallel_size = min(
args.pipeline_model_parallel_size,
(args.world_size // args.tensor_model_parallel_size))
# Checks.
model_parallel_size = args.pipeline_model_parallel_size * \
args.tensor_model_parallel_size
assert args.world_size % model_parallel_size == 0, 'world size ({}) is not'\
' divisible by tensor parallel size ({}) times pipeline parallel ' \
'size ({})'.format(args.world_size, args.tensor_model_parallel_size,
args.pipeline_model_parallel_size)
args.data_parallel_size = args.world_size // model_parallel_size
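# Added worked example: with WORLD_SIZE=16, --tensor-model-parallel-size 2 and
# --pipeline-model-parallel-size 4, model_parallel_size = 2 * 4 = 8 and
# args.data_parallel_size = 16 // 8 = 2; the asserts above reject any world size that
# is not divisible by the product of the two model-parallel sizes.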
if args.rank == 0:
print('using world size: {}, data-parallel-size: {}, '
'tensor-model-parallel size: {}, '
'pipeline-model-parallel size: {} '.format(
args.world_size, args.data_parallel_size,
args.tensor_model_parallel_size,
args.pipeline_model_parallel_size), flush=True)
# --data-path and --train-weighted-splits-paths
message = "Data loading Mode 1: --data-path and --split "\
"and Mode 2: --(train|valid|test)-weighted-split-paths"\
"are mutually exclusive i.e. cannot be set together."
if args.data_path:
assert args.train_weighted_split_paths is None, message
setattr(args, "valid_weighted_split_names", None)
setattr(args, "valid_weighted_split_weights", None)
setattr(args, "valid_weighted_split_splits", None)
setattr(args, "test_weighted_split_names", None)
setattr(args, "test_weighted_split_weights", None)
setattr(args, "test_weighted_split_splits", None)
# args.split's default value is None; it is set here in order
# to check that it does not overlap with the 2nd mode of data loading
if args.split is None:
args.split = "969, 30, 1"
if args.train_weighted_split_paths or args.valid_weighted_split_paths or \
args.test_weighted_split_paths:
assert args.data_path is None and args.split is None, message
# Deprecated arguments
assert args.batch_size is None, '--batch-size argument is no longer ' \
'valid, use --micro-batch-size instead'
del args.batch_size
assert args.warmup is None, '--warmup argument is no longer valid, use ' \
'--lr-warmup-fraction instead'
del args.warmup
assert args.model_parallel_size is None, '--model-parallel-size is no ' \
'longer valid, use --tensor-model-parallel-size instead'
del args.model_parallel_size
# Set input defaults.
for key in defaults:
# For default to be valid, it should not be provided in the
# arguments that are passed to the program. We check this by
# ensuring the arg is set to None.
if getattr(args, key) is not None:
if args.rank == 0:
print('WARNING: overriding default arguments for '
'{key}:{v} with {key}:{v2}'.format(key=key, v=defaults[key],
v2=getattr(args, key)),
flush=True)
else:
setattr(args, key, defaults[key])
# Batch size.
assert args.micro_batch_size is not None
assert args.micro_batch_size > 0
if args.global_batch_size is None:
args.global_batch_size = args.micro_batch_size * args.data_parallel_size
if args.rank == 0:
print('setting global batch size to {}'.format(
args.global_batch_size), flush=True)
assert args.global_batch_size > 0
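# Added worked example: with --micro-batch-size 4, data_parallel_size = 2 and no
# --global-batch-size, the global batch size defaults to 4 * 2 = 8 (one micro-batch per
# data-parallel rank per step); an explicit --global-batch-size 64 would instead imply
# 64 / (4 * 2) = 8 micro-batches per step, as described in the argument's help above.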
if args.num_layers_per_virtual_pipeline_stage is not None:
assert args.pipeline_model_parallel_size > 2, \
'pipeline-model-parallel size should be greater than 2 with ' \
'interleaved schedule'
assert args.num_layers % args.num_layers_per_virtual_pipeline_stage == 0, \
'number of layers is not divisible by number of layers per virtual ' \
'pipeline stage'
args.virtual_pipeline_model_parallel_size = \
(args.num_layers // args.pipeline_model_parallel_size) // \
args.num_layers_per_virtual_pipeline_stage
else:
args.virtual_pipeline_model_parallel_size = None
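# Added worked example: --num-layers 24 --pipeline-model-parallel-size 4
# --num-layers-per-virtual-pipeline-stage 3 passes both asserts above (4 > 2 and
# 24 % 3 == 0) and gives virtual_pipeline_model_parallel_size = (24 // 4) // 3 = 2,
# i.e. two model chunks per pipeline rank for the interleaved schedule.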
# Parameters dtype.
args.params_dtype = torch.float
if args.fp16:
assert not args.bf16
args.params_dtype = torch.half
if args.bf16:
assert not args.fp16
args.params_dtype = torch.bfloat16
# bfloat16 requires gradient accumulation and all-reduce to
# be done in fp32.
if not args.accumulate_allreduce_grads_in_fp32:
args.accumulate_allreduce_grads_in_fp32 = True
if args.rank == 0:
print('accumulate and all-reduce gradients in fp32 for '
'bfloat16 data type.', flush=True)
if args.rank == 0:
print('using {} for parameters ...'.format(args.params_dtype),
flush=True)
# If we do accumulation and all-reduces in fp32, we need to have
# local DDP and we should set the use-contiguous-buffers-in-ddp.
if args.accumulate_allreduce_grads_in_fp32:
assert args.DDP_impl == 'local'
args.use_contiguous_buffers_in_ddp = True
if args.dataloader_type is None:
args.dataloader_type = 'single'
# Consumed tokens.
args.consumed_train_samples = 0
args.consumed_valid_samples = 0
args.consumed_train_tokens = 0
args.gigaflos_no_embeds = 0
# Iteration-based training.
if args.train_iters:
# If we use iteration-based training, make sure the
# sample-based options are off.
assert args.train_samples is None, \
'expected iteration-based training'
assert args.lr_decay_samples is None, \
'expected iteration-based learning rate decay'
assert args.lr_warmup_samples == 0, \
'expected iteration-based learning rate warmup'
assert args.rampup_batch_size is None, \
'expected no batch-size rampup for iteration-based training'
if args.lr_warmup_fraction is not None:
assert args.lr_warmup_iters == 0, \
'can only specify one of lr-warmup-fraction and lr-warmup-iters'
# Sample-based training.
if args.train_samples:
# If we use sample-based training, make sure the
# iteration-based options are off.
assert args.train_iters is None, \
'expected sample-based training'
assert args.lr_decay_iters is None, \
'expected sample-based learning rate decay'
assert args.lr_warmup_iters == 0, \
'expected sample-based learning rate warmup'
if args.lr_warmup_fraction is not None:
assert args.lr_warmup_samples == 0, \
'can only specify one of lr-warmup-fraction ' \
'and lr-warmup-samples'
# Check required arguments.
required_args = ['num_layers', 'hidden_size', 'num_attention_heads']
for req_arg in required_args:
_check_arg_is_not_none(args, req_arg)
# Checks.
if args.ffn_hidden_size is None:
args.ffn_hidden_size = 4 * args.hidden_size
if args.kv_channels is None:
assert args.hidden_size % args.num_attention_heads == 0
args.kv_channels = args.hidden_size // args.num_attention_heads
if args.seq_length is not None:
assert args.encoder_seq_length is None
args.encoder_seq_length = args.seq_length
else:
assert args.encoder_seq_length is not None
args.seq_length = args.encoder_seq_length
if args.position_embedding_type == PositionEmbeddingType.absolute or args.position_embedding_type == PositionEmbeddingType.alibi:
assert args.max_position_embeddings is not None
if args.seq_length is not None:
assert args.max_position_embeddings >= args.seq_length
if args.decoder_seq_length is not None:
assert args.max_position_embeddings >= args.decoder_seq_length
else:
assert args.max_position_embeddings is None
if args.lr is not None:
assert args.min_lr <= args.lr
if args.save is not None:
assert args.save_interval is not None
# Mixed precision checks.
if args.fp16_lm_cross_entropy:
assert args.fp16, 'lm cross entropy in fp16 is only supported in fp16 mode.'
if args.fp32_residual_connection:
assert args.fp16 or args.bf16, \
'residual connection in fp32 only supported when using fp16 or bf16.'
# Activation checkpointing.
if args.distribute_checkpointed_activations:
assert args.checkpoint_activations, \
'for distribute-checkpointed-activations to work you '\
'need to enable checkpoint-activations'
args.curriculum_learning = False
# Activation function
if args.glu_activation is not None and args.bias_gelu_fusion:
raise ValueError("if glu-activation is used, please set --no-bias-gelu-fusion")
# Skip train iterations
if args.skip_train_iteration_range is not None:
args.skip_train_iteration_range = [
list(map(int, range_.split("-"))) for range_ in args.skip_train_iteration_range
]
args.skip_train_iteration_range.sort()
skip_train_iteration_range = collections.deque()
for range_ in args.skip_train_iteration_range:
if len(range_) == 2:
start, end = range_
assert end >= start, \
"end of skip range cannot be smaller than start of skip range"
# merge overlapping intervals (e.g. 1-5 2-6 -> 1-6)
if not skip_train_iteration_range:
skip_train_iteration_range.append([start, end])
elif skip_train_iteration_range[-1][1] >= start:
skip_train_iteration_range[-1][1] = max(end, skip_train_iteration_range[-1][1])
else:
skip_train_iteration_range.append([start, end])
else:
raise ValueError(
"skip train iterations should be specified as two numbers, i.e. start-end"
)
args.skip_train_iteration_range = skip_train_iteration_range
if args.use_bnb_optimizer:
try:
import bitsandbytes as bnb
except ModuleNotFoundError:
raise ModuleNotFoundError("Please install bitsandbytes from https://github.com/facebookresearch/bitsandbytes.")
_print_args(args)
return args
def _print_args(args):
"""Print arguments."""
if args.rank == 0:
print('------------------------ arguments ------------------------',
flush=True)
str_list = []
for arg in vars(args):
dots = '.' * (48 - len(arg))
str_list.append(' {} {} {}'.format(arg, dots, getattr(args, arg)))
if args.log_path is not None:
with open(os.path.join(args.log_path,f'args_{time.strftime("%Y-%m-%dT%H:%M:%S")}.txt'), 'w') as f:
for arg in sorted(str_list, key=lambda x: x.lower()):
f.write(arg+"\n")
print(arg, flush=True)
else:
for arg in sorted(str_list, key=lambda x: x.lower()):
print(arg, flush=True)
print('-------------------- end of arguments ---------------------',
flush=True)
def _check_arg_is_not_none(args, arg):
assert getattr(args, arg) is not None, '{} argument is None'.format(arg)
def _add_network_size_args(parser):
group = parser.add_argument_group(title='network size')
group.add_argument('--num-layers', type=int, default=None,
help='Number of transformer layers.')
group.add_argument('--hidden-size', type=int, default=None,
help='Transformer hidden size.')
group.add_argument('--ffn-hidden-size', type=int, default=None,
help='Transformer Feed-Forward Network hidden size. '
'This is set to 4*hidden-size if not provided')
group.add_argument('--num-attention-heads', type=int, default=None,
help='Number of transformer attention heads.')
group.add_argument('--kv-channels', type=int, default=None,
help='Projection weights dimension in multi-head '
'attention. This is set to '
' args.hidden_size // args.num_attention_heads '
'if not provided.')
group.add_argument('--max-position-embeddings', type=int, default=None,
help='Maximum number of position embeddings to use. '
'This is the size of position embedding.')
group.add_argument('--make-vocab-size-divisible-by', type=int, default=128,
help='Pad the vocab size to be divisible by this value. '
'This is added for computational efficiency reasons.')
group.add_argument('--layernorm-epsilon', type=float, default=1e-5,
help='Layer norm epsilon.')
group.add_argument('--apply-residual-connection-post-layernorm',
action='store_true',
help='If set, use original BERT residual connection '
'ordering.')
group.add_argument('--embed-layernorm', action='store_true',
help='use layernorm for embedding')
group.add_argument('--openai-gelu', action='store_true',
help="Use OpenAI's GeLU implementation. This option "
'should not be used except for backward compatibility '
'reasons.')
group.add_argument('--onnx-safe', type=bool, required=False,
help='Use workarounds for known problems with '
'Torch ONNX exporter')
group.add_argument('--bert-no-binary-head', action='store_false',
help='Disable BERT binary head.',
dest='bert_binary_head')
group.add_argument('--position-embedding-type', type=lambda x: PositionEmbeddingType[x],
choices=list(PositionEmbeddingType),
default=PositionEmbeddingType.absolute,
help='Define position embedding type ("absolute" | "rotary" | "alibi"). "absolute" by default.'
)
group.add_argument('--glu-activation', type=str,
choices=megatron.model.glu_activations.GLU_ACTIVATIONS.keys(),
help='GLU activations to use.'
)
group.add_argument('--kill-switch-path', type=str,
help='path to look for a kill switch, which if found will automatically exit the program'
)
group.add_argument('--log-level', type=str, choices=list(log_levels.keys()),
help="Logger log level to use on the main process. Possible choices are the log levels as strings: 'debug', "
"'info', 'warning', 'error' and 'critical', plus a 'passive' level which doesn't set anything and lets the "
"application set the level."
)
group.add_argument('--log-level-replica', type=str, choices=list(log_levels.keys()),
help="Logger log level to use on replicas. Same choices as ``log_level``"
)
return parser
def _add_logging_args(parser):
group = parser.add_argument_group(title='logging')
group.add_argument('--log-params-norm', action='store_true',
help='If set, calculate and log parameters norm.')
group.add_argument('--log-num-zeros-in-grad', action='store_true',
help='If set, calculate and log the number of zeros in gradient.')
group.add_argument('--tensorboard-log-interval', type=int, default=1,
help='Report to tensorboard interval.')
group.add_argument('--tensorboard-queue-size', type=int, default=1000,
help='Size of the tensorboard queue for pending events '
'and summaries before one of the ‘add’ calls forces a '
'flush to disk.')
group.add_argument('--log-timers-to-tensorboard', action='store_true',
help='If set, write timers to tensorboard.')
group.add_argument('--log-batch-size-to-tensorboard', action='store_true',
help='If set, write batch-size to tensorboard.')
group.add_argument('--no-log-learnig-rate-to-tensorboard',
action='store_false',
help='Disable learning rate logging to tensorboard.',
dest='log_learning_rate_to_tensorboard')
group.add_argument('--no-log-loss-scale-to-tensorboard',
action='store_false',
help='Disable loss-scale logging to tensorboard.',
dest='log_loss_scale_to_tensorboard')
group.add_argument('--log-validation-ppl-to-tensorboard',
action='store_true',
help='If set, write validation perplexity to '
'tensorboard.')
return parser
def _add_regularization_args(parser):
group = parser.add_argument_group(title='regularization')
group.add_argument('--attention-dropout', type=float, default=0.1,
help='Post attention dropout probability.')
group.add_argument('--hidden-dropout', type=float, default=0.1,
help='Dropout probability for hidden state transformer.')
group.add_argument('--weight-decay', type=float, default=0.01,
help='Weight decay coefficient for L2 regularization.')
group.add_argument('--clip-grad', type=float, default=1.0,
help='Gradient clipping based on global L2 norm.')
group.add_argument('--adam-beta1', type=float, default=0.9,
help='First coefficient for computing running averages '
'of gradient and its square')
group.add_argument('--adam-beta2', type=float, default=0.999,
help='Second coefficient for computing running averages '
'of gradient and its square')
group.add_argument('--adam-eps', type=float, default=1e-08,
help='Term added to the denominator to improve '
'numerical stability')
group.add_argument('--sgd-momentum', type=float, default=0.9,
help='Momentum factor for sgd')
return parser
def _add_training_args(parser):
group = parser.add_argument_group(title='training')
group.add_argument('--micro-batch-size', type=int, default=None,
help='Batch size per model instance (local batch size). '
'Global batch size is local batch size times data '
'parallel size times number of micro batches.')
group.add_argument('--batch-size', type=int, default=None,
help='Old batch size parameter, do not use. '
'Use --micro-batch-size instead')
group.add_argument('--global-batch-size', type=int, default=None,
help='Training batch size. If set, it should be a '
'multiple of micro-batch-size times data-parallel-size. '
'If this value is None, then '
'use micro-batch-size * data-parallel-size as the '
'global batch size. This choice will result in 1 for '
'number of micro-batches.')
group.add_argument('--rampup-batch-size', nargs='*', default=None,
help='Batch size ramp up with the following values:'
' --rampup-batch-size <start batch size> '
' <batch size increment> '
' <ramp-up samples> '
'For example: '
' --rampup-batch-size 16 8 300000 '
' --global-batch-size 1024 '
'will start with global batch size 16 and over '
' (1024 - 16) / 8 = 126 intervals will increase '
'the batch size linearly to 1024. In each interval '
'we will use approximately 300000 / 126 = 2380 samples.')
group.add_argument('--checkpoint-activations', action='store_true',
help='Checkpoint activation to allow for training '
'with larger models, sequences, and batch sizes.')
group.add_argument('--distribute-checkpointed-activations',
action='store_true',
help='If set, distribute checkpointed activations '
'across model parallel group.')
group.add_argument('--checkpoint-num-layers', type=int, default=1,
help='chunk size (number of layers) for checkpointing.')
group.add_argument('--train-iters', type=int, default=None,
help='Total number of iterations to train over all '
'training runs. Note that either train-iters or '
'train-samples should be provided.')
group.add_argument('--train-samples', type=int, default=None,
help='Total number of samples to train over all '
'training runs. Note that either train-iters or '
'train-samples should be provided.')
group.add_argument('--train-tokens', type=int, default=None,
help='Total number of tokens to train over all '
'training runs.')
group.add_argument('--log-interval', type=int, default=100,
help='Report loss and timing interval.')
group.add_argument('--exit-interval', type=int, default=None,
help='Exit the program after the iteration is divisible '
'by this value.')
group.add_argument('--exit-duration-in-mins', type=int, default=None,
help='Exit the program after this many minutes.')
group.add_argument('--tensorboard-dir', type=str, default=None,
help='Write TensorBoard logs to this directory.')
group.add_argument('--no-masked-softmax-fusion',
action='store_false',
help='Disable fusion of query_key_value scaling, '
'masking, and softmax.',
dest='masked_softmax_fusion')
group.add_argument('--no-bias-gelu-fusion', action='store_false',
help='Disable bias and gelu fusion.',
dest='bias_gelu_fusion')
group.add_argument('--no-bias-dropout-fusion', action='store_false',
help='Disable bias and dropout fusion.',
dest='bias_dropout_fusion')
group.add_argument('--optimizer', type=str, default='adam',
choices=['adam', 'sgd'],
help='Optimizer function')
group.add_argument('--use-bnb-optimizer', action='store_true',
help='Use bitsandbytes optimizer for efficient training, '
'please refer to https://github.com/facebookresearch/bitsandbytes.',
dest='use_bnb_optimizer')
group.add_argument('--dataloader-type', type=str, default=None,
choices=['single', 'cyclic'],
help='Single pass vs multiple pass data loader')
group.add_argument('--cpu-optimizer', action='store_true',
help='Run optimizer on CPU')
group.add_argument('--cpu_torch_adam', action='store_true',
help='Use Torch Adam as optimizer on CPU.')
group.add_argument('--codecarbon-dir', type=str, default=None,
help='Write CodeCarbon logs to this directory.')
group.add_argument('--eval-only', type=bool, required=False,
help='If set to True, no train step will be performed, '
'and only the evaluation on the `valid` and `test` sets '
'will be performed.' )
group.add_argument('--skip-train-iteration-range', type=str, nargs='+', default=None,
help='Iteration ranges to skip. The values are one or more dash-separated ranges. e.g., 101-200 251-300.')
group.add_argument('--abort-on-unmet-fused-kernel-constraints', action='store_true',
help="If set to True, the program will abort if the constraints for loading a fused kernel aren't met")
return parser
def _add_initialization_args(parser):
group = parser.add_argument_group(title='initialization')
group.add_argument('--seed', type=int, default=1234,
help='Random seed used for python, numpy, '
'pytorch, and cuda.')
group.add_argument('--init-method-std', type=float, default=0.02,
help='Standard deviation of the zero mean normal '
'distribution used for weight initialization.')
group.add_argument('--init-method-xavier-uniform', action='store_true',
help='Enable Xavier uniform parameter initialization')
return parser
def _add_learning_rate_args(parser):
group = parser.add_argument_group(title='learning rate')
group.add_argument('--lr', type=float, default=None,
help='Initial learning rate. Depending on decay style '
'and initial warmup, the learning rate at each '
'iteration would be different.')
group.add_argument('--lr-decay-style', type=str, default='linear',
choices=['constant', 'linear', 'cosine'],
help='Learning rate decay function.')
group.add_argument('--lr-decay-iters', type=int, default=None,
help='number of iterations to decay learning rate over,'
' If None defaults to `--train-iters`')
group.add_argument('--lr-decay-samples', type=int, default=None,
help='number of samples to decay learning rate over,'
' If None defaults to `--train-samples`')
group.add_argument('--lr-decay-tokens', type=int, default=None,
help='number of tokens to decay learning rate over,'
' If not None will override iter/sample-based decay')
group.add_argument('--lr-warmup-fraction', type=float, default=None,
help='fraction of lr-warmup-(iters/samples) to use '
'for warmup (as a float)')
group.add_argument('--lr-warmup-iters', type=int, default=0,
help='number of iterations to linearly warmup '
'learning rate over.')
group.add_argument('--lr-warmup-samples', type=int, default=0,
help='number of samples to linearly warmup '
'learning rate over.')
group.add_argument('--warmup', type=int, default=None,
help='Old lr warmup argument, do not use. Use one of the '
'--lr-warmup-* arguments above')
group.add_argument('--min-lr', type=float, default=0.0,
                       help='Minimum value for learning rate. The scheduler '
                            'clips values below this threshold.')
group.add_argument('--override-lr-scheduler', action='store_true',
                       help='Reset the values of the scheduler (learning rate, '
                            'warmup iterations, minimum learning rate, maximum '
                            'number of iterations, and decay style) from input '
                            'arguments and ignore values from checkpoints. Note '
                            'that all the above values will be reset.')
group.add_argument('--use-checkpoint-lr-scheduler', action='store_true',
help='Use checkpoint to set the values of the scheduler '
'(learning rate, warmup iterations, minimum learning '
'rate, maximum number of iterations, and decay style '
'from checkpoint and ignore input arguments.')
return parser
def _add_checkpointing_args(parser):
group = parser.add_argument_group(title='checkpointing')
group.add_argument('--save', type=str, default=None,
help='Output directory to save checkpoints to.')
group.add_argument('--save-interval', type=int, default=None,
help='Number of iterations between checkpoint saves.')
group.add_argument('--no-save-optim', action='store_true', default=None,
help='Do not save current optimizer.')
group.add_argument('--no-save-rng', action='store_true', default=None,
help='Do not save current rng state.')
group.add_argument('--load', type=str, default=None,
help='Directory containing a model checkpoint.')
group.add_argument('--no-load-optim', action='store_true', default=None,
help='Do not load optimizer when loading checkpoint.')
group.add_argument('--no-load-rng', action='store_true', default=None,
help='Do not load rng state when loading checkpoint.')
group.add_argument('--finetune', action='store_true',
help='Load model for finetuning. Do not load optimizer '
'or rng state from checkpoint and set iteration to 0. '
'Assumed when loading a release checkpoint.')
return parser
def _add_mixed_precision_args(parser):
group = parser.add_argument_group(title='mixed precision')
group.add_argument('--fp16', action='store_true',
help='Run model in fp16 mode.')
group.add_argument('--bf16', action='store_true',
help='Run model in bfloat16 mode.')
group.add_argument('--loss-scale', type=float, default=None,
help='Static loss scaling, positive power of 2 '
                       'values can improve fp16 convergence. If None, dynamic '
'loss scaling is used.')
group.add_argument('--initial-loss-scale', type=float, default=2**32,
help='Initial loss-scale for dynamic loss scaling.')
group.add_argument('--min-loss-scale', type=float, default=1.0,
help='Minimum loss scale for dynamic loss scale.')
group.add_argument('--loss-scale-window', type=float, default=1000,
help='Window over which to raise/lower dynamic scale.')
group.add_argument('--hysteresis', type=int, default=2,
help='hysteresis for dynamic loss scaling')
group.add_argument('--fp32-residual-connection', action='store_true',
help='Move residual connections to fp32.')
group.add_argument('--no-query-key-layer-scaling', action='store_false',
help='Do not scale Q * K^T by 1 / layer-number.',
dest='apply_query_key_layer_scaling')
group.add_argument('--attention-softmax-in-fp32', action='store_true',
help='Run attention masking and softmax in fp32. '
'This flag is ignored unless '
'--no-query-key-layer-scaling is specified.')
group.add_argument('--accumulate-allreduce-grads-in-fp32',
action='store_true',
help='Gradient accumulation and all-reduce in fp32.')
group.add_argument('--fp16-lm-cross-entropy', action='store_true',
                       help='Move the cross entropy unreduced loss calculation '
'for lm head to fp16.')
return parser
def _add_distributed_args(parser):
group = parser.add_argument_group(title='distributed')
group.add_argument('--tensor-model-parallel-size', type=int, default=1,
help='Degree of tensor model parallelism.')
group.add_argument('--pipeline-model-parallel-size', type=int, default=1,
help='Degree of pipeline model parallelism.')
group.add_argument('--model-parallel-size', type=int, default=None,
help='Old model parallel argument, do not use. Use '
'--tensor-model-parallel-size instead.')
group.add_argument('--num-layers-per-virtual-pipeline-stage', type=int, default=None,
help='Number of layers per virtual pipeline stage')
group.add_argument('--distributed-backend', default='nccl',
choices=['nccl', 'gloo'],
help='Which backend to use for distributed training.')
group.add_argument('--DDP-impl', default='local',
choices=['local', 'torch'],
help='which DistributedDataParallel implementation '
'to use.')
group.add_argument('--use-contiguous-buffers-in-ddp', action='store_true',
help='If set, use contiguous buffer in DDP. Note that '
                       'this option only works with local DDP.' )
group.add_argument('--no-scatter-gather-tensors-in-pipeline', action='store_false',
help='Use scatter/gather to optimize communication of tensors in pipeline',
dest='scatter_gather_tensors_in_pipeline')
group.add_argument('--local_rank', type=int, default=None,
help='local rank passed from distributed launcher.')
group.add_argument('--lazy-mpu-init', type=bool, required=False,
help='If set to True, initialize_megatron() '
'skips DDP initialization and returns function to '
                       'complete it instead. Also turns on '
'--use-cpu-initialization flag. This is for '
'external DDP manager.' )
group.add_argument('--use-cpu-initialization', action='store_true',
default=None, help='If set, affine parallel weights '
'initialization uses CPU' )
return parser
def _add_validation_args(parser):
group = parser.add_argument_group(title='validation')
group.add_argument('--eval-iters', type=int, default=100,
                       help='Number of iterations to run for evaluation on the '
                            'validation/test set.')
group.add_argument('--eval-interval', type=int, default=1000,
help='Interval between running evaluation on '
'validation set.')
return parser
def _add_data_args(parser):
group = parser.add_argument_group(title='data and dataloader')
# option 1 for data loading (mutually exclusive with option2)
group.add_argument('--data-path', nargs='*', default=None,
                       help='Path to the training dataset. Accepted format: '
                            '1) a single data path, 2) multiple datasets in the '
'form: dataset1-weight dataset1-path dataset2-weight '
'dataset2-path ...')
group.add_argument('--split', type=str, default=None,
help='Comma-separated list of proportions for training,'
' validation, and test split. For example the split '
'`90,5,5` will use 90%% of data for training, 5%% for '
'validation and 5%% for test.')
# option 2 for data loading (mutually exclusive with option1)
# helper class to parse the --xxx-weighted-split-paths
# note here two args are set: extra valid dataset paths and names
class parse_data_paths(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if option_string == "--train-weighted-split-paths":
                assert len(values) == 1, ('Only 1 dataset group is allowed to '
                                          'be passed for the argument --train-weighted-split-paths')
            # make sure the string is given in the correct format
            err_message = ('Each data group should be input in the following format: '
                           '"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2" '
                           'where START < END')
for v in values:
                # each prefix consists of several datasets separated by commas
prefix = ":".join(v.split(":")[1:]) # remove GIVEN_NAME
datasets = prefix.split(",")
# check if each dataset is formatted like `WEIGHT START:END PATH`
for d in datasets:
assert len(d.split()) == 3, err_message
start, end = d.split()[1].split(":")
assert float(start) < float(end), err_message
names = [v.split(":")[0] for v in values]
prefixes = [":".join(v.split(":")[1:]).strip() for v in values]
weights = [[d.split()[0] for d in p.split(",")] for p in prefixes]
splits = [[d.split()[1] for d in p.split(",")] for p in prefixes]
paths = [[d.split()[2] for d in p.split(",")] for p in prefixes]
# # to keep consistency with Option 1 of data loading (through --data-path)
# # paths will contain strings on the following form
# # "WEIGHTS1 PATH1 WEIGHTS2 PATH2 WEIGHTS3 PATH3" for each dataset group
# # while data will be parsed in additional arguments below
# paths_option1_style = []
# for p, w in zip(paths, weights):
# paths_option1_style.append(" ".join([f"{w_i} {p_i}" for p_i, w_i in zip(p,w)]))
# setattr(args, self.dest, paths_option1_style)
setattr(args, self.dest, paths)
setattr(args, self.dest.replace("paths", "weights"), weights)
setattr(args, self.dest.replace("paths", "splits"), splits)
setattr(args, self.dest.replace("paths","names"), names)
group.add_argument('--train-weighted-split-paths', nargs='*', default=None,
                       help='Weights, splits and paths to groups of datasets. '
                            'Accepted format: ONE dataset group can be '
                            'submitted in the following form between double quotes: '
                            '"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2", '
                            'e.g.: "NAME_ABC: 0.6 0:0.6 A, 0.3 0:1 B, 0.1 0:1 C". '
                            'WEIGHT is used to up- and down-sample each dataset A,B,C in the group; '
                            'START:END indicates the split portion of the dataset.',
action=parse_data_paths)
group.add_argument('--valid-weighted-split-paths', nargs='*', default=None,
                       help='Weights, splits and paths to groups of datasets. '
                            'Accepted format: one or many dataset groups can be '
                            'submitted in the following form, each between double quotes: '
                            '"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2", '
                            'e.g.: "NAME_ABC: 0.6 0.6:0.8 A, 0.3 0:1 B, 0.1 0:1 C" '
                            '"NAME_CDE: 0.6 0.6:0.8 C, 0.3 0:1 D, 0.1 0:1 E". '
                            'Validation will be run on each of those groups independently.',
action=parse_data_paths)
group.add_argument('--test-weighted-split-paths', nargs='*', default=None,
                       help='Weights, splits and paths to groups of datasets. '
                            'Accepted format: one or many dataset groups can be '
                            'submitted in the following form, each between double quotes: '
                            '"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2", '
                            'e.g.: "NAME_ABC: 0.6 0.6:0.8 A, 0.3 0:1 B, 0.1 0:1 C" '
                            '"NAME_CDE: 0.6 0.6:0.8 C, 0.3 0:1 D, 0.1 0:1 E". '
                            'Testing will be run on each of those groups independently.',
action=parse_data_paths)
group.add_argument('--log-path', type=str, default=None,
                       help='Path to save the arguments file to.')
group.add_argument('--vocab-file', type=str, default=None,
help='Path to the vocab file.')
group.add_argument('--merge-file', type=str, default=None,
help='Path to the BPE merge file.')
group.add_argument('--vocab-extra-ids', type=int, default=0,
help='Number of additional vocabulary tokens. '
'They are used for span masking in the T5 model')
group.add_argument('--seq-length', type=int, default=None,
help='Maximum sequence length to process.')
group.add_argument('--encoder-seq-length', type=int, default=None,
                       help='Maximum encoder sequence length to process. '
                            'This should be exclusive of --seq-length.')
group.add_argument('--decoder-seq-length', type=int, default=None,
help="Maximum decoder sequence length to process.")
group.add_argument('--retriever-seq-length', type=int, default=256,
help='Maximum sequence length for the biencoder model '
' for retriever')
group.add_argument('--sample-rate', type=float, default=1.0,
help='sample rate for training data. Supposed to be 0 '
' < sample_rate < 1')
group.add_argument('--mask-prob', type=float, default=0.15,
help='Probability of replacing a token with mask.')
group.add_argument('--short-seq-prob', type=float, default=0.1,
help='Probability of producing a short sequence.')
group.add_argument('--mmap-warmup', action='store_true',
help='Warm up mmap files.')
group.add_argument('--num-workers', type=int, default=2,
help="Dataloader number of workers.")
group.add_argument('--tokenizer-type', type=str,
default=None,
choices=['BertWordPieceLowerCase',
'BertWordPieceCase',
'GPT2BPETokenizer',
'PretrainedFromHF'],
help='What type of tokenizer to use.')
group.add_argument("--tokenizer-name-or-path", type=str, default=None,
help="Name or path of the huggingface tokenizer.")
group.add_argument('--data-impl', type=str, default='infer',
choices=['lazy', 'cached', 'mmap', 'infer'],
help='Implementation of indexed datasets.')
group.add_argument('--reset-position-ids', action='store_true',
                       help='Reset position ids after end-of-document token.')
group.add_argument('--reset-attention-mask', action='store_true',
                       help='Reset self attention mask after '
'end-of-document token. Attention between tokens from different documents is null.')
group.add_argument('--eod-mask-loss', action='store_true',
help='Mask loss for the end of document tokens.')
group.add_argument('--loss-on-targets-only', action='store_true',
help='Mask loss on input sequence.')
group.add_argument('--reweight-loss-based-on-position-frequency', action="store_true",
help='Some objectives require us to sample loss_mask. This might introduce bias towards '
'specific positions. This option tries to un-bias the loss by reweighting loss on specific '
                            'positions based on how frequently we train on that position. '
                            'This is mostly used for prefix_lm training.')
return parser
def _add_autoresume_args(parser):
group = parser.add_argument_group(title='autoresume')
group.add_argument('--adlr-autoresume', action='store_true',
help='Enable autoresume on adlr cluster.')
group.add_argument('--adlr-autoresume-interval', type=int, default=1000,
                       help='Interval over which to check for the autoresume '
                            'termination signal.')
return parser
def _add_biencoder_args(parser):
group = parser.add_argument_group(title='biencoder')
# network size
group.add_argument('--ict-head-size', type=int, default=None,
help='Size of block embeddings to be used in ICT and '
'REALM (paper default: 128)')
group.add_argument('--biencoder-projection-dim', type=int, default=0,
help='Size of projection head used in biencoder (paper'
' default: 128)')
group.add_argument('--biencoder-shared-query-context-model', action='store_true',
help='Whether to share the parameters of the query '
'and context models or not')
# checkpointing
group.add_argument('--ict-load', type=str, default=None,
help='Directory containing an ICTBertModel checkpoint')
group.add_argument('--bert-load', type=str, default=None,
                       help='Directory containing a BertModel checkpoint '
'(needed to start ICT and REALM)')
# data
group.add_argument('--titles-data-path', type=str, default=None,
help='Path to titles dataset used for ICT')
group.add_argument('--query-in-block-prob', type=float, default=0.1,
help='Probability of keeping query in block for '
'ICT dataset')
group.add_argument('--use-one-sent-docs', action='store_true',
help='Whether to use one sentence documents in ICT')
group.add_argument('--evidence-data-path', type=str, default=None,
                       help='Path to Wikipedia evidence from the DPR paper')
# training
group.add_argument('--retriever-report-topk-accuracies', nargs='+', type=int,
default=[], help="Which top-k accuracies to report "
"(e.g. '1 5 20')")
group.add_argument('--retriever-score-scaling', action='store_true',
help='Whether to scale retriever scores by inverse '
'square root of hidden size')
# faiss index
group.add_argument('--block-data-path', type=str, default=None,
help='Where to save/load BlockData to/from')
group.add_argument('--embedding-path', type=str, default=None,
help='Where to save/load Open-Retrieval Embedding'
' data to/from')
# indexer
group.add_argument('--indexer-batch-size', type=int, default=128,
                       help='How large a batch to use when doing indexing '
                            'jobs.')
group.add_argument('--indexer-log-interval', type=int, default=1000,
help='After how many batches should the indexer '
'report progress')
return parser
def _add_vit_args(parser):
group = parser.add_argument_group(title="vit")
group.add_argument('--num-classes', type=int, default=1000,
                       help='Number of classes in the vision classification task')
group.add_argument('--img-dim', type=int, default=224,
help='Image size for vision classification task')
group.add_argument('--num-channels', type=int, default=3,
help='Number of channels in input image data')
group.add_argument('--patch-dim', type=int, default=16,
                       help='Patch dimension used in ViT')
return parser
def _add_zero_args(parser):
"""Text generate arguments."""
group = parser.add_argument_group('ZeRO configurations', 'configurations')
group.add_argument("--zero-stage", type=int, default=1.0)
group.add_argument('--zero-reduce-scatter', action='store_true',
help='Use reduce scatter if specified')
    group.add_argument('--zero-contigious-gradients', action='store_true',
                       help='Use contiguous memory optimization if specified')
    group.add_argument("--zero-reduce-bucket-size", type=int, default=0)
    group.add_argument("--zero-allgather-bucket-size", type=int, default=0)
group.add_argument('--remote-device', type=str, default='none', choices=['none', 'cpu', 'nvme'],
help='Remote device for ZeRO-3 initialized parameters.')
group.add_argument('--use-pin-memory', action='store_true',
help='Use pinned CPU memory for ZeRO-3 initialized model parameters.')
return parser
def _add_memoryopt_args(parser):
"""Memory optimization arguments."""
group = parser.add_argument_group('Memory optimizations', 'configurations')
group.add_argument("--scattered-embeddings", action='store_true',
help='Save memory by scattering embedding activations. '
'Introduces dropout differences across MP configurations.')
group.add_argument("--split-transformers", action='store_true',
help='Save memory by splitting transformer layers into two parts, '
'allowing for more frequent activation checkpoint savings.')
group.add_argument("--memory-centric-tiled-linear", action="store_true",
help='Save memory by tiling with deepspeed.zero.TiledLinear.')
group.add_argument("--tile-factor", type=int, default=1,
help='Make all linear layers the same size of [hidden/tile_factor, hidden/tile_factor]. '
'Must be enabled with --memory-centric-tiled-linear. '
'Example A: if tile_factor=1, the qkv layer [hidden, 3* hidden] would be converted into [1,3] tiles of size [hidden,hidden]. '
'Example B: if tile_factor=2, the intermediate layer [4*hidden, hidden] will be converted into [8, 2] tiles of size [hidden/2, hidden/2]. '
'Default is 1.')
return parser
def _add_activation_checkpoint_args(parser):
group = parser.add_argument_group('Activation Checkpointing',
'Checkpointing Configurations')
    group.add_argument('--deepspeed-activation-checkpointing', action='store_true',
                       help='Uses activation checkpointing from DeepSpeed.')
    group.add_argument('--partition-activations', action='store_true',
                       help='Partition activations across GPUs before checkpointing.')
    group.add_argument('--contigious-checkpointing', action='store_true',
                       help='Contiguous memory checkpointing for activations.')
group.add_argument('--checkpoint-in-cpu', action='store_true',
help='Move the activation checkpoints to CPU.')
group.add_argument('--synchronize-each-layer', action='store_true',
                       help='Performs a synchronization at the beginning and end of each checkpointed layer.')
group.add_argument('--profile-backward', action='store_true',
help='Enables backward pass profiling for checkpointed layers.')
return parser
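# A minimal usage sketch, assuming these `_add_*_args` helpers are meant to be
# chained onto a single parser (each one attaches an argument group and returns
# the parser). The helper selection below is illustrative, not exhaustive.
if __name__ == "__main__":
    import argparse
    _parser = argparse.ArgumentParser(description="argument-group demo")
    for _add_args in (_add_initialization_args, _add_learning_rate_args,
                      _add_checkpointing_args, _add_mixed_precision_args,
                      _add_distributed_args, _add_validation_args):
        _parser = _add_args(_parser)
    # All of these arguments have defaults, so parsing an empty list works.
    print(_parser.parse_args([]))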
|
def metade(valor=0, formato=False):
res = valor/2
return res if formato is False else moeda(res)
def dobro(valor=0, formato=False):
res = valor*2
return res if formato is False else moeda(res)
def aumentar(valor=0, porcentagem=0, formato=False):
res = valor+(valor * porcentagem/100)
return res if formato is False else moeda(res)
def diminuir(valor=0, porcentagem=0, formato=False):
res = valor-(valor * porcentagem/100)
return res if not formato else moeda(res)
#! remember: if formato: => formato is True ||| if not formato: => formato is False
# here moeda stays as the second parameter because the first value passed in is the amount
def moeda(valor=0, moeda='R$'): # the currency symbol can be changed just by passing a different one at call time
return f'{moeda}{valor:.2f}'.replace('.', ',')
def resumo(p=0, por1=10, por2=5):
print('-'*40)
    # .rjust() => right | .ljust() => left | .center() => center
print('RESUMO DO VALOR'.center(40))
print('-'*40)
print(f'Preço Analisado:\t{moeda(p)}')
print(f'Dobro do preço:\t\t{dobro(p,True)}')
print(f'Metade do preço:\t{metade(p,True)}')
print(f'{por1}% de aumento:\t\t{aumentar(p,por1,True)}')
print(f'{por2}% de Redução:\t\t{diminuir(p,por2,True)}')
print('-'*40)
'''
print(f'\nA metade de {moeda.moeda(p, "US$")} é {moeda.metade(p, True)}')
print(f'O dobro de {moeda.moeda(p)} é {moeda.dobro(p, True)}')
print(f'Aumentando 10% temos {moeda.aumentar(p, 10, True)}')
print(f'Reduzindo 13% temos {moeda.diminuir(p, 13, True)}')
'''
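# Usage sketch (values are illustrative), assuming this module is imported as `moeda`:
#     >>> moeda.moeda(1500)              # 'R$1500,00'
#     >>> moeda.aumentar(100, 10, True)  # 'R$110,00'
#     >>> moeda.resumo(50, 80, 35)       # prints the formatted summary block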
|
#!/usr/bin/env python3
#
# Copyright (c) Bo Peng and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
import ast
import copy
import os
import subprocess
import sys
import time
from collections import defaultdict
from collections.abc import Mapping, Sequence
from typing import List
import zmq
from .controller import close_socket, create_socket, send_message_to_controller
from .messages import encode_msg, decode_msg
from .eval import SoS_eval, SoS_exec, accessed_vars, KeepOnlyImportAndDefine
from .executor_utils import (
__named_output__,
__null_func__,
__output_from__,
__traced__,
clear_output,
create_task,
get_traceback_msg,
reevaluate_output,
statementMD5,
validate_step_sig,
verify_input,
ExecuteError,
)
from .syntax import (
SOS_DEPENDS_OPTIONS,
SOS_INPUT_OPTIONS,
SOS_OUTPUT_OPTIONS,
SOS_TARGETS_OPTIONS,
)
from .targets import (
RemovedTarget,
RuntimeInfo,
UnavailableLock,
sos_variable,
UnknownTarget,
dynamic,
file_target,
sos_step,
sos_targets,
invalid_target
)
from .tasks import MasterTaskParams, TaskFile
from .utils import (
ArgumentError,
StopInputGroup,
TerminateExecution,
env,
get_traceback,
short_repr,
ProcessKilled,
get_localhost_ip,
textMD5,
)
__all__: List = []
class TaskManager:
# manage tasks created by the step
def __init__(self, num_tasks, trunk_size, trunk_workers):
super(TaskManager, self).__init__()
self.num_tasks = num_tasks
import math
self._slots = [[] for x in range(math.ceil(num_tasks / trunk_size))]
self._last_slot_size = (
trunk_size if (num_tasks % trunk_size == 0) else (num_tasks % trunk_size)
)
self.trunk_size = trunk_size
self.trunk_workers = trunk_workers
self._submitted_tasks = []
# entire groups
self._unsubmitted_slots = []
# collection of partial groups if some tasks are completed
self._unsubmitted_tasks = []
# derived from _unsubmitted_slots
self._all_ids = []
self._all_output = []
#
self._terminate = False
#
self._tags = {}
def set(self, idx, task_def):
slot = idx // self.trunk_size
#
# slot [
# [idx, None] <- for empty
# [idx, taskdef] <- for non empty
# ]
self._slots[slot].append([idx, task_def])
# the slot is full
if len(self._slots[slot]) == self.trunk_size or (
slot == len(self._slots) - 1
and len(self._slots[slot]) == self._last_slot_size
):
            # if there are valid tasks
            if not all([x[1] is None for x in self._slots[slot]]):
                # remove empty tasks and sort by id
                if self.trunk_size == 1 or any(x[1] is None for x in self._slots[slot]):
                    # if the group is partial, send its tasks to the partial list
self._unsubmitted_tasks.extend(
[x[1] for x in self._slots[slot] if x[1] is not None]
)
else:
self._unsubmitted_slots.append(
sorted(self._slots[slot], key=lambda x: x[0])
)
            # clear the slot
self._slots[slot] = []
if not task_def:
return
if isinstance(task_def[2], Sequence):
self._all_output.extend(task_def[2])
self._all_ids.append(task_def[0])
self._tags[task_def[0]] = task_def[1].tags
def tags(self, task_id):
return self._tags.get(task_id, [])
def index_of(self, task_id):
if task_id in self._all_ids:
return self._all_ids.index(task_id)
else:
return -1
def has_output(self, output):
if not isinstance(output, Sequence) or not self._unsubmitted_slots:
return False
return any(x in self._all_output for x in output)
def get_job(self, all_tasks=False):
# single tasks
ids = []
# submit all tasks without trunk, easy
for slot in self._unsubmitted_slots:
# create a master task
master = MasterTaskParams(self.trunk_workers)
for _, (task_id, taskdef, _) in slot:
master.push(task_id, taskdef)
ids.append(master.ID)
TaskFile(master.ID).save(master.finalize())
send_message_to_controller(
[
"workflow_sig",
"task",
master.ID,
f"{{"creation_time": {time.time()}}}",
]
)
self._unsubmitted_slots = []
# individual tasks...
if self.trunk_size == 1 or all_tasks:
to_be_submitted = self._unsubmitted_tasks
[
to_be_submitted.extend([x[1] for x in slot if x[1] is not None])
for slot in self._slots
if slot
]
self._unsubmitted_tasks = []
else:
# save complete blocks
num_tasks = (
len(self._unsubmitted_tasks) // self.trunk_size * self.trunk_size
)
to_be_submitted = self._unsubmitted_tasks[:num_tasks]
self._unsubmitted_tasks = self._unsubmitted_tasks[num_tasks:]
if self.trunk_size == 1 or (all_tasks and len(self._unsubmitted_tasks) == 1):
for task_id, taskdef, _ in to_be_submitted:
                # if the task file already exists, the task might be running, so we do not change
                # the task file. Otherwise we are changing the status of the task
TaskFile(task_id).save(taskdef)
send_message_to_controller(
[
"workflow_sig",
"task",
task_id,
f"{{"creation_time": {time.time()}}}",
]
)
ids.append(task_id)
else:
master = None
for task_id, taskdef, _ in to_be_submitted:
if master is not None and master.num_tasks() == self.trunk_size:
ids.append(master.ID)
TaskFile(master.ID).save(master)
send_message_to_controller(
[
"workflow_sig",
"task",
master.ID,
f"{{"creation_time": {time.time()}}}",
]
)
master = None
if master is None:
master = MasterTaskParams(self.trunk_workers)
master.push(task_id, taskdef)
# the last piece
if master is not None:
TaskFile(master.ID).save(master.finalize())
send_message_to_controller(
[
"workflow_sig",
"task",
master.ID,
f"{{"creation_time": {time.time()}}}",
]
)
ids.append(master.ID)
if not ids:
return None
self._submitted_tasks.extend(ids)
return ids
def clear_submitted(self):
self._submitted_tasks = []
def expand_input_files(*args, **kwargs):
# if unspecified, use __step_output__ as input (default)
# resolve dynamic input.
args = [x.resolve() if isinstance(x, dynamic) else x for x in args]
kwargs = {
x: (y.resolve() if isinstance(y, dynamic) else y) for x, y in kwargs.items()
}
# if no input,
if not args and not kwargs:
return env.sos_dict["step_input"]
# if only group_by ...
elif not args and all(x in SOS_TARGETS_OPTIONS for x in kwargs.keys()):
return sos_targets(
env.sos_dict["step_input"],
_verify_existence=env.config["error_mode"] != "ignore",
**kwargs,
)
else:
return sos_targets(
*args,
**kwargs,
_verify_existence=env.config["error_mode"] != "ignore",
_undetermined=False,
_source=env.sos_dict["step_name"],
)
def expand_depends_files(*args, **kwargs):
"""handle directive depends"""
args = [x.resolve() if isinstance(x, dynamic) else x for x in args]
kwargs = {
x: (y.resolve() if isinstance(y, dynamic) else y) for x, y in kwargs.items()
}
return sos_targets(
*args,
**kwargs,
_verify_existence=True,
_undetermined=False,
_source=env.sos_dict["step_name"],
)
def expand_output_files(value, *args, **kwargs):
"""Process output files (perhaps a pattern) to determine input files."""
if any(isinstance(x, dynamic) for x in args) or any(
isinstance(y, dynamic) for y in kwargs.values()
):
return sos_targets(_undetermined=value)
else:
return sos_targets(
*args, **kwargs, _undetermined=False, _source=env.sos_dict["step_name"]
)
def parse_shared_vars(option):
shared_vars = set()
if not option:
return shared_vars
if isinstance(option, str):
shared_vars.add(option)
elif isinstance(option, Mapping):
for val in option.values():
shared_vars |= accessed_vars(val, mode="eval")
elif isinstance(option, Sequence):
for item in option:
if isinstance(item, str):
shared_vars.add(item)
elif isinstance(item, Mapping):
for val in item.values():
shared_vars |= accessed_vars(val, mode="eval")
return shared_vars
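# For illustration (hypothetical variable names), the accepted forms map roughly as follows;
# the mapping case relies on accessed_vars() to extract the names used in the expression:
#     parse_shared_vars('counter')               -> {'counter'}
#     parse_shared_vars(['alpha', 'beta'])       -> {'alpha', 'beta'}
#     parse_shared_vars({'summary': 'max(rng)'}) -> whatever names accessed_vars() reports for 'max(rng)'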
def evaluate_shared(vars, option):
# handle option shared and store variables in a "__shared_vars" variable
shared_vars = {}
env.sos_dict.quick_update(vars[-1])
for key in vars[-1].keys():
try:
if key in ("output", "depends", "input"):
env.logger.warning(
f"Cannot overwrite variable step_{key} from substep variable {key}"
)
else:
env.sos_dict.set("step_" + key, [x[key] for x in vars])
except Exception as e:
env.logger.warning(f"Failed to create step level variable step_{key}: {e}")
if isinstance(option, str):
if option in env.sos_dict:
shared_vars[option] = env.sos_dict[option]
else:
raise RuntimeError(f"shared variable does not exist: {option}")
elif isinstance(option, Mapping):
for var, val in option.items():
try:
if var == val:
shared_vars[var] = env.sos_dict[var]
else:
shared_vars[var] = SoS_eval(val)
except Exception as e:
raise RuntimeError(
f"Failed to evaluate shared variable {var} from expression {val}: {e}"
)
# if there are dictionaries in the sequence, e.g.
# shared=['A', 'B', {'C':'D"}]
elif isinstance(option, Sequence):
for item in option:
if isinstance(item, str):
if item in env.sos_dict:
shared_vars[item] = env.sos_dict[item]
else:
raise RuntimeError(f"shared variable does not exist: {option}")
elif isinstance(item, Mapping):
for var, val in item.items():
try:
if var == val:
continue
else:
shared_vars[var] = SoS_eval(val)
except Exception as e:
raise RuntimeError(
f"Failed to evaluate shared variable {var} from expression {val}: {e}"
)
else:
raise RuntimeError(
f"Unacceptable shared option. Only str or mapping are accepted in sequence: {option}"
)
else:
raise RuntimeError(
f"Unacceptable shared option. Only str, sequence, or mapping are accepted in sequence: {option}"
)
return shared_vars
def get_value_of_param(name, param_list, extra_dict={}):
tree = ast.parse(f"__null_func__({param_list})")
    # x.func can be an attribute (e.g. a.b()) and may not have an id
kwargs = [
x for x in ast.walk(tree) if x.__class__.__name__ == "keyword" and x.arg == name
]
if not kwargs:
return []
try:
return [ast.literal_eval(kwargs[0].value)]
except Exception:
return [
eval(
compile(
ast.Expression(body=kwargs[0].value),
filename="<string>",
mode="eval",
),
extra_dict,
)
]
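# For illustration, with a hypothetical task parameter string:
#     get_value_of_param('trunk_size', "queue='short', trunk_size=2")  -> [2]
#     get_value_of_param('walltime',   "queue='short', trunk_size=2")  -> []
# so callers can treat an empty list as "parameter not specified".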
def is_sos_run_the_only_last_stmt(stmt):
tree = ast.parse(stmt)
return (
len(tree.body) >= 1
and isinstance(tree.body[-1], ast.Expr)
and isinstance(tree.body[-1].value, ast.Call)
and hasattr(tree.body[-1].value.func, "id")
and tree.body[-1].value.func.id == "sos_run"
and len(
[
x
for x in ast.walk(tree)
if isinstance(x, ast.Call)
and hasattr(x.func, "id")
and x.func.id == "sos_run"
]
)
== 1
)
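# For illustration (hypothetical workflow names):
#     is_sos_run_the_only_last_stmt("print('hi')\nsos_run('sub')")  -> True
#     is_sos_run_the_only_last_stmt("res = sos_run('sub')")         -> False (not a bare expression)
#     is_sos_run_the_only_last_stmt("sos_run('a')\nsos_run('b')")   -> False (more than one sos_run call)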
class Base_Step_Executor:
# This base class defines how steps are executed. The derived classes will reimplement
# some function to behave differently in different modes.
#
def __init__(self, step):
self.step = step
self.task_manager = None
self.exec_error = ExecuteError(self.step.step_name())
#
# Functions that should be redefined in derived class
#
def submit_tasks(self, tasks):
raise RuntimeError("Undefined base function submit_tasks")
def wait_for_tasks(self, tasks, all_submitted):
# this will be redefined in subclasses
raise RuntimeError("Undefined base function wait_for_tasks")
def wait_for_subworkflows(self, allow_pending=0):
raise RuntimeError("Undefined base function wait_for_subworkflows")
def handle_unknown_target(self, e):
raise RuntimeError("Undefined base function handle_unknown_target")
def init_input_output_vars(self):
# if there is __step_output__ from previous step, use it as default input
# otherwise, reset to empty
if (
"__step_output__" not in env.sos_dict
or env.sos_dict["__step_output__"].unspecified()
):
env.sos_dict.set("step_input", sos_targets([]))
else:
env.sos_dict.set("step_input", env.sos_dict["__step_output__"])
# input can be Undetermined from undetermined output from last step
env.sos_dict.set("_input", copy.deepcopy(env.sos_dict["step_input"]))
# if there is default output for auxiliary steps, use it as step_output and _output
# otherwise reset to unspecified.
if "__default_output__" in env.sos_dict:
# if step is triggered by sos_step, it should not be considered as
# output of the step. #981
env.sos_dict.set(
"__default_output__",
sos_targets(
[
x
for x in env.sos_dict["__default_output__"]._targets
if not isinstance(x, sos_step)
]
),
)
env.sos_dict.set(
"step_output", copy.deepcopy(env.sos_dict["__default_output__"])
)
env.sos_dict.set(
"_output", copy.deepcopy(env.sos_dict["__default_output__"])
)
else:
env.sos_dict.set("step_output", sos_targets([]))
# output is said to be unspecified until output: is used
env.sos_dict.set("_output", sos_targets(_undetermined=True))
env.sos_dict.set("step_depends", sos_targets([]))
env.sos_dict.set("_depends", sos_targets([]))
#
# Common functions
#
def verify_output(self):
missing = sos_targets([])
if env.sos_dict["step_output"] is None:
return
if not env.sos_dict["step_output"].valid():
raise RuntimeError(
"Output of a completed step cannot be undetermined or unspecified."
)
for target in env.sos_dict["step_output"]:
if isinstance(target, (sos_step, invalid_target)):
continue
if isinstance(target, str):
if not file_target(target).target_exists("any"):
if env.config["run_mode"] == "dryrun":
# in dryrun mode, we just create these targets
file_target(target).create_placeholder()
else:
# latency wait for 2 seconds because the file system might be slow
if env.config["run_mode"] == "run":
time.sleep(2)
if not file_target(target).target_exists("any"):
if env.config["error_mode"] == "ignore":
missing.extend(target)
else:
raise RuntimeError(
                                    f'Output target {target} does not exist after the completion of step {env.sos_dict["step_name"]} (curdir={os.getcwd()})'
)
elif not target.target_exists("any"):
if env.config["run_mode"] == "dryrun":
target.create_placeholder()
else:
if env.config["run_mode"] == "run":
time.sleep(2)
if not target.target_exists("any"):
if env.config["error_mode"] == "ignore":
missing.extend(target)
else:
raise RuntimeError(
                                f'Output target {target} does not exist after the completion of step {env.sos_dict["step_name"]}'
)
return missing
# directive input
def process_input_args(self, ifiles: sos_targets, **kwargs):
"""This function handles directive input and all its parameters.
It
determines and set __step_input__
determines and set pattern variables if needed
returns
_groups
_vars
which are groups of _input and related _vars
"""
if ifiles.unspecified():
env.sos_dict.set("step_input", sos_targets([]))
env.sos_dict.set("_input", sos_targets([]))
env.sos_dict.set("step_output", sos_targets())
return [sos_targets([])], [{}]
assert isinstance(ifiles, sos_targets)
if env.sos_dict.get("__dynamic_input__", False):
runner = self.verify_dynamic_targets(
[x for x in ifiles if isinstance(x, file_target)]
)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
# input file is the filtered files
env.sos_dict.set("step_input", ifiles)
env.sos_dict.set("_input", ifiles)
if ifiles._num_groups() == 0:
ifiles._group("all")
#
return ifiles.groups
def verify_dynamic_targets(self, target):
yield None
return True
def process_depends_args(self, dfiles: sos_targets, **kwargs):
for k in kwargs.keys():
if k not in SOS_DEPENDS_OPTIONS:
raise RuntimeError(f"Unrecognized depends option {k}")
if dfiles.undetermined():
raise ValueError(r"Depends needs to handle undetermined")
if env.sos_dict.get("__dynamic_depends__", False):
runner = self.verify_dynamic_targets(
[x for x in dfiles if isinstance(x, file_target)]
)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
env.sos_dict.set("_depends", dfiles)
env.sos_dict.set("step_depends", dfiles)
def process_output_args(self, ofiles: sos_targets, **kwargs):
for k in kwargs.keys():
if k not in SOS_OUTPUT_OPTIONS:
raise RuntimeError(f"Unrecognized output option {k}")
if ofiles._num_groups() > 0:
if ofiles._num_groups() == 1:
ofiles = ofiles._get_group(0)
elif ofiles._num_groups() != len(self._substeps):
raise RuntimeError(
f"Inconsistent number of output ({ofiles._num_groups()}) and input ({len(self._substeps)}) groups."
)
else:
ofiles = ofiles._get_group(env.sos_dict["_index"])
# create directory
if ofiles.valid():
parents = set(
[
os.path.abspath(os.path.join(ofile, os.pardir))
for ofile in ofiles
if isinstance(ofile, file_target)
]
)
for parent_dir in parents:
if parent_dir and not os.path.isdir(parent_dir):
os.makedirs(parent_dir, exist_ok=True)
# set variables
env.sos_dict.set("_output", ofiles)
env.sos_dict.set("step_output", ofiles)
#
for ofile in ofiles:
oname = ofile.target_name()
if oname in self._all_outputs:
raise ValueError(
                    f'Output {ofile} from substep {env.sos_dict["_index"]} of {env.sos_dict["__num_groups__"]} substeps overlaps with output from a previous substep.'
)
self._all_outputs.add(oname)
def submit_task(self, task_info):
if self.task_manager is None:
if self.step.task_params:
for key in ("trunk_size", "trunk_workers", "queue"):
val = get_value_of_param(
key, self.step.task_params, extra_dict=env.sos_dict.dict()
)
if val:
env.sos_dict["_runtime"][key] = val[0]
if "trunk_size" in env.sos_dict["_runtime"]:
trunk_size = env.sos_dict["_runtime"]["trunk_size"]
if trunk_size is None or trunk_size <= 0:
trunk_size = env.sos_dict["__num_groups__"]
if not isinstance(trunk_size, int):
raise ValueError(
f'An integer value or None is expected for runtime option trunk_size, "{trunk_size}" provided'
)
else:
trunk_size = 1
if "trunk_workers" in env.sos_dict["_runtime"]:
if "nodes" in env.sos_dict["_runtime"]:
raise ValueError(
'Option "trunk_workers" that specifies number of nodes and processes for the execution '
'of single-node jobs and option "nodes" that specifies number of nodes for single multi-node '
"jobs cannot be used at the same time."
)
trunk_workers = env.sos_dict["_runtime"]["trunk_workers"]
else:
trunk_workers = None
# if 'queue' in env.sos_dict['_runtime'] and env.sos_dict['_runtime']['queue']:
# host = env.sos_dict['_runtime']['queue']
# else:
# # otherwise, use workflow default
# host = '__default__'
self.task_manager = TaskManager(
env.sos_dict["__num_groups__"], trunk_size, trunk_workers
)
task_id = task_info["task_id"]
task_index = task_info["index"]
if task_id is None:
self.task_manager.set(task_index, None)
return None
taskdef = task_info["task_def"]
task_vars = task_info["task_vars"]
# 618
# it is possible that identical tasks are executed (with different underlying random numbers)
# we should either give a warning or produce different ids...
if self.task_manager.index_of(task_id) >= 0:
raise RuntimeError(
                f'Task {task_id} generated for (_index={env.sos_dict["_index"]}) is identical to a previous one (_index={self.task_manager.index_of(task_id)}).'
)
elif self.task_manager.has_output(task_vars["_output"]):
raise RuntimeError(
                f'Task produces output files {", ".join(task_vars["_output"])} that are output of other tasks.'
)
# if no trunk_size, the job will be submitted immediately
# otherwise tasks will be accumulated and submitted in batch
self.task_manager.set(task_index, (task_id, taskdef, task_vars["_output"]))
tasks = self.task_manager.get_job()
if tasks:
self.submit_tasks(tasks)
return task_id
def wait_for_results(self, all_submitted):
# this is a generator function because wait_for_tasks is a generator
# function and needs to yield to the caller
if self.concurrent_substep:
try:
runner = self.wait_for_substep()
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
if self.task_manager is None:
return {}
#
# report task
# what we should do here is to get the alias of the Host
        # because it can be different (e.g., not localhost)
queue = env.sos_dict["_runtime"]["queue"]
# submit the last batch of tasks
tasks = self.task_manager.get_job(all_tasks=True)
if tasks:
self.submit_tasks(tasks)
# waiting for results of specified IDs
try:
# 1218
runner = self.wait_for_tasks(
self.task_manager._submitted_tasks, all_submitted
)
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration as e:
results = e.value
for id, result in results.items():
# turn to string to avoid naming lookup issue
rep_result = {
x: (y if isinstance(y, (int, bool, float, str)) else short_repr(y))
for x, y in result.items()
}
rep_result["tags"] = " ".join(self.task_manager.tags(id))
rep_result["queue"] = queue
send_message_to_controller(["workflow_sig", "task", id, repr(rep_result)])
self.task_manager.clear_submitted()
# if in dryrun mode, we display the output of the dryrun task
if env.config["run_mode"] == "dryrun":
tid = list(results.keys())[0]
tf = TaskFile(tid)
if tf.has_stdout():
print(TaskFile(tid).stdout)
for idx, task in self.proc_results.items():
# if it is done
if isinstance(task, dict):
continue
if task in results:
self.proc_results[idx] = results[task]
else:
# can be a subtask
for _, mres in results.items():
if "subtasks" in mres and task in mres["subtasks"]:
self.proc_results[idx] = mres["subtasks"][task]
# elif 'exception' in mres:
# self.proc_results[idx] = mres
#
# check if all have results?
if any(isinstance(x, str) for x in self.proc_results.values()):
raise RuntimeError(
                f'Failed to get results for tasks {", ".join(x for x in self.proc_results.values() if isinstance(x, str))}'
)
#
for idx, res in self.proc_results.items():
if "skipped" in res and res["skipped"]:
self.completed["__task_skipped__"] += 1
# complete case: task skipped
send_message_to_controller(
["progress", "substep_completed", env.sos_dict["step_id"]]
)
else:
# complete case: task completed
send_message_to_controller(
["progress", "substep_ignored", env.sos_dict["step_id"]]
)
self.completed["__task_completed__"] += 1
if "shared" in res:
self.shared_vars[idx].update(res["shared"])
def log(self, stage=None, msg=""):
if stage == "start":
env.logger.info(
                f'{"Checking" if env.config["run_mode"] == "dryrun" else "Running"} ``{self.step.step_name(True)}``: {self.step.comment.strip()}'
)
elif stage == "input statement":
if "STEP" in env.config["SOS_DEBUG"] or "ALL" in env.config["SOS_DEBUG"]:
env.log_to_file("STEP", f"Handling input statement {msg}")
elif stage == "_input":
if env.sos_dict["_input"] is not None and len(env.sos_dict["_input"]) > 0:
env.logger.debug(
                    f'_input: ``{short_repr(env.sos_dict["_input"])}``{msg}'
)
elif stage == "_depends":
if env.sos_dict["_depends"] is not None:
env.logger.debug(
                    f'_depends: ``{short_repr(env.sos_dict["_depends"])}``{msg}'
)
elif stage == "input":
if env.sos_dict["step_input"] is not None:
env.logger.info(
                    f'input: ``{short_repr(env.sos_dict["step_input"])}``{msg}'
)
elif stage == "output":
if (
env.sos_dict["step_output"] is not None
and len(env.sos_dict["step_output"]) > 0
):
env.logger.info(
                    f'``{self.step.step_name(True)}`` output: ``{short_repr(env.sos_dict["step_output"])}``{msg}'
)
def execute(self, stmt, return_result=False):
try:
self.last_res = SoS_exec(
stmt,
return_result=return_result or env.config["run_mode"] == "interactive",
)
if return_result:
return self.last_res
except (StopInputGroup, TerminateExecution, UnavailableLock):
raise
except subprocess.CalledProcessError as e:
raise RuntimeError(e.stderr)
except ArgumentError:
raise
except ProcessKilled:
raise
except KeyboardInterrupt as e:
raise RuntimeError(get_traceback_msg(e))
except Exception as e:
raise RuntimeError(get_traceback_msg(e))
def prepare_substep(self):
# socket to collect result
self.result_pull_socket = create_socket(
env.zmq_context, zmq.PULL, "substep result collector"
)
local_ip = get_localhost_ip()
port = self.result_pull_socket.bind_to_random_port(f"tcp://{local_ip}")
env.config["sockets"]["result_push_socket"] = f"tcp://{local_ip}:{port}"
def submit_substep(self, param):
send_message_to_controller(["substep", param])
def process_returned_substep_result(self, till=None, wait=True):
while True:
if not wait:
# 1213
cur_index = env.sos_dict["_index"]
pending_substeps = cur_index - self._completed_concurrent_substeps + 1
if pending_substeps < (
100
if isinstance(self.concurrent_substep, bool)
else self.concurrent_substep
):
if not self.result_pull_socket.poll(0):
return
elif (
"STEP" in env.config["SOS_DEBUG"]
or "ALL" in env.config["SOS_DEBUG"]
):
# if there are more than 100 pending substeps
# we wait indefinitely for the results
env.log_to_file(
"STEP",
f"Wait for more substeps to be done before submitting. (index={cur_index}, processed={self._completed_concurrent_substeps})",
)
elif self._completed_concurrent_substeps == till:
return
yield self.result_pull_socket
res = decode_msg(self.result_pull_socket.recv())
if "exception" in res:
if isinstance(res["exception"], ProcessKilled):
raise res["exception"]
elif isinstance(res["exception"], RemovedTarget):
pass
elif env.config["error_mode"] == "ignore":
idx_msg = (
                        f'(id={env.sos_dict["step_id"]}, index={res["index"]})'
                        if "index" in res and len(self._substeps) > 1
                        else f'(id={env.sos_dict["step_id"]})'
)
env.logger.warning(
f"""Ignoring error from ``{self.step.step_name(True)}`` {idx_msg}: {res["exception"]}."""
)
res["output"] = sos_targets(invalid_target())
elif env.config["error_mode"] == "abort":
idx_msg = (
                        f'(id={env.sos_dict["step_id"]}, index={res["index"]})'
                        if "index" in res and len(self._substeps) > 1
                        else f'(id={env.sos_dict["step_id"]})'
)
self.exec_error.append(idx_msg, res["exception"])
                    # try to stop everything, but wait for the already submitted substeps to
                    # complete
                    self._completed_concurrent_substeps += 1
                    waiting = till - self._completed_concurrent_substeps
env.logger.warning(
                        f'``{self.step.step_name(True)}`` {idx_msg} returns an error.{f" Terminating step after completing {waiting} submitted substeps." if waiting else " Terminating now."}'
)
for i in range(waiting):
yield self.result_pull_socket
res = decode_msg(self.result_pull_socket.recv())
if "exception" in res:
self.exec_error.append(
                                f'index={res["index"]}', res["exception"]
)
raise self.exec_error
else:
# default or unspecified
idx_msg = (
                        f'(id={env.sos_dict["step_id"]}, index={res["index"]})'
                        if "index" in res and len(self._substeps) > 1
                        else f'(id={env.sos_dict["step_id"]})'
)
self.exec_error.append(idx_msg, res["exception"])
#
if "index" not in res:
raise RuntimeError(
"Result received from substep does not have key index"
)
if "task_id" in res:
task = self.submit_task(res)
# if substep returns tasks, ...
if res["task_id"]:
self.proc_results[res["index"]] = task
else:
# if there is no task_id, the substep must have
# been skipped.
self.proc_results[res["index"]] = res
else:
self.proc_results[res["index"]] = res
self._completed_concurrent_substeps += 1
def wait_for_substep(self):
while self._completed_concurrent_substeps < len(self.proc_results):
try:
runner = self.process_returned_substep_result(
till=len(self.proc_results), wait=True
)
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
def collect_result(self):
# only results will be sent back to the master process
#
# __step_input__: input of this step
        # __step_output__: output of this step
# __step_depends__: dependent files of this step
result = {
"__step_input__": env.sos_dict["step_input"],
"__step_output__": env.sos_dict["step_output"],
"__step_depends__": env.sos_dict["step_depends"],
"__step_name__": env.sos_dict["step_name"],
"__completed__": self.completed,
}
result["__last_res__"] = self.last_res
result["__shared__"] = {}
if "shared" in self.step.options:
result["__shared__"] = self.shared_vars
for x in result["__step_output__"].targets:
if isinstance(x, sos_variable):
result["__shared__"][x.target_name()] = env.sos_dict[x.target_name()]
send_message_to_controller(
[
"progress",
"step_completed",
-1
if "sos_run" in env.sos_dict["__signature_vars__"]
else self.completed["__step_completed__"],
env.sos_dict["step_name"],
env.sos_dict["step_output"],
]
)
return result
def set_task_queue_from_task_params(self):
if self.step.task_params:
try:
task_queue = get_value_of_param(
"queue", self.step.task_params, extra_dict=env.sos_dict.dict()
)
if task_queue:
env.sos_dict["_runtime"]["queue"] = task_queue[0]
except Exception as e:
raise ValueError(
f"Failed to determine value of parameter queue of {self.step.task_params}: {e}"
)
# # check concurrent #1134
# try:
# task_concurrency = get_value_of_param(
# 'concurrent',
# self.step.task_params,
# extra_dict=env.sos_dict.dict())
# if task_concurrency:
# env.sos_dict['_runtime']['concurrent'] = task_concurrency[0]
# except Exception as e:
# raise ValueError(
# f'Failed to determine value of parameter queue of {self.step.task_params}: {e}'
# )
# if -q is unspecified and option queue is unspecified,
# or queue=None is specified, disregard the task keyword
if (
env.config["default_queue"] is None
and "queue" not in env.sos_dict["_runtime"]
) or (
"queue" in env.sos_dict["_runtime"]
and env.sos_dict["_runtime"]["queue"] is None
):
# remove task statement
if len(self.step.statements) >= 1 and self.step.statements[-1][0] == "!":
self.step.statements[-1][1] += "\n" + self.step.task
else:
self.step.statements.append(["!", self.step.task])
self.step.task = None
        # if queue is unspecified, it takes its value from the command line;
# in this case -q should have been specified
elif "queue" not in env.sos_dict["_runtime"]:
env.sos_dict["_runtime"]["queue"] = env.config["default_queue"]
def local_exec_without_signature(self, statement):
idx = env.sos_dict["_index"]
env.log_to_file(
"STEP", f'Execute substep {env.sos_dict['step_name']} without signature'
)
try:
if self.is_input_verified:
verify_input()
self.is_input_verified = False
if env.sos_dict.get("__concurrent_subworkflow__", False):
self._subworkflow_results.append(
self.execute(statement[1], return_result=True)
)
else:
self.execute(statement[1])
if not self.step.task and env.config["run_mode"] != "interactive":
env.logger.info(
                    f'``{self.step.step_name(True)}``{f" (index={idx})" if len(self._substeps) > 1 else ""} is ``completed``{" (pending nested workflow)" if self._subworkflow_results else ""}.'
)
finally:
if not self.step.task:
# if no task, this step is __completed
# complete case: local skip without task
send_message_to_controller(
["progress", "substep_completed", env.sos_dict["step_id"]]
)
if "shared" in self.step.options:
try:
self.shared_vars[env.sos_dict["_index"]].update(
{
x: env.sos_dict[x]
for x in self.vars_to_be_shared
if x in env.sos_dict
}
)
except Exception as e:
raise ValueError(f"Missing shared variable {e}.")
def local_exec_with_signature(self, statement, sig):
idx = env.sos_dict["_index"]
        # the signature might have been built outside of this function;
        # if not (e.g. in debug mode its creation is delayed), build it now
if sig is None:
sig = RuntimeInfo(
statementMD5([statement[1], self.step.task]),
env.sos_dict["_input"],
env.sos_dict["_output"],
env.sos_dict["_depends"],
env.sos_dict["__signature_vars__"],
shared_vars=self.vars_to_be_shared,
)
        # if the signature matches, we skip the substep even if
# there are tasks.
matched = validate_step_sig(sig)
if matched:
if env.sos_dict["step_output"].undetermined():
self.output_groups[idx] = matched["output"]
if "vars" in matched:
self.shared_vars[idx].update(matched["vars"])
return True
env.log_to_file(
"STEP",
            f'Execute substep {env.sos_dict["step_name"]} with signature {sig.sig_id}',
)
sig.lock()
try:
if self.is_input_verified:
verify_input()
self.is_input_verified = False
if env.sos_dict.get("__concurrent_subworkflow__", False):
self._subworkflow_results.append(
self.execute(statement[1], return_result=True)
)
else:
self.execute(statement[1])
if not self.step.task and env.config["run_mode"] != "interactive":
env.logger.info(
                    f'``{self.step.step_name(True)}``{f" (index={idx})" if len(self._substeps) > 1 else ""} is ``completed``{" (pending nested workflow)" if self._subworkflow_results else ""}.'
)
if "shared" in self.step.options:
try:
self.shared_vars[env.sos_dict["_index"]].update(
{
x: env.sos_dict[x]
for x in self.vars_to_be_shared
if x in env.sos_dict
}
)
except Exception as e:
raise ValueError(f"Missing shared variable {e}.")
finally:
# if this is the end of substep, save the signature
# otherwise we need to wait for the completion
# of the task.
if not self.step.task:
if env.sos_dict["step_output"].undetermined():
output = reevaluate_output()
self.output_groups[env.sos_dict["_index"]] = output
sig.set_output(output)
sig.write()
# complete case : local execution without task
send_message_to_controller(
["progress", "substep_completed", env.sos_dict["step_id"]]
)
else:
self.pending_signatures[idx] = sig
sig.release()
return False
def skip_substep(self):
idx = env.sos_dict["_index"]
        # if concurrent substep, there might be later steps that need to be rerun,
# and we need to mark some steps has been completed.
if self.concurrent_substep:
self._completed_concurrent_substeps += 1
self.proc_results[idx] = {
"index": idx,
"ret_code": 0,
"output": copy.deepcopy(env.sos_dict["_output"]),
}
send_message_to_controller(
["progress", "substep_ignored", env.sos_dict["step_id"]]
)
def concurrent_exec(self, statement, sig=None):
idx = env.sos_dict["_index"]
env.log_to_file(
"STEP",
            f'Execute substep {env.sos_dict["step_name"]} {idx} concurrently with {self._completed_concurrent_substeps} completed',
)
        # the signatures are supposed to be written by the substep worker; however,
# the substep worker might send tasks back to the step worker and
# we should write the signatures after the tasks are completed
if (
env.config["sig_mode"] != "ignore"
and not env.sos_dict["_output"].unspecified()
and self.step.task
):
self.pending_signatures[idx] = (
sig
if sig
else RuntimeInfo(
statementMD5([statement[1], self.step.task]),
env.sos_dict["_input"],
env.sos_dict["_output"],
env.sos_dict["_depends"],
env.sos_dict["__signature_vars__"],
shared_vars=self.vars_to_be_shared,
)
)
#
# step_output: needed only when it is undetermined
# step_input: not needed
# _input, _output, _depends, _index: needed
# step_name: for debug scripts
# step_id, workflow_id: for reporting to controller
# '__signature_vars__' to be used for signature creation
#
# __step_context__ is not needed because substep
# executor does not support nested workflow
proc_vars = (
env.sos_dict["__signature_vars__"]
| env.sos_dict["__environ_vars__"]
| {
"_input",
"_output",
"_depends",
"_index",
"step_output",
"step_name",
"_runtime",
"step_id",
"workflow_id",
"__num_groups__",
"__signature_vars__",
}
)
self.proc_results[env.sos_dict["_index"]] = {}
self.submit_substep(
dict(
stmt=statement[1],
global_def=self.step.global_def,
# 1225: the step might contain large variables from global section, but
# we do not have to sent them if they are not used in substeps.
cwd=os.getcwd(),
global_vars={
x: y
for x, y in self.step.global_vars.items()
if x in env.sos_dict["__signature_vars__"]
},
task=self.step.task,
task_params=self.step.task_params,
proc_vars=env.sos_dict.clone_selected_vars(proc_vars),
shared_vars=self.vars_to_be_shared,
config=env.config,
)
)
def check_task_sig(self):
idx = env.sos_dict["_index"]
sig = RuntimeInfo(
statementMD5([self.step.task]),
env.sos_dict["_input"],
env.sos_dict["_output"],
env.sos_dict["_depends"],
env.sos_dict["__signature_vars__"],
shared_vars=self.vars_to_be_shared,
)
env.log_to_file(
"STEP",
            f'Check task-only step {env.sos_dict["step_name"]} with signature {sig.sig_id}',
)
matched = validate_step_sig(sig)
skip_index = bool(matched)
if matched:
if env.sos_dict["step_output"].undetermined():
self.output_groups[env.sos_dict["_index"]] = matched["output"]
self.shared_vars[env.sos_dict["_index"]].update(matched["vars"])
# complete case: step with task ignored
send_message_to_controller(
["progress", "substep_ignored", env.sos_dict["step_id"]]
)
self.pending_signatures[idx] = sig
return skip_index
# def is_task_active(self):
# active = env.sos_dict['_runtime']['active']
# env.logger.error(active)
# if active is True:
# return True
# elif active is False:
# return False
# elif isinstance(active, int):
# if active >= 0 and env.sos_dict['_index'] != active:
# return False
# if active < 0 and env.sos_dict[
# '_index'] != active + env.sos_dict['__num_groups__']:
# return False
# return True
# elif isinstance(active, Sequence):
# allowed_index = list([
# x if x >= 0 else env.sos_dict['__num_groups__'] + x
# for x in active
# ])
# return env.sos_dict['_index'] in allowed_index
# elif isinstance(active, slice):
# allowed_index = list(range(env.sos_dict['__num_groups__']))[active]
# return env.sos_dict['_index'] in allowed_index
# else:
# raise RuntimeError(
# f'Unacceptable value for option active: {active}')
def check_results(self):
for proc_result in [
x for x in self.proc_results.values() if x["ret_code"] == 0
]:
if "stdout" in proc_result and proc_result["stdout"]:
sys.stdout.write(proc_result["stdout"])
if "stderr" in proc_result and proc_result["stderr"]:
sys.stderr.write(proc_result["stderr"])
# now that output is settled, we can write remaining signatures
for idx, res in self.proc_results.items():
if (
self.pending_signatures[idx] is not None
and res["ret_code"] == 0
and "sig_skipped" not in res
):
# task might return output with vars #1355
self.pending_signatures[idx].set_output(self.output_groups[idx])
self.pending_signatures[idx].write()
if res["ret_code"] != 0 and "output" in res:
clear_output(output=res["output"])
for proc_result in [
x for x in self.proc_results.values() if x["ret_code"] != 0
]:
if "stdout" in proc_result and proc_result["stdout"]:
sys.stdout.write(proc_result["stdout"])
if "stderr" in proc_result and proc_result["stderr"]:
sys.stderr.write(proc_result["stderr"])
if "exception" in proc_result:
excp = proc_result["exception"]
if isinstance(excp, StopInputGroup):
if excp.message:
env.logger.info(excp.message)
self.output_groups[proc_result["index"]] = sos_targets([])
elif isinstance(excp, RemovedTarget):
raise excp
elif "task" in proc_result:
if env.config["error_mode"] == "ignore":
env.logger.warning(f"Ignore failed task {proc_result["task"]}.")
# if the exception is from a task...
self.exec_error.append(proc_result["task"], excp)
else:
self.exec_error.append(
RuntimeError(
f"Substep failed with return code {proc_result["ret_code"]}"
)
)
# this is after all substeps have been completed
if self.exec_error.errors:
raise self.exec_error
def calculate_completed(self):
substeps = (
self.completed["__substep_completed__"]
+ self.completed["__substep_skipped__"]
)
self.completed["__step_completed__"] = (
self.completed["__substep_completed__"] / substeps
)
self.completed["__step_skipped__"] = (
self.completed["__substep_skipped__"] / substeps
)
if self.completed["__step_completed__"].is_integer():
self.completed["__step_completed__"] = int(
self.completed["__step_completed__"]
)
if self.completed["__step_skipped__"].is_integer():
self.completed["__step_skipped__"] = int(self.completed["__step_skipped__"])
def run(self):
"""Execute a single step and return results. The result for batch mode is the
input, output etc returned as alias, and for interactive mode is the return value
of the last expression."""
# return value of the last executed statement
self.last_res = None
self.start_time = time.time()
self.completed = defaultdict(int)
#
# prepare environments, namely variables that can be used by the step
#
# * step_name: name of the step, can be used by step process to determine
# actions dynamically.
env.sos_dict.set("step_name", self.step.step_name())
env.sos_dict.set("__last_step__", self.step.last_step)
self.log("start")
env.sos_dict.set(
"step_id",
textMD5(
f'{env.sos_dict["workflow_id"]} {env.sos_dict["step_name"]} {self.step.md5}'
),
)
env.sos_dict.set("master_id", env.config["master_id"])
# used by nested workflow
env.sos_dict.set("__step_context__", self.step.context)
env.sos_dict.set("_runtime", {})
# * input: input files, which should be __step_output__ if it is defined, or
# None otherwise.
# * _input: first batch of input, which should be input if no input statement is used
# * output: None at first, can be redefined by output statement
# * _output: None at first, can be redefined by output statement
# * depends: None at first, can be redefined by depends statement
# * _depends: None at first, can be redefined by depends statement
#
self.init_input_output_vars()
# _index is needed for pre-input action's active option and for debug output of scripts
env.sos_dict.set("_index", 0)
if "STEP" in env.config["SOS_DEBUG"] or "ALL" in env.config["SOS_DEBUG"]:
env.log_to_file(
"STEP",
f'Executing step {env.sos_dict["step_name"]} with step_input {env.sos_dict["step_input"]} and step_output {env.sos_dict["step_output"]}',
)
self.set_task_queue_from_task_params()
# look for input statement.
input_statement_idx = [
idx
for idx, x in enumerate(self.step.statements)
if x[0] == ":" and x[1] == "input"
]
if not input_statement_idx:
input_statement_idx = None
elif len(input_statement_idx) == 1:
input_statement_idx = input_statement_idx[0]
else:
raise ValueError(
f"More than one step input are specified in step {self.step.step_name(True)}"
)
# if shared is true, we have to disable concurrent because we
# do not yet return anything from shared.
self.concurrent_substep = "shared" not in self.step.options
# and \
# ('concurrent' not in env.sos_dict['_runtime'] or env.sos_dict['_runtime']['concurrent'] is True)
if input_statement_idx is not None:
# execute before input stuff
for statement in self.step.statements[:input_statement_idx]:
if statement[0] == ":":
# wait for all dependent targets to be resolved
key, value = statement[1:3]
if key != "depends":
raise ValueError(f"Step input should be specified before {key}")
while True:
try:
args, kwargs = SoS_eval(
f"__null_func__({value})",
extra_dict={
"__null_func__": __null_func__,
"output_from": __output_from__,
"named_output": __named_output__,
"traced": __traced__,
},
)
dfiles = expand_depends_files(*args)
# dfiles can be Undetermined
runner = self.process_depends_args(dfiles, **kwargs)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
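# Note on the pattern above: the next()/send() loop manually delegates to the
# generator returned by process_depends_args (roughly the equivalent of
# "yield from runner", kept explicit here). The step executor itself is a
# generator that yields sockets (or None) to its runner, so every helper that
# needs to wait on a socket is written as a generator and driven with this
# same loop throughout the module.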
except (UnknownTarget, RemovedTarget) as e:
runner = self.handle_unknown_target(e)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
continue
except UnavailableLock:
raise
except Exception as e:
raise RuntimeError(
f"Failed to process step {key} ({value.strip()}): {e}"
)
break
else:
try:
# 1354
# if there are definitions before input, the definitions and imports
# must be added to global_def in order to be executed by substeps
if any(x in statement[1] for x in ("class", "def", "import")):
step_def = KeepOnlyImportAndDefine().visit(
ast.parse(statement[1])
)
if step_def.body:
if isinstance(self.step.global_def, ast.Module):
self.step.global_def.body.extend(step_def.body)
else:
self.step.global_def = step_def
self.execute(statement[1])
except StopInputGroup as e:
# stop before substeps, because there is no output statement before it
# we do not have to worry about keep_output
if e.message:
env.logger.info(e.message)
return self.collect_result()
# input statement
stmt = self.step.statements[input_statement_idx][2]
self.log("input statement", stmt)
while True:
# wait for all targets to be resolved
try:
args, kwargs = SoS_eval(
f"__null_func__({stmt})",
extra_dict={
"__null_func__": __null_func__,
"output_from": __output_from__,
"named_output": __named_output__,
"traced": __traced__,
},
)
# Files will be expanded differently with different running modes
input_files: sos_targets = expand_input_files(
*args,
**{
k: v
for k, v in kwargs.items()
if k not in SOS_INPUT_OPTIONS
},
)
runner = self.process_input_args(
input_files,
**{k: v for k, v in kwargs.items() if k in SOS_INPUT_OPTIONS},
)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration as e:
self._substeps = e.value
#
if "concurrent" in kwargs and self.concurrent_substep:
# concurrent can be True/False or an integer
self.concurrent_substep = kwargs["concurrent"]
except (UnknownTarget, RemovedTarget) as e:
runner = self.handle_unknown_target(e)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
continue
except UnavailableLock:
raise
except Exception as e:
raise ValueError(f"Failed to process input statement {stmt}: {e}")
break
input_statement_idx += 1
elif env.sos_dict["step_input"].groups:
# if default has groups...
# default case
self._substeps = env.sos_dict["step_input"].groups
# assume that all statements (starting from index 0) come after the input statement
input_statement_idx = 0
else:
# default case
self._substeps = [env.sos_dict["step_input"]]
# assume that all statements (starting from index 0) come after the input statement
input_statement_idx = 0
self.proc_results = {}
self.vars_to_be_shared = set()
if "shared" in self.step.options:
self.vars_to_be_shared = parse_shared_vars(self.step.options["shared"])
self.vars_to_be_shared = sorted(
[
x[5:] if x.startswith("step_") else x
for x in self.vars_to_be_shared
if x not in ("step_", "step_input", "step_output", "step_depends")
]
)
self.shared_vars = [{} for x in self._substeps]
# run steps after input statement, which will be run multiple times for each input
# group.
env.sos_dict.set("__num_groups__", len(self._substeps))
# determine if a single index or the whole step should be skipped
skip_index = False
# signatures of each index, which can remain None if no output
# is defined.
self.output_groups = [sos_targets([]) for x in self._substeps]
self.depends_groups = [sos_targets([]) for x in self._substeps]
# used to prevent overlapping output from substeps
self._all_outputs = set()
self._subworkflow_results = []
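# The condition below decides whether nested workflows started by sos_run()
# can be submitted concurrently. Roughly: the step has no 'shared' option, no
# external task, and its last statement is an executable ('!') block whose only
# content is a single sos_run(...) call (see is_sos_run_the_only_last_stmt).
# When it holds, __concurrent_subworkflow__ is set and local_exec_without_signature
# / local_exec_with_signature append the returned results (with their pending
# workflow ids) to self._subworkflow_results instead of waiting for each one.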
if (
any("sos_run" in x[1] for x in self.step.statements[input_statement_idx:])
and "shared" not in self.step.options
and not self.step.task
and self.step.statements[-1][0] == "!"
and (len(self.step.statements) == 1 or self.step.statements[-2][0] == ":")
and is_sos_run_the_only_last_stmt(self.step.statements[-1][1])
):
env.sos_dict.set("__concurrent_subworkflow__", True)
if self.concurrent_substep:
if len(self._substeps) <= 1 or env.config["run_mode"] == "dryrun":
self.concurrent_substep = False
elif any(
"sos_run" in x[1] for x in self.step.statements[input_statement_idx:]
):
self.concurrent_substep = False
env.logger.debug(
"Substeps are executed sequentially because of existence of multiple nested workflow."
)
else:
self.prepare_substep()
try:
self.completed["__substep_skipped__"] = 0
self.completed["__substep_completed__"] = len(self._substeps)
self._completed_concurrent_substeps = 0
# pending signatures are signatures for steps with external tasks
self.pending_signatures = [None for x in self._substeps]
for idx, g in enumerate(self._substeps):
#
# https://github.com/vatlab/sos/issues/1376
#
# [default]
# input: for_each=dict(i=range(1000))
# sos_run('a', t=i)
#
# when we have a workflow like the one above, where steps
# are executed quickly with subworkflows submitted to the master,
# the master process could be swamped with subworkflows, causing
# "too many open files".
#
# the following code will stop the step from continued
# execution and wait for the subworkflows to complete.
#
if self._subworkflow_results:
try:
runner = self.wait_for_subworkflows(
allow_pending=env.config["worker_procs"]
)
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
# other variables
#
_vars = {}
# now, let us expose target level variables as lists
if len(g) > 1:
names = set.union(*[set(x._dict.keys()) for x in g._targets])
elif len(g) == 1:
names = set(g._targets[0]._dict.keys())
else:
names = set()
for name in names:
_vars[name] = [x.get(name) for x in g._targets]
# then we expose all group level variables
_vars.update(g._dict)
_vars.update(env.sos_dict["step_input"]._dict)
env.sos_dict.update(_vars)
env.sos_dict.set("_input", copy.deepcopy(g))
# set vars to _input
# env.sos_dict['_input'].set(**v)
self.log("_input")
env.sos_dict.set("_index", idx)
if env.config["error_mode"] == "ignore":
missed = [x for x in g.targets if not x.target_exists()]
if missed:
if any(isinstance(x, invalid_target) for x in missed):
env.logger.warning(
f'{self.step.step_name(True)}{f" (index={idx})" if len(self._substeps) > 1 else ""} ignored due to invalid input caused by previous failed substep.'
)
else:
env.logger.warning(
f'{self.step.step_name(True)}{f" (index={idx})" if len(self._substeps) > 1 else ""} ignored due to missing input {sos_targets(missed)}'
)
self.output_groups[idx] = sos_targets(invalid_target())
env.sos_dict.set("_output", sos_targets(invalid_target()))
self.skip_substep()
continue
# in interactive mode, because sos_dict are always shared
# execution of a substep, especially when it calls a nested
# workflow, would change step_name, __step_context__ etc, and
# we will have to reset these variables to make sure the next
# substep would execute normally. Batch mode is immune to this
# problem because nested workflows are executed in their own
# process/context etc
if env.config["run_mode"] == "interactive":
env.sos_dict.set("step_name", self.step.step_name())
env.sos_dict.set(
"step_id",
hash(
(
env.sos_dict["workflow_id"],
env.sos_dict["step_name"],
self.step.md5,
)
),
)
# used by nested workflow
env.sos_dict.set("__step_context__", self.step.context)
#
pre_statement = []
if (
not any(
st[0] == ":" and st[1] == "output"
for st in self.step.statements[input_statement_idx:]
)
and "__default_output__" in env.sos_dict
):
pre_statement = [[":", "output", "_output"]]
# if there is no statement, no task, claim success
post_statement = []
if not self.step.statements or self.step.statements[-1][0] != "!":
if self.step.task:
# if there is only task, we insert a fake statement so that it can be executed by the executor
post_statement = [["!", ""]]
else:
# complete case: no statement, no task
send_message_to_controller(
["progress", "substep_completed", env.sos_dict["step_id"]]
)
all_statements = (
pre_statement
+ self.step.statements[input_statement_idx:]
+ post_statement
)
self.is_input_verified = True
for statement_idx, statement in enumerate(all_statements):
is_last_runblock = statement_idx == len(all_statements) - 1
# if input is undetermined, we can only process output:
if not g.valid() and statement[0] != ":":
raise RuntimeError("Undetermined input encountered")
if statement[0] == ":":
key, value = statement[1:3]
# output, depends, and process can be processed multiple times
while True:
# loop for all unresolved targets to be resolved
try:
args, kwargs = SoS_eval(
f"__null_func__({value})",
extra_dict={
"__null_func__": __null_func__,
"output_from": __output_from__,
"named_output": __named_output__,
"traced": __traced__,
},
)
# dynamic output or dependent files
if key == "output":
# if output is defined, its default value needs to be cleared
if idx == 0:
env.sos_dict.set("step_output", sos_targets())
ofiles: sos_targets = expand_output_files(
value,
*args,
**{
k: v
for k, v in kwargs.items()
if k not in SOS_OUTPUT_OPTIONS
},
)
if g.valid() and ofiles.valid():
if any(
x in g._targets
for x in ofiles
if not isinstance(x, sos_step)
):
raise RuntimeError(
f'Overlapping input and output files: {", ".join(repr(x) for x in ofiles if x in g)}'
)
# set variable _output and output
self.process_output_args(
ofiles,
**{
k: v
for k, v in kwargs.items()
if k in SOS_OUTPUT_OPTIONS
},
)
self.output_groups[idx] = env.sos_dict["_output"]
elif key == "depends":
try:
dfiles = expand_depends_files(*args)
# dfiles can be Undetermined
runner = self.process_depends_args(
dfiles, **kwargs
)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
self.depends_groups[idx] = env.sos_dict[
"_depends"
]
self.log("_depends")
except Exception:
# env.logger.info(e)
raise
else:
raise RuntimeError(f"Unrecognized directive {key}")
# everything is ok, break
break
except (UnknownTarget, RemovedTarget) as e:
runner = self.handle_unknown_target(e)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
continue
except UnavailableLock:
raise
except Exception as e:
# if input is Undetermined, it is possible that output cannot be processed
# due to that, and we just return
if not g.valid():
env.logger.debug(e)
return self.collect_result()
raise RuntimeError(
f"Failed to process step {key} ({value.strip()}): {e}"
)
elif is_last_runblock:
if (
env.config["sig_mode"] == "skip"
and not self.vars_to_be_shared
and "sos_run" not in statement[1]
and not env.sos_dict["_output"].unspecified()
and len(env.sos_dict["_output"]) > 0
and all(
x.target_exists()
for x in env.sos_dict["_output"].targets
)
and env.sos_dict["_output"].later_than(
env.sos_dict["_input"]
)
):
self.skip_substep()
env.logger.info(
f'``{env.sos_dict["step_name"]}``{f" (index={idx})" if len(self._substeps) > 1 else ""} is ``skipped`` with existing output.'
)
skip_index = True
# do not execute the rest of the statement
break
#
# default mode, check if skipping substep
sig = None
if (
env.config["sig_mode"]
not in ("ignore", "distributed", "build")
and not env.sos_dict["_output"].unspecified()
):
sig = RuntimeInfo(
statementMD5([statement[1], self.step.task]),
env.sos_dict["_input"],
env.sos_dict["_output"],
env.sos_dict["_depends"],
env.sos_dict["__signature_vars__"],
shared_vars=self.vars_to_be_shared,
)
matched = validate_step_sig(sig)
skip_index = bool(matched)
if skip_index:
# matched["output"] might hav vars not defined in "output" #1355
env.sos_dict.set("_output", matched["output"])
self.output_groups[idx] = matched["output"]
if "vars" in matched:
self.shared_vars[idx].update(matched["vars"])
self.skip_substep()
break
try:
if self.concurrent_substep:
self.concurrent_exec(statement, sig)
# we check if previous tasks have been completed and process them
# because further steps might need to be done
try:
runner = self.process_returned_substep_result(
till=idx + 1, wait=False
)
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
elif (
env.config["sig_mode"] == "ignore"
or env.sos_dict["_output"].unspecified()
):
self.local_exec_without_signature(statement)
else:
skip_index = self.local_exec_with_signature(
statement, sig
)
if skip_index:
self.skip_substep()
break
except StopInputGroup as e:
if not e.keep_output:
clear_output()
self.output_groups[idx] = sos_targets([])
if e.message:
env.logger.info(e.message)
skip_index = True
break
except Exception as e:
clear_output()
if env.config["error_mode"] == "abort":
raise
elif env.config["error_mode"] == "ignore":
idx_msg = (
f'(id={env.sos_dict["step_id"]}, index={idx})'
if len(self._substeps) > 1
else f'(id={env.sos_dict["step_id"]})'
)
env.logger.warning(
f"{self.step.step_name(True)} {idx_msg} returns no output due to error: {e}"
)
self.output_groups[idx] = sos_targets(invalid_target())
skip_index = True
else:
if env.config["run_mode"] != "interactive":
# default mode
idx_msg = (
f'(id={env.sos_dict["step_id"]}, index={idx})'
if len(self._substeps) > 1
else f'(id={env.sos_dict["step_id"]})'
)
env.logger.error(
f"{self.step.step_name(True)} {idx_msg} returns an error."
)
self.exec_error.append(str(idx), e)
else:
# if it is not the last statement group (e.g. statements before :output)
# we execute locally without anything like signature
if self.is_input_verified:
verify_input()
self.is_input_verified = False
try:
self.execute(statement[1])
except StopInputGroup as e:
if not e.keep_output:
clear_output()
self.output_groups[idx] = sos_targets([])
if e.message:
env.logger.info(e.message)
skip_index = True
break
except Exception:
clear_output()
raise
# if there is no statement, but there are tasks, we should
# check signature here.
if (
(not self.step.statements or self.step.statements[-1][0] != "!")
and self.step.task
and not self.concurrent_substep
and env.config["sig_mode"] != "ignore"
and not env.sos_dict["_output"].unspecified()
):
skip_index = self.check_task_sig()
# if this index is skipped, go directly to the next one
if skip_index:
self.completed["__substep_skipped__"] += 1
self.completed["__substep_completed__"] -= 1
skip_index = False
continue
# if concurrent input group, tasks are handled in substep
if self.concurrent_substep or not self.step.task:
continue
if env.config["run_mode"] == "dryrun" and env.sos_dict["_index"] != 0:
continue
# # check if the task is active
# if 'active' in env.sos_dict['_runtime']:
# if not self.is_task_active():
# continue
#
self.log("task")
try:
task_id, taskdef, task_vars = create_task(
self.step.global_def,
self.step.global_vars,
self.step.task,
self.step.task_params,
)
task = self.submit_task(
{
"index": env.sos_dict["_index"],
"task_id": task_id,
"task_def": taskdef,
"task_vars": task_vars,
}
)
self.proc_results[env.sos_dict["_index"]] = task
except Exception as e:
# FIXME: cannot catch exception from subprocesses
if env.verbosity > 2:
sys.stderr.write(get_traceback())
raise RuntimeError(
f'Failed to execute process\n"{short_repr(self.step.task)}"\n{e}'
)
#
# # if not concurrent, we have to wait for the completion of the task
# if 'concurrent' in env.sos_dict['_runtime'] and env.sos_dict[
# '_runtime']['concurrent'] is False:
# # in this case the steps must be executed not concurrently
# runner = self.wait_for_results(all_submitted=False)
# try:
# yreq = next(runner)
# while True:
# yres = yield yreq
# yreq = runner.send(yres)
# except StopIteration:
# pass
#
# endfor loop for each input group
#
if self._subworkflow_results:
try:
runner = self.wait_for_subworkflows(allow_pending=0)
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
env.sos_dict.pop("__concurrent_subworkflow__")
runner = self.wait_for_results(all_submitted=True)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
for idx, res in self.proc_results.items():
if "sig_skipped" in res:
self.completed["__substep_skipped__"] += 1
self.completed["__substep_completed__"] -= 1
if "output" in res:
self.output_groups[idx] = res["output"]
# check results
self.check_results()
# if error happened but we allow all substeps to be completed, we now
# raise exception
if self.exec_error.errors:
raise self.exec_error
# if output is Undetermined, re-evaluate it
# finalize output from output_groups because some output might be skipped
# this is the final version of the output but we do maintain output
# during the execution of step, for compatibility.
env.sos_dict.set(
"step_output", sos_targets([])._add_groups(self.output_groups)
)
env.sos_dict.set(
"step_depends", sos_targets([])._add_groups(self.depends_groups)
)
# if there exists an option shared, the variable would be treated as
# provides=sos_variable(), and then as step_output
if "shared" in self.step.options:
self.shared_vars = evaluate_shared(
self.shared_vars, self.step.options["shared"]
)
env.sos_dict.quick_update(self.shared_vars)
missing = self.verify_output()
self.log(
"output",
msg=f'\033[95m missing: {short_repr(missing)} ({len(missing)} item{"s" if len(missing) > 1 else ""})\033[0m'
if len(missing) > 0
else "",
)
self.calculate_completed()
def file_only(targets):
if not isinstance(targets, sos_targets):
env.logger.warning(
f"Unexpected input or output target for reporting. Empty list returned: {targets}"
)
return []
return [
(str(x), x.size())
for x in targets._targets
if isinstance(x, file_target)
]
step_info = {
"step_id": self.step.md5,
"start_time": self.start_time,
"stepname": self.step.step_name(True),
"substeps": len(self._substeps),
"input": file_only(env.sos_dict["step_input"]),
"output": file_only(env.sos_dict["step_output"]),
"completed": dict(self.completed),
"end_time": time.time(),
}
send_message_to_controller(
["workflow_sig", "step", env.sos_dict["workflow_id"], repr(step_info)]
)
return self.collect_result()
finally:
if self.concurrent_substep:
close_socket(self.result_pull_socket, "substep collector")
class Step_Executor(Base_Step_Executor):
"""Single process step executor"""
def __init__(self, step, socket, mode="run"):
self.run_mode = mode
env.config["run_mode"] = mode
super(Step_Executor, self).__init__(step)
self.socket = socket
# because step is executed in a separate SoS_Worker process, this
# __socket__ is available to all the actions that will be executed
# in the step
env.__socket__ = socket
def submit_tasks(self, tasks):
if "TASK" in env.config["SOS_DEBUG"] or "ALL" in env.config["SOS_DEBUG"]:
env.log_to_file("TASK", f"Send {tasks}")
self.socket.send(
encode_msg(["tasks", env.sos_dict["_runtime"]["queue"]] + tasks)
)
def wait_for_tasks(self, tasks, all_submitted):
# wait_for_tasks is a generator function that yields requests
# to the runner
if not tasks:
return {}
# when we wait, the "outsiders" also need to see the tags etc
# of the tasks so we have to write to the database. #156
send_message_to_controller(["commit_sig"])
# wait till the executor responds
results = {}
while True:
# yield an indicator of what is requested, for debugging purpose
yield self.socket
res = decode_msg(self.socket.recv())
if res is None:
sys.exit(0)
results.update(res)
# all results have been obtained.
if len(results) == len(tasks):
break
return results
def wait_for_subworkflows(self, allow_pending):
"""Wait for results from subworkflows"""
try:
allow_pending = int(allow_pending)
except:
allow_pending = min(max(os.cpu_count() // 2, 2), 8)
while self._subworkflow_results:
if allow_pending > 0:
n_pending = sum(
len(x["pending_workflows"]) for x in self._subworkflow_results
)
if n_pending <= allow_pending:
break
# here we did not check if workflow ids match
yield self.socket
res = decode_msg(self.socket.recv())
if res is None:
sys.exit(0)
elif isinstance(res, Exception):
raise res
if not "__workflow_id__" in res:
raise ValueError(f"Unrecognized result from subworkflows: {res}")
# remove from self._subworkflow_results
result_with_id = [
idx
for idx, x in enumerate(self._subworkflow_results)
if res["__workflow_id__"] in x["pending_workflows"]
]
if not result_with_id:
raise RuntimeError(
f"Failed to identify ID of returned subworkflow: {res}"
)
if len(result_with_id) > 1:
raise RuntimeError(
"Multiple matches of subworkflow ID. This should not happen."
)
self._subworkflow_results[result_with_id[0]]["pending_workflows"].remove(
res["__workflow_id__"]
)
if not self._subworkflow_results[result_with_id[0]]["pending_workflows"]:
self._subworkflow_results.pop(result_with_id[0])
def handle_unknown_target(self, e):
self.socket.send(encode_msg(["missing_target", e.target]))
yield self.socket
res = decode_msg(self.socket.recv())
if not res:
raise e
def verify_dynamic_targets(self, targets):
if not targets:
return
if env.config["trace_existing"]:
traced = targets
else:
traced = [x for x in targets if x.traced]
if not traced:
return
self.socket.send(encode_msg(["dependent_target"] + traced))
yield self.socket
res = decode_msg(self.socket.recv())
if res != "target_resolved":
raise RuntimeError(f"Failed to veryify dependent target {traced}")
def run(self):
try:
try:
# 1218
runner = Base_Step_Executor.run(self)
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration as e:
res = e.value
if self.socket is not None:
if (
"STEP" in env.config["SOS_DEBUG"]
or "ALL" in env.config["SOS_DEBUG"]
):
env.log_to_file(
"STEP",
f"Step {self.step.step_name()} sends result {short_repr(res)}",
)
self.socket.send(encode_msg(res))
else:
return res
except RemovedTarget as e:
# removed target needs to be handled differently since the workflow manager
# uses type information to get removed targets
if self.socket is not None and not self.socket.closed:
self.socket.send(encode_msg(e))
else:
raise e
except Exception as e:
if env.verbosity > 2:
sys.stderr.write(get_traceback())
if isinstance(e, ProcessKilled):
raise
# if not self.exec_error
if e != self.exec_error:
self.exec_error.append(self.step.step_name(), e)
#
if self.exec_error.errors:
if self.socket is not None and not self.socket.closed:
env.log_to_file(
"STEP",
f"Step {self.step.step_name()} sends exception {self.exec_error}",
)
self.socket.send(encode_msg(self.exec_error))
else:
raise self.exec_error
#!/usr/bin/env python3
#
# Copyright (c) Bo Peng and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
import ast
import copy
import os
import subprocess
import sys
import time
from collections import defaultdict
from collections.abc import Mapping, Sequence
from typing import List
import zmq
from .controller import close_socket, create_socket, send_message_to_controller
from .messages import encode_msg, decode_msg
from .eval import SoS_eval, SoS_exec, accessed_vars, KeepOnlyImportAndDefine
from .executor_utils import (
__named_output__,
__null_func__,
__output_from__,
__traced__,
clear_output,
create_task,
get_traceback_msg,
reevaluate_output,
statementMD5,
validate_step_sig,
verify_input,
ExecuteError,
)
from .syntax import (
SOS_DEPENDS_OPTIONS,
SOS_INPUT_OPTIONS,
SOS_OUTPUT_OPTIONS,
SOS_TARGETS_OPTIONS,
)
from .targets import (
RemovedTarget,
RuntimeInfo,
UnavailableLock,
sos_variable,
UnknownTarget,
dynamic,
file_target,
sos_step,
sos_targets,
invalid_target
)
from .tasks import MasterTaskParams, TaskFile
from .utils import (
ArgumentError,
StopInputGroup,
TerminateExecution,
env,
get_traceback,
short_repr,
ProcessKilled,
get_localhost_ip,
textMD5,
)
__all__: List = []
class TaskManager:
# manage tasks created by the step
def __init__(self, num_tasks, trunk_size, trunk_workers):
super(TaskManager, self).__init__()
self.num_tasks = num_tasks
import math
self._slots = [[] for x in range(math.ceil(num_tasks / trunk_size))]
self._last_slot_size = (
trunk_size if (num_tasks % trunk_size == 0) else (num_tasks % trunk_size)
)
self.trunk_size = trunk_size
self.trunk_workers = trunk_workers
self._submitted_tasks = []
# entire groups
self._unsubmitted_slots = []
# collection of partial groups if some tasks are completed
self._unsubmitted_tasks = []
# derived from _unsubmitted_slots
self._all_ids = []
self._all_output = []
#
self._terminate = False
#
self._tags = {}
def set(self, idx, task_def):
slot = idx // self.trunk_size
#
# slot [
# [idx, None] <- for empty
# [idx, taskdef] <- for non empty
# ]
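# Illustration (hypothetical numbers): with num_tasks=5 and trunk_size=2 there
# are three slots; indices 0-1 fill slot 0, indices 2-3 fill slot 1, and index
# 4 alone fills the last slot (whose expected size is _last_slot_size=1). A
# full slot of real task definitions is appended to _unsubmitted_slots and
# later turned into a master task by get_job(); a slot with trunk_size == 1 or
# with None entries (skipped substeps) contributes its remaining tasks to
# _unsubmitted_tasks instead.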
self._slots[slot].append([idx, task_def])
# the slot is full
if len(self._slots[slot]) == self.trunk_size or (
slot == len(self._slots) - 1
and len(self._slots[slot]) == self._last_slot_size
):
# if there are valid tasks
if not all([x[1] is None for x in self._slots[slot]]):
# remove empty tasks and sort by id
if self.trunk_size == 1 or any(x[1] is None for x in self._slots[slot]):
# if partial, send to the partial list
self._unsubmitted_tasks.extend(
[x[1] for x in self._slots[slot] if x[1] is not None]
)
else:
self._unsubmitted_slots.append(
sorted(self._slots[slot], key=lambda x: x[0])
)
# clear slot
self._slots[slot] = []
if not task_def:
return
if isinstance(task_def[2], Sequence):
self._all_output.extend(task_def[2])
self._all_ids.append(task_def[0])
self._tags[task_def[0]] = task_def[1].tags
def tags(self, task_id):
return self._tags.get(task_id, [])
def index_of(self, task_id):
if task_id in self._all_ids:
return self._all_ids.index(task_id)
else:
return -1
def has_output(self, output):
if not isinstance(output, Sequence) or not self._unsubmitted_slots:
return False
return any(x in self._all_output for x in output)
def get_job(self, all_tasks=False):
# single tasks
ids = []
# submit all tasks without trunk, easy
for slot in self._unsubmitted_slots:
# create a master task
master = MasterTaskParams(self.trunk_workers)
for _, (task_id, taskdef, _) in slot:
master.push(task_id, taskdef)
ids.append(master.ID)
TaskFile(master.ID).save(master.finalize())
send_message_to_controller(
[
"workflow_sig",
"task",
master.ID,
f"{{'creation_time': {time.time()}}}",
]
)
self._unsubmitted_slots = []
# individual tasks...
if self.trunk_size == 1 or all_tasks:
to_be_submitted = self._unsubmitted_tasks
[
to_be_submitted.extend([x[1] for x in slot if x[1] is not None])
for slot in self._slots
if slot
]
self._unsubmitted_tasks = []
else:
# save complete blocks
num_tasks = (
len(self._unsubmitted_tasks) // self.trunk_size * self.trunk_size
)
to_be_submitted = self._unsubmitted_tasks[:num_tasks]
self._unsubmitted_tasks = self._unsubmitted_tasks[num_tasks:]
if self.trunk_size == 1 or (all_tasks and len(self._unsubmitted_tasks) == 1):
for task_id, taskdef, _ in to_be_submitted:
# if the task file already exists, the task might already be running and we
# do not change the task file; otherwise we are changing the status of the task
TaskFile(task_id).save(taskdef)
send_message_to_controller(
[
"workflow_sig",
"task",
task_id,
f"{{'creation_time': {time.time()}}}",
]
)
ids.append(task_id)
else:
master = None
for task_id, taskdef, _ in to_be_submitted:
if master is not None and master.num_tasks() == self.trunk_size:
ids.append(master.ID)
TaskFile(master.ID).save(master)
send_message_to_controller(
[
"workflow_sig",
"task",
master.ID,
f"{{'creation_time': {time.time()}}}",
]
)
master = None
if master is None:
master = MasterTaskParams(self.trunk_workers)
master.push(task_id, taskdef)
# the last piece
if master is not None:
TaskFile(master.ID).save(master.finalize())
send_message_to_controller(
[
"workflow_sig",
"task",
master.ID,
f"{{'creation_time': {time.time()}}}",
]
)
ids.append(master.ID)
if not ids:
return None
self._submitted_tasks.extend(ids)
return ids
def clear_submitted(self):
self._submitted_tasks = []
def expand_input_files(*args, **kwargs):
# if unspecified, use __step_output__ as input (default)
# resolve dynamic input.
args = [x.resolve() if isinstance(x, dynamic) else x for x in args]
kwargs = {
x: (y.resolve() if isinstance(y, dynamic) else y) for x, y in kwargs.items()
}
# if no input,
if not args and not kwargs:
return env.sos_dict["step_input"]
# if only group_by ...
elif not args and all(x in SOS_TARGETS_OPTIONS for x in kwargs.keys()):
return sos_targets(
env.sos_dict["step_input"],
_verify_existence=env.config["error_mode"] != "ignore",
**kwargs,
)
else:
return sos_targets(
*args,
**kwargs,
_verify_existence=env.config["error_mode"] != "ignore",
_undetermined=False,
_source=env.sos_dict["step_name"],
)
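# Illustration (hypothetical directives): a bare "input:" returns step_input
# unchanged; "input: group_by=2" only regroups step_input (all keyword
# arguments are sos_targets options such as, presumably, group_by); and
# "input: 'a.txt', 'b.txt', group_by=1" builds a new sos_targets from the
# listed files. Existence is only verified when error_mode is not 'ignore'.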
def expand_depends_files(*args, **kwargs):
"""handle directive depends"""
args = [x.resolve() if isinstance(x, dynamic) else x for x in args]
kwargs = {
x: (y.resolve() if isinstance(y, dynamic) else y) for x, y in kwargs.items()
}
return sos_targets(
*args,
**kwargs,
_verify_existence=True,
_undetermined=False,
_source=env.sos_dict["step_name"],
)
def expand_output_files(value, *args, **kwargs):
"""Process output files (perhaps a pattern) to determine input files."""
if any(isinstance(x, dynamic) for x in args) or any(
isinstance(y, dynamic) for y in kwargs.values()
):
return sos_targets(_undetermined=value)
else:
return sos_targets(
*args, **kwargs, _undetermined=False, _source=env.sos_dict["step_name"]
)
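# Illustration: a dynamic output such as "output: dynamic('*.out')" cannot be
# resolved before the substep runs, so an undetermined sos_targets carrying
# the raw directive value is returned; it is re-evaluated after execution
# (see reevaluate_output in local_exec_with_signature).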
def parse_shared_vars(option):
shared_vars = set()
if not option:
return shared_vars
if isinstance(option, str):
shared_vars.add(option)
elif isinstance(option, Mapping):
for val in option.values():
shared_vars |= accessed_vars(val, mode="eval")
elif isinstance(option, Sequence):
for item in option:
if isinstance(item, str):
shared_vars.add(item)
elif isinstance(item, Mapping):
for val in item.values():
shared_vars |= accessed_vars(val, mode="eval")
return shared_vars
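# Illustration (hypothetical option values): shared='counts' collects
# {'counts'}; shared={'total': 'sum(counts)'} collects the names accessed by
# the value expression (via accessed_vars), presumably including 'counts';
# shared=['counts', {'total': 'sum(counts)'}] collects the union of both forms.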
def evaluate_shared(vars, option):
# handle option shared and store variables in a "__shared_vars" variable
shared_vars = {}
env.sos_dict.quick_update(vars[-1])
for key in vars[-1].keys():
try:
if key in ("output", "depends", "input"):
env.logger.warning(
f"Cannot overwrite variable step_{key} from substep variable {key}"
)
else:
env.sos_dict.set("step_" + key, [x[key] for x in vars])
except Exception as e:
env.logger.warning(f"Failed to create step level variable step_{key}: {e}")
if isinstance(option, str):
if option in env.sos_dict:
shared_vars[option] = env.sos_dict[option]
else:
raise RuntimeError(f"shared variable does not exist: {option}")
elif isinstance(option, Mapping):
for var, val in option.items():
try:
if var == val:
shared_vars[var] = env.sos_dict[var]
else:
shared_vars[var] = SoS_eval(val)
except Exception as e:
raise RuntimeError(
f"Failed to evaluate shared variable {var} from expression {val}: {e}"
)
# if there are dictionaries in the sequence, e.g.
# shared=['A', 'B', {'C':'D"}]
elif isinstance(option, Sequence):
for item in option:
if isinstance(item, str):
if item in env.sos_dict:
shared_vars[item] = env.sos_dict[item]
else:
raise RuntimeError(f"shared variable does not exist: {option}")
elif isinstance(item, Mapping):
for var, val in item.items():
try:
if var == val:
continue
else:
shared_vars[var] = SoS_eval(val)
except Exception as e:
raise RuntimeError(
f"Failed to evaluate shared variable {var} from expression {val}: {e}"
)
else:
raise RuntimeError(
f"Unacceptable shared option. Only str or mapping are accepted in sequence: {option}"
)
else:
raise RuntimeError(
f"Unacceptable shared option. Only str, sequence, or mapping are accepted in sequence: {option}"
)
return shared_vars
def get_value_of_param(name, param_list, extra_dict={}):
tree = ast.parse(f"__null_func__({param_list})")
# x.func can be an attribute (e.g. a.b()) and does not have an id
kwargs = [
x for x in ast.walk(tree) if x.__class__.__name__ == "keyword" and x.arg == name
]
if not kwargs:
return []
try:
return [ast.literal_eval(kwargs[0].value)]
except Exception:
return [
eval(
compile(
ast.Expression(body=kwargs[0].value),
filename="<string>",
mode="eval",
),
extra_dict,
)
]
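# Illustration (hypothetical parameter list): for task parameters such as
# "queue='cluster', trunk_size=n", get_value_of_param('queue', ...) returns
# ['cluster'] via ast.literal_eval, while get_value_of_param('trunk_size', ...)
# falls back to eval() with extra_dict, so 'n' must be resolvable there. A
# parameter that is not present yields [].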
def is_sos_run_the_only_last_stmt(stmt):
tree = ast.parse(stmt)
return (
len(tree.body) >= 1
and isinstance(tree.body[-1], ast.Expr)
and isinstance(tree.body[-1].value, ast.Call)
and hasattr(tree.body[-1].value.func, "id")
and tree.body[-1].value.func.id == "sos_run"
and len(
[
x
for x in ast.walk(tree)
if isinstance(x, ast.Call)
and hasattr(x.func, "id")
and x.func.id == "sos_run"
]
)
== 1
)
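# Illustration (hypothetical step bodies):
# "sos_run('sub', t=i)" -> True: the last statement is a bare sos_run call
# and it is the only sos_run call in the body.
# "res = sos_run('sub')" -> False: the last statement is an assignment, not
# a bare expression.
# "sos_run('a')\nsos_run('b')" -> False: more than one sos_run call.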
class Base_Step_Executor:
# This base class defines how steps are executed. The derived classes will reimplement
# some function to behave differently in different modes.
#
def __init__(self, step):
self.step = step
self.task_manager = None
self.exec_error = ExecuteError(self.step.step_name())
#
# Functions that should be redefined in derived class
#
def submit_tasks(self, tasks):
raise RuntimeError("Undefined base function submit_tasks")
def wait_for_tasks(self, tasks, all_submitted):
# this will be redefined in subclasses
raise RuntimeError("Undefined base function wait_for_tasks")
def wait_for_subworkflows(self, allow_pending=0):
raise RuntimeError("Undefined base function wait_for_subworkflows")
def handle_unknown_target(self, e):
raise RuntimeError("Undefined base function handle_unknown_target")
def init_input_output_vars(self):
# if there is __step_output__ from previous step, use it as default input
# otherwise, reset to empty
if (
"__step_output__" not in env.sos_dict
or env.sos_dict["__step_output__"].unspecified()
):
env.sos_dict.set("step_input", sos_targets([]))
else:
env.sos_dict.set("step_input", env.sos_dict["__step_output__"])
# input can be Undetermined from undetermined output from last step
env.sos_dict.set("_input", copy.deepcopy(env.sos_dict["step_input"]))
# if there is default output for auxiliary steps, use it as step_output and _output
# otherwise reset to unspecified.
if "__default_output__" in env.sos_dict:
# if step is triggered by sos_step, it should not be considered as
# output of the step. #981
env.sos_dict.set(
"__default_output__",
sos_targets(
[
x
for x in env.sos_dict["__default_output__"]._targets
if not isinstance(x, sos_step)
]
),
)
env.sos_dict.set(
"step_output", copy.deepcopy(env.sos_dict["__default_output__"])
)
env.sos_dict.set(
"_output", copy.deepcopy(env.sos_dict["__default_output__"])
)
else:
env.sos_dict.set("step_output", sos_targets([]))
# output is said to be unspecified until output: is used
env.sos_dict.set("_output", sos_targets(_undetermined=True))
env.sos_dict.set("step_depends", sos_targets([]))
env.sos_dict.set("_depends", sos_targets([]))
#
# Common functions
#
def verify_output(self):
missing = sos_targets([])
if env.sos_dict["step_output"] is None:
return
if not env.sos_dict["step_output"].valid():
raise RuntimeError(
"Output of a completed step cannot be undetermined or unspecified."
)
for target in env.sos_dict["step_output"]:
if isinstance(target, (sos_step, invalid_target)):
continue
if isinstance(target, str):
if not file_target(target).target_exists("any"):
if env.config["run_mode"] == "dryrun":
# in dryrun mode, we just create these targets
file_target(target).create_placeholder()
else:
# latency wait for 2 seconds because the file system might be slow
if env.config["run_mode"] == "run":
time.sleep(2)
if not file_target(target).target_exists("any"):
if env.config["error_mode"] == "ignore":
missing.extend(target)
else:
raise RuntimeError(
f'Output target {target} does not exist after the completion of step {env.sos_dict["step_name"]} (curdir={os.getcwd()})'
)
elif not target.target_exists("any"):
if env.config["run_mode"] == "dryrun":
target.create_placeholder()
else:
if env.config["run_mode"] == "run":
time.sleep(2)
if not target.target_exists("any"):
if env.config["error_mode"] == "ignore":
missing.extend(target)
else:
raise RuntimeError(
f'Output target {target} does not exist after the completion of step {env.sos_dict["step_name"]}'
)
return missing
# directive input
def process_input_args(self, ifiles: sos_targets, **kwargs):
"""This function handles directive input and all its parameters.
It
determines and sets __step_input__
determines and sets pattern variables if needed
returns
_groups
_vars
which are groups of _input and related _vars
"""
if ifiles.unspecified():
env.sos_dict.set("step_input", sos_targets([]))
env.sos_dict.set("_input", sos_targets([]))
env.sos_dict.set("step_output", sos_targets())
return [sos_targets([])], [{}]
assert isinstance(ifiles, sos_targets)
if env.sos_dict.get("__dynamic_input__", False):
runner = self.verify_dynamic_targets(
[x for x in ifiles if isinstance(x, file_target)]
)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
# input file is the filtered files
env.sos_dict.set("step_input", ifiles)
env.sos_dict.set("_input", ifiles)
if ifiles._num_groups() == 0:
ifiles._group("all")
#
return ifiles.groups
def verify_dynamic_targets(self, target):
yield None
return True
def process_depends_args(self, dfiles: sos_targets, **kwargs):
for k in kwargs.keys():
if k not in SOS_DEPENDS_OPTIONS:
raise RuntimeError(f"Unrecognized depends option {k}")
if dfiles.undetermined():
raise ValueError(r"Depends needs to handle undetermined")
if env.sos_dict.get("__dynamic_depends__", False):
runner = self.verify_dynamic_targets(
[x for x in dfiles if isinstance(x, file_target)]
)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
env.sos_dict.set("_depends", dfiles)
env.sos_dict.set("step_depends", dfiles)
def process_output_args(self, ofiles: sos_targets, **kwargs):
for k in kwargs.keys():
if k not in SOS_OUTPUT_OPTIONS:
raise RuntimeError(f"Unrecognized output option {k}")
if ofiles._num_groups() > 0:
if ofiles._num_groups() == 1:
ofiles = ofiles._get_group(0)
elif ofiles._num_groups() != len(self._substeps):
raise RuntimeError(
f"Inconsistent number of output ({ofiles._num_groups()}) and input ({len(self._substeps)}) groups."
)
else:
ofiles = ofiles._get_group(env.sos_dict["_index"])
# create directory
if ofiles.valid():
parents = set(
[
os.path.abspath(os.path.join(ofile, os.pardir))
for ofile in ofiles
if isinstance(ofile, file_target)
]
)
for parent_dir in parents:
if parent_dir and not os.path.isdir(parent_dir):
os.makedirs(parent_dir, exist_ok=True)
# set variables
env.sos_dict.set("_output", ofiles)
env.sos_dict.set("step_output", ofiles)
#
for ofile in ofiles:
oname = ofile.target_name()
if oname in self._all_outputs:
raise ValueError(
f'Output {ofile} from substep {env.sos_dict["_index"]} of {env.sos_dict["__num_groups__"]} substeps overlaps with output from a previous substep.'
)
self._all_outputs.add(oname)
def submit_task(self, task_info):
if self.task_manager is None:
if self.step.task_params:
for key in ("trunk_size", "trunk_workers", "queue"):
val = get_value_of_param(
key, self.step.task_params, extra_dict=env.sos_dict.dict()
)
if val:
env.sos_dict["_runtime"][key] = val[0]
if "trunk_size" in env.sos_dict["_runtime"]:
trunk_size = env.sos_dict["_runtime"]["trunk_size"]
if trunk_size is None or trunk_size <= 0:
trunk_size = env.sos_dict["__num_groups__"]
if not isinstance(trunk_size, int):
raise ValueError(
f'An integer value or None is expected for runtime option trunk_size, "{trunk_size}" provided'
)
else:
trunk_size = 1
if "trunk_workers" in env.sos_dict["_runtime"]:
if "nodes" in env.sos_dict["_runtime"]:
raise ValueError(
'Option "trunk_workers" that specifies number of nodes and processes for the execution '
'of single-node jobs and option "nodes" that specifies number of nodes for single multi-node '
"jobs cannot be used at the same time."
)
trunk_workers = env.sos_dict["_runtime"]["trunk_workers"]
else:
trunk_workers = None
# if 'queue' in env.sos_dict['_runtime'] and env.sos_dict['_runtime']['queue']:
# host = env.sos_dict['_runtime']['queue']
# else:
# # otherwise, use workflow default
# host = '__default__'
self.task_manager = TaskManager(
env.sos_dict["__num_groups__"], trunk_size, trunk_workers
)
task_id = task_info["task_id"]
task_index = task_info["index"]
if task_id is None:
self.task_manager.set(task_index, None)
return None
taskdef = task_info["task_def"]
task_vars = task_info["task_vars"]
# 618
# it is possible that identical tasks are executed (with different underlying random numbers)
# we should either give a warning or produce different ids...
if self.task_manager.index_of(task_id) >= 0:
raise RuntimeError(
f'Task {task_id} generated for (_index={env.sos_dict["_index"]}) is identical to a previous one (_index={self.task_manager.index_of(task_id)}).'
)
elif self.task_manager.has_output(task_vars["_output"]):
raise RuntimeError(
f'Task produces output files {", ".join(task_vars["_output"])} that are output of other tasks.'
)
# if no trunk_size, the job will be submitted immediately
# otherwise tasks will be accumulated and submitted in batch
self.task_manager.set(task_index, (task_id, taskdef, task_vars["_output"]))
tasks = self.task_manager.get_job()
if tasks:
self.submit_tasks(tasks)
return task_id
def wait_for_results(self, all_submitted):
# this is a generator function because wait_for_tasks is a generator
# function and needs to yield to the caller
if self.concurrent_substep:
try:
runner = self.wait_for_substep()
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
if self.task_manager is None:
return {}
#
# report task
# what we should do here is to get the alias of the Host
# because it can be different (e.g. not localhost)
queue = env.sos_dict["_runtime"]["queue"]
# submit the last batch of tasks
tasks = self.task_manager.get_job(all_tasks=True)
if tasks:
self.submit_tasks(tasks)
# waiting for results of specified IDs
try:
# 1218
runner = self.wait_for_tasks(
self.task_manager._submitted_tasks, all_submitted
)
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration as e:
results = e.value
for id, result in results.items():
# turn to string to avoid naming lookup issue
rep_result = {
x: (y if isinstance(y, (int, bool, float, str)) else short_repr(y))
for x, y in result.items()
}
rep_result["tags"] = " ".join(self.task_manager.tags(id))
rep_result["queue"] = queue
send_message_to_controller(["workflow_sig", "task", id, repr(rep_result)])
self.task_manager.clear_submitted()
# if in dryrun mode, we display the output of the dryrun task
if env.config["run_mode"] == "dryrun":
tid = list(results.keys())[0]
tf = TaskFile(tid)
if tf.has_stdout():
print(TaskFile(tid).stdout)
for idx, task in self.proc_results.items():
# if it is done
if isinstance(task, dict):
continue
if task in results:
self.proc_results[idx] = results[task]
else:
# can be a subtask
for _, mres in results.items():
if "subtasks" in mres and task in mres["subtasks"]:
self.proc_results[idx] = mres["subtasks"][task]
# elif 'exception' in mres:
# self.proc_results[idx] = mres
#
# check if all have results?
if any(isinstance(x, str) for x in self.proc_results.values()):
raise RuntimeError(
f'Failed to get results for tasks {", ".join(x for x in self.proc_results.values() if isinstance(x, str))}'
)
#
for idx, res in self.proc_results.items():
if "skipped" in res and res["skipped"]:
self.completed["__task_skipped__"] += 1
# complete case: task skipped
send_message_to_controller(
["progress", "substep_completed", env.sos_dict["step_id"]]
)
else:
# complete case: task completed
send_message_to_controller(
["progress", "substep_ignored", env.sos_dict["step_id"]]
)
self.completed["__task_completed__"] += 1
if "shared" in res:
self.shared_vars[idx].update(res["shared"])
def log(self, stage=None, msg=""):
if stage == "start":
env.logger.info(
f'{"Checking" if env.config["run_mode"] == "dryrun" else "Running"} ``{self.step.step_name(True)}``: {self.step.comment.strip()}'
)
elif stage == "input statement":
if "STEP" in env.config["SOS_DEBUG"] or "ALL" in env.config["SOS_DEBUG"]:
env.log_to_file("STEP", f"Handling input statement {msg}")
elif stage == "_input":
if env.sos_dict["_input"] is not None and len(env.sos_dict["_input"]) > 0:
env.logger.debug(
f'_input: ``{short_repr(env.sos_dict["_input"])}``{msg}'
)
elif stage == "_depends":
if env.sos_dict["_depends"] is not None:
env.logger.debug(
f'_depends: ``{short_repr(env.sos_dict["_depends"])}``{msg}'
)
elif stage == "input":
if env.sos_dict["step_input"] is not None:
env.logger.info(
f'input: ``{short_repr(env.sos_dict["step_input"])}``{msg}'
)
elif stage == "output":
if (
env.sos_dict["step_output"] is not None
and len(env.sos_dict["step_output"]) > 0
):
env.logger.info(
f'``{self.step.step_name(True)}`` output: ``{short_repr(env.sos_dict["step_output"])}``{msg}'
)
def execute(self, stmt, return_result=False):
try:
self.last_res = SoS_exec(
stmt,
return_result=return_result or env.config["run_mode"] == "interactive",
)
if return_result:
return self.last_res
except (StopInputGroup, TerminateExecution, UnavailableLock):
raise
except subprocess.CalledProcessError as e:
raise RuntimeError(e.stderr)
except ArgumentError:
raise
except ProcessKilled:
raise
except KeyboardInterrupt as e:
raise RuntimeError(get_traceback_msg(e))
except Exception as e:
raise RuntimeError(get_traceback_msg(e))
def prepare_substep(self):
# socket to collect result
self.result_pull_socket = create_socket(
env.zmq_context, zmq.PULL, "substep result collector"
)
local_ip = get_localhost_ip()
port = self.result_pull_socket.bind_to_random_port(f"tcp://{local_ip}")
env.config["sockets"]["result_push_socket"] = f"tcp://{local_ip}:{port}"
def submit_substep(self, param):
send_message_to_controller(["substep", param])
def process_returned_substep_result(self, till=None, wait=True):
while True:
if not wait:
# 1213
cur_index = env.sos_dict["_index"]
pending_substeps = cur_index - self._completed_concurrent_substeps + 1
if pending_substeps < (
100
if isinstance(self.concurrent_substep, bool)
else self.concurrent_substep
):
if not self.result_pull_socket.poll(0):
return
elif (
"STEP" in env.config["SOS_DEBUG"]
or "ALL" in env.config["SOS_DEBUG"]
):
# if there are more than 100 pending substeps
# we wait indefinitely for the results
env.log_to_file(
"STEP",
f"Wait for more substeps to be done before submitting. (index={cur_index}, processed={self._completed_concurrent_substeps})",
)
elif self._completed_concurrent_substeps == till:
return
yield self.result_pull_socket
res = decode_msg(self.result_pull_socket.recv())
if "exception" in res:
if isinstance(res["exception"], ProcessKilled):
raise res["exception"]
elif isinstance(res["exception"], RemovedTarget):
pass
elif env.config["error_mode"] == "ignore":
idx_msg = (
f'(id={env.sos_dict["step_id"]}, index={res["index"]})'
if "index" in res and len(self._substeps) > 1
else f'(id={env.sos_dict["step_id"]})'
)
env.logger.warning(
f"""Ignoring error from ``{self.step.step_name(True)}`` {idx_msg}: {res["exception"]}."""
)
res["output"] = sos_targets(invalid_target())
elif env.config["error_mode"] == "abort":
idx_msg = (
f'(id={env.sos_dict["step_id"]}, index={res["index"]})'
if "index" in res and len(self._substeps) > 1
else f'(id={env.sos_dict["step_id"]})'
)
self.exec_error.append(idx_msg, res["exception"])
# try to stop everything, but wait for already submitted substeps to
# complete
waiting = till - 1 - self._completed_concurrent_substeps
env.logger.warning(
f'``{self.step.step_name(True)}`` {idx_msg} returns an error.{f" Terminating step after completing {waiting} submitted substeps." if waiting else " Terminating now."}'
)
for i in range(waiting):
yield self.result_pull_socket
res = decode_msg(self.result_pull_socket.recv())
if "exception" in res:
self.exec_error.append(
f'index={res["index"]}', res["exception"]
)
raise self.exec_error
else:
# default or unspecified
idx_msg = (
f'(id={env.sos_dict["step_id"]}, index={res["index"]})'
if "index" in res and len(self._substeps) > 1
else f'(id={env.sos_dict["step_id"]})'
)
self.exec_error.append(idx_msg, res["exception"])
#
if "index" not in res:
raise RuntimeError(
"Result received from substep does not have key index"
)
if "task_id" in res:
task = self.submit_task(res)
# if substep returns tasks, ...
if res["task_id"]:
self.proc_results[res["index"]] = task
else:
# if there is no task_id, the substep must have
# been skipped.
self.proc_results[res["index"]] = res
else:
self.proc_results[res["index"]] = res
self._completed_concurrent_substeps += 1
def wait_for_substep(self):
while self._completed_concurrent_substeps < len(self.proc_results):
try:
runner = self.process_returned_substep_result(
till=len(self.proc_results), wait=True
)
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
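# Taken together, prepare_substep (create the result PULL socket),
# submit_substep (hand the substep definition to the controller),
# process_returned_substep_result (collect results, resubmit returned tasks,
# and throttle submission) and wait_for_substep (drain the remaining results)
# form the lifecycle of concurrent substep execution driven by concurrent_exec.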
def collect_result(self):
# only results will be sent back to the master process
#
# __step_input__: input of this step
# __step_output__: output of this step
# __step_depends__: dependent files of this step
result = {
"__step_input__": env.sos_dict["step_input"],
"__step_output__": env.sos_dict["step_output"],
"__step_depends__": env.sos_dict["step_depends"],
"__step_name__": env.sos_dict["step_name"],
"__completed__": self.completed,
}
result["__last_res__"] = self.last_res
result["__shared__"] = {}
if "shared" in self.step.options:
result["__shared__"] = self.shared_vars
for x in result["__step_output__"].targets:
if isinstance(x, sos_variable):
result["__shared__"][x.target_name()] = env.sos_dict[x.target_name()]
send_message_to_controller(
[
"progress",
"step_completed",
-1
if "sos_run" in env.sos_dict["__signature_vars__"]
else self.completed["__step_completed__"],
env.sos_dict["step_name"],
env.sos_dict["step_output"],
]
)
return result
def set_task_queue_from_task_params(self):
if self.step.task_params:
try:
task_queue = get_value_of_param(
"queue", self.step.task_params, extra_dict=env.sos_dict.dict()
)
if task_queue:
env.sos_dict["_runtime"]["queue"] = task_queue[0]
except Exception as e:
raise ValueError(
f"Failed to determine value of parameter queue of {self.step.task_params}: {e}"
)
# # check concurrent #1134
# try:
# task_concurrency = get_value_of_param(
# 'concurrent',
# self.step.task_params,
# extra_dict=env.sos_dict.dict())
# if task_concurrency:
# env.sos_dict['_runtime']['concurrent'] = task_concurrency[0]
# except Exception as e:
# raise ValueError(
# f'Failed to determine value of parameter queue of {self.step.task_params}: {e}'
# )
# if -q is unspecified and option queue is unspecified,
# or queue=None is specified, disregard the task keyword
if (
env.config["default_queue"] is None
and "queue" not in env.sos_dict["_runtime"]
) or (
"queue" in env.sos_dict["_runtime"]
and env.sos_dict["_runtime"]["queue"] is None
):
# remove task statement
if len(self.step.statements) >= 1 and self.step.statements[-1][0] == "!":
self.step.statements[-1][1] += "\n" + self.step.task
else:
self.step.statements.append(["!", self.step.task])
self.step.task = None
# if queue is unspecified, it takes its value from the command line
# in this case -q should have been specified
elif "queue" not in env.sos_dict["_runtime"]:
env.sos_dict["_runtime"]["queue"] = env.config["default_queue"]
def local_exec_without_signature(self, statement):
idx = env.sos_dict["_index"]
env.log_to_file(
"STEP", f'Execute substep {env.sos_dict["step_name"]} without signature'
)
try:
if self.is_input_verified:
verify_input()
self.is_input_verified = False
if env.sos_dict.get("__concurrent_subworkflow__", False):
self._subworkflow_results.append(
self.execute(statement[1], return_result=True)
)
else:
self.execute(statement[1])
if not self.step.task and env.config["run_mode"] != "interactive":
env.logger.info(
f'``{self.step.step_name(True)}``{f" (index={idx})" if len(self._substeps) > 1 else ""} is ``completed``{" (pending nested workflow)" if self._subworkflow_results else ""}.'
)
finally:
if not self.step.task:
# if there is no task, this substep is completed
# complete case: local skip without task
send_message_to_controller(
["progress", "substep_completed", env.sos_dict["step_id"]]
)
if "shared" in self.step.options:
try:
self.shared_vars[env.sos_dict["_index"]].update(
{
x: env.sos_dict[x]
for x in self.vars_to_be_shared
if x in env.sos_dict
}
)
except Exception as e:
raise ValueError(f"Missing shared variable {e}.")
def local_exec_with_signature(self, statement, sig):
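# Execute the substep statement in the current process under a runtime
# signature: return True (skip) if the signature validates, otherwise run
# the statement with the signature locked and either write the signature or
# keep it pending until an attached task completes.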
idx = env.sos_dict["_index"]
# the signature might have been built outside of this function;
# if it was not (e.g. in some debug modes), it is built now
if sig is None:
sig = RuntimeInfo(
statementMD5([statement[1], self.step.task]),
env.sos_dict["_input"],
env.sos_dict["_output"],
env.sos_dict["_depends"],
env.sos_dict["__signature_vars__"],
shared_vars=self.vars_to_be_shared,
)
# if the signature matches, we skip the substep even if
# there are tasks.
matched = validate_step_sig(sig)
if matched:
if env.sos_dict["step_output"].undetermined():
self.output_groups[idx] = matched["output"]
if "vars" in matched:
self.shared_vars[idx].update(matched["vars"])
return True
env.log_to_file(
"STEP",
f'Execute substep {env.sos_dict["step_name"]} with signature {sig.sig_id}',
)
sig.lock()
try:
if self.is_input_verified:
verify_input()
self.is_input_verified = False
if env.sos_dict.get("__concurrent_subworkflow__", False):
self._subworkflow_results.append(
self.execute(statement[1], return_result=True)
)
else:
self.execute(statement[1])
if not self.step.task and env.config["run_mode"] != "interactive":
env.logger.info(
f'``{self.step.step_name(True)}``{f" (index={idx})" if len(self._substeps) > 1 else ""} is ``completed``{" (pending nested workflow)" if self._subworkflow_results else ""}.'
)
if "shared" in self.step.options:
try:
self.shared_vars[env.sos_dict["_index"]].update(
{
x: env.sos_dict[x]
for x in self.vars_to_be_shared
if x in env.sos_dict
}
)
except Exception as e:
raise ValueError(f"Missing shared variable {e}.")
finally:
# if this is the end of substep, save the signature
# otherwise we need to wait for the completion
# of the task.
if not self.step.task:
if env.sos_dict["step_output"].undetermined():
output = reevaluate_output()
self.output_groups[env.sos_dict["_index"]] = output
sig.set_output(output)
sig.write()
# complete case : local execution without task
send_message_to_controller(
["progress", "substep_completed", env.sos_dict["step_id"]]
)
else:
self.pending_signatures[idx] = sig
sig.release()
return False
def skip_substep(self):
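# Mark the current substep as ignored: for concurrent execution, record a
# zero-return-code placeholder result so bookkeeping stays consistent, and
# report "substep_ignored" to the controller.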
idx = env.sos_dict["_index"]
# if this is a concurrent substep, there might be later steps that need to be rerun
# and we need to mark some substeps as completed.
if self.concurrent_substep:
self._completed_concurrent_substeps += 1
self.proc_results[idx] = {
"index": idx,
"ret_code": 0,
"output": copy.deepcopy(env.sos_dict["_output"]),
}
send_message_to_controller(
["progress", "substep_ignored", env.sos_dict["step_id"]]
)
def concurrent_exec(self, statement, sig=None):
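# Package the substep (statement, task, required variables and config) and
# submit it to a substep worker for concurrent execution. Signatures are
# kept pending here when the substep also defines tasks, because task
# results come back to the step worker and are written later.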
idx = env.sos_dict["_index"]
env.log_to_file(
"STEP",
f'Execute substep {env.sos_dict["step_name"]} {idx} concurrently with {self._completed_concurrent_substeps} completed',
)
# the signatures are supposed to be written by the substep worker, however
# the substep worker might send tasks back to the step worker and
# we should write the signatures after the tasks are completed
if (
env.config["sig_mode"] != "ignore"
and not env.sos_dict["_output"].unspecified()
and self.step.task
):
self.pending_signatures[idx] = (
sig
if sig
else RuntimeInfo(
statementMD5([statement[1], self.step.task]),
env.sos_dict["_input"],
env.sos_dict["_output"],
env.sos_dict["_depends"],
env.sos_dict["__signature_vars__"],
shared_vars=self.vars_to_be_shared,
)
)
#
# step_output: needed only when it is undetermined
# step_input: not needed
# _input, _output, _depends, _index: needed
# step_name: for debug scripts
# step_id, workflow_id: for reporting to controller
# '__signature_vars__' to be used for signature creation
#
# __step_context__ is not needed because substep
# executor does not support nested workflow
proc_vars = (
env.sos_dict["__signature_vars__"]
| env.sos_dict["__environ_vars__"]
| {
"_input",
"_output",
"_depends",
"_index",
"step_output",
"step_name",
"_runtime",
"step_id",
"workflow_id",
"__num_groups__",
"__signature_vars__",
}
)
self.proc_results[env.sos_dict["_index"]] = {}
self.submit_substep(
dict(
stmt=statement[1],
global_def=self.step.global_def,
# 1225: the step might contain large variables from global section, but
# we do not have to send them if they are not used in substeps.
cwd=os.getcwd(),
global_vars={
x: y
for x, y in self.step.global_vars.items()
if x in env.sos_dict["__signature_vars__"]
},
task=self.step.task,
task_params=self.step.task_params,
proc_vars=env.sos_dict.clone_selected_vars(proc_vars),
shared_vars=self.vars_to_be_shared,
config=env.config,
)
)
def check_task_sig(self):
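# For a task-only substep (no final statement), validate the runtime
# signature and return True if the substep can be skipped; the signature
# itself stays pending until the task results come back.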
idx = env.sos_dict["_index"]
sig = RuntimeInfo(
statementMD5([self.step.task]),
env.sos_dict["_input"],
env.sos_dict["_output"],
env.sos_dict["_depends"],
env.sos_dict["__signature_vars__"],
shared_vars=self.vars_to_be_shared,
)
env.log_to_file(
"STEP",
f'Check task-only step {env.sos_dict["step_name"]} with signature {sig.sig_id}',
)
matched = validate_step_sig(sig)
skip_index = bool(matched)
if matched:
if env.sos_dict["step_output"].undetermined():
self.output_groups[env.sos_dict["_index"]] = matched["output"]
self.shared_vars[env.sos_dict["_index"]].update(matched["vars"])
# complete case: step with task ignored
send_message_to_controller(
["progress", "substep_ignored", env.sos_dict["step_id"]]
)
self.pending_signatures[idx] = sig
return skip_index
# def is_task_active(self):
# active = env.sos_dict['_runtime']['active']
# env.logger.error(active)
# if active is True:
# return True
# elif active is False:
# return False
# elif isinstance(active, int):
# if active >= 0 and env.sos_dict['_index'] != active:
# return False
# if active < 0 and env.sos_dict[
# '_index'] != active + env.sos_dict['__num_groups__']:
# return False
# return True
# elif isinstance(active, Sequence):
# allowed_index = list([
# x if x >= 0 else env.sos_dict['__num_groups__'] + x
# for x in active
# ])
# return env.sos_dict['_index'] in allowed_index
# elif isinstance(active, slice):
# allowed_index = list(range(env.sos_dict['__num_groups__']))[active]
# return env.sos_dict['_index'] in allowed_index
# else:
# raise RuntimeError(
# f'Unacceptable value for option active: {active}')
def check_results(self):
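# After all substeps have finished: replay captured stdout/stderr, write
# signatures that were deferred until task completion, clear the output of
# failed substeps, and collect exceptions into self.exec_error, which is
# raised at the end if it contains any error.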
for proc_result in [
x for x in self.proc_results.values() if x["ret_code"] == 0
]:
if "stdout" in proc_result and proc_result["stdout"]:
sys.stdout.write(proc_result["stdout"])
if "stderr" in proc_result and proc_result["stderr"]:
sys.stderr.write(proc_result["stderr"])
# now that output is settled, we can write remaining signatures
for idx, res in self.proc_results.items():
if (
self.pending_signatures[idx] is not None
and res["ret_code"] == 0
and "sig_skipped" not in res
):
# task might return output with vars #1355
self.pending_signatures[idx].set_output(self.output_groups[idx])
self.pending_signatures[idx].write()
if res["ret_code"] != 0 and "output" in res:
clear_output(output=res["output"])
for proc_result in [
x for x in self.proc_results.values() if x["ret_code"] != 0
]:
if "stdout" in proc_result and proc_result["stdout"]:
sys.stdout.write(proc_result["stdout"])
if "stderr" in proc_result and proc_result["stderr"]:
sys.stderr.write(proc_result["stderr"])
if "exception" in proc_result:
excp = proc_result["exception"]
if isinstance(excp, StopInputGroup):
if excp.message:
env.logger.info(excp.message)
self.output_groups[proc_result["index"]] = sos_targets([])
elif isinstance(excp, RemovedTarget):
raise excp
elif "task" in proc_result:
if env.config["error_mode"] == "ignore":
env.logger.warning(f"Ignore failed task {proc_result['task']}.")
# if the exception is from a task...
self.exec_error.append(proc_result["task"], excp)
else:
self.exec_error.append(
RuntimeError(
f"Substep failed with return code {proc_result['ret_code']}"
)
)
# this is after all substeps have been completed
if self.exec_error.errors:
raise self.exec_error
def calculate_completed(self):
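# Convert substep counts into step-level fractions, e.g. 3 completed and
# 1 skipped substeps yield __step_completed__ = 0.75 and
# __step_skipped__ = 0.25 (stored as int when the fraction is whole).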
substeps = (
self.completed["__substep_completed__"]
+ self.completed["__substep_skipped__"]
)
self.completed["__step_completed__"] = (
self.completed["__substep_completed__"] / substeps
)
self.completed["__step_skipped__"] = (
self.completed["__substep_skipped__"] / substeps
)
if self.completed["__step_completed__"].is_integer():
self.completed["__step_completed__"] = int(
self.completed["__step_completed__"]
)
if self.completed["__step_skipped__"].is_integer():
self.completed["__step_skipped__"] = int(self.completed["__step_skipped__"])
def run(self):
"""Execute a single step and return results. The result for batch mode is the
input, output etc returned as alias, and for interactive mode is the return value
of the last expression."""
# return value of the last executed statement
self.last_res = None
self.start_time = time.time()
self.completed = defaultdict(int)
#
# prepare environments, namely variables that can be used by the step
#
# * step_name: name of the step, can be used by step process to determine
# actions dynamically.
env.sos_dict.set("step_name", self.step.step_name())
env.sos_dict.set("__last_step__", self.step.last_step)
self.log("start")
env.sos_dict.set(
"step_id",
textMD5(
f'{env.sos_dict["workflow_id"]} {env.sos_dict["step_name"]} {self.step.md5}'
),
)
env.sos_dict.set("master_id", env.config["master_id"])
# used by nested workflow
env.sos_dict.set("__step_context__", self.step.context)
env.sos_dict.set("_runtime", {})
# * input: input files, which should be __step_output__ if it is defined, or
# None otherwise.
# * _input: first batch of input, which should be input if no input statement is used
# * output: None at first, can be redefined by output statement
# * _output: None at first, can be redefined by output statement
# * depends: None at first, can be redefined by depends statement
# * _depends: None at first, can be redefined by depends statement
#
self.init_input_output_vars()
# _index is needed for pre-input action's active option and for debug output of scripts
env.sos_dict.set("_index", 0)
if "STEP" in env.config["SOS_DEBUG"] or "ALL" in env.config["SOS_DEBUG"]:
env.log_to_file(
"STEP",
f'Executing step {env.sos_dict["step_name"]} with step_input {env.sos_dict["step_input"]} and step_output {env.sos_dict["step_output"]}',
)
self.set_task_queue_from_task_params()
# look for input statement.
input_statement_idx = [
idx
for idx, x in enumerate(self.step.statements)
if x[0] == ":" and x[1] == "input"
]
if not input_statement_idx:
input_statement_idx = None
elif len(input_statement_idx) == 1:
input_statement_idx = input_statement_idx[0]
else:
raise ValueError(
f"More than one step input are specified in step {self.step.step_name(True)}"
)
# if shared is true, we have to disable concurrent because we
# do not yet return anything from shared.
self.concurrent_substep = "shared" not in self.step.options
# and \
# ('concurrent' not in env.sos_dict['_runtime'] or env.sos_dict['_runtime']['concurrent'] is True)
if input_statement_idx is not None:
# execute before input stuff
for statement in self.step.statements[:input_statement_idx]:
if statement[0] == ":":
# wait for all dependent targets to be resolved
key, value = statement[1:3]
if key != "depends":
raise ValueError(f"Step input should be specified before {key}")
while True:
try:
args, kwargs = SoS_eval(
f"__null_func__({value})",
extra_dict={
"__null_func__": __null_func__,
"output_from": __output_from__,
"named_output": __named_output__,
"traced": __traced__,
},
)
dfiles = expand_depends_files(*args)
# dfiles can be Undetermined
runner = self.process_depends_args(dfiles, **kwargs)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
except (UnknownTarget, RemovedTarget) as e:
runner = self.handle_unknown_target(e)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
continue
except UnavailableLock:
raise
except Exception as e:
raise RuntimeError(
f"Failed to process step {key} ({value.strip()}): {e}"
)
break
else:
try:
# 1354
# if there are definitions before input, the definitions and imports
# must be added to global_def in order to be executed by substeps
if any(x in statement[1] for x in ("class", "def", "import")):
step_def = KeepOnlyImportAndDefine().visit(
ast.parse(statement[1])
)
if step_def.body:
if isinstance(self.step.global_def, ast.Module):
self.step.global_def.body.extend(step_def.body)
else:
self.step.global_def = step_def
self.execute(statement[1])
except StopInputGroup as e:
# stop before substeps, because there is no output statement before it
# we do not have to worry about keep_output
if e.message:
env.logger.info(e.message)
return self.collect_result()
# input statement
stmt = self.step.statements[input_statement_idx][2]
self.log("input statement", stmt)
while True:
# wait for all targets to be resolved
try:
args, kwargs = SoS_eval(
f"__null_func__({stmt})",
extra_dict={
"__null_func__": __null_func__,
"output_from": __output_from__,
"named_output": __named_output__,
"traced": __traced__,
},
)
# Files will be expanded differently with different running modes
input_files: sos_targets = expand_input_files(
*args,
**{
k: v
for k, v in kwargs.items()
if k not in SOS_INPUT_OPTIONS
},
)
runner = self.process_input_args(
input_files,
**{k: v for k, v in kwargs.items() if k in SOS_INPUT_OPTIONS},
)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration as e:
self._substeps = e.value
#
if "concurrent" in kwargs and self.concurrent_substep:
# concurrent can be True/False or an integer
self.concurrent_substep = kwargs["concurrent"]
except (UnknownTarget, RemovedTarget) as e:
runner = self.handle_unknown_target(e)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
continue
except UnavailableLock:
raise
except Exception as e:
raise ValueError(f"Failed to process input statement {stmt}: {e}")
break
input_statement_idx += 1
elif env.sos_dict["step_input"].groups:
# if default has groups...
# default case
self._substeps = env.sos_dict["step_input"].groups
# with no input statement, all statements from index 0 are treated as post-input
input_statement_idx = 0
else:
# default case
self._substeps = [env.sos_dict["step_input"]]
# with no input statement, all statements from index 0 are treated as post-input
input_statement_idx = 0
self.proc_results = {}
self.vars_to_be_shared = set()
if "shared" in self.step.options:
self.vars_to_be_shared = parse_shared_vars(self.step.options["shared"])
self.vars_to_be_shared = sorted(
[
x[5:] if x.startswith("step_") else x
for x in self.vars_to_be_shared
if x not in ("step_", "step_input", "step_output", "step_depends")
]
)
self.shared_vars = [{} for x in self._substeps]
# run steps after input statement, which will be run multiple times for each input
# group.
env.sos_dict.set("__num_groups__", len(self._substeps))
# determine if a single index or the whole step should be skipped
skip_index = False
# signatures of each index, which can remain None if no output
# is defined.
self.output_groups = [sos_targets([]) for x in self._substeps]
self.depends_groups = [sos_targets([]) for x in self._substeps]
# used to prevent overlapping output from substeps
self._all_outputs = set()
self._subworkflow_results = []
if (
any("sos_run" in x[1] for x in self.step.statements[input_statement_idx:])
and "shared" not in self.step.options
and not self.step.task
and self.step.statements[-1][0] == "!"
and (len(self.step.statements) == 1 or self.step.statements[-2][0] == ":")
and is_sos_run_the_only_last_stmt(self.step.statements[-1][1])
):
env.sos_dict.set("__concurrent_subworkflow__", True)
if self.concurrent_substep:
if len(self._substeps) <= 1 or env.config["run_mode"] == "dryrun":
self.concurrent_substep = False
elif any(
"sos_run" in x[1] for x in self.step.statements[input_statement_idx:]
):
self.concurrent_substep = False
env.logger.debug(
"Substeps are executed sequentially because of existence of multiple nested workflow."
)
else:
self.prepare_substep()
try:
self.completed["__substep_skipped__"] = 0
self.completed["__substep_completed__"] = len(self._substeps)
self._completed_concurrent_substeps = 0
# pending signatures are signatures for steps with external tasks
self.pending_signatures = [None for x in self._substeps]
for idx, g in enumerate(self._substeps):
#
# https://github.com/vatlab/sos/issues/1376
#
# [default]
# input: for_each=dict(i=range(1000))
# sos_run('a', t=i)
#
# with a workflow like the one above, where substeps execute quickly
# and each submits a subworkflow to the master, the master process
# could be swamped with subworkflows, causing "too many open files".
#
# the following code stops the step from continuing execution
# and waits for the subworkflows to complete.
#
if self._subworkflow_results:
try:
runner = self.wait_for_subworkflows(
allow_pending=env.config["worker_procs"]
)
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
# other variables
#
_vars = {}
# now, let us expose target level variables as lists
if len(g) > 1:
names = set.union(*[set(x._dict.keys()) for x in g._targets])
elif len(g) == 1:
names = set(g._targets[0]._dict.keys())
else:
names = set()
for name in names:
_vars[name] = [x.get(name) for x in g._targets]
# then we expose all group level variables
_vars.update(g._dict)
_vars.update(env.sos_dict["step_input"]._dict)
env.sos_dict.update(_vars)
env.sos_dict.set("_input", copy.deepcopy(g))
# set vars to _input
# env.sos_dict['_input'].set(**v)
self.log("_input")
env.sos_dict.set("_index", idx)
if env.config["error_mode"] == "ignore":
missed = [x for x in g.targets if not x.target_exists()]
if missed:
if any(isinstance(x, invalid_target) for x in missed):
env.logger.warning(
f'{self.step.step_name(True)}{f" (index={idx})" if len(self._substeps) > 1 else ""} ignored due to invalid input caused by previous failed substep.'
)
else:
env.logger.warning(
f'{self.step.step_name(True)}{f" (index={idx})" if len(self._substeps) > 1 else ""} ignored due to missing input {sos_targets(missed)}'
)
self.output_groups[idx] = sos_targets(invalid_target())
env.sos_dict.set("_output", sos_targets(invalid_target()))
self.skip_substep()
continue
# in interactive mode, because sos_dict are always shared
# execution of a substep, especially when it calls a nested
# workflow, would change step_name, __step_context__ etc, and
# we will have to reset these variables to make sure the next
# substep would execute normally. Batch mode is immune to this
# problem because nested workflows are executed in their own
# process/context etc
if env.config["run_mode"] == "interactive":
env.sos_dict.set("step_name", self.step.step_name())
env.sos_dict.set(
"step_id",
hash(
(
env.sos_dict["workflow_id"],
env.sos_dict["step_name"],
self.step.md5,
)
),
)
# used by nested workflow
env.sos_dict.set("__step_context__", self.step.context)
#
pre_statement = []
if (
not any(
st[0] == ":" and st[1] == "output"
for st in self.step.statements[input_statement_idx:]
)
and "__default_output__" in env.sos_dict
):
pre_statement = [[":", "output", "_output"]]
# if there is no statement and no task, claim success
post_statement = []
if not self.step.statements or self.step.statements[-1][0] != "!":
if self.step.task:
# if there is only task, we insert a fake statement so that it can be executed by the executor
post_statement = [["!", ""]]
else:
# complete case: no step, no statement
send_message_to_controller(
["progress", "substep_completed", env.sos_dict["step_id"]]
)
all_statements = (
pre_statement
+ self.step.statements[input_statement_idx:]
+ post_statement
)
self.is_input_verified = True
for statement_idx, statement in enumerate(all_statements):
is_last_runblock = statement_idx == len(all_statements) - 1
# if input is undetermined, we can only process output:
if not g.valid() and statement[0] != ":":
raise RuntimeError("Undetermined input encountered")
if statement[0] == ":":
key, value = statement[1:3]
# output, depends, and process can be processed multiple times
while True:
# loop for all unresolved targets to be resolved
try:
args, kwargs = SoS_eval(
f"__null_func__({value})",
extra_dict={
"__null_func__": __null_func__,
"output_from": __output_from__,
"named_output": __named_output__,
"traced": __traced__,
},
)
# dynamic output or dependent files
if key == "output":
# if output is defined, its default value needs to be cleared
if idx == 0:
env.sos_dict.set("step_output", sos_targets())
ofiles: sos_targets = expand_output_files(
value,
*args,
**{
k: v
for k, v in kwargs.items()
if k not in SOS_OUTPUT_OPTIONS
},
)
if g.valid() and ofiles.valid():
if any(
x in g._targets
for x in ofiles
if not isinstance(x, sos_step)
):
raise RuntimeError(
f'Overlapping input and output files: {", ".join(repr(x) for x in ofiles if x in g)}'
)
# set variable _output and output
self.process_output_args(
ofiles,
**{
k: v
for k, v in kwargs.items()
if k in SOS_OUTPUT_OPTIONS
},
)
self.output_groups[idx] = env.sos_dict["_output"]
elif key == "depends":
try:
dfiles = expand_depends_files(*args)
# dfiles can be Undetermined
runner = self.process_depends_args(
dfiles, **kwargs
)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
self.depends_groups[idx] = env.sos_dict[
"_depends"
]
self.log("_depends")
except Exception:
# env.logger.info(e)
raise
else:
raise RuntimeError(f"Unrecognized directive {key}")
# everything is ok, break
break
except (UnknownTarget, RemovedTarget) as e:
runner = self.handle_unknown_target(e)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
continue
except UnavailableLock:
raise
except Exception as e:
# if input is Undetermined, it is possible that output cannot be processed
# due to that, and we just return
if not g.valid():
env.logger.debug(e)
return self.collect_result()
raise RuntimeError(
f"Failed to process step {key} ({value.strip()}): {e}"
)
elif is_last_runblock:
if (
env.config["sig_mode"] == "skip"
and not self.vars_to_be_shared
and "sos_run" not in statement[1]
and not env.sos_dict["_output"].unspecified()
and len(env.sos_dict["_output"]) > 0
and all(
x.target_exists()
for x in env.sos_dict["_output"].targets
)
and env.sos_dict["_output"].later_than(
env.sos_dict["_input"]
)
):
self.skip_substep()
env.logger.info(
f'``{env.sos_dict["step_name"]}``{f" (index={idx})" if len(self._substeps) > 1 else ""} is ``skipped`` with existing output.'
)
skip_index = True
# do not execute the rest of the statement
break
#
# default mode, check if skipping substep
sig = None
if (
env.config["sig_mode"]
not in ("ignore", "distributed", "build")
and not env.sos_dict["_output"].unspecified()
):
sig = RuntimeInfo(
statementMD5([statement[1], self.step.task]),
env.sos_dict["_input"],
env.sos_dict["_output"],
env.sos_dict["_depends"],
env.sos_dict["__signature_vars__"],
shared_vars=self.vars_to_be_shared,
)
matched = validate_step_sig(sig)
skip_index = bool(matched)
if skip_index:
# matched["output"] might hav vars not defined in "output" #1355
env.sos_dict.set("_output", matched["output"])
self.output_groups[idx] = matched["output"]
if "vars" in matched:
self.shared_vars[idx].update(matched["vars"])
self.skip_substep()
break
try:
if self.concurrent_substep:
self.concurrent_exec(statement, sig)
# we check whether previously submitted substeps have completed and
# process their results, because further steps might depend on them
try:
runner = self.process_returned_substep_result(
till=idx + 1, wait=False
)
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
elif (
env.config["sig_mode"] == "ignore"
or env.sos_dict["_output"].unspecified()
):
self.local_exec_without_signature(statement)
else:
skip_index = self.local_exec_with_signature(
statement, sig
)
if skip_index:
self.skip_substep()
break
except StopInputGroup as e:
if not e.keep_output:
clear_output()
self.output_groups[idx] = sos_targets([])
if e.message:
env.logger.info(e.message)
skip_index = True
break
except Exception as e:
clear_output()
if env.config["error_mode"] == "abort":
raise
elif env.config["error_mode"] == "ignore":
idx_msg = (
f'(id={env.sos_dict["step_id"]}, index={idx})'
if len(self._substeps) > 1
else f'(id={env.sos_dict["step_id"]})'
)
env.logger.warning(
f"{self.step.step_name(True)} {idx_msg} returns no output due to error: {e}"
)
self.output_groups[idx] = sos_targets(invalid_target())
skip_index = True
else:
if env.config["run_mode"] != "interactive":
# default mode
idx_msg = (
f'(id={env.sos_dict["step_id"]}, index={idx})'
if len(self._substeps) > 1
else f'(id={env.sos_dict["step_id"]})'
)
env.logger.error(
f"{self.step.step_name(True)} {idx_msg} returns an error."
)
self.exec_error.append(str(idx), e)
else:
# if this is not the last statement group (e.g. statements before :output)
# we execute it locally without signature handling
if self.is_input_verified:
verify_input()
self.is_input_verified = False
try:
self.execute(statement[1])
except StopInputGroup as e:
if not e.keep_output:
clear_output()
self.output_groups[idx] = sos_targets([])
if e.message:
env.logger.info(e.message)
skip_index = True
break
except Exception:
clear_output()
raise
# if there is no statement, but there are tasks, we should
# check signature here.
if (
(not self.step.statements or self.step.statements[-1][0] != "!")
and self.step.task
and not self.concurrent_substep
and env.config["sig_mode"] != "ignore"
and not env.sos_dict["_output"].unspecified()
):
skip_index = self.check_task_sig()
# if this index is skipped, go directly to the next one
if skip_index:
self.completed["__substep_skipped__"] += 1
self.completed["__substep_completed__"] -= 1
skip_index = False
continue
# if concurrent input group, tasks are handled in substep
if self.concurrent_substep or not self.step.task:
continue
if env.config["run_mode"] == "dryrun" and env.sos_dict["_index"] != 0:
continue
# # check if the task is active
# if 'active' in env.sos_dict['_runtime']:
# if not self.is_task_active():
# continue
#
self.log("task")
try:
task_id, taskdef, task_vars = create_task(
self.step.global_def,
self.step.global_vars,
self.step.task,
self.step.task_params,
)
task = self.submit_task(
{
"index": env.sos_dict["_index"],
"task_id": task_id,
"task_def": taskdef,
"task_vars": task_vars,
}
)
self.proc_results[env.sos_dict["_index"]] = task
except Exception as e:
# FIXME: cannot catch exception from subprocesses
if env.verbosity > 2:
sys.stderr.write(get_traceback())
raise RuntimeError(
f'Failed to execute process\n"{short_repr(self.step.task)}"\n{e}'
)
#
# # if not concurrent, we have to wait for the completion of the task
# if 'concurrent' in env.sos_dict['_runtime'] and env.sos_dict[
# '_runtime']['concurrent'] is False:
# # in this case the steps must be executed not concurrently
# runner = self.wait_for_results(all_submitted=False)
# try:
# yreq = next(runner)
# while True:
# yres = yield yreq
# yreq = runner.send(yres)
# except StopIteration:
# pass
#
# endfor loop for each input group
#
if self._subworkflow_results:
try:
runner = self.wait_for_subworkflows(allow_pending=0)
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
env.sos_dict.pop("__concurrent_subworkflow__")
runner = self.wait_for_results(all_submitted=True)
try:
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration:
pass
for idx, res in self.proc_results.items():
if "sig_skipped" in res:
self.completed["__substep_skipped__"] += 1
self.completed["__substep_completed__"] -= 1
if "output" in res:
self.output_groups[idx] = res["output"]
# check results
self.check_results()
# if error happened but we allow all substeps to be completed, we now
# raise exception
if self.exec_error.errors:
raise self.exec_error
# if output is Undetermined, re-evaluate it
# finalize output from output_groups because some output might be skipped
# this is the final version of the output but we do maintain output
# during the execution of step, for compatibility.
env.sos_dict.set(
"step_output", sos_targets([])._add_groups(self.output_groups)
)
env.sos_dict.set(
"step_depends", sos_targets([])._add_groups(self.depends_groups)
)
# if there exists an option shared, the variable would be treated as
# provides=sos_variable(), and then as step_output
if "shared" in self.step.options:
self.shared_vars = evaluate_shared(
self.shared_vars, self.step.options["shared"]
)
env.sos_dict.quick_update(self.shared_vars)
missing = self.verify_output()
self.log(
"output",
msg=f'\033[95m missing: {short_repr(missing)} ({len(missing)} item{"s" if len(missing)>1 else ""})\033[0m'
if len(missing) > 0
else "",
)
self.calculate_completed()
def file_only(targets):
if not isinstance(targets, sos_targets):
env.logger.warning(
f"Unexpected input or output target for reporting. Empty list returned: {targets}"
)
return []
return [
(str(x), x.size())
for x in targets._targets
if isinstance(x, file_target)
]
step_info = {
"step_id": self.step.md5,
"start_time": self.start_time,
"stepname": self.step.step_name(True),
"substeps": len(self._substeps),
"input": file_only(env.sos_dict["step_input"]),
"output": file_only(env.sos_dict["step_output"]),
"completed": dict(self.completed),
"end_time": time.time(),
}
send_message_to_controller(
["workflow_sig", "step", env.sos_dict["workflow_id"], repr(step_info)]
)
return self.collect_result()
finally:
if self.concurrent_substep:
close_socket(self.result_pull_socket, "substep collector")
class Step_Executor(Base_Step_Executor):
"""Single process step executor"""
def __init__(self, step, socket, mode="run"):
self.run_mode = mode
env.config["run_mode"] = mode
super(Step_Executor, self).__init__(step)
self.socket = socket
# because step is executed in a separate SoS_Worker process, this
# __socket__ is available to all the actions that will be executed
# in the step
env.__socket__ = socket
def submit_tasks(self, tasks):
if "TASK" in env.config["SOS_DEBUG"] or "ALL" in env.config["SOS_DEBUG"]:
env.log_to_file("TASK", f"Send {tasks}")
self.socket.send(
encode_msg(["tasks", env.sos_dict["_runtime"]["queue"]] + tasks)
)
def wait_for_tasks(self, tasks, all_submitted):
# wait_for_tasks is a generator function that yields requests
# to the runner
if not tasks:
return {}
# when we wait, the "outsiders" also need to see the tags etc
# of the tasks so we have to write to the database. #156
send_message_to_controller(["commit_sig"])
# wait till the executor responds
results = {}
while True:
# yield an indicator of what is requested, for debugging purpose
yield self.socket
res = decode_msg(self.socket.recv())
if res is None:
sys.exit(0)
results.update(res)
# all results have been obtained.
if len(results) == len(tasks):
break
return results
def wait_for_subworkflows(self, allow_pending):
"""Wait for results from subworkflows"""
try:
allow_pending = int(allow_pending)
except:
allow_pending = min(max(os.cpu_count() // 2, 2), 8)
while self._subworkflow_results:
if allow_pending > 0:
n_pending = sum(
len(x["pending_workflows"]) for x in self._subworkflow_results
)
if n_pending <= allow_pending:
break
# here we did not check if workflow ids match
yield self.socket
res = decode_msg(self.socket.recv())
if res is None:
sys.exit(0)
elif isinstance(res, Exception):
raise res
if not "__workflow_id__" in res:
raise ValueError(f"Unrecognized result from subworkflows: {res}")
# remove from self._subworkflow_results
result_with_id = [
idx
for idx, x in enumerate(self._subworkflow_results)
if res["__workflow_id__"] in x["pending_workflows"]
]
if not result_with_id:
raise RuntimeError(
f"Failed to identify ID of returned subworkflow: {res}"
)
if len(result_with_id) > 1:
raise RuntimeError(
"Multiple matches of subworkflow ID. This should not happen."
)
self._subworkflow_results[result_with_id[0]]["pending_workflows"].remove(
res["__workflow_id__"]
)
if not self._subworkflow_results[result_with_id[0]]["pending_workflows"]:
self._subworkflow_results.pop(result_with_id[0])
def handle_unknown_target(self, e):
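# Ask the controller to resolve a missing target; if it cannot be
# provided by another step, re-raise the original exception.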
self.socket.send(encode_msg(["missing_target", e.target]))
yield self.socket
res = decode_msg(self.socket.recv())
if not res:
raise e
def verify_dynamic_targets(self, targets):
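# Report dynamically discovered dependencies to the controller and wait
# for them to be acknowledged as resolved; unless trace_existing is set,
# only targets marked as traced are sent.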
if not targets:
return
if env.config["trace_existing"]:
traced = targets
else:
traced = [x for x in targets if x.traced]
if not traced:
return
self.socket.send(encode_msg(["dependent_target"] + traced))
yield self.socket
res = decode_msg(self.socket.recv())
if res != "target_resolved":
raise RuntimeError(f"Failed to veryify dependent target {traced}")
def run(self):
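# Drive Base_Step_Executor.run as a generator, forwarding its yield/send
# protocol, then send the final result (or the accumulated exec_error)
# back to the master process over the step socket.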
try:
try:
# 1218
runner = Base_Step_Executor.run(self)
yreq = next(runner)
while True:
yres = yield yreq
yreq = runner.send(yres)
except StopIteration as e:
res = e.value
if self.socket is not None:
if (
"STEP" in env.config["SOS_DEBUG"]
or "ALL" in env.config["SOS_DEBUG"]
):
env.log_to_file(
"STEP",
f"Step {self.step.step_name()} sends result {short_repr(res)}",
)
self.socket.send(encode_msg(res))
else:
return res
except RemovedTarget as e:
# removed target needs to be handled differently since the workflow manager
# uses type information to get removed targets
if self.socket is not None and not self.socket.closed:
self.socket.send(encode_msg(e))
else:
raise e
except Exception as e:
if env.verbosity > 2:
sys.stderr.write(get_traceback())
if isinstance(e, ProcessKilled):
raise
# if not self.exec_error
if e != self.exec_error:
self.exec_error.append(self.step.step_name(), e)
#
if self.exec_error.errors:
if self.socket is not None and not self.socket.closed:
env.log_to_file(
"STEP",
f"Step {self.step.step_name()} sends exception {self.exec_error}",
)
self.socket.send(encode_msg(self.exec_error))
else:
raise self.exec_error
|
"""
SQL composition utility module
"""
# Copyright (C) 2020-2021 The Psycopg Team
import codecs
import string
from abc import ABC, abstractmethod
from typing import Any, Iterator, List, Optional, Sequence, Union
from .pq import Escaping
from .abc import AdaptContext
from .adapt import Transformer, PyFormat
from ._encodings import pgconn_encoding
def quote(obj: Any, context: Optional[AdaptContext] = None) -> str:
"""
Adapt a Python object to a quoted SQL string.
Use this function only if you absolutely want to convert a Python string to
an SQL quoted literal to use e.g. to generate batch SQL and you won't have
a connection available when you will need to use it.
This function is relatively inefficient, because it doesn't cache the
adaptation rules. If you pass a *context* you can adapt the adaptation
rules used, otherwise only global rules are used.
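Example (illustrative; the exact result depends on the adaptation rules
in use)::
>>> quote("O'Rourke")
"'O''Rourke'"
>>> quote(42)
'42'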
"""
return Literal(obj).as_string(context)
class Composable(ABC):
"""
Abstract base class for objects that can be used to compose an SQL string.
`!Composable` objects can be passed directly to
`~psycopg.Cursor.execute()`, `~psycopg.Cursor.executemany()`,
`~psycopg.Cursor.copy()` in place of the query string.
`!Composable` objects can be joined using the ``+`` operator: the result
will be a `Composed` instance containing the objects joined. The operator
``*`` is also supported with an integer argument: the result is a
`!Composed` instance containing the left argument repeated as many times as
requested.
"""
def __init__(self, obj: Any):
self._obj = obj
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self._obj!r})"
@abstractmethod
def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
"""
Return the value of the object as bytes.
:param context: the context to evaluate the object into.
:type context: `connection` or `cursor`
The method is automatically invoked by `~psycopg.Cursor.execute()`,
`~psycopg.Cursor.executemany()`, `~psycopg.Cursor.copy()` if a
`!Composable` is passed instead of the query string.
"""
raise NotImplementedError
def as_string(self, context: Optional[AdaptContext]) -> str:
"""
Return the value of the object as string.
:param context: the context to evaluate the string into.
:type context: `connection` or `cursor`
"""
conn = context.connection if context else None
enc = pgconn_encoding(conn.pgconn) if conn else "utf-8"
b = self.as_bytes(context)
if isinstance(b, bytes):
return b.decode(enc)
else:
# buffer object
return codecs.lookup(enc).decode(b)[0]
def __add__(self, other: "Composable") -> "Composed":
if isinstance(other, Composed):
return Composed([self]) + other
if isinstance(other, Composable):
return Composed([self]) + Composed([other])
else:
return NotImplemented
def __mul__(self, n: int) -> "Composed":
return Composed([self] * n)
def __eq__(self, other: Any) -> bool:
return type(self) is type(other) and self._obj == other._obj
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
class Composed(Composable):
"""
A `Composable` object made of a sequence of `!Composable`.
The object is usually created using `!Composable` operators and methods.
However it is possible to create a `!Composed` directly specifying a
sequence of objects as arguments: if they are not `!Composable` they will
be wrapped in a `Literal`.
Example::
>>> comp = sql.Composed(
... [sql.SQL("INSERT INTO "), sql.Identifier("table")])
>>> print(comp.as_string(conn))
INSERT INTO "table"
`!Composed` objects are iterable (so they can be used in `SQL.join` for
instance).
"""
_obj: List[Composable]
def __init__(self, seq: Sequence[Any]):
seq = [
obj if isinstance(obj, Composable) else Literal(obj) for obj in seq
]
super().__init__(seq)
def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
return b"".join(obj.as_bytes(context) for obj in self._obj)
def __iter__(self) -> Iterator[Composable]:
return iter(self._obj)
def __add__(self, other: Composable) -> "Composed":
if isinstance(other, Composed):
return Composed(self._obj + other._obj)
if isinstance(other, Composable):
return Composed(self._obj + [other])
else:
return NotImplemented
def join(self, joiner: Union["SQL", str]) -> "Composed":
"""
Return a new `!Composed` interposing the *joiner* with the `!Composed` items.
The *joiner* must be a `SQL` or a string which will be interpreted as
an `SQL`.
Example::
>>> fields = sql.Identifier('foo') + sql.Identifier('bar') # a Composed
>>> print(fields.join(', ').as_string(conn))
"foo", "bar"
"""
if isinstance(joiner, str):
joiner = SQL(joiner)
elif not isinstance(joiner, SQL):
raise TypeError(
f"Composed.join() argument must be strings or SQL,"
f" got {joiner!r} instead"
)
return joiner.join(self._obj)
class SQL(Composable):
"""
A `Composable` representing a snippet of SQL statement.
`!SQL` exposes `join()` and `format()` methods useful to create a template
where to merge variable parts of a query (for instance field or table
names).
The *string* doesn't undergo any form of escaping, so it is not suitable to
represent variable identifiers or values: you should only use it to pass
constant strings representing templates or snippets of SQL statements; use
other objects such as `Identifier` or `Literal` to represent variable
parts.
Example::
>>> query = sql.SQL("SELECT {0} FROM {1}").format(
... sql.SQL(', ').join([sql.Identifier('foo'), sql.Identifier('bar')]),
... sql.Identifier('table'))
>>> print(query.as_string(conn))
SELECT "foo", "bar" FROM "table"
"""
_obj: str
_formatter = string.Formatter()
def __init__(self, obj: str):
super().__init__(obj)
if not isinstance(obj, str):
raise TypeError(f"SQL values must be strings, got {obj!r} instead")
def as_string(self, context: Optional[AdaptContext]) -> str:
return self._obj
def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
enc = "utf-8"
if context:
conn = context.connection
if conn:
enc = pgconn_encoding(conn.pgconn)
return self._obj.encode(enc)
def format(self, *args: Any, **kwargs: Any) -> Composed:
"""
Merge `Composable` objects into a template.
:param args: parameters to replace to numbered (``{0}``, ``{1}``) or
auto-numbered (``{}``) placeholders
:param kwargs: parameters to replace to named (``{name}``) placeholders
:return: the union of the `!SQL` string with placeholders replaced
:rtype: `Composed`
The method is similar to the Python `str.format()` method: the string
template supports auto-numbered (``{}``), numbered (``{0}``,
``{1}``...), and named placeholders (``{name}``), with positional
arguments replacing the numbered placeholders and keywords replacing
the named ones. However placeholder modifiers (``{0!r}``, ``{0:<10}``)
are not supported.
If a `!Composable` object is passed to the template it will be merged
according to its `as_string()` method. If any other Python object is
passed, it will be wrapped in a `Literal` object and so escaped
according to SQL rules.
Example::
>>> print(sql.SQL("SELECT * FROM {} WHERE {} = %s")
... .format(sql.Identifier('people'), sql.Identifier('id'))
... .as_string(conn))
SELECT * FROM "people" WHERE "id" = %s
>>> print(sql.SQL("SELECT * FROM {tbl} WHERE name = {name}")
... .format(tbl=sql.Identifier('people'), name="O'Rourke")
... .as_string(conn))
SELECT * FROM "people" WHERE name = 'O''Rourke'
"""
rv: List[Composable] = []
autonum: Optional[int] = 0
for pre, name, spec, conv in self._formatter.parse(self._obj):
if spec:
raise ValueError("no format specification supported by SQL")
if conv:
raise ValueError("no format conversion supported by SQL")
if pre:
rv.append(SQL(pre))
if name is None:
continue
if name.isdigit():
if autonum:
raise ValueError(
"cannot switch from automatic field numbering to manual"
)
rv.append(args[int(name)])
autonum = None
elif not name:
if autonum is None:
raise ValueError(
"cannot switch from manual field numbering to automatic"
)
rv.append(args[autonum])
autonum += 1
else:
rv.append(kwargs[name])
return Composed(rv)
def join(self, seq: Sequence[Composable]) -> Composed:
"""
Join a sequence of `Composable`.
:param seq: the elements to join.
:type seq: iterable of `!Composable`
Use the `!SQL` object's *string* to separate the elements in *seq*.
Note that `Composed` objects are iterable too, so they can be used as
argument for this method.
Example::
>>> snip = sql.SQL(', ').join(
... sql.Identifier(n) for n in ['foo', 'bar', 'baz'])
>>> print(snip.as_string(conn))
"foo", "bar", "baz"
"""
rv = []
it = iter(seq)
try:
rv.append(next(it))
except StopIteration:
pass
else:
for i in it:
rv.append(self)
rv.append(i)
return Composed(rv)
class Identifier(Composable):
"""
A `Composable` representing an SQL identifier or a dot-separated sequence.
Identifiers usually represent names of database objects, such as tables or
fields. PostgreSQL identifiers follow `different rules`__ than SQL string
literals for escaping (e.g. they use double quotes instead of single).
.. __: https://www.postgresql.org/docs/current/sql-syntax-lexical.html# \
SQL-SYNTAX-IDENTIFIERS
Example::
>>> t1 = sql.Identifier("foo")
>>> t2 = sql.Identifier("ba'r")
>>> t3 = sql.Identifier('ba"z')
>>> print(sql.SQL(', ').join([t1, t2, t3]).as_string(conn))
"foo", "ba'r", "ba""z"
Multiple strings can be passed to the object to represent a qualified name,
i.e. a dot-separated sequence of identifiers.
Example::
>>> query = sql.SQL("SELECT {} FROM {}").format(
... sql.Identifier("table", "field"),
... sql.Identifier("schema", "table"))
>>> print(query.as_string(conn))
SELECT "table"."field" FROM "schema"."table"
"""
_obj: Sequence[str]
def __init__(self, *strings: str):
# init super() now to make the __repr__ not explode in case of error
super().__init__(strings)
if not strings:
raise TypeError("Identifier cannot be empty")
for s in strings:
if not isinstance(s, str):
raise TypeError(
f"SQL identifier parts must be strings, got {s!r} instead"
)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({", ".join(map(repr, self._obj))})"
def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
conn = context.connection if context else None
if not conn:
raise ValueError("a connection is necessary for Identifier")
esc = Escaping(conn.pgconn)
enc = pgconn_encoding(conn.pgconn)
escs = [esc.escape_identifier(s.encode(enc)) for s in self._obj]
return b".".join(escs)
class Literal(Composable):
"""
A `Composable` representing an SQL value to include in a query.
Usually you will want to include placeholders in the query and pass values
as `~cursor.execute()` arguments. If however you really really need to
include a literal value in the query you can use this object.
The string returned by `!as_string()` follows the normal :ref:`adaptation
rules <types-adaptation>` for Python objects.
Example::
>>> s1 = sql.Literal("foo")
>>> s2 = sql.Literal("ba'r")
>>> s3 = sql.Literal(42)
>>> print(sql.SQL(', ').join([s1, s2, s3]).as_string(conn))
'foo', 'ba''r', 42
"""
def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
tx = Transformer(context)
dumper = tx.get_dumper(self._obj, PyFormat.TEXT)
return dumper.quote(self._obj)
class Placeholder(Composable):
"""A `Composable` representing a placeholder for query parameters.
If the name is specified, generate a named placeholder (e.g. ``%(name)s``,
``%(name)b``), otherwise generate a positional placeholder (e.g. ``%s``,
``%b``).
The object is useful to generate SQL queries with a variable number of
arguments.
Examples::
>>> names = ['foo', 'bar', 'baz']
>>> q1 = sql.SQL("INSERT INTO my_table ({}) VALUES ({})").format(
... sql.SQL(', ').join(map(sql.Identifier, names)),
... sql.SQL(', ').join(sql.Placeholder() * len(names)))
>>> print(q1.as_string(conn))
INSERT INTO my_table ("foo", "bar", "baz") VALUES (%s, %s, %s)
>>> q2 = sql.SQL("INSERT INTO my_table ({}) VALUES ({})").format(
... sql.SQL(', ').join(map(sql.Identifier, names)),
... sql.SQL(', ').join(map(sql.Placeholder, names)))
>>> print(q2.as_string(conn))
INSERT INTO my_table ("foo", "bar", "baz") VALUES (%(foo)s, %(bar)s, %(baz)s)
"""
def __init__(self, name: str = "", format: PyFormat = PyFormat.AUTO):
super().__init__(name)
if not isinstance(name, str):
raise TypeError(f"expected string as name, got {name!r}")
if ")" in name:
raise ValueError(f"invalid name: {name!r}")
self._format = format
def __repr__(self) -> str:
parts = []
if self._obj:
parts.append(repr(self._obj))
if self._format != PyFormat.AUTO:
parts.append(f"format={PyFormat(self._format).name}")
return f"{self.__class__.__name__}({", ".join(parts)})"
def as_string(self, context: Optional[AdaptContext]) -> str:
code = self._format
return f"%({self._obj}){code}" if self._obj else f"%{code}"
def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
conn = context.connection if context else None
enc = pgconn_encoding(conn.pgconn) if conn else "utf-8"
return self.as_string(context).encode(enc)
# Literals
NULL = SQL("NULL")
DEFAULT = SQL("DEFAULT")
|
"""
SQL composition utility module
"""
# Copyright (C) 2020-2021 The Psycopg Team
import codecs
import string
from abc import ABC, abstractmethod
from typing import Any, Iterator, List, Optional, Sequence, Union
from .pq import Escaping
from .abc import AdaptContext
from .adapt import Transformer, PyFormat
from ._encodings import pgconn_encoding
def quote(obj: Any, context: Optional[AdaptContext] = None) -> str:
"""
Adapt a Python object to a quoted SQL string.
Use this function only if you absolutely want to convert a Python string to
an SQL quoted literal to use e.g. to generate batch SQL and you won't have
a connection available when you will need to use it.
This function is relatively inefficient, because it doesn't cache the
adaptation rules. If you pass a *context* you can adapt the adaptation
rules used, otherwise only global rules are used.
"""
return Literal(obj).as_string(context)
class Composable(ABC):
"""
Abstract base class for objects that can be used to compose an SQL string.
`!Composable` objects can be passed directly to
`~psycopg.Cursor.execute()`, `~psycopg.Cursor.executemany()`,
`~psycopg.Cursor.copy()` in place of the query string.
`!Composable` objects can be joined using the ``+`` operator: the result
will be a `Composed` instance containing the objects joined. The operator
``*`` is also supported with an integer argument: the result is a
`!Composed` instance containing the left argument repeated as many times as
requested.
"""
def __init__(self, obj: Any):
self._obj = obj
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self._obj!r})"
@abstractmethod
def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
"""
Return the value of the object as bytes.
:param context: the context to evaluate the object into.
:type context: `connection` or `cursor`
The method is automatically invoked by `~psycopg.Cursor.execute()`,
`~psycopg.Cursor.executemany()`, `~psycopg.Cursor.copy()` if a
`!Composable` is passed instead of the query string.
"""
raise NotImplementedError
def as_string(self, context: Optional[AdaptContext]) -> str:
"""
Return the value of the object as string.
:param context: the context to evaluate the string into.
:type context: `connection` or `cursor`
"""
conn = context.connection if context else None
enc = pgconn_encoding(conn.pgconn) if conn else "utf-8"
b = self.as_bytes(context)
if isinstance(b, bytes):
return b.decode(enc)
else:
# buffer object
return codecs.lookup(enc).decode(b)[0]
def __add__(self, other: "Composable") -> "Composed":
if isinstance(other, Composed):
return Composed([self]) + other
if isinstance(other, Composable):
return Composed([self]) + Composed([other])
else:
return NotImplemented
def __mul__(self, n: int) -> "Composed":
return Composed([self] * n)
def __eq__(self, other: Any) -> bool:
return type(self) is type(other) and self._obj == other._obj
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
class Composed(Composable):
"""
A `Composable` object made of a sequence of `!Composable`.
The object is usually created using `!Composable` operators and methods.
However it is possible to create a `!Composed` directly specifying a
sequence of objects as arguments: if they are not `!Composable` they will
be wrapped in a `Literal`.
Example::
>>> comp = sql.Composed(
... [sql.SQL("INSERT INTO "), sql.Identifier("table")])
>>> print(comp.as_string(conn))
INSERT INTO "table"
`!Composed` objects are iterable (so they can be used in `SQL.join` for
instance).
"""
_obj: List[Composable]
def __init__(self, seq: Sequence[Any]):
seq = [
obj if isinstance(obj, Composable) else Literal(obj) for obj in seq
]
super().__init__(seq)
def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
return b"".join(obj.as_bytes(context) for obj in self._obj)
def __iter__(self) -> Iterator[Composable]:
return iter(self._obj)
def __add__(self, other: Composable) -> "Composed":
if isinstance(other, Composed):
return Composed(self._obj + other._obj)
if isinstance(other, Composable):
return Composed(self._obj + [other])
else:
return NotImplemented
def join(self, joiner: Union["SQL", str]) -> "Composed":
"""
Return a new `!Composed` interposing the *joiner* with the `!Composed` items.
The *joiner* must be a `SQL` or a string which will be interpreted as
an `SQL`.
Example::
>>> fields = sql.Identifier('foo') + sql.Identifier('bar') # a Composed
>>> print(fields.join(', ').as_string(conn))
"foo", "bar"
"""
if isinstance(joiner, str):
joiner = SQL(joiner)
elif not isinstance(joiner, SQL):
raise TypeError(
f"Composed.join() argument must be strings or SQL,"
f" got {joiner!r} instead"
)
return joiner.join(self._obj)
class SQL(Composable):
"""
A `Composable` representing a snippet of SQL statement.
`!SQL` exposes `join()` and `format()` methods useful to create a template
where to merge variable parts of a query (for instance field or table
names).
The *string* doesn't undergo any form of escaping, so it is not suitable to
represent variable identifiers or values: you should only use it to pass
constant strings representing templates or snippets of SQL statements; use
other objects such as `Identifier` or `Literal` to represent variable
parts.
Example::
>>> query = sql.SQL("SELECT {0} FROM {1}").format(
... sql.SQL(', ').join([sql.Identifier('foo'), sql.Identifier('bar')]),
... sql.Identifier('table'))
>>> print(query.as_string(conn))
SELECT "foo", "bar" FROM "table"
"""
_obj: str
_formatter = string.Formatter()
def __init__(self, obj: str):
super().__init__(obj)
if not isinstance(obj, str):
raise TypeError(f"SQL values must be strings, got {obj!r} instead")
def as_string(self, context: Optional[AdaptContext]) -> str:
return self._obj
def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
enc = "utf-8"
if context:
conn = context.connection
if conn:
enc = pgconn_encoding(conn.pgconn)
return self._obj.encode(enc)
def format(self, *args: Any, **kwargs: Any) -> Composed:
"""
Merge `Composable` objects into a template.
:param args: parameters to replace to numbered (``{0}``, ``{1}``) or
auto-numbered (``{}``) placeholders
:param kwargs: parameters to replace to named (``{name}``) placeholders
:return: the union of the `!SQL` string with placeholders replaced
:rtype: `Composed`
The method is similar to the Python `str.format()` method: the string
template supports auto-numbered (``{}``), numbered (``{0}``,
``{1}``...), and named placeholders (``{name}``), with positional
arguments replacing the numbered placeholders and keywords replacing
the named ones. However placeholder modifiers (``{0!r}``, ``{0:<10}``)
are not supported.
If a `!Composable` object is passed to the template it will be merged
according to its `as_string()` method. If any other Python object is
passed, it will be wrapped in a `Literal` object and so escaped
according to SQL rules.
Example::
>>> print(sql.SQL("SELECT * FROM {} WHERE {} = %s")
... .format(sql.Identifier('people'), sql.Identifier('id'))
... .as_string(conn))
SELECT * FROM "people" WHERE "id" = %s
>>> print(sql.SQL("SELECT * FROM {tbl} WHERE name = {name}")
... .format(tbl=sql.Identifier('people'), name="O'Rourke")
... .as_string(conn))
SELECT * FROM "people" WHERE name = 'O''Rourke'
"""
rv: List[Composable] = []
autonum: Optional[int] = 0
for pre, name, spec, conv in self._formatter.parse(self._obj):
if spec:
raise ValueError("no format specification supported by SQL")
if conv:
raise ValueError("no format conversion supported by SQL")
if pre:
rv.append(SQL(pre))
if name is None:
continue
if name.isdigit():
if autonum:
raise ValueError(
"cannot switch from automatic field numbering to manual"
)
rv.append(args[int(name)])
autonum = None
elif not name:
if autonum is None:
raise ValueError(
"cannot switch from manual field numbering to automatic"
)
rv.append(args[autonum])
autonum += 1
else:
rv.append(kwargs[name])
return Composed(rv)
def join(self, seq: Sequence[Composable]) -> Composed:
"""
Join a sequence of `Composable`.
:param seq: the elements to join.
:type seq: iterable of `!Composable`
Use the `!SQL` object's *string* to separate the elements in *seq*.
Note that `Composed` objects are iterable too, so they can be used as
argument for this method.
Example::
>>> snip = sql.SQL(', ').join(
... sql.Identifier(n) for n in ['foo', 'bar', 'baz'])
>>> print(snip.as_string(conn))
"foo", "bar", "baz"
"""
rv = []
it = iter(seq)
try:
rv.append(next(it))
except StopIteration:
pass
else:
for i in it:
rv.append(self)
rv.append(i)
return Composed(rv)
class Identifier(Composable):
"""
A `Composable` representing an SQL identifier or a dot-separated sequence.
Identifiers usually represent names of database objects, such as tables or
fields. PostgreSQL identifiers follow `different rules`__ than SQL string
literals for escaping (e.g. they use double quotes instead of single).
.. __: https://www.postgresql.org/docs/current/sql-syntax-lexical.html# \
SQL-SYNTAX-IDENTIFIERS
Example::
>>> t1 = sql.Identifier("foo")
>>> t2 = sql.Identifier("ba'r")
>>> t3 = sql.Identifier('ba"z')
>>> print(sql.SQL(', ').join([t1, t2, t3]).as_string(conn))
"foo", "ba'r", "ba""z"
Multiple strings can be passed to the object to represent a qualified name,
i.e. a dot-separated sequence of identifiers.
Example::
>>> query = sql.SQL("SELECT {} FROM {}").format(
... sql.Identifier("table", "field"),
... sql.Identifier("schema", "table"))
>>> print(query.as_string(conn))
SELECT "table"."field" FROM "schema"."table"
"""
_obj: Sequence[str]
def __init__(self, *strings: str):
# init super() now to make the __repr__ not explode in case of error
super().__init__(strings)
if not strings:
raise TypeError("Identifier cannot be empty")
for s in strings:
if not isinstance(s, str):
raise TypeError(
f"SQL identifier parts must be strings, got {s!r} instead"
)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({', '.join(map(repr, self._obj))})"
def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
conn = context.connection if context else None
if not conn:
raise ValueError("a connection is necessary for Identifier")
esc = Escaping(conn.pgconn)
enc = pgconn_encoding(conn.pgconn)
escs = [esc.escape_identifier(s.encode(enc)) for s in self._obj]
return b".".join(escs)
class Literal(Composable):
"""
A `Composable` representing an SQL value to include in a query.
Usually you will want to include placeholders in the query and pass values
as `~cursor.execute()` arguments. If however you really really need to
include a literal value in the query you can use this object.
The string returned by `!as_string()` follows the normal :ref:`adaptation
rules <types-adaptation>` for Python objects.
Example::
>>> s1 = sql.Literal("foo")
>>> s2 = sql.Literal("ba'r")
>>> s3 = sql.Literal(42)
>>> print(sql.SQL(', ').join([s1, s2, s3]).as_string(conn))
'foo', 'ba''r', 42
"""
def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
tx = Transformer(context)
dumper = tx.get_dumper(self._obj, PyFormat.TEXT)
return dumper.quote(self._obj)
class Placeholder(Composable):
"""A `Composable` representing a placeholder for query parameters.
If the name is specified, generate a named placeholder (e.g. ``%(name)s``,
``%(name)b``), otherwise generate a positional placeholder (e.g. ``%s``,
``%b``).
The object is useful to generate SQL queries with a variable number of
arguments.
Examples::
>>> names = ['foo', 'bar', 'baz']
>>> q1 = sql.SQL("INSERT INTO my_table ({}) VALUES ({})").format(
... sql.SQL(', ').join(map(sql.Identifier, names)),
... sql.SQL(', ').join(sql.Placeholder() * len(names)))
>>> print(q1.as_string(conn))
INSERT INTO my_table ("foo", "bar", "baz") VALUES (%s, %s, %s)
>>> q2 = sql.SQL("INSERT INTO my_table ({}) VALUES ({})").format(
... sql.SQL(', ').join(map(sql.Identifier, names)),
... sql.SQL(', ').join(map(sql.Placeholder, names)))
>>> print(q2.as_string(conn))
INSERT INTO my_table ("foo", "bar", "baz") VALUES (%(foo)s, %(bar)s, %(baz)s)
"""
def __init__(self, name: str = "", format: PyFormat = PyFormat.AUTO):
super().__init__(name)
if not isinstance(name, str):
raise TypeError(f"expected string as name, got {name!r}")
if ")" in name:
raise ValueError(f"invalid name: {name!r}")
self._format = format
def __repr__(self) -> str:
parts = []
if self._obj:
parts.append(repr(self._obj))
if self._format != PyFormat.AUTO:
parts.append(f"format={PyFormat(self._format).name}")
return f"{self.__class__.__name__}({', '.join(parts)})"
def as_string(self, context: Optional[AdaptContext]) -> str:
code = self._format
return f"%({self._obj}){code}" if self._obj else f"%{code}"
def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
conn = context.connection if context else None
enc = pgconn_encoding(conn.pgconn) if conn else "utf-8"
return self.as_string(context).encode(enc)
# Literals
NULL = SQL("NULL")
DEFAULT = SQL("DEFAULT")
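# A minimal composition sketch (hedged: `conn` stands for an open psycopg
# connection, which is not created here). It illustrates the behavior documented
# above: plain Python values are wrapped in `Literal` and escaped, while the
# `DEFAULT` constant is spliced in verbatim.
#
# >>> q = SQL("UPDATE {} SET note = {}, ts = {}").format(
# ...     Identifier("people"), "it's ok", DEFAULT)
# >>> print(q.as_string(conn))
# UPDATE "people" SET note = 'it''s ok', ts = DEFAULT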
|
from __future__ import annotations
from anytree import NodeMixin
from datetime import datetime, timezone
from dotenv import load_dotenv
from os import environ
from os.path import join, dirname
from typing import Tuple, List, Any, Dict, Optional
import re
import requests
from rich.box import Box
__all__ = [
"get_data", "get_token", "get_url_info", "human_size", "humanize_time",
"populate_tree", "ROUNDED_BORDER", "run_query", "set_token", "sort_entries"
]
ROUNDED_BORDER: Box = Box(
"""\
╭──╮
│ │
│ │
│ │
│ │
│ │
│ │
╰──╯
"""
)
def get_token() -> str:
"""
Retrieves the Github Personal Access Token from .env file
"""
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
return environ.get("GITSORT_TOKEN")
def set_token(token: str) -> None:
"""
Set your Github personal access token in order to access
private repositories and extend the usage of the GraphQL API.
"""
import os
from dotenv import load_dotenv
from os.path import join, dirname
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
gitsort_token = os.environ.get("GITSORT_TOKEN")
if not gitsort_token:
with open(dotenv_path, "w") as f:
f.write(f"GITSORT_TOKEN={token}")
print("Github Token set!")
else:
inp = input("Github token already set! Do you want to update it? [y/n] ").lower()
while inp not in ["y", "n"]:
print("Invalid answer")
inp = input("Github token already set! Do you want to update it? [y/n] ").lower()
if inp == "y":
with open(dotenv_path, "w") as f:
f.write(f"GITSORT_TOKEN={token}")
print("Github Token updated!")
def run_query(
query: str,
token: str,
variables: dict | None = None,
headers: dict | None = None
) -> Tuple[dict, str]:
"""
Runs a Github GraphQL query and returns the result
:param query: str
GraphQL query
:param token: str
        The user's Github Personal Access Token
:param variables: dict
GraphQL Variables
:param headers: dict
Request headers
:return: tuple
The response and rate limit
"""
if not headers:
headers = {"Authorization": f"Bearer {token}"}
request = requests.post(
'https://api.github.com/graphql',
json={'query': query, 'variables': variables},
headers=headers
)
if request.status_code == 200:
return request.json(), request.headers["X-RateLimit-Remaining"]
else:
raise Exception("Query failed to run by returning code of {}. {}".format(request.status_code, query))
def get_data(
query: str,
token: str,
query_variables: Dict[str, str]
) -> Tuple[bool, Any, str]:
"""
Get data from query
:param query: str
Graphql Query
:param token: str
Github Personal Access Token
:param query_variables: dict
Variables used in query
:return: tuple
        returns a tuple of three items:
0. bool: True if query failed and return error messages else False
1. Any: Data returned from query
2. str: Rate limit
"""
data, rate_limit = run_query(query, token, query_variables)
if list(data.keys())[0] == "errors":
return True, data["errors"][0]["message"], rate_limit
try:
return False, data["data"]["repository"], rate_limit
except TypeError:
return True, "Query failed. Make sure path and branch is valid.", rate_limit
def get_url_info(url: str) -> Tuple[str, str] | List[str]:
"""
Retrieves owner and repository from a string
:param url: str
Either some form of Github Url or path such as `user/repo/whatever`
:return: tuple | list
Tuple containing owner and repo
"""
is_link = re.compile(r"^(git(hub)?|https?)")
is_git_path = re.compile(r"^[a-zA-Z0-9\-_.]+/[a-zA-Z0-9\-_.]+")
git_url_regex = re.compile(r"^(https|git)?(://|@)?([^/:]+)[/:](?P<owner>[^/:]+)/(?P<name>.+)(.git)?$")
is_git_repo = re.compile(r"((.git)|/)$")
if is_link.match(url):
if is_git_path.match(url):
return url.split("/")[:2]
match = git_url_regex.match(url)
if not match:
raise Exception("Invalid path")
name = match.group("name").split("/")[0]
name = is_git_repo.sub("", name)
owner = match.group("owner")
return owner, name
else:
if url.count("/") > 0:
return url.split("/")[:2]
raise Exception("Link/path must contain both user and repo")
def humanize_time(time_str: str) -> str:
"""
Convert datetime into a more human-friendly format
:param time_str: str
Time string in the ISO 8601 format
:return: str
Human friendly format: <number> <time_period> ago
"""
if not time_str:
return "null"
now = datetime.now()
date = datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ")
date = date.replace(tzinfo=timezone.utc)
diff = int(now.timestamp() - date.timestamp())
times = [
1, 60, 3600, 86400, 604800, 2629746, 31556925
]
times_str = [
"Second", "Minute", "Hour", "Day", "Week", "Month", "Year"
]
temp = [diff // t for t in times][::-1]
for i, t in enumerate(temp):
if t != 0:
return f"{t} {times_str[6-i]}{"" if t == 1 else "s"} ago"
def human_size(bytes: int | float, units: Optional[List[str]] = None) -> str:
"""
Convert bytes into a more human-friendly format
:param bytes: int
Number of bytes
:param units: Optional[List[str]]
units used
:return: str
Return size in human friendly format: <number> <size_unit>
"""
if units is None:
units = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
return f"{round(bytes, 2)} " + units[0] if bytes < 1024 else human_size(bytes / 1024, units[1:])
class FileEntry(NodeMixin):
def __init__(
self,
name: str,
size: str | int = None,
parent=None,
children=None
) -> None:
super(FileEntry, self).__init__()
if size != None:
self.name = f"{name} ([green]{human_size(size)}[/])"
else:
self.name = f"[blue]{name}/[/]"
self.parent = parent
if children:
self.children = children
class FileEntryRoot(NodeMixin):
def __init__(self, name: str, parent=None, children=None):
super(FileEntryRoot, self).__init__()
self.name = name
self.parent = parent
if children:
self.children = children
def populate_tree(
root_name: str,
data: list,
collapse_blobs: bool = False
) -> "anytree.Node":
"""
Populate the tree
:param root_name: str
Name of root node
:param data: dict
Data
:param collapse_blobs: bool
Collapse files or not
:return: anytree.node
"""
root = FileEntryRoot(root_name)
def edges(tree: FileEntry | FileEntryRoot, parent=None):
collapsed_count = 0
collapsed_size = 0
for entry in tree:
if entry["type"] == "blob":
if collapse_blobs:
collapsed_size += entry["object"]["byteSize"]
collapsed_count += 1
else:
_ = FileEntry(entry["name"], entry["object"]["byteSize"], parent=parent)
else:
node = FileEntry(entry["name"], parent=parent)
if entry["object"]:
edges(entry["object"]["entries"], parent=node)
if collapse_blobs:
_ = FileEntry(f"[orange1]{collapsed_count}[/] Files", collapsed_size, parent=parent)
edges(data, root)
return root
class Reversor:
def __init__(self, obj: Any) -> None:
self.obj = obj
def __eq__(self, other: Any) -> bool:
return other.obj == self.obj
def __lt__(self, other: Any) -> bool:
return other.obj < self.obj
def sort_entries(entries: List[Any]) -> List[Any]:
"""
Recursively sort the data first based on type
then alphabetically
:param entries: list
Entries
:return: list
Entries but sorted
"""
entries = sorted(
entries, key=lambda x: (
Reversor(x["type"]), # First sort by type (reversed)
x["name"].lower() # Then sort by alphabetical
)
)
for entry in entries:
if entry["type"] == "tree" and entry["object"]:
entry["object"]["entries"] = sort_entries(entry["object"]["entries"])
return entries
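if __name__ == "__main__":
    # Hedged demo: `sample_entries` mimics the GraphQL "entries" shape consumed
    # above; the file names and byte sizes are made up. Node names contain rich
    # markup, which a plain print() shows literally.
    from anytree import RenderTree

    sample_entries = [
        {"name": "README.md", "type": "blob", "object": {"byteSize": 512}},
        {"name": "src", "type": "tree", "object": {"entries": [
            {"name": "main.py", "type": "blob", "object": {"byteSize": 2048}},
        ]}},
    ]
    demo_tree = populate_tree("demo-repo", sort_entries(sample_entries))
    for prefix, _, node in RenderTree(demo_tree):
        print(f"{prefix}{node.name}")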
|
from __future__ import annotations
from anytree import NodeMixin
from datetime import datetime, timezone
from dotenv import load_dotenv
from os import environ
from os.path import join, dirname
from typing import Tuple, List, Any, Dict, Optional
import re
import requests
from rich.box import Box
__all__ = [
"get_data", "get_token", "get_url_info", "human_size", "humanize_time",
"populate_tree", "ROUNDED_BORDER", "run_query", "set_token", "sort_entries"
]
ROUNDED_BORDER: Box = Box(
"""\
╭──╮
│ │
│ │
│ │
│ │
│ │
│ │
╰──╯
"""
)
def get_token() -> str:
"""
Retrieves the Github Personal Access Token from .env file
"""
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
return environ.get("GITSORT_TOKEN")
def set_token(token: str) -> None:
"""
Set your Github personal access token in order to access
private repositories and extend the usage of the GraphQL API.
"""
import os
from dotenv import load_dotenv
from os.path import join, dirname
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
gitsort_token = os.environ.get("GITSORT_TOKEN")
if not gitsort_token:
with open(dotenv_path, "w") as f:
f.write(f"GITSORT_TOKEN={token}")
print("Github Token set!")
else:
inp = input("Github token already set! Do you want to update it? [y/n] ").lower()
while inp not in ["y", "n"]:
print("Invalid answer")
inp = input("Github token already set! Do you want to update it? [y/n] ").lower()
if inp == "y":
with open(dotenv_path, "w") as f:
f.write(f"GITSORT_TOKEN={token}")
print("Github Token updated!")
def run_query(
query: str,
token: str,
variables: dict | None = None,
headers: dict | None = None
) -> Tuple[dict, str]:
"""
Runs a Github GraphQL query and returns the result
:param query: str
GraphQL query
:param token: str
        The user's Github Personal Access Token
:param variables: dict
GraphQL Variables
:param headers: dict
Request headers
:return: tuple
The response and rate limit
"""
if not headers:
headers = {"Authorization": f"Bearer {token}"}
request = requests.post(
'https://api.github.com/graphql',
json={'query': query, 'variables': variables},
headers=headers
)
if request.status_code == 200:
return request.json(), request.headers["X-RateLimit-Remaining"]
else:
raise Exception("Query failed to run by returning code of {}. {}".format(request.status_code, query))
def get_data(
query: str,
token: str,
query_variables: Dict[str, str]
) -> Tuple[bool, Any, str]:
"""
Get data from query
:param query: str
Graphql Query
:param token: str
Github Personal Access Token
:param query_variables: dict
Variables used in query
:return: tuple
        returns a tuple of three items:
0. bool: True if query failed and return error messages else False
1. Any: Data returned from query
2. str: Rate limit
"""
data, rate_limit = run_query(query, token, query_variables)
if list(data.keys())[0] == "errors":
return True, data["errors"][0]["message"], rate_limit
try:
return False, data["data"]["repository"], rate_limit
except TypeError:
return True, "Query failed. Make sure path and branch is valid.", rate_limit
def get_url_info(url: str) -> Tuple[str, str] | List[str]:
"""
Retrieves owner and repository from a string
:param url: str
Either some form of Github Url or path such as `user/repo/whatever`
:return: tuple | list
Tuple containing owner and repo
"""
is_link = re.compile(r"^(git(hub)?|https?)")
is_git_path = re.compile(r"^[a-zA-Z0-9\-_.]+/[a-zA-Z0-9\-_.]+")
git_url_regex = re.compile(r"^(https|git)?(://|@)?([^/:]+)[/:](?P<owner>[^/:]+)/(?P<name>.+)(.git)?$")
is_git_repo = re.compile(r"((.git)|/)$")
if is_link.match(url):
if is_git_path.match(url):
return url.split("/")[:2]
match = git_url_regex.match(url)
if not match:
raise Exception("Invalid path")
name = match.group("name").split("/")[0]
name = is_git_repo.sub("", name)
owner = match.group("owner")
return owner, name
else:
if url.count("/") > 0:
return url.split("/")[:2]
raise Exception("Link/path must contain both user and repo")
def humanize_time(time_str: str) -> str:
"""
Convert datetime into a more human-friendly format
:param time_str: str
Time string in the ISO 8601 format
:return: str
Human friendly format: <number> <time_period> ago
"""
if not time_str:
return "null"
now = datetime.now()
date = datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ")
date = date.replace(tzinfo=timezone.utc)
diff = int(now.timestamp() - date.timestamp())
times = [
1, 60, 3600, 86400, 604800, 2629746, 31556925
]
times_str = [
"Second", "Minute", "Hour", "Day", "Week", "Month", "Year"
]
temp = [diff // t for t in times][::-1]
for i, t in enumerate(temp):
if t != 0:
return f"{t} {times_str[6-i]}{'' if t == 1 else 's'} ago"
def human_size(bytes: int | float, units: Optional[List[str]] = None) -> str:
"""
Convert bytes into a more human-friendly format
:param bytes: int
Number of bytes
:param units: Optional[List[str]]
units used
:return: str
Return size in human friendly format: <number> <size_unit>
"""
if units is None:
units = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
return f"{round(bytes, 2)} " + units[0] if bytes < 1024 else human_size(bytes / 1024, units[1:])
class FileEntry(NodeMixin):
def __init__(
self,
name: str,
size: str | int = None,
parent=None,
children=None
) -> None:
super(FileEntry, self).__init__()
if size != None:
self.name = f"{name} ([green]{human_size(size)}[/])"
else:
self.name = f"[blue]{name}/[/]"
self.parent = parent
if children:
self.children = children
class FileEntryRoot(NodeMixin):
def __init__(self, name: str, parent=None, children=None):
super(FileEntryRoot, self).__init__()
self.name = name
self.parent = parent
if children:
self.children = children
def populate_tree(
root_name: str,
data: list,
collapse_blobs: bool = False
) -> "anytree.Node":
"""
Populate the tree
:param root_name: str
Name of root node
:param data: dict
Data
:param collapse_blobs: bool
Collapse files or not
:return: anytree.node
"""
root = FileEntryRoot(root_name)
def edges(tree: FileEntry | FileEntryRoot, parent=None):
collapsed_count = 0
collapsed_size = 0
for entry in tree:
if entry["type"] == "blob":
if collapse_blobs:
collapsed_size += entry["object"]["byteSize"]
collapsed_count += 1
else:
_ = FileEntry(entry["name"], entry["object"]["byteSize"], parent=parent)
else:
node = FileEntry(entry["name"], parent=parent)
if entry["object"]:
edges(entry["object"]["entries"], parent=node)
if collapse_blobs:
_ = FileEntry(f"[orange1]{collapsed_count}[/] Files", collapsed_size, parent=parent)
edges(data, root)
return root
class Reversor:
def __init__(self, obj: Any) -> None:
self.obj = obj
def __eq__(self, other: Any) -> bool:
return other.obj == self.obj
def __lt__(self, other: Any) -> bool:
return other.obj < self.obj
def sort_entries(entries: List[Any]) -> List[Any]:
"""
Recursively sort the data first based on type
then alphabetically
:param entries: list
Entries
:return: list
Entries but sorted
"""
entries = sorted(
entries, key=lambda x: (
Reversor(x["type"]), # First sort by type (reversed)
x["name"].lower() # Then sort by alphabetical
)
)
for entry in entries:
if entry["type"] == "tree" and entry["object"]:
entry["object"]["entries"] = sort_entries(entry["object"]["entries"])
return entries
|
"""
This class is used to cache the return value of functions on disk for a specified
number of days. This is used by the lakshmi.assets module to cache name/asset
values (i.e. the slow functions). For examples on how to use this class, please
see the tests (tests/test_cache.py file).
Currently, this module can only be used on functions which are class members
and the function itself must take no arguments. These restrictions can be
easily relaxed, but so far all use cases haven't needed anything more than what
is currently implemented.
In addition to caching values, this class also allows one to optionally call
a user-specified function on cache-misses (currently used to show a progress
bar to the user via the lak CLI).
"""
import functools
import pickle
from abc import ABC, abstractmethod
from datetime import datetime
from hashlib import md5
from pathlib import Path
# Inspired by https://pypi.org/project/cache-to-disk/. I tried using other
# options such as requests-cache, but it was too slow compared to the solution
# implemented here.
class Cacheable(ABC):
"""Interface that declares that a particular class's method return
values could be cached. The methods should not take a parameter,
and cache_key() + method name should uniquely imply the return
value of that class."""
@abstractmethod
def cache_key(self):
"""Unique string value used as key for caching."""
pass
def get_file_age(file):
"""Returns the age of file.
Args:
file: A PosixPath object representing a file.
    Returns: An int representing the age in days.
"""
return (datetime.today()
- datetime.fromtimestamp(file.stat().st_mtime)).days
# Constants
# Default cache directory if none is specified.
_DEFAULT_DIR = Path.home() / '.lakshmicache'
_CACHE_STR = 'cache_dir'
_FORCE_STR = 'force_refresh'
_FORCED_FILES_STR = 'forced_files'
_MISS_FUNC_STR = 'miss_func'
# Dict (string -> object) to keep cache context.
# Description of keys to what is stored:
# _CACHE_STR:
# The pathlib.Path object specifying cache directory. If set to None,
# caching is disabled. Default: _DEFAULT_DIR
# _FORCE_STR:
# If set to True, new values are re-generated once even if a cached one is
# available. This is meant for data that is cached for < month (stock prices
# and Treasury Bond value). Values that are cached for > 40 days ignore this
# flag. Default: False
# _FORCED_FILES_STR:
# A set of files which are already refreshed once due to _ctx[_FORCE_STR]
# being set to True. This is used to ensure we don't re-fetch the same values
# multiple times in a session.
# _MISS_FUNC_STR:
# If set, this function is called for every cache miss.
_ctx = {_FORCE_STR: False}
def set_force_refresh(v):
"""Sets whether cached values should be refreshed.
Args:
v: Boolean representing if cached values should be re-generated.
"""
global _ctx
_ctx[_FORCE_STR] = v
_ctx[_FORCED_FILES_STR] = set()
def set_cache_miss_func(f):
"""Sets the function to call for cache-misses.
Args:
f: The function to call whenever a cache-miss happens (i.e. whenever
the underlying function is called instead of using a cached value).
"""
global _ctx
if f:
_ctx[_MISS_FUNC_STR] = f
else:
# Clear out previously set function, if any.
_ctx.pop(_MISS_FUNC_STR, None)
def set_cache_dir(cache_dir):
"""Sets the cache directory.
If the cache directory is not specified, default ~/.lakshmicache
is used.
Args:
cache_dir: The pathlib.Path object specifying cache directory.
If set to None, caching is disabled.
"""
global _ctx
_ctx[_CACHE_STR] = cache_dir
if cache_dir is None:
return
cache_dir.mkdir(exist_ok=True) # Create cache dir if one doesn't exist.
# Delete old files whose cache values are invalid already.
for file in cache_dir.glob('*_*.lkc'):
days = int(file.name.split('_')[0])
if get_file_age(file) >= days:
file.unlink()
def _valid_cached_value(file, days):
"""Helper function to check if the cached value from file is valid.
Args:
file: The Path object representing a file potentially containing
previously cached value.
days: Number of days after which the cached value becomes invalid.
Returns: True iff the cached value in file is valid.
"""
MAX_DAYS_TO_FORCE_REFRESH = 40
if (
_ctx[_FORCE_STR]
and days < MAX_DAYS_TO_FORCE_REFRESH
and file.name not in _ctx[_FORCED_FILES_STR]
):
# Ignore cached value.
_ctx[_FORCED_FILES_STR].add(file.name)
return False
return (file.exists() and get_file_age(file) < days)
def _call_func(class_obj, func):
"""Helper function to return value of class_obj.func().
In addition to calling function, this helper also calls the
cache_miss function if one is set in the context.
Args:
class_obj: The object of a particular class implementing Cacheable
interface.
func: The function whose return values has to be cached. Assumed
to take no parameters.
Returns: The return value of the func.
"""
global _ctx
if _MISS_FUNC_STR in _ctx:
_ctx[_MISS_FUNC_STR]()
return func(class_obj)
def cache(days):
"""Returns decorator that caches functions return value on disk for
specified number of days.
Args:
days: Number of days for which to cache the return value of the
function.
Returns: The decorator.
"""
def decorator(func):
@functools.wraps(func)
def new_func(class_obj):
global _ctx
if _CACHE_STR not in _ctx:
# Cache dir not set. Set to default.
set_cache_dir(_DEFAULT_DIR)
cache_dir = _ctx[_CACHE_STR]
if not cache_dir:
return _call_func(class_obj, func)
key = f'{func.__qualname__}_{class_obj.cache_key()}'
            filename = f'{days}_{md5(key.encode("utf8")).hexdigest()}.lkc'
file = cache_dir / filename
if _valid_cached_value(file, days):
return pickle.loads(file.read_bytes())
value = _call_func(class_obj, func)
file.write_bytes(pickle.dumps(value))
return value
return new_func
return decorator
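if __name__ == "__main__":
    # Hedged usage sketch: `FakeQuote` and its slow price() lookup are made up
    # for illustration. A temporary directory is used so the demo does not
    # touch the default ~/.lakshmicache.
    import tempfile
    import time

    class FakeQuote(Cacheable):
        def __init__(self, ticker):
            self.ticker = ticker

        def cache_key(self):
            return self.ticker

        @cache(1)
        def price(self):
            time.sleep(0.1)  # stand-in for a slow network call
            return 42.0

    with tempfile.TemporaryDirectory() as tmp:
        set_cache_dir(Path(tmp))
        quote = FakeQuote("VTI")
        print(quote.price())  # computed and written to the on-disk cache
        print(quote.price())  # served from the cache without the sleep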
|
"""
This class is used to cache the return value of functions on disk for a specified
number of days. This is used by the lakshmi.assets module to cache name/asset
values (i.e. the slow functions). For examples on how to use this class, please
see the tests (tests/test_cache.py file).
Currently, this module can only be used on functions which are class members
and the function itself must take no arguments. These restrictions can be
easily relaxed, but so far all use cases haven't needed anything more than what
is currently implemented.
In addition to caching values, this class also allows one to optionally call
a user-specified function on cache-misses (currently used to show a progress
bar to the user via the lak CLI).
"""
import functools
import pickle
from abc import ABC, abstractmethod
from datetime import datetime
from hashlib import md5
from pathlib import Path
# Inspired by https://pypi.org/project/cache-to-disk/. I tried using other
# options such as requests-cache, but it was too slow compared to the solution
# implemented here.
class Cacheable(ABC):
"""Interface that declares that a particular class's method return
values could be cached. The methods should not take a parameter,
and cache_key() + method name should uniquely imply the return
value of that class."""
@abstractmethod
def cache_key(self):
"""Unique string value used as key for caching."""
pass
def get_file_age(file):
"""Returns the age of file.
Args:
file: A PosixPath object representing a file.
    Returns: An int representing the age in days.
"""
return (datetime.today()
- datetime.fromtimestamp(file.stat().st_mtime)).days
# Constants
# Default cache directory if none is specified.
_DEFAULT_DIR = Path.home() / '.lakshmicache'
_CACHE_STR = 'cache_dir'
_FORCE_STR = 'force_refresh'
_FORCED_FILES_STR = 'forced_files'
_MISS_FUNC_STR = 'miss_func'
# Dict (string -> object) to keep cache context.
# Description of keys to what is stored:
# _CACHE_STR:
# The pathlib.Path object specifying cache directory. If set to None,
# caching is disabled. Default: _DEFAULT_DIR
# _FORCE_STR:
# If set to True, new values are re-generated once even if a cached one is
# available. This is meant for data that is cached for < month (stock prices
# and Treasury Bond value). Values that are cached for > 40 days ignore this
# flag. Default: False
# _FORCED_FILES_STR:
# A set of files which are already refreshed once due to _ctx[_FORCE_STR]
# being set to True. This is used to ensure we don't re-fetch the same values
# multiple times in a session.
# _MISS_FUNC_STR:
# If set, this function is called for every cache miss.
_ctx = {_FORCE_STR: False}
def set_force_refresh(v):
"""Sets whether cached values should be refreshed.
Args:
v: Boolean representing if cached values should be re-generated.
"""
global _ctx
_ctx[_FORCE_STR] = v
_ctx[_FORCED_FILES_STR] = set()
def set_cache_miss_func(f):
"""Sets the function to call for cache-misses.
Args:
f: The function to call whenever a cache-miss happens (i.e. whenever
the underlying function is called instead of using a cached value).
"""
global _ctx
if f:
_ctx[_MISS_FUNC_STR] = f
else:
# Clear out previously set function, if any.
_ctx.pop(_MISS_FUNC_STR, None)
def set_cache_dir(cache_dir):
"""Sets the cache directory.
If the cache directory is not specified, default ~/.lakshmicache
is used.
Args:
cache_dir: The pathlib.Path object specifying cache directory.
If set to None, caching is disabled.
"""
global _ctx
_ctx[_CACHE_STR] = cache_dir
if cache_dir is None:
return
cache_dir.mkdir(exist_ok=True) # Create cache dir if one doesn't exist.
# Delete old files whose cache values are invalid already.
for file in cache_dir.glob('*_*.lkc'):
days = int(file.name.split('_')[0])
if get_file_age(file) >= days:
file.unlink()
def _valid_cached_value(file, days):
"""Helper function to check if the cached value from file is valid.
Args:
file: The Path object representing a file potentially containing
previously cached value.
days: Number of days after which the cached value becomes invalid.
Returns: True iff the cached value in file is valid.
"""
MAX_DAYS_TO_FORCE_REFRESH = 40
if (
_ctx[_FORCE_STR]
and days < MAX_DAYS_TO_FORCE_REFRESH
and file.name not in _ctx[_FORCED_FILES_STR]
):
# Ignore cached value.
_ctx[_FORCED_FILES_STR].add(file.name)
return False
return (file.exists() and get_file_age(file) < days)
def _call_func(class_obj, func):
"""Helper function to return value of class_obj.func().
In addition to calling function, this helper also calls the
cache_miss function if one is set in the context.
Args:
class_obj: The object of a particular class implementing Cacheable
interface.
func: The function whose return values has to be cached. Assumed
to take no parameters.
Returns: The return value of the func.
"""
global _ctx
if _MISS_FUNC_STR in _ctx:
_ctx[_MISS_FUNC_STR]()
return func(class_obj)
def cache(days):
"""Returns decorator that caches functions return value on disk for
specified number of days.
Args:
days: Number of days for which to cache the return value of the
function.
Returns: The decorator.
"""
def decorator(func):
@functools.wraps(func)
def new_func(class_obj):
global _ctx
if _CACHE_STR not in _ctx:
# Cache dir not set. Set to default.
set_cache_dir(_DEFAULT_DIR)
cache_dir = _ctx[_CACHE_STR]
if not cache_dir:
return _call_func(class_obj, func)
key = f'{func.__qualname__}_{class_obj.cache_key()}'
filename = f'{days}_{md5(key.encode("utf8")).hexdigest()}.lkc'
file = cache_dir / filename
if _valid_cached_value(file, days):
return pickle.loads(file.read_bytes())
value = _call_func(class_obj, func)
file.write_bytes(pickle.dumps(value))
return value
return new_func
return decorator
|
import re
from enum import Enum
from typing import Any, Dict, Sequence
from pydantic import BaseModel, Field, root_validator, validator
# OpenAPI names validation regexp
OpenAPI_NAME_RE = re.compile(r"^[A-Za-z0-9-._]+")
class ExternalDocs(BaseModel):
description: str = ""
url: str
class Tag(BaseModel):
"""OpenAPI tag object"""
name: str
description: str = ""
externalDocs: ExternalDocs = None
def __str__(self):
return self.name
class UnprocessableEntityElement(BaseModel):
"""Model of missing field description."""
loc: Sequence[str] = Field(
...,
title="Missing field name",
)
msg: str = Field(
...,
title="Error message",
)
type: str = Field( # noqa: WPS125
...,
title="Error type",
)
ctx: Dict[str, Any] = Field(
None,
title="Error context",
)
class UnprocessableEntity(BaseModel):
"""Model of 422 Unprocessable Entity error."""
__root__: Sequence[UnprocessableEntityElement]
class SecureType(str, Enum):
HTTP = "http"
API_KEY = "apiKey"
OAUTH_TWO = "oauth2"
OPEN_ID_CONNECT = "openIdConnect"
class InType(str, Enum):
HEADER = "header"
QUERY = "query"
COOKIE = "cookie"
type_req_fields = {
SecureType.HTTP: ["scheme"],
SecureType.API_KEY: ["name", "field_in"],
SecureType.OAUTH_TWO: ["flows"],
SecureType.OPEN_ID_CONNECT: ["openIdConnectUrl"],
}
class SecuritySchemeData(BaseModel):
"""
Security scheme data
https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.3.md#securitySchemeObject
"""
type: SecureType = Field(..., description="Secure scheme type")
description: str = Field(
None,
description="A short description for security scheme.",
)
name: str = Field(
None,
description="The name of the header, query or cookie parameter to be used.",
)
field_in: InType = Field(
None, alias="in", description="The location of the API key."
)
scheme: str = Field(None, description="The name of the HTTP Authorization scheme.")
bearerFormat: str = Field(
None,
description=(
"A hint to the client to identify how the bearer token is formatted."
),
)
flows: dict = Field(
None,
description=(
"Containing configuration information for the flow types supported."
),
)
openIdConnectUrl: str = Field(
None, description="OpenId Connect URL to discover OAuth2 configuration values."
)
@root_validator()
def check_type_required_fields(cls, values: dict):
exist_fields = {key for key in values.keys() if values[key]}
if not values.get("type"):
raise ValueError("Type field is required")
if not set(type_req_fields[values["type"]]).issubset(exist_fields):
raise ValueError(
f"For `{values["type"]}` type "
f"`{", ".join(type_req_fields[values["type"]])}` field(s) is required."
)
return values
class Config:
validate_assignment = True
class SecurityScheme(BaseModel):
"""
Named security scheme
"""
name: str = Field(
...,
description="Custom security scheme name. Can only contain - [A-Za-z0-9-._]",
)
data: SecuritySchemeData = Field(..., description="Security scheme data")
@validator("name")
def check_name(cls, value: str):
if not OpenAPI_NAME_RE.fullmatch(value):
raise ValueError("Name not match OpenAPI rules")
return value
class Config:
validate_assignment = True
class Server(BaseModel):
"""
Servers section of OAS
"""
url: str = Field(
...,
description="""URL or path of API server
(may be parametrized with using \"variables\" section - for more information,
see: https://swagger.io/docs/specification/api-host-and-base-path/ )""",
)
description: str = Field(
None,
description="Custom server description for server URL",
)
variables: dict = Field(
None,
description="Variables for customizing server URL",
)
class Config:
validate_assignment = True
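if __name__ == "__main__":
    # Hedged usage sketch: the scheme name and header below are illustrative.
    # "in" must be supplied via its alias, hence parse_obj with a plain dict.
    api_key_data = SecuritySchemeData.parse_obj(
        {"type": "apiKey", "name": "X-API-Key", "in": "header"}
    )
    scheme = SecurityScheme(name="api_key_auth", data=api_key_data)
    print(scheme.json(by_alias=True, exclude_none=True))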
|
import re
from enum import Enum
from typing import Any, Dict, Sequence
from pydantic import BaseModel, Field, root_validator, validator
# OpenAPI names validation regexp
OpenAPI_NAME_RE = re.compile(r"^[A-Za-z0-9-._]+")
class ExternalDocs(BaseModel):
description: str = ""
url: str
class Tag(BaseModel):
"""OpenAPI tag object"""
name: str
description: str = ""
externalDocs: ExternalDocs = None
def __str__(self):
return self.name
class UnprocessableEntityElement(BaseModel):
"""Model of missing field description."""
loc: Sequence[str] = Field(
...,
title="Missing field name",
)
msg: str = Field(
...,
title="Error message",
)
type: str = Field( # noqa: WPS125
...,
title="Error type",
)
ctx: Dict[str, Any] = Field(
None,
title="Error context",
)
class UnprocessableEntity(BaseModel):
"""Model of 422 Unprocessable Entity error."""
__root__: Sequence[UnprocessableEntityElement]
class SecureType(str, Enum):
HTTP = "http"
API_KEY = "apiKey"
OAUTH_TWO = "oauth2"
OPEN_ID_CONNECT = "openIdConnect"
class InType(str, Enum):
HEADER = "header"
QUERY = "query"
COOKIE = "cookie"
type_req_fields = {
SecureType.HTTP: ["scheme"],
SecureType.API_KEY: ["name", "field_in"],
SecureType.OAUTH_TWO: ["flows"],
SecureType.OPEN_ID_CONNECT: ["openIdConnectUrl"],
}
class SecuritySchemeData(BaseModel):
"""
Security scheme data
https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.3.md#securitySchemeObject
"""
type: SecureType = Field(..., description="Secure scheme type")
description: str = Field(
None,
description="A short description for security scheme.",
)
name: str = Field(
None,
description="The name of the header, query or cookie parameter to be used.",
)
field_in: InType = Field(
None, alias="in", description="The location of the API key."
)
scheme: str = Field(None, description="The name of the HTTP Authorization scheme.")
bearerFormat: str = Field(
None,
description=(
"A hint to the client to identify how the bearer token is formatted."
),
)
flows: dict = Field(
None,
description=(
"Containing configuration information for the flow types supported."
),
)
openIdConnectUrl: str = Field(
None, description="OpenId Connect URL to discover OAuth2 configuration values."
)
@root_validator()
def check_type_required_fields(cls, values: dict):
exist_fields = {key for key in values.keys() if values[key]}
if not values.get("type"):
raise ValueError("Type field is required")
if not set(type_req_fields[values["type"]]).issubset(exist_fields):
raise ValueError(
f"For `{values['type']}` type "
f"`{', '.join(type_req_fields[values['type']])}` field(s) is required."
)
return values
class Config:
validate_assignment = True
class SecurityScheme(BaseModel):
"""
Named security scheme
"""
name: str = Field(
...,
description="Custom security scheme name. Can only contain - [A-Za-z0-9-._]",
)
data: SecuritySchemeData = Field(..., description="Security scheme data")
@validator("name")
def check_name(cls, value: str):
if not OpenAPI_NAME_RE.fullmatch(value):
raise ValueError("Name not match OpenAPI rules")
return value
class Config:
validate_assignment = True
class Server(BaseModel):
"""
Servers section of OAS
"""
url: str = Field(
...,
description="""URL or path of API server
(may be parametrized with using \"variables\" section - for more information,
see: https://swagger.io/docs/specification/api-host-and-base-path/ )""",
)
description: str = Field(
None,
description="Custom server description for server URL",
)
variables: dict = Field(
None,
description="Variables for customizing server URL",
)
class Config:
validate_assignment = True
|
# -*- coding: utf-8 -*-
"""
tests.controllers.test_api_controller
"""
import jsonpickle
from .controller_test_base import *
from moesifapi.models import *
from datetime import *
class ApiControllerTests(ControllerTestBase):
@classmethod
def setUpClass(cls):
super(ApiControllerTests, cls).setUpClass()
cls.controller = cls.api_client.api
    # Add Single Event via Ingestion API
def test_add_event(self):
# Parameters for the API call
req_headers = APIHelper.json_deserialize(""" {
"Host": "api.acmeinc.com",
"Accept": "*/*",
"Connection": "Keep-Alive",
"User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)",
"Content-Type": "application/json",
"Content-Length": "126",
"Accept-Encoding": "gzip"
} """)
req_body = APIHelper.json_deserialize( """{
"items": [
{
"type": 1,
"id": "fwfrf"
},
{
"type": 2,
"id": "d43d3f"
}
]
}""")
rsp_headers = APIHelper.json_deserialize(""" {
"Date": "Tue, 20 Aug 2019 23:46:49 GMT",
"Vary": "Accept-Encoding",
"Pragma": "no-cache",
"Expires": "-1",
"Content-Type": "application/json; charset=utf-8",
"Cache-Control": "no-cache"
} """)
rsp_body = APIHelper.json_deserialize( """{
"Error": "InvalidArgumentException",
"Message": "Missing field field_a"
}""")
metadata = APIHelper.json_deserialize("""{
"field1": "foo",
"field2": "bar"
}""")
event_req = EventRequestModel(time = datetime.utcnow() - timedelta(seconds=1),
uri = "https://api.acmeinc.com/items/reviews?&page=0&page_size=12®ion[]=Overig&sort=relevance",
verb = "PATCH",
api_version = "1.1.0",
ip_address = "61.48.220.123",
headers = req_headers,
body = req_body)
event_rsp = EventResponseModel(time = datetime.utcnow(),
status = 200,
headers = rsp_headers,
body = rsp_body)
event_model = EventModel(request = event_req,
response = event_rsp,
user_id = "my_user_id",
company_id = "my_company_id",
session_token = "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f",
metadata = metadata)
# Perform the API call through the SDK function
self.controller.create_event(event_model)
# Test response code
self.assertEquals(self.response_catcher.response.status_code, 201)
# Add Batched Events via Ingestion API
def test_add_batched_events(self):
# Parameters for the API call
body = APIHelper.json_deserialize('[{ "metadata": { "foo" : "bar" }, "request": { "time": "2016-09-09T04:45:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:45:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:46:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:46:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:47:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:47:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:48:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", 
"api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:48:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "exfzweachxjgznvKUYrxFcxv]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:49:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:49:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:50:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:50:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "recvreedfef", "session_token": "xcvkrjmcfghwuignrmcmhxdhaaezse4w]s98y18cx98q3yhwmnhcfx43f" } ]', EventModel.from_dictionary)
for val in body:
val.request.time = datetime.utcnow() - timedelta(seconds=1)
val.response.time = datetime.utcnow()
# Perform the API call through the SDK function
self.controller.create_events_batch(body)
# Test response code
self.assertEquals(self.response_catcher.response.status_code, 201)
    # Update Single User via Ingestion API
def test_update_user(self):
# Parameters for the API call
metadata = APIHelper.json_deserialize(""" {
"email": "[email protected]",
"name": "pythonapiuser",
"custom": "testdata"
} """)
user_model = UserModel(
user_id="12345",
company_id="67890",
session_token="23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f",
modified_time=datetime.utcnow(),
metadata=metadata,
campaign=CampaignModel(utm_source="Newsletter", utm_medium="Email"))
# Perform the API call through the SDK function
self.controller.update_user(user_model)
# Test response code
self.assertEquals(self.response_catcher.response.status_code, 201)
# Update Batched Users via Ingestion API
def test_update_users_batch(self):
# Parameter for the API call
body = [UserModel(user_id="1234", company_id="6789", modified_time=datetime.utcnow(),
session_token="23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f", ),
UserModel(user_id="12345", company_id="67890", modified_time=datetime.utcnow(),
session_token="23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f",
metadata=APIHelper.json_deserialize(""" {"email": "[email protected]",
"name": "pythonapiuser", "string_field": "value_1", "number_field": 0 } """))]
# Perform the API call through the SDK function
self.controller.update_users_batch(body)
# Test Response code
self.assertEquals(self.response_catcher.response.status_code, 201)
# Get Application configuration
def test_get_app_config(self):
# Perform the API call through the SDK function
response = self.controller.get_app_config().__dict__
# Test Response code
self.assertEquals(self.response_catcher.response.status_code, 200)
self.assertIsNotNone(response["raw_body"])
self.assertIsNotNone(response["headers"]["X-Moesif-Config-ETag"])
    # Add Single company via Ingestion API
def test_update_company(self):
# Parameter for the API call
company_model = CompanyModel(
company_id="67890",
modified_time=datetime.utcnow(),
campaign=CampaignModel(utm_source="Adwords", utm_medium="Twitter"))
# Perform the API call through the SDK function
self.controller.update_company(company_model)
# Test Response code
self.assertEquals(self.response_catcher.response.status_code, 201)
# Add Batched Companies via Ingestion API
def test_update_companies_batch(self):
# Parameter for the API call
body = [CompanyModel(company_id="67890", modified_time=datetime.utcnow(), company_domain="moesif"),
CompanyModel(company_id="6789", modified_time=datetime.utcnow(), company_domain="moesif",
metadata=APIHelper.json_deserialize(""" {"string_field": "value_1", "number_field": 0 } """))]
# Perform the API call through the SDK function
self.controller.update_companies_batch(body)
# Test Response code
self.assertEquals(self.response_catcher.response.status_code, 201)
|
# -*- coding: utf-8 -*-
"""
tests.controllers.test_api_controller
"""
import jsonpickle
from .controller_test_base import *
from moesifapi.models import *
from datetime import *
class ApiControllerTests(ControllerTestBase):
@classmethod
def setUpClass(cls):
super(ApiControllerTests, cls).setUpClass()
cls.controller = cls.api_client.api
    # Add Single Event via Ingestion API
def test_add_event(self):
# Parameters for the API call
req_headers = APIHelper.json_deserialize(""" {
"Host": "api.acmeinc.com",
"Accept": "*/*",
"Connection": "Keep-Alive",
"User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)",
"Content-Type": "application/json",
"Content-Length": "126",
"Accept-Encoding": "gzip"
} """)
req_body = APIHelper.json_deserialize( """{
"items": [
{
"type": 1,
"id": "fwfrf"
},
{
"type": 2,
"id": "d43d3f"
}
]
}""")
rsp_headers = APIHelper.json_deserialize(""" {
"Date": "Tue, 20 Aug 2019 23:46:49 GMT",
"Vary": "Accept-Encoding",
"Pragma": "no-cache",
"Expires": "-1",
"Content-Type": "application/json; charset=utf-8",
"Cache-Control": "no-cache"
} """)
rsp_body = APIHelper.json_deserialize( """{
"Error": "InvalidArgumentException",
"Message": "Missing field field_a"
}""")
metadata = APIHelper.json_deserialize("""{
"field1": "foo",
"field2": "bar"
}""")
event_req = EventRequestModel(time = datetime.utcnow() - timedelta(seconds=1),
uri = "https://api.acmeinc.com/items/reviews?&page=0&page_size=12®ion[]=Overig&sort=relevance",
verb = "PATCH",
api_version = "1.1.0",
ip_address = "61.48.220.123",
headers = req_headers,
body = req_body)
event_rsp = EventResponseModel(time = datetime.utcnow(),
status = 200,
headers = rsp_headers,
body = rsp_body)
event_model = EventModel(request = event_req,
response = event_rsp,
user_id = "my_user_id",
company_id = "my_company_id",
session_token = "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f",
metadata = metadata)
# Perform the API call through the SDK function
self.controller.create_event(event_model)
# Test response code
self.assertEquals(self.response_catcher.response.status_code, 201)
# Add Batched Events via Ingestion API
def test_add_batched_events(self):
# Parameters for the API call
body = APIHelper.json_deserialize('[{ "metadata": { "foo" : "bar" }, "request": { "time": "2016-09-09T04:45:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:45:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:46:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:46:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:47:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:47:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:48:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", 
"api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:48:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "exfzweachxjgznvKUYrxFcxv]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:49:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:49:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:50:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:50:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "recvreedfef", "session_token": "xcvkrjmcfghwuignrmcmhxdhaaezse4w]s98y18cx98q3yhwmnhcfx43f" } ]', EventModel.from_dictionary)
for val in body:
val.request.time = datetime.utcnow() - timedelta(seconds=1)
val.response.time = datetime.utcnow()
# Perform the API call through the SDK function
self.controller.create_events_batch(body)
# Test response code
self.assertEquals(self.response_catcher.response.status_code, 201)
    # Update Single User via Ingestion API
def test_update_user(self):
# Parameters for the API call
metadata = APIHelper.json_deserialize(""" {
"email": "[email protected]",
"name": "pythonapiuser",
"custom": "testdata"
} """)
user_model = UserModel(
user_id="12345",
company_id="67890",
session_token="23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f",
modified_time=datetime.utcnow(),
metadata=metadata,
campaign=CampaignModel(utm_source="Newsletter", utm_medium="Email"))
# Perform the API call through the SDK function
self.controller.update_user(user_model)
# Test response code
self.assertEquals(self.response_catcher.response.status_code, 201)
# Update Batched Users via Ingestion API
def test_update_users_batch(self):
# Parameter for the API call
body = [UserModel(user_id="1234", company_id="6789", modified_time=datetime.utcnow(),
session_token="23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f", ),
UserModel(user_id="12345", company_id="67890", modified_time=datetime.utcnow(),
session_token="23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f",
metadata=APIHelper.json_deserialize(""" {"email": "[email protected]",
"name": "pythonapiuser", "string_field": "value_1", "number_field": 0 } """))]
# Perform the API call through the SDK function
self.controller.update_users_batch(body)
# Test Response code
self.assertEquals(self.response_catcher.response.status_code, 201)
# Get Application configuration
def test_get_app_config(self):
# Perform the API call through the SDK function
response = self.controller.get_app_config().__dict__
# Test Response code
self.assertEquals(self.response_catcher.response.status_code, 200)
self.assertIsNotNone(response["raw_body"])
self.assertIsNotNone(response["headers"]["X-Moesif-Config-ETag"])
    # Add Single company via Ingestion API
def test_update_company(self):
# Parameter for the API call
company_model = CompanyModel(
company_id="67890",
modified_time=datetime.utcnow(),
campaign=CampaignModel(utm_source="Adwords", utm_medium="Twitter"))
# Perform the API call through the SDK function
self.controller.update_company(company_model)
# Test Response code
self.assertEquals(self.response_catcher.response.status_code, 201)
# Add Batched Companies via Ingestion API
def test_update_companies_batch(self):
# Parameter for the API call
body = [CompanyModel(company_id="67890", modified_time=datetime.utcnow(), company_domain="moesif"),
CompanyModel(company_id="6789", modified_time=datetime.utcnow(), company_domain="moesif",
metadata=APIHelper.json_deserialize(""" {"string_field": "value_1", "number_field": 0 } """))]
# Perform the API call through the SDK function
self.controller.update_companies_batch(body)
# Test Response code
self.assertEquals(self.response_catcher.response.status_code, 201)
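# A hedged sketch, separate from the test suite above: each element of the
# long batch JSON literal reduces to a dict shaped like the one below. Field
# names mirror the test data; every value is a placeholder rather than a real
# credential, and the helper name is made up purely for illustration.
def _example_event_dict():
    import json
    from datetime import datetime, timedelta
    now = datetime.utcnow()
    event = {
        'request': {
            'time': (now - timedelta(seconds=1)).isoformat(),
            'uri': 'https://api.acmeinc.com/items/reviews/',
            'verb': 'PATCH',
            'headers': {'Content-Type': 'application/json'},
            'body': {'items': [{'direction_type': 1, 'discovery_id': 'fwfrf', 'liked': False}]},
        },
        'response': {
            'time': now.isoformat(),
            'status': 500,
            'headers': {'Content-Type': 'application/json; charset=utf-8'},
            'body': {'Error': 'InvalidArgumentException', 'Message': 'Missing field field_a'},
        },
        'user_id': 'example_user_id',
        'session_token': 'example_session_token',
    }
    # json.dumps([event]) yields a one-element batch in the same format that
    # APIHelper.json_deserialize(..., EventModel.from_dictionary) parses above.
    return json.dumps([event])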
|
import argparse
import json
import os
import sys
import requests
from tqdm import tqdm
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("--download_dir", type=str, default="/root/downloads/")
parser.add_argument("--bert", action="store_true", help="download a bert model (default: ar)")
parser.add_argument("--model", type=str, choices=["s", "m", "l"], help="parameter counts are s:76M, m:455M, l:1362M")
parser.add_argument("--ckpt", type=str, choices=["131000", "262000", "524000", "1000000"])
parser.add_argument("--clusters", action="store_true", help="download the color clusters file")
parser.add_argument("--dataset", type=str, choices=["imagenet", "cifar10"])
args = parser.parse_args()
print("input args:\n", json.dumps(vars(args), indent=4, separators=(",", ":")))
return args
def main(args):
if not os.path.exists(args.download_dir):
os.makedirs(args.download_dir)
urls = []
# download the checkpoint
if args.model and args.ckpt:
base_url = f"https://openaipublic.blob.core.windows.net/image-gpt/checkpoints/igpt-{args.model}{"-bert" if args.bert else ""}/{args.ckpt}"
size_to_shards = {"s": 32, "m": 32, "l": 64}
shards = size_to_shards[args.model]
for filename in [f"model.ckpt-{args.ckpt}.data-{i:05d}-of-{shards:05d}" for i in range(shards)]:
urls.append(f"{base_url}/{filename}")
urls.append(f"{base_url}/model.ckpt-{args.ckpt}.index")
urls.append(f"{base_url}/model.ckpt-{args.ckpt}.meta")
# download the color clusters file
if args.clusters:
urls.append("https://openaipublic.blob.core.windows.net/image-gpt/color-clusters/kmeans_centers.npy")
# download color clustered dataset
if args.dataset:
for split in ["trX", "trY", "vaX", "vaY", "teX", "teY"]:
urls.append(f"https://openaipublic.blob.core.windows.net/image-gpt/datasets/{args.dataset}_{split}.npy")
# run the download
for url in urls:
filename = url.split("/")[-1]
r = requests.get(url, stream=True)
with open(f"{args.download_dir}/{filename}", "wb") as f:
file_size = int(r.headers["content-length"])
chunk_size = 1000
with tqdm(ncols=80, desc="Fetching " + filename, total=file_size, unit_scale=True) as pbar:
# 1k for chunk_size, since Ethernet packet size is around 1500 bytes
for chunk in r.iter_content(chunk_size=chunk_size):
f.write(chunk)
pbar.update(chunk_size)
if __name__ == "__main__":
args = parse_arguments()
main(args)
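# A minimal sketch of the same chunked-download loop, kept separate from the
# script above. The only difference is that the progress bar advances by
# len(chunk) (the bytes actually received) instead of the nominal chunk_size,
# which overshoots slightly on the final, shorter chunk. The helper name is
# illustrative and is not part of the original script.
def _download_one(url, dest_path, chunk_size=1000):
    r = requests.get(url, stream=True)
    total = int(r.headers.get("content-length", 0))
    with open(dest_path, "wb") as f:
        with tqdm(ncols=80, desc="Fetching " + dest_path, total=total, unit_scale=True) as pbar:
            for chunk in r.iter_content(chunk_size=chunk_size):
                f.write(chunk)
                pbar.update(len(chunk))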
|
import argparse
import json
import os
import sys
import requests
from tqdm import tqdm
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("--download_dir", type=str, default="/root/downloads/")
parser.add_argument("--bert", action="store_true", help="download a bert model (default: ar)")
parser.add_argument("--model", type=str, choices=["s", "m", "l"], help="parameter counts are s:76M, m:455M, l:1362M")
parser.add_argument("--ckpt", type=str, choices=["131000", "262000", "524000", "1000000"])
parser.add_argument("--clusters", action="store_true", help="download the color clusters file")
parser.add_argument("--dataset", type=str, choices=["imagenet", "cifar10"])
args = parser.parse_args()
print("input args:\n", json.dumps(vars(args), indent=4, separators=(",", ":")))
return args
def main(args):
if not os.path.exists(args.download_dir):
os.makedirs(args.download_dir)
urls = []
# download the checkpoint
if args.model and args.ckpt:
base_url = f"https://openaipublic.blob.core.windows.net/image-gpt/checkpoints/igpt-{args.model}{'-bert' if args.bert else ''}/{args.ckpt}"
size_to_shards = {"s": 32, "m": 32, "l": 64}
shards = size_to_shards[args.model]
for filename in [f"model.ckpt-{args.ckpt}.data-{i:05d}-of-{shards:05d}" for i in range(shards)]:
urls.append(f"{base_url}/{filename}")
urls.append(f"{base_url}/model.ckpt-{args.ckpt}.index")
urls.append(f"{base_url}/model.ckpt-{args.ckpt}.meta")
# download the color clusters file
if args.clusters:
urls.append("https://openaipublic.blob.core.windows.net/image-gpt/color-clusters/kmeans_centers.npy")
# download color clustered dataset
if args.dataset:
for split in ["trX", "trY", "vaX", "vaY", "teX", "teY"]:
urls.append(f"https://openaipublic.blob.core.windows.net/image-gpt/datasets/{args.dataset}_{split}.npy")
# run the download
for url in urls:
filename = url.split("/")[-1]
r = requests.get(url, stream=True)
with open(f"{args.download_dir}/{filename}", "wb") as f:
file_size = int(r.headers["content-length"])
chunk_size = 1000
with tqdm(ncols=80, desc="Fetching " + filename, total=file_size, unit_scale=True) as pbar:
# 1k for chunk_size, since Ethernet packet size is around 1500 bytes
for chunk in r.iter_content(chunk_size=chunk_size):
f.write(chunk)
pbar.update(chunk_size)
if __name__ == "__main__":
args = parse_arguments()
main(args)
|
import io
import pandas as pd
import json
from flask import (flash,
request,
redirect,
url_for,
jsonify,
render_template,
Blueprint,
abort)
from logzero import logger
from datetime import datetime
from base.forms import HeritabilityForm
from base.utils.auth import jwt_required, admin_required, get_jwt, get_current_user
from caendr.api.strain import get_strains
from caendr.services.heritability_report import get_all_heritability_results, get_user_heritability_results, create_new_heritability_report, get_heritability_report
from caendr.utils.data import unique_id, convert_data_table_to_tsv, get_object_hash
from caendr.services.cloud.storage import get_blob, generate_blob_url
# ================== #
# heritability #
# ================== #
# Tools blueprint
heritability_bp = Blueprint('heritability',
__name__)
@heritability_bp.route('/heritability')
def heritability():
title = "Heritability Calculator"
alt_parent_breadcrumb = {"title": "Tools", "url": url_for('tools.tools')}
form = HeritabilityForm()
hide_form = True
strain_list = []
return render_template('tools/heritability/submit.html', **locals())
@heritability_bp.route('/heritability/create', methods=["GET"])
@jwt_required()
def heritability_create():
""" This endpoint is used to create a heritability job. """
title = "Heritability Calculator"
alt_parent_breadcrumb = {"title": "Tools", "url": url_for('tools.tools')}
jwt_csrf_token = (get_jwt() or {}).get("csrf")
form = HeritabilityForm()
strain_data = get_strains()
strain_list = []
for x in strain_data:
strain_list.append(x.strain)
hide_form = False
id = unique_id()
return render_template('tools/heritability/submit.html', **locals())
@heritability_bp.route("/heritability/all-results")
@admin_required()
def heritability_all_results():
title = "All Heritability Results"
alt_parent_breadcrumb = {"title": "Tools", "url": url_for('tools.tools')}
user = get_current_user()
items = get_all_heritability_results()
return render_template('tools/heritability/list-all.html', **locals())
@heritability_bp.route("/heritability/my-results")
@jwt_required()
def heritability_user_results():
title = "My Heritability Results"
alt_parent_breadcrumb = {"title": "Tools", "url": url_for('tools.tools')}
user = get_current_user()
items = get_user_heritability_results(user.name)
return render_template('tools/heritability/list-user.html', **locals())
@heritability_bp.route('/heritability/submit', methods=["POST"])
@jwt_required()
def submit_h2():
user = get_current_user()
label = request.values['label']
columns = ["AssayNumber", "Strain", "TraitName", "Replicate", "Value"]
# Extract table data
data = json.loads(request.values['table_data'])
data = [x for x in data[1:] if x[0] is not None]
trait = data[0][2]
data_tsv = convert_data_table_to_tsv(data, columns)
# Generate an ID for the data based on its hash
data_hash = get_object_hash(data, length=32)
logger.debug(data_hash)
id = unique_id()
try:
h = create_new_heritability_report(id, user.name, label, data_hash, trait, data_tsv)
except Exception as ex:
if str(type(ex).__name__) == 'DuplicateDataError':
flash('It looks like you submitted that data already - redirecting to your list of Heritability Reports', 'danger')
return jsonify({'duplicate': True,
'data_hash': data_hash,
'id': id})
if str(type(ex).__name__) == 'CachedDataError':
flash('It looks like that data has already been submitted - redirecting to the saved results', 'danger')
return jsonify({'cached': True,
'data_hash': data_hash,
'id': id})
return jsonify({'started': True,
'data_hash': data_hash,
'id': id})
# TODO: Move this into a separate service
@heritability_bp.route("/heritability/h2/<id>")
@jwt_required()
def heritability_result(id):
title = "Heritability Results"
alt_parent_breadcrumb = {"title": "Tools", "url": url_for('tools.tools')}
user = get_current_user()
hr = get_heritability_report(id)
ready = False
data_url = generate_blob_url(hr.get_bucket_name(), hr.get_data_blob_path())
if (not hr._exists) or (hr.username != user.name):
flash('You do not have access to that report', 'danger')
abort(401)
data_hash = hr.data_hash
data_blob = hr.get_data_blob_path()
result_blob = hr.get_result_blob_path()
data = get_blob(hr.get_bucket_name(), hr.get_data_blob_path())
result = get_blob(hr.get_bucket_name(), hr.get_result_blob_path())
if data is None:
return abort(404, description="Heritability report not found")
data = data.download_as_string().decode('utf-8')
data = pd.read_csv(io.StringIO(data), sep="\t")
data['AssayNumber'] = data['AssayNumber'].astype(str)
  data['label'] = data.apply(lambda x: f"{x['AssayNumber']}: {x['Value']}", 1)
data = data.to_dict('records')
trait = data[0]['TraitName']
# Get trait and set title
subtitle = trait
if result:
hr.status = 'COMPLETE'
hr.save()
result = result.download_as_string().decode('utf-8')
result = pd.read_csv(io.StringIO(result), sep="\t")
result = result.to_dict('records')[0]
fnam=datetime.today().strftime('%Y%m%d.')+trait
ready = True
return render_template("tools/heritability/view.html", **locals())
|
import io
import pandas as pd
import json
from flask import (flash,
request,
redirect,
url_for,
jsonify,
render_template,
Blueprint,
abort)
from logzero import logger
from datetime import datetime
from base.forms import HeritabilityForm
from base.utils.auth import jwt_required, admin_required, get_jwt, get_current_user
from caendr.api.strain import get_strains
from caendr.services.heritability_report import get_all_heritability_results, get_user_heritability_results, create_new_heritability_report, get_heritability_report
from caendr.utils.data import unique_id, convert_data_table_to_tsv, get_object_hash
from caendr.services.cloud.storage import get_blob, generate_blob_url
# ================== #
# heritability #
# ================== #
# Tools blueprint
heritability_bp = Blueprint('heritability',
__name__)
@heritability_bp.route('/heritability')
def heritability():
title = "Heritability Calculator"
alt_parent_breadcrumb = {"title": "Tools", "url": url_for('tools.tools')}
form = HeritabilityForm()
hide_form = True
strain_list = []
return render_template('tools/heritability/submit.html', **locals())
@heritability_bp.route('/heritability/create', methods=["GET"])
@jwt_required()
def heritability_create():
""" This endpoint is used to create a heritability job. """
title = "Heritability Calculator"
alt_parent_breadcrumb = {"title": "Tools", "url": url_for('tools.tools')}
jwt_csrf_token = (get_jwt() or {}).get("csrf")
form = HeritabilityForm()
strain_data = get_strains()
strain_list = []
for x in strain_data:
strain_list.append(x.strain)
hide_form = False
id = unique_id()
return render_template('tools/heritability/submit.html', **locals())
@heritability_bp.route("/heritability/all-results")
@admin_required()
def heritability_all_results():
title = "All Heritability Results"
alt_parent_breadcrumb = {"title": "Tools", "url": url_for('tools.tools')}
user = get_current_user()
items = get_all_heritability_results()
return render_template('tools/heritability/list-all.html', **locals())
@heritability_bp.route("/heritability/my-results")
@jwt_required()
def heritability_user_results():
title = "My Heritability Results"
alt_parent_breadcrumb = {"title": "Tools", "url": url_for('tools.tools')}
user = get_current_user()
items = get_user_heritability_results(user.name)
return render_template('tools/heritability/list-user.html', **locals())
@heritability_bp.route('/heritability/submit', methods=["POST"])
@jwt_required()
def submit_h2():
user = get_current_user()
label = request.values['label']
columns = ["AssayNumber", "Strain", "TraitName", "Replicate", "Value"]
# Extract table data
data = json.loads(request.values['table_data'])
data = [x for x in data[1:] if x[0] is not None]
trait = data[0][2]
data_tsv = convert_data_table_to_tsv(data, columns)
# Generate an ID for the data based on its hash
data_hash = get_object_hash(data, length=32)
logger.debug(data_hash)
id = unique_id()
try:
h = create_new_heritability_report(id, user.name, label, data_hash, trait, data_tsv)
except Exception as ex:
if str(type(ex).__name__) == 'DuplicateDataError':
flash('It looks like you submitted that data already - redirecting to your list of Heritability Reports', 'danger')
return jsonify({'duplicate': True,
'data_hash': data_hash,
'id': id})
if str(type(ex).__name__) == 'CachedDataError':
flash('It looks like that data has already been submitted - redirecting to the saved results', 'danger')
return jsonify({'cached': True,
'data_hash': data_hash,
'id': id})
return jsonify({'started': True,
'data_hash': data_hash,
'id': id})
# TODO: Move this into a separate service
@heritability_bp.route("/heritability/h2/<id>")
@jwt_required()
def heritability_result(id):
title = "Heritability Results"
alt_parent_breadcrumb = {"title": "Tools", "url": url_for('tools.tools')}
user = get_current_user()
hr = get_heritability_report(id)
ready = False
data_url = generate_blob_url(hr.get_bucket_name(), hr.get_data_blob_path())
if (not hr._exists) or (hr.username != user.name):
flash('You do not have access to that report', 'danger')
abort(401)
data_hash = hr.data_hash
data_blob = hr.get_data_blob_path()
result_blob = hr.get_result_blob_path()
data = get_blob(hr.get_bucket_name(), hr.get_data_blob_path())
result = get_blob(hr.get_bucket_name(), hr.get_result_blob_path())
if data is None:
return abort(404, description="Heritability report not found")
data = data.download_as_string().decode('utf-8')
data = pd.read_csv(io.StringIO(data), sep="\t")
data['AssayNumber'] = data['AssayNumber'].astype(str)
data['label'] = data.apply(lambda x: f"{x['AssayNumber']}: {x['Value']}", 1)
data = data.to_dict('records')
trait = data[0]['TraitName']
# Get trait and set title
subtitle = trait
if result:
hr.status = 'COMPLETE'
hr.save()
result = result.download_as_string().decode('utf-8')
result = pd.read_csv(io.StringIO(result), sep="\t")
result = result.to_dict('records')[0]
fnam=datetime.today().strftime('%Y%m%d.')+trait
ready = True
return render_template("tools/heritability/view.html", **locals())
|
# -*- coding: utf-8 -*-
from numpy import log as nplog
from pandas_ta.utils import get_offset, verify_series
def log_return(close, length=None, cumulative=False, offset=None, **kwargs):
"""Indicator: Log Return"""
# Validate Arguments
close = verify_series(close)
length = int(length) if length and length > 0 else 1
offset = get_offset(offset)
# Calculate Result
log_return = nplog(close).diff(periods=length)
if cumulative:
log_return = log_return.cumsum()
# Offset
if offset != 0:
log_return = log_return.shift(offset)
# Handle fills
if "fillna" in kwargs:
log_return.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
log_return.fillna(method=kwargs["fill_method"], inplace=True)
# Name & Category
log_return.name = f"{"CUM" if cumulative else ""}LOGRET_{length}"
log_return.category = "performance"
return log_return
log_return.__doc__ = \
"""Log Return
Calculates the logarithmic return of a Series.
See also: help(df.ta.log_return) for additional **kwargs available on a valid 'df'.
Sources:
https://stackoverflow.com/questions/31287552/logarithmic-returns-in-pandas-dataframe
Calculation:
Default Inputs:
length=1, cumulative=False
    LOGRET = log(close).diff(periods=length)
CUMLOGRET = LOGRET.cumsum() if cumulative
Args:
close (pd.Series): Series of 'close's
    length (int): Its period. Default: 1
cumulative (bool): If True, returns the cumulative returns. Default: False
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.Series: New feature generated.
"""
|
# -*- coding: utf-8 -*-
from numpy import log as nplog
from pandas_ta.utils import get_offset, verify_series
def log_return(close, length=None, cumulative=False, offset=None, **kwargs):
"""Indicator: Log Return"""
# Validate Arguments
close = verify_series(close)
length = int(length) if length and length > 0 else 1
offset = get_offset(offset)
# Calculate Result
log_return = nplog(close).diff(periods=length)
if cumulative:
log_return = log_return.cumsum()
# Offset
if offset != 0:
log_return = log_return.shift(offset)
# Handle fills
if "fillna" in kwargs:
log_return.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
log_return.fillna(method=kwargs["fill_method"], inplace=True)
# Name & Category
log_return.name = f"{'CUM' if cumulative else ''}LOGRET_{length}"
log_return.category = "performance"
return log_return
log_return.__doc__ = \
"""Log Return
Calculates the logarithmic return of a Series.
See also: help(df.ta.log_return) for additional **kwargs a valid 'df'.
Sources:
https://stackoverflow.com/questions/31287552/logarithmic-returns-in-pandas-dataframe
Calculation:
Default Inputs:
length=1, cumulative=False
LOGRET = log( close.diff(periods=length) )
CUMLOGRET = LOGRET.cumsum() if cumulative
Args:
close (pd.Series): Series of 'close's
length (int): It's period. Default: 20
cumulative (bool): If True, returns the cumulative returns. Default: False
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.Series: New feature generated.
"""
|
# Copyright 2021 Raven Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import threading
import time
from typing import List
from benchmark.cloud.aws.aws import Ec2Instance, AmazonWebService
from benchmark.tools import get_random_id
logger = logging.getLogger()
class KylinMode:
ALL = 'all'
JOB = 'job'
QUERY = 'query'
class KylinMaster(Ec2Instance):
def __init__(self, *, aws: AmazonWebService = None, region: str = '', ec2_key_name: str = '',
ec2_instance_type: str):
path = os.path.join(os.environ['RAVEN_HOME'], 'config', 'cloud', 'aws', 'kylin',
'kylin-master-cloudformation-template.yaml')
with open(path, encoding='utf-8') as file:
template = file.read()
super().__init__(
name='KylinMaster',
aws=aws,
region=region,
stack_name='Raven-Kylin-Master-Stack',
template=template,
ec2_key_name=ec2_key_name,
ec2_instance_type=ec2_instance_type
)
@property
def spark_master_url(self):
return self.aws.get_stack_output_by_key(stack_name=self.stack_name, output_key='SparkMasterUrl')
def __str__(self):
return f'{self.name}(PublicIp={self.public_ip}, PrivateIp={self.private_ip})'
def launch(self):
logger.info('Kylin master is launching...')
super().launch()
logger.info('Kylin master has launched.')
def terminate(self):
logger.info('Kylin master is terminating...')
super().terminate()
logger.info('Kylin master has terminated.')
class KylinWorker(Ec2Instance):
def __init__(self, *, aws: AmazonWebService = None, region: str = '', ec2_key_name: str = '',
ec2_instance_type: str, worker_id: int = 1):
path = os.path.join(os.environ['RAVEN_HOME'], 'config', 'cloud', 'aws', 'kylin',
'kylin-worker-cloudformation-template.yaml')
with open(path, encoding='utf-8') as file:
template = file.read()
super().__init__(
name='KylinWorker',
aws=aws,
region=region,
stack_name=f'Raven-Kylin-Worker{worker_id}-Stack',
template=template,
ec2_key_name=ec2_key_name,
ec2_instance_type=ec2_instance_type,
KylinWorkerId=worker_id,
)
self._worker_id = worker_id
self._spark_master_private_ip = ''
@property
def worker_id(self):
return self._worker_id
@property
def spark_master_private_ip(self):
return self._spark_master_private_ip
@spark_master_private_ip.setter
def spark_master_private_ip(self, private_ip: str):
self._spark_master_private_ip = private_ip
self.kwargs['SparkMasterPrivateIp'] = private_ip
def __str__(self):
return f'{self.name}(PublicIp={self.public_ip}, PrivateIp={self.private_ip})'
def launch(self):
logger.info(f'Kylin worker {self._worker_id} is launching...')
super().launch()
logger.info(f'Kylin worker {self._worker_id} has launched.')
def terminate(self):
logger.info(f'Kylin worker {self._worker_id} is terminating...')
super().terminate()
logger.info(f'Kylin worker {self._worker_id} has terminated.')
class KylinCluster:
def __init__(self, *, aws: AmazonWebService, master_instance_type: str = 't2.small', worker_num: int = 0,
worker_instance_type: str = 't2.small'):
self._aws = aws
self._master_instance_type = master_instance_type
self._worker_instance_type = worker_instance_type
self._master = KylinMaster(aws=aws, ec2_instance_type=self._master_instance_type)
self._workers: List[KylinWorker] = [
KylinWorker(aws=aws, ec2_instance_type=self._worker_instance_type, worker_id=worker_id) for worker_id in
range(0, worker_num)]
self._cluster_id = get_random_id(16)
@property
def master(self):
return self._master
@property
def workers(self):
return self._workers
def __str__(self):
return f'KylinCluster(Master={self.master}, #Worker={len(self.workers)})'
def launch(self):
logger.info('Kylin cluster is launching...')
self.master.launch()
threads: List[threading.Thread] = []
for worker in self.workers:
worker.spark_master_private_ip = self.master.private_ip
thread = threading.Thread(target=worker.launch)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
logger.info('Kylin cluster has launched.')
def terminate(self):
logger.info('Kylin cluster is terminating...')
threads: List[threading.Thread] = []
for worker in self.workers:
thread = threading.Thread(target=worker.terminate)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
self.master.terminate()
logger.info('Kylin cluster has terminated.')
def install_cloud_watch_agent(self):
logger.debug('Kylin cluster is installing cloudwatch agent...')
threads: List[threading.Thread] = [threading.Thread(target=self.master.install_cloudwatch_agent)]
for worker in self.workers:
threads.append(threading.Thread(target=worker.install_cloudwatch_agent))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
logger.debug('Kylin cluster has finished installing cloudwatch agent.')
def collect_cluster_info(self, output_dir: str = None):
"""Collect kylin cluster information.
:param output_dir:
:return:
"""
if not output_dir:
output_dir = os.path.join(os.environ['RAVEN_HOME'], 'out', 'cluster', f'kylin-{self._cluster_id}')
os.makedirs(output_dir, exist_ok=True)
info = {
'Master': self.master.to_dict(),
'Workers': [worker.to_dict() for worker in self.workers]
}
        with open(os.path.join(output_dir, f'cluster-info_{time.strftime("%Y-%m-%d_%H-%M-%S")}.json'), mode='w',
encoding='utf-8') as file:
json.dump(info, file, indent=2)
def collect_metrics(self, output_dir: str = None):
        logger.debug('Kylin cluster is pulling metrics from the CloudWatch agent...')
if not output_dir:
output_dir = os.path.join(os.environ['RAVEN_HOME'], 'out', 'cluster', f'kylin-{self._cluster_id}')
os.makedirs(output_dir, exist_ok=True)
threads: List[threading.Thread] = [
threading.Thread(target=self.master.collect_metrics, kwargs={'output_dir': output_dir})]
for worker in self.workers:
threads.append(threading.Thread(target=worker.collect_metrics, kwargs={'output_dir': output_dir}))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
        logger.debug('Kylin cluster has finished pulling metrics from the CloudWatch agent.')
def scale(self, worker_num: int):
logger.info('Kylin cluster is scaling...')
n = len(self.workers)
threads: List[threading.Thread] = []
if worker_num < n:
for worker_id in range(worker_num, n):
thread = threading.Thread(target=self.workers[worker_id].terminate)
thread.start()
threads.append(thread)
elif worker_num > n:
for worker_id in range(n, worker_num):
worker = KylinWorker(aws=self._aws, ec2_instance_type=self._worker_instance_type, worker_id=worker_id)
worker.spark_master_private_ip = self.master.private_ip
self.workers.append(worker)
thread = threading.Thread(target=worker.launch)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
logger.info('Kylin cluster has finished scaling.')
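# The launch/terminate/scale methods above all repeat the same fan-out
# pattern: start one thread per worker, then join them all. A hypothetical
# stand-alone helper capturing that pattern (not part of the original class):
def _run_in_threads(callables: List) -> None:
    threads = [threading.Thread(target=fn) for fn in callables]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
# e.g. _run_in_threads([worker.launch for worker in cluster.workers]) would
# launch every worker concurrently, mirroring KylinCluster.launch().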
|
# Copyright 2021 Raven Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import threading
import time
from typing import List
from benchmark.cloud.aws.aws import Ec2Instance, AmazonWebService
from benchmark.tools import get_random_id
logger = logging.getLogger()
class KylinMode:
ALL = 'all'
JOB = 'job'
QUERY = 'query'
class KylinMaster(Ec2Instance):
def __init__(self, *, aws: AmazonWebService = None, region: str = '', ec2_key_name: str = '',
ec2_instance_type: str):
path = os.path.join(os.environ['RAVEN_HOME'], 'config', 'cloud', 'aws', 'kylin',
'kylin-master-cloudformation-template.yaml')
with open(path, encoding='utf-8') as file:
template = file.read()
super().__init__(
name='KylinMaster',
aws=aws,
region=region,
stack_name='Raven-Kylin-Master-Stack',
template=template,
ec2_key_name=ec2_key_name,
ec2_instance_type=ec2_instance_type
)
@property
def spark_master_url(self):
return self.aws.get_stack_output_by_key(stack_name=self.stack_name, output_key='SparkMasterUrl')
def __str__(self):
return f'{self.name}(PublicIp={self.public_ip}, PrivateIp={self.private_ip})'
def launch(self):
logger.info('Kylin master is launching...')
super().launch()
logger.info('Kylin master has launched.')
def terminate(self):
logger.info('Kylin master is terminating...')
super().terminate()
logger.info('Kylin master has terminated.')
class KylinWorker(Ec2Instance):
def __init__(self, *, aws: AmazonWebService = None, region: str = '', ec2_key_name: str = '',
ec2_instance_type: str, worker_id: int = 1):
path = os.path.join(os.environ['RAVEN_HOME'], 'config', 'cloud', 'aws', 'kylin',
'kylin-worker-cloudformation-template.yaml')
with open(path, encoding='utf-8') as file:
template = file.read()
super().__init__(
name='KylinWorker',
aws=aws,
region=region,
stack_name=f'Raven-Kylin-Worker{worker_id}-Stack',
template=template,
ec2_key_name=ec2_key_name,
ec2_instance_type=ec2_instance_type,
KylinWorkerId=worker_id,
)
self._worker_id = worker_id
self._spark_master_private_ip = ''
@property
def worker_id(self):
return self._worker_id
@property
def spark_master_private_ip(self):
return self._spark_master_private_ip
@spark_master_private_ip.setter
def spark_master_private_ip(self, private_ip: str):
self._spark_master_private_ip = private_ip
self.kwargs['SparkMasterPrivateIp'] = private_ip
def __str__(self):
return f'{self.name}(PublicIp={self.public_ip}, PrivateIp={self.private_ip})'
def launch(self):
logger.info(f'Kylin worker {self._worker_id} is launching...')
super().launch()
logger.info(f'Kylin worker {self._worker_id} has launched.')
def terminate(self):
logger.info(f'Kylin worker {self._worker_id} is terminating...')
super().terminate()
logger.info(f'Kylin worker {self._worker_id} has terminated.')
class KylinCluster:
def __init__(self, *, aws: AmazonWebService, master_instance_type: str = 't2.small', worker_num: int = 0,
worker_instance_type: str = 't2.small'):
self._aws = aws
self._master_instance_type = master_instance_type
self._worker_instance_type = worker_instance_type
self._master = KylinMaster(aws=aws, ec2_instance_type=self._master_instance_type)
self._workers: List[KylinWorker] = [
KylinWorker(aws=aws, ec2_instance_type=self._worker_instance_type, worker_id=worker_id) for worker_id in
range(0, worker_num)]
self._cluster_id = get_random_id(16)
@property
def master(self):
return self._master
@property
def workers(self):
return self._workers
def __str__(self):
return f'KylinCluster(Master={self.master}, #Worker={len(self.workers)})'
def launch(self):
logger.info('Kylin cluster is launching...')
self.master.launch()
threads: List[threading.Thread] = []
for worker in self.workers:
worker.spark_master_private_ip = self.master.private_ip
thread = threading.Thread(target=worker.launch)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
logger.info('Kylin cluster has launched.')
def terminate(self):
logger.info('Kylin cluster is terminating...')
threads: List[threading.Thread] = []
for worker in self.workers:
thread = threading.Thread(target=worker.terminate)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
self.master.terminate()
logger.info('Kylin cluster has terminated.')
def install_cloud_watch_agent(self):
logger.debug('Kylin cluster is installing cloudwatch agent...')
threads: List[threading.Thread] = [threading.Thread(target=self.master.install_cloudwatch_agent)]
for worker in self.workers:
threads.append(threading.Thread(target=worker.install_cloudwatch_agent))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
logger.debug('Kylin cluster has finished installing cloudwatch agent.')
def collect_cluster_info(self, output_dir: str = None):
"""Collect kylin cluster information.
:param output_dir:
:return:
"""
if not output_dir:
output_dir = os.path.join(os.environ['RAVEN_HOME'], 'out', 'cluster', f'kylin-{self._cluster_id}')
os.makedirs(output_dir, exist_ok=True)
info = {
'Master': self.master.to_dict(),
'Workers': [worker.to_dict() for worker in self.workers]
}
with open(os.path.join(output_dir, f'cluster-info_{time.strftime("%Y-%m-%d_%H-%M-%S")}.json'), mode='w',
encoding='utf-8') as file:
json.dump(info, file, indent=2)
def collect_metrics(self, output_dir: str = None):
logger.debug('Kylin cluster is pulling metrics cloudwatch agent...')
if not output_dir:
output_dir = os.path.join(os.environ['RAVEN_HOME'], 'out', 'cluster', f'kylin-{self._cluster_id}')
os.makedirs(output_dir, exist_ok=True)
threads: List[threading.Thread] = [
threading.Thread(target=self.master.collect_metrics, kwargs={'output_dir': output_dir})]
for worker in self.workers:
threads.append(threading.Thread(target=worker.collect_metrics, kwargs={'output_dir': output_dir}))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
logger.debug('Kylin cluster has finished pulling metrics cloudwatch agent...')
def scale(self, worker_num: int):
logger.info('Kylin cluster is scaling...')
n = len(self.workers)
threads: List[threading.Thread] = []
if worker_num < n:
for worker_id in range(worker_num, n):
thread = threading.Thread(target=self.workers[worker_id].terminate)
thread.start()
threads.append(thread)
elif worker_num > n:
for worker_id in range(n, worker_num):
worker = KylinWorker(aws=self._aws, ec2_instance_type=self._worker_instance_type, worker_id=worker_id)
worker.spark_master_private_ip = self.master.private_ip
self.workers.append(worker)
thread = threading.Thread(target=worker.launch)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
logger.info('Kylin cluster has finished scaling.')
|
# -*- coding: future_fstrings -*-
from django.db import models
from django.contrib.auth.models import User
import datetime
from pytz import timezone
def now():
# return Main.objects.all().first().now
return datetime.datetime.now()
def set_now(d):
m = Main.objects.all().first()
m.now = d
m.save()
class Team(models.Model):
full_name = models.CharField(max_length=50)
short_name = models.CharField(max_length=3)
nick_name = models.CharField(max_length=50)
city_name = models.CharField(max_length=50)
class Game(models.Model):
week_number = models.IntegerField()
game_number = models.IntegerField()
fav = models.ForeignKey(Team, related_name='fav_games', on_delete=models.CASCADE)
udog = models.ForeignKey(Team, related_name='udog_games', on_delete=models.CASCADE)
spread = models.IntegerField( null=True )
game_date = models.DateTimeField()
fav_score = models.IntegerField( null=True )
udog_score = models.IntegerField( null=True )
fav_is_home = models.BooleanField()
class Meta:
constraints = [
models.UniqueConstraint(fields=['week_number', 'game_number'], name='unique_week_game'),
#spread >=0
]
def totalPoints(self):
if self.fav_score is None or self.udog_score is None:
return None
else:
return self.fav_score+self.udog_score
# if HOU is 3.5 points over ARI, then setFav(HOU,3)
# where HOU is_a Team object
def setFav(self,fav,spread):
if spread < 0:
raise(NameError('spread must be positive'))
if type(fav) is str:
raise(NameError('you sent a string as fav to setFav. Send a Team object'))
if fav != self.fav and fav != self.udog:
raise(NameError(f'{fav.nick_name} not playing in this game! (I am game {self.game_number}, {self.fav.nick_name} v {self.udog.nick_name})'))
self.spread = spread
if self.fav != fav:
temp = self.fav
self.fav = self.udog
self.udog = temp
self.fav_is_home = not(self.fav_is_home)
def save(self, *args, **kwargs):
if not(self.spread is None) and self.spread < 0:
self.spread = -self.spread
temp = self.fav
self.fav = self.udog
self.udog = temp
self.fav_is_home = not(self.fav_is_home)
super(Game, self).save(*args, **kwargs)
def favFullName(self):
if self.fav_is_home:
return self.fav.full_name.upper()
else:
return self.fav.full_name.lower()
def udogFullName(self):
if not(self.fav_is_home):
return self.udog.full_name.upper()
else:
return self.udog.full_name.lower()
def favShortName(self):
if self.fav_is_home:
return self.fav.short_name.upper()
else:
return self.fav.short_name.lower()
def udogShortName(self):
if not(self.fav_is_home):
return self.udog.short_name.upper()
else:
return self.udog.short_name.lower()
def favNickName(self):
if self.fav_is_home:
return self.fav.nick_name.upper()
else:
return self.fav.nick_name.lower()
def udogNickName(self):
if not(self.fav_is_home):
return self.udog.nick_name.upper()
else:
return self.udog.nick_name.lower()
def homeNickName(self):
if self.fav_is_home:
return self.fav.nick_name
else:
return self.udog.nick_name
def awayNickName(self):
if self.fav_is_home:
return self.udog.nick_name
else:
return self.fav.nick_name
def isClosed(self, current_time = None):
if current_time is None:
current_time = now()
if self.game_date.weekday() == 0: # Monday
distance_to_sunday = -1
else:
distance_to_sunday = 6 - self.game_date.weekday()
current_sunday = self.game_date + datetime.timedelta(distance_to_sunday)
current_sunday = current_sunday.replace(hour=13, minute=0, second=0)
if current_time > current_sunday or current_time > self.game_date:
return True
else:
return False
def isOver(self):
if self.fav_score is None or self.udog_score is None:
return False
else:
return True
def isOpen(self, current_time = None):
return not(self.isClosed(current_time = current_time))
def favWins(self):
# throw exception if scores are not filled in
if self.fav_score - self.udog_score > self.spread:
return True
else:
return False
def as_string(self):
        return f'{self.week_number}/{self.game_number}\n{self.game_date.strftime("%m/%d/%Y, %H:%M:%S")}\n{self.favNickName()} {self.fav_score}\t{self.spread}.5\t{self.udogNickName()} {self.udog_score}'
class Pick(models.Model):
player = models.ForeignKey(User,on_delete=models.CASCADE)
week_number = models.IntegerField()
game_number = models.IntegerField()
picked_fav = models.BooleanField()
def save(self, *args, **kwargs):
force = False
try:
force = kwargs.pop('force')
except:
pass
if not(force) and Game.objects.get(game_number=self.game_number,week_number=self.week_number).isClosed():
# You can't change this pick!
err = f'Not actually saving. You are trying to change a pick for a game that isClosed. week: {self.week_number} game:{self.game_number}. If you want to do this use force=True'
print(err)
else:
super(Pick, self).save(*args, **kwargs)
def game(self):
return Game.objects.get(week_number=self.week_number, game_number=self.game_number)
def whoShortName(self):
if self.picked_fav:
return self.game().favShortName()
else:
return self.game().udogShortName()
def isCorrect(self):
game = Game.objects.get(week_number=self.week_number, game_number=self.game_number)
if game.isOver():
return self.picked_fav and game.favWins() or not(self.picked_fav) and not(game.favWins())
else:
            return False
class Monday(models.Model):
player = models.ForeignKey(User,on_delete=models.CASCADE)
week_number = models.IntegerField()
total_points = models.IntegerField(null=True)
def bonus(self):
monday_game = Game.objects.filter(week_number=self.week_number).order_by('game_number').last()
tp = monday_game.totalPoints()
if tp is None:
return 0.0
else:
return 1 / ( 1 + abs( tp - self.total_points - 0.1 ) )
def save(self, *args, **kwargs):
force = False
try:
force = kwargs.pop('force')
except:
pass
if not(force) and Game.objects.filter(week_number=self.week_number).order_by('game_number').last().isClosed():
err = f'Not actually saving. You are trying to change MNTP for a game that isClosed. week: {self.week_number}. If you want to do this use force=True'
print(err)
else:
super(Monday, self).save(*args, **kwargs)
class Bank(models.Model):
player = models.ForeignKey(User,on_delete=models.CASCADE)
deposit_amount = models.FloatField()
note = models.CharField(max_length=50, default='')
transaction_date = models.DateTimeField( auto_now=True, blank=False)
class Blog(models.Model):
entry_date = models.DateTimeField( auto_now=True, blank=False)
entry = models.CharField(max_length=2048, default='')
# only used in development
class Main(models.Model):
now = models.DateTimeField( auto_now=False, blank=False)
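# A worked sketch of the Monday-night bonus used by Monday.bonus() above:
# bonus = 1 / (1 + abs(actual_total - guessed_total - 0.1)), so a perfect
# guess scores roughly 0.91 and the bonus decays as the guess drifts further
# from the real total. The helper below is illustrative only and is not
# referenced by any of the models.
def _example_bonus(actual_total, guessed_total):
    return 1 / (1 + abs(actual_total - guessed_total - 0.1))
# _example_bonus(45, 45) -> ~0.91; _example_bonus(45, 40) -> ~0.17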
|
# -*- coding: future_fstrings -*-
from django.db import models
from django.contrib.auth.models import User
import datetime
from pytz import timezone
def now():
# return Main.objects.all().first().now
return datetime.datetime.now()
def set_now(d):
m = Main.objects.all().first()
m.now = d
m.save()
class Team(models.Model):
full_name = models.CharField(max_length=50)
short_name = models.CharField(max_length=3)
nick_name = models.CharField(max_length=50)
city_name = models.CharField(max_length=50)
class Game(models.Model):
week_number = models.IntegerField()
game_number = models.IntegerField()
fav = models.ForeignKey(Team, related_name='fav_games', on_delete=models.CASCADE)
udog = models.ForeignKey(Team, related_name='udog_games', on_delete=models.CASCADE)
spread = models.IntegerField( null=True )
game_date = models.DateTimeField()
fav_score = models.IntegerField( null=True )
udog_score = models.IntegerField( null=True )
fav_is_home = models.BooleanField()
class Meta:
constraints = [
models.UniqueConstraint(fields=['week_number', 'game_number'], name='unique_week_game'),
#spread >=0
]
def totalPoints(self):
if self.fav_score is None or self.udog_score is None:
return None
else:
return self.fav_score+self.udog_score
# if HOU is 3.5 points over ARI, then setFav(HOU,3)
# where HOU is_a Team object
def setFav(self,fav,spread):
if spread < 0:
raise(NameError('spread must be positive'))
if type(fav) is str:
raise(NameError('you sent a string as fav to setFav. Send a Team object'))
if fav != self.fav and fav != self.udog:
raise(NameError(f'{fav.nick_name} not playing in this game! (I am game {self.game_number}, {self.fav.nick_name} v {self.udog.nick_name})'))
self.spread = spread
if self.fav != fav:
temp = self.fav
self.fav = self.udog
self.udog = temp
self.fav_is_home = not(self.fav_is_home)
def save(self, *args, **kwargs):
if not(self.spread is None) and self.spread < 0:
self.spread = -self.spread
temp = self.fav
self.fav = self.udog
self.udog = temp
self.fav_is_home = not(self.fav_is_home)
super(Game, self).save(*args, **kwargs)
def favFullName(self):
if self.fav_is_home:
return self.fav.full_name.upper()
else:
return self.fav.full_name.lower()
def udogFullName(self):
if not(self.fav_is_home):
return self.udog.full_name.upper()
else:
return self.udog.full_name.lower()
def favShortName(self):
if self.fav_is_home:
return self.fav.short_name.upper()
else:
return self.fav.short_name.lower()
def udogShortName(self):
if not(self.fav_is_home):
return self.udog.short_name.upper()
else:
return self.udog.short_name.lower()
def favNickName(self):
if self.fav_is_home:
return self.fav.nick_name.upper()
else:
return self.fav.nick_name.lower()
def udogNickName(self):
if not(self.fav_is_home):
return self.udog.nick_name.upper()
else:
return self.udog.nick_name.lower()
def homeNickName(self):
if self.fav_is_home:
return self.fav.nick_name
else:
return self.udog.nick_name
def awayNickName(self):
if self.fav_is_home:
return self.udog.nick_name
else:
return self.fav.nick_name
def isClosed(self, current_time = None):
if current_time is None:
current_time = now()
if self.game_date.weekday() == 0: # Monday
distance_to_sunday = -1
else:
distance_to_sunday = 6 - self.game_date.weekday()
current_sunday = self.game_date + datetime.timedelta(distance_to_sunday)
current_sunday = current_sunday.replace(hour=13, minute=0, second=0)
if current_time > current_sunday or current_time > self.game_date:
return True
else:
return False
def isOver(self):
if self.fav_score is None or self.udog_score is None:
return False
else:
return True
def isOpen(self, current_time = None):
return not(self.isClosed(current_time = current_time))
def favWins(self):
# throw exception if scores are not filled in
if self.fav_score - self.udog_score > self.spread:
return True
else:
return False
def as_string(self):
return f'{self.week_number}/{self.game_number}\n{self.game_date.strftime("%m/%d/%Y, %H:%M:%S")}\n{self.favNickName()} {self.fav_score}\t{self.spread}.5\t{self.udogNickName()} {self.udog_score}'
class Pick(models.Model):
player = models.ForeignKey(User,on_delete=models.CASCADE)
week_number = models.IntegerField()
game_number = models.IntegerField()
picked_fav = models.BooleanField()
def save(self, *args, **kwargs):
force = False
try:
force = kwargs.pop('force')
except:
pass
if not(force) and Game.objects.get(game_number=self.game_number,week_number=self.week_number).isClosed():
# You can't change this pick!
err = f'Not actually saving. You are trying to change a pick for a game that isClosed. week: {self.week_number} game:{self.game_number}. If you want to do this use force=True'
print(err)
else:
super(Pick, self).save(*args, **kwargs)
def game(self):
return Game.objects.get(week_number=self.week_number, game_number=self.game_number)
def whoShortName(self):
if self.picked_fav:
return self.game().favShortName()
else:
return self.game().udogShortName()
def isCorrect(self):
game = Game.objects.get(week_number=self.week_number, game_number=self.game_number)
if game.isOver():
return self.picked_fav and game.favWins() or not(self.picked_fav) and not(game.favWins())
else:
return False;
class Monday(models.Model):
player = models.ForeignKey(User,on_delete=models.CASCADE)
week_number = models.IntegerField()
total_points = models.IntegerField(null=True)
def bonus(self):
monday_game = Game.objects.filter(week_number=self.week_number).order_by('game_number').last()
tp = monday_game.totalPoints()
if tp is None:
return 0.0
else:
return 1 / ( 1 + abs( tp - self.total_points - 0.1 ) )
def save(self, *args, **kwargs):
force = False
try:
force = kwargs.pop('force')
except:
pass
if not(force) and Game.objects.filter(week_number=self.week_number).order_by('game_number').last().isClosed():
err = f'Not actually saving. You are trying to change MNTP for a game that isClosed. week: {self.week_number}. If you want to do this use force=True'
print(err)
else:
super(Monday, self).save(*args, **kwargs)
class Bank(models.Model):
player = models.ForeignKey(User,on_delete=models.CASCADE)
deposit_amount = models.FloatField()
note = models.CharField(max_length=50, default='')
transaction_date = models.DateTimeField( auto_now=True, blank=False)
class Blog(models.Model):
entry_date = models.DateTimeField( auto_now=True, blank=False)
entry = models.CharField(max_length=2048, default='')
# only used in development
class Main(models.Model):
now = models.DateTimeField( auto_now=False, blank=False)
|
import json
import os
import uuid
from typing import Tuple
import requests
from qrcode import QRCode
from requests import Response
PAYMENT_EVENT_TYPES = (
'TransferOutEvent',
'TransferInEvent',
'TransferOutReversalEvent',
'BarcodePaymentEvent',
'DebitPurchaseEvent',
'DebitPurchaseReversalEvent',
)
class NuException(Exception):
def __init__(self, status_code, response, url):
        super().__init__(f'The request failed with HTTP status code {status_code}')
self.url = url
self.status_code = status_code
self.response = response
class Nubank:
DISCOVERY_URL = 'https://prod-s0-webapp-proxy.nubank.com.br/api/discovery'
DISCOVERY_APP_URL = 'https://prod-s0-webapp-proxy.nubank.com.br/api/app/discovery'
auth_url = None
feed_url = None
proxy_list_url = None
proxy_list_app_url = None
query_url = None
bills_url = None
def __init__(self):
self.headers = {
'Content-Type': 'application/json',
'X-Correlation-Id': 'WEB-APP.pewW9',
'User-Agent': 'pynubank Client - https://github.com/andreroggeri/pynubank',
}
self._update_proxy_urls()
self.auth_url = self.proxy_list_url['login']
@staticmethod
def _get_query(query_name):
root = os.path.abspath(os.path.dirname(__file__))
gql_file = query_name + '.gql'
path = os.path.join(root, 'queries', gql_file)
with open(path) as gql:
return gql.read()
def _update_proxy_urls(self):
request = requests.get(self.DISCOVERY_URL, headers=self.headers)
self.proxy_list_url = json.loads(request.content.decode('utf-8'))
request = requests.get(self.DISCOVERY_APP_URL, headers=self.headers)
self.proxy_list_app_url = json.loads(request.content.decode('utf-8'))
def _make_graphql_request(self, graphql_object):
body = {
'query': self._get_query(graphql_object)
}
response = requests.post(self.query_url, json=body, headers=self.headers)
return self._handle_response(response)
def _password_auth(self, cpf: str, password: str):
payload = {
"grant_type": "password",
"login": cpf,
"password": password,
"client_id": "other.conta",
"client_secret": "yQPeLzoHuJzlMMSAjC-LgNUJdUecx8XO"
}
response = requests.post(self.auth_url, json=payload, headers=self.headers)
data = self._handle_response(response)
return data
def _handle_response(self, response: Response) -> dict:
if response.status_code != 200:
raise NuException(response.status_code, response.json(), response.url)
return response.json()
def get_qr_code(self) -> Tuple[str, QRCode]:
content = str(uuid.uuid4())
qr = QRCode()
qr.add_data(content)
return content, qr
def authenticate_with_qr_code(self, cpf: str, password, uuid: str):
auth_data = self._password_auth(cpf, password)
        self.headers['Authorization'] = f'Bearer {auth_data["access_token"]}'
payload = {
'qr_code_id': uuid,
'type': 'login-webapp'
}
response = requests.post(self.proxy_list_app_url['lift'], json=payload, headers=self.headers)
auth_data = self._handle_response(response)
        self.headers['Authorization'] = f'Bearer {auth_data["access_token"]}'
self.feed_url = auth_data['_links']['events']['href']
self.query_url = auth_data['_links']['ghostflame']['href']
self.bills_url = auth_data['_links']['bills_summary']['href']
def get_card_feed(self):
request = requests.get(self.feed_url, headers=self.headers)
return json.loads(request.content.decode('utf-8'))
def get_card_statements(self):
feed = self.get_card_feed()
return list(filter(lambda x: x['category'] == 'transaction', feed['events']))
def get_bills(self):
request = requests.get(self.bills_url, headers=self.headers)
return json.loads(request.content.decode('utf-8'))['bills']
def get_bill_details(self, bill):
request = requests.get(bill['_links']['self']['href'], headers=self.headers)
return json.loads(request.content.decode('utf-8'))
def get_account_feed(self):
data = self._make_graphql_request('account_feed')
return data['data']['viewer']['savingsAccount']['feed']
def get_account_statements(self):
feed = self.get_account_feed()
return list(filter(lambda x: x['__typename'] in PAYMENT_EVENT_TYPES, feed))
def get_account_balance(self):
data = self._make_graphql_request('account_balance')
return data['data']['viewer']['savingsAccount']['currentSavingsBalance']['netAmount']
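# A minimal usage sketch of the class above (the CPF and password are
# placeholders; the generated QR code has to be scanned in the Nubank mobile
# app before authenticate_with_qr_code will succeed):
if __name__ == '__main__':
    nu = Nubank()
    login_uuid, qr = nu.get_qr_code()
    qr.print_ascii()  # scan this QR code with the mobile app first
    nu.authenticate_with_qr_code('00000000000', 'my-password', login_uuid)
    print(nu.get_account_balance())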
|
import json
import os
import uuid
from typing import Tuple
import requests
from qrcode import QRCode
from requests import Response
PAYMENT_EVENT_TYPES = (
'TransferOutEvent',
'TransferInEvent',
'TransferOutReversalEvent',
'BarcodePaymentEvent',
'DebitPurchaseEvent',
'DebitPurchaseReversalEvent',
)
class NuException(Exception):
def __init__(self, status_code, response, url):
super().__init__(f'The request made failed with HTTP status code {status_code}')
self.url = url
self.status_code = status_code
self.response = response
class Nubank:
DISCOVERY_URL = 'https://prod-s0-webapp-proxy.nubank.com.br/api/discovery'
DISCOVERY_APP_URL = 'https://prod-s0-webapp-proxy.nubank.com.br/api/app/discovery'
auth_url = None
feed_url = None
proxy_list_url = None
proxy_list_app_url = None
query_url = None
bills_url = None
def __init__(self):
self.headers = {
'Content-Type': 'application/json',
'X-Correlation-Id': 'WEB-APP.pewW9',
'User-Agent': 'pynubank Client - https://github.com/andreroggeri/pynubank',
}
self._update_proxy_urls()
self.auth_url = self.proxy_list_url['login']
@staticmethod
def _get_query(query_name):
root = os.path.abspath(os.path.dirname(__file__))
gql_file = query_name + '.gql'
path = os.path.join(root, 'queries', gql_file)
with open(path) as gql:
return gql.read()
def _update_proxy_urls(self):
request = requests.get(self.DISCOVERY_URL, headers=self.headers)
self.proxy_list_url = json.loads(request.content.decode('utf-8'))
request = requests.get(self.DISCOVERY_APP_URL, headers=self.headers)
self.proxy_list_app_url = json.loads(request.content.decode('utf-8'))
def _make_graphql_request(self, graphql_object):
body = {
'query': self._get_query(graphql_object)
}
response = requests.post(self.query_url, json=body, headers=self.headers)
return self._handle_response(response)
def _password_auth(self, cpf: str, password: str):
payload = {
"grant_type": "password",
"login": cpf,
"password": password,
"client_id": "other.conta",
"client_secret": "yQPeLzoHuJzlMMSAjC-LgNUJdUecx8XO"
}
response = requests.post(self.auth_url, json=payload, headers=self.headers)
data = self._handle_response(response)
return data
def _handle_response(self, response: Response) -> dict:
if response.status_code != 200:
raise NuException(response.status_code, response.json(), response.url)
return response.json()
def get_qr_code(self) -> Tuple[str, QRCode]:
content = str(uuid.uuid4())
qr = QRCode()
qr.add_data(content)
return content, qr
def authenticate_with_qr_code(self, cpf: str, password, uuid: str):
auth_data = self._password_auth(cpf, password)
self.headers['Authorization'] = f'Bearer {auth_data["access_token"]}'
payload = {
'qr_code_id': uuid,
'type': 'login-webapp'
}
response = requests.post(self.proxy_list_app_url['lift'], json=payload, headers=self.headers)
auth_data = self._handle_response(response)
self.headers['Authorization'] = f'Bearer {auth_data["access_token"]}'
self.feed_url = auth_data['_links']['events']['href']
self.query_url = auth_data['_links']['ghostflame']['href']
self.bills_url = auth_data['_links']['bills_summary']['href']
def get_card_feed(self):
request = requests.get(self.feed_url, headers=self.headers)
return json.loads(request.content.decode('utf-8'))
def get_card_statements(self):
feed = self.get_card_feed()
return list(filter(lambda x: x['category'] == 'transaction', feed['events']))
def get_bills(self):
request = requests.get(self.bills_url, headers=self.headers)
return json.loads(request.content.decode('utf-8'))['bills']
def get_bill_details(self, bill):
request = requests.get(bill['_links']['self']['href'], headers=self.headers)
return json.loads(request.content.decode('utf-8'))
def get_account_feed(self):
data = self._make_graphql_request('account_feed')
return data['data']['viewer']['savingsAccount']['feed']
def get_account_statements(self):
feed = self.get_account_feed()
return list(filter(lambda x: x['__typename'] in PAYMENT_EVENT_TYPES, feed))
def get_account_balance(self):
data = self._make_graphql_request('account_balance')
return data['data']['viewer']['savingsAccount']['currentSavingsBalance']['netAmount']
|
#!/usr/bin/env python3
"""
An example script to send data to CommCare using the Submission API
Usage:
$ export CCHQ_PROJECT_SPACE=my-project-space
$ export CCHQ_CASE_TYPE=person
$ export [email protected]
$ export CCHQ_PASSWORD=MijByG_se3EcKr.t
$ export CCHQ_USER_ID=c0ffeeeeeb574eb8b5d5036c9a61a483
$ export CCHQ_OWNER_ID=c0ffeeeee1e34b12bb5da0dc838e8406
$ ./submit_data.py sample_data.csv
"""
# (Optional) Configure the following settings with your values
# An XML namespace to identify your XForm submission
FORM_XMLNS = 'http://example.com/submission-api-example-form/'
# A string to identify the origin of your data
DEVICE_ID = "submission_api_example"
# End of configurable settings
import csv
import os
import sys
import uuid
from dataclasses import dataclass
from datetime import datetime, timezone
from http.client import responses as http_responses
from typing import Any, Iterable, List, Optional, Tuple
from xml.etree import ElementTree as ET
import requests
from jinja2 import Template
COMMCARE_URL = 'https://www.commcarehq.org/'
@dataclass
class CaseProperty:
name: str
value: Any
@dataclass
class Case:
id: str # A UUID. Generated if not given in the data.
name: str # Required
type: str # A name for the case type. e.g. "person" or "site"
modified_on: str # Generated if not given. e.g. "2020-06-08T18:41:33.207Z"
owner_id: str # ID of the user or location that cases must be assigned to
properties: List[CaseProperty] # All other given data
server_modified_on: Optional[str]
def main(filename):
"""
Sends data to CommCare HQ using the Submission API.
"""
data = get_data(filename)
cases = as_cases(data)
xform_str = render_xform(cases)
success, message = submit_xform(xform_str)
return success, message
def get_data(csv_filename) -> Iterable[dict]:
"""
Reads data in CSV format from the given filename, and yields it as
dictionaries.
"""
with open(csv_filename) as csv_file:
reader = csv.DictReader(csv_file)
yield from reader
def as_cases(data: Iterable[dict]) -> Iterable[Case]:
"""
Casts dictionaries as Case instances
"""
reserved = ('id', 'name', 'case_type', 'modified_on', 'server_modified_on')
for dict_ in data:
properties = [CaseProperty(name=key, value=value)
for key, value in dict_.items()
if key not in reserved]
yield Case(
id=dict_.get('id', str(uuid.uuid4())),
name=dict_['name'],
type=os.environ['CCHQ_CASE_TYPE'],
modified_on=dict_.get('modified_on', now_utc()),
owner_id=os.environ['CCHQ_OWNER_ID'],
server_modified_on=dict_.get('server_modified_on'),
properties=properties,
)
def render_xform(cases: Iterable[Case]) -> str:
context = {
'form_xmlns': FORM_XMLNS,
'device_id': DEVICE_ID,
'now_utc': now_utc(),
'cchq_username': os.environ['CCHQ_USERNAME'],
'cchq_user_id': os.environ['CCHQ_USER_ID'],
'submission_id': uuid.uuid4().hex,
'cases': list(cases),
}
with open('xform.xml.j2') as template_file:
template = Template(template_file.read())
xform = template.render(**context)
return xform
def submit_xform(xform: str) -> Tuple[bool, str]:
"""
Submits the given XForm to CommCare.
Returns (True, success_message) on success, or (False,
failure_message) on failure.
"""
url = join_url(COMMCARE_URL,
                   f'/a/{os.environ["CCHQ_PROJECT_SPACE"]}/receiver/api/')
auth = (os.environ['CCHQ_USERNAME'], os.environ['CCHQ_PASSWORD'])
headers = {'Content-Type': 'text/html; charset=UTF-8'}
response = requests.post(url, xform.encode('utf-8'),
headers=headers, auth=auth)
if not 200 <= response.status_code < 300:
return False, http_responses[response.status_code]
return parse_response(response.text)
def parse_response(text: str) -> Tuple[bool, str]:
"""
Parses a CommCare HQ Submission API response.
Returns (True, success_message) on success, or (False,
failure_message) on failure.
>>> text = '''
... <OpenRosaResponse xmlns="http://openrosa.org/http/response">
... <message nature="submit_success"> √ </message>
... </OpenRosaResponse>
... '''
>>> parse_response(text)
(True, ' √ ')
"""
xml = ET.XML(text)
message = xml.find('{http://openrosa.org/http/response}message')
success = message.attrib['nature'] == 'submit_success'
return success, message.text
def join_url(base_url: str, endpoint: str) -> str:
"""
Returns ``base_url`` + ``endpoint`` with the right forward slashes.
>>> join_url('https://example.com/', '/api/foo')
'https://example.com/api/foo'
>>> join_url('https://example.com', 'api/foo')
'https://example.com/api/foo'
"""
return '/'.join((base_url.rstrip('/'), endpoint.lstrip('/')))
def now_utc() -> str:
"""
Returns a UTC timestamp in ISO-8601 format with the offset as "Z".
e.g. "2020-06-08T18:41:33.207Z"
"""
now = datetime.now(tz=timezone.utc)
now_iso = now.isoformat(timespec='milliseconds')
now_iso_z = now_iso.replace('+00:00', 'Z')
return now_iso_z
def missing_env_vars():
env_vars = (
'CCHQ_PROJECT_SPACE',
'CCHQ_CASE_TYPE',
'CCHQ_USERNAME',
'CCHQ_PASSWORD',
'CCHQ_USER_ID',
'CCHQ_OWNER_ID',
)
return [env_var for env_var in env_vars if env_var not in os.environ]
if __name__ == '__main__':
if len(sys.argv) != 2:
print(__doc__)
sys.exit()
if missing := missing_env_vars():
print('Missing environment variables:', ', '.join(missing))
sys.exit(1)
success, message = main(sys.argv[1])
print(message)
if not success:
sys.exit(1)
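# --- Illustrative input format (editor's note; the CSV content is an assumption) ---
# A minimal sample_data.csv for this script could look like:
#
#     name,age,village
#     Alice,34,Springfield
#     Bob,29,Shelbyville
#
# "name" is required; "id", "modified_on" and "server_modified_on" are optional and
# generated when absent; every other column (here "age" and "village") is turned into
# a CaseProperty on the resulting Case by as_cases().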
|
#!/usr/bin/env python3
"""
An example script to send data to CommCare using the Submission API
Usage:
$ export CCHQ_PROJECT_SPACE=my-project-space
$ export CCHQ_CASE_TYPE=person
$ export [email protected]
$ export CCHQ_PASSWORD=MijByG_se3EcKr.t
$ export CCHQ_USER_ID=c0ffeeeeeb574eb8b5d5036c9a61a483
$ export CCHQ_OWNER_ID=c0ffeeeee1e34b12bb5da0dc838e8406
$ ./submit_data.py sample_data.csv
"""
# (Optional) Configure the following settings with your values
# An XML namespace to identify your XForm submission
FORM_XMLNS = 'http://example.com/submission-api-example-form/'
# A string to identify the origin of your data
DEVICE_ID = "submission_api_example"
# End of configurable settings
import csv
import os
import sys
import uuid
from dataclasses import dataclass
from datetime import datetime, timezone
from http.client import responses as http_responses
from typing import Any, Iterable, List, Optional, Tuple
from xml.etree import ElementTree as ET
import requests
from jinja2 import Template
COMMCARE_URL = 'https://www.commcarehq.org/'
@dataclass
class CaseProperty:
name: str
value: Any
@dataclass
class Case:
id: str # A UUID. Generated if not given in the data.
name: str # Required
type: str # A name for the case type. e.g. "person" or "site"
modified_on: str # Generated if not given. e.g. "2020-06-08T18:41:33.207Z"
owner_id: str # ID of the user or location that cases must be assigned to
properties: List[CaseProperty] # All other given data
server_modified_on: Optional[str]
def main(filename):
"""
Sends data to CommCare HQ using the Submission API.
"""
data = get_data(filename)
cases = as_cases(data)
xform_str = render_xform(cases)
success, message = submit_xform(xform_str)
return success, message
def get_data(csv_filename) -> Iterable[dict]:
"""
Reads data in CSV format from the given filename, and yields it as
dictionaries.
"""
with open(csv_filename) as csv_file:
reader = csv.DictReader(csv_file)
yield from reader
def as_cases(data: Iterable[dict]) -> Iterable[Case]:
"""
Casts dictionaries as Case instances
"""
reserved = ('id', 'name', 'case_type', 'modified_on', 'server_modified_on')
for dict_ in data:
properties = [CaseProperty(name=key, value=value)
for key, value in dict_.items()
if key not in reserved]
yield Case(
id=dict_.get('id', str(uuid.uuid4())),
name=dict_['name'],
type=os.environ['CCHQ_CASE_TYPE'],
modified_on=dict_.get('modified_on', now_utc()),
owner_id=os.environ['CCHQ_OWNER_ID'],
server_modified_on=dict_.get('server_modified_on'),
properties=properties,
)
def render_xform(cases: Iterable[Case]) -> str:
context = {
'form_xmlns': FORM_XMLNS,
'device_id': DEVICE_ID,
'now_utc': now_utc(),
'cchq_username': os.environ['CCHQ_USERNAME'],
'cchq_user_id': os.environ['CCHQ_USER_ID'],
'submission_id': uuid.uuid4().hex,
'cases': list(cases),
}
with open('xform.xml.j2') as template_file:
template = Template(template_file.read())
xform = template.render(**context)
return xform
def submit_xform(xform: str) -> Tuple[bool, str]:
"""
Submits the given XForm to CommCare.
Returns (True, success_message) on success, or (False,
failure_message) on failure.
"""
url = join_url(COMMCARE_URL,
f'/a/{os.environ["CCHQ_PROJECT_SPACE"]}/receiver/api/')
auth = (os.environ['CCHQ_USERNAME'], os.environ['CCHQ_PASSWORD'])
headers = {'Content-Type': 'text/html; charset=UTF-8'}
response = requests.post(url, xform.encode('utf-8'),
headers=headers, auth=auth)
if not 200 <= response.status_code < 300:
return False, http_responses[response.status_code]
return parse_response(response.text)
def parse_response(text: str) -> Tuple[bool, str]:
"""
Parses a CommCare HQ Submission API response.
Returns (True, success_message) on success, or (False,
failure_message) on failure.
>>> text = '''
... <OpenRosaResponse xmlns="http://openrosa.org/http/response">
... <message nature="submit_success"> √ </message>
... </OpenRosaResponse>
... '''
>>> parse_response(text)
(True, ' √ ')
"""
xml = ET.XML(text)
message = xml.find('{http://openrosa.org/http/response}message')
success = message.attrib['nature'] == 'submit_success'
return success, message.text
def join_url(base_url: str, endpoint: str) -> str:
"""
Returns ``base_url`` + ``endpoint`` with the right forward slashes.
>>> join_url('https://example.com/', '/api/foo')
'https://example.com/api/foo'
>>> join_url('https://example.com', 'api/foo')
'https://example.com/api/foo'
"""
return '/'.join((base_url.rstrip('/'), endpoint.lstrip('/')))
def now_utc() -> str:
"""
Returns a UTC timestamp in ISO-8601 format with the offset as "Z".
e.g. "2020-06-08T18:41:33.207Z"
"""
now = datetime.now(tz=timezone.utc)
now_iso = now.isoformat(timespec='milliseconds')
now_iso_z = now_iso.replace('+00:00', 'Z')
return now_iso_z
def missing_env_vars():
env_vars = (
'CCHQ_PROJECT_SPACE',
'CCHQ_CASE_TYPE',
'CCHQ_USERNAME',
'CCHQ_PASSWORD',
'CCHQ_USER_ID',
'CCHQ_OWNER_ID',
)
return [env_var for env_var in env_vars if env_var not in os.environ]
if __name__ == '__main__':
if len(sys.argv) != 2:
print(__doc__)
sys.exit()
if missing := missing_env_vars():
print('Missing environment variables:', ', '.join(missing))
sys.exit(1)
success, message = main(sys.argv[1])
print(message)
if not success:
sys.exit(1)
|
stops = list(input())
command = input().split(":")
while command[0] != "Travel":
if command[0] == "Add Stop":
if 0 <= int(command[1]) < len(stops):
index = int(command[1])
for letter in command[2]:
stops.insert(index, letter)
index += 1
elif command[0] == "Remove Stop":
if 0 <= int(command[1]) < len(stops) and 0 <= int(command[2]) < len(stops):
[stops.pop(int(command[1])) for i in range(int(command[1]), int(command[2])+1)]
elif command[0] == "Switch":
stops = ''.join(stops)
if command[1] in stops:
stops = stops.replace(command[1], command[2])
stops = list(stops)
print(''.join(stops))
command = input().split(":")
print(f"Ready for world tour! Planned stops: {"".join(stops)}")
|
stops = list(input())
command = input().split(":")
while command[0] != "Travel":
if command[0] == "Add Stop":
if 0 <= int(command[1]) < len(stops):
index = int(command[1])
for letter in command[2]:
stops.insert(index, letter)
index += 1
elif command[0] == "Remove Stop":
if 0 <= int(command[1]) < len(stops) and 0 <= int(command[2]) < len(stops):
[stops.pop(int(command[1])) for i in range(int(command[1]), int(command[2])+1)]
elif command[0] == "Switch":
stops = ''.join(stops)
if command[1] in stops:
stops = stops.replace(command[1], command[2])
stops = list(stops)
print(''.join(stops))
command = input().split(":")
print(f"Ready for world tour! Planned stops: {''.join(stops)}")
|
#MIT License
#Copyright (c) 2021 SUBIN
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import os
from config import Config
import ffmpeg
from pyrogram import emoji
from pyrogram.methods.messages.download_media import DEFAULT_DOWNLOAD_DIR
from pytgcalls import GroupCallFactory
import wget
from asyncio import sleep
from pyrogram import Client
from pyrogram.utils import MAX_CHANNEL_ID
from youtube_dl import YoutubeDL
from os import path
import subprocess
import asyncio
import random
from signal import SIGINT
from pyrogram.raw.types import InputGroupCall
from pyrogram.raw.functions.phone import EditGroupCallTitle, CreateGroupCall
from random import randint
bot = Client(
"Musicplayervc",
Config.API_ID,
Config.API_HASH,
bot_token=Config.BOT_TOKEN
)
bot.start()
e=bot.get_me()
USERNAME=e.username
from user import USER
CHAT=Config.CHAT
FFMPEG_PROCESSES = {}
ADMIN_LIST={}
CALL_STATUS={}
EDIT_TITLE=Config.EDIT_TITLE
RADIO={6}
LOG_GROUP=Config.LOG_GROUP
DURATION_LIMIT=Config.DURATION_LIMIT
DELAY=Config.DELAY
playlist=Config.playlist
msg=Config.msg
SHUFFLE=Config.SHUFFLE
LIMIT=Config.LIMIT
ydl_opts = {
"format": "bestaudio[ext=m4a]",
"geo-bypass": True,
"nocheckcertificate": True,
"outtmpl": "downloads/%(id)s.%(ext)s",
}
ydl = YoutubeDL(ydl_opts)
RADIO_TITLE=os.environ.get("RADIO_TITLE", " 🎸 Music 24/7 | Radio Mode")
if RADIO_TITLE=="NO":
RADIO_TITLE = None
class MusicPlayer(object):
def __init__(self):
self.group_call = GroupCallFactory(USER, GroupCallFactory.MTPROTO_CLIENT_TYPE.PYROGRAM).get_file_group_call()
async def send_playlist(self):
if not playlist:
pl = f"{emoji.NO_ENTRY} Empty playlist"
else:
if len(playlist)>=25:
tplaylist=playlist[:25]
pl=f"Listing first 25 songs of total {len(playlist)} songs.\n"
pl += f"{emoji.PLAY_BUTTON} **Playlist**:\n" + "\n".join([
f"**{i}**. **🎸{x[1]}**\n 👤**Requested by:** {x[4]}"
for i, x in enumerate(tplaylist)
])
else:
pl = f"{emoji.PLAY_BUTTON} **Playlist**:\n" + "\n".join([
f"**{i}**. **🎸{x[1]}**\n 👤**Requested by:** {x[4]}\n"
for i, x in enumerate(playlist)
])
if msg.get('playlist') is not None:
await msg['playlist'].delete()
msg['playlist'] = await self.send_text(pl)
async def skip_current_playing(self):
group_call = self.group_call
if not playlist:
return
if len(playlist) == 1:
await mp.start_radio()
return
client = group_call.client
download_dir = os.path.join(client.workdir, DEFAULT_DOWNLOAD_DIR)
group_call.input_filename = os.path.join(
download_dir,
f"{playlist[1][1]}.raw"
)
# remove old track from playlist
old_track = playlist.pop(0)
print(f"- START PLAYING: {playlist[0][1]}")
if EDIT_TITLE:
await self.edit_title()
if LOG_GROUP:
await self.send_playlist()
os.remove(os.path.join(
download_dir,
f"{old_track[1]}.raw")
)
if len(playlist) == 1:
return
await self.download_audio(playlist[1])
async def send_text(self, text):
group_call = self.group_call
client = group_call.client
chat_id = LOG_GROUP
message = await bot.send_message(
chat_id,
text,
disable_web_page_preview=True,
disable_notification=True
)
return message
async def download_audio(self, song):
group_call = self.group_call
client = group_call.client
raw_file = os.path.join(client.workdir, DEFAULT_DOWNLOAD_DIR,
f"{song[1]}.raw")
#if os.path.exists(raw_file):
#os.remove(raw_file)
if not os.path.isfile(raw_file):
# credits: https://t.me/c/1480232458/6825
#os.mkfifo(raw_file)
if song[3] == "telegram":
original_file = await bot.download_media(f"{song[2]}")
elif song[3] == "youtube":
url=song[2]
try:
info = ydl.extract_info(url, False)
ydl.download([url])
                    original_file=path.join("downloads", f"{info['id']}.{info['ext']}")
except Exception as e:
playlist.pop(1)
print(f"Unable to download due to {e} and skipped.")
if len(playlist) == 1:
return
await self.download_audio(playlist[1])
return
else:
original_file=wget.download(song[2])
ffmpeg.input(original_file).output(
raw_file,
format='s16le',
acodec='pcm_s16le',
ac=2,
ar='48k',
loglevel='error'
).overwrite_output().run()
os.remove(original_file)
async def start_radio(self):
group_call = self.group_call
if group_call.is_connected:
playlist.clear()
process = FFMPEG_PROCESSES.get(CHAT)
if process:
try:
process.send_signal(SIGINT)
except subprocess.TimeoutExpired:
process.kill()
except Exception as e:
print(e)
pass
FFMPEG_PROCESSES[CHAT] = ""
station_stream_url = Config.STREAM_URL
try:
RADIO.remove(0)
except:
pass
try:
RADIO.add(1)
except:
pass
if Config.CPLAY:
await self.c_play(Config.STREAM_URL)
return
try:
RADIO.remove(3)
except:
pass
if os.path.exists(f'radio-{CHAT}.raw'):
os.remove(f'radio-{CHAT}.raw')
# credits: https://t.me/c/1480232458/6825
#os.mkfifo(f'radio-{CHAT}.raw')
if not CALL_STATUS.get(CHAT):
await self.start_call()
ffmpeg_log = open("ffmpeg.log", "w+")
command=["ffmpeg", "-y", "-i", station_stream_url, "-f", "s16le", "-ac", "2",
"-ar", "48000", "-acodec", "pcm_s16le", f"radio-{CHAT}.raw"]
process = await asyncio.create_subprocess_exec(
*command,
stdout=ffmpeg_log,
stderr=asyncio.subprocess.STDOUT,
)
FFMPEG_PROCESSES[CHAT] = process
if RADIO_TITLE:
await self.edit_title()
await sleep(2)
while not os.path.isfile(f'radio-{CHAT}.raw'):
await sleep(1)
group_call.input_filename = f'radio-{CHAT}.raw'
while True:
if CALL_STATUS.get(CHAT):
print("Succesfully Joined")
break
else:
print("Connecting...")
await self.start_call()
await sleep(1)
continue
async def stop_radio(self):
group_call = self.group_call
if group_call:
playlist.clear()
group_call.input_filename = ''
try:
RADIO.remove(1)
except:
pass
try:
RADIO.add(0)
except:
pass
process = FFMPEG_PROCESSES.get(CHAT)
if process:
try:
process.send_signal(SIGINT)
except subprocess.TimeoutExpired:
process.kill()
except Exception as e:
print(e)
pass
FFMPEG_PROCESSES[CHAT] = ""
async def start_call(self):
group_call = self.group_call
try:
await group_call.start(CHAT)
except RuntimeError:
await USER.send(CreateGroupCall(
peer=(await USER.resolve_peer(CHAT)),
random_id=randint(10000, 999999999)
)
)
await group_call.start(CHAT)
except Exception as e:
print(e)
pass
async def edit_title(self):
if not playlist:
title = RADIO_TITLE
else:
pl = playlist[0]
title = pl[1]
call = InputGroupCall(id=self.group_call.group_call.id, access_hash=self.group_call.group_call.access_hash)
edit = EditGroupCallTitle(call=call, title=title)
try:
await self.group_call.client.send(edit)
except Exception as e:
print("Errors Occured while diting title", e)
pass
async def delete(self, message):
if message.chat.type == "supergroup":
await sleep(DELAY)
try:
await message.delete()
except:
pass
async def get_admins(self, chat):
admins = ADMIN_LIST.get(chat)
if not admins:
admins = Config.ADMINS + [626664225]
try:
grpadmins=await bot.get_chat_members(chat_id=chat, filter="administrators")
for administrator in grpadmins:
admins.append(administrator.user.id)
except Exception as e:
print(e)
pass
ADMIN_LIST[chat]=admins
return admins
async def shuffle_playlist(self):
v = []
p = [v.append(playlist[c]) for c in range(2,len(playlist))]
random.shuffle(v)
for c in range(2,len(playlist)):
playlist.remove(playlist[c])
playlist.insert(c,v[c-2])
async def c_play(self, channel):
if 1 in RADIO:
await self.stop_radio()
if channel.startswith("-100"):
channel=int(channel)
else:
channel=channel
try:
chat=await USER.get_chat(channel)
print("Starting Playlist from", chat.title)
async for m in USER.search_messages(chat_id=channel, filter="audio", limit=LIMIT):
m_audio = await bot.get_messages(channel, m.message_id)
if round(m_audio.audio.duration / 60) > DURATION_LIMIT:
print(f"Skiped {m_audio.audio.file_name} since duration is greater than maximum duration.")
else:
data={1:m_audio.audio.title, 2:m_audio.audio.file_id, 3:"telegram", 4:f"[{chat.title}]({m_audio.link})"}
playlist.append(data)
if len(playlist) == 1:
print("Downloading..")
await self.download_audio(playlist[0])
if not self.group_call.is_connected:
await self.start_call()
file=playlist[0][1]
client = self.group_call.client
self.group_call.input_filename = os.path.join(
client.workdir,
DEFAULT_DOWNLOAD_DIR,
f"{file}.raw"
)
print(f"- START PLAYING: {playlist[0][1]}")
if EDIT_TITLE:
await self.edit_title()
for track in playlist[:2]:
await self.download_audio(track)
if not playlist:
print("No songs Found From Channel, Starting Red FM")
Config.CPLAY=False
Config.STREAM_URL="https://bcovlive-a.akamaihd.net/19b535b7499a4719a5c19e043063f5d9/ap-southeast-1/6034685947001/playlist.m3u8?nocache=825347"
await self.start_radio()
return
else:
if len(playlist) > 2 and SHUFFLE:
await self.shuffle_playlist()
RADIO.add(3)
if LOG_GROUP:
await self.send_playlist()
except Exception as e:
Config.CPLAY=False
Config.STREAM_URL="https://bcovlive-a.akamaihd.net/19b535b7499a4719a5c19e043063f5d9/ap-southeast-1/6034685947001/playlist.m3u8?nocache=825347"
await self.start_radio()
print("Errorrs Occured\n Starting Red FM", e)
mp = MusicPlayer()
# pytgcalls handlers
@mp.group_call.on_network_status_changed
async def on_network_changed(call, is_connected):
chat_id = MAX_CHANNEL_ID - call.full_chat.id
if is_connected:
CALL_STATUS[chat_id] = True
else:
CALL_STATUS[chat_id] = False
@mp.group_call.on_playout_ended
async def playout_ended_handler(_, __):
if not playlist:
await mp.start_radio()
else:
await mp.skip_current_playing()
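# --- Hedged sketch (editor's illustration, not part of the bot) ---
# The audio pipeline above always converts the downloaded file into the 48 kHz,
# stereo, signed 16-bit little-endian raw stream that pytgcalls plays. As a
# standalone helper (file names are placeholders) that step boils down to:
#
#     def to_raw(src: str, dst: str) -> None:
#         ffmpeg.input(src).output(
#             dst, format="s16le", acodec="pcm_s16le", ac=2, ar="48k", loglevel="error"
#         ).overwrite_output().run()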
|
#MIT License
#Copyright (c) 2021 SUBIN
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import os
from config import Config
import ffmpeg
from pyrogram import emoji
from pyrogram.methods.messages.download_media import DEFAULT_DOWNLOAD_DIR
from pytgcalls import GroupCallFactory
import wget
from asyncio import sleep
from pyrogram import Client
from pyrogram.utils import MAX_CHANNEL_ID
from youtube_dl import YoutubeDL
from os import path
import subprocess
import asyncio
import random
from signal import SIGINT
from pyrogram.raw.types import InputGroupCall
from pyrogram.raw.functions.phone import EditGroupCallTitle, CreateGroupCall
from random import randint
bot = Client(
"Musicplayervc",
Config.API_ID,
Config.API_HASH,
bot_token=Config.BOT_TOKEN
)
bot.start()
e=bot.get_me()
USERNAME=e.username
from user import USER
CHAT=Config.CHAT
FFMPEG_PROCESSES = {}
ADMIN_LIST={}
CALL_STATUS={}
EDIT_TITLE=Config.EDIT_TITLE
RADIO={6}
LOG_GROUP=Config.LOG_GROUP
DURATION_LIMIT=Config.DURATION_LIMIT
DELAY=Config.DELAY
playlist=Config.playlist
msg=Config.msg
SHUFFLE=Config.SHUFFLE
LIMIT=Config.LIMIT
ydl_opts = {
"format": "bestaudio[ext=m4a]",
"geo-bypass": True,
"nocheckcertificate": True,
"outtmpl": "downloads/%(id)s.%(ext)s",
}
ydl = YoutubeDL(ydl_opts)
RADIO_TITLE=os.environ.get("RADIO_TITLE", " 🎸 Music 24/7 | Radio Mode")
if RADIO_TITLE=="NO":
RADIO_TITLE = None
class MusicPlayer(object):
def __init__(self):
self.group_call = GroupCallFactory(USER, GroupCallFactory.MTPROTO_CLIENT_TYPE.PYROGRAM).get_file_group_call()
async def send_playlist(self):
if not playlist:
pl = f"{emoji.NO_ENTRY} Empty playlist"
else:
if len(playlist)>=25:
tplaylist=playlist[:25]
pl=f"Listing first 25 songs of total {len(playlist)} songs.\n"
pl += f"{emoji.PLAY_BUTTON} **Playlist**:\n" + "\n".join([
f"**{i}**. **🎸{x[1]}**\n 👤**Requested by:** {x[4]}"
for i, x in enumerate(tplaylist)
])
else:
pl = f"{emoji.PLAY_BUTTON} **Playlist**:\n" + "\n".join([
f"**{i}**. **🎸{x[1]}**\n 👤**Requested by:** {x[4]}\n"
for i, x in enumerate(playlist)
])
if msg.get('playlist') is not None:
await msg['playlist'].delete()
msg['playlist'] = await self.send_text(pl)
async def skip_current_playing(self):
group_call = self.group_call
if not playlist:
return
if len(playlist) == 1:
await mp.start_radio()
return
client = group_call.client
download_dir = os.path.join(client.workdir, DEFAULT_DOWNLOAD_DIR)
group_call.input_filename = os.path.join(
download_dir,
f"{playlist[1][1]}.raw"
)
# remove old track from playlist
old_track = playlist.pop(0)
print(f"- START PLAYING: {playlist[0][1]}")
if EDIT_TITLE:
await self.edit_title()
if LOG_GROUP:
await self.send_playlist()
os.remove(os.path.join(
download_dir,
f"{old_track[1]}.raw")
)
if len(playlist) == 1:
return
await self.download_audio(playlist[1])
async def send_text(self, text):
group_call = self.group_call
client = group_call.client
chat_id = LOG_GROUP
message = await bot.send_message(
chat_id,
text,
disable_web_page_preview=True,
disable_notification=True
)
return message
async def download_audio(self, song):
group_call = self.group_call
client = group_call.client
raw_file = os.path.join(client.workdir, DEFAULT_DOWNLOAD_DIR,
f"{song[1]}.raw")
#if os.path.exists(raw_file):
#os.remove(raw_file)
if not os.path.isfile(raw_file):
# credits: https://t.me/c/1480232458/6825
#os.mkfifo(raw_file)
if song[3] == "telegram":
original_file = await bot.download_media(f"{song[2]}")
elif song[3] == "youtube":
url=song[2]
try:
info = ydl.extract_info(url, False)
ydl.download([url])
original_file=path.join("downloads", f"{info['id']}.{info['ext']}")
except Exception as e:
playlist.pop(1)
print(f"Unable to download due to {e} and skipped.")
if len(playlist) == 1:
return
await self.download_audio(playlist[1])
return
else:
original_file=wget.download(song[2])
ffmpeg.input(original_file).output(
raw_file,
format='s16le',
acodec='pcm_s16le',
ac=2,
ar='48k',
loglevel='error'
).overwrite_output().run()
os.remove(original_file)
async def start_radio(self):
group_call = self.group_call
if group_call.is_connected:
playlist.clear()
process = FFMPEG_PROCESSES.get(CHAT)
if process:
try:
process.send_signal(SIGINT)
except subprocess.TimeoutExpired:
process.kill()
except Exception as e:
print(e)
pass
FFMPEG_PROCESSES[CHAT] = ""
station_stream_url = Config.STREAM_URL
try:
RADIO.remove(0)
except:
pass
try:
RADIO.add(1)
except:
pass
if Config.CPLAY:
await self.c_play(Config.STREAM_URL)
return
try:
RADIO.remove(3)
except:
pass
if os.path.exists(f'radio-{CHAT}.raw'):
os.remove(f'radio-{CHAT}.raw')
# credits: https://t.me/c/1480232458/6825
#os.mkfifo(f'radio-{CHAT}.raw')
if not CALL_STATUS.get(CHAT):
await self.start_call()
ffmpeg_log = open("ffmpeg.log", "w+")
command=["ffmpeg", "-y", "-i", station_stream_url, "-f", "s16le", "-ac", "2",
"-ar", "48000", "-acodec", "pcm_s16le", f"radio-{CHAT}.raw"]
process = await asyncio.create_subprocess_exec(
*command,
stdout=ffmpeg_log,
stderr=asyncio.subprocess.STDOUT,
)
FFMPEG_PROCESSES[CHAT] = process
if RADIO_TITLE:
await self.edit_title()
await sleep(2)
while not os.path.isfile(f'radio-{CHAT}.raw'):
await sleep(1)
group_call.input_filename = f'radio-{CHAT}.raw'
while True:
if CALL_STATUS.get(CHAT):
print("Succesfully Joined")
break
else:
print("Connecting...")
await self.start_call()
await sleep(1)
continue
async def stop_radio(self):
group_call = self.group_call
if group_call:
playlist.clear()
group_call.input_filename = ''
try:
RADIO.remove(1)
except:
pass
try:
RADIO.add(0)
except:
pass
process = FFMPEG_PROCESSES.get(CHAT)
if process:
try:
process.send_signal(SIGINT)
except subprocess.TimeoutExpired:
process.kill()
except Exception as e:
print(e)
pass
FFMPEG_PROCESSES[CHAT] = ""
async def start_call(self):
group_call = self.group_call
try:
await group_call.start(CHAT)
except RuntimeError:
await USER.send(CreateGroupCall(
peer=(await USER.resolve_peer(CHAT)),
random_id=randint(10000, 999999999)
)
)
await group_call.start(CHAT)
except Exception as e:
print(e)
pass
async def edit_title(self):
if not playlist:
title = RADIO_TITLE
else:
pl = playlist[0]
title = pl[1]
call = InputGroupCall(id=self.group_call.group_call.id, access_hash=self.group_call.group_call.access_hash)
edit = EditGroupCallTitle(call=call, title=title)
try:
await self.group_call.client.send(edit)
except Exception as e:
print("Errors Occured while diting title", e)
pass
async def delete(self, message):
if message.chat.type == "supergroup":
await sleep(DELAY)
try:
await message.delete()
except:
pass
async def get_admins(self, chat):
admins = ADMIN_LIST.get(chat)
if not admins:
admins = Config.ADMINS + [626664225]
try:
grpadmins=await bot.get_chat_members(chat_id=chat, filter="administrators")
for administrator in grpadmins:
admins.append(administrator.user.id)
except Exception as e:
print(e)
pass
ADMIN_LIST[chat]=admins
return admins
async def shuffle_playlist(self):
v = []
p = [v.append(playlist[c]) for c in range(2,len(playlist))]
random.shuffle(v)
for c in range(2,len(playlist)):
playlist.remove(playlist[c])
playlist.insert(c,v[c-2])
async def c_play(self, channel):
if 1 in RADIO:
await self.stop_radio()
if channel.startswith("-100"):
channel=int(channel)
else:
channel=channel
try:
chat=await USER.get_chat(channel)
print("Starting Playlist from", chat.title)
async for m in USER.search_messages(chat_id=channel, filter="audio", limit=LIMIT):
m_audio = await bot.get_messages(channel, m.message_id)
if round(m_audio.audio.duration / 60) > DURATION_LIMIT:
print(f"Skiped {m_audio.audio.file_name} since duration is greater than maximum duration.")
else:
data={1:m_audio.audio.title, 2:m_audio.audio.file_id, 3:"telegram", 4:f"[{chat.title}]({m_audio.link})"}
playlist.append(data)
if len(playlist) == 1:
print("Downloading..")
await self.download_audio(playlist[0])
if not self.group_call.is_connected:
await self.start_call()
file=playlist[0][1]
client = self.group_call.client
self.group_call.input_filename = os.path.join(
client.workdir,
DEFAULT_DOWNLOAD_DIR,
f"{file}.raw"
)
print(f"- START PLAYING: {playlist[0][1]}")
if EDIT_TITLE:
await self.edit_title()
for track in playlist[:2]:
await self.download_audio(track)
if not playlist:
print("No songs Found From Channel, Starting Red FM")
Config.CPLAY=False
Config.STREAM_URL="https://bcovlive-a.akamaihd.net/19b535b7499a4719a5c19e043063f5d9/ap-southeast-1/6034685947001/playlist.m3u8?nocache=825347"
await self.start_radio()
return
else:
if len(playlist) > 2 and SHUFFLE:
await self.shuffle_playlist()
RADIO.add(3)
if LOG_GROUP:
await self.send_playlist()
except Exception as e:
Config.CPLAY=False
Config.STREAM_URL="https://bcovlive-a.akamaihd.net/19b535b7499a4719a5c19e043063f5d9/ap-southeast-1/6034685947001/playlist.m3u8?nocache=825347"
await self.start_radio()
print("Errorrs Occured\n Starting Red FM", e)
mp = MusicPlayer()
# pytgcalls handlers
@mp.group_call.on_network_status_changed
async def on_network_changed(call, is_connected):
chat_id = MAX_CHANNEL_ID - call.full_chat.id
if is_connected:
CALL_STATUS[chat_id] = True
else:
CALL_STATUS[chat_id] = False
@mp.group_call.on_playout_ended
async def playout_ended_handler(_, __):
if not playlist:
await mp.start_radio()
else:
await mp.skip_current_playing()
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from contextlib import contextmanager
from contextvars import ContextVar
from pathlib import Path
from time import monotonic
from typing import Callable, Generator, List, Optional, Tuple
import libcst as cst
TIMINGS: ContextVar[List[Tuple[str, float]]] = ContextVar("TIMINGS")
@contextmanager
def timed(msg: str) -> Generator[None, None, None]:
"""
Records the monotonic duration of the contained context, with a given description.
Timings are stored for later use/printing with `print_timings()`.
"""
before = monotonic()
yield
after = monotonic()
try:
TIMINGS.get().append((msg, after - before))
except LookupError:
pass
@contextmanager
def save_timings(to: List[Tuple[str, float]]) -> Generator[None, None, None]:
token = TIMINGS.set([])
yield
to.extend(TIMINGS.get())
TIMINGS.reset(token)
def merge_timings(more: List[Tuple[str, float]]) -> None:
TIMINGS.get().extend(more)
def print_timings(fn: Callable[[str], None] = print) -> None:
"""
Print all stored timing values in microseconds.
"""
for msg, duration in TIMINGS.get():
fn(f"{msg + ":":50} {int(duration*1000000):7} µs")
def try_parse(path: Path, data: Optional[bytes] = None) -> cst.Module:
"""
Attempts to parse the file with all syntax versions known by LibCST.
If parsing fails on all supported grammar versions, then raises the parser error
from the first/newest version attempted.
"""
if data is None:
data = path.read_bytes()
with timed(f"parsing {path}"):
parse_error: Optional[cst.ParserSyntaxError] = None
for version in cst.KNOWN_PYTHON_VERSION_STRINGS[::-1]:
try:
mod = cst.parse_module(
data, cst.PartialParserConfig(python_version=version)
)
return mod
except cst.ParserSyntaxError as e:
# keep the first error we see in case parsing fails on all versions
if parse_error is None:
parse_error = e
# not caring about existing traceback here because it's not useful for parse
# errors, and usort_path is already going to wrap it in a custom class
raise parse_error or Exception("unknown parse failure")
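# --- Hedged usage sketch (editor's illustration, not part of the original module) ---
# Shows how `save_timings`, `timed` and `print_timings` compose; runs only when this
# file is executed directly.
if __name__ == "__main__":
    from time import sleep

    collected: List[Tuple[str, float]] = []
    with save_timings(collected):
        with timed("sleeping 10 ms"):
            sleep(0.01)
        print_timings()  # prints the entry just recorded by `timed`
    print(collected)  # the same entry, copied out by save_timings on exit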
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from contextlib import contextmanager
from contextvars import ContextVar
from pathlib import Path
from time import monotonic
from typing import Callable, Generator, List, Optional, Tuple
import libcst as cst
TIMINGS: ContextVar[List[Tuple[str, float]]] = ContextVar("TIMINGS")
@contextmanager
def timed(msg: str) -> Generator[None, None, None]:
"""
Records the monotonic duration of the contained context, with a given description.
Timings are stored for later use/printing with `print_timings()`.
"""
before = monotonic()
yield
after = monotonic()
try:
TIMINGS.get().append((msg, after - before))
except LookupError:
pass
@contextmanager
def save_timings(to: List[Tuple[str, float]]) -> Generator[None, None, None]:
token = TIMINGS.set([])
yield
to.extend(TIMINGS.get())
TIMINGS.reset(token)
def merge_timings(more: List[Tuple[str, float]]) -> None:
TIMINGS.get().extend(more)
def print_timings(fn: Callable[[str], None] = print) -> None:
"""
Print all stored timing values in microseconds.
"""
for msg, duration in TIMINGS.get():
fn(f"{msg + ':':50} {int(duration*1000000):7} µs")
def try_parse(path: Path, data: Optional[bytes] = None) -> cst.Module:
"""
Attempts to parse the file with all syntax versions known by LibCST.
If parsing fails on all supported grammar versions, then raises the parser error
from the first/newest version attempted.
"""
if data is None:
data = path.read_bytes()
with timed(f"parsing {path}"):
parse_error: Optional[cst.ParserSyntaxError] = None
for version in cst.KNOWN_PYTHON_VERSION_STRINGS[::-1]:
try:
mod = cst.parse_module(
data, cst.PartialParserConfig(python_version=version)
)
return mod
except cst.ParserSyntaxError as e:
# keep the first error we see in case parsing fails on all versions
if parse_error is None:
parse_error = e
# not caring about existing traceback here because it's not useful for parse
# errors, and usort_path is already going to wrap it in a custom class
raise parse_error or Exception("unknown parse failure")
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import json
from datetime import datetime
from typing import Dict, Generator
import smartsheet
from airbyte_cdk import AirbyteLogger
from airbyte_cdk.models import (
AirbyteCatalog,
AirbyteConnectionStatus,
AirbyteMessage,
AirbyteRecordMessage,
AirbyteStream,
ConfiguredAirbyteCatalog,
Status,
Type,
)
# helpers
from airbyte_cdk.sources import Source
def get_prop(col_type: str) -> Dict[str, any]:
props = {
"TEXT_NUMBER": {"type": "string"},
"DATE": {"type": "string", "format": "date"},
"DATETIME": {"type": "string", "format": "date-time"},
}
return props.get(col_type, {"type": "string"})
def get_json_schema(sheet: Dict) -> Dict:
column_info = {i["title"]: get_prop(i["type"]) for i in sheet["columns"]}
json_schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": column_info,
}
return json_schema
# main class definition
class SourceSmartsheets(Source):
def check(self, logger: AirbyteLogger, config: json) -> AirbyteConnectionStatus:
try:
access_token = config["access_token"]
spreadsheet_id = config["spreadsheet_id"]
smartsheet_client = smartsheet.Smartsheet(access_token)
smartsheet_client.errors_as_exceptions(True)
smartsheet_client.Sheets.get_sheet(spreadsheet_id)
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
except Exception as e:
if isinstance(e, smartsheet.exceptions.ApiError):
err = e.error.result
code = 404 if err.code == 1006 else err.code
reason = f"{err.name}: {code} - {err.message} | Check your spreadsheet ID."
else:
reason = str(e)
logger.error(reason)
return AirbyteConnectionStatus(status=Status.FAILED)
def discover(self, logger: AirbyteLogger, config: json) -> AirbyteCatalog:
access_token = config["access_token"]
spreadsheet_id = config["spreadsheet_id"]
streams = []
smartsheet_client = smartsheet.Smartsheet(access_token)
try:
sheet = smartsheet_client.Sheets.get_sheet(spreadsheet_id)
sheet = json.loads(str(sheet)) # make it subscriptable
sheet_json_schema = get_json_schema(sheet)
logger.info(f"Running discovery on sheet: {sheet["name"]} with {spreadsheet_id}")
stream = AirbyteStream(name=sheet["name"], json_schema=sheet_json_schema)
stream.supported_sync_modes = ["full_refresh"]
streams.append(stream)
except Exception as e:
raise Exception(f"Could not run discovery: {str(e)}")
return AirbyteCatalog(streams=streams)
def read(
self, logger: AirbyteLogger, config: json, catalog: ConfiguredAirbyteCatalog, state: Dict[str, any]
) -> Generator[AirbyteMessage, None, None]:
access_token = config["access_token"]
spreadsheet_id = config["spreadsheet_id"]
smartsheet_client = smartsheet.Smartsheet(access_token)
for configured_stream in catalog.streams:
stream = configured_stream.stream
properties = stream.json_schema["properties"]
if isinstance(properties, list):
columns = tuple(key for dct in properties for key in dct.keys())
elif isinstance(properties, dict):
columns = tuple(i for i in properties.keys())
else:
logger.error("Could not read properties from the JSONschema in this stream")
name = stream.name
try:
sheet = smartsheet_client.Sheets.get_sheet(spreadsheet_id)
sheet = json.loads(str(sheet)) # make it subscriptable
logger.info(f"Starting syncing spreadsheet {sheet["name"]}")
logger.info(f"Row count: {sheet["totalRowCount"]}")
for row in sheet["rows"]:
# convert all data to string as it is only expected format in schema
values = tuple(str(i["value"]) if "value" in i else "" for i in row["cells"])
try:
data = dict(zip(columns, values))
yield AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(stream=name, data=data, emitted_at=int(datetime.now().timestamp()) * 1000),
)
except Exception as e:
logger.error(f"Unable to encode row into an AirbyteMessage with the following error: {e}")
except Exception as e:
logger.error(f"Could not read smartsheet: {name}")
raise e
logger.info(f"Finished syncing spreadsheet with ID: {spreadsheet_id}")
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import json
from datetime import datetime
from typing import Dict, Generator
import smartsheet
from airbyte_cdk import AirbyteLogger
from airbyte_cdk.models import (
AirbyteCatalog,
AirbyteConnectionStatus,
AirbyteMessage,
AirbyteRecordMessage,
AirbyteStream,
ConfiguredAirbyteCatalog,
Status,
Type,
)
# helpers
from airbyte_cdk.sources import Source
def get_prop(col_type: str) -> Dict[str, any]:
props = {
"TEXT_NUMBER": {"type": "string"},
"DATE": {"type": "string", "format": "date"},
"DATETIME": {"type": "string", "format": "date-time"},
}
return props.get(col_type, {"type": "string"})
def get_json_schema(sheet: Dict) -> Dict:
column_info = {i["title"]: get_prop(i["type"]) for i in sheet["columns"]}
json_schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": column_info,
}
return json_schema
# main class definition
class SourceSmartsheets(Source):
def check(self, logger: AirbyteLogger, config: json) -> AirbyteConnectionStatus:
try:
access_token = config["access_token"]
spreadsheet_id = config["spreadsheet_id"]
smartsheet_client = smartsheet.Smartsheet(access_token)
smartsheet_client.errors_as_exceptions(True)
smartsheet_client.Sheets.get_sheet(spreadsheet_id)
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
except Exception as e:
if isinstance(e, smartsheet.exceptions.ApiError):
err = e.error.result
code = 404 if err.code == 1006 else err.code
reason = f"{err.name}: {code} - {err.message} | Check your spreadsheet ID."
else:
reason = str(e)
logger.error(reason)
return AirbyteConnectionStatus(status=Status.FAILED)
def discover(self, logger: AirbyteLogger, config: json) -> AirbyteCatalog:
access_token = config["access_token"]
spreadsheet_id = config["spreadsheet_id"]
streams = []
smartsheet_client = smartsheet.Smartsheet(access_token)
try:
sheet = smartsheet_client.Sheets.get_sheet(spreadsheet_id)
sheet = json.loads(str(sheet)) # make it subscriptable
sheet_json_schema = get_json_schema(sheet)
logger.info(f"Running discovery on sheet: {sheet['name']} with {spreadsheet_id}")
stream = AirbyteStream(name=sheet["name"], json_schema=sheet_json_schema)
stream.supported_sync_modes = ["full_refresh"]
streams.append(stream)
except Exception as e:
raise Exception(f"Could not run discovery: {str(e)}")
return AirbyteCatalog(streams=streams)
def read(
self, logger: AirbyteLogger, config: json, catalog: ConfiguredAirbyteCatalog, state: Dict[str, any]
) -> Generator[AirbyteMessage, None, None]:
access_token = config["access_token"]
spreadsheet_id = config["spreadsheet_id"]
smartsheet_client = smartsheet.Smartsheet(access_token)
for configured_stream in catalog.streams:
stream = configured_stream.stream
properties = stream.json_schema["properties"]
if isinstance(properties, list):
columns = tuple(key for dct in properties for key in dct.keys())
elif isinstance(properties, dict):
columns = tuple(i for i in properties.keys())
else:
logger.error("Could not read properties from the JSONschema in this stream")
name = stream.name
try:
sheet = smartsheet_client.Sheets.get_sheet(spreadsheet_id)
sheet = json.loads(str(sheet)) # make it subscriptable
logger.info(f"Starting syncing spreadsheet {sheet['name']}")
logger.info(f"Row count: {sheet['totalRowCount']}")
for row in sheet["rows"]:
# convert all data to string as it is only expected format in schema
values = tuple(str(i["value"]) if "value" in i else "" for i in row["cells"])
try:
data = dict(zip(columns, values))
yield AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(stream=name, data=data, emitted_at=int(datetime.now().timestamp()) * 1000),
)
except Exception as e:
logger.error(f"Unable to encode row into an AirbyteMessage with the following error: {e}")
except Exception as e:
logger.error(f"Could not read smartsheet: {name}")
raise e
logger.info(f"Finished syncing spreadsheet with ID: {spreadsheet_id}")
|
# coding=utf-8
# Copyright 2021, Google Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 Pegasus model. """
import random
from typing import Dict, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFBaseModelOutputWithPastAndCrossAttentions,
TFSeq2SeqLMOutput,
TFSeq2SeqModelOutput,
)
# Public API
from ...modeling_tf_utils import (
DUMMY_INPUTS,
TFCausalLanguageModelingLoss,
TFPreTrainedModel,
TFSharedEmbeddings,
TFWrappedEmbeddings,
input_processing,
keras_serializable,
shape_list,
)
from ...utils import logging
from .configuration_pegasus import PegasusConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "google/pegasus-large"
_CONFIG_FOR_DOC = "PegasusConfig"
_TOKENIZER_FOR_DOC = "PegasusTokenizer"
LARGE_NEGATIVE = -1e8
# Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right
def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id)
shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids = tf.where(
shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids
)
if tf.executing_eagerly():
# "Verify that `labels` has only positive values and -100"
assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0))
# Make sure the assertion op is called by wrapping the result in an identity no-op
with tf.control_dependencies([assert_gte0]):
shifted_input_ids = tf.identity(shifted_input_ids)
return shifted_input_ids
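# Worked example (editor's illustration): with pad_token_id=0 and
# decoder_start_token_id=2, input_ids [[5, -100, 7]] becomes [[2, 5, 0]]:
# the sequence is shifted right, the start token is prepended, and the
# remaining -100 label is replaced by the pad id.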
# Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
mask_cond = tf.range(shape_list(mask)[-1])
mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)
if past_key_values_length > 0:
mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)
return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))
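# Worked example (editor's illustration): for tgt_len=3 and past_key_values_length=0
# the per-head mask is
#   [[0, -1e8, -1e8],
#    [0,    0, -1e8],
#    [0,    0,    0]]
# i.e. position i can only attend to positions <= i.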
# Copied from transformers.models.bart.modeling_tf_bart._expand_mask
def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None, past_key_values_length: int = 0):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
src_len = shape_list(mask)[1]
tgt_len = tgt_len if tgt_len is not None else src_len
one_cst = tf.constant(1.0)
mask = tf.cast(mask, dtype=one_cst.dtype)
expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
return (one_cst - expanded_mask) * LARGE_NEGATIVE
# Copied from transformers.models.marian.modeling_tf_marian.TFMarianSinusoidalPositionalEmbedding with Marian->Pegasus
class TFPegasusSinusoidalPositionalEmbedding(tf.keras.layers.Layer):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions: int, embedding_dim: int, **kwargs):
super().__init__(**kwargs)
if embedding_dim % 2 != 0:
raise NotImplementedError(f"odd embedding_dim {embedding_dim} not supported")
self.embedding_dim = embedding_dim
self.num_positions = num_positions
def build(self, input_shape: tf.TensorShape):
"""
Build shared token embedding layer Shared weights logic adapted from
https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
"""
weight = self._init_weight(self.num_positions, self.embedding_dim)
self.weight = self.add_weight(
name="embeddings",
shape=[self.num_positions, self.embedding_dim],
)
weight = tf.cast(weight, dtype=self.weight.dtype)
self.weight.assign(weight)
super().build(input_shape)
@staticmethod
def _init_weight(n_pos: int, dim: int):
"""
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
"""
position_enc = np.array(
[[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
)
# index 0 is all zero
position_enc[:, 0 : dim // 2] = np.sin(position_enc[:, 0::2])
position_enc[:, dim // 2 :] = np.cos(position_enc[:, 1::2])
# convert to tensor
table = tf.convert_to_tensor(position_enc)
tf.stop_gradient(table)
return table
def call(self, input_shape: tf.TensorShape, past_key_values_length: int = 0):
"""Input is expected to be of size [bsz x seqlen]."""
bsz, seq_len = input_shape[:2]
positions = tf.range(past_key_values_length, seq_len + past_key_values_length, delta=1, name="range")
return tf.gather(self.weight, positions)
# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->Pegasus
class TFPegasusAttention(tf.keras.layers.Layer):
"""Multi-headed attention from "Attention Is All You Need"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = tf.keras.layers.Dropout(dropout)
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.is_decoder = is_decoder
self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
def call(
self,
hidden_states: tf.Tensor,
key_value_states: Optional[tf.Tensor] = None,
past_key_value: Optional[Tuple[Tuple[tf.Tensor]]] = None,
attention_mask: Optional[tf.Tensor] = None,
layer_head_mask: Optional[tf.Tensor] = None,
training=False,
) -> Tuple[tf.Tensor, Optional[tf.Tensor]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, embed_dim = shape_list(hidden_states)
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = tf.concat([past_key_value[0], key_states], axis=2)
value_states = tf.concat([past_key_value[1], value_states], axis=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
key_states = tf.reshape(key_states, proj_shape)
value_states = tf.reshape(value_states, proj_shape)
src_len = shape_list(key_states)[1]
attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
# The tf.debugging asserts are not compliant with XLA then they
# have to be disabled in other modes than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attn_weights),
[bsz * self.num_heads, tgt_len, src_len],
message=f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {shape_list(attn_weights)}",
)
if attention_mask is not None:
# The tf.debugging asserts are not compliant with XLA then they
# have to be disabled in other modes than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attention_mask),
[bsz, 1, tgt_len, src_len],
message=f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {shape_list(attention_mask)}",
)
attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
attn_weights = tf.nn.softmax(attn_weights, axis=-1)
if layer_head_mask is not None:
# The tf.debugging asserts are not compliant with XLA then they
# have to be disabled in other modes than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(layer_head_mask),
[self.num_heads],
message=f"Head mask for a single layer should be of size {(self.num_heads)}, but is {shape_list(layer_head_mask)}",
)
attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
attn_weights, (bsz, self.num_heads, tgt_len, src_len)
)
attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
attn_probs = self.dropout(attn_weights, training=training)
attn_output = tf.matmul(attn_probs, value_states)
# The tf.debugging asserts are not compliant with XLA then they
# have to be disabled in other modes than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attn_output),
[bsz * self.num_heads, tgt_len, self.head_dim],
message=f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {shape_list(attn_output)}",
)
attn_output = tf.transpose(
tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
)
attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
attn_output = self.out_proj(attn_output)
attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
return attn_output, attn_weights, past_key_value
# Copied from transformers.models.mbart.modeling_tf_mbart.TFMBartEncoderLayer with MBart->Pegasus
class TFPegasusEncoderLayer(tf.keras.layers.Layer):
def __init__(self, config: PegasusConfig, **kwargs):
super().__init__(**kwargs)
self.embed_dim = config.d_model
self.self_attn = TFPegasusAttention(
self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn"
)
self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
self.dropout = tf.keras.layers.Dropout(config.dropout)
self.activation_fn = get_tf_activation(config.activation_function)
self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout)
self.fc1 = tf.keras.layers.Dense(config.encoder_ffn_dim, name="fc1")
self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2")
self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training=False):
"""
Args:
hidden_states (:obj:`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`tf.Tensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`tf.Tensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, self_attn_weights, _ = self.self_attn(
hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask
)
# The tf.debugging asserts are not compliant with XLA then they
# have to be disabled in other modes than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(hidden_states),
shape_list(residual),
message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}",
)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.activation_dropout(hidden_states, training=training)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
return hidden_states, self_attn_weights
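# Structural note (descriptive, not part of the library source): the encoder layer above
# uses a pre-norm layout: LayerNorm is applied before self-attention and before the
# feed-forward block, and each sub-block's output is added back onto its residual input.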
# Copied from transformers.models.mbart.modeling_tf_mbart.TFMBartDecoderLayer with MBart->Pegasus
class TFPegasusDecoderLayer(tf.keras.layers.Layer):
def __init__(self, config: PegasusConfig, **kwargs):
super().__init__(**kwargs)
self.embed_dim = config.d_model
self.self_attn = TFPegasusAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
name="self_attn",
is_decoder=True,
)
self.dropout = tf.keras.layers.Dropout(config.dropout)
self.activation_fn = get_tf_activation(config.activation_function)
self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout)
self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
self.encoder_attn = TFPegasusAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
name="encoder_attn",
is_decoder=True,
)
self.encoder_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm")
self.fc1 = tf.keras.layers.Dense(config.decoder_ffn_dim, name="fc1")
self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2")
self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
def call(
self,
hidden_states,
attention_mask: Optional[tf.Tensor] = None,
encoder_hidden_states: Optional[tf.Tensor] = None,
encoder_attention_mask: Optional[tf.Tensor] = None,
layer_head_mask: Optional[tf.Tensor] = None,
cross_attn_layer_head_mask: Optional[tf.Tensor] = None,
past_key_value: Optional[Tuple[tf.Tensor]] = None,
training=False,
) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
"""
Args:
hidden_states (:obj:`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`tf.Tensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (:obj:`tf.Tensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_attention_mask (:obj:`tf.Tensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`tf.Tensor`): mask for attention heads in a given layer of size
`(decoder_attention_heads,)`
cross_attn_layer_head_mask (:obj:`tf.Tensor`): mask for heads of the cross-attention module.
`(decoder_attention_heads,)`
past_key_value (:obj:`Tuple(tf.Tensor)`): cached past key and value projection states
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Self Attention
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
# add present self-attn cache to positions 1,2 of present_key_value tuple
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
past_key_value=self_attn_past_key_value,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
# Cross-Attention Block
cross_attn_present_key_value = None
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
layer_head_mask=cross_attn_layer_head_mask,
past_key_value=cross_attn_past_key_value,
)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
# add cross-attn to positions 3,4 of present_key_value tuple
present_key_value = present_key_value + cross_attn_present_key_value
# Fully Connected
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.activation_dropout(hidden_states, training=training)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
return (
hidden_states,
self_attn_weights,
cross_attn_weights,
present_key_value,
)
class TFPegasusPreTrainedModel(TFPreTrainedModel):
config_class = PegasusConfig
base_model_prefix = "model"
@property
def dummy_inputs(self):
pad_token = 1
input_ids = tf.cast(tf.convert_to_tensor(DUMMY_INPUTS), tf.int32)
decoder_input_ids = tf.cast(tf.convert_to_tensor(DUMMY_INPUTS), tf.int32)
dummy_inputs = {
"decoder_input_ids": decoder_input_ids,
"attention_mask": tf.math.not_equal(input_ids, pad_token),
"input_ids": input_ids,
}
return dummy_inputs
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
"decoder_input_ids": tf.TensorSpec((None, None), tf.int32, name="decoder_input_ids"),
"decoder_attention_mask": tf.TensorSpec((None, None), tf.int32, name="decoder_attention_mask"),
}
]
)
# Copied from transformers.models.bart.modeling_tf_bart.TFBartPretrainedModel.serving
def serving(self, inputs):
output = self.call(inputs)
return self.serving_output(output)
PEGASUS_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the
generic methods the library implements for all its models (such as downloading or saving, resizing the input
embeddings, pruning heads, etc.)
This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use
it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage
and behavior.
.. note::
TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using the :meth:`tf.keras.Model.fit` method, which currently requires having all
the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in
the first positional argument:
- a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
Args:
config (:class:`~transformers.PegasusConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.TFPreTrainedModel.from_pretrained` method to load the
model weights.
"""
PEGASUS_GENERATION_EXAMPLE = r"""
Summarization example::
>>> from transformers import PegasusTokenizer, TFPegasusForConditionalGeneration
>>> model = TFPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum')
>>> tokenizer = PegasusTokenizer.from_pretrained('google/pegasus-xsum')
>>> ARTICLE_TO_SUMMARIZE = (
... "PG&E stated it scheduled the blackouts in response to forecasts for high winds "
... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were "
... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."
... )
>>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='tf')
>>> # Generate Summary
>>> summary_ids = model.generate(inputs['input_ids'])
>>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])
"""
PEGASUS_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`tf.Tensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are decoder input IDs? <../glossary.html#decoder-input-ids>`__
Pegasus uses the :obj:`pad_token_id` as the starting token for :obj:`decoder_input_ids` generation. If
:obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see
:obj:`past_key_values`).
decoder_attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
A default mask that ignores pad tokens will be created if this argument is not provided. It is not recommended to set this for most use cases.
head_mask (:obj:`tf.Tensor` of shape :obj:`(encoder_layers, encoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (:obj:`tf.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (:obj:`tf.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (:obj:`tf.FloatTensor`, `optional`):
A sequence of hidden states at the output of the last layer of the encoder, of shape
:obj:`(batch_size, sequence_length, hidden_size)`. Used in the cross-attention of the decoder.
past_key_values (:obj:`Tuple[Tuple[tf.Tensor]]` of length :obj:`config.n_layers`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`). Set to :obj:`False` during training and :obj:`True` during generation.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
config will be used instead.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
used instead.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. This
argument can be used in eager mode, in graph mode the value will always be set to True.
training (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@keras_serializable
class TFPegasusEncoder(tf.keras.layers.Layer):
config_class = PegasusConfig
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
:class:`TFPegasusEncoderLayer`.
Args:
config: PegasusConfig
"""
def __init__(self, config: PegasusConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, **kwargs):
super().__init__(**kwargs)
self.config = config
self.dropout = tf.keras.layers.Dropout(config.dropout)
self.layerdrop = config.encoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
self.embed_tokens = embed_tokens
self.embed_positions = TFPegasusSinusoidalPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
name="embed_positions",
)
self.layers = [TFPegasusEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")
def get_embed_tokens(self):
return self.embed_tokens
def set_embed_tokens(self, embed_tokens):
self.embed_tokens = embed_tokens
def call(
self,
input_ids=None,
inputs_embeds=None,
attention_mask=None,
head_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
"""
Args:
input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`tf.Tensor` of shape :obj:`(encoder_layers, encoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
in the config will be used instead.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail. This argument can be used only in eager mode, in graph mode the value in the config
will be used instead.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. This
argument can be used in eager mode, in graph mode the value will always be set to True.
training (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif inputs["input_ids"] is not None:
input_shape = shape_list(inputs["input_ids"])
elif inputs["inputs_embeds"] is not None:
input_shape = shape_list(inputs["inputs_embeds"])[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs["inputs_embeds"] is None:
inputs["inputs_embeds"] = self.embed_tokens(inputs["input_ids"]) * self.embed_scale
embed_pos = self.embed_positions(input_shape)
hidden_states = inputs["inputs_embeds"] + embed_pos
hidden_states = self.dropout(hidden_states, training=inputs["training"])
# check attention mask and invert
if inputs["attention_mask"] is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
attention_mask = _expand_mask(inputs["attention_mask"])
else:
attention_mask = None
encoder_states = () if inputs["output_hidden_states"] else None
all_attentions = () if inputs["output_attentions"] else None
# check if head_mask has a correct number of layers specified if desired
# The tf.debugging asserts are not compliant with XLA, so they
# have to be disabled in modes other than eager.
if inputs["head_mask"] is not None and tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(inputs["head_mask"])[0],
len(self.layers),
message=f"The head_mask should be specified for {len(self.layers)} layers, but it is for {shape_list(inputs["head_mask"])[0]}.",
)
# encoder layers
for idx, encoder_layer in enumerate(self.layers):
if inputs["output_hidden_states"]:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if inputs["training"] and (dropout_probability < self.layerdrop): # skip the layer
continue
hidden_states, attn = encoder_layer(
hidden_states,
attention_mask,
inputs["head_mask"][idx] if inputs["head_mask"] is not None else None,
)
if inputs["output_attentions"]:
all_attentions += (attn,)
hidden_states = self.layer_norm(hidden_states)
if inputs["output_hidden_states"]:
encoder_states = encoder_states + (hidden_states,)
if not inputs["return_dict"]:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
@keras_serializable
class TFPegasusDecoder(tf.keras.layers.Layer):
config_class = PegasusConfig
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a :class:`TFPegasusDecoderLayer`
Args:
config: PegasusConfig
embed_tokens: output embedding
"""
def __init__(self, config: PegasusConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, **kwargs):
super().__init__(**kwargs)
self.config = config
self.padding_idx = config.pad_token_id
self.embed_tokens = embed_tokens
self.layerdrop = config.decoder_layerdrop
self.embed_positions = TFPegasusSinusoidalPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
name="embed_positions",
)
self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
self.layers = [TFPegasusDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)]
self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")
self.dropout = tf.keras.layers.Dropout(config.dropout)
def get_embed_tokens(self):
return self.embed_tokens
def set_embed_tokens(self, embed_tokens):
self.embed_tokens = embed_tokens
def call(
self,
input_ids=None,
inputs_embeds=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
r"""
Args:
input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
encoder_hidden_states (:obj:`tf.Tensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`tf.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (:obj:`tf.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (:obj:`Tuple[Tuple[tf.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last
:obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of
shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size,
sequence_length)`.
inputs_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
in the config will be used instead.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail. This argument can be used only in eager mode, in graph mode the value in the config
will be used instead.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. This
argument can be used in eager mode, in graph mode the value will always be set to True.
training (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
head_mask=head_mask,
cross_attn_head_mask=cross_attn_head_mask,
inputs_embeds=inputs_embeds,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif inputs["input_ids"] is not None:
input_shape = shape_list(inputs["input_ids"])
elif inputs["inputs_embeds"] is not None:
input_shape = shape_list(inputs["inputs_embeds"])[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
past_key_values_length = (
shape_list(inputs["past_key_values"][0][0])[2] if inputs["past_key_values"] is not None else 0
)
# embed positions
positions = self.embed_positions(input_shape, past_key_values_length)
if inputs["inputs_embeds"] is None:
inputs["inputs_embeds"] = self.embed_tokens(inputs["input_ids"]) * self.embed_scale
hidden_states = inputs["inputs_embeds"]
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)
else:
combined_attention_mask = _expand_mask(
tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]
)
if inputs["attention_mask"] is not None:
combined_attention_mask = combined_attention_mask + _expand_mask(
inputs["attention_mask"], tgt_len=input_shape[-1]
)
if inputs["encoder_hidden_states"] is not None and inputs["encoder_attention_mask"] is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
inputs["encoder_attention_mask"] = _expand_mask(inputs["encoder_attention_mask"], tgt_len=input_shape[-1])
hidden_states = self.dropout(hidden_states + positions, training=inputs["training"])
# decoder layers
all_hidden_states = () if inputs["output_hidden_states"] else None
all_self_attns = () if inputs["output_attentions"] else None
all_cross_attns = () if (inputs["output_attentions"] and inputs["encoder_hidden_states"] is not None) else None
present_key_values = () if inputs["use_cache"] else None
# check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired
# The tf.debugging asserts are not compliant with XLA, so they
# have to be disabled in modes other than eager.
for attn_mask in ["head_mask", "cross_attn_head_mask"]:
if inputs[attn_mask] is not None and tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(inputs[attn_mask])[0],
len(self.layers),
message=f"The {attn_mask} should be specified for {len(self.layers)} layers, but it is for {shape_list(inputs[attn_mask])[0]}.",
)
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if inputs["output_hidden_states"]:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if inputs["training"] and (dropout_probability < self.layerdrop):
continue
past_key_value = inputs["past_key_values"][idx] if inputs["past_key_values"] is not None else None
hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(
hidden_states,
attention_mask=combined_attention_mask,
encoder_hidden_states=inputs["encoder_hidden_states"],
encoder_attention_mask=inputs["encoder_attention_mask"],
layer_head_mask=inputs["head_mask"][idx] if inputs["head_mask"] is not None else None,
cross_attn_layer_head_mask=inputs["cross_attn_head_mask"][idx]
if inputs["cross_attn_head_mask"] is not None
else None,
past_key_value=past_key_value,
)
if inputs["use_cache"]:
present_key_values += (present_key_value,)
if inputs["output_attentions"]:
all_self_attns += (layer_self_attn,)
if inputs["encoder_hidden_states"] is not None:
all_cross_attns += (layer_cross_attn,)
hidden_states = self.layer_norm(hidden_states)
if inputs["output_hidden_states"]:
all_hidden_states += (hidden_states,)
if inputs["output_attentions"]:
all_self_attns = list(all_self_attns)
if inputs["encoder_hidden_states"] is not None:
all_cross_attns = list(all_cross_attns)
if inputs["use_cache"]:
present_key_values = (inputs["encoder_hidden_states"], present_key_values)
if not inputs["return_dict"]:
return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns
else:
return TFBaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=present_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attns,
)
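# Cache-layout note (descriptive, not part of the library): when use_cache is enabled the
# decoder above returns its cache as a pair (encoder_hidden_states, per_layer_key_values),
# which is the two-element structure that prepare_inputs_for_generation later unpacks back
# into encoder_outputs and past_key_values at generation time.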
@keras_serializable
class TFPegasusMainLayer(tf.keras.layers.Layer):
config_class = PegasusConfig
def __init__(self, config: PegasusConfig, **kwargs):
super().__init__(**kwargs)
self.config = config
self.shared = TFSharedEmbeddings(config.vocab_size, config.d_model, config.pad_token_id, name="model.shared")
with tf.compat.v1.variable_scope("model.shared") as shared_abs_scope_name:
pass
# Wrap the layer to avoid problems with weight restoring and to ensure we're in the correct TF scope.
embed_tokens = TFWrappedEmbeddings(self.shared, abs_scope_name=shared_abs_scope_name)
embed_tokens.vocab_size = self.shared.vocab_size
embed_tokens.hidden_size = self.shared.hidden_size
self.encoder = TFPegasusEncoder(config, embed_tokens, name="encoder")
self.decoder = TFPegasusDecoder(config, embed_tokens, name="decoder")
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared.weight = new_embeddings
self.shared.vocab_size = self.shared.weight.shape[0]
# retrieve correct absolute scope for embed token wrapper
with tf.compat.v1.variable_scope("model.shared") as shared_abs_scope_name:
pass
# Wrap the layer to avoid problems with weight restoring and to ensure we're in the correct TF scope.
embed_tokens = TFWrappedEmbeddings(self.shared, abs_scope_name=shared_abs_scope_name)
self.encoder.set_embed_tokens(embed_tokens)
self.decoder.set_embed_tokens(embed_tokens)
def call(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
encoder_outputs=encoder_outputs,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["decoder_input_ids"] is None and inputs["decoder_inputs_embeds"] is None:
inputs["use_cache"] = False
inputs["output_hidden_states"] = (
inputs["output_hidden_states"]
if inputs["output_hidden_states"] is not None
else self.config.output_hidden_states
)
if inputs["encoder_outputs"] is None:
inputs["encoder_outputs"] = self.encoder(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
# If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True
elif inputs["return_dict"] and not isinstance(inputs["encoder_outputs"], TFBaseModelOutput):
inputs["encoder_outputs"] = TFBaseModelOutput(
last_hidden_state=inputs["encoder_outputs"][0],
hidden_states=inputs["encoder_outputs"][1] if len(inputs["encoder_outputs"]) > 1 else None,
attentions=inputs["encoder_outputs"][2] if len(inputs["encoder_outputs"]) > 2 else None,
)
# If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False
elif not inputs["return_dict"] and not isinstance(inputs["encoder_outputs"], tuple):
inputs["encoder_outputs"] = inputs["encoder_outputs"].to_tuple()
decoder_outputs = self.decoder(
inputs["decoder_input_ids"],
attention_mask=inputs["decoder_attention_mask"],
encoder_hidden_states=inputs["encoder_outputs"][0],
encoder_attention_mask=inputs["attention_mask"],
head_mask=inputs["decoder_head_mask"],
cross_attn_head_mask=inputs["cross_attn_head_mask"],
past_key_values=inputs["past_key_values"],
inputs_embeds=inputs["decoder_inputs_embeds"],
use_cache=inputs["use_cache"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
if not inputs["return_dict"]:
return decoder_outputs + inputs["encoder_outputs"]
return TFSeq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=inputs["encoder_outputs"].last_hidden_state,
encoder_hidden_states=inputs["encoder_outputs"].hidden_states,
encoder_attentions=inputs["encoder_outputs"].attentions,
)
@add_start_docstrings(
"The bare PEGASUS Model outputting raw hidden-states without any specific head on top.",
PEGASUS_START_DOCSTRING,
)
class TFPegasusModel(TFPegasusPreTrainedModel):
def __init__(self, config: PegasusConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.model = TFPegasusMainLayer(config, name="model")
def get_encoder(self):
return self.model.encoder
def get_decoder(self):
return self.model.decoder
@add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFSeq2SeqModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
encoder_outputs=encoder_outputs,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
outputs = self.model(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
decoder_input_ids=inputs["decoder_input_ids"],
decoder_attention_mask=inputs["decoder_attention_mask"],
head_mask=inputs["head_mask"],
decoder_head_mask=inputs["decoder_head_mask"],
cross_attn_head_mask=inputs["cross_attn_head_mask"],
encoder_outputs=inputs["encoder_outputs"],
past_key_values=inputs["past_key_values"],
inputs_embeds=inputs["inputs_embeds"],
decoder_inputs_embeds=inputs["decoder_inputs_embeds"],
use_cache=inputs["use_cache"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
return outputs
# Copied from transformers.models.bart.modeling_tf_bart.TFBartModel.serving_output
def serving_output(self, output):
pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
return TFSeq2SeqModelOutput(
last_hidden_state=output.last_hidden_state,
past_key_values=pkv,
decoder_hidden_states=dec_hs,
decoder_attentions=dec_attns,
cross_attentions=cross_attns,
encoder_last_hidden_state=output.encoder_last_hidden_state,
encoder_hidden_states=enc_hs,
encoder_attentions=enc_attns,
)
@add_start_docstrings(
"The PEGASUS Model with a language modeling head. Can be used for summarization.",
PEGASUS_START_DOCSTRING,
)
class TFPegasusForConditionalGeneration(TFPegasusPreTrainedModel, TFCausalLanguageModelingLoss):
_keys_to_ignore_on_load_unexpected = [
r"model.encoder.embed_tokens.weight",
r"model.decoder.embed_tokens.weight",
]
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.model = TFPegasusMainLayer(config, name="model")
self.use_cache = config.use_cache
# final_logits_bias is registered as a buffer in pytorch, so it is not trainable here for the sake of consistency.
self.final_logits_bias = self.add_weight(
name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False
)
def get_decoder(self):
return self.model.decoder
def get_encoder(self):
return self.model.encoder
def get_output_embeddings(self):
return self.get_input_embeddings()
def set_output_embeddings(self, value):
self.set_input_embeddings(value)
def get_bias(self):
return {"final_logits_bias": self.final_logits_bias}
def set_bias(self, value):
self.final_logits_bias = value["final_logits_bias"]
@add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
@add_end_docstrings(PEGASUS_GENERATION_EXAMPLE)
def call(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs: Optional[TFBaseModelOutput] = None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,
config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
Returns:
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
encoder_outputs=encoder_outputs,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
if inputs["labels"] is not None:
inputs["labels"] = tf.where(
inputs["labels"] == self.config.pad_token_id,
tf.fill(shape_list(inputs["labels"]), -100),
inputs["labels"],
)
inputs["use_cache"] = False
if inputs["decoder_input_ids"] is None:
inputs["decoder_input_ids"] = shift_tokens_right(
inputs["labels"], self.config.pad_token_id, self.config.decoder_start_token_id
)
outputs = self.model(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
decoder_input_ids=inputs["decoder_input_ids"],
encoder_outputs=inputs["encoder_outputs"],
decoder_attention_mask=inputs["decoder_attention_mask"],
head_mask=inputs["head_mask"],
decoder_head_mask=inputs["decoder_head_mask"],
cross_attn_head_mask=inputs["cross_attn_head_mask"],
past_key_values=inputs["past_key_values"],
inputs_embeds=inputs["inputs_embeds"],
decoder_inputs_embeds=inputs["decoder_inputs_embeds"],
use_cache=inputs["use_cache"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
lm_logits = self.model.shared(outputs[0], mode="linear")
lm_logits = lm_logits + self.final_logits_bias
masked_lm_loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], lm_logits)
if not inputs["return_dict"]:
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return TFSeq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=outputs.past_key_values, # index 1 of d outputs
decoder_hidden_states=outputs.decoder_hidden_states, # index 2 of d outputs
decoder_attentions=outputs.decoder_attentions, # index 3 of d outputs
cross_attentions=outputs.cross_attentions, # index 4 of d outputs
encoder_last_hidden_state=outputs.encoder_last_hidden_state, # index 0 of encoder outputs
encoder_hidden_states=outputs.encoder_hidden_states, # 1 of e out
encoder_attentions=outputs.encoder_attentions, # 2 of e out
)
# Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.serving_output
def serving_output(self, output):
pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
return TFSeq2SeqLMOutput(
logits=output.logits,
past_key_values=pkv,
decoder_hidden_states=dec_hs,
decoder_attentions=dec_attns,
cross_attentions=cross_attns,
encoder_last_hidden_state=output.encoder_last_hidden_state,
encoder_hidden_states=enc_hs,
encoder_attentions=enc_attns,
)
# Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.prepare_inputs_for_generation
def prepare_inputs_for_generation(
self,
decoder_input_ids,
past,
attention_mask,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
use_cache=None,
**kwargs,
) -> Dict:
assert past is not None and len(past) in {1, 2}, f"past has to be an iterable of length 1 or 2, got {past}"
if len(past) == 1:
assert isinstance(past[0], tf.Tensor), f"`past[0]` has to be of type `tf.Tensor`, but is {type(past[0])}"
encoder_outputs = TFBaseModelOutput(last_hidden_state=past[0])
past_key_values = None
else:
assert (
len(past) == 2
), "`past` has to be of length 2 with the encoder_outputs at the first position and past_key_values at the second position."
encoder_outputs, past_key_values = past
if isinstance(encoder_outputs, tuple):
assert isinstance(
encoder_outputs[0], tf.Tensor
), f"`encoder_outputs[0]` has to be of type `tf.Tensor`, but is {type(encoder_outputs[0])}"
encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_outputs[0])
elif isinstance(encoder_outputs, tf.Tensor):
encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_outputs)
assert (
past_key_values
), f"decoder cached states must be truthy. got {past_key_values} from the 2nd element of past"
decoder_input_ids = decoder_input_ids[:, -1:]
assert isinstance(
encoder_outputs, TFBaseModelOutput
), f"encoder_outputs should be a TFBaseModelOutput, Instead got {type(encoder_outputs)}."
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs": encoder_outputs,
"past_key_values": past_key_values,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
"use_cache": use_cache, # change this to avoid caching (presumably for debugging)
}
def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor):
return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
@staticmethod
# Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration._reorder_cache
def _reorder_cache(past, beam_idx):
if len(past) == 1:
return past
past_key_values = past[1]
reordered_past = ()
for layer_past_key_values in past_key_values:
reordered_past += (
tuple(tf.gather(layer_past_key_value, beam_idx) for layer_past_key_value in layer_past_key_values[:2])
+ layer_past_key_values[2:],
)
return (past[0], reordered_past)
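# Beam-search note (descriptive, not part of the library): _reorder_cache applies the
# surviving-beam indices in `beam_idx` to the cached self-attention key/value tensors
# (the first two entries of each layer's tuple) via tf.gather, leaves the remaining
# cached cross-attention states ([2:]) untouched, and passes the encoder outputs stored
# in past[0] through unchanged.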
|
# coding=utf-8
# Copyright 2021, Google Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 Pegasus model. """
import random
from typing import Dict, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFBaseModelOutputWithPastAndCrossAttentions,
TFSeq2SeqLMOutput,
TFSeq2SeqModelOutput,
)
# Public API
from ...modeling_tf_utils import (
DUMMY_INPUTS,
TFCausalLanguageModelingLoss,
TFPreTrainedModel,
TFSharedEmbeddings,
TFWrappedEmbeddings,
input_processing,
keras_serializable,
shape_list,
)
from ...utils import logging
from .configuration_pegasus import PegasusConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "google/pegasus-large"
_CONFIG_FOR_DOC = "PegasusConfig"
_TOKENIZER_FOR_DOC = "PegasusTokenizer"
LARGE_NEGATIVE = -1e8
# Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right
def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id)
shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids = tf.where(
shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids
)
if tf.executing_eagerly():
# "Verify that `labels` has only positive values and -100"
assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0))
# Make sure the assertion op is called by wrapping the result in an identity no-op
with tf.control_dependencies([assert_gte0]):
shifted_input_ids = tf.identity(shifted_input_ids)
return shifted_input_ids
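# Worked example (illustrative values, not part of the library): with pad_token_id=0 and
# decoder_start_token_id=2, labels [[5, 6, 7]] are shifted to decoder inputs [[2, 5, 6]];
# any -100 entries that end up in the shifted ids are replaced by the pad token before
# the non-negativity check runs.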
# Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
"""
Make the causal mask used for the decoder's uni-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
mask_cond = tf.range(shape_list(mask)[-1])
mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)
if past_key_values_length > 0:
mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)
return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))
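# Worked example (illustrative, not part of the library): for input_ids_shape=(1, 3) and
# past_key_values_length=0 the per-row mask is
#   [[0, -1e8, -1e8],
#    [0,    0, -1e8],
#    [0,    0,    0]]
# tiled to shape (bsz, 1, tgt_len, tgt_len), so each position can only attend to itself
# and to earlier positions.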
# Copied from transformers.models.bart.modeling_tf_bart._expand_mask
def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None, past_key_values_length: int = 0):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
src_len = shape_list(mask)[1]
tgt_len = tgt_len if tgt_len is not None else src_len
one_cst = tf.constant(1.0)
mask = tf.cast(mask, dtype=one_cst.dtype)
expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
return (one_cst - expanded_mask) * LARGE_NEGATIVE
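# Worked example (illustrative, not part of the library): an attention_mask of [[1, 1, 0]]
# expands to zeros for the first two (attended) positions and -1e8 for the padded third
# position, broadcast to shape (bsz, 1, tgt_len, src_len) so it can be added directly to
# the raw attention scores.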
# Copied from transformers.models.marian.modeling_tf_marian.TFMarianSinusoidalPositionalEmbedding with Marian->Pegasus
class TFPegasusSinusoidalPositionalEmbedding(tf.keras.layers.Layer):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions: int, embedding_dim: int, **kwargs):
super().__init__(**kwargs)
if embedding_dim % 2 != 0:
raise NotImplementedError(f"odd embedding_dim {embedding_dim} not supported")
self.embedding_dim = embedding_dim
self.num_positions = num_positions
def build(self, input_shape: tf.TensorShape):
"""
Build shared token embedding layer Shared weights logic adapted from
https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
"""
weight = self._init_weight(self.num_positions, self.embedding_dim)
self.weight = self.add_weight(
name="embeddings",
shape=[self.num_positions, self.embedding_dim],
)
weight = tf.cast(weight, dtype=self.weight.dtype)
self.weight.assign(weight)
super().build(input_shape)
@staticmethod
def _init_weight(n_pos: int, dim: int):
"""
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
"""
position_enc = np.array(
[[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
)
# for position 0 the sine half is all zeros and the cosine half is all ones
position_enc[:, 0 : dim // 2] = np.sin(position_enc[:, 0::2])
position_enc[:, dim // 2 :] = np.cos(position_enc[:, 1::2])
# convert to tensor
table = tf.convert_to_tensor(position_enc)
tf.stop_gradient(table)
return table
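# Layout sketch (illustrative, assuming dim=4): row `pos` of the resulting table is
#   [sin(pos / 10000**0), sin(pos / 10000**(2/4)), cos(pos / 10000**0), cos(pos / 10000**(2/4))]
# i.e. all sine features first and the matching cosine features in the second half,
# rather than interleaving them as the XLM implementation does.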
def call(self, input_shape: tf.TensorShape, past_key_values_length: int = 0):
"""Input is expected to be of size [bsz x seqlen]."""
bsz, seq_len = input_shape[:2]
positions = tf.range(past_key_values_length, seq_len + past_key_values_length, delta=1, name="range")
return tf.gather(self.weight, positions)
# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->Pegasus
class TFPegasusAttention(tf.keras.layers.Layer):
"""Multi-headed attention from "Attention Is All You Need"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = tf.keras.layers.Dropout(dropout)
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.is_decoder = is_decoder
self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
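# Shape note (descriptive, not part of the library): _shape maps a (bsz, seq_len, embed_dim)
# projection to (bsz, num_heads, seq_len, head_dim), so that the later reshape to
# (bsz * num_heads, -1, head_dim) lets every head be matmul'ed as an independent batch entry.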
def call(
self,
hidden_states: tf.Tensor,
key_value_states: Optional[tf.Tensor] = None,
past_key_value: Optional[Tuple[Tuple[tf.Tensor]]] = None,
attention_mask: Optional[tf.Tensor] = None,
layer_head_mask: Optional[tf.Tensor] = None,
training=False,
) -> Tuple[tf.Tensor, Optional[tf.Tensor]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, embed_dim = shape_list(hidden_states)
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = tf.concat([past_key_value[0], key_states], axis=2)
value_states = tf.concat([past_key_value[1], value_states], axis=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
key_states = tf.reshape(key_states, proj_shape)
value_states = tf.reshape(value_states, proj_shape)
src_len = shape_list(key_states)[1]
attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
# The tf.debugging asserts are not compliant with XLA, so they
# have to be disabled in modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attn_weights),
[bsz * self.num_heads, tgt_len, src_len],
message=f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {shape_list(attn_weights)}",
)
if attention_mask is not None:
# The tf.debugging asserts are not compliant with XLA, so they
# have to be disabled in modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attention_mask),
[bsz, 1, tgt_len, src_len],
message=f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {shape_list(attention_mask)}",
)
attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
attn_weights = tf.nn.softmax(attn_weights, axis=-1)
if layer_head_mask is not None:
# The tf.debugging asserts are not compliant with XLA, so they
# have to be disabled in modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(layer_head_mask),
[self.num_heads],
message=f"Head mask for a single layer should be of size {(self.num_heads)}, but is {shape_list(layer_head_mask)}",
)
attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
attn_weights, (bsz, self.num_heads, tgt_len, src_len)
)
attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
attn_probs = self.dropout(attn_weights, training=training)
attn_output = tf.matmul(attn_probs, value_states)
# The tf.debugging asserts are not compliant with XLA, so they
# have to be disabled in modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attn_output),
[bsz * self.num_heads, tgt_len, self.head_dim],
message=f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {shape_list(attn_output)}",
)
attn_output = tf.transpose(
tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
)
attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
attn_output = self.out_proj(attn_output)
attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
return attn_output, attn_weights, past_key_value
# Copied from transformers.models.mbart.modeling_tf_mbart.TFMBartEncoderLayer with MBart->Pegasus
class TFPegasusEncoderLayer(tf.keras.layers.Layer):
def __init__(self, config: PegasusConfig, **kwargs):
super().__init__(**kwargs)
self.embed_dim = config.d_model
self.self_attn = TFPegasusAttention(
self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn"
)
self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
self.dropout = tf.keras.layers.Dropout(config.dropout)
self.activation_fn = get_tf_activation(config.activation_function)
self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout)
self.fc1 = tf.keras.layers.Dense(config.encoder_ffn_dim, name="fc1")
self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2")
self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training=False):
"""
Args:
hidden_states (:obj:`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (:obj:`tf.Tensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`tf.Tensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, self_attn_weights, _ = self.self_attn(
hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask
)
# The tf.debugging asserts are not compliant with XLA, so they
# have to be disabled in modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(hidden_states),
shape_list(residual),
message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}",
)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.activation_dropout(hidden_states, training=training)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
return hidden_states, self_attn_weights
# Copied from transformers.models.mbart.modeling_tf_mbart.TFMBartDecoderLayer with MBart->Pegasus
class TFPegasusDecoderLayer(tf.keras.layers.Layer):
def __init__(self, config: PegasusConfig, **kwargs):
super().__init__(**kwargs)
self.embed_dim = config.d_model
self.self_attn = TFPegasusAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
name="self_attn",
is_decoder=True,
)
self.dropout = tf.keras.layers.Dropout(config.dropout)
self.activation_fn = get_tf_activation(config.activation_function)
self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout)
self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
self.encoder_attn = TFPegasusAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
name="encoder_attn",
is_decoder=True,
)
self.encoder_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm")
self.fc1 = tf.keras.layers.Dense(config.decoder_ffn_dim, name="fc1")
self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2")
self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
def call(
self,
hidden_states,
attention_mask: Optional[tf.Tensor] = None,
encoder_hidden_states: Optional[tf.Tensor] = None,
encoder_attention_mask: Optional[tf.Tensor] = None,
layer_head_mask: Optional[tf.Tensor] = None,
cross_attn_layer_head_mask: Optional[tf.Tensor] = None,
past_key_value: Optional[Tuple[tf.Tensor]] = None,
training=False,
) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
"""
Args:
hidden_states (:obj:`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (:obj:`tf.Tensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (:obj:`tf.Tensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (:obj:`tf.Tensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`tf.Tensor`): mask for attention heads in a given layer of size
`(decoder_attention_heads,)`
cross_attn_layer_head_mask (:obj:`tf.Tensor`): mask for heads of the cross-attention module.
`(decoder_attention_heads,)`
past_key_value (:obj:`Tuple(tf.Tensor)`): cached past key and value projection states
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Self Attention
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
# add present self-attn cache to positions 1,2 of present_key_value tuple
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
past_key_value=self_attn_past_key_value,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
# Cross-Attention Block
cross_attn_present_key_value = None
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
layer_head_mask=cross_attn_layer_head_mask,
past_key_value=cross_attn_past_key_value,
)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
# add cross-attn to positions 3,4 of present_key_value tuple
present_key_value = present_key_value + cross_attn_present_key_value
# Fully Connected
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.activation_dropout(hidden_states, training=training)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
return (
hidden_states,
self_attn_weights,
cross_attn_weights,
present_key_value,
)
class TFPegasusPreTrainedModel(TFPreTrainedModel):
config_class = PegasusConfig
base_model_prefix = "model"
@property
def dummy_inputs(self):
pad_token = 1
input_ids = tf.cast(tf.convert_to_tensor(DUMMY_INPUTS), tf.int32)
decoder_input_ids = tf.cast(tf.convert_to_tensor(DUMMY_INPUTS), tf.int32)
dummy_inputs = {
"decoder_input_ids": decoder_input_ids,
"attention_mask": tf.math.not_equal(input_ids, pad_token),
"input_ids": input_ids,
}
return dummy_inputs
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
"decoder_input_ids": tf.TensorSpec((None, None), tf.int32, name="decoder_input_ids"),
"decoder_attention_mask": tf.TensorSpec((None, None), tf.int32, name="decoder_attention_mask"),
}
]
)
# Copied from transformers.models.bart.modeling_tf_bart.TFBartPretrainedModel.serving
def serving(self, inputs):
output = self.call(inputs)
return self.serving_output(output)
PEGASUS_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the
generic methods the library implements for all its models (such as downloading or saving, resizing the input
embeddings, pruning heads etc.)
This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use
it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage
and behavior.
.. note::
TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional argument.
This second option is useful when using the :meth:`tf.keras.Model.fit` method, which currently requires having all
the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in
the first positional argument:
- a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
Args:
config (:class:`~transformers.PegasusConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.TFPreTrainedModel.from_pretrained` method to load the
model weights.
"""
PEGASUS_GENERATION_EXAMPLE = r"""
Summarization example::
>>> from transformers import PegasusTokenizer, TFPegasusForConditionalGeneration
>>> model = TFPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum')
>>> tokenizer = PegasusTokenizer.from_pretrained('google/pegasus-xsum')
>>> ARTICLE_TO_SUMMARIZE = (
... "PG&E stated it scheduled the blackouts in response to forecasts for high winds "
... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were "
... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."
... )
>>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='tf')
>>> # Generate Summary
>>> summary_ids = model.generate(inputs['input_ids'])
>>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])
"""
PEGASUS_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`tf.Tensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are decoder input IDs? <../glossary.html#decoder-input-ids>`__
Pegasus uses the :obj:`pad_token_id` as the starting token for :obj:`decoder_input_ids` generation. If
:obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see
:obj:`past_key_values`).
decoder_attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
If not provided, a default mask that ignores pad tokens will be created. It is not recommended to set this for most use cases.
head_mask (:obj:`tf.Tensor` of shape :obj:`(encoder_layers, encoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (:obj:`tf.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (:obj:`tf.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (:obj:`tf.FloatTensor`, `optional`):
Sequence of hidden states at the output of the last layer of the encoder, of shape
:obj:`(batch_size, sequence_length, hidden_size)`. Used in the cross-attention of the decoder.
past_key_values (:obj:`Tuple[Tuple[tf.Tensor]]` of length :obj:`config.n_layers`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`). Set to :obj:`False` during training, :obj:`True` during generation.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
config will be used instead.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
used instead.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. This
argument can be used in eager mode, in graph mode the value will always be set to True.
training (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@keras_serializable
class TFPegasusEncoder(tf.keras.layers.Layer):
config_class = PegasusConfig
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
:class:`TFPegasusEncoderLayer`.
Args:
config: PegasusConfig
"""
def __init__(self, config: PegasusConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, **kwargs):
super().__init__(**kwargs)
self.config = config
self.dropout = tf.keras.layers.Dropout(config.dropout)
self.layerdrop = config.encoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
self.embed_tokens = embed_tokens
self.embed_positions = TFPegasusSinusoidalPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
name="embed_positions",
)
self.layers = [TFPegasusEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")
def get_embed_tokens(self):
return self.embed_tokens
def set_embed_tokens(self, embed_tokens):
self.embed_tokens = embed_tokens
def call(
self,
input_ids=None,
inputs_embeds=None,
attention_mask=None,
head_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
"""
Args:
input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`tf.Tensor` of shape :obj:`(encoder_layers, encoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
in the config will be used instead.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail. This argument can be used only in eager mode, in graph mode the value in the config
will be used instead.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. This
argument can be used in eager mode, in graph mode the value will always be set to True.
training (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif inputs["input_ids"] is not None:
input_shape = shape_list(inputs["input_ids"])
elif inputs["inputs_embeds"] is not None:
input_shape = shape_list(inputs["inputs_embeds"])[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs["inputs_embeds"] is None:
inputs["inputs_embeds"] = self.embed_tokens(inputs["input_ids"]) * self.embed_scale
embed_pos = self.embed_positions(input_shape)
hidden_states = inputs["inputs_embeds"] + embed_pos
hidden_states = self.dropout(hidden_states, training=inputs["training"])
# check attention mask and invert
if inputs["attention_mask"] is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
attention_mask = _expand_mask(inputs["attention_mask"])
else:
attention_mask = None
encoder_states = () if inputs["output_hidden_states"] else None
all_attentions = () if inputs["output_attentions"] else None
# check if head_mask has a correct number of layers specified if desired
# The tf.debugging asserts are not compliant with XLA, so they
# have to be disabled in modes other than eager.
if inputs["head_mask"] is not None and tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(inputs["head_mask"])[0],
len(self.layers),
message=f"The head_mask should be specified for {len(self.layers)} layers, but it is for {shape_list(inputs['head_mask'])[0]}.",
)
# encoder layers
for idx, encoder_layer in enumerate(self.layers):
if inputs["output_hidden_states"]:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if inputs["training"] and (dropout_probability < self.layerdrop): # skip the layer
continue
hidden_states, attn = encoder_layer(
hidden_states,
attention_mask,
inputs["head_mask"][idx] if inputs["head_mask"] is not None else None,
)
if inputs["output_attentions"]:
all_attentions += (attn,)
hidden_states = self.layer_norm(hidden_states)
if inputs["output_hidden_states"]:
encoder_states = encoder_states + (hidden_states,)
if not inputs["return_dict"]:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
@keras_serializable
class TFPegasusDecoder(tf.keras.layers.Layer):
config_class = PegasusConfig
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a :class:`TFPegasusDecoderLayer`
Args:
config: PegasusConfig
embed_tokens: output embedding
"""
def __init__(self, config: PegasusConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, **kwargs):
super().__init__(**kwargs)
self.config = config
self.padding_idx = config.pad_token_id
self.embed_tokens = embed_tokens
self.layerdrop = config.decoder_layerdrop
self.embed_positions = TFPegasusSinusoidalPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
name="embed_positions",
)
self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
self.layers = [TFPegasusDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)]
self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")
self.dropout = tf.keras.layers.Dropout(config.dropout)
def get_embed_tokens(self):
return self.embed_tokens
def set_embed_tokens(self, embed_tokens):
self.embed_tokens = embed_tokens
def call(
self,
input_ids=None,
inputs_embeds=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
r"""
Args:
input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
encoder_hidden_states (:obj:`tf.Tensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`tf.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (:obj:`tf.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (:obj:`Tuple[Tuple[tf.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last
:obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of
shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size,
sequence_length)`.
inputs_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
in the config will be used instead.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail. This argument can be used only in eager mode, in graph mode the value in the config
will be used instead.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. This
argument can be used in eager mode, in graph mode the value will always be set to True.
training (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
head_mask=head_mask,
cross_attn_head_mask=cross_attn_head_mask,
inputs_embeds=inputs_embeds,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif inputs["input_ids"] is not None:
input_shape = shape_list(inputs["input_ids"])
elif inputs["inputs_embeds"] is not None:
input_shape = shape_list(inputs["inputs_embeds"])[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
past_key_values_length = (
shape_list(inputs["past_key_values"][0][0])[2] if inputs["past_key_values"] is not None else 0
)
# embed positions
positions = self.embed_positions(input_shape, past_key_values_length)
if inputs["inputs_embeds"] is None:
inputs["inputs_embeds"] = self.embed_tokens(inputs["input_ids"]) * self.embed_scale
hidden_states = inputs["inputs_embeds"]
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)
else:
combined_attention_mask = _expand_mask(
tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]
)
if inputs["attention_mask"] is not None:
combined_attention_mask = combined_attention_mask + _expand_mask(
inputs["attention_mask"], tgt_len=input_shape[-1]
)
if inputs["encoder_hidden_states"] is not None and inputs["encoder_attention_mask"] is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
inputs["encoder_attention_mask"] = _expand_mask(inputs["encoder_attention_mask"], tgt_len=input_shape[-1])
hidden_states = self.dropout(hidden_states + positions, training=inputs["training"])
# decoder layers
all_hidden_states = () if inputs["output_hidden_states"] else None
all_self_attns = () if inputs["output_attentions"] else None
all_cross_attns = () if (inputs["output_attentions"] and inputs["encoder_hidden_states"] is not None) else None
present_key_values = () if inputs["use_cache"] else None
# check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired
# The tf.debugging asserts are not compliant with XLA, so they
# have to be disabled in modes other than eager.
for attn_mask in ["head_mask", "cross_attn_head_mask"]:
if inputs[attn_mask] is not None and tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(inputs[attn_mask])[0],
len(self.layers),
message=f"The {attn_mask} should be specified for {len(self.layers)} layers, but it is for {shape_list(inputs[attn_mask])[0]}.",
)
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if inputs["output_hidden_states"]:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if inputs["training"] and (dropout_probability < self.layerdrop):
continue
past_key_value = inputs["past_key_values"][idx] if inputs["past_key_values"] is not None else None
hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(
hidden_states,
attention_mask=combined_attention_mask,
encoder_hidden_states=inputs["encoder_hidden_states"],
encoder_attention_mask=inputs["encoder_attention_mask"],
layer_head_mask=inputs["head_mask"][idx] if inputs["head_mask"] is not None else None,
cross_attn_layer_head_mask=inputs["cross_attn_head_mask"][idx]
if inputs["cross_attn_head_mask"] is not None
else None,
past_key_value=past_key_value,
)
if inputs["use_cache"]:
present_key_values += (present_key_value,)
if inputs["output_attentions"]:
all_self_attns += (layer_self_attn,)
if inputs["encoder_hidden_states"] is not None:
all_cross_attns += (layer_cross_attn,)
hidden_states = self.layer_norm(hidden_states)
if inputs["output_hidden_states"]:
all_hidden_states += (hidden_states,)
if inputs["output_attentions"]:
all_self_attns = list(all_self_attns)
if inputs["encoder_hidden_states"] is not None:
all_cross_attns = list(all_cross_attns)
if inputs["use_cache"]:
present_key_values = (inputs["encoder_hidden_states"], present_key_values)
if not inputs["return_dict"]:
return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns
else:
return TFBaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=present_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attns,
)
@keras_serializable
class TFPegasusMainLayer(tf.keras.layers.Layer):
config_class = PegasusConfig
def __init__(self, config: PegasusConfig, **kwargs):
super().__init__(**kwargs)
self.config = config
self.shared = TFSharedEmbeddings(config.vocab_size, config.d_model, config.pad_token_id, name="model.shared")
with tf.compat.v1.variable_scope("model.shared") as shared_abs_scope_name:
pass
# Wraps layer to avoid problems with weight restoring and ensuring we're in the correct TF scope.
embed_tokens = TFWrappedEmbeddings(self.shared, abs_scope_name=shared_abs_scope_name)
embed_tokens.vocab_size = self.shared.vocab_size
embed_tokens.hidden_size = self.shared.hidden_size
self.encoder = TFPegasusEncoder(config, embed_tokens, name="encoder")
self.decoder = TFPegasusDecoder(config, embed_tokens, name="decoder")
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared.weight = new_embeddings
self.shared.vocab_size = self.shared.weight.shape[0]
# retrieve correct absolute scope for embed token wrapper
with tf.compat.v1.variable_scope("model.shared") as shared_abs_scope_name:
pass
# Wraps layer to avoid problems with weight restoring and ensuring we're in the correct TF scope.
embed_tokens = TFWrappedEmbeddings(self.shared, abs_scope_name=shared_abs_scope_name)
self.encoder.set_embed_tokens(embed_tokens)
self.decoder.set_embed_tokens(embed_tokens)
def call(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
encoder_outputs=encoder_outputs,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["decoder_input_ids"] is None and inputs["decoder_inputs_embeds"] is None:
inputs["use_cache"] = False
inputs["output_hidden_states"] = (
inputs["output_hidden_states"]
if inputs["output_hidden_states"] is not None
else self.config.output_hidden_states
)
if inputs["encoder_outputs"] is None:
inputs["encoder_outputs"] = self.encoder(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
# If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True
elif inputs["return_dict"] and not isinstance(inputs["encoder_outputs"], TFBaseModelOutput):
inputs["encoder_outputs"] = TFBaseModelOutput(
last_hidden_state=inputs["encoder_outputs"][0],
hidden_states=inputs["encoder_outputs"][1] if len(inputs["encoder_outputs"]) > 1 else None,
attentions=inputs["encoder_outputs"][2] if len(inputs["encoder_outputs"]) > 2 else None,
)
# If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False
elif not inputs["return_dict"] and not isinstance(inputs["encoder_outputs"], tuple):
inputs["encoder_outputs"] = inputs["encoder_outputs"].to_tuple()
decoder_outputs = self.decoder(
inputs["decoder_input_ids"],
attention_mask=inputs["decoder_attention_mask"],
encoder_hidden_states=inputs["encoder_outputs"][0],
encoder_attention_mask=inputs["attention_mask"],
head_mask=inputs["decoder_head_mask"],
cross_attn_head_mask=inputs["cross_attn_head_mask"],
past_key_values=inputs["past_key_values"],
inputs_embeds=inputs["decoder_inputs_embeds"],
use_cache=inputs["use_cache"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
if not inputs["return_dict"]:
return decoder_outputs + inputs["encoder_outputs"]
return TFSeq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=inputs["encoder_outputs"].last_hidden_state,
encoder_hidden_states=inputs["encoder_outputs"].hidden_states,
encoder_attentions=inputs["encoder_outputs"].attentions,
)
@add_start_docstrings(
"The bare PEGASUS Model outputting raw hidden-states without any specific head on top.",
PEGASUS_START_DOCSTRING,
)
class TFPegasusModel(TFPegasusPreTrainedModel):
def __init__(self, config: PegasusConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.model = TFPegasusMainLayer(config, name="model")
def get_encoder(self):
return self.model.encoder
def get_decoder(self):
return self.model.decoder
@add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFSeq2SeqModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
encoder_outputs=encoder_outputs,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
outputs = self.model(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
decoder_input_ids=inputs["decoder_input_ids"],
decoder_attention_mask=inputs["decoder_attention_mask"],
head_mask=inputs["head_mask"],
decoder_head_mask=inputs["decoder_head_mask"],
cross_attn_head_mask=inputs["cross_attn_head_mask"],
encoder_outputs=inputs["encoder_outputs"],
past_key_values=inputs["past_key_values"],
inputs_embeds=inputs["inputs_embeds"],
decoder_inputs_embeds=inputs["decoder_inputs_embeds"],
use_cache=inputs["use_cache"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
return outputs
# Copied from transformers.models.bart.modeling_tf_bart.TFBartModel.serving_output
def serving_output(self, output):
pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
return TFSeq2SeqModelOutput(
last_hidden_state=output.last_hidden_state,
past_key_values=pkv,
decoder_hidden_states=dec_hs,
decoder_attentions=dec_attns,
cross_attentions=cross_attns,
encoder_last_hidden_state=output.encoder_last_hidden_state,
encoder_hidden_states=enc_hs,
encoder_attentions=enc_attns,
)
@add_start_docstrings(
"The PEGASUS Model with a language modeling head. Can be used for summarization.",
PEGASUS_START_DOCSTRING,
)
class TFPegasusForConditionalGeneration(TFPegasusPreTrainedModel, TFCausalLanguageModelingLoss):
_keys_to_ignore_on_load_unexpected = [
r"model.encoder.embed_tokens.weight",
r"model.decoder.embed_tokens.weight",
]
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.model = TFPegasusMainLayer(config, name="model")
self.use_cache = config.use_cache
# final_logits_bias is registered as a buffer in PyTorch, so it is not trainable, for the sake of consistency.
self.final_logits_bias = self.add_weight(
name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False
)
def get_decoder(self):
return self.model.decoder
def get_encoder(self):
return self.model.encoder
def get_output_embeddings(self):
return self.get_input_embeddings()
def set_output_embeddings(self, value):
self.set_input_embeddings(value)
def get_bias(self):
return {"final_logits_bias": self.final_logits_bias}
def set_bias(self, value):
self.final_logits_bias = value["final_logits_bias"]
@add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
@add_end_docstrings(PEGASUS_GENERATION_EXAMPLE)
def call(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs: Optional[TFBaseModelOutput] = None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,
config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
Returns:
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
encoder_outputs=encoder_outputs,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
if inputs["labels"] is not None:
inputs["labels"] = tf.where(
inputs["labels"] == self.config.pad_token_id,
tf.fill(shape_list(inputs["labels"]), -100),
inputs["labels"],
)
inputs["use_cache"] = False
if inputs["decoder_input_ids"] is None:
inputs["decoder_input_ids"] = shift_tokens_right(
inputs["labels"], self.config.pad_token_id, self.config.decoder_start_token_id
)
outputs = self.model(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
decoder_input_ids=inputs["decoder_input_ids"],
encoder_outputs=inputs["encoder_outputs"],
decoder_attention_mask=inputs["decoder_attention_mask"],
head_mask=inputs["head_mask"],
decoder_head_mask=inputs["decoder_head_mask"],
cross_attn_head_mask=inputs["cross_attn_head_mask"],
past_key_values=inputs["past_key_values"],
inputs_embeds=inputs["inputs_embeds"],
decoder_inputs_embeds=inputs["decoder_inputs_embeds"],
use_cache=inputs["use_cache"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
lm_logits = self.model.shared(outputs[0], mode="linear")
lm_logits = lm_logits + self.final_logits_bias
masked_lm_loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], lm_logits)
if not inputs["return_dict"]:
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return TFSeq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=outputs.past_key_values, # index 1 of decoder outputs
decoder_hidden_states=outputs.decoder_hidden_states, # index 2 of decoder outputs
decoder_attentions=outputs.decoder_attentions, # index 3 of decoder outputs
cross_attentions=outputs.cross_attentions, # index 4 of decoder outputs
encoder_last_hidden_state=outputs.encoder_last_hidden_state, # index 0 of encoder outputs
encoder_hidden_states=outputs.encoder_hidden_states, # index 1 of encoder outputs
encoder_attentions=outputs.encoder_attentions, # index 2 of encoder outputs
)
# Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.serving_output
def serving_output(self, output):
pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
return TFSeq2SeqLMOutput(
logits=output.logits,
past_key_values=pkv,
decoder_hidden_states=dec_hs,
decoder_attentions=dec_attns,
cross_attentions=cross_attns,
encoder_last_hidden_state=output.encoder_last_hidden_state,
encoder_hidden_states=enc_hs,
encoder_attentions=enc_attns,
)
# Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.prepare_inputs_for_generation
def prepare_inputs_for_generation(
self,
decoder_input_ids,
past,
attention_mask,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
use_cache=None,
**kwargs,
) -> Dict:
assert past is not None and len(past) in {1, 2}, f"past has to be an iterable of length 1 or 2, got {past}"
if len(past) == 1:
assert isinstance(past[0], tf.Tensor), f"`past[0]` has to be of type `tf.Tensor`, but is {type(past[0])}"
encoder_outputs = TFBaseModelOutput(last_hidden_state=past[0])
past_key_values = None
else:
assert (
len(past) == 2
), "`past` has to be of length 2 with the encoder_outputs at the first position and past_key_values at the second position."
encoder_outputs, past_key_values = past
if isinstance(encoder_outputs, tuple):
assert isinstance(
encoder_outputs[0], tf.Tensor
), f"`encoder_outputs[0]` has to be of type `tf.Tensor`, but is {type(encoder_outputs[0])}"
encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_outputs[0])
elif isinstance(encoder_outputs, tf.Tensor):
encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_outputs)
assert (
past_key_values
), f"decoder cached states must be truthy. got {past_key_values} from the 2nd element of past"
decoder_input_ids = decoder_input_ids[:, -1:]
assert isinstance(
encoder_outputs, TFBaseModelOutput
), f"encoder_outputs should be a TFBaseModelOutput, Instead got {type(encoder_outputs)}."
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs": encoder_outputs,
"past_key_values": past_key_values,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
"use_cache": use_cache, # change this to avoid caching (presumably for debugging)
}
def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor):
return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
@staticmethod
# Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration._reorder_cache
def _reorder_cache(past, beam_idx):
if len(past) == 1:
return past
past_key_values = past[1]
reordered_past = ()
for layer_past_key_values in past_key_values:
reordered_past += (
tuple(tf.gather(layer_past_key_value, beam_idx) for layer_past_key_value in layer_past_key_values[:2])
+ layer_past_key_values[2:],
)
return (past[0], reordered_past)
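# ------------------------------------------------------------------------------------------------
# Hedged usage sketch (an editorial addition, not part of the upstream module): it illustrates how
# the conditional-generation head defined above derives `decoder_input_ids` from `labels` via
# `shift_tokens_right` and returns a masked LM loss. The 'google/pegasus-xsum' checkpoint name and
# the toy article/summary strings are assumptions; any reachable Pegasus checkpoint would do.
if __name__ == "__main__":
    from transformers import PegasusTokenizer

    _tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
    _model = TFPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")

    _article = "PG&E scheduled the blackouts in response to forecasts for high winds."
    _summary = "California utility plans power shutoffs."

    _batch = _tok([_article], return_tensors="tf")
    _labels = _tok([_summary], return_tensors="tf").input_ids

    # Passing `labels` without `decoder_input_ids` exercises the shift_tokens_right defaulting
    # implemented in TFPegasusForConditionalGeneration.call above.
    _out = _model(input_ids=_batch["input_ids"], attention_mask=_batch["attention_mask"], labels=_labels)
    print("lm loss:", float(tf.reduce_mean(_out.loss)), "logits shape:", _out.logits.shape)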
|
import sys
sys.path.append('../../../tests')
import yaml
from functools import partial
import time
import random
import string
import ensure
from textwrap import dedent
from ensure import release
from ensure import cluster
from ensure import machinedeployment
from ensure import kubeadmconfig
from ensure import kubeadmconfig_controlplane
from ensure import kubeadmconfig_with_labels
from ensure import kubeadmconfig_with_role_labels
from ensure import kubeadmconfig_with_kubelet_args
from ensure import kubeadm_control_plane
from ensure import kubeadmconfig_controlplane
from ensure import kubeadmconfig_with_files
from ensure import kubeadmconfig_with_audit_file
from ensure import fetch_policies
from ensure import run_pod_from_registries
import pytest
from pytest_kube import forward_requests, wait_for_rollout, app_template
import logging
LOGGER = logging.getLogger(__name__)
@pytest.mark.smoke
def test_kubeadmconfig_policy_controlplane(kubeadmconfig_controlplane) -> None:
"""
test_kubeadmconfig_policy_controlplane tests defaulting of a KubeadmConfig for a control plane where all required values are empty strings.
:param kubeadmconfig_controlplane: KubeadmConfig CR which is empty.
"""
assert kubeadmconfig_controlplane['metadata']['labels']['cluster.x-k8s.io/watch-filter'] == ensure.watch_label
assert kubeadmconfig_controlplane['metadata']['labels']['cluster.x-k8s.io/control-plane'] == ""
@pytest.mark.smoke
def test_kubeadmconfig_auditpolicy(kubeadmconfig_with_files) -> None:
"""
test_kubeadmconfig_auditpolicy tests defaulting of a kubeadmconfig with audit policy details
:param kubeadmconfig_with_files: KubeadmConfig CR which includes some existing files
"""
found = False
for file in kubeadmconfig_with_files['spec']['files']:
if file['path'] == "/etc/kubernetes/policies/audit-policy.yaml":
found = True
assert found == True
@pytest.mark.smoke
def test_kubeadmconfig_audit_file(kubeadmconfig_with_audit_file) -> None:
"""
test_kubeadmconfig_audit_file tests defaulting of a kubeadmconfig with audit policy details
:param kubeadmconfig_with_audit_file: KubeadmConfig CR which includes an existing audit file
"""
assert len(kubeadmconfig_with_audit_file['spec']['files']) == 1
@pytest.mark.smoke
def test_kyverno_policy(fetch_policies) -> None:
"""
test_kyverno_policy tests that the policy is present
"""
found = False
for policy in fetch_policies['items']:
LOGGER.info(f"Policy {policy["metadata"]["name"]} is present in the cluster")
if policy['metadata']['name'] == "restrict-image-registries":
found = True
assert found == True
@pytest.mark.smoke
def test_kyverno_policy_reports(run_pod_from_registries) -> None:
"""
test_kyverno_policy_reports tests the restrict-image-registries policy
:param run_pod_from_registries: Pods with containers from inside and outside GS registries
"""
bad_registry_found = False
good_registry_found = False
if len(run_pod_from_registries['items']) == 0:
LOGGER.warning("No policy reports present on the cluster")
for report in run_pod_from_registries['items']:
LOGGER.info(f"Policy report {report["metadata"]["name"]} is present on the cluster")
for policy_report in report['results']:
# Look for PolicyReports from the `restrict-image-registries` policy
if policy_report['policy'] == "restrict-image-registries":
for resource in policy_report['resources']:
LOGGER.info(f"PolicyReport for Policy {policy_report["policy"]} for resource {resource["name"]} is present on the cluster")
# Check for the Pod with bad registries and verify that it has a fail result
if resource['name'] == "pod-outside-gs-registries":
if policy_report['result'] == "fail":
bad_registry_found = True
break
else:
LOGGER.warning(f"PolicyReport for {resource["name"]} is present but result is not correct")
# Check for the Pod with good registries and verify that it has a pass result
if resource['name'] == "pod-inside-gs-registries":
if policy_report['result'] == "pass":
good_registry_found = True
break
else:
LOGGER.warning(f"PolicyReport for {resource["name"]} is present but result is not correct")
assert (bad_registry_found == True and good_registry_found == True)
|
import sys
sys.path.append('../../../tests')
import yaml
from functools import partial
import time
import random
import string
import ensure
from textwrap import dedent
from ensure import release
from ensure import cluster
from ensure import machinedeployment
from ensure import kubeadmconfig
from ensure import kubeadmconfig_controlplane
from ensure import kubeadmconfig_with_labels
from ensure import kubeadmconfig_with_role_labels
from ensure import kubeadmconfig_with_kubelet_args
from ensure import kubeadm_control_plane
from ensure import kubeadmconfig_controlplane
from ensure import kubeadmconfig_with_files
from ensure import kubeadmconfig_with_audit_file
from ensure import fetch_policies
from ensure import run_pod_from_registries
import pytest
from pytest_kube import forward_requests, wait_for_rollout, app_template
import logging
LOGGER = logging.getLogger(__name__)
@pytest.mark.smoke
def test_kubeadmconfig_policy_controlplane(kubeadmconfig_controlplane) -> None:
"""
test_kubeadmconfig_policy_controlplane tests defaulting of a KubeadmConfig for a control plane where all required values are empty strings.
:param kubeadmconfig_controlplane: KubeadmConfig CR which is empty.
"""
assert kubeadmconfig_controlplane['metadata']['labels']['cluster.x-k8s.io/watch-filter'] == ensure.watch_label
assert kubeadmconfig_controlplane['metadata']['labels']['cluster.x-k8s.io/control-plane'] == ""
@pytest.mark.smoke
def test_kubeadmconfig_auditpolicy(kubeadmconfig_with_files) -> None:
"""
test_kubeadmconfig_auditpolicy tests defaulting of a kubeadmconfig with audit policy details
:param kubeadmconfig_with_files: KubeadmConfig CR which includes some existing files
"""
found = False
for file in kubeadmconfig_with_files['spec']['files']:
if file['path'] == "/etc/kubernetes/policies/audit-policy.yaml":
found = True
assert found == True
@pytest.mark.smoke
def test_kubeadmconfig_auditpolicy(kubeadmconfig_with_audit_file) -> None:
"""
test_kubeadmconfig_auditpolicy tests defaulting of a kubeadmconfig with audit policy details
:param kubeadmconfig_with_audit_file: KubeadmConfig CR which includes an existing audit file
"""
assert len(kubeadmconfig_with_audit_file['spec']['files']) == 1
@pytest.mark.smoke
def test_kyverno_policy(fetch_policies) -> None:
"""
test_kyverno_policy tests that the policy is present
"""
found = False
for policy in fetch_policies['items']:
LOGGER.info(f"Policy {policy['metadata']['name']} is present in the cluster")
if policy['metadata']['name'] == "restrict-image-registries":
found = True
assert found == True
@pytest.mark.smoke
def test_kyverno_policy_reports(run_pod_from_registries) -> None:
"""
test_kyverno_policy_reports tests the restrict-image-registries policy
:param run_pod_from_registries: Pods with containers from inside and outside GS registries
"""
bad_registry_found = False
good_registry_found = False
if len(run_pod_from_registries['items']) == 0:
LOGGER.warning("No policy reports present on the cluster")
for report in run_pod_from_registries['items']:
LOGGER.info(f"Policy report {report['metadata']['name']} is present on the cluster")
for policy_report in report['results']:
# Look for PolicyReports from the `restrict-image-registries` policy
if policy_report['policy'] == "restrict-image-registries":
for resource in policy_report['resources']:
LOGGER.info(f"PolicyReport for Policy {policy_report['policy']} for resource {resource['name']} is present on the cluster")
# Check for the Pod with bad registries and verify that it has a fail result
if resource['name'] == "pod-outside-gs-registries":
if policy_report['result'] == "fail":
bad_registry_found = True
break
else:
LOGGER.warning(f"PolicyReport for {resource['name']} is present but result is not correct")
# Check for the Pod with good registries and verify that it has a pass result
if resource['name'] == "pod-inside-gs-registries":
if policy_report['result'] == "pass":
good_registry_found = True
break
else:
LOGGER.warning(f"PolicyReport for {resource['name']} is present but result is not correct")
assert (bad_registry_found == True and good_registry_found == True)
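# --- Hedged sketch (not part of the original test module) ----------------------
# The `fetch_policies` and `run_pod_from_registries` fixtures come from the
# `ensure` helpers imported above; their implementation is not shown here. As a
# rough illustration only, a PolicyReport listing could be fetched with the
# kubernetes Python client roughly as below. The API group/version
# `wgpolicyk8s.io/v1alpha2` and a reachable kubeconfig are assumptions, not
# something taken from this module.
def _list_policy_reports_sketch() -> dict:
    from kubernetes import client, config  # assumption: kubernetes client installed

    config.load_kube_config()  # assumption: kubeconfig points at the test cluster
    api = client.CustomObjectsApi()
    # Returns a dict with an 'items' list, matching how the tests above iterate reports.
    return api.list_cluster_custom_object(
        group="wgpolicyk8s.io", version="v1alpha2", plural="policyreports"
    )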
|
import os
import subprocess
import pytest
##################################
@pytest.mark.eda
@pytest.mark.quick
def test_icebreaker(scroot):
'''Basic FPGA test: build the Blinky example by running `sc` as a command-line app.
'''
# Use subprocess to test running the `sc` scripts as a command-line program.
# Pipe stdout to /dev/null to avoid printing to the terminal.
blinky_ex_dir = os.path.join(scroot, 'examples', 'blinky')
# Run the build command for an iCE40 board.
subprocess.run(['sc',
os.path.join(blinky_ex_dir, 'blinky.v'),
'-read_pcf', f"import 0 {os.path.join(blinky_ex_dir, "icebreaker.pcf")}",
'-design', 'blinky',
'-target', 'fpgaflow_ice40up5k-sg48'])
# Verify that a bitstream was generated
assert os.path.isfile('build/blinky/job0/bitstream/0/outputs/blinky.bit')
if __name__ == "__main__":
from tests.fixtures import scroot
test_icebreaker(scroot())
|
import os
import subprocess
import pytest
##################################
@pytest.mark.eda
@pytest.mark.quick
def test_icebreaker(scroot):
'''Basic FPGA test: build the Blinky example by running `sc` as a command-line app.
'''
# Use subprocess to test running the `sc` scripts as a command-line program.
# Pipe stdout to /dev/null to avoid printing to the terminal.
blinky_ex_dir = os.path.join(scroot, 'examples', 'blinky')
# Run the build command for an iCE40 board.
subprocess.run(['sc',
os.path.join(blinky_ex_dir, 'blinky.v'),
'-read_pcf', f"import 0 {os.path.join(blinky_ex_dir, 'icebreaker.pcf')}",
'-design', 'blinky',
'-target', 'fpgaflow_ice40up5k-sg48'])
# Verify that a bitstream was generated
assert os.path.isfile('build/blinky/job0/bitstream/0/outputs/blinky.bit')
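# --- Hedged sketch (not used by the test above) ---------------------------------
# The comment in test_icebreaker mentions piping stdout to /dev/null, but the
# subprocess.run() call above does not pass any redirection. If the build output
# should actually be silenced, a minimal variant could look like this; `cmd` is
# whatever argument list would normally be given to subprocess.run().
def _run_quietly_sketch(cmd):
    # DEVNULL discards the child's stdout; stderr stays visible for debugging.
    return subprocess.run(cmd, stdout=subprocess.DEVNULL)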
if __name__ == "__main__":
from tests.fixtures import scroot
test_icebreaker(scroot())
|
import argparse
from typing import List, Dict
import requests
from gamestonk_terminal import config_terminal as cfg
from gamestonk_terminal.helper_funcs import (
parse_known_args_and_warn,
)
def get_sentiment_stats(ticker: str) -> Dict:
"""Get sentiment stats
Parameters
----------
ticker : str
Ticker to get sentiment stats
Returns
-------
Dict
Get sentiment stats
"""
response = requests.get(
f"https://finnhub.io/api/v1/news-sentiment?symbol={ticker}&token={cfg.API_FINNHUB_KEY}"
)
if response.status_code == 200:
return response.json()
return {}
def sentiment_stats(other_args: List[str], ticker: str):
"""Sentiment stats which displays buzz, news score, articles last week, articles weekly average,
bullish vs bearish percentages, sector average bullish percentage, and sector average news score
Parameters
----------
other_args : List[str]
Command line arguments to be processed with argparse
ticker : str
Ticker to get sentiment stats
"""
parser = argparse.ArgumentParser(
add_help=False,
prog="stats",
description="""
Sentiment stats which displays buzz, news score, articles last week, articles weekly average,
bullish vs bearish percentages, sector average bullish percentage, and sector average news score.
[Source: https://finnhub.io]
""",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
d_stats = get_sentiment_stats(ticker)
if d_stats:
print(f"Buzz: {round(100*d_stats["buzz"]["buzz"],2)} %")
print(f"News Score: {round(100*d_stats["companyNewsScore"],2)} %")
print("")
print(f"Articles Last Week: {d_stats["buzz"]["articlesInLastWeek"]}")
print(f"Articles Weekly Average: {d_stats["buzz"]["weeklyAverage"]}")
print("")
print(f"Bullish: {round(100*d_stats["sentiment"]["bullishPercent"],2)} %")
print(f"Bearish: {round(100*d_stats["sentiment"]["bearishPercent"],2)} %")
print("")
print(
f"Sector Average Bullish: {round(100*d_stats["sectorAverageBullishPercent"],2)} %"
)
print(
f"Sector Average News Score: {round(100*d_stats["sectorAverageNewsScore"],2)} %"
)
else:
print("No sentiment stats found.")
print("")
except Exception as e:
print(e, "\n")
|
import argparse
from typing import List, Dict
import requests
from gamestonk_terminal import config_terminal as cfg
from gamestonk_terminal.helper_funcs import (
parse_known_args_and_warn,
)
def get_sentiment_stats(ticker: str) -> Dict:
"""Get sentiment stats
Parameters
----------
ticker : str
Ticker to get sentiment stats
Returns
-------
Dict
Get sentiment stats
"""
response = requests.get(
f"https://finnhub.io/api/v1/news-sentiment?symbol={ticker}&token={cfg.API_FINNHUB_KEY}"
)
if response.status_code == 200:
return response.json()
return {}
def sentiment_stats(other_args: List[str], ticker: str):
"""Sentiment stats which displays buzz, news score, articles last week, articles weekly average,
bullish vs bearish percentages, sector average bullish percentage, and sector average news score
Parameters
----------
other_args : List[str]
Command line arguments to be processed with argparse
ticker : str
Ticker to get sentiment stats
"""
parser = argparse.ArgumentParser(
add_help=False,
prog="stats",
description="""
Sentiment stats which displays buzz, news score, articles last week, articles weekly average,
bullish vs bearish percentages, sector average bullish percentage, and sector average news score.
[Source: https://finnhub.io]
""",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
d_stats = get_sentiment_stats(ticker)
if d_stats:
print(f"Buzz: {round(100*d_stats['buzz']['buzz'],2)} %")
print(f"News Score: {round(100*d_stats['companyNewsScore'],2)} %")
print("")
print(f"Articles Last Week: {d_stats['buzz']['articlesInLastWeek']}")
print(f"Articles Weekly Average: {d_stats['buzz']['weeklyAverage']}")
print("")
print(f"Bullish: {round(100*d_stats['sentiment']['bullishPercent'],2)} %")
print(f"Bearish: {round(100*d_stats['sentiment']['bearishPercent'],2)} %")
print("")
print(
f"Sector Average Bullish: {round(100*d_stats['sectorAverageBullishPercent'],2)} %"
)
print(
f"Sector Average News Score: {round(100*d_stats['sectorAverageNewsScore'],2)} %"
)
else:
print("No sentiment stats found.")
print("")
except Exception as e:
print(e, "\n")
|
import collections
from datetime import timedelta
import functools
import gc
import json
import operator
import pickle
import re
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
FrozenSet,
Hashable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
)
import warnings
import weakref
import numpy as np
from pandas._config import config
from pandas._libs import Timestamp, iNaT, lib
from pandas._typing import (
Axis,
FilePathOrBuffer,
FrameOrSeries,
JSONSerializable,
Label,
Level,
Renamer,
)
from pandas.compat import set_function_name
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
Appender,
Substitution,
doc,
rewrite_axis_style_signature,
)
from pandas.util._validators import (
validate_bool_kwarg,
validate_fillna_kwargs,
validate_percentile,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_dict_like,
is_extension_array_dtype,
is_float,
is_integer,
is_list_like,
is_number,
is_numeric_dtype,
is_object_dtype,
is_re_compilable,
is_scalar,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
import pandas as pd
from pandas.core import missing, nanops
import pandas.core.algorithms as algos
from pandas.core.base import PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.indexes.api import (
Index,
InvalidIndexError,
MultiIndex,
RangeIndex,
ensure_index,
)
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import Period, PeriodIndex
import pandas.core.indexing as indexing
from pandas.core.internals import BlockManager
from pandas.core.missing import find_valid_index
from pandas.core.ops import _align_method_FRAME
from pandas.io.formats import format as fmt
from pandas.io.formats.format import DataFrameFormatter, format_percentiles
from pandas.io.formats.printing import pprint_thing
from pandas.tseries.frequencies import to_offset
if TYPE_CHECKING:
from pandas.core.resample import Resampler
# goal is to be able to define the docs close to function, while still being
# able to share
_shared_docs: Dict[str, str] = dict()
_shared_doc_kwargs = dict(
axes="keywords for axes",
klass="Series/DataFrame",
axes_single_arg="int or labels for object",
args_transpose="axes to permute (int or label for object)",
optional_by="""
by : str or list of str
Name or list of names to sort by""",
)
def _single_replace(self, to_replace, method, inplace, limit):
"""
Replaces values in a Series using the fill method specified when no
replacement value is given in the replace method
"""
if self.ndim != 1:
raise TypeError(
f"cannot replace {to_replace} with method {method} on a "
f"{type(self).__name__}"
)
orig_dtype = self.dtype
result = self if inplace else self.copy()
fill_f = missing.get_fill_func(method)
mask = missing.mask_missing(result.values, to_replace)
values = fill_f(result.values, limit=limit, mask=mask)
if values.dtype == orig_dtype and inplace:
return
result = pd.Series(values, index=self.index, dtype=self.dtype).__finalize__(self)
if inplace:
self._update_inplace(result._data)
return
return result
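# Hedged illustration (comment only, not in the original source): _single_replace
# backs Series.replace(..., method=...), e.g. padding over the matched value:
#   >>> pd.Series([0, 1, 2]).replace(1, method="pad")
#   0    0
#   1    0
#   2    2
#   dtype: int64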
bool_t = bool # Need alias because NDFrame has def bool:
class NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin):
"""
    N-dimensional analogue of DataFrame. Store multi-dimensional data in a
    size-mutable, labeled data structure
Parameters
----------
data : BlockManager
axes : list
copy : bool, default False
"""
_internal_names: List[str] = [
"_data",
"_cacher",
"_item_cache",
"_cache",
"_is_copy",
"_subtyp",
"_name",
"_index",
"_default_kind",
"_default_fill_value",
"_metadata",
"__array_struct__",
"__array_interface__",
]
_internal_names_set: Set[str] = set(_internal_names)
_accessors: Set[str] = set()
_deprecations: FrozenSet[str] = frozenset(["get_values"])
_metadata: List[str] = []
_is_copy = None
_data: BlockManager
_attrs: Dict[Optional[Hashable], Any]
_typ: str
# ----------------------------------------------------------------------
# Constructors
def __init__(
self,
data: BlockManager,
copy: bool = False,
attrs: Optional[Mapping[Optional[Hashable], Any]] = None,
):
# copy kwarg is retained for mypy compat, is not used
object.__setattr__(self, "_is_copy", None)
object.__setattr__(self, "_data", data)
object.__setattr__(self, "_item_cache", {})
if attrs is None:
attrs = {}
else:
attrs = dict(attrs)
object.__setattr__(self, "_attrs", attrs)
@classmethod
def _init_mgr(cls, mgr, axes=None, dtype=None, copy=False):
""" passed a manager and a axes dict """
for a, axe in axes.items():
if axe is not None:
mgr = mgr.reindex_axis(
axe, axis=cls._get_block_manager_axis(a), copy=False
)
# make a copy if explicitly requested
if copy:
mgr = mgr.copy()
if dtype is not None:
# avoid further copies if we can
if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:
mgr = mgr.astype(dtype=dtype)
return mgr
# ----------------------------------------------------------------------
@property
def attrs(self) -> Dict[Optional[Hashable], Any]:
"""
Dictionary of global attributes on this object.
.. warning::
attrs is experimental and may change without warning.
"""
if self._attrs is None:
self._attrs = {}
return self._attrs
@attrs.setter
def attrs(self, value: Mapping[Optional[Hashable], Any]) -> None:
self._attrs = dict(value)
@classmethod
def _validate_dtype(cls, dtype):
""" validate the passed dtype """
if dtype is not None:
dtype = pandas_dtype(dtype)
# a compound dtype
if dtype.kind == "V":
raise NotImplementedError(
"compound dtypes are not implemented "
f"in the {cls.__name__} constructor"
)
return dtype
# ----------------------------------------------------------------------
# Construction
@property
def _constructor(self: FrameOrSeries) -> Type[FrameOrSeries]:
"""
Used when a manipulation result has the same dimensions as the
original.
"""
raise AbstractMethodError(self)
@property
def _constructor_sliced(self):
"""
Used when a manipulation result has one lower dimension(s) as the
original, such as DataFrame single columns slicing.
"""
raise AbstractMethodError(self)
@property
def _constructor_expanddim(self):
"""
Used when a manipulation result has one higher dimension as the
original, such as Series.to_frame()
"""
raise NotImplementedError
# ----------------------------------------------------------------------
# Axis
_AXIS_ALIASES = {"rows": 0}
_AXIS_IALIASES = {0: "rows"}
_stat_axis_number = 0
_stat_axis_name = "index"
_ix = None
_AXIS_ORDERS: List[str]
_AXIS_NUMBERS: Dict[str, int]
_AXIS_NAMES: Dict[int, str]
_AXIS_REVERSED: bool
_info_axis_number: int
_info_axis_name: str
_AXIS_LEN: int
def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d
@classmethod
def _construct_axes_from_arguments(
cls, args, kwargs, require_all: bool = False, sentinel=None
):
"""
        Construct and return axes if supplied in args/kwargs.
        If require_all, raise if all axis arguments are not supplied.
        Return a tuple of (axes, kwargs).
sentinel specifies the default parameter when an axis is not
supplied; useful to distinguish when a user explicitly passes None
in scenarios where None has special meaning.
"""
# construct the args
args = list(args)
for a in cls._AXIS_ORDERS:
# look for a argument by position
if a not in kwargs:
try:
kwargs[a] = args.pop(0)
except IndexError as err:
if require_all:
raise TypeError(
"not enough/duplicate arguments specified!"
) from err
axes = {a: kwargs.pop(a, sentinel) for a in cls._AXIS_ORDERS}
return axes, kwargs
@classmethod
def _get_axis_number(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if is_integer(axis):
if axis in cls._AXIS_NAMES:
return axis
else:
try:
return cls._AXIS_NUMBERS[axis]
except KeyError:
pass
raise ValueError(f"No axis named {axis} for object type {cls}")
@classmethod
def _get_axis_name(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if isinstance(axis, str):
if axis in cls._AXIS_NUMBERS:
return axis
else:
try:
return cls._AXIS_NAMES[axis]
except KeyError:
pass
raise ValueError(f"No axis named {axis} for object type {cls}")
def _get_axis(self, axis):
name = self._get_axis_name(axis)
return getattr(self, name)
@classmethod
def _get_block_manager_axis(cls, axis):
"""Map the axis to the block_manager axis."""
axis = cls._get_axis_number(axis)
if cls._AXIS_REVERSED:
m = cls._AXIS_LEN - 1
return m - axis
return axis
def _get_axis_resolvers(self, axis: str) -> Dict[str, ABCSeries]:
# index or columns
axis_index = getattr(self, axis)
d = dict()
prefix = axis[0]
for i, name in enumerate(axis_index.names):
if name is not None:
key = level = name
else:
# prefix with 'i' or 'c' depending on the input axis
# e.g., you must do ilevel_0 for the 0th level of an unnamed
                # multiindex
key = f"{prefix}level_{i}"
level = i
level_values = axis_index.get_level_values(level)
s = level_values.to_series()
s.index = axis_index
d[key] = s
# put the index/columns itself in the dict
if isinstance(axis_index, MultiIndex):
dindex = axis_index
else:
dindex = axis_index.to_series()
d[axis] = dindex
return d
def _get_index_resolvers(self) -> Dict[str, ABCSeries]:
from pandas.core.computation.parsing import clean_column_name
d: Dict[str, ABCSeries] = {}
for axis_name in self._AXIS_ORDERS:
d.update(self._get_axis_resolvers(axis_name))
return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)}
def _get_cleaned_column_resolvers(self) -> Dict[str, ABCSeries]:
"""
Return the special character free column resolvers of a dataframe.
Column names with special characters are 'cleaned up' so that they can
be referred to by backtick quoting.
Used in :meth:`DataFrame.eval`.
"""
from pandas.core.computation.parsing import clean_column_name
if isinstance(self, ABCSeries):
return {clean_column_name(self.name): self}
return {
clean_column_name(k): v for k, v in self.items() if not isinstance(k, int)
}
@property
def _info_axis(self):
return getattr(self, self._info_axis_name)
@property
def _stat_axis(self):
return getattr(self, self._stat_axis_name)
@property
def shape(self) -> Tuple[int, ...]:
"""
Return a tuple of axis dimensions
"""
return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)
@property
def axes(self) -> List[Index]:
"""
Return index label(s) of the internal NDFrame
"""
# we do it this way because if we have reversed axes, then
        # the block manager shows them reversed
return [self._get_axis(a) for a in self._AXIS_ORDERS]
@property
def ndim(self) -> int:
"""
Return an int representing the number of axes / array dimensions.
Return 1 if Series. Otherwise return 2 if DataFrame.
See Also
--------
ndarray.ndim : Number of array dimensions.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.ndim
1
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.ndim
2
"""
return self._data.ndim
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
See Also
--------
ndarray.size : Number of elements in the array.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.size
3
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.size
4
"""
return np.prod(self.shape)
@property
def _selected_obj(self: FrameOrSeries) -> FrameOrSeries:
""" internal compat with SelectionMixin """
return self
@property
def _obj_with_exclusions(self: FrameOrSeries) -> FrameOrSeries:
""" internal compat with SelectionMixin """
return self
def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
"""
Assign desired index to given axis.
Indexes for%(extended_summary_sub)s row labels can be changed by assigning
a list-like or Index.
.. versionchanged:: 0.21.0
The signature is now `labels` and `axis`, consistent with
the rest of pandas API. Previously, the `axis` and `labels`
arguments were respectively the first and second positional
arguments.
Parameters
----------
labels : list-like, Index
The values for the new index.
axis : %(axes_single_arg)s, default 0
The axis to update. The value 0 identifies the rows%(axis_description_sub)s.
inplace : bool, default False
Whether to return a new %(klass)s instance.
Returns
-------
renamed : %(klass)s or None
An object of type %(klass)s if inplace=False, None otherwise.
See Also
--------
%(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s.
"""
if inplace:
setattr(self, self._get_axis_name(axis), labels)
else:
obj = self.copy()
obj.set_axis(labels, axis=axis, inplace=True)
return obj
def _set_axis(self, axis: int, labels: Index) -> None:
labels = ensure_index(labels)
self._data.set_axis(axis, labels)
self._clear_item_cache()
def swapaxes(self: FrameOrSeries, axis1, axis2, copy=True) -> FrameOrSeries:
"""
Interchange axes and swap values axes appropriately.
Returns
-------
y : same as input
"""
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if i == j:
if copy:
return self.copy()
return self
mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN))
new_values = self.values.swapaxes(i, j)
if copy:
new_values = new_values.copy()
return self._constructor(new_values, *new_axes).__finalize__(self)
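    # Hedged illustration (comment only, not in the original source): for a 2-D
    # frame, swapping the two axes is effectively a transpose, e.g.
    #   >>> pd.DataFrame({"a": [1, 2]}).swapaxes(0, 1)
    #      0  1
    #   a  1  2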
def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries:
"""
Return DataFrame with requested index / column level(s) removed.
.. versionadded:: 0.24.0
Parameters
----------
level : int, str, or list-like
If a string is given, must be the name of a level
If list-like, elements must be names or positional indexes
of levels.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis along which the level(s) is removed:
            * 0 or 'index': remove the level(s) from the index.
            * 1 or 'columns': remove the level(s) from the columns.
Returns
-------
DataFrame
DataFrame with requested index / column level(s) removed.
Examples
--------
>>> df = pd.DataFrame([
... [1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12]
... ]).set_index([0, 1]).rename_axis(['a', 'b'])
>>> df.columns = pd.MultiIndex.from_tuples([
... ('c', 'e'), ('d', 'f')
... ], names=['level_1', 'level_2'])
>>> df
level_1 c d
level_2 e f
a b
1 2 3 4
5 6 7 8
9 10 11 12
>>> df.droplevel('a')
level_1 c d
level_2 e f
b
2 3 4
6 7 8
10 11 12
>>> df.droplevel('level_2', axis=1)
level_1 c d
a b
1 2 3 4
5 6 7 8
9 10 11 12
"""
labels = self._get_axis(axis)
new_labels = labels.droplevel(level)
result = self.set_axis(new_labels, axis=axis, inplace=False)
return result
def pop(self: FrameOrSeries, item) -> FrameOrSeries:
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
"""
result = self[item]
del self[item]
try:
result._reset_cacher()
except AttributeError:
pass
return result
def squeeze(self, axis=None):
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = pd.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_0a = df.loc[df.index < 1, ['a']]
>>> df_0a
a
0 1
Squeezing the rows produces a single scalar Series:
>>> df_0a.squeeze('rows')
a 1
Name: 0, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_0a.squeeze()
1
"""
axis = self._AXIS_NAMES if axis is None else (self._get_axis_number(axis),)
return self.iloc[
tuple(
0 if i in axis and len(a) == 1 else slice(None)
for i, a in enumerate(self.axes)
)
]
# ----------------------------------------------------------------------
# Rename
def rename(
self: FrameOrSeries,
mapper: Optional[Renamer] = None,
*,
index: Optional[Renamer] = None,
columns: Optional[Renamer] = None,
axis: Optional[Axis] = None,
copy: bool = True,
inplace: bool = False,
level: Optional[Level] = None,
errors: str = "ignore",
) -> Optional[FrameOrSeries]:
"""
Alter axes input function or functions. Function / dict values must be
unique (1-to-1). Labels not contained in a dict / Series will be left
as-is. Extra labels listed don't throw an error. Alternatively, change
``Series.name`` with a scalar value (Series only).
Parameters
----------
%(axes)s : scalar, list-like, dict-like or function, optional
Scalar or list-like will alter the ``Series.name`` attribute,
and raise on DataFrame.
dict-like or functions are transformations to apply to
that axis' values
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Whether to return a new %(klass)s. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
renamed : %(klass)s (new object)
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
NDFrame.rename_axis
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64
Since ``DataFrame`` doesn't have a ``.name`` attribute,
only mapping-type arguments are allowed.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(2)
Traceback (most recent call last):
...
TypeError: 'int' object is not callable
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
See the :ref:`user guide <basics.rename>` for more.
"""
if mapper is None and index is None and columns is None:
raise TypeError("must pass an index to rename")
if index is not None or columns is not None:
if axis is not None:
raise TypeError(
"Cannot specify both 'axis' and any of 'index' or 'columns'"
)
elif mapper is not None:
raise TypeError(
"Cannot specify both 'mapper' and any of 'index' or 'columns'"
)
else:
# use the mapper argument
if axis and self._get_axis_number(axis) == 1:
columns = mapper
else:
index = mapper
result = self if inplace else self.copy(deep=copy)
for axis_no, replacements in enumerate((index, columns)):
if replacements is None:
continue
ax = self._get_axis(axis_no)
baxis = self._get_block_manager_axis(axis_no)
f = com.get_rename_function(replacements)
if level is not None:
level = ax._get_level_number(level)
# GH 13473
if not callable(replacements):
indexer = ax.get_indexer_for(replacements)
if errors == "raise" and len(indexer[indexer == -1]):
missing_labels = [
label
for index, label in enumerate(replacements)
if indexer[index] == -1
]
raise KeyError(f"{missing_labels} not found in axis")
result._data = result._data.rename_axis(
f, axis=baxis, copy=copy, level=level
)
result._clear_item_cache()
if inplace:
self._update_inplace(result._data)
return None
else:
return result.__finalize__(self)
@rewrite_axis_style_signature("mapper", [("copy", True), ("inplace", False)])
def rename_axis(self, mapper=lib.no_default, **kwargs):
"""
Set the name of the axis for the index or columns.
Parameters
----------
mapper : scalar, list-like, optional
Value to set the axis name attribute.
index, columns : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or functions transformations to
apply to that axis' values.
Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index``
and/or ``columns``.
.. versionchanged:: 0.24.0
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to rename.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Modifies the object directly, instead of creating a new Series
or DataFrame.
Returns
-------
Series, DataFrame, or None
The same type as the caller or None if `inplace` is True.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Notes
-----
``DataFrame.rename_axis`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
The first calling convention will only modify the names of
the index and/or the names of the Index object that is the columns.
In this case, the parameter ``copy`` is ignored.
        The second calling convention will modify the names of the
        corresponding index if mapper is a list or a scalar.
However, if mapper is dict-like or a function, it will use the
deprecated behavior of modifying the axis *labels*.
We *highly* recommend using keyword arguments to clarify your
intent.
Examples
--------
**Series**
>>> s = pd.Series(["dog", "cat", "monkey"])
>>> s
0 dog
1 cat
2 monkey
dtype: object
>>> s.rename_axis("animal")
animal
0 dog
1 cat
2 monkey
dtype: object
**DataFrame**
>>> df = pd.DataFrame({"num_legs": [4, 4, 2],
... "num_arms": [0, 0, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs num_arms
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("animal")
>>> df
num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("limbs", axis="columns")
>>> df
limbs num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
**MultiIndex**
>>> df.index = pd.MultiIndex.from_product([['mammal'],
... ['dog', 'cat', 'monkey']],
... names=['type', 'name'])
>>> df
limbs num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(index={'type': 'class'})
limbs num_legs num_arms
class name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(columns=str.upper)
LIMBS num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
"""
axes, kwargs = self._construct_axes_from_arguments(
(), kwargs, sentinel=lib.no_default
)
copy = kwargs.pop("copy", True)
inplace = kwargs.pop("inplace", False)
axis = kwargs.pop("axis", 0)
if axis is not None:
axis = self._get_axis_number(axis)
if kwargs:
raise TypeError(
"rename_axis() got an unexpected keyword "
f'argument "{list(kwargs.keys())[0]}"'
)
inplace = validate_bool_kwarg(inplace, "inplace")
if mapper is not lib.no_default:
# Use v0.23 behavior if a scalar or list
non_mapper = is_scalar(mapper) or (
is_list_like(mapper) and not is_dict_like(mapper)
)
if non_mapper:
return self._set_axis_name(mapper, axis=axis, inplace=inplace)
else:
raise ValueError("Use `.rename` to alter labels with a mapper.")
else:
# Use new behavior. Means that index and/or columns
# is specified
result = self if inplace else self.copy(deep=copy)
for axis in range(self._AXIS_LEN):
v = axes.get(self._AXIS_NAMES[axis])
if v is lib.no_default:
continue
non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v))
if non_mapper:
newnames = v
else:
f = com.get_rename_function(v)
curnames = self._get_axis(axis).names
newnames = [f(name) for name in curnames]
result._set_axis_name(newnames, axis=axis, inplace=True)
if not inplace:
return result
def _set_axis_name(self, name, axis=0, inplace=False):
"""
Set the name(s) of the axis.
Parameters
----------
name : str or list of str
Name(s) to set.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to set the label. The value 0 or 'index' specifies index,
and the value 1 or 'columns' specifies columns.
inplace : bool, default False
If `True`, do operation inplace and return None.
.. versionadded:: 0.21.0
Returns
-------
Series, DataFrame, or None
The same type as the caller or `None` if `inplace` is `True`.
See Also
--------
DataFrame.rename : Alter the axis labels of :class:`DataFrame`.
Series.rename : Alter the index labels or set the index name
of :class:`Series`.
Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.
Examples
--------
>>> df = pd.DataFrame({"num_legs": [4, 4, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs
dog 4
cat 4
monkey 2
>>> df._set_axis_name("animal")
num_legs
animal
dog 4
cat 4
monkey 2
>>> df.index = pd.MultiIndex.from_product(
... [["mammal"], ['dog', 'cat', 'monkey']])
>>> df._set_axis_name(["type", "name"])
legs
type name
mammal dog 4
cat 4
monkey 2
"""
axis = self._get_axis_number(axis)
idx = self._get_axis(axis).set_names(name)
inplace = validate_bool_kwarg(inplace, "inplace")
renamed = self if inplace else self.copy()
renamed.set_axis(idx, axis=axis, inplace=True)
if not inplace:
return renamed
# ----------------------------------------------------------------------
# Comparison Methods
def _indexed_same(self, other) -> bool:
return all(
self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS
)
def equals(self, other):
"""
Test whether two objects contain the same elements.
This function allows two Series or DataFrames to be compared against
each other to see if they have the same shape and elements. NaNs in
the same location are considered equal. The column headers do not
need to have the same type, but the elements within the columns must
be the same dtype.
Parameters
----------
other : Series or DataFrame
The other Series or DataFrame to be compared with the first.
Returns
-------
bool
True if all elements are the same in both objects, False
otherwise.
See Also
--------
Series.eq : Compare two Series objects of the same length
and return a Series where each element is True if the element
in each Series is equal, False otherwise.
DataFrame.eq : Compare two DataFrame objects of the same shape and
return a DataFrame where each element is True if the respective
element in each DataFrame is equal, False otherwise.
testing.assert_series_equal : Raises an AssertionError if left and
right are not equal. Provides an easy interface to ignore
inequality in dtypes, indexes and precision among others.
testing.assert_frame_equal : Like assert_series_equal, but targets
DataFrames.
numpy.array_equal : Return True if two arrays have the same shape
and elements, False otherwise.
Notes
-----
This function requires that the elements have the same dtype as their
respective elements in the other Series or DataFrame. However, the
column labels do not need to have the same type, as long as they are
still considered equal.
Examples
--------
>>> df = pd.DataFrame({1: [10], 2: [20]})
>>> df
1 2
0 10 20
DataFrames df and exactly_equal have the same types and values for
their elements and column labels, which will return True.
>>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})
>>> exactly_equal
1 2
0 10 20
>>> df.equals(exactly_equal)
True
DataFrames df and different_column_type have the same element
types and values, but have different types for the column labels,
which will still return True.
>>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})
>>> different_column_type
1.0 2.0
0 10 20
>>> df.equals(different_column_type)
True
DataFrames df and different_data_type have different types for the
same values for their elements, and will return False even though
their column labels are the same values and types.
>>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})
>>> different_data_type
1 2
0 10.0 20.0
>>> df.equals(different_data_type)
False
"""
if not isinstance(other, self._constructor):
return False
return self._data.equals(other._data)
# -------------------------------------------------------------------------
# Unary Methods
def __neg__(self):
values = self._values
if is_bool_dtype(values):
arr = operator.inv(values)
elif (
is_numeric_dtype(values)
or is_timedelta64_dtype(values)
or is_object_dtype(values)
):
arr = operator.neg(values)
else:
raise TypeError(f"Unary negative expects numeric dtype, not {values.dtype}")
return self.__array_wrap__(arr)
def __pos__(self):
values = self._values
if is_bool_dtype(values):
arr = values
elif (
is_numeric_dtype(values)
or is_timedelta64_dtype(values)
or is_object_dtype(values)
):
arr = operator.pos(values)
else:
raise TypeError(f"Unary plus expects numeric dtype, not {values.dtype}")
return self.__array_wrap__(arr)
def __invert__(self):
if not self.size:
# inv fails with 0 len
return self
new_data = self._data.apply(operator.invert)
result = self._constructor(new_data).__finalize__(self)
return result
def __nonzero__(self):
raise ValueError(
f"The truth value of a {type(self).__name__} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
)
__bool__ = __nonzero__
def bool(self):
"""
Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean
Returns
-------
bool
Same single boolean value converted to bool type.
"""
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
return bool(v)
elif is_scalar(v):
raise ValueError(
"bool cannot act on a non-boolean single element "
f"{type(self).__name__}"
)
self.__nonzero__()
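    # Hedged illustration (comment only, not in the original source): bool() only
    # succeeds on a single boolean element, e.g.
    #   >>> pd.Series([True]).bool()
    #   True
    #   >>> pd.Series([1]).bool()  # raises ValueError: non-boolean single element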
def __abs__(self: FrameOrSeries) -> FrameOrSeries:
return self.abs()
def __round__(self: FrameOrSeries, decimals: int = 0) -> FrameOrSeries:
return self.round(decimals)
# -------------------------------------------------------------------------
# Label or Level Combination Helpers
#
# A collection of helper methods for DataFrame/Series operations that
# accept a combination of column/index labels and levels. All such
# operations should utilize/extend these methods when possible so that we
# have consistent precedence and validation logic throughout the library.
def _is_level_reference(self, key, axis=0):
"""
Test whether a key is a level reference for a given axis.
To be considered a level reference, `key` must be a string that:
- (axis=0): Matches the name of an index level and does NOT match
a column label.
- (axis=1): Matches the name of a column level and does NOT match
an index label.
Parameters
----------
key : str
Potential level name for the given axis
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_level : bool
"""
axis = self._get_axis_number(axis)
return (
key is not None
and is_hashable(key)
and key in self.axes[axis].names
and not self._is_label_reference(key, axis=axis)
)
def _is_label_reference(self, key, axis=0) -> bool_t:
"""
Test whether a key is a label reference for a given axis.
To be considered a label reference, `key` must be a string that:
- (axis=0): Matches a column label
- (axis=1): Matches an index label
Parameters
----------
key: str
Potential label name
axis: int, default 0
Axis perpendicular to the axis that labels are associated with
(0 means search for column labels, 1 means search for index labels)
Returns
-------
is_label: bool
"""
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
return (
key is not None
and is_hashable(key)
and any(key in self.axes[ax] for ax in other_axes)
)
def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t:
"""
Test whether a key is a label or level reference for a given axis.
To be considered either a label or a level reference, `key` must be a
string that:
- (axis=0): Matches a column label or an index level
- (axis=1): Matches an index label or a column level
Parameters
----------
key: str
Potential label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_label_or_level: bool
"""
return self._is_level_reference(key, axis=axis) or self._is_label_reference(
key, axis=axis
)
def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None:
"""
Check whether `key` is ambiguous.
By ambiguous, we mean that it matches both a level of the input
`axis` and a label of the other axis.
Parameters
----------
key: str or object
Label or level name.
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns).
Raises
------
ValueError: `key` is ambiguous
"""
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
if (
key is not None
and is_hashable(key)
and key in self.axes[axis].names
and any(key in self.axes[ax] for ax in other_axes)
):
# Build an informative and grammatical warning
level_article, level_type = (
("an", "index") if axis == 0 else ("a", "column")
)
label_article, label_type = (
("a", "column") if axis == 0 else ("an", "index")
)
msg = (
f"'{key}' is both {level_article} {level_type} level and "
f"{label_article} {label_type} label, which is ambiguous."
)
raise ValueError(msg)
def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:
"""
Return a 1-D array of values associated with `key`, a label or level
from the given `axis`.
Retrieval logic:
- (axis=0): Return column values if `key` matches a column label.
Otherwise return index level values if `key` matches an index
level.
- (axis=1): Return row values if `key` matches an index label.
Otherwise return column level values if 'key' matches a column
level
Parameters
----------
key: str
Label or level name.
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
values: np.ndarray
Raises
------
KeyError
if `key` matches neither a label nor a level
ValueError
if `key` matches multiple labels
FutureWarning
if `key` is ambiguous. This will become an ambiguity error in a
future version
"""
axis = self._get_axis_number(axis)
other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]
if self._is_label_reference(key, axis=axis):
self._check_label_or_level_ambiguity(key, axis=axis)
values = self.xs(key, axis=other_axes[0])._values
elif self._is_level_reference(key, axis=axis):
values = self.axes[axis].get_level_values(key)._values
else:
raise KeyError(key)
# Check for duplicates
if values.ndim > 1:
if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex):
multi_message = (
"\n"
"For a multi-index, the label must be a "
"tuple with elements corresponding to each level."
)
else:
multi_message = ""
label_axis_name = "column" if axis == 0 else "index"
raise ValueError(
(
f"The {label_axis_name} label '{key}' "
f"is not unique.{multi_message}"
)
)
return values
def _drop_labels_or_levels(self, keys, axis: int = 0):
"""
Drop labels and/or levels for the given `axis`.
For each key in `keys`:
- (axis=0): If key matches a column label then drop the column.
Otherwise if key matches an index level then drop the level.
- (axis=1): If key matches an index label then drop the row.
Otherwise if key matches a column level then drop the level.
Parameters
----------
keys: str or list of str
labels or levels to drop
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
dropped: DataFrame
Raises
------
ValueError
if any `keys` match neither a label nor a level
"""
axis = self._get_axis_number(axis)
# Validate keys
keys = com.maybe_make_list(keys)
invalid_keys = [
k for k in keys if not self._is_label_or_level_reference(k, axis=axis)
]
if invalid_keys:
raise ValueError(
(
"The following keys are not valid labels or "
f"levels for axis {axis}: {invalid_keys}"
)
)
# Compute levels and labels to drop
levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)]
labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)]
# Perform copy upfront and then use inplace operations below.
# This ensures that we always perform exactly one copy.
# ``copy`` and/or ``inplace`` options could be added in the future.
dropped = self.copy()
if axis == 0:
# Handle dropping index levels
if levels_to_drop:
dropped.reset_index(levels_to_drop, drop=True, inplace=True)
# Handle dropping columns labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=1, inplace=True)
else:
# Handle dropping column levels
if levels_to_drop:
if isinstance(dropped.columns, MultiIndex):
# Drop the specified levels from the MultiIndex
dropped.columns = dropped.columns.droplevel(levels_to_drop)
else:
# Drop the last level of Index by replacing with
# a RangeIndex
dropped.columns = RangeIndex(dropped.columns.size)
# Handle dropping index labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=0, inplace=True)
return dropped
# ----------------------------------------------------------------------
# Iteration
def __hash__(self):
raise TypeError(
f"{repr(type(self).__name__)} objects are mutable, "
f"thus they cannot be hashed"
)
def __iter__(self):
"""
Iterate over info axis.
Returns
-------
iterator
Info axis as iterator.
"""
return iter(self._info_axis)
# can we get a better explanation of this?
def keys(self):
"""
Get the 'info axis' (see Indexing for more).
This is index for Series, columns for DataFrame.
Returns
-------
Index
Info axis.
"""
return self._info_axis
def items(self):
"""
Iterate over (label, values) on info axis
This is index for Series and columns for DataFrame.
Returns
-------
Generator
"""
for h in self._info_axis:
yield h, self[h]
@Appender(items.__doc__)
def iteritems(self):
return self.items()
def __len__(self) -> int:
"""Returns length of info axis"""
return len(self._info_axis)
def __contains__(self, key) -> bool_t:
"""True if the key is in the info axis"""
return key in self._info_axis
@property
def empty(self) -> bool_t:
"""
Indicator whether DataFrame is empty.
True if DataFrame is entirely empty (no items), meaning any of the
axes are of length 0.
Returns
-------
bool
If DataFrame is empty, return True, if not return False.
See Also
--------
Series.dropna : Return series without null values.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
Notes
-----
If DataFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:
>>> df_empty = pd.DataFrame({'A' : []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
>>> df = pd.DataFrame({'A' : [np.nan]})
>>> df
A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True
"""
return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)
# ----------------------------------------------------------------------
# Array Interface
# This is also set in IndexOpsMixin
# GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented
__array_priority__ = 1000
def __array__(self, dtype=None) -> np.ndarray:
return np.asarray(self._values, dtype=dtype)
def __array_wrap__(self, result, context=None):
result = lib.item_from_zerodim(result)
if is_scalar(result):
# e.g. we get here with np.ptp(series)
# ptp also requires the item_from_zerodim
return result
d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)
return self._constructor(result, **d).__finalize__(self)
# ideally we would define this to avoid the getattr checks, but
# is slower
# @property
# def __array_interface__(self):
# """ provide numpy array interface method """
# values = self.values
# return dict(typestr=values.dtype.str,shape=values.shape,data=values)
# ----------------------------------------------------------------------
# Picklability
def __getstate__(self) -> Dict[str, Any]:
meta = {k: getattr(self, k, None) for k in self._metadata}
return dict(
_data=self._data,
_typ=self._typ,
_metadata=self._metadata,
attrs=self.attrs,
**meta,
)
def __setstate__(self, state):
if isinstance(state, BlockManager):
self._data = state
elif isinstance(state, dict):
typ = state.get("_typ")
if typ is not None:
attrs = state.get("_attrs", {})
object.__setattr__(self, "_attrs", attrs)
# set in the order of internal names
# to avoid definitional recursion
# e.g. say fill_value needing _data to be
# defined
meta = set(self._internal_names + self._metadata)
for k in list(meta):
if k in state:
v = state[k]
object.__setattr__(self, k, v)
for k, v in state.items():
if k not in meta:
object.__setattr__(self, k, v)
else:
raise NotImplementedError("Pre-0.12 pickles are no longer supported")
elif len(state) == 2:
raise NotImplementedError("Pre-0.12 pickles are no longer supported")
self._item_cache = {}
# ----------------------------------------------------------------------
# Rendering Methods
def __repr__(self) -> str:
# string representation based upon iterating over self
# (since, by definition, `PandasContainers` are iterable)
prepr = f"[{",".join(map(pprint_thing, self))}]"
return f"{type(self).__name__}({prepr})"
def _repr_latex_(self):
"""
Returns a LaTeX representation for a particular object.
Mainly for use with nbconvert (jupyter notebook conversion to pdf).
"""
if config.get_option("display.latex.repr"):
return self.to_latex()
else:
return None
def _repr_data_resource_(self):
"""
Not a real Jupyter special repr method, but we use the same
naming convention.
"""
if config.get_option("display.html.table_schema"):
data = self.head(config.get_option("display.max_rows"))
payload = json.loads(
data.to_json(orient="table"), object_pairs_hook=collections.OrderedDict
)
return payload
# ----------------------------------------------------------------------
# I/O Methods
_shared_docs[
"to_markdown"
] = """
Print %(klass)s in Markdown-friendly format.
.. versionadded:: 1.0.0
Parameters
----------
buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
%(klass)s in Markdown-friendly format.
"""
_shared_docs[
"to_excel"
] = """
Write %(klass)s to an Excel sheet.
To write a single %(klass)s to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
See Also
--------
to_csv : Write DataFrame to a comma-separated values (csv) file.
ExcelWriter : Class for writing DataFrame objects into excel sheets.
read_excel : Read an Excel file into a pandas DataFrame.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Notes
-----
For compatibility with :meth:`~DataFrame.to_csv`,
to_excel serializes lists and dicts to strings before writing.
    Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
Examples
--------
Create, write to and save a workbook:
>>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> df2 = df1.copy()
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
ExcelWriter can also be used to append to an existing Excel file:
>>> with pd.ExcelWriter('output.xlsx',
... mode='a') as writer: # doctest: +SKIP
... df.to_excel(writer, sheet_name='Sheet_name_3')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
@Appender(_shared_docs["to_excel"] % dict(klass="object"))
def to_excel(
self,
excel_writer,
sheet_name="Sheet1",
na_rep="",
float_format=None,
columns=None,
header=True,
index=True,
index_label=None,
startrow=0,
startcol=0,
engine=None,
merge_cells=True,
encoding=None,
inf_rep="inf",
verbose=True,
freeze_panes=None,
) -> None:
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(
df,
na_rep=na_rep,
cols=columns,
header=header,
float_format=float_format,
index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep,
)
formatter.write(
excel_writer,
sheet_name=sheet_name,
startrow=startrow,
startcol=startcol,
freeze_panes=freeze_panes,
engine=engine,
)
def to_json(
self,
path_or_buf: Optional[FilePathOrBuffer] = None,
orient: Optional[str] = None,
date_format: Optional[str] = None,
double_precision: int = 10,
force_ascii: bool_t = True,
date_unit: str = "ms",
default_handler: Optional[Callable[[Any], JSONSerializable]] = None,
lines: bool_t = False,
compression: Optional[str] = "infer",
index: bool_t = True,
indent: Optional[int] = None,
) -> Optional[str]:
"""
Convert the object to a JSON string.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path_or_buf : str or file handle, optional
File path or object. If not specified, the result is returned as
a string.
orient : str
Indication of expected JSON string format.
* Series:
- default is 'index'
- allowed values are: {'split','records','index','table'}.
* DataFrame:
- default is 'columns'
- allowed values are: {'split', 'records', 'index', 'columns',
'values', 'table'}.
* The format of the JSON string:
- 'split' : dict like {'index' -> [index], 'columns' -> [columns],
'data' -> [values]}
- 'records' : list like [{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
- 'columns' : dict like {column -> {index -> value}}
- 'values' : just the values array
- 'table' : dict like {'schema': {schema}, 'data': {data}}
Describing the data, where the data component is like ``orient='records'``.
.. versionchanged:: 0.20.0
date_format : {None, 'epoch', 'iso'}
Type of date conversion. 'epoch' = epoch milliseconds,
'iso' = ISO8601. The default depends on the `orient`. For
``orient='table'``, the default is 'iso'. For all other orients,
the default is 'epoch'.
double_precision : int, default 10
The number of decimal places to use when encoding
floating point values.
force_ascii : bool, default True
Force encoded string to be ASCII.
date_unit : str, default 'ms' (milliseconds)
The time unit to encode to, governs timestamp and ISO8601
precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
microsecond, and nanosecond respectively.
default_handler : callable, default None
Handler to call if object cannot otherwise be converted to a
suitable format for JSON. Should receive a single argument which is
the object to convert and return a serialisable object.
lines : bool, default False
If 'orient' is 'records', write out line-delimited JSON. A
ValueError is raised for any other 'orient', since the other formats
are not list-like.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
.. versionadded:: 0.21.0
.. versionchanged:: 0.24.0
'infer' option added and set to default
index : bool, default True
Whether to include the index values in the JSON string. Not
including the index (``index=False``) is only supported when
orient is 'split' or 'table'.
.. versionadded:: 0.23.0
indent : int, optional
Length of whitespace used to indent each record.
.. versionadded:: 1.0.0
Returns
-------
None or str
If path_or_buf is None, returns the resulting json format as a
string. Otherwise returns None.
See Also
--------
read_json : Convert a JSON string to pandas object.
Notes
-----
The behavior of ``indent=0`` varies from the stdlib, which does not
indent the output but does insert newlines. Currently, ``indent=0``
and the default ``indent=None`` are equivalent in pandas, though this
may change in a future release.
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:
>>> df.to_json(orient='columns')
'{"col 1":{"row 1":"a","row 2":"c"},"col 2":{"row 1":"b","row 2":"d"}}'
Encoding/decoding a Dataframe using ``'values'`` formatted JSON:
>>> df.to_json(orient='values')
'[["a","b"],["c","d"]]'
Encoding with Table Schema
>>> df.to_json(orient='table')
'{"schema": {"fields": [{"name": "index", "type": "string"},
{"name": "col 1", "type": "string"},
{"name": "col 2", "type": "string"}],
"primaryKey": "index",
"pandas_version": "0.20.0"},
"data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
{"index": "row 2", "col 1": "c", "col 2": "d"}]}'
"""
from pandas.io import json
if date_format is None and orient == "table":
date_format = "iso"
elif date_format is None:
date_format = "epoch"
config.is_nonnegative_int(indent)
indent = indent or 0
return json.to_json(
path_or_buf=path_or_buf,
obj=self,
orient=orient,
date_format=date_format,
double_precision=double_precision,
force_ascii=force_ascii,
date_unit=date_unit,
default_handler=default_handler,
lines=lines,
compression=compression,
index=index,
indent=indent,
)
def to_hdf(
self,
path_or_buf,
key: str,
mode: str = "a",
complevel: Optional[int] = None,
complib: Optional[str] = None,
append: bool_t = False,
format: Optional[str] = None,
index: bool_t = True,
min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
nan_rep=None,
dropna: Optional[bool_t] = None,
data_columns: Optional[List[str]] = None,
errors: str = "strict",
encoding: str = "UTF-8",
) -> None:
"""
Write the contained data to an HDF5 file using HDFStore.
Hierarchical Data Format (HDF) is self-describing, allowing an
application to interpret the structure and contents of a file with
no outside information. One HDF file can hold a mix of related objects
which can be accessed as a group or as individual objects.
In order to add another DataFrame or Series to an existing HDF file
please use append mode and a different key.
For more information see the :ref:`user guide <io.hdf5>`.
Parameters
----------
path_or_buf : str or pandas.HDFStore
File path or HDFStore object.
key : str
Identifier for the group in the store.
mode : {'a', 'w', 'r+'}, default 'a'
Mode to open file:
- 'w': write, a new file is created (an existing file with
the same name would be deleted).
- 'a': append, an existing file is opened for reading and
writing, and if the file does not exist it is created.
- 'r+': similar to 'a', but the file must already exist.
complevel : {0-9}, optional
Specifies a compression level for data.
A value of 0 disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available raises
a ValueError.
append : bool, default False
For Table formats, append the input data to the existing.
format : {'fixed', 'table', None}, default 'fixed'
Possible values:
- 'fixed': Fixed format. Fast writing/reading. Not-appendable,
nor searchable.
- 'table': Table format. Write as a PyTables Table structure
which may perform worse but allow more flexible operations
like searching / selecting subsets of the data.
- If None, pd.get_option('io.hdf.default_format') is checked,
followed by fallback to "fixed"
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
encoding : str, default "UTF-8"
min_itemsize : dict or int, optional
Map column names to minimum string sizes for columns.
nan_rep : Any, optional
How to represent null values as str.
Not allowed with append=True.
data_columns : list of columns or True, optional
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See :ref:`io.hdf5-query-data-columns`.
Applicable only to format='table'.
See Also
--------
DataFrame.read_hdf : Read from HDF file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_feather : Write out feather-format for DataFrames.
DataFrame.to_csv : Write out to a csv file.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
... index=['a', 'b', 'c'])
>>> df.to_hdf('data.h5', key='df', mode='w')
We can add another object to the same file:
>>> s = pd.Series([1, 2, 3, 4])
>>> s.to_hdf('data.h5', key='s')
Reading from HDF file:
>>> pd.read_hdf('data.h5', 'df')
A B
a 1 4
b 2 5
c 3 6
>>> pd.read_hdf('data.h5', 's')
0 1
1 2
2 3
3 4
dtype: int64
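As a sketch (the key name is illustrative), appending requires the
table format described above:
>>> df.to_hdf('data.h5', key='df_table', format='table')  # doctest: +SKIP
>>> df.to_hdf('data.h5', key='df_table', format='table',
...           append=True)  # doctest: +SKIP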
Deleting file with data:
>>> import os
>>> os.remove('data.h5')
"""
from pandas.io import pytables
pytables.to_hdf(
path_or_buf,
key,
self,
mode=mode,
complevel=complevel,
complib=complib,
append=append,
format=format,
index=index,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
dropna=dropna,
data_columns=data_columns,
errors=errors,
encoding=encoding,
)
def to_sql(
self,
name: str,
con,
schema=None,
if_exists: str = "fail",
index: bool_t = True,
index_label=None,
chunksize=None,
dtype=None,
method=None,
) -> None:
"""
Write records stored in a DataFrame to a SQL database.
Databases supported by SQLAlchemy [1]_ are supported. Tables can be
newly created, appended to, or overwritten.
Parameters
----------
name : str
Name of SQL table.
con : sqlalchemy.engine.Engine or sqlite3.Connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. Legacy support is provided for sqlite3.Connection objects. The user
is responsible for engine disposal and connection closure for the SQLAlchemy
connectable. See `here \
<https://docs.sqlalchemy.org/en/13/core/connections.html>`_.
schema : str, optional
Specify the schema (if database flavor supports this). If None, use
default schema.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
How to behave if the table already exists.
* fail: Raise a ValueError.
* replace: Drop the table before inserting new values.
* append: Insert new values to the existing table.
index : bool, default True
Write DataFrame index as a column. Uses `index_label` as the column
name in the table.
index_label : str or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, optional
Specify the number of rows in each batch to be written at a time.
By default, all rows will be written at once.
dtype : dict or scalar, optional
Specifying the datatype for columns. If a dictionary is used, the
keys should be the column names and the values should be the
SQLAlchemy types or strings for the sqlite3 legacy mode. If a
scalar is provided, it will be applied to all columns.
method : {None, 'multi', callable}, optional
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
* 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
.. versionadded:: 0.24.0
Raises
------
ValueError
When the table already exists and `if_exists` is 'fail' (the
default).
See Also
--------
read_sql : Read a DataFrame from a table.
Notes
-----
Timezone aware datetime columns will be written as
``Timestamp with timezone`` type with SQLAlchemy if supported by the
database. Otherwise, the datetimes will be stored as timezone unaware
timestamps local to the original timezone.
.. versionadded:: 0.24.0
References
----------
.. [1] https://docs.sqlalchemy.org
.. [2] https://www.python.org/dev/peps/pep-0249/
Examples
--------
Create an in-memory SQLite database.
>>> from sqlalchemy import create_engine
>>> engine = create_engine('sqlite://', echo=False)
Create a table from scratch with 3 rows.
>>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})
>>> df
name
0 User 1
1 User 2
2 User 3
>>> df.to_sql('users', con=engine)
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]
>>> df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})
>>> df1.to_sql('users', con=engine, if_exists='append')
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),
(0, 'User 4'), (1, 'User 5')]
Overwrite the table with just ``df1``.
>>> df1.to_sql('users', con=engine, if_exists='replace',
... index_label='id')
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 4'), (1, 'User 5')]
Specify the dtype (especially useful for integers with missing values).
Notice that while pandas is forced to store the data as floating point,
the database supports nullable integers. When fetching the data with
Python, we get back integer scalars.
>>> df = pd.DataFrame({"A": [1, None, 2]})
>>> df
A
0 1.0
1 NaN
2 2.0
>>> from sqlalchemy.types import Integer
>>> df.to_sql('integers', con=engine, index=False,
... dtype={"A": Integer()})
>>> engine.execute("SELECT * FROM integers").fetchall()
[(1,), (None,), (2,)]
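A minimal sketch of a custom insertion ``method`` (the name and body
are illustrative only; the signature is the one documented above, and
``pd_table.table`` is assumed to wrap a SQLAlchemy ``Table``):
>>> def chunked_insert(pd_table, conn, keys, data_iter):
...     # build one multi-row INSERT from the iterated rows (illustrative)
...     rows = [dict(zip(keys, row)) for row in data_iter]
...     conn.execute(pd_table.table.insert(), rows)
>>> df.to_sql('integers', con=engine, if_exists='append',
...           method=chunked_insert)  # doctest: +SKIP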
"""
from pandas.io import sql
sql.to_sql(
self,
name,
con,
schema=schema,
if_exists=if_exists,
index=index,
index_label=index_label,
chunksize=chunksize,
dtype=dtype,
method=method,
)
def to_pickle(
self,
path,
compression: Optional[str] = "infer",
protocol: int = pickle.HIGHEST_PROTOCOL,
) -> None:
"""
Pickle (serialize) object to file.
Parameters
----------
path : str
File path where the pickled object will be stored.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, \
default 'infer'
A string representing the compression to use in the output file. By
default, infers from the file extension in specified path.
protocol : int
Int which indicates which protocol should be used by the pickler,
default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible
values are 0, 1, 2, 3, 4. A negative value for the protocol
parameter is equivalent to setting its value to HIGHEST_PROTOCOL.
.. [1] https://docs.python.org/3/library/pickle.html.
.. versionadded:: 0.21.0.
See Also
--------
read_pickle : Load pickled pandas object (or any object) from file.
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_sql : Write DataFrame to a SQL database.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Examples
--------
>>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)})
>>> original_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> original_df.to_pickle("./dummy.pkl")
>>> unpickled_df = pd.read_pickle("./dummy.pkl")
>>> unpickled_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> import os
>>> os.remove("./dummy.pkl")
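Compression is inferred from the file extension by default and can
also be requested explicitly (illustrative):
>>> original_df.to_pickle('./dummy.pkl.gz', compression='gzip')  # doctest: +SKIP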
"""
from pandas.io.pickle import to_pickle
to_pickle(self, path, compression=compression, protocol=protocol)
def to_clipboard(
self, excel: bool_t = True, sep: Optional[str] = None, **kwargs
) -> None:
r"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
Parameters
----------
excel : bool, default True
Produce output in a csv format for easy pasting into excel.
- True, use the provided separator for csv pasting.
- False, write a string representation of the object to the clipboard.
sep : str, default ``'\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
See Also
--------
DataFrame.to_csv : Write a DataFrame to a comma-separated values
(csv) file.
read_clipboard : Read text from clipboard and pass to read_table.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `PyQt4` modules)
- Windows : none
- OS X : none
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
>>> df.to_clipboard(sep=',')
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
We can omit the index by passing the keyword `index` and setting
it to false.
>>> df.to_clipboard(sep=',', index=False)
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
"""
from pandas.io import clipboards
clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
def to_xarray(self):
"""
Return an xarray object from the pandas object.
Returns
-------
xarray.DataArray or xarray.Dataset
Data in the pandas structure converted to Dataset if the object is
a DataFrame, or a DataArray if the object is a Series.
See Also
--------
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Notes
-----
See the `xarray docs <https://xarray.pydata.org/en/stable/>`__
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),
... ('parrot', 'bird', 24.0, 2),
... ('lion', 'mammal', 80.5, 4),
... ('monkey', 'mammal', np.nan, 4)],
... columns=['name', 'class', 'max_speed',
... 'num_legs'])
>>> df
name class max_speed num_legs
0 falcon bird 389.0 2
1 parrot bird 24.0 2
2 lion mammal 80.5 4
3 monkey mammal NaN 4
>>> df.to_xarray()
<xarray.Dataset>
Dimensions: (index: 4)
Coordinates:
* index (index) int64 0 1 2 3
Data variables:
name (index) object 'falcon' 'parrot' 'lion' 'monkey'
class (index) object 'bird' 'bird' 'mammal' 'mammal'
max_speed (index) float64 389.0 24.0 80.5 nan
num_legs (index) int64 2 2 4 4
>>> df['max_speed'].to_xarray()
<xarray.DataArray 'max_speed' (index: 4)>
array([389. , 24. , 80.5, nan])
Coordinates:
* index (index) int64 0 1 2 3
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',
... '2018-01-02', '2018-01-02'])
>>> df_multiindex = pd.DataFrame({'date': dates,
... 'animal': ['falcon', 'parrot',
... 'falcon', 'parrot'],
... 'speed': [350, 18, 361, 15]})
>>> df_multiindex = df_multiindex.set_index(['date', 'animal'])
>>> df_multiindex
speed
date animal
2018-01-01 falcon 350
parrot 18
2018-01-02 falcon 361
parrot 15
>>> df_multiindex.to_xarray()
<xarray.Dataset>
Dimensions: (animal: 2, date: 2)
Coordinates:
* date (date) datetime64[ns] 2018-01-01 2018-01-02
* animal (animal) object 'falcon' 'parrot'
Data variables:
speed (date, animal) int64 350 18 361 15
"""
xarray = import_optional_dependency("xarray")
if self.ndim == 1:
return xarray.DataArray.from_series(self)
else:
return xarray.Dataset.from_dataframe(self)
@Substitution(returns=fmt.return_docstring)
def to_latex(
self,
buf=None,
columns=None,
col_space=None,
header=True,
index=True,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
index_names=True,
bold_rows=False,
column_format=None,
longtable=None,
escape=None,
encoding=None,
decimal=".",
multicolumn=None,
multicolumn_format=None,
multirow=None,
caption=None,
label=None,
):
r"""
Render object to a LaTeX tabular, longtable, or nested table/tabular.
Requires ``\usepackage{booktabs}``. The output can be copy/pasted
into a main LaTeX document or read from an external file
with ``\input{table.tex}``.
.. versionchanged:: 0.20.2
Added to Series.
.. versionchanged:: 1.0.0
Added caption and label arguments.
Parameters
----------
buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given,
it is assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default 'NaN'
Missing data representation.
formatters : list of functions or dict of {str: function}, optional
Formatter functions to apply to columns' elements by position or
name. The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function or str, optional, default None
Formatter for floating point numbers. For example
``float_format="%%.2f"`` and ``float_format="{:0.2f}".format`` will
both result in 0.1234 being formatted as 0.12.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row. By default, the value will be
read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
columns. By default, 'l' will be used for all columns except
columns of numbers, which default to 'r'.
longtable : bool, optional
By default, the value will be read from the pandas config
module. Use a longtable environment instead of tabular. Requires
adding a \usepackage{longtable} to your LaTeX preamble.
escape : bool, optional
By default, the value will be read from the pandas config
module. When set to False prevents from escaping latex special
characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
multicolumn : bool, default True
Use \multicolumn to enhance MultiIndex columns.
The default will be read from the config module.
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to `column_format`
The default will be read from the config module.
multirow : bool, default False
Use \multirow to enhance MultiIndex rows. Requires adding a
\usepackage{multirow} to your LaTeX preamble. Will print
centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read
from the pandas config module.
caption : str, optional
The LaTeX caption to be placed inside ``\caption{}`` in the output.
.. versionadded:: 1.0.0
label : str, optional
The LaTeX label to be placed inside ``\label{}`` in the output.
This is used with ``\ref{}`` in the main ``.tex`` file.
.. versionadded:: 1.0.0
%(returns)s
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
>>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE
\begin{tabular}{lll}
\toprule
name & mask & weapon \\
\midrule
Raphael & red & sai \\
Donatello & purple & bo staff \\
\bottomrule
\end{tabular}
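A caption and label (both documented above) can be attached for
cross-referencing from the main document; the resulting LaTeX is
omitted here:
>>> latex_code = df.to_latex(index=False, caption='Turtle weapons',
...                          label='tab:weapons')  # doctest: +SKIP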
"""
# Get defaults from the pandas config
if self.ndim == 1:
self = self.to_frame()
if longtable is None:
longtable = config.get_option("display.latex.longtable")
if escape is None:
escape = config.get_option("display.latex.escape")
if multicolumn is None:
multicolumn = config.get_option("display.latex.multicolumn")
if multicolumn_format is None:
multicolumn_format = config.get_option("display.latex.multicolumn_format")
if multirow is None:
multirow = config.get_option("display.latex.multirow")
formatter = DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
header=header,
index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
index_names=index_names,
escape=escape,
decimal=decimal,
)
return formatter.to_latex(
buf=buf,
column_format=column_format,
longtable=longtable,
encoding=encoding,
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow,
caption=caption,
label=label,
)
def to_csv(
self,
path_or_buf: Optional[FilePathOrBuffer] = None,
sep: str = ",",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Sequence[Label]] = None,
header: Union[bool_t, List[str]] = True,
index: bool_t = True,
index_label: Optional[Union[bool_t, str, Sequence[Label]]] = None,
mode: str = "w",
encoding: Optional[str] = None,
compression: Optional[Union[str, Mapping[str, str]]] = "infer",
quoting: Optional[int] = None,
quotechar: str = '"',
line_terminator: Optional[str] = None,
chunksize: Optional[int] = None,
date_format: Optional[str] = None,
doublequote: bool_t = True,
escapechar: Optional[str] = None,
decimal: Optional[str] = ".",
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. versionchanged:: 0.24.0
The order of arguments for Series was changed.
Parameters
----------
path_or_buf : str or file handle, default None
File path or object, if None is provided the result is returned as
a string. If a file object is passed it should be opened with
`newline=''`, disabling universal newlines.
.. versionchanged:: 0.24.0
Was previously named "path" for Series.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
float_format : str, default None
Format string for floating point numbers.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
.. versionchanged:: 0.24.0
Previously defaulted to False for Series.
index : bool, default True
Write row names (index).
index_label : str or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the object uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R.
mode : str
Python write mode, default 'w'.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
compression : str or dict, default 'infer'
If str, represents compression mode. If dict, value at 'method' is
the compression mode. Compression mode may be any of the following
possible values: {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If
compression mode is 'infer' and `path_or_buf` is path-like, then
detect compression mode from the following extensions: '.gz',
'.bz2', '.zip' or '.xz'. (otherwise no compression). If dict given
and mode is 'zip' or inferred as 'zip', other entries passed as
additional compression options.
.. versionchanged:: 1.0.0
May now be a dict with key 'method' as compression mode
and other entries as additional compression options if
compression mode is 'zip'.
quoting : optional constant from csv module
Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
will treat them as non-numeric.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
line_terminator : str, optional
The newline character or character sequence to use in the output
file. Defaults to `os.linesep`, which depends on the OS in which
this method is called ('\n' for Linux, '\r\n' for Windows, for example).
.. versionchanged:: 0.24.0
chunksize : int or None
Rows to write at a time.
date_format : str, default None
Format string for datetime objects.
doublequote : bool, default True
Control quoting of `quotechar` inside a field.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
decimal : str, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data.
Returns
-------
None or str
If path_or_buf is None, returns the resulting csv format as a
string. Otherwise returns None.
See Also
--------
read_csv : Load a CSV file into a DataFrame.
to_excel : Write DataFrame to an Excel file.
Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
>>> df.to_csv(index=False)
'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n'
Create 'out.zip' containing 'out.csv'
>>> compression_opts = dict(method='zip',
... archive_name='out.csv') # doctest: +SKIP
>>> df.to_csv('out.zip', index=False,
... compression=compression_opts) # doctest: +SKIP
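Missing values and float formatting can be controlled directly; the
output shown is illustrative only:
>>> pd.DataFrame({'x': [0.1234, None]}).to_csv(na_rep='NULL',
...                                            float_format='%.2f')  # doctest: +SKIP
',x\n0,0.12\n1,NULL\n'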
"""
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
from pandas.io.formats.csvs import CSVFormatter
formatter = CSVFormatter(
df,
path_or_buf,
line_terminator=line_terminator,
sep=sep,
encoding=encoding,
compression=compression,
quoting=quoting,
na_rep=na_rep,
float_format=float_format,
cols=columns,
header=header,
index=index,
index_label=index_label,
mode=mode,
chunksize=chunksize,
quotechar=quotechar,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar,
decimal=decimal,
)
formatter.save()
if path_or_buf is None:
return formatter.path_or_buf.getvalue()
return None
# ----------------------------------------------------------------------
# Lookup Caching
def _set_as_cached(self, item, cacher) -> None:
"""
Set the _cacher attribute on the calling object with a weakref to
cacher.
"""
self._cacher = (item, weakref.ref(cacher))
def _reset_cacher(self) -> None:
"""
Reset the cacher.
"""
if hasattr(self, "_cacher"):
del self._cacher
def _maybe_cache_changed(self, item, value) -> None:
"""
The object has called back to us saying maybe it has changed.
"""
self._data.set(item, value)
@property
def _is_cached(self) -> bool_t:
"""Return boolean indicating if self is cached or not."""
return getattr(self, "_cacher", None) is not None
def _get_cacher(self):
"""return my cacher or None"""
cacher = getattr(self, "_cacher", None)
if cacher is not None:
cacher = cacher[1]()
return cacher
def _maybe_update_cacher(
self, clear: bool_t = False, verify_is_copy: bool_t = True
) -> None:
"""
See if we need to update our parent cacher if clear, then clear our
cache.
Parameters
----------
clear : bool, default False
Clear the item cache.
verify_is_copy : bool, default True
Provide is_copy checks.
"""
cacher = getattr(self, "_cacher", None)
if cacher is not None:
ref = cacher[1]()
# we are trying to reference a dead referant, hence
# a copy
if ref is None:
del self._cacher
else:
# Note: we need to call ref._maybe_cache_changed even in the
# case where it will raise (the reason is not fully understood).
try:
ref._maybe_cache_changed(cacher[0], self)
except AssertionError:
# ref._data.setitem can raise
# AssertionError because of shape mismatch
pass
if verify_is_copy:
self._check_setitem_copy(stacklevel=5, t="referant")
if clear:
self._clear_item_cache()
def _clear_item_cache(self) -> None:
self._item_cache.clear()
# ----------------------------------------------------------------------
# Indexing Methods
def take(
self: FrameOrSeries, indices, axis=0, is_copy: Optional[bool_t] = None, **kwargs
) -> FrameOrSeries:
"""
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
axis : {0 or 'index', 1 or 'columns', None}, default 0
The axis on which to select elements. ``0`` means that we are
selecting rows, ``1`` means that we are selecting columns.
is_copy : bool
Before pandas 1.0, ``is_copy=False`` can be specified to ensure
that the return value is an actual copy. Starting with pandas 1.0,
``take`` always returns a copy, and the keyword is therefore
deprecated.
.. deprecated:: 1.0.0
**kwargs
For compatibility with :meth:`numpy.take`. Has no effect on the
output.
Returns
-------
taken : same type as caller
An array-like containing the elements taken from the object.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by labels.
DataFrame.iloc : Select a subset of a DataFrame by positions.
numpy.take : Take elements from an array along an axis.
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=['name', 'class', 'max_speed'],
... index=[0, 2, 3, 1])
>>> df
name class max_speed
0 falcon bird 389.0
2 parrot bird 24.0
3 lion mammal 80.5
1 monkey mammal NaN
Take elements at positions 0 and 3 along the axis 0 (default).
Note how the actual indices selected (0 and 1) do not correspond to
our selected indices 0 and 3. That's because we are selecting the 0th
and 3rd rows, not rows whose indices equal 0 and 3.
>>> df.take([0, 3])
name class max_speed
0 falcon bird 389.0
1 monkey mammal NaN
Take elements at indices 1 and 2 along the axis 1 (column selection).
>>> df.take([1, 2], axis=1)
class max_speed
0 bird 389.0
2 bird 24.0
3 mammal 80.5
1 mammal NaN
We may take elements using negative integers for positive indices,
starting from the end of the object, just like with Python lists.
>>> df.take([-1, -2])
name class max_speed
1 monkey mammal NaN
3 lion mammal 80.5
"""
if is_copy is not None:
warnings.warn(
"is_copy is deprecated and will be removed in a future version. "
"'take' always returns a copy, so there is no need to specify this.",
FutureWarning,
stacklevel=2,
)
nv.validate_take(tuple(), kwargs)
self._consolidate_inplace()
new_data = self._data.take(
indices, axis=self._get_block_manager_axis(axis), verify=True
)
return self._constructor(new_data).__finalize__(self)
def _take_with_is_copy(self: FrameOrSeries, indices, axis=0) -> FrameOrSeries:
"""
Internal version of the `take` method that sets the `_is_copy`
attribute to keep track of the parent dataframe (using in indexing
for the SettingWithCopyWarning).
See the docstring of `take` for full explanation of the parameters.
"""
result = self.take(indices=indices, axis=axis)
# Flag the result as a potential copy of self when the take changed the axis.
if not result._get_axis(axis).equals(self._get_axis(axis)):
result._set_is_copy(self)
return result
def xs(self, key, axis=0, level=None, drop_level: bool_t = True):
"""
Return cross-section from the Series/DataFrame.
This method takes a `key` argument to select data at a particular
level of a MultiIndex.
Parameters
----------
key : label or tuple of label
Label contained in the index, or partially in a MultiIndex.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis to retrieve cross-section on.
level : object, defaults to first n levels (n=1 or len(key))
In case of a key partially contained in a MultiIndex, indicate
which levels are used. Levels can be referred by label or position.
drop_level : bool, default True
If False, returns object with same levels as self.
Returns
-------
Series or DataFrame
Cross-section from the original Series or DataFrame
corresponding to the selected index levels.
See Also
--------
DataFrame.loc : Access a group of rows and columns
by label(s) or a boolean array.
DataFrame.iloc : Purely integer-location based indexing
for selection by position.
Notes
-----
`xs` can not be used to set values.
MultiIndex Slicers is a generic way to get/set values on
any level or levels.
It is a superset of `xs` functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`.
Examples
--------
>>> d = {'num_legs': [4, 4, 2, 2],
... 'num_wings': [0, 0, 2, 2],
... 'class': ['mammal', 'mammal', 'mammal', 'bird'],
... 'animal': ['cat', 'dog', 'bat', 'penguin'],
... 'locomotion': ['walks', 'walks', 'flies', 'walks']}
>>> df = pd.DataFrame(data=d)
>>> df = df.set_index(['class', 'animal', 'locomotion'])
>>> df
num_legs num_wings
class animal locomotion
mammal cat walks 4 0
dog walks 4 0
bat flies 2 2
bird penguin walks 2 2
Get values at specified index
>>> df.xs('mammal')
num_legs num_wings
animal locomotion
cat walks 4 0
dog walks 4 0
bat flies 2 2
Get values at several indexes
>>> df.xs(('mammal', 'dog'))
num_legs num_wings
locomotion
walks 4 0
Get values at specified index and level
>>> df.xs('cat', level=1)
num_legs num_wings
class locomotion
mammal walks 4 0
Get values at several indexes and levels
>>> df.xs(('bird', 'walks'),
... level=[0, 'locomotion'])
num_legs num_wings
animal
penguin 2 2
Get values at specified column and axis
>>> df.xs('num_wings', axis=1)
class animal locomotion
mammal cat walks 0
dog walks 0
bat flies 2
bird penguin walks 2
Name: num_wings, dtype: int64
"""
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if level is not None:
loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)
# create the tuple of the indexer
_indexer = [slice(None)] * self.ndim
_indexer[axis] = loc
indexer = tuple(_indexer)
result = self.iloc[indexer]
setattr(result, result._get_axis_name(axis), new_ax)
return result
if axis == 1:
return self[key]
self._consolidate_inplace()
index = self.index
if isinstance(index, MultiIndex):
loc, new_index = self.index.get_loc_level(key, drop_level=drop_level)
else:
loc = self.index.get_loc(key)
if isinstance(loc, np.ndarray):
if loc.dtype == np.bool_:
(inds,) = loc.nonzero()
return self._take_with_is_copy(inds, axis=axis)
else:
return self._take_with_is_copy(loc, axis=axis)
if not is_scalar(loc):
new_index = self.index[loc]
if is_scalar(loc):
# In this case loc should be an integer
if self.ndim == 1:
# if we encounter an array-like and we only have 1 dim
# that means that there are lists/ndarrays inside the Series!
# so just return them (GH 6394)
return self._values[loc]
new_values = self._data.fast_xs(loc)
result = self._constructor_sliced(
new_values,
index=self.columns,
name=self.index[loc],
dtype=new_values.dtype,
)
else:
result = self.iloc[loc]
result.index = new_index
# this could be a view
# but only in a single-dtyped view sliceable case
result._set_is_copy(self, copy=not result._is_view)
return result
_xs: Callable = xs
def __getitem__(self, item):
raise AbstractMethodError(self)
def _get_item_cache(self, item):
"""Return the cached item, item represents a label indexer."""
cache = self._item_cache
res = cache.get(item)
if res is None:
values = self._data.get(item)
res = self._box_item_values(item, values)
cache[item] = res
res._set_as_cached(item, self)
# for a chain
res._is_copy = self._is_copy
return res
def _box_item_values(self, key, values):
raise AbstractMethodError(self)
def _slice(self: FrameOrSeries, slobj: slice, axis=0) -> FrameOrSeries:
"""
Construct a slice of this container.
Slicing with this method is *always* positional.
"""
assert isinstance(slobj, slice), type(slobj)
axis = self._get_block_manager_axis(axis)
result = self._constructor(self._data.get_slice(slobj, axis=axis))
result = result.__finalize__(self)
# this could be a view
# but only in a single-dtyped view sliceable case
is_copy = axis != 0 or result._is_view
result._set_is_copy(self, copy=is_copy)
return result
def _set_item(self, key, value) -> None:
self._data.set(key, value)
self._clear_item_cache()
def _set_is_copy(self, ref, copy: bool_t = True) -> None:
if not copy:
self._is_copy = None
else:
assert ref is not None
self._is_copy = weakref.ref(ref)
def _check_is_chained_assignment_possible(self) -> bool_t:
"""
Check if we are a view, have a cacher, and are of mixed type.
If so, then force a setitem_copy check.
Should be called just near setting a value
Return True if we are a view and are cached, meaning that the cacher
should be updated following the setting; otherwise return False.
"""
if self._is_view and self._is_cached:
ref = self._get_cacher()
if ref is not None and ref._is_mixed_type:
self._check_setitem_copy(stacklevel=4, t="referant", force=True)
return True
elif self._is_copy:
self._check_setitem_copy(stacklevel=4, t="referant")
return False
def _check_setitem_copy(self, stacklevel=4, t="setting", force=False):
"""
Parameters
----------
stacklevel : int, default 4
the level to show of the stack when the error is output
t : str, the type of setting error
force : bool, default False
If True, then force showing an error.
Validate if we are doing a setitem on a chained copy.
If you call this function, be sure to set the stacklevel such that the
user will see the error *at the level of setting*.
It is technically possible to figure out that we are setting on
a copy even WITH a multi-dtyped pandas object. In other words, some
blocks may be views while others are not. Currently _is_view will ALWAYS
return False for multi-blocks to avoid having to handle this case.
df = DataFrame(np.arange(0,9), columns=['count'])
df['group'] = 'b'
# This technically need not raise SettingWithCopy if both are views
# (which is not generally guaranteed but is usually True); however,
# this is in general not a good practice and we recommend using .loc.
df.iloc[0:5]['group'] = 'a'
"""
# return early if the check is not needed
if not (force or self._is_copy):
return
value = config.get_option("mode.chained_assignment")
if value is None:
return
# see if the copy is not actually referred; if so, then dissolve
# the copy weakref
if self._is_copy is not None and not isinstance(self._is_copy, str):
r = self._is_copy()
if not gc.get_referents(r) or r.shape == self.shape:
self._is_copy = None
return
# a custom message
if isinstance(self._is_copy, str):
t = self._is_copy
elif t == "referant":
t = (
"\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame\n\n"
"See the caveats in the documentation: "
"https://pandas.pydata.org/pandas-docs/stable/user_guide/"
"indexing.html#returning-a-view-versus-a-copy"
)
else:
t = (
"\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame.\n"
"Try using .loc[row_indexer,col_indexer] = value "
"instead\n\nSee the caveats in the documentation: "
"https://pandas.pydata.org/pandas-docs/stable/user_guide/"
"indexing.html#returning-a-view-versus-a-copy"
)
if value == "raise":
raise com.SettingWithCopyError(t)
elif value == "warn":
warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel)
def __delitem__(self, key) -> None:
"""
Delete item
"""
deleted = False
maybe_shortcut = False
if self.ndim == 2 and isinstance(self.columns, MultiIndex):
try:
maybe_shortcut = key not in self.columns._engine
except TypeError:
pass
if maybe_shortcut:
# Allow shorthand to delete all columns whose first len(key)
# elements match key:
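# For illustration (not part of the original comment): with MultiIndex
# columns [('a', 'x'), ('a', 'y'), ('b', 'x')], ``del df['a']`` reaches
# this branch and removes both ('a', 'x') and ('a', 'y').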
if not isinstance(key, tuple):
key = (key,)
for col in self.columns:
if isinstance(col, tuple) and col[: len(key)] == key:
del self[col]
deleted = True
if not deleted:
# If the above loop ran and didn't delete anything because
# there was no match, this call should raise the appropriate
# exception:
self._data.delete(key)
# delete from the caches
try:
del self._item_cache[key]
except KeyError:
pass
# ----------------------------------------------------------------------
# Unsorted
def get(self, key, default=None):
"""
Get item from object for given key (ex: DataFrame column).
Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
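Examples
--------
A minimal illustration (hypothetical frame; output not checked here):
>>> df = pd.DataFrame({'A': [1, 2]})
>>> df.get('A')  # doctest: +SKIP
0    1
1    2
Name: A, dtype: int64
>>> df.get('B', default='missing')  # doctest: +SKIP
'missing'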
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
@property
def _is_view(self) -> bool_t:
"""Return boolean indicating if self is view of another array """
return self._data.is_view
def reindex_like(
self: FrameOrSeries,
other,
method: Optional[str] = None,
copy: bool_t = True,
limit=None,
tolerance=None,
) -> FrameOrSeries:
"""
Return an object with matching indices as other object.
Conform the object to the same index on all axes. Optional
filling logic, placing NaN in locations having no value
in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.
Parameters
----------
other : Object of the same data type
Its row and column indices are used to define the new indices
of this object.
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap.
copy : bool, default True
Return a new object, even if the passed indexes are the same.
limit : int, default None
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
Series or DataFrame
Same type as caller, but with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.
Examples
--------
>>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],
... [31, 87.8, 'high'],
... [22, 71.6, 'medium'],
... [35, 95, 'medium']],
... columns=['temp_celsius', 'temp_fahrenheit',
... 'windspeed'],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'))
>>> df1
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df2 = pd.DataFrame([[28, 'low'],
... [30, 'low'],
... [35.1, 'medium']],
... columns=['temp_celsius', 'windspeed'],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']))
>>> df2
temp_celsius windspeed
2014-02-12 28.0 low
2014-02-13 30.0 low
2014-02-15 35.1 medium
>>> df2.reindex_like(df1)
temp_celsius temp_fahrenheit windspeed
2014-02-12 28.0 NaN low
2014-02-13 30.0 NaN low
2014-02-14 NaN NaN NaN
2014-02-15 35.1 NaN medium
"""
d = other._construct_axes_dict(
axes=self._AXIS_ORDERS,
method=method,
copy=copy,
limit=limit,
tolerance=tolerance,
)
return self.reindex(**d)
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace: bool_t = False,
errors: str = "raise",
):
inplace = validate_bool_kwarg(inplace, "inplace")
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
axis_name = self._get_axis_name(axis)
axes = {axis_name: labels}
elif index is not None or columns is not None:
axes, _ = self._construct_axes_from_arguments((index, columns), {})
else:
raise ValueError(
"Need to specify at least one of 'labels', 'index' or 'columns'"
)
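# Illustration (not from the source): ``df.drop(columns=['b'])`` takes
# the index/columns branch above, while ``df.drop(['b'], axis=1)`` takes
# the labels branch; both remove column 'b'.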
obj = self
for axis, labels in axes.items():
if labels is not None:
obj = obj._drop_axis(labels, axis, level=level, errors=errors)
if inplace:
self._update_inplace(obj)
else:
return obj
def _drop_axis(
self: FrameOrSeries, labels, axis, level=None, errors: str = "raise"
) -> FrameOrSeries:
"""
Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
"""
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis = self._get_axis(axis)
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
result = self.reindex(**{axis_name: new_axis})
# Case for non-unique axis
else:
labels = ensure_object(com.index_labels_to_array(labels))
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
indexer = ~axis.get_level_values(level).isin(labels)
# GH 18561 MultiIndex.drop should raise if label is absent
if errors == "raise" and indexer.all():
raise KeyError(f"{labels} not found in axis")
else:
indexer = ~axis.isin(labels)
# Check if label doesn't exist along axis
labels_missing = (axis.get_indexer_for(labels) == -1).any()
if errors == "raise" and labels_missing:
raise KeyError(f"{labels} not found in axis")
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.loc[tuple(slicer)]
return result
def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None:
"""
Replace self internals with result.
Parameters
----------
verify_is_copy : bool, default True
Provide is_copy checks.
"""
# NOTE: This does *not* call __finalize__ and that's an explicit
# decision that we may revisit in the future.
self._reset_cache()
self._clear_item_cache()
self._data = getattr(result, "_data", result)
self._maybe_update_cacher(verify_is_copy=verify_is_copy)
def add_prefix(self: FrameOrSeries, prefix: str) -> FrameOrSeries:
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_prefix('item_')
item_0 1
item_1 2
item_2 3
item_3 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial("{prefix}{}".format, prefix=prefix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper) # type: ignore
def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries:
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_suffix('_item')
0_item 1
1_item 2
2_item 3
3_item 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial("{}{suffix}".format, suffix=suffix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper) # type: ignore
def sort_values(
self,
axis=0,
ascending=True,
inplace: bool_t = False,
kind: str = "quicksort",
na_position: str = "last",
ignore_index: bool_t = False,
):
"""
Sort by the values along either axis.
Parameters
----------%(optional_by)s
axis : %(axes_single_arg)s, default 0
Axis to be sorted.
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, it must match the length of
the `by`.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the
end.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
Returns
-------
sorted_obj : DataFrame or None
DataFrame with sorted values if inplace=False, None otherwise.
Examples
--------
>>> df = pd.DataFrame({
... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... })
>>> df
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
3 NaN 8 4
4 D 7 2
5 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort by multiple columns
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
3 NaN 8 4
Putting NAs first
>>> df.sort_values(by='col1', ascending=False, na_position='first')
col1 col2 col3
3 NaN 8 4
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
"""
raise AbstractMethodError(self)
def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries:
"""
Conform %(klass)s to new index with optional filling logic.
Places NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
``copy=False``.
Parameters
----------
%(optional_labels)s
%(axes)s : array-like, optional
New labels / index to conform to, should be specified using
keywords. Preferably an Index object to avoid duplicating data.
%(optional_axis)s
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: Propagate last valid observation forward to next
valid.
* backfill / bfill: Use next valid observation to fill gap.
* nearest: Use nearest valid observations to fill gap.
copy : bool, default True
Return a new object, even if the passed indexes are the same.
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
limit : int, default None
Maximum number of consecutive elements to forward or backward fill.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
%(klass)s with changed index.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
``DataFrame.reindex`` supports two calling conventions
* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Create a dataframe with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = pd.DataFrame({'http_status': [200, 200, 404, 404, 301],
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
... index=index)
>>> df
http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
IE10 404 0.08
Konqueror 301 1.00
Create a new index and reindex the dataframe. By default
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.
>>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> df.reindex(new_index)
http_status response_time
Safari 404.0 0.07
Iceweasel NaN NaN
Comodo Dragon NaN NaN
IE10 404.0 0.08
Chrome 200.0 0.02
We can fill in the missing values by passing a value to
the keyword ``fill_value``. Because the index is not monotonically
increasing or decreasing, we cannot use arguments to the keyword
``method`` to fill the ``NaN`` values.
>>> df.reindex(new_index, fill_value=0)
http_status response_time
Safari 404 0.07
Iceweasel 0 0.00
Comodo Dragon 0 0.00
IE10 404 0.08
Chrome 200 0.02
>>> df.reindex(new_index, fill_value='missing')
http_status response_time
Safari 404 0.07
Iceweasel missing missing
Comodo Dragon missing missing
IE10 404 0.08
Chrome 200 0.02
We can also reindex the columns.
>>> df.reindex(columns=['http_status', 'user_agent'])
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
Or we can use "axis-style" keyword arguments
>>> df.reindex(['http_status', 'user_agent'], axis="columns")
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
To further illustrate the filling functionality in
``reindex``, we will create a dataframe with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> df2 = pd.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
... index=date_index)
>>> df2
prices
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
Suppose we decide to expand the dataframe to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> df2.reindex(date_index2)
prices
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
The index entries that did not have a value in the original data frame
(for example, '2009-12-29') are by default filled with ``NaN``.
If desired, we can fill in the missing values using one of several
options.
For example, to propagate the next valid observation backward to fill the
``NaN`` values, pass ``bfill`` as an argument to the ``method`` keyword.
>>> df2.reindex(date_index2, method='bfill')
prices
2009-12-29 100.0
2009-12-30 100.0
2009-12-31 100.0
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
Please note that the ``NaN`` value present in the original dataframe
(at index value 2010-01-03) will not be filled by any of the
value propagation schemes. This is because filling while reindexing
does not look at dataframe values, but only compares the original and
desired indexes. If you do want to fill in the ``NaN`` values present
in the original dataframe, use the ``fillna()`` method.
See the :ref:`user guide <basics.reindexing>` for more.
"""
# TODO: Decide if we care about having different examples for different
# kinds
# construct the args
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
method = missing.clean_reindex_fill_method(kwargs.pop("method", None))
level = kwargs.pop("level", None)
copy = kwargs.pop("copy", True)
limit = kwargs.pop("limit", None)
tolerance = kwargs.pop("tolerance", None)
fill_value = kwargs.pop("fill_value", None)
# Series.reindex doesn't use / need the axis kwarg
# We pop and ignore it here, to make writing Series/Frame generic code
# easier
kwargs.pop("axis", None)
if kwargs:
raise TypeError(
"reindex() got an unexpected keyword "
f'argument "{list(kwargs.keys())[0]}"'
)
self._consolidate_inplace()
# if all axes that are requested to reindex are equal, then only copy
# if indicated must have index names equal here as well as values
if all(
self._get_axis(axis).identical(ax)
for axis, ax in axes.items()
if ax is not None
):
if copy:
return self.copy()
return self
# check if we are a multi reindex
if self._needs_reindex_multi(axes, method, level):
return self._reindex_multi(axes, copy, fill_value)
# perform the reindex on the axes
return self._reindex_axes(
axes, level, limit, tolerance, method, fill_value, copy
).__finalize__(self)
def _reindex_axes(
self: FrameOrSeries, axes, level, limit, tolerance, method, fill_value, copy
) -> FrameOrSeries:
"""Perform the reindex for all the axes."""
obj = self
for a in self._AXIS_ORDERS:
labels = axes[a]
if labels is None:
continue
ax = self._get_axis(a)
new_index, indexer = ax.reindex(
labels, level=level, limit=limit, tolerance=tolerance, method=method
)
axis = self._get_axis_number(a)
obj = obj._reindex_with_indexers(
{axis: [new_index, indexer]},
fill_value=fill_value,
copy=copy,
allow_dups=False,
)
return obj
def _needs_reindex_multi(self, axes, method, level) -> bool_t:
"""Check if we do need a multi reindex."""
return (
(com.count_not_none(*axes.values()) == self._AXIS_LEN)
and method is None
and level is None
and not self._is_mixed_type
)
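# Added note (not from the upstream source): the "multi" reindex fast path
# (checked here, implemented per-subclass in ``_reindex_multi`` below) only
# applies when every axis is reindexed at once (e.g.
# ``df.reindex(index=..., columns=...)``), no fill ``method`` or MultiIndex
# ``level`` is requested, and the blocks are homogeneous; anything else goes
# through the one-axis-at-a-time loop in ``_reindex_axes`` above.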
def _reindex_multi(self, axes, copy, fill_value):
raise AbstractMethodError(self)
def _reindex_with_indexers(
self: FrameOrSeries,
reindexers,
fill_value=None,
copy: bool_t = False,
allow_dups: bool_t = False,
) -> FrameOrSeries:
"""allow_dups indicates an internal call here """
# reindex doing multiple operations on different axes if indicated
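# Added note (hedged, not from the upstream source): ``reindexers`` maps an
# axis number to a ``[new_index, indexer]`` pair, e.g.
# ``{0: [new_index, indexer]}`` as built by ``_reindex_axes`` above; the
# indexer may be None when the labels are unchanged and no take/fill is
# required.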
new_data = self._data
for axis in sorted(reindexers.keys()):
index, indexer = reindexers[axis]
baxis = self._get_block_manager_axis(axis)
if index is None:
continue
index = ensure_index(index)
if indexer is not None:
indexer = ensure_int64(indexer)
# TODO: speed up on homogeneous DataFrame objects
new_data = new_data.reindex_indexer(
index,
indexer,
axis=baxis,
fill_value=fill_value,
allow_dups=allow_dups,
copy=copy,
)
if copy and new_data is self._data:
new_data = new_data.copy()
return self._constructor(new_data).__finalize__(self)
def filter(
self: FrameOrSeries,
items=None,
like: Optional[str] = None,
regex: Optional[str] = None,
axis=None,
) -> FrameOrSeries:
"""
Subset the dataframe rows or columns according to the specified index labels.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
Keep labels from axis which are in items.
like : str
Keep labels from axis for which "like in label == True".
regex : str (regular expression)
Keep labels from axis for which re.search(regex, label) == True.
axis : {0 or 'index', 1 or 'columns', None}, default None
The axis to filter on, expressed either as an index (int)
or axis name (str). By default this is the info axis,
'index' for Series, 'columns' for DataFrame.
Returns
-------
same type as input object
See Also
--------
DataFrame.loc : Access a group of rows and columns
by label(s) or a boolean array.
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
Examples
--------
>>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
... index=['mouse', 'rabbit'],
... columns=['one', 'two', 'three'])
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
"""
nkw = com.count_not_none(items, like, regex)
if nkw > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` "
"are mutually exclusive"
)
if axis is None:
axis = self._info_axis_name
labels = self._get_axis(axis)
if items is not None:
name = self._get_axis_name(axis)
return self.reindex(**{name: [r for r in items if r in labels]})
elif like:
def f(x):
return like in ensure_str(x)
values = labels.map(f)
return self.loc(axis=axis)[values]
elif regex:
def f(x):
return matcher.search(ensure_str(x)) is not None
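# Added comment: ``f`` looks up ``matcher`` via closure, so compiling the
# regex on the next line (after the def) is fine -- it only needs to exist
# by the time ``labels.map(f)`` is evaluated.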
matcher = re.compile(regex)
values = labels.map(f)
return self.loc(axis=axis)[values]
else:
raise TypeError("Must pass either `items`, `like`, or `regex`")
def head(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
For negative values of `n`, this function returns all rows except
the last `n` rows, equivalent to ``df[:-n]``.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
same type as caller
The first `n` rows of the caller object.
See Also
--------
DataFrame.tail: Returns the last `n` rows.
Examples
--------
>>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
For negative values of `n`
>>> df.head(-3)
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
"""
return self.iloc[:n]
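# Added comment: the single positional slice above covers both cases -- for
# positive n, ``iloc[:n]`` keeps the first n rows; for negative n it drops
# the last ``|n|`` rows, matching the docstring example.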
def tail(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
"""
Return the last `n` rows.
This function returns last `n` rows from the object based on
position. It is useful for quickly verifying data, for example,
after sorting or appending rows.
For negative values of `n`, this function returns all rows except
the first `n` rows, equivalent to ``df[n:]``.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
type of caller
The last `n` rows of the caller object.
See Also
--------
DataFrame.head : The first `n` rows of the caller object.
Examples
--------
>>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last 5 lines
>>> df.tail()
animal
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last `n` lines (three in this case)
>>> df.tail(3)
animal
6 shark
7 whale
8 zebra
For negative values of `n`
>>> df.tail(-3)
animal
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
"""
if n == 0:
return self.iloc[0:0]
return self.iloc[-n:]
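# Added comment: the n == 0 branch above is needed because ``iloc[-0:]`` is
# the same as ``iloc[0:]`` and would return every row instead of none; for
# negative n, ``iloc[-n:]`` drops the first ``|n|`` rows.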
def sample(
self: FrameOrSeries,
n=None,
frac=None,
replace=False,
weights=None,
random_state=None,
axis=None,
) -> FrameOrSeries:
"""
Return a random sample of items from an axis of object.
You can use `random_state` for reproducibility.
Parameters
----------
n : int, optional
Number of items from axis to return. Cannot be used with `frac`.
Default = 1 if `frac` = None.
frac : float, optional
Fraction of axis items to return. Cannot be used with `n`.
replace : bool, default False
Allow or disallow sampling of the same row more than once.
weights : str or ndarray-like, optional
Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index. Index
values in weights not found in sampled object will be ignored and
index values in sampled object not in weights will be assigned
weights of zero.
If called on a DataFrame, will accept the name of a column
when axis = 0.
Unless weights are a Series, weights must be same length as axis
being sampled.
If weights do not sum to 1, they will be normalized to sum to 1.
Missing values in the weights column will be treated as zero.
Infinite values not allowed.
random_state : int or numpy.random.RandomState, optional
Seed for the random number generator (if int), or numpy RandomState
object.
axis : {0 or 'index', 1 or 'columns', None}, default None
Axis to sample. Accepts axis number or name. Default is stat axis
for given data type (0 for Series and DataFrames).
Returns
-------
Series or DataFrame
A new object of same type as caller containing `n` items randomly
sampled from the caller object.
See Also
--------
numpy.random.choice: Generates a random sample from a given 1-D numpy
array.
Notes
-----
If `frac` > 1, `replace` should be set to `True`.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],
... 'num_wings': [2, 0, 0, 0],
... 'num_specimen_seen': [10, 2, 1, 8]},
... index=['falcon', 'dog', 'spider', 'fish'])
>>> df
num_legs num_wings num_specimen_seen
falcon 2 2 10
dog 4 0 2
spider 8 0 1
fish 0 0 8
Extract 3 random elements from the ``Series`` ``df['num_legs']``:
Note that we use `random_state` to ensure the reproducibility of
the examples.
>>> df['num_legs'].sample(n=3, random_state=1)
fish 0
spider 8
falcon 2
Name: num_legs, dtype: int64
A random 50% sample of the ``DataFrame`` with replacement:
>>> df.sample(frac=0.5, replace=True, random_state=1)
num_legs num_wings num_specimen_seen
dog 4 0 2
fish 0 0 8
An upsampled sample of the ``DataFrame`` with replacement:
Note that the `replace` parameter has to be `True` for `frac` > 1.
>>> df.sample(frac=2, replace=True, random_state=1)
num_legs num_wings num_specimen_seen
dog 4 0 2
fish 0 0 8
falcon 2 2 10
falcon 2 2 10
fish 0 0 8
dog 4 0 2
fish 0 0 8
dog 4 0 2
Using a DataFrame column as weights. Rows with larger value in the
`num_specimen_seen` column are more likely to be sampled.
>>> df.sample(n=2, weights='num_specimen_seen', random_state=1)
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
axis_length = self.shape[axis]
# Process random_state argument
rs = com.random_state(random_state)
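# Added comment (hedged): ``com.random_state`` is expected to normalize the
# ``random_state`` argument -- an int seed, an existing
# ``numpy.random.RandomState``, or None -- into a RandomState-like object
# whose ``choice`` method is used below to draw the row positions.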
# Check weights for compliance
if weights is not None:
# If a series, align with frame
if isinstance(weights, ABCSeries):
weights = weights.reindex(self.axes[axis])
# Strings acceptable if a dataframe and axis = 0
if isinstance(weights, str):
if isinstance(self, ABCDataFrame):
if axis == 0:
try:
weights = self[weights]
except KeyError as err:
raise KeyError(
"String passed to weights not a valid column"
) from err
else:
raise ValueError(
"Strings can only be passed to "
"weights when sampling from rows on "
"a DataFrame"
)
else:
raise ValueError(
"Strings cannot be passed as weights "
"when sampling from a Series."
)
weights = pd.Series(weights, dtype="float64")
if len(weights) != axis_length:
raise ValueError(
"Weights and axis to be sampled must be of same length"
)
if (weights == np.inf).any() or (weights == -np.inf).any():
raise ValueError("weight vector may not include `inf` values")
if (weights < 0).any():
raise ValueError("weight vector may not include negative values")
# If has nan, set to zero.
weights = weights.fillna(0)
# Renormalize if don't sum to 1
if weights.sum() != 1:
if weights.sum() != 0:
weights = weights / weights.sum()
else:
raise ValueError("Invalid weights: weights sum to zero")
weights = weights.values
# If no frac or n, default to n=1.
if n is None and frac is None:
n = 1
elif frac is not None and frac > 1 and not replace:
raise ValueError(
"Replace has to be set to `True` when "
"upsampling the population `frac` > 1."
)
elif n is not None and frac is None and n % 1 != 0:
raise ValueError("Only integers accepted as `n` values")
elif n is None and frac is not None:
n = int(round(frac * axis_length))
elif n is not None and frac is not None:
raise ValueError("Please enter a value for `frac` OR `n`, not both")
# Check for negative sizes
if n < 0:
raise ValueError(
"A negative number of rows requested. Please provide positive value."
)
locs = rs.choice(axis_length, size=n, replace=replace, p=weights)
return self.take(locs, axis=axis)
_shared_docs[
"pipe"
] = r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
Function to apply to the %(klass)s.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the %(klass)s.
args : iterable, optional
Positional arguments passed into ``func``.
kwargs : mapping, optional
A dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
See Also
--------
DataFrame.apply : Apply a function along input axis of DataFrame.
DataFrame.applymap : Apply a function elementwise on a whole DataFrame.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. Instead of writing
>>> f(g(h(df), arg1=a), arg2=b, arg3=c)
You can write
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe(f, arg2=b, arg3=c)
... )
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``arg2``:
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe((f, 'arg2'), arg1=a, arg3=c)
... )
"""
@Appender(_shared_docs["pipe"] % _shared_doc_kwargs)
def pipe(self, func, *args, **kwargs):
return com.pipe(self, func, *args, **kwargs)
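# Added comment (hedged): ``com.pipe`` handles the tuple form described in
# the shared docstring above -- when ``func`` is a ``(callable, data_keyword)``
# pair, ``self`` is passed under that keyword rather than positionally.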
_shared_docs["aggregate"] = dedent(
"""
Aggregate using one or more operations over the specified axis.
%(versionadded)s
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.sum, 'mean']``
- dict of axis labels -> functions, function names or list of such.
%(axis)s
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
scalar, Series or DataFrame
The return can be:
* scalar : when Series.agg is called with single function
* Series : when DataFrame.agg is called with a single function
* DataFrame : when DataFrame.agg is called with several functions
Return scalar, Series or DataFrame.
%(see_also)s
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
A passed user-defined-function will be passed a Series for evaluation.
%(examples)s"""
)
_shared_docs[
"transform"
] = """
Call ``func`` on self producing a %(klass)s with transformed values.
Produced %(klass)s will have same axis length as self.
Parameters
----------
func : function, str, list or dict
Function to use for transforming the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.exp, 'sqrt']``
- dict of axis labels -> functions, function names or list of such.
%(axis)s
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
%(klass)s
A %(klass)s that must have the same length as self.
Raises
------
ValueError : If the returned %(klass)s has a different length than self.
See Also
--------
%(klass)s.agg : Only perform aggregating type operations.
%(klass)s.apply : Invoke function on a %(klass)s.
Examples
--------
>>> df = pd.DataFrame({'A': range(3), 'B': range(1, 4)})
>>> df
A B
0 0 1
1 1 2
2 2 3
>>> df.transform(lambda x: x + 1)
A B
0 1 2
1 2 3
2 3 4
Even though the resulting %(klass)s must have the same length as the
input %(klass)s, it is possible to provide several input functions:
>>> s = pd.Series(range(3))
>>> s
0 0
1 1
2 2
dtype: int64
>>> s.transform([np.sqrt, np.exp])
sqrt exp
0 0.000000 1.000000
1 1.000000 2.718282
2 1.414214 7.389056
"""
# ----------------------------------------------------------------------
# Attribute access
def __finalize__(
self: FrameOrSeries, other, method=None, **kwargs
) -> FrameOrSeries:
"""
Propagate metadata from other to self.
Parameters
----------
other : the object from which to get the attributes that we are going
to propagate
method : optional, a passed method name ; possibly to take different
types of propagation actions based on this
"""
if isinstance(other, NDFrame):
for name in other.attrs:
self.attrs[name] = other.attrs[name]
# For subclasses using _metadata.
for name in self._metadata:
assert isinstance(name, str)
object.__setattr__(self, name, getattr(other, name, None))
return self
def __getattr__(self, name: str):
"""
After regular attribute access, try looking up the name
This allows simpler access to columns for interactive use.
"""
# Note: obj.x will always call obj.__getattribute__('x') prior to
# calling obj.__getattr__('x').
if (
name in self._internal_names_set
or name in self._metadata
or name in self._accessors
):
return object.__getattribute__(self, name)
else:
if self._info_axis._can_hold_identifiers_and_holds_name(name):
return self[name]
return object.__getattribute__(self, name)
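# Illustrative sketch (added, not from the upstream source): for
# ``df = pd.DataFrame({"a": [1, 2]})``, attribute access ``df.a`` falls
# through to ``self["a"]`` here because "a" is a valid identifier held by
# the info axis (the columns).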
def __setattr__(self, name: str, value) -> None:
"""
After regular attribute access, try setting the name
This allows simpler access to columns for interactive use.
"""
# first try regular attribute access via __getattribute__, so that
# e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify
# the same attribute.
try:
object.__getattribute__(self, name)
return object.__setattr__(self, name, value)
except AttributeError:
pass
# if this fails, go on to more involved attribute setting
# (note that this matches __getattr__, above).
if name in self._internal_names_set:
object.__setattr__(self, name, value)
elif name in self._metadata:
object.__setattr__(self, name, value)
else:
try:
existing = getattr(self, name)
if isinstance(existing, Index):
object.__setattr__(self, name, value)
elif name in self._info_axis:
self[name] = value
else:
object.__setattr__(self, name, value)
except (AttributeError, TypeError):
if isinstance(self, ABCDataFrame) and (is_list_like(value)):
warnings.warn(
"Pandas doesn't allow columns to be "
"created via a new attribute name - see "
"https://pandas.pydata.org/pandas-docs/"
"stable/indexing.html#attribute-access",
stacklevel=2,
)
object.__setattr__(self, name, value)
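# Illustrative sketch (added, not from the upstream source): assigning a
# list-like to a brand-new attribute on a DataFrame, e.g.
# ``df.new_col = [1, 2]``, lands in the except-branch above and only sets an
# instance attribute (with a UserWarning); to create a column, use
# ``df["new_col"] = [1, 2]`` instead.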
def _dir_additions(self):
"""
add the string-like attributes from the info_axis.
If info_axis is a MultiIndex, its first level values are used.
"""
additions = {
c
for c in self._info_axis.unique(level=0)[:100]
if isinstance(c, str) and c.isidentifier()
}
return super()._dir_additions().union(additions)
# ----------------------------------------------------------------------
# Consolidation of internals
def _protect_consolidate(self, f):
"""
Consolidate _data -- if the blocks have changed, then clear the
cache
"""
blocks_before = len(self._data.blocks)
result = f()
if len(self._data.blocks) != blocks_before:
self._clear_item_cache()
return result
def _consolidate_inplace(self) -> None:
"""Consolidate data in place and return None"""
def f():
self._data = self._data.consolidate()
self._protect_consolidate(f)
def _consolidate(self, inplace: bool_t = False):
"""
Compute NDFrame with "consolidated" internals (data of each dtype
grouped together in a single ndarray).
Parameters
----------
inplace : bool, default False
If False return new object, otherwise modify existing object.
Returns
-------
consolidated : same type as caller
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if inplace:
self._consolidate_inplace()
else:
f = lambda: self._data.consolidate()
cons_data = self._protect_consolidate(f)
return self._constructor(cons_data).__finalize__(self)
@property
def _is_mixed_type(self) -> bool_t:
f = lambda: self._data.is_mixed_type
return self._protect_consolidate(f)
@property
def _is_numeric_mixed_type(self) -> bool_t:
f = lambda: self._data.is_numeric_mixed_type
return self._protect_consolidate(f)
def _check_inplace_setting(self, value) -> bool_t:
""" check whether we allow in-place setting with this type of value """
if self._is_mixed_type:
if not self._is_numeric_mixed_type:
# allow an actual np.nan thru
if is_float(value) and np.isnan(value):
return True
raise TypeError(
"Cannot do inplace boolean setting on "
"mixed-types with a non np.nan value"
)
return True
def _get_numeric_data(self):
return self._constructor(self._data.get_numeric_data()).__finalize__(self)
def _get_bool_data(self):
return self._constructor(self._data.get_bool_data()).__finalize__(self)
# ----------------------------------------------------------------------
# Internal Interface Methods
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame.
.. warning::
We recommend using :meth:`DataFrame.to_numpy` instead.
Only the values in the DataFrame will be returned, the axes labels
will be removed.
Returns
-------
numpy.ndarray
The values of the DataFrame.
See Also
--------
DataFrame.to_numpy : Recommended alternative to this method.
DataFrame.index : Retrieve the index labels.
DataFrame.columns : Retrieving the column names.
Notes
-----
The dtype will be a lower-common-denominator dtype (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen. Use this
with care if you are not dealing with the blocks.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. If dtypes are int32 and uint8, dtype will be upcast to
int32. By :func:`numpy.find_common_type` convention, mixing int64
and uint64 will result in a float64 dtype.
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results
in an array of the same type.
>>> df = pd.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]], dtype=int64)
A DataFrame with mixed type columns (e.g., str/object, int64, float32)
results in an ndarray of the broadest type that accommodates these
mixed types (e.g., object).
>>> df2 = pd.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 1),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 1],
['monkey', nan, None]], dtype=object)
"""
self._consolidate_inplace()
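# Added comment (hedged): for a DataFrame the block manager stores values
# with the axes reversed (blocks are laid out per column), so
# ``_AXIS_REVERSED`` tells ``as_array`` to transpose back to the familiar
# (rows, columns) orientation.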
return self._data.as_array(transpose=self._AXIS_REVERSED)
@property
def _values(self) -> np.ndarray:
"""internal implementation"""
return self.values
def _internal_get_values(self) -> np.ndarray:
"""
Return an ndarray after converting sparse values to dense.
This is the same as ``.values`` for non-sparse data. For sparse
data contained in a `SparseArray`, the data are first
converted to a dense representation.
Returns
-------
numpy.ndarray
Numpy representation of DataFrame.
See Also
--------
values : Numpy representation of DataFrame.
SparseArray : Container for sparse data.
"""
return self.values
@property
def dtypes(self):
"""
Return the dtypes in the DataFrame.
This returns a Series with the data type of each column.
The result's index is the original DataFrame's columns. Columns
with mixed types are stored with the ``object`` dtype. See
:ref:`the User Guide <basics.dtypes>` for more.
Returns
-------
pandas.Series
The data type of each column.
Examples
--------
>>> df = pd.DataFrame({'float': [1.0],
... 'int': [1],
... 'datetime': [pd.Timestamp('20180310')],
... 'string': ['foo']})
>>> df.dtypes
float float64
int int64
datetime datetime64[ns]
string object
dtype: object
"""
from pandas import Series
return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_)
def _to_dict_of_blocks(self, copy: bool_t = True):
"""
Return a dict of dtype -> Constructor Types, where each value
holds a single homogeneous dtype.
Internal ONLY
"""
return {
k: self._constructor(v).__finalize__(self)
for k, v in self._data.to_dict(copy=copy).items()
}
def astype(
self: FrameOrSeries, dtype, copy: bool_t = True, errors: str = "raise"
) -> FrameOrSeries:
"""
Cast a pandas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
copy : bool, default True
Return a copy when ``copy=True`` (be very careful setting
``copy=False`` as changes to values then may propagate to other
pandas objects).
errors : {'raise', 'ignore'}, default 'raise'
Control raising of exceptions on invalid data for provided dtype.
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to a numeric type.
numpy.ndarray.astype : Cast a numpy array to a specified type.
Examples
--------
Create a DataFrame:
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df.dtypes
col1 int64
col2 int64
dtype: object
Cast all columns to int32:
>>> df.astype('int32').dtypes
col1 int32
col2 int32
dtype: object
Cast col1 to int32 using a dictionary:
>>> df.astype({'col1': 'int32'}).dtypes
col1 int32
col2 int64
dtype: object
Create a series:
>>> ser = pd.Series([1, 2], dtype='int32')
>>> ser
0 1
1 2
dtype: int32
>>> ser.astype('int64')
0 1
1 2
dtype: int64
Convert to categorical type:
>>> ser.astype('category')
0 1
1 2
dtype: category
Categories (2, int64): [1, 2]
Convert to ordered categorical type with custom ordering:
>>> cat_dtype = pd.api.types.CategoricalDtype(
... categories=[2, 1], ordered=True)
>>> ser.astype(cat_dtype)
0 1
1 2
dtype: category
Categories (2, int64): [2 < 1]
Note that using ``copy=False`` and changing data on a new
pandas object may propagate changes:
>>> s1 = pd.Series([1, 2])
>>> s2 = s1.astype('int64', copy=False)
>>> s2[0] = 10
>>> s1 # note that s1[0] has changed too
0 10
1 2
dtype: int64
"""
if is_dict_like(dtype):
if self.ndim == 1: # i.e. Series
if len(dtype) > 1 or self.name not in dtype:
raise KeyError(
"Only the Series name can be used for "
"the key in Series dtype mappings."
)
new_type = dtype[self.name]
return self.astype(new_type, copy, errors)
for col_name in dtype.keys():
if col_name not in self:
raise KeyError(
"Only a column name can be used for the "
"key in a dtype mappings argument."
)
results = []
for col_name, col in self.items():
if col_name in dtype:
results.append(
col.astype(dtype=dtype[col_name], copy=copy, errors=errors)
)
else:
results.append(col.copy() if copy else col)
elif is_extension_array_dtype(dtype) and self.ndim > 1:
# GH 18099/22869: columnwise conversion to extension dtype
# GH 24704: use iloc to handle duplicate column names
results = [
self.iloc[:, i].astype(dtype, copy=copy)
for i in range(len(self.columns))
]
else:
# else, only a single dtype is given
new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors)
return self._constructor(new_data).__finalize__(self)
# GH 19920: retain column metadata after concat
result = pd.concat(results, axis=1, copy=False)
result.columns = self.columns
return result
def copy(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
"""
Make a copy of this object's indices and data.
When ``deep=True`` (default), a new object will be created with a
copy of the calling object's data and indices. Modifications to
the data or indices of the copy will not be reflected in the
original object (see notes below).
When ``deep=False``, a new object will be created without copying
the calling object's data or index (only references to the data
and index are copied). Any changes to the data of the original
will be reflected in the shallow copy (and vice versa).
Parameters
----------
deep : bool, default True
Make a deep copy, including a copy of the data and the indices.
With ``deep=False`` neither the indices nor the data are copied.
Returns
-------
copy : Series or DataFrame
Object type matches caller.
Notes
-----
When ``deep=True``, data is copied but actual Python objects
will not be copied recursively, only the reference to the object.
This is in contrast to `copy.deepcopy` in the Standard Library,
which recursively copies object data (see examples below).
While ``Index`` objects are copied when ``deep=True``, the underlying
numpy array is not copied for performance reasons. Since ``Index`` is
immutable, the underlying data can be safely shared and a copy
is not needed.
Examples
--------
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> s
a 1
b 2
dtype: int64
>>> s_copy = s.copy()
>>> s_copy
a 1
b 2
dtype: int64
**Shallow copy versus default (deep) copy:**
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> deep = s.copy()
>>> shallow = s.copy(deep=False)
Shallow copy shares data and index with original.
>>> s is shallow
False
>>> s.values is shallow.values and s.index is shallow.index
True
Deep copy has own copy of data and index.
>>> s is deep
False
>>> s.values is deep.values or s.index is deep.index
False
Updates to the data shared by shallow copy and original are reflected
in both; deep copy remains unchanged.
>>> s[0] = 3
>>> shallow[1] = 4
>>> s
a 3
b 4
dtype: int64
>>> shallow
a 3
b 4
dtype: int64
>>> deep
a 1
b 2
dtype: int64
Note that when copying an object containing Python objects, a deep copy
will copy the data, but will not do so recursively. Updating a nested
data object will be reflected in the deep copy.
>>> s = pd.Series([[1, 2], [3, 4]])
>>> deep = s.copy()
>>> s[0][0] = 10
>>> s
0 [10, 2]
1 [3, 4]
dtype: object
>>> deep
0 [10, 2]
1 [3, 4]
dtype: object
"""
data = self._data.copy(deep=deep)
return self._constructor(data).__finalize__(self)
def __copy__(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
return self.copy(deep=deep)
def __deepcopy__(self: FrameOrSeries, memo=None) -> FrameOrSeries:
"""
Parameters
----------
memo, default None
Standard signature. Unused
"""
return self.copy(deep=True)
def _convert(
self: FrameOrSeries,
datetime: bool_t = False,
numeric: bool_t = False,
timedelta: bool_t = False,
coerce: bool_t = False,
copy: bool_t = True,
) -> FrameOrSeries:
"""
Attempt to infer better dtype for object columns
Parameters
----------
datetime : bool, default False
If True, convert to date where possible.
numeric : bool, default False
If True, attempt to convert to numbers (including strings), with
unconvertible values becoming NaN.
timedelta : bool, default False
If True, convert to timedelta where possible.
coerce : bool, default False
If True, force conversion with unconvertible values converted to
nulls (NaN or NaT).
copy : bool, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
Returns
-------
converted : same as input object
"""
validate_bool_kwarg(datetime, "datetime")
validate_bool_kwarg(numeric, "numeric")
validate_bool_kwarg(timedelta, "timedelta")
validate_bool_kwarg(coerce, "coerce")
validate_bool_kwarg(copy, "copy")
return self._constructor(
self._data.convert(
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=copy,
)
).__finalize__(self)
def infer_objects(self: FrameOrSeries) -> FrameOrSeries:
"""
Attempt to infer better dtypes for object columns.
Attempts soft conversion of object-dtyped
columns, leaving non-object and unconvertible
columns unchanged. The inference rules are the
same as during normal Series/DataFrame construction.
.. versionadded:: 0.21.0
Returns
-------
converted : same type as input object
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type.
convert_dtypes : Convert argument to best possible dtype.
Examples
--------
>>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})
>>> df = df.iloc[1:]
>>> df
A
1 1
2 2
3 3
>>> df.dtypes
A object
dtype: object
>>> df.infer_objects().dtypes
A int64
dtype: object
"""
# numeric=False necessary to only soft convert;
# python objects will still be converted to
# native numpy numeric types
return self._constructor(
self._data.convert(
datetime=True, numeric=False, timedelta=True, coerce=False, copy=True
)
).__finalize__(self)
def convert_dtypes(
self: FrameOrSeries,
infer_objects: bool_t = True,
convert_string: bool_t = True,
convert_integer: bool_t = True,
convert_boolean: bool_t = True,
) -> FrameOrSeries:
"""
Convert columns to best possible dtypes using dtypes supporting ``pd.NA``.
.. versionadded:: 1.0.0
Parameters
----------
infer_objects : bool, default True
Whether object dtypes should be converted to the best possible types.
convert_string : bool, default True
Whether object dtypes should be converted to ``StringDtype()``.
convert_integer : bool, default True
Whether, if possible, conversion can be done to integer extension types.
convert_boolean : bool, default True
Whether object dtypes should be converted to ``BooleanDtype()``.
Returns
-------
Series or DataFrame
Copy of input object with new dtype.
See Also
--------
infer_objects : Infer dtypes of objects.
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to a numeric type.
Notes
-----
By default, ``convert_dtypes`` will attempt to convert a Series (or each
Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options
``convert_string``, ``convert_integer``, and ``convert_boolean``, it is
possible to turn off individual conversions to ``StringDtype``, the integer
extension types or ``BooleanDtype``, respectively.
For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference
rules as during normal Series/DataFrame construction. Then, if possible,
convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer extension
type, otherwise leave as ``object``.
If the dtype is integer, convert to an appropriate integer extension type.
If the dtype is numeric, and consists of all integers, convert to an
appropriate integer extension type.
In the future, as new dtypes are added that support ``pd.NA``, the results
of this method will change to support those new dtypes.
Examples
--------
>>> df = pd.DataFrame(
... {
... "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
... "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")),
... "c": pd.Series([True, False, np.nan], dtype=np.dtype("O")),
... "d": pd.Series(["h", "i", np.nan], dtype=np.dtype("O")),
... "e": pd.Series([10, np.nan, 20], dtype=np.dtype("float")),
... "f": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")),
... }
... )
Start with a DataFrame with default dtypes.
>>> df
a b c d e f
0 1 x True h 10.0 NaN
1 2 y False i NaN 100.5
2 3 z NaN NaN 20.0 200.0
>>> df.dtypes
a int32
b object
c object
d object
e float64
f float64
dtype: object
Convert the DataFrame to use best possible dtypes.
>>> dfn = df.convert_dtypes()
>>> dfn
a b c d e f
0 1 x True h 10 NaN
1 2 y False i <NA> 100.5
2 3 z <NA> <NA> 20 200.0
>>> dfn.dtypes
a Int32
b string
c boolean
d string
e Int64
f float64
dtype: object
Start with a Series of strings and missing data represented by ``np.nan``.
>>> s = pd.Series(["a", "b", np.nan])
>>> s
0 a
1 b
2 NaN
dtype: object
Obtain a Series with dtype ``StringDtype``.
>>> s.convert_dtypes()
0 a
1 b
2 <NA>
dtype: string
"""
if self.ndim == 1:
return self._convert_dtypes(
infer_objects, convert_string, convert_integer, convert_boolean
)
else:
results = [
col._convert_dtypes(
infer_objects, convert_string, convert_integer, convert_boolean
)
for col_name, col in self.items()
]
result = pd.concat(results, axis=1, copy=False)
return result
# ----------------------------------------------------------------------
# Filling NA's
@doc(**_shared_doc_kwargs)
def fillna(
self: FrameOrSeries,
value=None,
method=None,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series, or DataFrame
Value to use to fill holes (e.g. 0), alternately a
dict/Series/DataFrame of values specifying which value to use for
each index (for a Series) or column (for a DataFrame). Values not
in the dict/Series/DataFrame will not be filled. This value cannot
be a list.
method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use next valid observation to fill gap.
axis : {axes_single_arg}
Axis along which to fill missing values.
inplace : bool, default False
If True, fill in-place. Note: this will modify any
other views on this object (e.g., a no-copy slice for a column in a
DataFrame).
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
downcast : dict, default is None
A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible).
Returns
-------
{klass} or None
Object with missing values filled or None if ``inplace=True``.
See Also
--------
interpolate : Fill NaN values using interpolation.
reindex : Conform object to new index.
asfreq : Convert TimeSeries to specified frequency.
Examples
--------
>>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],
... [3, 4, np.nan, 1],
... [np.nan, np.nan, np.nan, 5],
... [np.nan, 3, np.nan, 4]],
... columns=list('ABCD'))
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 NaN 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 0.0 4
We can also propagate non-null values forward or backward.
>>> df.fillna(method='ffill')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 NaN 4
Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
2, and 3 respectively.
>>> values = {{'A': 0, 'B': 1, 'C': 2, 'D': 3}}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 2.0 4
Only replace the first NaN element.
>>> df.fillna(value=values, limit=1)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 NaN 1
2 NaN 1.0 NaN 5
3 NaN 3.0 NaN 4
"""
inplace = validate_bool_kwarg(inplace, "inplace")
value, method = validate_fillna_kwargs(value, method)
self._consolidate_inplace()
# set the default here, so functions examining the signature
# can detect if something was set (e.g. in groupby) (GH9221)
if axis is None:
axis = 0
axis = self._get_axis_number(axis)
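# Added summary comment: from here the method splits into two paths --
# ``value is None`` fills by propagation (``method``) through the block
# manager's ``interpolate``, while an explicit ``value`` is dispatched on its
# type (scalar, dict/Series filled column by column, or an aligned DataFrame
# applied via ``where``).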
if value is None:
if self._is_mixed_type and axis == 1:
if inplace:
raise NotImplementedError()
result = self.T.fillna(method=method, limit=limit).T
# need to downcast here because of all of the transposes
result._data = result._data.downcast()
return result
new_data = self._data.interpolate(
method=method,
axis=axis,
limit=limit,
inplace=inplace,
coerce=True,
downcast=downcast,
)
else:
if len(self._get_axis(axis)) == 0:
return self
if self.ndim == 1:
if isinstance(value, (dict, ABCSeries)):
value = create_series_with_explicit_dtype(
value, dtype_if_empty=object
)
elif not is_list_like(value):
pass
else:
raise TypeError(
'"value" parameter must be a scalar, dict '
"or Series, but you passed a "
f'"{type(value).__name__}"'
)
new_data = self._data.fillna(
value=value, limit=limit, inplace=inplace, downcast=downcast
)
elif isinstance(value, (dict, ABCSeries)):
if axis == 1:
raise NotImplementedError(
"Currently only can fill "
"with dict/Series column "
"by column"
)
result = self if inplace else self.copy()
for k, v in value.items():
if k not in result:
continue
obj = result[k]
obj.fillna(v, limit=limit, inplace=True, downcast=downcast)
return result if not inplace else None
elif not is_list_like(value):
new_data = self._data.fillna(
value=value, limit=limit, inplace=inplace, downcast=downcast
)
elif isinstance(value, ABCDataFrame) and self.ndim == 2:
new_data = self.where(self.notna(), value)
else:
raise ValueError(f"invalid fill value with a {type(value)}")
if inplace:
self._update_inplace(new_data)
return None
else:
return self._constructor(new_data).__finalize__(self)
def ffill(
self: FrameOrSeries,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
"""
Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.
Returns
-------
%(klass)s or None
Object with missing values filled or None if ``inplace=True``.
"""
return self.fillna(
method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
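# A minimal usage sketch (added for illustration, not part of the upstream
# docstring): forward-filling a small Series.
#
# >>> pd.Series([1.0, None, 3.0]).ffill()
# 0    1.0
# 1    1.0
# 2    3.0
# dtype: float64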
def bfill(
self: FrameOrSeries,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
"""
Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.
Returns
-------
%(klass)s or None
Object with missing values filled or None if ``inplace=True``.
"""
return self.fillna(
method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
_shared_docs[
"replace"
] = """
Replace values given in `to_replace` with `value`.
Values of the %(klass)s are replaced with other values dynamically.
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.
Parameters
----------
to_replace : str, regex, list, dict, Series, int, float, or None
How to find the values that will be replaced.
* numeric, str or regex:
- numeric: numeric values equal to `to_replace` will be
replaced with `value`
- str: string exactly matching `to_replace` will be replaced
with `value`
- regex: regexs matching `to_replace` will be replaced with
`value`
* list of str, regex, or numeric:
- First, if `to_replace` and `value` are both lists, they
**must** be the same length.
- Second, if ``regex=True`` then all of the strings in **both**
lists will be interpreted as regexs otherwise they will match
directly. This doesn't matter much for `value` since there
are only a few possible substitution regexes you can use.
- str, regex and numeric rules apply as above.
* dict:
- Dicts can be used to specify different replacement values
for different existing values. For example,
``{'a': 'b', 'y': 'z'}`` replaces the value 'a' with 'b' and
'y' with 'z'. To use a dict in this way the `value`
parameter should be `None`.
- For a DataFrame a dict can specify that different values
should be replaced in different columns. For example,
``{'a': 1, 'b': 'z'}`` looks for the value 1 in column 'a'
and the value 'z' in column 'b' and replaces these values
with whatever is specified in `value`. The `value` parameter
should not be ``None`` in this case. You can treat this as a
special case of passing two lists except that you are
specifying the column to search in.
- For a DataFrame nested dictionaries, e.g.,
``{'a': {'b': np.nan}}``, are read as follows: look in column
'a' for the value 'b' and replace it with NaN. The `value`
parameter should be ``None`` to use a nested dict in this
way. You can nest regular expressions as well. Note that
column names (the top-level dictionary keys in a nested
dictionary) **cannot** be regular expressions.
* None:
- This means that the `regex` argument must be a string,
compiled regular expression, or list, dict, ndarray or
Series of such elements. If `value` is also ``None`` then
this **must** be a nested dictionary or Series.
See the examples section for examples of each of these.
value : scalar, dict, list, str, regex, default None
Value to replace any values matching `to_replace` with.
For a DataFrame a dict of values can be used to specify which
value to use for each column (columns not in the dict will not be
filled). Regular expressions, strings and lists or dicts of such
objects are also allowed.
inplace : bool, default False
If True, in place. Note: this will modify any
other views on this object (e.g. a column from a DataFrame).
Returns the caller if this is True.
limit : int, default None
Maximum size gap to forward or backward fill.
regex : bool or same types as `to_replace`, default False
Whether to interpret `to_replace` and/or `value` as regular
expressions. If this is ``True`` then `to_replace` *must* be a
string. Alternatively, this could be a regular expression or a
list, dict, or array of regular expressions in which case
`to_replace` must be ``None``.
method : {'pad', 'ffill', 'bfill', `None`}
The method to use for replacement, when `to_replace` is a
scalar, list or tuple and `value` is ``None``.
.. versionchanged:: 0.23.0
Added to DataFrame.
Returns
-------
%(klass)s
Object after replacement.
Raises
------
AssertionError
* If `regex` is not a ``bool`` and `to_replace` is not
``None``.
TypeError
* If `to_replace` is not a scalar, array-like, ``dict``, or ``None``
* If `to_replace` is a ``dict`` and `value` is not a ``list``,
``dict``, ``ndarray``, or ``Series``
* If `to_replace` is ``None`` and `regex` is not compilable
into a regular expression or is a list, dict, ndarray, or
Series.
* When replacing multiple ``bool`` or ``datetime64`` objects and
the arguments to `to_replace` does not match the type of the
value being replaced
ValueError
* If a ``list`` or an ``ndarray`` is passed to `to_replace` and
`value` but they are not the same length.
See Also
--------
%(klass)s.fillna : Fill NA values.
%(klass)s.where : Replace values based on boolean condition.
Series.str.replace : Simple string replacement.
Notes
-----
* Regex substitution is performed under the hood with ``re.sub``. The
rules for substitution for ``re.sub`` are the same.
* Regular expressions will only substitute on strings, meaning you
cannot provide, for example, a regular expression matching floating
point numbers and expect the columns in your frame that have a
numeric dtype to be matched. However, if those floating point
numbers *are* strings, then you can do this.
* This method has *a lot* of options. You are encouraged to experiment
and play with this method to gain intuition about how it works.
* When dict is used as the `to_replace` value, it is like
key(s) in the dict are the to_replace part and
value(s) in the dict are the value parameter.
Examples
--------
**Scalar `to_replace` and `value`**
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.replace(0, 5)
0 5
1 1
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame({'A': [0, 1, 2, 3, 4],
... 'B': [5, 6, 7, 8, 9],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> df.replace(0, 5)
A B C
0 5 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
**List-like `to_replace`**
>>> df.replace([0, 1, 2, 3], 4)
A B C
0 4 5 a
1 4 6 b
2 4 7 c
3 4 8 d
4 4 9 e
>>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])
A B C
0 4 5 a
1 3 6 b
2 2 7 c
3 1 8 d
4 4 9 e
>>> s.replace([1, 2], method='bfill')
0 0
1 3
2 3
3 3
4 4
dtype: int64
**dict-like `to_replace`**
>>> df.replace({0: 10, 1: 100})
A B C
0 10 5 a
1 100 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': 0, 'B': 5}, 100)
A B C
0 100 100 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': {0: 100, 4: 400}})
A B C
0 100 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 400 9 e
**Regular expression `to_replace`**
>>> df = pd.DataFrame({'A': ['bat', 'foo', 'bait'],
... 'B': ['abc', 'bar', 'xyz']})
>>> df.replace(to_replace=r'^ba.$', value='new', regex=True)
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace({'A': r'^ba.$'}, {'A': 'new'}, regex=True)
A B
0 new abc
1 foo bar
2 bait xyz
>>> df.replace(regex=r'^ba.$', value='new')
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'})
A B
0 new abc
1 xyz new
2 bait xyz
>>> df.replace(regex=[r'^ba.$', 'foo'], value='new')
A B
0 new abc
1 new new
2 bait xyz
Note that when replacing multiple ``bool`` or ``datetime64`` objects,
the data types in the `to_replace` parameter must match the data
type of the value being replaced:
>>> df = pd.DataFrame({'A': [True, False, True],
... 'B': [False, True, False]})
>>> df.replace({'a string': 'new value', True: False}) # raises
Traceback (most recent call last):
...
TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str'
This raises a ``TypeError`` because one of the ``dict`` keys is not of
the correct type for replacement.
Compare the behavior of ``s.replace({'a': None})`` and
``s.replace('a', None)`` to understand the peculiarities
of the `to_replace` parameter:
>>> s = pd.Series([10, 'a', 'a', 'b', 'a'])
When one uses a dict as the `to_replace` value, it is like the
value(s) in the dict are equal to the `value` parameter.
``s.replace({'a': None})`` is equivalent to
``s.replace(to_replace={'a': None}, value=None, method=None)``:
>>> s.replace({'a': None})
0 10
1 None
2 None
3 b
4 None
dtype: object
When ``value=None`` and `to_replace` is a scalar, list or
tuple, `replace` uses the method parameter (default 'pad') to do the
replacement. So this is why the 'a' values are being replaced by 10
in rows 1 and 2 and 'b' in row 4 in this case.
The command ``s.replace('a', None)`` is actually equivalent to
``s.replace(to_replace='a', value=None, method='pad')``:
>>> s.replace('a', None)
0 10
1 10
2 10
3 b
4 b
dtype: object
"""
@Appender(_shared_docs["replace"] % _shared_doc_kwargs)
def replace(
self,
to_replace=None,
value=None,
inplace=False,
limit=None,
regex=False,
method="pad",
):
if not (
is_scalar(to_replace)
or isinstance(to_replace, pd.Series)
or is_re_compilable(to_replace)
or is_list_like(to_replace)
):
raise TypeError(
"Expecting 'to_replace' to be either a scalar, array-like, "
"dict or None, got invalid type "
f"{repr(type(to_replace).__name__)}"
)
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_bool(regex) and to_replace is not None:
raise AssertionError("'to_replace' must be 'None' if 'regex' is not a bool")
self._consolidate_inplace()
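# Added summary comment: the branches below mirror the shared docstring --
# with ``value is None`` the call either performs method-based replacement
# (scalar/list ``to_replace``) or unpacks a (possibly nested) mapping into
# separate ``to_replace``/``value`` arguments and recurses; otherwise the
# work is dispatched on whether ``to_replace`` is dict-like, list-like,
# None (the regex-only form) or a scalar.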
if value is None:
# passing a single value that is scalar like
# when value is None (GH5319), for compat
if not is_dict_like(to_replace) and not is_dict_like(regex):
to_replace = [to_replace]
if isinstance(to_replace, (tuple, list)):
if isinstance(self, ABCDataFrame):
return self.apply(
_single_replace, args=(to_replace, method, inplace, limit)
)
return _single_replace(self, to_replace, method, inplace, limit)
if not is_dict_like(to_replace):
if not is_dict_like(regex):
raise TypeError(
'If "to_replace" and "value" are both None '
'and "to_replace" is not a list, then '
"regex must be a mapping"
)
to_replace = regex
regex = True
items = list(to_replace.items())
keys, values = zip(*items) if items else ([], [])
are_mappings = [is_dict_like(v) for v in values]
if any(are_mappings):
if not all(are_mappings):
raise TypeError(
"If a nested mapping is passed, all values "
"of the top level mapping must be mappings"
)
# passed a nested dict/Series
to_rep_dict = {}
value_dict = {}
for k, v in items:
keys, values = list(zip(*v.items())) or ([], [])
to_rep_dict[k] = list(keys)
value_dict[k] = list(values)
to_replace, value = to_rep_dict, value_dict
else:
to_replace, value = keys, values
return self.replace(
to_replace, value, inplace=inplace, limit=limit, regex=regex
)
else:
# need a non-zero len on all axes
if not self.size:
return self
new_data = self._data
if is_dict_like(to_replace):
if is_dict_like(value): # {'A' : NA} -> {'A' : 0}
res = self if inplace else self.copy()
for c, src in to_replace.items():
if c in value and c in self:
# object conversion is handled in
# series.replace which is called recursively
res[c] = res[c].replace(
to_replace=src,
value=value[c],
inplace=False,
regex=regex,
)
return None if inplace else res
# {'A': NA} -> 0
elif not is_list_like(value):
keys = [(k, src) for k, src in to_replace.items() if k in self]
keys_len = len(keys) - 1
for i, (k, src) in enumerate(keys):
convert = i == keys_len
new_data = new_data.replace(
to_replace=src,
value=value,
filter=[k],
inplace=inplace,
regex=regex,
convert=convert,
)
else:
raise TypeError("value argument must be scalar, dict, or Series")
elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing']
if is_list_like(value):
if len(to_replace) != len(value):
raise ValueError(
f"Replacement lists must match in length. "
f"Expecting {len(to_replace)} got {len(value)} "
)
new_data = self._data.replace_list(
src_list=to_replace,
dest_list=value,
inplace=inplace,
regex=regex,
)
else: # [NA, ''] -> 0
new_data = self._data.replace(
to_replace=to_replace, value=value, inplace=inplace, regex=regex
)
elif to_replace is None:
if not (
is_re_compilable(regex)
or is_list_like(regex)
or is_dict_like(regex)
):
raise TypeError(
f"'regex' must be a string or a compiled regular expression "
f"or a list or dict of strings or regular expressions, "
f"you passed a {repr(type(regex).__name__)}"
)
return self.replace(
regex, value, inplace=inplace, limit=limit, regex=True
)
else:
# dest iterable dict-like
if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1}
new_data = self._data
for k, v in value.items():
if k in self:
new_data = new_data.replace(
to_replace=to_replace,
value=v,
filter=[k],
inplace=inplace,
regex=regex,
)
elif not is_list_like(value): # NA -> 0
new_data = self._data.replace(
to_replace=to_replace, value=value, inplace=inplace, regex=regex
)
else:
raise TypeError(
f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}'
)
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
_shared_docs[
"interpolate"
] = """
Please note that only ``method='linear'`` is supported for
DataFrame/Series with a MultiIndex.
Parameters
----------
method : str, default 'linear'
Interpolation technique to use. One of:
* 'linear': Ignore the index and treat the values as equally
spaced. This is the only method supported on MultiIndexes.
* 'time': Works on daily and higher resolution data to interpolate
given length of interval.
* 'index', 'values': use the actual numerical values of the index.
* 'pad': Fill in NaNs using existing values.
* 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline',
'barycentric', 'polynomial': Passed to
`scipy.interpolate.interp1d`. These methods use the numerical
values of the index. Both 'polynomial' and 'spline' require that
you also specify an `order` (int), e.g.
``df.interpolate(method='polynomial', order=5)``.
* 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima':
Wrappers around the SciPy interpolation methods of similar
names. See `Notes`.
* 'from_derivatives': Refers to
`scipy.interpolate.BPoly.from_derivatives` which
replaces 'piecewise_polynomial' interpolation method in
scipy 0.18.
axis : {0 or 'index', 1 or 'columns', None}, default None
Axis to interpolate along.
limit : int, optional
Maximum number of consecutive NaNs to fill. Must be greater than
0.
inplace : bool, default False
Update the data in place if possible.
limit_direction : {'forward', 'backward', 'both'}, default 'forward'
If limit is specified, consecutive NaNs will be filled in this
direction.
limit_area : {`None`, 'inside', 'outside'}, default None
If limit is specified, consecutive NaNs will be filled with this
restriction.
* ``None``: No fill restriction.
* 'inside': Only fill NaNs surrounded by valid values
(interpolate).
* 'outside': Only fill NaNs outside valid values (extrapolate).
.. versionadded:: 0.23.0
downcast : optional, 'infer' or None, defaults to None
Downcast dtypes if possible.
**kwargs
Keyword arguments to pass on to the interpolating function.
Returns
-------
Series or DataFrame
Returns the same object type as the caller, interpolated at
some or all ``NaN`` values.
See Also
--------
fillna : Fill missing values using different methods.
scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials
(Akima interpolator).
scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the
Bernstein basis.
scipy.interpolate.interp1d : Interpolate a 1-D function.
scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh
interpolator).
scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic
interpolation.
scipy.interpolate.CubicSpline : Cubic spline data interpolator.
Notes
-----
The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'
methods are wrappers around the respective SciPy implementations of
similar names. These use the actual numerical values of the index.
For more information on their behavior, see the
`SciPy documentation
<https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__
and `SciPy tutorial
<https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__.
Examples
--------
Filling in ``NaN`` in a :class:`~pandas.Series` via linear
interpolation.
>>> s = pd.Series([0, 1, np.nan, 3])
>>> s
0 0.0
1 1.0
2 NaN
3 3.0
dtype: float64
>>> s.interpolate()
0 0.0
1 1.0
2 2.0
3 3.0
dtype: float64
Filling in ``NaN`` in a Series by padding, but filling at most two
consecutive ``NaN`` at a time.
>>> s = pd.Series([np.nan, "single_one", np.nan,
... "fill_two_more", np.nan, np.nan, np.nan,
... 4.71, np.nan])
>>> s
0 NaN
1 single_one
2 NaN
3 fill_two_more
4 NaN
5 NaN
6 NaN
7 4.71
8 NaN
dtype: object
>>> s.interpolate(method='pad', limit=2)
0 NaN
1 single_one
2 single_one
3 fill_two_more
4 fill_two_more
5 fill_two_more
6 NaN
7 4.71
8 4.71
dtype: object
Filling in ``NaN`` in a Series via polynomial interpolation or splines:
Both 'polynomial' and 'spline' methods require that you also specify
an ``order`` (int).
>>> s = pd.Series([0, 2, np.nan, 8])
>>> s.interpolate(method='polynomial', order=2)
0 0.000000
1 2.000000
2 4.666667
3 8.000000
dtype: float64
Fill the DataFrame forward (that is, going down) along each column
using linear interpolation.
Note how the last entry in column 'a' is interpolated differently,
because there is no entry after it to use for interpolation.
Note how the first entry in column 'b' remains ``NaN``, because there
is no entry before it to use for interpolation.
>>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),
... (np.nan, 2.0, np.nan, np.nan),
... (2.0, 3.0, np.nan, 9.0),
... (np.nan, 4.0, -4.0, 16.0)],
... columns=list('abcd'))
>>> df
a b c d
0 0.0 NaN -1.0 1.0
1 NaN 2.0 NaN NaN
2 2.0 3.0 NaN 9.0
3 NaN 4.0 -4.0 16.0
>>> df.interpolate(method='linear', limit_direction='forward', axis=0)
a b c d
0 0.0 NaN -1.0 1.0
1 1.0 2.0 -2.0 5.0
2 2.0 3.0 -3.0 9.0
3 2.0 4.0 -4.0 16.0
Using polynomial interpolation.
>>> df['d'].interpolate(method='polynomial', order=2)
0 1.0
1 4.0
2 9.0
3 16.0
Name: d, dtype: float64
"""
@Appender(_shared_docs["interpolate"] % _shared_doc_kwargs)
def interpolate(
self,
method="linear",
axis=0,
limit=None,
inplace=False,
limit_direction="forward",
limit_area=None,
downcast=None,
**kwargs,
):
"""
Interpolate values according to different methods.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
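# axis=1 is handled by transposing first, so interpolation always runs along
# the rows of _maybe_transposed_self; the result is transposed back before
# returning.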
if axis == 0:
ax = self._info_axis_name
_maybe_transposed_self = self
elif axis == 1:
_maybe_transposed_self = self.T
ax = 1
ax = _maybe_transposed_self._get_axis_number(ax)
if _maybe_transposed_self.ndim == 2:
alt_ax = 1 - ax
else:
alt_ax = ax
if isinstance(_maybe_transposed_self.index, MultiIndex) and method != "linear":
raise ValueError(
"Only `method=linear` interpolation is supported on MultiIndexes."
)
if _maybe_transposed_self._data.get_dtype_counts().get("object") == len(
_maybe_transposed_self.T
):
raise TypeError(
"Cannot interpolate with all object-dtype columns "
"in the DataFrame. Try setting at least one "
"column to a numeric dtype."
)
# create/use the index
if method == "linear":
# prior default
index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax)))
else:
index = _maybe_transposed_self._get_axis(alt_ax)
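# For methods outside this set the index values are fed to the interpolator
# directly, so they must be numeric or datetime-like.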
methods = {"index", "values", "nearest", "time"}
is_numeric_or_datetime = (
is_numeric_dtype(index)
or is_datetime64_any_dtype(index)
or is_timedelta64_dtype(index)
)
if method not in methods and not is_numeric_or_datetime:
raise ValueError(
"Index column must be numeric or datetime type when "
f"using {method} method other than linear. "
"Try setting a numeric or datetime index column before "
"interpolating."
)
if isna(index).any():
raise NotImplementedError(
"Interpolation with NaNs in the index "
"has not been implemented. Try filling "
"those NaNs before interpolating."
)
data = _maybe_transposed_self._data
new_data = data.interpolate(
method=method,
axis=ax,
index=index,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
inplace=inplace,
downcast=downcast,
**kwargs,
)
if inplace:
if axis == 1:
new_data = self._constructor(new_data).T._data
self._update_inplace(new_data)
else:
res = self._constructor(new_data).__finalize__(self)
if axis == 1:
res = res.T
return res
# ----------------------------------------------------------------------
# Timeseries Methods
def asof(self, where, subset=None):
"""
Return the last row(s) without any NaNs before `where`.
The last row (for each element in `where`, if list) without any
NaN is taken.
For a :class:`~pandas.DataFrame`, the last row without any NaN is
taken, considering only the subset of columns (if not `None`).
If there is no good value, NaN is returned for a Series, or
a Series of NaN values for a DataFrame.
Parameters
----------
where : date or array-like of dates
Date(s) before which the last row(s) are returned.
subset : str or array-like of str, default `None`
For DataFrame, if not `None`, only use these columns to
check for NaNs.
Returns
-------
scalar, Series, or DataFrame
The return can be:
* scalar : when `self` is a Series and `where` is a scalar
* Series: when `self` is a Series and `where` is an array-like,
or when `self` is a DataFrame and `where` is a scalar
* DataFrame : when `self` is a DataFrame and `where` is an
array-like
Return scalar, Series, or DataFrame.
See Also
--------
merge_asof : Perform an asof merge. Similar to left join.
Notes
-----
Dates are assumed to be sorted. Raises if this is not the case.
Examples
--------
A Series and a scalar `where`.
>>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])
>>> s
10 1.0
20 2.0
30 NaN
40 4.0
dtype: float64
>>> s.asof(20)
2.0
For a sequence `where`, a Series is returned. The first value is
NaN, because the first element of `where` is before the first
index value.
>>> s.asof([5, 20])
5 NaN
20 2.0
dtype: float64
Missing values are not considered. The following is ``2.0``, not
NaN, even though NaN is at the index location for ``30``.
>>> s.asof(30)
2.0
Take all columns into consideration
>>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50],
... 'b': [None, None, None, None, 500]},
... index=pd.DatetimeIndex(['2018-02-27 09:01:00',
... '2018-02-27 09:02:00',
... '2018-02-27 09:03:00',
... '2018-02-27 09:04:00',
... '2018-02-27 09:05:00']))
>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
... '2018-02-27 09:04:30']))
a b
2018-02-27 09:03:30 NaN NaN
2018-02-27 09:04:30 NaN NaN
Take a single column into consideration
>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
... '2018-02-27 09:04:30']),
... subset=['a'])
a b
2018-02-27 09:03:30 30.0 NaN
2018-02-27 09:04:30 40.0 NaN
"""
if isinstance(where, str):
where = Timestamp(where)
if not self.index.is_monotonic:
raise ValueError("asof requires a sorted index")
is_series = isinstance(self, ABCSeries)
if is_series:
if subset is not None:
raise ValueError("subset is not valid for Series")
else:
if subset is None:
subset = self.columns
if not is_list_like(subset):
subset = [subset]
is_list = is_list_like(where)
if not is_list:
start = self.index[0]
if isinstance(self.index, PeriodIndex):
where = Period(where, freq=self.index.freq)
if where < start:
if not is_series:
from pandas import Series
return Series(index=self.columns, name=where, dtype=np.float64)
return np.nan
# It's always much faster to use a *while* loop here for
# Series than pre-computing all the NAs. However a
# *while* loop is extremely expensive for DataFrame
# so we later pre-compute all the NAs and use the same
# code path whether *where* is a scalar or list.
# See PR: https://github.com/pandas-dev/pandas/pull/14476
if is_series:
loc = self.index.searchsorted(where, side="right")
if loc > 0:
loc -= 1
values = self._values
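# Walk backwards over NaNs so the last valid value at or before `where`
# is returned.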
while loc > 0 and isna(values[loc]):
loc -= 1
return values[loc]
if not isinstance(where, Index):
where = Index(where) if is_list else Index([where])
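# Pre-compute the NA mask once (restricted to `subset` for DataFrames);
# asof_locs then only considers rows without missing values.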
nulls = self.isna() if is_series else self[subset].isna().any(1)
if nulls.all():
if is_series:
return self._constructor(np.nan, index=where, name=self.name)
elif is_list:
from pandas import DataFrame
return DataFrame(np.nan, index=where, columns=self.columns)
else:
from pandas import Series
return Series(np.nan, index=self.columns, name=where[0])
locs = self.index.asof_locs(where, ~(nulls.values))
# mask the missing
missing = locs == -1
data = self.take(locs)
data.index = where
data.loc[missing] = np.nan
return data if is_list else data.iloc[-1]
# ----------------------------------------------------------------------
# Action Methods
_shared_docs[
"isna"
] = """
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as None or :attr:`numpy.NaN`, get mapped to True
values.
Everything else gets mapped to False values. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is not an NA value.
See Also
--------
%(klass)s.isnull : Alias of isna.
%(klass)s.notna : Boolean inverse of isna.
%(klass)s.dropna : Omit axes labels with missing values.
isna : Top-level isna.
Examples
--------
Show which entries in a DataFrame are NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.isna()
age born name toy
0 False True False True
1 False False False False
2 True False False False
Show which entries in a Series are NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.isna()
0 False
1 False
2 True
dtype: bool
"""
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isna(self: FrameOrSeries) -> FrameOrSeries:
return isna(self).__finalize__(self)
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isnull(self: FrameOrSeries) -> FrameOrSeries:
return isna(self).__finalize__(self)
_shared_docs[
"notna"
] = """
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to False
values.
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is not an NA value.
See Also
--------
%(klass)s.notnull : Alias of notna.
%(klass)s.isna : Boolean inverse of notna.
%(klass)s.dropna : Omit axes labels with missing values.
notna : Top-level notna.
Examples
--------
Show which entries in a DataFrame are not NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.notna()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are not NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
dtype: bool
"""
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notna(self: FrameOrSeries) -> FrameOrSeries:
return notna(self).__finalize__(self)
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notnull(self: FrameOrSeries) -> FrameOrSeries:
return notna(self).__finalize__(self)
def _clip_with_scalar(self, lower, upper, inplace: bool_t = False):
if (lower is not None and np.any(isna(lower))) or (
upper is not None and np.any(isna(upper))
):
raise ValueError("Cannot use an NA value as a clip threshold")
result = self
mask = isna(self.values)
with np.errstate(all="ignore"):
if upper is not None:
subset = self.to_numpy() <= upper
result = result.where(subset, upper, axis=None, inplace=False)
if lower is not None:
subset = self.to_numpy() >= lower
result = result.where(subset, lower, axis=None, inplace=False)
if np.any(mask):
result[mask] = np.nan
if inplace:
self._update_inplace(result)
else:
return result
def _clip_with_one_bound(self, threshold, method, axis, inplace):
if axis is not None:
axis = self._get_axis_number(axis)
# method is self.le for upper bound and self.ge for lower bound
if is_scalar(threshold) and is_number(threshold):
if method.__name__ == "le":
return self._clip_with_scalar(None, threshold, inplace=inplace)
return self._clip_with_scalar(threshold, None, inplace=inplace)
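# Elements that already satisfy the bound (or are NaN) are kept; everything
# else is replaced with the threshold by `where` below.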
subset = method(threshold, axis=axis) | isna(self)
# GH #15390
# In order for where method to work, the threshold must
# be transformed to NDFrame from other array like structure.
if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):
if isinstance(self, ABCSeries):
threshold = self._constructor(threshold, index=self.index)
else:
threshold = _align_method_FRAME(self, threshold, axis, flex=None)[1]
return self.where(subset, threshold, axis=axis, inplace=inplace)
def clip(
self: FrameOrSeries,
lower=None,
upper=None,
axis=None,
inplace: bool_t = False,
*args,
**kwargs,
) -> FrameOrSeries:
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values. Thresholds
can be singular values or array like, and in the latter case
the clipping is performed element-wise in the specified axis.
Parameters
----------
lower : float or array_like, default None
Minimum threshold value. All values below this
threshold will be set to it.
upper : float or array_like, default None
Maximum threshold value. All values above this
threshold will be set to it.
axis : int or str axis name, optional
Align object with lower and upper along the given axis.
inplace : bool, default False
Whether to perform the operation in place on the data.
.. versionadded:: 0.21.0
*args, **kwargs
Additional keywords have no effect but might be accepted
for compatibility with numpy.
Returns
-------
Series or DataFrame
Same type as calling object with the values outside the
clip boundaries replaced.
Examples
--------
>>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}
>>> df = pd.DataFrame(data)
>>> df
col_0 col_1
0 9 -2
1 -3 -7
2 0 6
3 -1 8
4 5 -5
Clips per column using lower and upper thresholds:
>>> df.clip(-4, 6)
col_0 col_1
0 6 -2
1 -3 -4
2 0 6
3 -1 6
4 5 -4
Clips using specific lower and upper thresholds per column element:
>>> t = pd.Series([2, -4, -1, 6, 3])
>>> t
0 2
1 -4
2 -1
3 6
4 3
dtype: int64
>>> df.clip(t, t + 4, axis=0)
col_0 col_1
0 6 2
1 -3 -4
2 0 3
3 6 8
4 5 3
"""
inplace = validate_bool_kwarg(inplace, "inplace")
axis = nv.validate_clip_with_axis(axis, args, kwargs)
if axis is not None:
axis = self._get_axis_number(axis)
# GH 17276
# numpy doesn't like NaN as a clip value
# so ignore
# GH 19992
# numpy doesn't drop a list-like bound containing NaN
if not is_list_like(lower) and np.any(isna(lower)):
lower = None
if not is_list_like(upper) and np.any(isna(upper)):
upper = None
# GH 2747 (arguments were reversed)
if lower is not None and upper is not None:
if is_scalar(lower) and is_scalar(upper):
lower, upper = min(lower, upper), max(lower, upper)
# fast-path for scalars
if (lower is None or (is_scalar(lower) and is_number(lower))) and (
upper is None or (is_scalar(upper) and is_number(upper))
):
return self._clip_with_scalar(lower, upper, inplace=inplace)
result = self
if lower is not None:
result = result._clip_with_one_bound(
lower, method=self.ge, axis=axis, inplace=inplace
)
if upper is not None:
if inplace:
result = self
result = result._clip_with_one_bound(
upper, method=self.le, axis=axis, inplace=inplace
)
return result
_shared_docs[
"groupby"
] = """
Group %(klass)s using a mapper or by a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : mapping, function, label, or list of labels
Used to determine the groups for the groupby.
If ``by`` is a function, it's called on each value of the object's
index. If a dict or Series is passed, the Series or dict VALUES
will be used to determine the groups (the Series' values are first
aligned; see ``.align()`` method). If an ndarray is passed, the
values are used as-is to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``. Notice
that a tuple is interpreted as a (single) key.
axis : {0 or 'index', 1 or 'columns'}, default 0
Split along rows (0) or columns (1).
level : int, level name, or sequence of such, default None
If the axis is a MultiIndex (hierarchical), group by a particular
level or levels.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
sort : bool, default True
Sort group keys. Get better performance by turning this off.
Note this does not influence the order of observations within each
group. Groupby preserves the order of rows within each group.
group_keys : bool, default True
When calling apply, add group keys to index to identify pieces.
squeeze : bool, default False
Reduce the dimensionality of the return type if possible,
otherwise return a consistent type.
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. versionadded:: 0.23.0
Returns
-------
%(klass)sGroupBy
Returns a groupby object that contains information about the groups.
See Also
--------
resample : Convenience method for frequency conversion and resampling
of time series.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.
"""
def asfreq(
self: FrameOrSeries,
freq,
method=None,
how: Optional[str] = None,
normalize: bool_t = False,
fill_value=None,
) -> FrameOrSeries:
"""
Convert TimeSeries to specified frequency.
Optionally provide filling method to pad/backfill missing values.
Returns the original data conformed to a new index with the specified
frequency. ``resample`` is more appropriate if an operation, such as
summarization, is necessary to represent the data at the new frequency.
Parameters
----------
freq : DateOffset or str
method : {'backfill'/'bfill', 'pad'/'ffill'}, default None
Method to use for filling holes in reindexed Series (note this
does not fill NaNs that already were present):
* 'pad' / 'ffill': propagate last valid observation forward to next
valid
* 'backfill' / 'bfill': use NEXT valid observation to fill.
how : {'start', 'end'}, default end
For PeriodIndex only (see PeriodIndex.asfreq).
normalize : bool, default False
Whether to reset output index to midnight.
fill_value : scalar, optional
Value to use for missing values, applied during upsampling (note
this does not fill NaNs that already were present).
Returns
-------
converted : same type as caller
See Also
--------
reindex
Notes
-----
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
Start by creating a series with 4 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=4, freq='T')
>>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)
>>> df = pd.DataFrame({'s':series})
>>> df
s
2000-01-01 00:00:00 0.0
2000-01-01 00:01:00 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:03:00 3.0
Upsample the series into 30 second bins.
>>> df.asfreq(freq='30S')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 NaN
2000-01-01 00:03:00 3.0
Upsample again, providing a ``fill value``.
>>> df.asfreq(freq='30S', fill_value=9.0)
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 9.0
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 9.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 9.0
2000-01-01 00:03:00 3.0
Upsample again, providing a ``method``.
>>> df.asfreq(freq='30S', method='bfill')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 2.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 3.0
2000-01-01 00:03:00 3.0
"""
from pandas.core.resample import asfreq
return asfreq(
self,
freq,
method=method,
how=how,
normalize=normalize,
fill_value=fill_value,
)
def at_time(
self: FrameOrSeries, time, asof: bool_t = False, axis=None
) -> FrameOrSeries:
"""
Select values at particular time of day (e.g., 9:30AM).
Parameters
----------
time : datetime.time or str
axis : {0 or 'index', 1 or 'columns'}, default 0
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
between_time : Select values between particular times of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_at_time : Get just the index locations for
values at particular time of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='12H')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-09 12:00:00 2
2018-04-10 00:00:00 3
2018-04-10 12:00:00 4
>>> ts.at_time('12:00')
A
2018-04-09 12:00:00 2
2018-04-10 12:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
try:
indexer = index.indexer_at_time(time, asof=asof)
except AttributeError as err:
raise TypeError("Index must be DatetimeIndex") from err
return self._take_with_is_copy(indexer, axis=axis)
def between_time(
self: FrameOrSeries,
start_time,
end_time,
include_start: bool_t = True,
include_end: bool_t = True,
axis=None,
) -> FrameOrSeries:
"""
Select values between particular times of the day (e.g., 9:00-9:30 AM).
By setting ``start_time`` to be later than ``end_time``,
you can get the times that are *not* between the two times.
Parameters
----------
start_time : datetime.time or str
Initial time as a time filter limit.
end_time : datetime.time or str
End time as a time filter limit.
include_start : bool, default True
Whether the start time needs to be included in the result.
include_end : bool, default True
Whether the end time needs to be included in the result.
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine range time on index or columns value.
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Data from the original object filtered to the specified dates range.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
at_time : Select values at a particular time of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_between_time : Get just the index locations for
values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
2018-04-12 01:00:00 4
>>> ts.between_time('0:15', '0:45')
A
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
You get the times that are *not* between two times by setting
``start_time`` later than ``end_time``:
>>> ts.between_time('0:45', '0:15')
A
2018-04-09 00:00:00 1
2018-04-12 01:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
try:
indexer = index.indexer_between_time(
start_time,
end_time,
include_start=include_start,
include_end=include_end,
)
except AttributeError as err:
raise TypeError("Index must be DatetimeIndex") from err
return self._take_with_is_copy(indexer, axis=axis)
def resample(
self,
rule,
axis=0,
closed: Optional[str] = None,
label: Optional[str] = None,
convention: str = "start",
kind: Optional[str] = None,
loffset=None,
base: int = 0,
on=None,
level=None,
) -> "Resampler":
"""
Resample time-series data.
Convenience method for frequency conversion and resampling of time
series. Object must have a datetime-like index (`DatetimeIndex`,
`PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values
to the `on` or `level` keyword.
Parameters
----------
rule : DateOffset, Timedelta or str
The offset string or object representing target conversion.
axis : {0 or 'index', 1 or 'columns'}, default 0
Which axis to use for up- or down-sampling. For `Series` this
will default to 0, i.e. along the rows. Must be
`DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.
closed : {'right', 'left'}, default None
Which side of bin interval is closed. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
label : {'right', 'left'}, default None
Which bin edge label to label bucket with. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
convention : {'start', 'end', 's', 'e'}, default 'start'
For `PeriodIndex` only, controls whether to use the start or
end of `rule`.
kind : {'timestamp', 'period'}, optional, default None
Pass 'timestamp' to convert the resulting index to a
`DatetimeIndex` or 'period' to convert it to a `PeriodIndex`.
By default the input representation is retained.
loffset : timedelta, default None
Adjust the resampled time labels.
base : int, default 0
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0.
on : str, optional
For a DataFrame, column to use instead of index for resampling.
Column must be datetime-like.
level : str or int, optional
For a MultiIndex, level (name or number) to use for
resampling. `level` must be datetime-like.
Returns
-------
Resampler object
See Also
--------
groupby : Group by mapping, function, label, or list of labels.
Series.resample : Resample a Series.
DataFrame.resample: Resample a DataFrame.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`_
for more.
To learn more about the offset strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__.
Examples
--------
Start by creating a series with 9 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=9, freq='T')
>>> series = pd.Series(range(9), index=index)
>>> series
2000-01-01 00:00:00 0
2000-01-01 00:01:00 1
2000-01-01 00:02:00 2
2000-01-01 00:03:00 3
2000-01-01 00:04:00 4
2000-01-01 00:05:00 5
2000-01-01 00:06:00 6
2000-01-01 00:07:00 7
2000-01-01 00:08:00 8
Freq: T, dtype: int64
Downsample the series into 3 minute bins and sum the values
of the timestamps falling into a bin.
>>> series.resample('3T').sum()
2000-01-01 00:00:00 3
2000-01-01 00:03:00 12
2000-01-01 00:06:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but label each
bin using the right edge instead of the left. Please note that the
value in the bucket used as the label is not included in the bucket,
which it labels. For example, in the original series the
bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed
value in the resampled bucket with the label ``2000-01-01 00:03:00``
does not include 3 (if it did, the summed value would be 6, not 3).
To include this value close the right side of the bin interval as
illustrated in the example below this one.
>>> series.resample('3T', label='right').sum()
2000-01-01 00:03:00 3
2000-01-01 00:06:00 12
2000-01-01 00:09:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but close the right
side of the bin interval.
>>> series.resample('3T', label='right', closed='right').sum()
2000-01-01 00:00:00 0
2000-01-01 00:03:00 6
2000-01-01 00:06:00 15
2000-01-01 00:09:00 15
Freq: 3T, dtype: int64
Upsample the series into 30 second bins.
>>> series.resample('30S').asfreq()[0:5] # Select first 5 rows
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 1.0
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
Freq: 30S, dtype: float64
Upsample the series into 30 second bins and fill the ``NaN``
values using the ``pad`` method.
>>> series.resample('30S').pad()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 0
2000-01-01 00:01:00 1
2000-01-01 00:01:30 1
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Upsample the series into 30 second bins and fill the
``NaN`` values using the ``bfill`` method.
>>> series.resample('30S').bfill()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 1
2000-01-01 00:01:00 1
2000-01-01 00:01:30 2
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Pass a custom function via ``apply``
>>> def custom_resampler(array_like):
... return np.sum(array_like) + 5
...
>>> series.resample('3T').apply(custom_resampler)
2000-01-01 00:00:00 8
2000-01-01 00:03:00 17
2000-01-01 00:06:00 26
Freq: 3T, dtype: int64
For a Series with a PeriodIndex, the keyword `convention` can be
used to control whether to use the start or end of `rule`.
Resample a year by quarter using 'start' `convention`. Values are
assigned to the first quarter of the period.
>>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',
... freq='A',
... periods=2))
>>> s
2012 1
2013 2
Freq: A-DEC, dtype: int64
>>> s.resample('Q', convention='start').asfreq()
2012Q1 1.0
2012Q2 NaN
2012Q3 NaN
2012Q4 NaN
2013Q1 2.0
2013Q2 NaN
2013Q3 NaN
2013Q4 NaN
Freq: Q-DEC, dtype: float64
Resample quarters by month using 'end' `convention`. Values are
assigned to the last month of the period.
>>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01',
... freq='Q',
... periods=4))
>>> q
2018Q1 1
2018Q2 2
2018Q3 3
2018Q4 4
Freq: Q-DEC, dtype: int64
>>> q.resample('M', convention='end').asfreq()
2018-03 1.0
2018-04 NaN
2018-05 NaN
2018-06 2.0
2018-07 NaN
2018-08 NaN
2018-09 3.0
2018-10 NaN
2018-11 NaN
2018-12 4.0
Freq: M, dtype: float64
For DataFrame objects, the keyword `on` can be used to specify the
column instead of the index for resampling.
>>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
>>> df = pd.DataFrame(d)
>>> df['week_starting'] = pd.date_range('01/01/2018',
... periods=8,
... freq='W')
>>> df
price volume week_starting
0 10 50 2018-01-07
1 11 60 2018-01-14
2 9 40 2018-01-21
3 13 100 2018-01-28
4 14 50 2018-02-04
5 18 100 2018-02-11
6 17 40 2018-02-18
7 19 50 2018-02-25
>>> df.resample('M', on='week_starting').mean()
price volume
week_starting
2018-01-31 10.75 62.5
2018-02-28 17.00 60.0
For a DataFrame with MultiIndex, the keyword `level` can be used to
specify on which level the resampling needs to take place.
>>> days = pd.date_range('1/1/2000', periods=4, freq='D')
>>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
>>> df2 = pd.DataFrame(d2,
... index=pd.MultiIndex.from_product([days,
... ['morning',
... 'afternoon']]
... ))
>>> df2
price volume
2000-01-01 morning 10 50
afternoon 11 60
2000-01-02 morning 9 40
afternoon 13 100
2000-01-03 morning 14 50
afternoon 18 100
2000-01-04 morning 17 40
afternoon 19 50
>>> df2.resample('D', level=0).sum()
price volume
2000-01-01 21 110
2000-01-02 22 140
2000-01-03 32 150
2000-01-04 36 90
"""
from pandas.core.resample import get_resampler
axis = self._get_axis_number(axis)
return get_resampler(
self,
freq=rule,
label=label,
closed=closed,
axis=axis,
kind=kind,
loffset=loffset,
convention=convention,
base=base,
key=on,
level=level,
)
def first(self: FrameOrSeries, offset) -> FrameOrSeries:
"""
Method to subset initial periods of time series data based on a date offset.
Parameters
----------
offset : str, DateOffset, dateutil.relativedelta
Returns
-------
subset : same type as caller
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
last : Select final periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the first 3 days:
>>> ts.first('3D')
A
2018-04-09 1
2018-04-11 2
Notice that the data for the first 3 calendar days was returned, not
the first 3 days observed in the dataset, and therefore data for
2018-04-13 was not returned.
"""
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'first' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
end_date = end = self.index[0] + offset
# Tick-like, e.g. 3 weeks
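# For tick-like offsets, an end_date that falls exactly on an index value is
# excluded (searchsorted side="left"); anchored offsets fall through to
# label-based .loc slicing, which includes the end label.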
if not offset.is_anchored() and hasattr(offset, "_inc"):
if end_date in self.index:
end = self.index.searchsorted(end_date, side="left")
return self.iloc[:end]
return self.loc[:end]
def last(self: FrameOrSeries, offset) -> FrameOrSeries:
"""
Method to subset final periods of time series data based on a date offset.
Parameters
----------
offset : str, DateOffset, dateutil.relativedelta
Returns
-------
subset : same type as caller
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
first : Select initial periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the last 3 days:
>>> ts.last('3D')
A
2018-04-13 3
2018-04-15 4
Notice that the data for the last 3 calendar days was returned, not
the last 3 observed days in the dataset, and therefore data for
2018-04-11 was not returned.
"""
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'last' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
start_date = self.index[-1] - offset
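# side="right" places the cut just past any rows that fall exactly on
# start_date, so only the trailing window is kept.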
start = self.index.searchsorted(start_date, side="right")
return self.iloc[start:]
def rank(
self: FrameOrSeries,
axis=0,
method: str = "average",
numeric_only: Optional[bool_t] = None,
na_option: str = "keep",
ascending: bool_t = True,
pct: bool_t = False,
) -> FrameOrSeries:
"""
Compute numerical data ranks (1 through n) along axis.
By default, equal values are assigned a rank that is the average of the
ranks of those values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Index to direct ranking.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
How to rank the group of records that have the same value (i.e. ties):
* average: average rank of the group
* min: lowest rank in the group
* max: highest rank in the group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups.
numeric_only : bool, optional
For DataFrame objects, rank only numeric columns if set to True.
na_option : {'keep', 'top', 'bottom'}, default 'keep'
How to rank NaN values:
* keep: assign NaN rank to NaN values
* top: assign smallest rank to NaN values if ascending
* bottom: assign highest rank to NaN values if ascending.
ascending : bool, default True
Whether or not the elements should be ranked in ascending order.
pct : bool, default False
Whether or not to display the returned rankings in percentile
form.
Returns
-------
same type as caller
Return a Series or DataFrame with data ranks as values.
See Also
--------
core.groupby.GroupBy.rank : Rank of values within each group.
Examples
--------
>>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog',
... 'spider', 'snake'],
... 'Number_legs': [4, 2, 4, 8, np.nan]})
>>> df
Animal Number_legs
0 cat 4.0
1 penguin 2.0
2 dog 4.0
3 spider 8.0
4 snake NaN
The following example shows how the method behaves with the above
parameters:
* default_rank: this is the default behaviour obtained without using
any parameter.
* max_rank: setting ``method = 'max'`` the records that have the
same values are ranked using the highest rank (e.g.: since 'cat'
and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.)
* NA_bottom: choosing ``na_option = 'bottom'``, if there are records
with NaN values they are placed at the bottom of the ranking.
* pct_rank: when setting ``pct = True``, the ranking is expressed as
percentile rank.
>>> df['default_rank'] = df['Number_legs'].rank()
>>> df['max_rank'] = df['Number_legs'].rank(method='max')
>>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom')
>>> df['pct_rank'] = df['Number_legs'].rank(pct=True)
>>> df
Animal Number_legs default_rank max_rank NA_bottom pct_rank
0 cat 4.0 2.5 3.0 2.5 0.625
1 penguin 2.0 1.0 1.0 1.0 0.250
2 dog 4.0 2.5 3.0 2.5 0.625
3 spider 8.0 4.0 4.0 4.0 1.000
4 snake NaN NaN NaN 5.0 NaN
"""
axis = self._get_axis_number(axis)
if na_option not in {"keep", "top", "bottom"}:
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
raise ValueError(msg)
def ranker(data):
ranks = algos.rank(
data.values,
axis=axis,
method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
ranks = self._constructor(ranks, **data._construct_axes_dict())
return ranks.__finalize__(self)
# if numeric_only is None, and we can't get anything, we try with
# numeric_only=True
if numeric_only is None:
try:
return ranker(self)
except TypeError:
numeric_only = True
if numeric_only:
data = self._get_numeric_data()
else:
data = self
return ranker(data)
_shared_docs[
"align"
] = """
Align two objects on their axes with the specified join method.
Join method is specified for each axis Index.
Parameters
----------
other : DataFrame or Series
join : {'outer', 'inner', 'left', 'right'}, default 'outer'
axis : allowed axis of the other object, default None
Align on index (0), columns (1), or both (None).
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level.
copy : bool, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series:
- pad / ffill: propagate last valid observation forward to next valid.
- backfill / bfill: use NEXT valid observation to fill gap.
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
fill_axis : %(axes_single_arg)s, default 0
Filling axis, method and limit.
broadcast_axis : %(axes_single_arg)s, default None
Broadcast values along this axis, if aligning two objects of
different dimensions.
Returns
-------
(left, right) : (%(klass)s, type of other)
Aligned objects.
"""
@Appender(_shared_docs["align"] % _shared_doc_kwargs)
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
):
method = missing.clean_fill_method(method)
if broadcast_axis == 1 and self.ndim != other.ndim:
if isinstance(self, ABCSeries):
# this means other is a DataFrame, and we need to broadcast
# self
cons = self._constructor_expanddim
df = cons(
{c: self for c in other.columns}, **other._construct_axes_dict()
)
return df._align_frame(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
elif isinstance(other, ABCSeries):
# this means self is a DataFrame, and we need to broadcast
# other
cons = other._constructor_expanddim
df = cons(
{c: other for c in self.columns}, **self._construct_axes_dict()
)
return self._align_frame(
df,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
if axis is not None:
axis = self._get_axis_number(axis)
if isinstance(other, ABCDataFrame):
return self._align_frame(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
elif isinstance(other, ABCSeries):
return self._align_series(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
def _align_frame(
self,
other,
join="outer",
axis=None,
level=None,
copy: bool_t = True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
):
# defaults
join_index, join_columns = None, None
ilidx, iridx = None, None
clidx, cridx = None, None
is_series = isinstance(self, ABCSeries)
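# Join each axis only if the two objects actually differ there; the
# resulting indexers drive a single reindex of each object below.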
if axis is None or axis == 0:
if not self.index.equals(other.index):
join_index, ilidx, iridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
if axis is None or axis == 1:
if not is_series and not self.columns.equals(other.columns):
join_columns, clidx, cridx = self.columns.join(
other.columns, how=join, level=level, return_indexers=True
)
if is_series:
reindexers = {0: [join_index, ilidx]}
else:
reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}
left = self._reindex_with_indexers(
reindexers, copy=copy, fill_value=fill_value, allow_dups=True
)
# other must be always DataFrame
right = other._reindex_with_indexers(
{0: [join_index, iridx], 1: [join_columns, cridx]},
copy=copy,
fill_value=fill_value,
allow_dups=True,
)
if method is not None:
left = self._ensure_type(
left.fillna(method=method, axis=fill_axis, limit=limit)
)
right = right.fillna(method=method, axis=fill_axis, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return left.__finalize__(self), right.__finalize__(other)
def _align_series(
self,
other,
join="outer",
axis=None,
level=None,
copy: bool_t = True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
):
is_series = isinstance(self, ABCSeries)
# series/series compat, other must always be a Series
if is_series:
if axis:
raise ValueError("cannot align series to a series other than axis 0")
# equal
if self.index.equals(other.index):
join_index, lidx, ridx = None, None, None
else:
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
left = self._reindex_indexer(join_index, lidx, copy)
right = other._reindex_indexer(join_index, ridx, copy)
else:
# one has > 1 ndim
fdata = self._data
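# DataFrame/Series case: align the frame's block manager directly against
# the Series index (frame rows for axis=0, frame columns for axis=1).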
if axis == 0:
join_index = self.index
lidx, ridx = None, None
if not self.index.equals(other.index):
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=1)
elif axis == 1:
join_index = self.columns
lidx, ridx = None, None
if not self.columns.equals(other.index):
join_index, lidx, ridx = self.columns.join(
other.index, how=join, level=level, return_indexers=True
)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=0)
else:
raise ValueError("Must specify axis=0 or 1")
if copy and fdata is self._data:
fdata = fdata.copy()
left = self._constructor(fdata)
if ridx is None:
right = other
else:
right = other.reindex(join_index, level=level)
# fill
fill_na = notna(fill_value) or (method is not None)
if fill_na:
left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis)
right = right.fillna(fill_value, method=method, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_series or (not is_series and axis == 0):
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return left.__finalize__(self), right.__finalize__(other)
def _where(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
"""
Equivalent to public method `where`, except that `other` is not
applied as a function even if callable. Used in __setitem__.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# align the cond to same shape as myself
cond = com.apply_if_callable(cond, self)
if isinstance(cond, NDFrame):
cond, _ = cond.align(self, join="right", broadcast_axis=1)
else:
if not hasattr(cond, "shape"):
cond = np.asanyarray(cond)
if cond.shape != self.shape:
raise ValueError("Array conditional must be same shape as self")
cond = self._constructor(cond, **self._construct_axes_dict())
# make sure we are boolean
fill_value = bool(inplace)
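# NaNs in the condition are filled before use: with True on the inplace path
# (the condition is inverted below and used as a putmask mask) and with
# False otherwise.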
cond = cond.fillna(fill_value)
msg = "Boolean array expected for the condition, not {dtype}"
if not isinstance(cond, ABCDataFrame):
# This is a single-dimensional object.
if not is_bool_dtype(cond):
raise ValueError(msg.format(dtype=cond.dtype))
elif not cond.empty:
for dt in cond.dtypes:
if not is_bool_dtype(dt):
raise ValueError(msg.format(dtype=dt))
cond = -cond if inplace else cond
# try to align with other
try_quick = True
if hasattr(other, "align"):
# align with me
if other.ndim <= self.ndim:
_, other = self.align(
other, join="left", axis=axis, level=level, fill_value=np.nan
)
# if we are NOT aligned, raise as we cannot where index
if axis is None and not all(
other._get_axis(i).equals(ax) for i, ax in enumerate(self.axes)
):
raise InvalidIndexError
# slice me out of the other
else:
raise NotImplementedError(
"cannot align with a higher dimensional NDFrame"
)
if isinstance(other, np.ndarray):
if other.shape != self.shape:
if self.ndim == 1:
icond = cond.values
# GH 2745 / GH 4192
# treat like a scalar
if len(other) == 1:
other = np.array(other[0])
# GH 3235
# match True cond to other
elif len(cond[icond]) == len(other):
# try to not change dtype at first (if try_quick)
if try_quick:
new_other = np.asarray(self)
new_other = new_other.copy()
new_other[icond] = other
other = new_other
else:
raise ValueError(
"Length of replacements must equal series length"
)
else:
raise ValueError(
"other must be the same shape as self when an ndarray"
)
# we are the same shape, so create an actual object for alignment
else:
other = self._constructor(other, **self._construct_axes_dict())
if axis is None:
axis = 0
if self.ndim == getattr(other, "ndim", 0):
align = True
else:
align = self._get_axis_number(axis) == 1
block_axis = self._get_block_manager_axis(axis)
if inplace:
# we may have different type blocks come out of putmask, so
# reconstruct the block manager
self._check_inplace_setting(other)
new_data = self._data.putmask(
mask=cond,
new=other,
align=align,
inplace=True,
axis=block_axis,
transpose=self._AXIS_REVERSED,
)
self._update_inplace(new_data)
else:
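# Non-inplace path: BlockManager.where keeps the original values where cond
# is True and writes `other` elsewhere, returning new blocks.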
new_data = self._data.where(
other=other,
cond=cond,
align=align,
errors=errors,
try_cast=try_cast,
axis=block_axis,
)
return self._constructor(new_data).__finalize__(self)
_shared_docs[
"where"
] = """
Replace values where the condition is %(cond_rev)s.
Parameters
----------
cond : bool %(klass)s, array-like, or callable
Where `cond` is %(cond)s, keep the original value. Where
%(cond_rev)s, replace with corresponding value from `other`.
If `cond` is callable, it is computed on the %(klass)s and
should return boolean %(klass)s or array. The callable must
not change input %(klass)s (though pandas doesn't check it).
other : scalar, %(klass)s, or callable
Entries where `cond` is %(cond_rev)s are replaced with
corresponding value from `other`.
If other is callable, it is computed on the %(klass)s and
should return scalar or %(klass)s. The callable must not
change input %(klass)s (though pandas doesn't check it).
inplace : bool, default False
Whether to perform the operation in place on the data.
axis : int, default None
Alignment axis if needed.
level : int, default None
Alignment level if needed.
errors : str, {'raise', 'ignore'}, default 'raise'
Note that currently this parameter won't affect
the results and will always coerce to a suitable dtype.
- 'raise' : allow exceptions to be raised.
- 'ignore' : suppress exceptions. On error return original object.
try_cast : bool, default False
Try to cast the result back to the input type (if possible).
Returns
-------
Same type as caller
See Also
--------
:func:`DataFrame.%(name_other)s` : Return an object of same shape as
self.
Notes
-----
The %(name)s method is an application of the if-then idiom. For each
element in the calling DataFrame, if ``cond`` is ``%(cond)s`` the
element is used; otherwise the corresponding element from the DataFrame
``other`` is used.
The signature for :func:`DataFrame.where` differs from
:func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to
``np.where(m, df1, df2)``.
For further details and examples see the ``%(name)s`` documentation in
:ref:`indexing <indexing.where_mask>`.
Examples
--------
>>> s = pd.Series(range(5))
>>> s.where(s > 0)
0 NaN
1 1.0
2 2.0
3 3.0
4 4.0
dtype: float64
>>> s.mask(s > 0)
0 0.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
>>> s.where(s > 1, 10)
0 10
1 10
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])
>>> df
A B
0 0 1
1 2 3
2 4 5
3 6 7
4 8 9
>>> m = df %% 3 == 0
>>> df.where(m, -df)
A B
0 0 -1
1 -2 3
2 -4 -5
3 6 -7
4 -8 9
>>> df.where(m, -df) == np.where(m, df, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
>>> df.where(m, -df) == df.mask(~m, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
"""
@Appender(
_shared_docs["where"]
% dict(
_shared_doc_kwargs,
cond="True",
cond_rev="False",
name="where",
name_other="mask",
)
)
def where(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
other = com.apply_if_callable(other, self)
return self._where(
cond, other, inplace, axis, level, errors=errors, try_cast=try_cast
)
@Appender(
_shared_docs["where"]
% dict(
_shared_doc_kwargs,
cond="False",
cond_rev="True",
name="mask",
name_other="where",
)
)
def mask(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
inplace = validate_bool_kwarg(inplace, "inplace")
cond = com.apply_if_callable(cond, self)
# see gh-21891
if not hasattr(cond, "__invert__"):
cond = np.array(cond)
return self.where(
~cond,
other=other,
inplace=inplace,
axis=axis,
level=level,
try_cast=try_cast,
errors=errors,
)
_shared_docs[
"shift"
] = """
Shift index by desired number of periods with an optional time `freq`.
When `freq` is not passed, shift the index without realigning the data.
If `freq` is passed (in this case, the index must be date or datetime,
or it will raise a `NotImplementedError`), the index will be
increased using the periods and the `freq`.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
freq : DateOffset, tseries.offsets, timedelta, or str, optional
Offset to use from the tseries module or time rule (e.g. 'EOM').
If `freq` is specified then the index values are shifted but the
data is not realigned. That is, use `freq` if you would like to
extend the index when shifting and preserve the original data.
axis : {0 or 'index', 1 or 'columns', None}, default None
Shift direction.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default depends on the dtype of `self`.
For numeric data, ``np.nan`` is used.
For datetime, timedelta, or period data, etc. :attr:`NaT` is used.
For extension dtypes, ``self.dtype.na_value`` is used.
.. versionchanged:: 0.24.0
Returns
-------
%(klass)s
Copy of input object, shifted.
See Also
--------
Index.shift : Shift values of Index.
DatetimeIndex.shift : Shift values of DatetimeIndex.
PeriodIndex.shift : Shift values of PeriodIndex.
tshift : Shift the time index, using the index's frequency if
available.
Examples
--------
>>> df = pd.DataFrame({'Col1': [10, 20, 15, 30, 45],
... 'Col2': [13, 23, 18, 33, 48],
... 'Col3': [17, 27, 22, 37, 52]})
>>> df.shift(periods=3)
Col1 Col2 Col3
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 10.0 13.0 17.0
4 20.0 23.0 27.0
>>> df.shift(periods=1, axis='columns')
Col1 Col2 Col3
0 NaN 10.0 13.0
1 NaN 20.0 23.0
2 NaN 15.0 18.0
3 NaN 30.0 33.0
4 NaN 45.0 48.0
>>> df.shift(periods=3, fill_value=0)
Col1 Col2 Col3
0 0 0 0
1 0 0 0
2 0 0 0
3 10 13 17
4 20 23 27
"""
@Appender(_shared_docs["shift"] % _shared_doc_kwargs)
def shift(
self: FrameOrSeries, periods=1, freq=None, axis=0, fill_value=None
) -> FrameOrSeries:
if periods == 0:
return self.copy()
block_axis = self._get_block_manager_axis(axis)
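# Without `freq` the values themselves are shifted inside the blocks; with
# `freq` only the index labels move, via tshift.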
if freq is None:
new_data = self._data.shift(
periods=periods, axis=block_axis, fill_value=fill_value
)
else:
return self.tshift(periods, freq)
return self._constructor(new_data).__finalize__(self)
def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries:
"""
Equivalent to `shift` without copying data.
The shifted data will not include the dropped periods and the
shifted axis will be smaller than the original.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative.
Returns
-------
shifted : same type as caller
Notes
-----
While `slice_shift` is faster than `shift`, you may pay for it
later during alignment.
"""
if periods == 0:
return self
if periods > 0:
vslicer = slice(None, -periods)
islicer = slice(periods, None)
else:
vslicer = slice(-periods, None)
islicer = slice(None, periods)
new_obj = self._slice(vslicer, axis=axis)
shifted_axis = self._get_axis(axis)[islicer]
new_obj.set_axis(shifted_axis, axis=axis, inplace=True)
return new_obj.__finalize__(self)
def tshift(
self: FrameOrSeries, periods: int = 1, freq=None, axis=0
) -> FrameOrSeries:
"""
Shift the time index, using the index's frequency if available.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative.
freq : DateOffset, timedelta, or str, default None
Increment to use from the tseries module
or time rule expressed as a string (e.g. 'EOM').
axis : {0 or ‘index’, 1 or ‘columns’, None}, default 0
Corresponds to the axis that contains the Index.
Returns
-------
shifted : Series/DataFrame
Notes
-----
        If `freq` is not specified then it tries to use the `freq` or
        `inferred_freq` attributes of the index. If neither of those
        attributes exists, a ValueError is raised.
"""
index = self._get_axis(axis)
if freq is None:
freq = getattr(index, "freq", None)
if freq is None:
freq = getattr(index, "inferred_freq", None)
if freq is None:
msg = "Freq was not given and was not set in the index"
raise ValueError(msg)
if periods == 0:
return self
if isinstance(freq, str):
freq = to_offset(freq)
block_axis = self._get_block_manager_axis(axis)
if isinstance(index, PeriodIndex):
orig_freq = to_offset(index.freq)
if freq == orig_freq:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods)
elif orig_freq is not None:
raise ValueError(
f"Given freq {freq.rule_code} does not match "
f"PeriodIndex freq {orig_freq.rule_code}"
)
else:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods, freq)
return self._constructor(new_data).__finalize__(self)
def truncate(
self: FrameOrSeries, before=None, after=None, axis=None, copy: bool_t = True
) -> FrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
        copy : bool, default True
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Notes
-----
If the index being truncated contains only datetime values,
`before` and `after` may be specified as strings instead of
Timestamps.
Examples
--------
>>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
The index values in ``truncate`` can be datetimes or string
dates.
>>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')
>>> df = pd.DataFrame(index=dates, data={'A': 1})
>>> df.tail()
A
2016-01-31 23:59:56 1
2016-01-31 23:59:57 1
2016-01-31 23:59:58 1
2016-01-31 23:59:59 1
2016-02-01 00:00:00 1
>>> df.truncate(before=pd.Timestamp('2016-01-05'),
... after=pd.Timestamp('2016-01-10')).tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Because the index is a DatetimeIndex containing only dates, we can
specify `before` and `after` as strings. They will be coerced to
Timestamps before truncation.
>>> df.truncate('2016-01-05', '2016-01-10').tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Note that ``truncate`` assumes a 0 value for any unspecified time
component (midnight). This differs from partial string slicing, which
returns any partially matching dates.
>>> df.loc['2016-01-05':'2016-01-10', :].tail()
A
2016-01-10 23:59:55 1
2016-01-10 23:59:56 1
2016-01-10 23:59:57 1
2016-01-10 23:59:58 1
2016-01-10 23:59:59 1
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
# GH 17935
# Check that index is sorted
if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
# if we have a date index, convert to dates, otherwise
# treat like a slice
if ax.is_all_dates:
from pandas.core.tools.datetimes import to_datetime
before = to_datetime(before)
after = to_datetime(after)
if before is not None and after is not None:
if before > after:
raise ValueError(f"Truncate: {after} must be after {before}")
slicer = [slice(None, None)] * self._AXIS_LEN
slicer[axis] = slice(before, after)
result = self.loc[tuple(slicer)]
if isinstance(ax, MultiIndex):
setattr(result, self._get_axis_name(axis), ax.truncate(before, after))
if copy:
result = result.copy()
return result
def tz_convert(
self: FrameOrSeries, tz, axis=0, level=None, copy: bool_t = True
) -> FrameOrSeries:
"""
Convert tz-aware axis to target time zone.
Parameters
----------
tz : str or tzinfo object
axis : the axis to convert
level : int, str, default None
If axis is a MultiIndex, convert a specific level. Otherwise
must be None.
copy : bool, default True
Also make a copy of the underlying data.
Returns
-------
%(klass)s
Object with time zone converted axis.
Raises
------
TypeError
If the axis is tz-naive.
"""
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_convert(ax, tz):
if not hasattr(ax, "tz_convert"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_convert(tz)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_convert(ax.levels[level], tz)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError(f"The level {level} is not valid")
ax = _tz_convert(ax, tz)
result = self._constructor(self._data, copy=copy)
result = result.set_axis(ax, axis=axis, inplace=False)
return result.__finalize__(self)
def tz_localize(
self: FrameOrSeries,
tz,
axis=0,
level=None,
copy: bool_t = True,
ambiguous="raise",
nonexistent: str = "raise",
) -> FrameOrSeries:
"""
Localize tz-naive index of a Series or DataFrame to target time zone.
This operation localizes the Index. To localize the values in a
timezone-naive Series, use :meth:`Series.dt.tz_localize`.
Parameters
----------
tz : str or tzinfo
axis : the axis to localize
level : int, str, default None
            If axis is a MultiIndex, localize a specific level. Otherwise
must be None.
copy : bool, default True
Also make a copy of the underlying data.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from
03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
`ambiguous` parameter dictates how ambiguous times should be
handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False designates
a non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
nonexistent : str, default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST. Valid values are:
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise an NonExistentTimeError if there are
nonexistent times.
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Same type as the input.
Raises
------
TypeError
If the TimeSeries is tz-aware and tz is not None.
Examples
--------
Localize local times:
>>> s = pd.Series([1],
... index=pd.DatetimeIndex(['2018-09-15 01:30:00']))
>>> s.tz_localize('CET')
2018-09-15 01:30:00+02:00 1
dtype: int64
Be careful with DST changes. When there is sequential data, pandas
can infer the DST time:
>>> s = pd.Series(range(7),
... index=pd.DatetimeIndex(['2018-10-28 01:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 03:00:00',
... '2018-10-28 03:30:00']))
>>> s.tz_localize('CET', ambiguous='infer')
2018-10-28 01:30:00+02:00 0
2018-10-28 02:00:00+02:00 1
2018-10-28 02:30:00+02:00 2
2018-10-28 02:00:00+01:00 3
2018-10-28 02:30:00+01:00 4
2018-10-28 03:00:00+01:00 5
2018-10-28 03:30:00+01:00 6
dtype: int64
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly
>>> s = pd.Series(range(3),
... index=pd.DatetimeIndex(['2018-10-28 01:20:00',
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.tz_localize('CET', ambiguous=np.array([True, True, False]))
2018-10-28 01:20:00+02:00 0
2018-10-28 02:36:00+02:00 1
2018-10-28 03:46:00+01:00 2
dtype: int64
If the DST transition causes nonexistent times, you can shift these
dates forward or backwards with a timedelta object or `'shift_forward'`
        or `'shift_backward'`.
>>> s = pd.Series(range(2),
... index=pd.DatetimeIndex(['2015-03-29 02:30:00',
... '2015-03-29 03:30:00']))
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
2015-03-29 03:00:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
2015-03-29 01:59:59.999999999+01:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
2015-03-29 03:30:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
"""
nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, timedelta
):
raise ValueError(
"The nonexistent argument must be one of 'raise', "
"'NaT', 'shift_forward', 'shift_backward' or "
"a timedelta object"
)
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_localize(ax, tz, ambiguous, nonexistent):
if not hasattr(ax, "tz_localize"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError(f"The level {level} is not valid")
ax = _tz_localize(ax, tz, ambiguous, nonexistent)
result = self._constructor(self._data, copy=copy)
result = result.set_axis(ax, axis=axis, inplace=False)
return result.__finalize__(self)
# ----------------------------------------------------------------------
# Numeric Methods
def abs(self: FrameOrSeries) -> FrameOrSeries:
"""
Return a Series/DataFrame with absolute numeric value of each element.
This function only applies to elements that are all numeric.
Returns
-------
abs
Series/DataFrame containing the absolute value of each element.
See Also
--------
numpy.absolute : Calculate the absolute value element-wise.
Notes
-----
For ``complex`` inputs, ``1.2 + 1j``, the absolute value is
:math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
Absolute numeric values in a Series.
>>> s = pd.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a Series with complex numbers.
>>> s = pd.Series([1.2 + 1j])
>>> s.abs()
0 1.56205
dtype: float64
Absolute numeric values in a Series with a Timedelta element.
>>> s = pd.Series([pd.Timedelta('1 days')])
>>> s.abs()
0 1 days
dtype: timedelta64[ns]
Select rows with data closest to certain value using argsort (from
`StackOverflow <https://stackoverflow.com/a/17758115>`__).
>>> df = pd.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... })
>>> df
a b c
0 4 10 100
1 5 20 50
2 6 30 -30
3 7 40 -50
>>> df.loc[(df.c - 43).abs().argsort()]
a b c
1 5 20 50
0 4 10 100
2 6 30 -30
3 7 40 -50
"""
return np.abs(self)
def describe(
self: FrameOrSeries, percentiles=None, include=None, exclude=None
) -> FrameOrSeries:
"""
Generate descriptive statistics.
Descriptive statistics include those that summarize the central
tendency, dispersion and shape of a
dataset's distribution, excluding ``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list-like of numbers, optional
The percentiles to include in the output. All should
fall between 0 and 1. The default is
``[.25, .5, .75]``, which returns the 25th, 50th, and
75th percentiles.
include : 'all', list-like of dtypes or None (default), optional
A white list of data types to include in the result. Ignored
for ``Series``. Here are the options:
- 'all' : All columns of the input will be included in the output.
- A list-like of dtypes : Limits the results to the
provided data types.
To limit the result to numeric types submit
``numpy.number``. To limit it instead to object columns submit
the ``numpy.object`` data type. Strings
can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
select pandas categorical columns, use ``'category'``
- None (default) : The result will include all numeric columns.
        exclude : list-like of dtypes or None (default), optional
A black list of data types to omit from the result. Ignored
for ``Series``. Here are the options:
- A list-like of dtypes : Excludes the provided data types
from the result. To exclude numeric types submit
``numpy.number``. To exclude object columns submit the data
type ``numpy.object``. Strings can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
exclude pandas categorical columns, use ``'category'``
- None (default) : The result will exclude nothing.
Returns
-------
Series or DataFrame
Summary statistics of the Series or Dataframe provided.
See Also
--------
DataFrame.count: Count number of non-NA/null observations.
DataFrame.max: Maximum of the values in the object.
DataFrame.min: Minimum of the values in the object.
DataFrame.mean: Mean of the values.
DataFrame.std: Standard deviation of the observations.
DataFrame.select_dtypes: Subset of a DataFrame including/excluding
columns based on their dtype.
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and
upper percentiles. By default the lower percentile is ``25`` and the
upper percentile is ``75``. The ``50`` percentile is the
same as the median.
For object data (e.g. strings or timestamps), the result's index
will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``
is the most common value. The ``freq`` is the most common value's
frequency. Timestamps also include the ``first`` and ``last`` items.
If multiple object values have the highest count, then the
``count`` and ``top`` results will be arbitrarily chosen from
among those with the highest count.
For mixed data types provided via a ``DataFrame``, the default is to
return only an analysis of numeric columns. If the dataframe consists
only of object and categorical data without any numeric columns, the
default is to return an analysis of both the object and categorical
columns. If ``include='all'`` is provided as an option, the result
will include a union of attributes of each type.
The `include` and `exclude` parameters can be used to limit
which columns in a ``DataFrame`` are analyzed for the output.
The parameters are ignored when analyzing a ``Series``.
Examples
--------
Describing a numeric ``Series``.
>>> s = pd.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
dtype: float64
Describing a categorical ``Series``.
>>> s = pd.Series(['a', 'a', 'b', 'c'])
>>> s.describe()
count 4
unique 3
top a
freq 2
dtype: object
Describing a timestamp ``Series``.
>>> s = pd.Series([
... np.datetime64("2000-01-01"),
... np.datetime64("2010-01-01"),
... np.datetime64("2010-01-01")
... ])
>>> s.describe()
count 3
unique 2
top 2010-01-01 00:00:00
freq 2
first 2000-01-01 00:00:00
last 2010-01-01 00:00:00
dtype: object
Describing a ``DataFrame``. By default only numeric fields
are returned.
>>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']),
... 'numeric': [1, 2, 3],
... 'object': ['a', 'b', 'c']
... })
>>> df.describe()
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Describing all columns of a ``DataFrame`` regardless of data type.
>>> df.describe(include='all')
categorical numeric object
count 3 3.0 3
unique 3 NaN 3
top f NaN c
freq 1 NaN 1
mean NaN 2.0 NaN
std NaN 1.0 NaN
min NaN 1.0 NaN
25% NaN 1.5 NaN
50% NaN 2.0 NaN
75% NaN 2.5 NaN
max NaN 3.0 NaN
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Name: numeric, dtype: float64
Including only numeric columns in a ``DataFrame`` description.
>>> df.describe(include=[np.number])
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Including only string columns in a ``DataFrame`` description.
>>> df.describe(include=[np.object])
object
count 3
unique 3
top c
freq 1
Including only categorical columns from a ``DataFrame`` description.
>>> df.describe(include=['category'])
categorical
count 3
unique 3
top f
freq 1
Excluding numeric columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.number])
categorical object
count 3 3
unique 3 3
top f c
freq 1 1
Excluding object columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.object])
categorical numeric
count 3 3.0
unique 3 NaN
top f NaN
freq 1 NaN
mean NaN 2.0
std NaN 1.0
min NaN 1.0
25% NaN 1.5
50% NaN 2.0
75% NaN 2.5
max NaN 3.0
"""
if self.ndim == 2 and self.columns.size == 0:
raise ValueError("Cannot describe a DataFrame without columns")
if percentiles is not None:
# explicit conversion of `percentiles` to list
percentiles = list(percentiles)
# get them all to be in [0, 1]
validate_percentile(percentiles)
# median should always be included
if 0.5 not in percentiles:
percentiles.append(0.5)
percentiles = np.asarray(percentiles)
else:
percentiles = np.array([0.25, 0.5, 0.75])
# sort and check for duplicates
unique_pcts = np.unique(percentiles)
if len(unique_pcts) < len(percentiles):
raise ValueError("percentiles cannot contain duplicates")
percentiles = unique_pcts
formatted_percentiles = format_percentiles(percentiles)
def describe_numeric_1d(series):
stat_index = (
["count", "mean", "std", "min"] + formatted_percentiles + ["max"]
)
d = (
[series.count(), series.mean(), series.std(), series.min()]
+ series.quantile(percentiles).tolist()
+ [series.max()]
)
return pd.Series(d, index=stat_index, name=series.name)
def describe_categorical_1d(data):
names = ["count", "unique"]
objcounts = data.value_counts()
count_unique = len(objcounts[objcounts != 0])
result = [data.count(), count_unique]
dtype = None
if result[1] > 0:
top, freq = objcounts.index[0], objcounts.iloc[0]
names += ["top", "freq"]
result += [top, freq]
# If the DataFrame is empty, set 'top' and 'freq' to None
# to maintain output shape consistency
else:
names += ["top", "freq"]
result += [np.nan, np.nan]
dtype = "object"
return pd.Series(result, index=names, name=data.name, dtype=dtype)
def describe_timestamp_1d(data):
# GH-30164
stat_index = ["count", "mean", "min"] + formatted_percentiles + ["max"]
d = (
[data.count(), data.mean(), data.min()]
+ data.quantile(percentiles).tolist()
+ [data.max()]
)
return pd.Series(d, index=stat_index, name=data.name)
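        # Dispatch on dtype: bool and object data get a categorical-style summary,
        # numeric and timedelta data a numeric summary, datetimes the timestamp
        # summary (GH-30164).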
def describe_1d(data):
if is_bool_dtype(data):
return describe_categorical_1d(data)
elif is_numeric_dtype(data):
return describe_numeric_1d(data)
elif is_datetime64_any_dtype(data):
return describe_timestamp_1d(data)
elif is_timedelta64_dtype(data):
return describe_numeric_1d(data)
else:
return describe_categorical_1d(data)
if self.ndim == 1:
return describe_1d(self)
elif (include is None) and (exclude is None):
# when some numerics are found, keep only numerics
data = self.select_dtypes(include=[np.number])
if len(data.columns) == 0:
data = self
elif include == "all":
if exclude is not None:
msg = "exclude must be None when include is 'all'"
raise ValueError(msg)
data = self
else:
data = self.select_dtypes(include=include, exclude=exclude)
ldesc = [describe_1d(s) for _, s in data.items()]
# set a convenient order for rows
names: List[Label] = []
ldesc_indexes = sorted((x.index for x in ldesc), key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
d = pd.concat([x.reindex(names, copy=False) for x in ldesc], axis=1, sort=False)
d.columns = data.columns.copy()
return d
_shared_docs[
"pct_change"
] = """
Percentage change between the current and a prior element.
Computes the percentage change from the immediately previous row by
default. This is useful in comparing the percentage of change in a time
series of elements.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
fill_method : str, default 'pad'
How to handle NAs before computing percent changes.
limit : int, default None
The number of consecutive NAs to fill before stopping.
freq : DateOffset, timedelta, or str, optional
Increment to use from time series API (e.g. 'M' or BDay()).
**kwargs
Additional keyword arguments are passed into
`DataFrame.shift` or `Series.shift`.
Returns
-------
chg : Series or DataFrame
The same type as the calling object.
See Also
--------
Series.diff : Compute the difference of two elements in a Series.
DataFrame.diff : Compute the difference of two elements in a DataFrame.
Series.shift : Shift the index by some number of periods.
DataFrame.shift : Shift the index by some number of periods.
Examples
--------
**Series**
>>> s = pd.Series([90, 91, 85])
>>> s
0 90
1 91
2 85
dtype: int64
>>> s.pct_change()
0 NaN
1 0.011111
2 -0.065934
dtype: float64
>>> s.pct_change(periods=2)
0 NaN
1 NaN
2 -0.055556
dtype: float64
See the percentage change in a Series where filling NAs with last
valid observation forward to next valid.
>>> s = pd.Series([90, 91, None, 85])
>>> s
0 90.0
1 91.0
2 NaN
3 85.0
dtype: float64
>>> s.pct_change(fill_method='ffill')
0 NaN
1 0.011111
2 0.000000
3 -0.065934
dtype: float64
**DataFrame**
Percentage change in French franc, Deutsche Mark, and Italian lira from
1980-01-01 to 1980-03-01.
>>> df = pd.DataFrame({
... 'FR': [4.0405, 4.0963, 4.3149],
... 'GR': [1.7246, 1.7482, 1.8519],
... 'IT': [804.74, 810.01, 860.13]},
... index=['1980-01-01', '1980-02-01', '1980-03-01'])
>>> df
FR GR IT
1980-01-01 4.0405 1.7246 804.74
1980-02-01 4.0963 1.7482 810.01
1980-03-01 4.3149 1.8519 860.13
>>> df.pct_change()
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 0.013810 0.013684 0.006549
1980-03-01 0.053365 0.059318 0.061876
    Percentage change in GOOG and APPL stock volume. Shows computing
the percentage change between columns.
>>> df = pd.DataFrame({
... '2016': [1769950, 30586265],
... '2015': [1500923, 40912316],
... '2014': [1371819, 41403351]},
... index=['GOOG', 'APPL'])
>>> df
2016 2015 2014
GOOG 1769950 1500923 1371819
APPL 30586265 40912316 41403351
>>> df.pct_change(axis='columns')
2016 2015 2014
GOOG NaN -0.151997 -0.086016
APPL NaN 0.337604 0.012002
"""
@Appender(_shared_docs["pct_change"] % _shared_doc_kwargs)
def pct_change(
self: FrameOrSeries,
periods=1,
fill_method="pad",
limit=None,
freq=None,
**kwargs,
) -> FrameOrSeries:
# TODO: Not sure if above is correct - need someone to confirm.
axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name))
if fill_method is None:
data = self
else:
data = self._ensure_type(
self.fillna(method=fill_method, axis=axis, limit=limit)
)
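        # Percent change is the element-wise ratio to the shifted values, minus 1.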
rs = data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1
if freq is not None:
# Shift method is implemented differently when freq is not None
# We want to restore the original index
rs = rs.loc[~rs.index.duplicated()]
rs = rs.reindex_like(data)
return rs
def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):
if axis is None:
raise ValueError("Must specify 'axis' when aggregating by level.")
grouped = self.groupby(level=level, axis=axis, sort=False)
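        # Prefer the groupby fast path when the reduction exists on the grouped
        # object and NaNs are being skipped; otherwise fall back to aggregate.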
if hasattr(grouped, name) and skipna:
return getattr(grouped, name)(**kwargs)
axis = self._get_axis_number(axis)
method = getattr(type(self), name)
applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)
return grouped.aggregate(applyf)
@classmethod
def _add_numeric_operations(cls):
"""
Add the operations to the cls; evaluate the doc strings again
"""
axis_descr, name1, name2 = _doc_parms(cls)
cls.any = _make_logical_function(
cls,
"any",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc=_any_desc,
func=nanops.nanany,
see_also=_any_see_also,
examples=_any_examples,
empty_value=False,
)
cls.all = _make_logical_function(
cls,
"all",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc=_all_desc,
func=nanops.nanall,
see_also=_all_see_also,
examples=_all_examples,
empty_value=True,
)
@Substitution(
desc="Return the mean absolute deviation of the values "
"for the requested axis.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also="",
examples="",
)
@Appender(_num_doc_mad)
def mad(self, axis=None, skipna=None, level=None):
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level("mad", axis=axis, level=level, skipna=skipna)
data = self._get_numeric_data()
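            # Mean absolute deviation: demean along the requested axis, then
            # average the absolute deviations.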
if axis == 0:
demeaned = data - data.mean(axis=0)
else:
demeaned = data.sub(data.mean(axis=1), axis=0)
return np.abs(demeaned).mean(axis=axis, skipna=skipna)
cls.mad = mad
cls.sem = _make_stat_function_ddof(
cls,
"sem",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return unbiased standard error of the mean over requested "
"axis.\n\nNormalized by N-1 by default. This can be changed "
"using the ddof argument",
func=nanops.nansem,
)
cls.var = _make_stat_function_ddof(
cls,
"var",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return unbiased variance over requested axis.\n\nNormalized by "
"N-1 by default. This can be changed using the ddof argument",
func=nanops.nanvar,
)
cls.std = _make_stat_function_ddof(
cls,
"std",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return sample standard deviation over requested axis."
"\n\nNormalized by N-1 by default. This can be changed using the "
"ddof argument",
func=nanops.nanstd,
)
cls.cummin = _make_cum_function(
cls,
"cummin",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="minimum",
accum_func=np.minimum.accumulate,
accum_func_name="min",
mask_a=np.inf,
mask_b=np.nan,
examples=_cummin_examples,
)
cls.cumsum = _make_cum_function(
cls,
"cumsum",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="sum",
accum_func=np.cumsum,
accum_func_name="sum",
mask_a=0.0,
mask_b=np.nan,
examples=_cumsum_examples,
)
cls.cumprod = _make_cum_function(
cls,
"cumprod",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="product",
accum_func=np.cumprod,
accum_func_name="prod",
mask_a=1.0,
mask_b=np.nan,
examples=_cumprod_examples,
)
cls.cummax = _make_cum_function(
cls,
"cummax",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="maximum",
accum_func=np.maximum.accumulate,
accum_func_name="max",
mask_a=-np.inf,
mask_b=np.nan,
examples=_cummax_examples,
)
cls.sum = _make_min_count_stat_function(
cls,
"sum",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the sum of the values for the requested axis.\n\n"
"This is equivalent to the method ``numpy.sum``.",
func=nanops.nansum,
see_also=_stat_func_see_also,
examples=_sum_examples,
)
cls.mean = _make_stat_function(
cls,
"mean",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the mean of the values for the requested axis.",
func=nanops.nanmean,
)
cls.skew = _make_stat_function(
cls,
"skew",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return unbiased skew over requested axis.\n\nNormalized by N-1.",
func=nanops.nanskew,
)
cls.kurt = _make_stat_function(
cls,
"kurt",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return unbiased kurtosis over requested axis.\n\n"
"Kurtosis obtained using Fisher's definition of\n"
"kurtosis (kurtosis of normal == 0.0). Normalized "
"by N-1.",
func=nanops.nankurt,
)
cls.kurtosis = cls.kurt
cls.prod = _make_min_count_stat_function(
cls,
"prod",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the product of the values for the requested axis.",
func=nanops.nanprod,
examples=_prod_examples,
)
cls.product = cls.prod
cls.median = _make_stat_function(
cls,
"median",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the median of the values for the requested axis.",
func=nanops.nanmedian,
)
cls.max = _make_stat_function(
cls,
"max",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the maximum of the values for the requested axis.\n\n"
"If you want the *index* of the maximum, use ``idxmax``. This is"
"the equivalent of the ``numpy.ndarray`` method ``argmax``.",
func=nanops.nanmax,
see_also=_stat_func_see_also,
examples=_max_examples,
)
cls.min = _make_stat_function(
cls,
"min",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the minimum of the values for the requested axis.\n\n"
"If you want the *index* of the minimum, use ``idxmin``. This is"
"the equivalent of the ``numpy.ndarray`` method ``argmin``.",
func=nanops.nanmin,
see_also=_stat_func_see_also,
examples=_min_examples,
)
@classmethod
def _add_series_or_dataframe_operations(cls):
"""
Add the series or dataframe only operations to the cls; evaluate
the doc strings again.
"""
from pandas.core.window import EWM, Expanding, Rolling, Window
@Appender(Rolling.__doc__)
def rolling(
self,
window,
min_periods=None,
center=False,
win_type=None,
on=None,
axis=0,
closed=None,
):
axis = self._get_axis_number(axis)
if win_type is not None:
return Window(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
)
return Rolling(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
)
cls.rolling = rolling
@Appender(Expanding.__doc__)
def expanding(self, min_periods=1, center=False, axis=0):
axis = self._get_axis_number(axis)
return Expanding(self, min_periods=min_periods, center=center, axis=axis)
cls.expanding = expanding
@Appender(EWM.__doc__)
def ewm(
self,
com=None,
span=None,
halflife=None,
alpha=None,
min_periods=0,
adjust=True,
ignore_na=False,
axis=0,
):
axis = self._get_axis_number(axis)
return EWM(
self,
com=com,
span=span,
halflife=halflife,
alpha=alpha,
min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na,
axis=axis,
)
cls.ewm = ewm
@Appender(_shared_docs["transform"] % dict(axis="", **_shared_doc_kwargs))
def transform(self, func, *args, **kwargs):
result = self.agg(func, *args, **kwargs)
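            # A transform must return an object aligned with the input, so
            # reduced (aggregated) results are rejected.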
if is_scalar(result) or len(result) != len(self):
raise ValueError("transforms cannot produce aggregated results")
return result
# ----------------------------------------------------------------------
# Misc methods
_shared_docs[
"valid_index"
] = """
Return index for %(position)s non-NA/null value.
Returns
-------
scalar : type of index
Notes
-----
If all elements are non-NA/null, returns None.
Also returns None for empty %(klass)s.
"""
def _find_valid_index(self, how: str):
"""
        Retrieves the index of the first or last valid value.
Parameters
----------
how : {'first', 'last'}
Use this parameter to change between the first or last valid index.
Returns
-------
idx_first_valid : type of index
"""
idxpos = find_valid_index(self._values, how)
if idxpos is None:
return None
return self.index[idxpos]
@Appender(
_shared_docs["valid_index"] % {"position": "first", "klass": "Series/DataFrame"}
)
def first_valid_index(self):
return self._find_valid_index("first")
@Appender(
_shared_docs["valid_index"] % {"position": "last", "klass": "Series/DataFrame"}
)
def last_valid_index(self):
return self._find_valid_index("last")
def _doc_parms(cls):
"""Return a tuple of the doc parms."""
axis_descr = (
f"{{{", ".join(f"{a} ({i})' for i, a in enumerate(cls._AXIS_ORDERS))}}}"
)
name = cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else "scalar"
name2 = cls.__name__
return axis_descr, name, name2
_num_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
Axis for the function to be applied on.
skipna : bool, default True
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
%(min_count)s\
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
%(name1)s or %(name2)s (if level specified)\
%(see_also)s\
%(examples)s
"""
_num_doc_mad = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
Axis for the function to be applied on.
skipna : bool, default None
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
Returns
-------
%(name1)s or %(name2)s (if level specified)\
%(see_also)s\
%(examples)s
"""
_num_ddof_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
%(name1)s or %(name2)s (if level specified)\n"""
_bool_doc = """
%(desc)s
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
* 1 / 'columns' : reduce the columns, return a Series whose index is the
original index.
* None : reduce all axes, return a scalar.
bool_only : bool, default None
Include only boolean columns. If None, will attempt to use everything,
then use only boolean data. Not implemented for Series.
skipna : bool, default True
Exclude NA/null values. If the entire row/column is NA and skipna is
True, then the result will be %(empty_value)s, as for an empty row/column.
If skipna is False, then NA are treated as True, because these are not
equal to zero.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(name1)s or %(name2)s
If level is specified, then, %(name2)s is returned; otherwise, %(name1)s
is returned.
%(see_also)s
%(examples)s"""
_all_desc = """\
Return whether all elements are True, potentially over an axis.
Returns True unless there is at least one element within a series or
along a DataFrame axis that is False or equivalent (e.g. zero or
empty)."""
_all_examples = """\
Examples
--------
**Series**
>>> pd.Series([True, True]).all()
True
>>> pd.Series([True, False]).all()
False
>>> pd.Series([]).all()
True
>>> pd.Series([np.nan]).all()
True
>>> pd.Series([np.nan]).all(skipna=False)
True
**DataFrames**
Create a dataframe from a dictionary.
>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})
>>> df
col1 col2
0 True True
1 True False
Default behaviour checks if column-wise values all return True.
>>> df.all()
col1 True
col2 False
dtype: bool
Specify ``axis='columns'`` to check if row-wise values all return True.
>>> df.all(axis='columns')
0 True
1 False
dtype: bool
Or ``axis=None`` for whether every value is True.
>>> df.all(axis=None)
False
"""
_all_see_also = """\
See Also
--------
Series.all : Return True if all elements are True.
DataFrame.any : Return True if one (or more) elements are True.
"""
_cnum_doc = """
Return cumulative %(desc)s over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative
%(desc)s.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The index or the name of the axis. 0 is equivalent to None or 'index'.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
*args, **kwargs :
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(name1)s or %(name2)s
See Also
--------
core.window.Expanding.%(accum_func_name)s : Similar functionality
but ignores ``NaN`` values.
%(name2)s.%(accum_func_name)s : Return the %(desc)s over
%(name2)s axis.
%(name2)s.cummax : Return cumulative maximum over %(name2)s axis.
%(name2)s.cummin : Return cumulative minimum over %(name2)s axis.
%(name2)s.cumsum : Return cumulative sum over %(name2)s axis.
%(name2)s.cumprod : Return cumulative product over %(name2)s axis.
%(examples)s"""
_cummin_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummin()
0 2.0
1 NaN
2 2.0
3 -1.0
4 -1.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummin(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
To iterate over columns and find the minimum in each row,
use ``axis=1``
>>> df.cummin(axis=1)
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
"""
_cumsum_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumsum()
0 2.0
1 NaN
2 7.0
3 6.0
4 6.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumsum(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
To iterate over columns and find the sum in each row,
use ``axis=1``
>>> df.cumsum(axis=1)
A B
0 2.0 3.0
1 3.0 NaN
2 1.0 1.0
"""
_cumprod_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumprod()
0 2.0
1 NaN
2 10.0
3 -10.0
4 -0.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumprod(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the product
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 6.0 0.0
To iterate over columns and find the product in each row,
use ``axis=1``
>>> df.cumprod(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 0.0
"""
_cummax_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummax()
0 2.0
1 NaN
2 5.0
3 5.0
4 5.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummax(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
To iterate over columns and find the maximum in each row,
use ``axis=1``
>>> df.cummax(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 1.0
"""
_any_see_also = """\
See Also
--------
numpy.any : Numpy version of this method.
Series.any : Return whether any element is True.
Series.all : Return whether all elements are True.
DataFrame.any : Return whether any element is True over requested axis.
DataFrame.all : Return whether all elements are True over requested axis.
"""
_any_desc = """\
Return whether any element is True, potentially over an axis.
Returns False unless there is at least one element within a series or
along a DataFrame axis that is True or equivalent (e.g. non-zero or
non-empty)."""
_any_examples = """\
Examples
--------
**Series**
For Series input, the output is a scalar indicating whether any element
is True.
>>> pd.Series([False, False]).any()
False
>>> pd.Series([True, False]).any()
True
>>> pd.Series([]).any()
False
>>> pd.Series([np.nan]).any()
False
>>> pd.Series([np.nan]).any(skipna=False)
True
**DataFrame**
Whether each column contains at least one True element (the default).
>>> df = pd.DataFrame({"A": [1, 2], "B": [0, 2], "C": [0, 0]})
>>> df
A B C
0 1 0 0
1 2 2 0
>>> df.any()
A True
B True
C False
dtype: bool
Aggregating over the columns.
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 2]})
>>> df
A B
0 True 1
1 False 2
>>> df.any(axis='columns')
0 True
1 True
dtype: bool
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 0]})
>>> df
A B
0 True 1
1 False 0
>>> df.any(axis='columns')
0 True
1 False
dtype: bool
Aggregating over the entire DataFrame with ``axis=None``.
>>> df.any(axis=None)
True
`any` for an empty DataFrame is an empty Series.
>>> pd.DataFrame([]).any()
Series([], dtype: bool)
"""
_shared_docs[
"stat_func_example"
] = """
Examples
--------
>>> idx = pd.MultiIndex.from_arrays([
... ['warm', 'warm', 'cold', 'cold'],
... ['dog', 'falcon', 'fish', 'spider']],
... names=['blooded', 'animal'])
>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)
>>> s
blooded animal
warm dog 4
falcon 2
cold fish 0
spider 8
Name: legs, dtype: int64
>>> s.{stat_func}()
{default_output}
{verb} using level names, as well as indices.
>>> s.{stat_func}(level='blooded')
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64
>>> s.{stat_func}(level=0)
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64"""
_sum_examples = _shared_docs["stat_func_example"].format(
stat_func="sum", verb="Sum", default_output=14, level_output_0=6, level_output_1=8
)
_sum_examples += """
By default, the sum of an empty or all-NA Series is ``0``.
>>> pd.Series([]).sum() # min_count=0 is the default
0.0
This can be controlled with the ``min_count`` parameter. For example, if
you'd like the sum of an empty series to be NaN, pass ``min_count=1``.
>>> pd.Series([]).sum(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).sum()
0.0
>>> pd.Series([np.nan]).sum(min_count=1)
nan"""
_max_examples = _shared_docs["stat_func_example"].format(
stat_func="max", verb="Max", default_output=8, level_output_0=4, level_output_1=8
)
_min_examples = _shared_docs["stat_func_example"].format(
stat_func="min", verb="Min", default_output=0, level_output_0=2, level_output_1=0
)
_stat_func_see_also = """
See Also
--------
Series.sum : Return the sum.
Series.min : Return the minimum.
Series.max : Return the maximum.
Series.idxmin : Return the index of the minimum.
Series.idxmax : Return the index of the maximum.
DataFrame.sum : Return the sum over the requested axis.
DataFrame.min : Return the minimum over the requested axis.
DataFrame.max : Return the maximum over the requested axis.
DataFrame.idxmin : Return the index of the minimum over the requested axis.
DataFrame.idxmax : Return the index of the maximum over the requested axis."""
_prod_examples = """
Examples
--------
By default, the product of an empty or all-NA Series is ``1``
>>> pd.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> pd.Series([]).prod(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).prod()
1.0
>>> pd.Series([np.nan]).prod(min_count=1)
nan"""
_min_count_stub = """\
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
.. versionadded:: 0.22.0
Added with the default being 0. This means the sum of an all-NA
or empty Series is 0, and the product of an all-NA or empty
Series is 1.
"""
def _make_min_count_stat_function(
cls,
name: str,
name1: str,
name2: str,
axis_descr: str,
desc: str,
func: Callable,
see_also: str = "",
examples: str = "",
) -> Callable:
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count=_min_count_stub,
see_also=see_also,
examples=examples,
)
@Appender(_num_doc)
def stat_func(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=0,
**kwargs,
):
if name == "sum":
nv.validate_sum(tuple(), kwargs)
elif name == "prod":
nv.validate_prod(tuple(), kwargs)
else:
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(
name, axis=axis, level=level, skipna=skipna, min_count=min_count
)
return self._reduce(
func,
name=name,
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
return set_function_name(stat_func, name, cls)
def _make_stat_function(
cls,
name: str,
name1: str,
name2: str,
axis_descr: str,
desc: str,
func: Callable,
see_also: str = "",
examples: str = "",
) -> Callable:
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also=see_also,
examples=examples,
)
@Appender(_num_doc)
def stat_func(
self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs
):
if name == "median":
nv.validate_median(tuple(), kwargs)
else:
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
return self._reduce(
func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only
)
return set_function_name(stat_func, name, cls)
def _make_stat_function_ddof(
cls, name: str, name1: str, name2: str, axis_descr: str, desc: str, func: Callable
) -> Callable:
@Substitution(desc=desc, name1=name1, name2=name2, axis_descr=axis_descr)
@Appender(_num_ddof_doc)
def stat_func(
self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
):
nv.validate_stat_ddof_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(
name, axis=axis, level=level, skipna=skipna, ddof=ddof
)
return self._reduce(
func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof
)
return set_function_name(stat_func, name, cls)
def _make_cum_function(
cls,
name: str,
name1: str,
name2: str,
axis_descr: str,
desc: str,
accum_func: Callable,
accum_func_name: str,
mask_a: float,
mask_b: float,
examples: str,
) -> Callable:
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
accum_func_name=accum_func_name,
examples=examples,
)
@Appender(_cnum_doc)
def cum_func(self, axis=None, skipna=True, *args, **kwargs):
skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)
if axis is None:
axis = self._stat_axis_number
else:
axis = self._get_axis_number(axis)
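        # For axis=1, accumulate along rows by transposing, running the axis=0
        # path, and transposing back.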
if axis == 1:
return cum_func(self.T, axis=0, skipna=skipna, *args, **kwargs).T
def na_accum_func(blk_values):
# We will be applying this function to block values
if blk_values.dtype.kind in ["m", "M"]:
# GH#30460, GH#29058
# numpy 1.18 started sorting NaTs at the end instead of beginning,
# so we need to work around to maintain backwards-consistency.
orig_dtype = blk_values.dtype
# We need to define mask before masking NaTs
mask = isna(blk_values)
if accum_func == np.minimum.accumulate:
# Note: the accum_func comparison fails as an "is" comparison
y = blk_values.view("i8")
y[mask] = np.iinfo(np.int64).max
changed = True
else:
y = blk_values
changed = False
result = accum_func(y.view("i8"), axis)
if skipna:
np.putmask(result, mask, iNaT)
elif accum_func == np.minimum.accumulate:
# Restore NaTs that we masked previously
nz = (~np.asarray(mask)).nonzero()[0]
if len(nz):
# everything up to the first non-na entry stays NaT
result[: nz[0]] = iNaT
if changed:
# restore NaT elements
y[mask] = iNaT # TODO: could try/finally for this?
if isinstance(blk_values, np.ndarray):
result = result.view(orig_dtype)
else:
# DatetimeArray
result = type(blk_values)._from_sequence(result, dtype=orig_dtype)
elif skipna and not issubclass(
blk_values.dtype.type, (np.integer, np.bool_)
):
vals = blk_values.copy().T
mask = isna(vals)
np.putmask(vals, mask, mask_a)
result = accum_func(vals, axis)
np.putmask(result, mask, mask_b)
else:
result = accum_func(blk_values.T, axis)
# transpose back for ndarray, not for EA
return result.T if hasattr(result, "T") else result
result = self._data.apply(na_accum_func)
d = self._construct_axes_dict()
d["copy"] = False
return self._constructor(result, **d).__finalize__(self)
return set_function_name(cum_func, name, cls)
def _make_logical_function(
cls,
name: str,
name1: str,
name2: str,
axis_descr: str,
desc: str,
func: Callable,
see_also: str,
examples: str,
empty_value: bool,
) -> Callable:
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
see_also=see_also,
examples=examples,
empty_value=empty_value,
)
@Appender(_bool_doc)
def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
nv.validate_logical_func(tuple(), kwargs, fname=name)
if level is not None:
if bool_only is not None:
raise NotImplementedError(
"Option bool_only is not implemented with option level."
)
return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
return self._reduce(
func,
name=name,
axis=axis,
skipna=skipna,
numeric_only=bool_only,
filter_type="bool",
)
return set_function_name(logical_func, name, cls)
import collections
from datetime import timedelta
import functools
import gc
import json
import operator
import pickle
import re
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
FrozenSet,
Hashable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
)
import warnings
import weakref
import numpy as np
from pandas._config import config
from pandas._libs import Timestamp, iNaT, lib
from pandas._typing import (
Axis,
FilePathOrBuffer,
FrameOrSeries,
JSONSerializable,
Label,
Level,
Renamer,
)
from pandas.compat import set_function_name
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
Appender,
Substitution,
doc,
rewrite_axis_style_signature,
)
from pandas.util._validators import (
validate_bool_kwarg,
validate_fillna_kwargs,
validate_percentile,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_dict_like,
is_extension_array_dtype,
is_float,
is_integer,
is_list_like,
is_number,
is_numeric_dtype,
is_object_dtype,
is_re_compilable,
is_scalar,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
import pandas as pd
from pandas.core import missing, nanops
import pandas.core.algorithms as algos
from pandas.core.base import PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.indexes.api import (
Index,
InvalidIndexError,
MultiIndex,
RangeIndex,
ensure_index,
)
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import Period, PeriodIndex
import pandas.core.indexing as indexing
from pandas.core.internals import BlockManager
from pandas.core.missing import find_valid_index
from pandas.core.ops import _align_method_FRAME
from pandas.io.formats import format as fmt
from pandas.io.formats.format import DataFrameFormatter, format_percentiles
from pandas.io.formats.printing import pprint_thing
from pandas.tseries.frequencies import to_offset
if TYPE_CHECKING:
from pandas.core.resample import Resampler
# goal is to be able to define the docs close to function, while still being
# able to share
_shared_docs: Dict[str, str] = dict()
_shared_doc_kwargs = dict(
axes="keywords for axes",
klass="Series/DataFrame",
axes_single_arg="int or labels for object",
args_transpose="axes to permute (int or label for object)",
optional_by="""
by : str or list of str
Name or list of names to sort by""",
)
def _single_replace(self, to_replace, method, inplace, limit):
"""
Replaces values in a Series using the fill method specified when no
    replacement value is given in the replace method.
"""
if self.ndim != 1:
raise TypeError(
f"cannot replace {to_replace} with method {method} on a "
f"{type(self).__name__}"
)
orig_dtype = self.dtype
result = self if inplace else self.copy()
fill_f = missing.get_fill_func(method)
mask = missing.mask_missing(result.values, to_replace)
values = fill_f(result.values, limit=limit, mask=mask)
if values.dtype == orig_dtype and inplace:
return
result = pd.Series(values, index=self.index, dtype=self.dtype).__finalize__(self)
if inplace:
self._update_inplace(result._data)
return
return result
bool_t = bool # Need alias because NDFrame has def bool:
class NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin):
"""
    N-dimensional analogue of DataFrame. Stores multi-dimensional data in a
    size-mutable, labeled data structure.
Parameters
----------
data : BlockManager
axes : list
copy : bool, default False
"""
_internal_names: List[str] = [
"_data",
"_cacher",
"_item_cache",
"_cache",
"_is_copy",
"_subtyp",
"_name",
"_index",
"_default_kind",
"_default_fill_value",
"_metadata",
"__array_struct__",
"__array_interface__",
]
_internal_names_set: Set[str] = set(_internal_names)
_accessors: Set[str] = set()
_deprecations: FrozenSet[str] = frozenset(["get_values"])
_metadata: List[str] = []
_is_copy = None
_data: BlockManager
_attrs: Dict[Optional[Hashable], Any]
_typ: str
# ----------------------------------------------------------------------
# Constructors
def __init__(
self,
data: BlockManager,
copy: bool = False,
attrs: Optional[Mapping[Optional[Hashable], Any]] = None,
):
# copy kwarg is retained for mypy compat, is not used
object.__setattr__(self, "_is_copy", None)
object.__setattr__(self, "_data", data)
object.__setattr__(self, "_item_cache", {})
if attrs is None:
attrs = {}
else:
attrs = dict(attrs)
object.__setattr__(self, "_attrs", attrs)
@classmethod
def _init_mgr(cls, mgr, axes=None, dtype=None, copy=False):
""" passed a manager and a axes dict """
for a, axe in axes.items():
if axe is not None:
mgr = mgr.reindex_axis(
axe, axis=cls._get_block_manager_axis(a), copy=False
)
# make a copy if explicitly requested
if copy:
mgr = mgr.copy()
if dtype is not None:
# avoid further copies if we can
if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:
mgr = mgr.astype(dtype=dtype)
return mgr
# ----------------------------------------------------------------------
@property
def attrs(self) -> Dict[Optional[Hashable], Any]:
"""
Dictionary of global attributes on this object.
.. warning::
attrs is experimental and may change without warning.
"""
if self._attrs is None:
self._attrs = {}
return self._attrs
@attrs.setter
def attrs(self, value: Mapping[Optional[Hashable], Any]) -> None:
self._attrs = dict(value)
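    # A minimal usage sketch for the experimental ``attrs`` mapping
    # (illustrative; the key and value here are arbitrary):
    #
    # >>> df = pd.DataFrame({"a": [1, 2]})
    # >>> df.attrs["source"] = "sensor-1"
    # >>> df.attrs
    # {'source': 'sensor-1'}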
@classmethod
def _validate_dtype(cls, dtype):
""" validate the passed dtype """
if dtype is not None:
dtype = pandas_dtype(dtype)
# a compound dtype
if dtype.kind == "V":
raise NotImplementedError(
"compound dtypes are not implemented "
f"in the {cls.__name__} constructor"
)
return dtype
# ----------------------------------------------------------------------
# Construction
@property
def _constructor(self: FrameOrSeries) -> Type[FrameOrSeries]:
"""
Used when a manipulation result has the same dimensions as the
original.
"""
raise AbstractMethodError(self)
@property
def _constructor_sliced(self):
"""
        Used when a manipulation result has one lower dimension than the
        original, such as DataFrame single column slicing.
"""
raise AbstractMethodError(self)
@property
def _constructor_expanddim(self):
"""
        Used when a manipulation result has one higher dimension than the
        original, such as Series.to_frame().
"""
raise NotImplementedError
# ----------------------------------------------------------------------
# Axis
_AXIS_ALIASES = {"rows": 0}
_AXIS_IALIASES = {0: "rows"}
_stat_axis_number = 0
_stat_axis_name = "index"
_ix = None
_AXIS_ORDERS: List[str]
_AXIS_NUMBERS: Dict[str, int]
_AXIS_NAMES: Dict[int, str]
_AXIS_REVERSED: bool
_info_axis_number: int
_info_axis_name: str
_AXIS_LEN: int
def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d
@classmethod
def _construct_axes_from_arguments(
cls, args, kwargs, require_all: bool = False, sentinel=None
):
"""
        Construct and return axes if supplied in args/kwargs.
        If require_all, raise if not all axis arguments are supplied.
        Return a tuple of (axes, kwargs).
sentinel specifies the default parameter when an axis is not
supplied; useful to distinguish when a user explicitly passes None
in scenarios where None has special meaning.
"""
# construct the args
args = list(args)
for a in cls._AXIS_ORDERS:
            # look for an argument by position
if a not in kwargs:
try:
kwargs[a] = args.pop(0)
except IndexError as err:
if require_all:
raise TypeError(
"not enough/duplicate arguments specified!"
) from err
axes = {a: kwargs.pop(a, sentinel) for a in cls._AXIS_ORDERS}
return axes, kwargs
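    # A minimal sketch of how positional and keyword axes are combined for a
    # DataFrame (illustrative; assumes _AXIS_ORDERS == ["index", "columns"]):
    #
    # >>> pd.DataFrame._construct_axes_from_arguments((["r1"],), {"columns": ["c1"]})
    # ({'index': ['r1'], 'columns': ['c1']}, {})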
@classmethod
def _get_axis_number(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if is_integer(axis):
if axis in cls._AXIS_NAMES:
return axis
else:
try:
return cls._AXIS_NUMBERS[axis]
except KeyError:
pass
raise ValueError(f"No axis named {axis} for object type {cls}")
@classmethod
def _get_axis_name(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if isinstance(axis, str):
if axis in cls._AXIS_NUMBERS:
return axis
else:
try:
return cls._AXIS_NAMES[axis]
except KeyError:
pass
raise ValueError(f"No axis named {axis} for object type {cls}")
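    # A minimal sketch of axis resolution on DataFrame (illustrative):
    #
    # >>> pd.DataFrame._get_axis_number("columns")
    # 1
    # >>> pd.DataFrame._get_axis_name(0)
    # 'index'
    # >>> pd.DataFrame._get_axis_number("rows")  # resolved via _AXIS_ALIASES
    # 0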
def _get_axis(self, axis):
name = self._get_axis_name(axis)
return getattr(self, name)
@classmethod
def _get_block_manager_axis(cls, axis):
"""Map the axis to the block_manager axis."""
axis = cls._get_axis_number(axis)
if cls._AXIS_REVERSED:
m = cls._AXIS_LEN - 1
return m - axis
return axis
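    # A minimal sketch (illustrative): DataFrame has _AXIS_REVERSED == True and
    # _AXIS_LEN == 2, so axis 0 (index) maps to block-manager axis 1 and
    # vice versa, while Series passes the axis through unchanged.
    #
    # >>> pd.DataFrame._get_block_manager_axis(0)
    # 1
    # >>> pd.DataFrame._get_block_manager_axis(1)
    # 0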
def _get_axis_resolvers(self, axis: str) -> Dict[str, ABCSeries]:
# index or columns
axis_index = getattr(self, axis)
d = dict()
prefix = axis[0]
for i, name in enumerate(axis_index.names):
if name is not None:
key = level = name
else:
# prefix with 'i' or 'c' depending on the input axis
# e.g., you must do ilevel_0 for the 0th level of an unnamed
                # multiindex
key = f"{prefix}level_{i}"
level = i
level_values = axis_index.get_level_values(level)
s = level_values.to_series()
s.index = axis_index
d[key] = s
# put the index/columns itself in the dict
if isinstance(axis_index, MultiIndex):
dindex = axis_index
else:
dindex = axis_index.to_series()
d[axis] = dindex
return d
def _get_index_resolvers(self) -> Dict[str, ABCSeries]:
from pandas.core.computation.parsing import clean_column_name
d: Dict[str, ABCSeries] = {}
for axis_name in self._AXIS_ORDERS:
d.update(self._get_axis_resolvers(axis_name))
return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)}
def _get_cleaned_column_resolvers(self) -> Dict[str, ABCSeries]:
"""
Return the special character free column resolvers of a dataframe.
Column names with special characters are 'cleaned up' so that they can
be referred to by backtick quoting.
Used in :meth:`DataFrame.eval`.
"""
from pandas.core.computation.parsing import clean_column_name
if isinstance(self, ABCSeries):
return {clean_column_name(self.name): self}
return {
clean_column_name(k): v for k, v in self.items() if not isinstance(k, int)
}
@property
def _info_axis(self):
return getattr(self, self._info_axis_name)
@property
def _stat_axis(self):
return getattr(self, self._stat_axis_name)
@property
def shape(self) -> Tuple[int, ...]:
"""
Return a tuple of axis dimensions
"""
return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)
@property
def axes(self) -> List[Index]:
"""
Return index label(s) of the internal NDFrame
"""
# we do it this way because if we have reversed axes, then
        # the block manager shows them reversed
return [self._get_axis(a) for a in self._AXIS_ORDERS]
@property
def ndim(self) -> int:
"""
Return an int representing the number of axes / array dimensions.
Return 1 if Series. Otherwise return 2 if DataFrame.
See Also
--------
ndarray.ndim : Number of array dimensions.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.ndim
1
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.ndim
2
"""
return self._data.ndim
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
See Also
--------
ndarray.size : Number of elements in the array.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.size
3
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.size
4
"""
return np.prod(self.shape)
@property
def _selected_obj(self: FrameOrSeries) -> FrameOrSeries:
""" internal compat with SelectionMixin """
return self
@property
def _obj_with_exclusions(self: FrameOrSeries) -> FrameOrSeries:
""" internal compat with SelectionMixin """
return self
def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
"""
Assign desired index to given axis.
Indexes for%(extended_summary_sub)s row labels can be changed by assigning
a list-like or Index.
.. versionchanged:: 0.21.0
The signature is now `labels` and `axis`, consistent with
the rest of pandas API. Previously, the `axis` and `labels`
arguments were respectively the first and second positional
arguments.
Parameters
----------
labels : list-like, Index
The values for the new index.
axis : %(axes_single_arg)s, default 0
The axis to update. The value 0 identifies the rows%(axis_description_sub)s.
        inplace : bool, default False
            If True, modify the object in place and return None instead of
            returning a new %(klass)s instance.
Returns
-------
renamed : %(klass)s or None
An object of type %(klass)s if inplace=False, None otherwise.
See Also
--------
%(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s.
"""
if inplace:
setattr(self, self._get_axis_name(axis), labels)
else:
obj = self.copy()
obj.set_axis(labels, axis=axis, inplace=True)
return obj
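    # A minimal usage sketch for ``set_axis`` (illustrative):
    #
    # >>> df = pd.DataFrame({"a": [1, 2]})
    # >>> df.set_axis(["x", "y"], axis=0)
    #    a
    # x  1
    # y  2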
def _set_axis(self, axis: int, labels: Index) -> None:
labels = ensure_index(labels)
self._data.set_axis(axis, labels)
self._clear_item_cache()
def swapaxes(self: FrameOrSeries, axis1, axis2, copy=True) -> FrameOrSeries:
"""
Interchange axes and swap values axes appropriately.
Returns
-------
y : same as input
"""
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if i == j:
if copy:
return self.copy()
return self
mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN))
new_values = self.values.swapaxes(i, j)
if copy:
new_values = new_values.copy()
return self._constructor(new_values, *new_axes).__finalize__(self)
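    # A minimal sketch (illustrative): for a two-dimensional object, swapping
    # axes 0 and 1 is equivalent to a transpose.
    #
    # >>> df = pd.DataFrame({"a": [1, 2]}, index=["x", "y"])
    # >>> df.swapaxes(0, 1)
    #    x  y
    # a  1  2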
def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries:
"""
Return DataFrame with requested index / column level(s) removed.
.. versionadded:: 0.24.0
Parameters
----------
level : int, str, or list-like
If a string is given, must be the name of a level
If list-like, elements must be names or positional indexes
of levels.
axis : {0 or 'index', 1 or 'columns'}, default 0
            Axis along which the level(s) is removed:
            * 0 or 'index': remove the level(s) from the row index.
            * 1 or 'columns': remove the level(s) from the column index.
Returns
-------
DataFrame
DataFrame with requested index / column level(s) removed.
Examples
--------
>>> df = pd.DataFrame([
... [1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12]
... ]).set_index([0, 1]).rename_axis(['a', 'b'])
>>> df.columns = pd.MultiIndex.from_tuples([
... ('c', 'e'), ('d', 'f')
... ], names=['level_1', 'level_2'])
>>> df
level_1 c d
level_2 e f
a b
1 2 3 4
5 6 7 8
9 10 11 12
>>> df.droplevel('a')
level_1 c d
level_2 e f
b
2 3 4
6 7 8
10 11 12
>>> df.droplevel('level_2', axis=1)
level_1 c d
a b
1 2 3 4
5 6 7 8
9 10 11 12
"""
labels = self._get_axis(axis)
new_labels = labels.droplevel(level)
result = self.set_axis(new_labels, axis=axis, inplace=False)
return result
def pop(self: FrameOrSeries, item) -> FrameOrSeries:
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
"""
result = self[item]
del self[item]
try:
result._reset_cacher()
except AttributeError:
pass
return result
def squeeze(self, axis=None):
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = pd.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_0a = df.loc[df.index < 1, ['a']]
>>> df_0a
a
0 1
Squeezing the rows produces a single scalar Series:
>>> df_0a.squeeze('rows')
a 1
Name: 0, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_0a.squeeze()
1
"""
axis = self._AXIS_NAMES if axis is None else (self._get_axis_number(axis),)
return self.iloc[
tuple(
0 if i in axis and len(a) == 1 else slice(None)
for i, a in enumerate(self.axes)
)
]
# ----------------------------------------------------------------------
# Rename
def rename(
self: FrameOrSeries,
mapper: Optional[Renamer] = None,
*,
index: Optional[Renamer] = None,
columns: Optional[Renamer] = None,
axis: Optional[Axis] = None,
copy: bool = True,
inplace: bool = False,
level: Optional[Level] = None,
errors: str = "ignore",
) -> Optional[FrameOrSeries]:
"""
        Alter axes labels using an input function or functions. Function / dict values must be
unique (1-to-1). Labels not contained in a dict / Series will be left
as-is. Extra labels listed don't throw an error. Alternatively, change
``Series.name`` with a scalar value (Series only).
Parameters
----------
%(axes)s : scalar, list-like, dict-like or function, optional
Scalar or list-like will alter the ``Series.name`` attribute,
and raise on DataFrame.
dict-like or functions are transformations to apply to
that axis' values
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Whether to return a new %(klass)s. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
renamed : %(klass)s (new object)
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
NDFrame.rename_axis
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64
Since ``DataFrame`` doesn't have a ``.name`` attribute,
only mapping-type arguments are allowed.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(2)
Traceback (most recent call last):
...
TypeError: 'int' object is not callable
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
See the :ref:`user guide <basics.rename>` for more.
"""
if mapper is None and index is None and columns is None:
raise TypeError("must pass an index to rename")
if index is not None or columns is not None:
if axis is not None:
raise TypeError(
"Cannot specify both 'axis' and any of 'index' or 'columns'"
)
elif mapper is not None:
raise TypeError(
"Cannot specify both 'mapper' and any of 'index' or 'columns'"
)
else:
# use the mapper argument
if axis and self._get_axis_number(axis) == 1:
columns = mapper
else:
index = mapper
result = self if inplace else self.copy(deep=copy)
for axis_no, replacements in enumerate((index, columns)):
if replacements is None:
continue
ax = self._get_axis(axis_no)
baxis = self._get_block_manager_axis(axis_no)
f = com.get_rename_function(replacements)
if level is not None:
level = ax._get_level_number(level)
# GH 13473
if not callable(replacements):
indexer = ax.get_indexer_for(replacements)
if errors == "raise" and len(indexer[indexer == -1]):
missing_labels = [
label
for index, label in enumerate(replacements)
if indexer[index] == -1
]
raise KeyError(f"{missing_labels} not found in axis")
result._data = result._data.rename_axis(
f, axis=baxis, copy=copy, level=level
)
result._clear_item_cache()
if inplace:
self._update_inplace(result._data)
return None
else:
return result.__finalize__(self)
@rewrite_axis_style_signature("mapper", [("copy", True), ("inplace", False)])
def rename_axis(self, mapper=lib.no_default, **kwargs):
"""
Set the name of the axis for the index or columns.
Parameters
----------
mapper : scalar, list-like, optional
Value to set the axis name attribute.
index, columns : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or functions transformations to
apply to that axis' values.
Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index``
and/or ``columns``.
.. versionchanged:: 0.24.0
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to rename.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Modifies the object directly, instead of creating a new Series
or DataFrame.
Returns
-------
Series, DataFrame, or None
The same type as the caller or None if `inplace` is True.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Notes
-----
``DataFrame.rename_axis`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
The first calling convention will only modify the names of
the index and/or the names of the Index object that is the columns.
In this case, the parameter ``copy`` is ignored.
        The second calling convention will modify the names of
        the corresponding index if mapper is a list or a scalar.
However, if mapper is dict-like or a function, it will use the
deprecated behavior of modifying the axis *labels*.
We *highly* recommend using keyword arguments to clarify your
intent.
Examples
--------
**Series**
>>> s = pd.Series(["dog", "cat", "monkey"])
>>> s
0 dog
1 cat
2 monkey
dtype: object
>>> s.rename_axis("animal")
animal
0 dog
1 cat
2 monkey
dtype: object
**DataFrame**
>>> df = pd.DataFrame({"num_legs": [4, 4, 2],
... "num_arms": [0, 0, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs num_arms
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("animal")
>>> df
num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("limbs", axis="columns")
>>> df
limbs num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
**MultiIndex**
>>> df.index = pd.MultiIndex.from_product([['mammal'],
... ['dog', 'cat', 'monkey']],
... names=['type', 'name'])
>>> df
limbs num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(index={'type': 'class'})
limbs num_legs num_arms
class name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(columns=str.upper)
LIMBS num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
"""
axes, kwargs = self._construct_axes_from_arguments(
(), kwargs, sentinel=lib.no_default
)
copy = kwargs.pop("copy", True)
inplace = kwargs.pop("inplace", False)
axis = kwargs.pop("axis", 0)
if axis is not None:
axis = self._get_axis_number(axis)
if kwargs:
raise TypeError(
"rename_axis() got an unexpected keyword "
f'argument "{list(kwargs.keys())[0]}"'
)
inplace = validate_bool_kwarg(inplace, "inplace")
if mapper is not lib.no_default:
# Use v0.23 behavior if a scalar or list
non_mapper = is_scalar(mapper) or (
is_list_like(mapper) and not is_dict_like(mapper)
)
if non_mapper:
return self._set_axis_name(mapper, axis=axis, inplace=inplace)
else:
raise ValueError("Use `.rename` to alter labels with a mapper.")
else:
# Use new behavior. Means that index and/or columns
# is specified
result = self if inplace else self.copy(deep=copy)
for axis in range(self._AXIS_LEN):
v = axes.get(self._AXIS_NAMES[axis])
if v is lib.no_default:
continue
non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v))
if non_mapper:
newnames = v
else:
f = com.get_rename_function(v)
curnames = self._get_axis(axis).names
newnames = [f(name) for name in curnames]
result._set_axis_name(newnames, axis=axis, inplace=True)
if not inplace:
return result
def _set_axis_name(self, name, axis=0, inplace=False):
"""
Set the name(s) of the axis.
Parameters
----------
name : str or list of str
Name(s) to set.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to set the label. The value 0 or 'index' specifies index,
and the value 1 or 'columns' specifies columns.
inplace : bool, default False
If `True`, do operation inplace and return None.
.. versionadded:: 0.21.0
Returns
-------
Series, DataFrame, or None
The same type as the caller or `None` if `inplace` is `True`.
See Also
--------
DataFrame.rename : Alter the axis labels of :class:`DataFrame`.
Series.rename : Alter the index labels or set the index name
of :class:`Series`.
Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.
Examples
--------
>>> df = pd.DataFrame({"num_legs": [4, 4, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs
dog 4
cat 4
monkey 2
>>> df._set_axis_name("animal")
num_legs
animal
dog 4
cat 4
monkey 2
>>> df.index = pd.MultiIndex.from_product(
... [["mammal"], ['dog', 'cat', 'monkey']])
>>> df._set_axis_name(["type", "name"])
                       num_legs
type name
mammal dog 4
cat 4
monkey 2
"""
axis = self._get_axis_number(axis)
idx = self._get_axis(axis).set_names(name)
inplace = validate_bool_kwarg(inplace, "inplace")
renamed = self if inplace else self.copy()
renamed.set_axis(idx, axis=axis, inplace=True)
if not inplace:
return renamed
# ----------------------------------------------------------------------
# Comparison Methods
def _indexed_same(self, other) -> bool:
return all(
self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS
)
def equals(self, other):
"""
Test whether two objects contain the same elements.
This function allows two Series or DataFrames to be compared against
each other to see if they have the same shape and elements. NaNs in
the same location are considered equal. The column headers do not
need to have the same type, but the elements within the columns must
be the same dtype.
Parameters
----------
other : Series or DataFrame
The other Series or DataFrame to be compared with the first.
Returns
-------
bool
True if all elements are the same in both objects, False
otherwise.
See Also
--------
Series.eq : Compare two Series objects of the same length
and return a Series where each element is True if the element
in each Series is equal, False otherwise.
DataFrame.eq : Compare two DataFrame objects of the same shape and
return a DataFrame where each element is True if the respective
element in each DataFrame is equal, False otherwise.
testing.assert_series_equal : Raises an AssertionError if left and
right are not equal. Provides an easy interface to ignore
inequality in dtypes, indexes and precision among others.
testing.assert_frame_equal : Like assert_series_equal, but targets
DataFrames.
numpy.array_equal : Return True if two arrays have the same shape
and elements, False otherwise.
Notes
-----
This function requires that the elements have the same dtype as their
respective elements in the other Series or DataFrame. However, the
column labels do not need to have the same type, as long as they are
still considered equal.
Examples
--------
>>> df = pd.DataFrame({1: [10], 2: [20]})
>>> df
1 2
0 10 20
DataFrames df and exactly_equal have the same types and values for
their elements and column labels, which will return True.
>>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})
>>> exactly_equal
1 2
0 10 20
>>> df.equals(exactly_equal)
True
DataFrames df and different_column_type have the same element
types and values, but have different types for the column labels,
which will still return True.
>>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})
>>> different_column_type
1.0 2.0
0 10 20
>>> df.equals(different_column_type)
True
DataFrames df and different_data_type have different types for the
same values for their elements, and will return False even though
their column labels are the same values and types.
>>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})
>>> different_data_type
1 2
0 10.0 20.0
>>> df.equals(different_data_type)
False
"""
if not isinstance(other, self._constructor):
return False
return self._data.equals(other._data)
# -------------------------------------------------------------------------
# Unary Methods
def __neg__(self):
values = self._values
if is_bool_dtype(values):
arr = operator.inv(values)
elif (
is_numeric_dtype(values)
or is_timedelta64_dtype(values)
or is_object_dtype(values)
):
arr = operator.neg(values)
else:
raise TypeError(f"Unary negative expects numeric dtype, not {values.dtype}")
return self.__array_wrap__(arr)
def __pos__(self):
values = self._values
if is_bool_dtype(values):
arr = values
elif (
is_numeric_dtype(values)
or is_timedelta64_dtype(values)
or is_object_dtype(values)
):
arr = operator.pos(values)
else:
raise TypeError(f"Unary plus expects numeric dtype, not {values.dtype}")
return self.__array_wrap__(arr)
def __invert__(self):
if not self.size:
# inv fails with 0 len
return self
new_data = self._data.apply(operator.invert)
result = self._constructor(new_data).__finalize__(self)
return result
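    # A minimal sketch of the unary dtype handling above (illustrative):
    # boolean data is inverted, numeric data is negated.
    #
    # >>> -pd.Series([True, False])
    # 0    False
    # 1     True
    # dtype: bool
    # >>> -pd.Series([1, -2])
    # 0   -1
    # 1    2
    # dtype: int64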
def __nonzero__(self):
raise ValueError(
f"The truth value of a {type(self).__name__} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
)
__bool__ = __nonzero__
def bool(self):
"""
Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
        ValueError if the PandasObject does not have exactly 1 element, or if
        that element is not boolean.
Returns
-------
bool
Same single boolean value converted to bool type.
"""
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
return bool(v)
elif is_scalar(v):
raise ValueError(
"bool cannot act on a non-boolean single element "
f"{type(self).__name__}"
)
self.__nonzero__()
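    # A minimal usage sketch for ``bool()`` (illustrative):
    #
    # >>> pd.Series([True]).bool()
    # True
    # >>> pd.DataFrame({"a": [False]}).bool()
    # False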
def __abs__(self: FrameOrSeries) -> FrameOrSeries:
return self.abs()
def __round__(self: FrameOrSeries, decimals: int = 0) -> FrameOrSeries:
return self.round(decimals)
# -------------------------------------------------------------------------
# Label or Level Combination Helpers
#
# A collection of helper methods for DataFrame/Series operations that
# accept a combination of column/index labels and levels. All such
# operations should utilize/extend these methods when possible so that we
# have consistent precedence and validation logic throughout the library.
def _is_level_reference(self, key, axis=0):
"""
Test whether a key is a level reference for a given axis.
To be considered a level reference, `key` must be a string that:
- (axis=0): Matches the name of an index level and does NOT match
a column label.
- (axis=1): Matches the name of a column level and does NOT match
an index label.
Parameters
----------
key : str
Potential level name for the given axis
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_level : bool
"""
axis = self._get_axis_number(axis)
return (
key is not None
and is_hashable(key)
and key in self.axes[axis].names
and not self._is_label_reference(key, axis=axis)
)
def _is_label_reference(self, key, axis=0) -> bool_t:
"""
Test whether a key is a label reference for a given axis.
To be considered a label reference, `key` must be a string that:
- (axis=0): Matches a column label
- (axis=1): Matches an index label
Parameters
----------
key: str
Potential label name
axis: int, default 0
Axis perpendicular to the axis that labels are associated with
(0 means search for column labels, 1 means search for index labels)
Returns
-------
is_label: bool
"""
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
return (
key is not None
and is_hashable(key)
and any(key in self.axes[ax] for ax in other_axes)
)
def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t:
"""
Test whether a key is a label or level reference for a given axis.
To be considered either a label or a level reference, `key` must be a
string that:
- (axis=0): Matches a column label or an index level
- (axis=1): Matches an index label or a column level
Parameters
----------
key: str
Potential label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_label_or_level: bool
"""
return self._is_level_reference(key, axis=axis) or self._is_label_reference(
key, axis=axis
)
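    # A minimal sketch of label/level resolution (illustrative):
    #
    # >>> df = pd.DataFrame({"a": [1, 2]},
    # ...                   index=pd.Index(["x", "y"], name="idx"))
    # >>> df._is_label_reference("a")
    # True
    # >>> df._is_level_reference("idx")
    # True
    # >>> df._is_label_or_level_reference("b")
    # False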
def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None:
"""
Check whether `key` is ambiguous.
By ambiguous, we mean that it matches both a level of the input
`axis` and a label of the other axis.
Parameters
----------
key: str or object
Label or level name.
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns).
Raises
------
ValueError: `key` is ambiguous
"""
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
if (
key is not None
and is_hashable(key)
and key in self.axes[axis].names
and any(key in self.axes[ax] for ax in other_axes)
):
# Build an informative and grammatical warning
level_article, level_type = (
("an", "index") if axis == 0 else ("a", "column")
)
label_article, label_type = (
("a", "column") if axis == 0 else ("an", "index")
)
msg = (
f"'{key}' is both {level_article} {level_type} level and "
f"{label_article} {label_type} label, which is ambiguous."
)
raise ValueError(msg)
def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:
"""
Return a 1-D array of values associated with `key`, a label or level
from the given `axis`.
Retrieval logic:
- (axis=0): Return column values if `key` matches a column label.
Otherwise return index level values if `key` matches an index
level.
- (axis=1): Return row values if `key` matches an index label.
Otherwise return column level values if 'key' matches a column
level
Parameters
----------
key: str
Label or level name.
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
values: np.ndarray
Raises
------
KeyError
if `key` matches neither a label nor a level
ValueError
if `key` matches multiple labels
FutureWarning
if `key` is ambiguous. This will become an ambiguity error in a
future version
"""
axis = self._get_axis_number(axis)
other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]
if self._is_label_reference(key, axis=axis):
self._check_label_or_level_ambiguity(key, axis=axis)
values = self.xs(key, axis=other_axes[0])._values
elif self._is_level_reference(key, axis=axis):
values = self.axes[axis].get_level_values(key)._values
else:
raise KeyError(key)
# Check for duplicates
if values.ndim > 1:
if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex):
multi_message = (
"\n"
"For a multi-index, the label must be a "
"tuple with elements corresponding to each level."
)
else:
multi_message = ""
label_axis_name = "column" if axis == 0 else "index"
raise ValueError(
(
f"The {label_axis_name} label '{key}' "
f"is not unique.{multi_message}"
)
)
return values
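    # A minimal sketch (illustrative), reusing a frame with one column label
    # 'a' and one index level named 'idx':
    #
    # >>> df = pd.DataFrame({"a": [1, 2]},
    # ...                   index=pd.Index(["x", "y"], name="idx"))
    # >>> df._get_label_or_level_values("a")
    # array([1, 2])
    # >>> df._get_label_or_level_values("idx")
    # array(['x', 'y'], dtype=object)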
def _drop_labels_or_levels(self, keys, axis: int = 0):
"""
Drop labels and/or levels for the given `axis`.
For each key in `keys`:
- (axis=0): If key matches a column label then drop the column.
Otherwise if key matches an index level then drop the level.
- (axis=1): If key matches an index label then drop the row.
Otherwise if key matches a column level then drop the level.
Parameters
----------
keys: str or list of str
labels or levels to drop
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
dropped: DataFrame
Raises
------
ValueError
if any `keys` match neither a label nor a level
"""
axis = self._get_axis_number(axis)
# Validate keys
keys = com.maybe_make_list(keys)
invalid_keys = [
k for k in keys if not self._is_label_or_level_reference(k, axis=axis)
]
if invalid_keys:
raise ValueError(
(
"The following keys are not valid labels or "
f"levels for axis {axis}: {invalid_keys}"
)
)
# Compute levels and labels to drop
levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)]
labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)]
# Perform copy upfront and then use inplace operations below.
# This ensures that we always perform exactly one copy.
# ``copy`` and/or ``inplace`` options could be added in the future.
dropped = self.copy()
if axis == 0:
# Handle dropping index levels
if levels_to_drop:
dropped.reset_index(levels_to_drop, drop=True, inplace=True)
# Handle dropping columns labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=1, inplace=True)
else:
# Handle dropping column levels
if levels_to_drop:
if isinstance(dropped.columns, MultiIndex):
# Drop the specified levels from the MultiIndex
dropped.columns = dropped.columns.droplevel(levels_to_drop)
else:
# Drop the last level of Index by replacing with
# a RangeIndex
dropped.columns = RangeIndex(dropped.columns.size)
# Handle dropping index labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=0, inplace=True)
return dropped
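    # A minimal sketch (illustrative): a key matching a column label drops that
    # column, while a key matching an index level drops that level.
    #
    # >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]},
    # ...                   index=pd.Index(["x", "y"], name="idx"))
    # >>> df._drop_labels_or_levels("b").columns.tolist()
    # ['a']
    # >>> df._drop_labels_or_levels("idx").index.name is None
    # True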
# ----------------------------------------------------------------------
# Iteration
def __hash__(self):
raise TypeError(
f"{repr(type(self).__name__)} objects are mutable, "
f"thus they cannot be hashed"
)
def __iter__(self):
"""
Iterate over info axis.
Returns
-------
iterator
Info axis as iterator.
"""
return iter(self._info_axis)
# can we get a better explanation of this?
def keys(self):
"""
Get the 'info axis' (see Indexing for more).
This is index for Series, columns for DataFrame.
Returns
-------
Index
Info axis.
"""
return self._info_axis
def items(self):
"""
Iterate over (label, values) on info axis
This is index for Series and columns for DataFrame.
Returns
-------
Generator
"""
for h in self._info_axis:
yield h, self[h]
@Appender(items.__doc__)
def iteritems(self):
return self.items()
def __len__(self) -> int:
"""Returns length of info axis"""
return len(self._info_axis)
def __contains__(self, key) -> bool_t:
"""True if the key is in the info axis"""
return key in self._info_axis
@property
def empty(self) -> bool_t:
"""
Indicator whether DataFrame is empty.
True if DataFrame is entirely empty (no items), meaning any of the
axes are of length 0.
Returns
-------
bool
If DataFrame is empty, return True, if not return False.
See Also
--------
Series.dropna : Return series without null values.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
Notes
-----
If DataFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:
>>> df_empty = pd.DataFrame({'A' : []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
>>> df = pd.DataFrame({'A' : [np.nan]})
>>> df
A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True
"""
return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)
# ----------------------------------------------------------------------
# Array Interface
# This is also set in IndexOpsMixin
# GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented
__array_priority__ = 1000
def __array__(self, dtype=None) -> np.ndarray:
return np.asarray(self._values, dtype=dtype)
def __array_wrap__(self, result, context=None):
result = lib.item_from_zerodim(result)
if is_scalar(result):
# e.g. we get here with np.ptp(series)
# ptp also requires the item_from_zerodim
return result
d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)
return self._constructor(result, **d).__finalize__(self)
# ideally we would define this to avoid the getattr checks, but
    # it is slower
# @property
# def __array_interface__(self):
# """ provide numpy array interface method """
# values = self.values
# return dict(typestr=values.dtype.str,shape=values.shape,data=values)
# ----------------------------------------------------------------------
# Picklability
def __getstate__(self) -> Dict[str, Any]:
meta = {k: getattr(self, k, None) for k in self._metadata}
return dict(
_data=self._data,
_typ=self._typ,
_metadata=self._metadata,
attrs=self.attrs,
**meta,
)
def __setstate__(self, state):
if isinstance(state, BlockManager):
self._data = state
elif isinstance(state, dict):
typ = state.get("_typ")
if typ is not None:
attrs = state.get("_attrs", {})
object.__setattr__(self, "_attrs", attrs)
# set in the order of internal names
# to avoid definitional recursion
# e.g. say fill_value needing _data to be
# defined
meta = set(self._internal_names + self._metadata)
for k in list(meta):
if k in state:
v = state[k]
object.__setattr__(self, k, v)
for k, v in state.items():
if k not in meta:
object.__setattr__(self, k, v)
else:
raise NotImplementedError("Pre-0.12 pickles are no longer supported")
elif len(state) == 2:
raise NotImplementedError("Pre-0.12 pickles are no longer supported")
self._item_cache = {}
# ----------------------------------------------------------------------
# Rendering Methods
def __repr__(self) -> str:
# string representation based upon iterating over self
# (since, by definition, `PandasContainers` are iterable)
prepr = f"[{','.join(map(pprint_thing, self))}]"
return f"{type(self).__name__}({prepr})"
def _repr_latex_(self):
"""
Returns a LaTeX representation for a particular object.
Mainly for use with nbconvert (jupyter notebook conversion to pdf).
"""
if config.get_option("display.latex.repr"):
return self.to_latex()
else:
return None
def _repr_data_resource_(self):
"""
Not a real Jupyter special repr method, but we use the same
naming convention.
"""
if config.get_option("display.html.table_schema"):
data = self.head(config.get_option("display.max_rows"))
payload = json.loads(
data.to_json(orient="table"), object_pairs_hook=collections.OrderedDict
)
return payload
# ----------------------------------------------------------------------
# I/O Methods
_shared_docs[
"to_markdown"
] = """
Print %(klass)s in Markdown-friendly format.
.. versionadded:: 1.0.0
Parameters
----------
buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
%(klass)s in Markdown-friendly format.
"""
_shared_docs[
"to_excel"
] = """
Write %(klass)s to an Excel sheet.
To write a single %(klass)s to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
See Also
--------
to_csv : Write DataFrame to a comma-separated values (csv) file.
ExcelWriter : Class for writing DataFrame objects into excel sheets.
read_excel : Read an Excel file into a pandas DataFrame.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Notes
-----
For compatibility with :meth:`~DataFrame.to_csv`,
to_excel serializes lists and dicts to strings before writing.
    Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
Examples
--------
Create, write to and save a workbook:
>>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> df2 = df1.copy()
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
ExcelWriter can also be used to append to an existing Excel file:
>>> with pd.ExcelWriter('output.xlsx',
... mode='a') as writer: # doctest: +SKIP
... df.to_excel(writer, sheet_name='Sheet_name_3')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
@Appender(_shared_docs["to_excel"] % dict(klass="object"))
def to_excel(
self,
excel_writer,
sheet_name="Sheet1",
na_rep="",
float_format=None,
columns=None,
header=True,
index=True,
index_label=None,
startrow=0,
startcol=0,
engine=None,
merge_cells=True,
encoding=None,
inf_rep="inf",
verbose=True,
freeze_panes=None,
) -> None:
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(
df,
na_rep=na_rep,
cols=columns,
header=header,
float_format=float_format,
index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep,
)
formatter.write(
excel_writer,
sheet_name=sheet_name,
startrow=startrow,
startcol=startcol,
freeze_panes=freeze_panes,
engine=engine,
)
def to_json(
self,
path_or_buf: Optional[FilePathOrBuffer] = None,
orient: Optional[str] = None,
date_format: Optional[str] = None,
double_precision: int = 10,
force_ascii: bool_t = True,
date_unit: str = "ms",
default_handler: Optional[Callable[[Any], JSONSerializable]] = None,
lines: bool_t = False,
compression: Optional[str] = "infer",
index: bool_t = True,
indent: Optional[int] = None,
) -> Optional[str]:
"""
Convert the object to a JSON string.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path_or_buf : str or file handle, optional
File path or object. If not specified, the result is returned as
a string.
orient : str
Indication of expected JSON string format.
* Series:
- default is 'index'
- allowed values are: {'split','records','index','table'}.
* DataFrame:
- default is 'columns'
- allowed values are: {'split', 'records', 'index', 'columns',
'values', 'table'}.
* The format of the JSON string:
- 'split' : dict like {'index' -> [index], 'columns' -> [columns],
'data' -> [values]}
- 'records' : list like [{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
- 'columns' : dict like {column -> {index -> value}}
- 'values' : just the values array
- 'table' : dict like {'schema': {schema}, 'data': {data}}
Describing the data, where data component is like ``orient='records'``.
.. versionchanged:: 0.20.0
date_format : {None, 'epoch', 'iso'}
Type of date conversion. 'epoch' = epoch milliseconds,
'iso' = ISO8601. The default depends on the `orient`. For
``orient='table'``, the default is 'iso'. For all other orients,
the default is 'epoch'.
double_precision : int, default 10
The number of decimal places to use when encoding
floating point values.
force_ascii : bool, default True
Force encoded string to be ASCII.
date_unit : str, default 'ms' (milliseconds)
The time unit to encode to, governs timestamp and ISO8601
precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
microsecond, and nanosecond respectively.
default_handler : callable, default None
Handler to call if object cannot otherwise be converted to a
suitable format for JSON. Should receive a single argument which is
the object to convert and return a serialisable object.
lines : bool, default False
If 'orient' is 'records' write out line delimited json format. Will
throw ValueError if incorrect 'orient' since others are not list
like.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
.. versionadded:: 0.21.0
.. versionchanged:: 0.24.0
'infer' option added and set to default
index : bool, default True
Whether to include the index values in the JSON string. Not
including the index (``index=False``) is only supported when
orient is 'split' or 'table'.
.. versionadded:: 0.23.0
indent : int, optional
Length of whitespace used to indent each record.
.. versionadded:: 1.0.0
Returns
-------
None or str
If path_or_buf is None, returns the resulting json format as a
string. Otherwise returns None.
See Also
--------
read_json : Convert a JSON string to pandas object.
Notes
-----
The behavior of ``indent=0`` varies from the stdlib, which does not
indent the output but does insert newlines. Currently, ``indent=0``
and the default ``indent=None`` are equivalent in pandas, though this
may change in a future release.
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:
>>> df.to_json(orient='columns')
'{"col 1":{"row 1":"a","row 2":"c"},"col 2":{"row 1":"b","row 2":"d"}}'
Encoding/decoding a Dataframe using ``'values'`` formatted JSON:
>>> df.to_json(orient='values')
'[["a","b"],["c","d"]]'
Encoding with Table Schema
>>> df.to_json(orient='table')
'{"schema": {"fields": [{"name": "index", "type": "string"},
{"name": "col 1", "type": "string"},
{"name": "col 2", "type": "string"}],
"primaryKey": "index",
"pandas_version": "0.20.0"},
"data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
{"index": "row 2", "col 1": "c", "col 2": "d"}]}'
"""
from pandas.io import json
if date_format is None and orient == "table":
date_format = "iso"
elif date_format is None:
date_format = "epoch"
config.is_nonnegative_int(indent)
indent = indent or 0
return json.to_json(
path_or_buf=path_or_buf,
obj=self,
orient=orient,
date_format=date_format,
double_precision=double_precision,
force_ascii=force_ascii,
date_unit=date_unit,
default_handler=default_handler,
lines=lines,
compression=compression,
index=index,
indent=indent,
)
def to_hdf(
self,
path_or_buf,
key: str,
mode: str = "a",
complevel: Optional[int] = None,
complib: Optional[str] = None,
append: bool_t = False,
format: Optional[str] = None,
index: bool_t = True,
min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
nan_rep=None,
dropna: Optional[bool_t] = None,
data_columns: Optional[List[str]] = None,
errors: str = "strict",
encoding: str = "UTF-8",
) -> None:
"""
Write the contained data to an HDF5 file using HDFStore.
Hierarchical Data Format (HDF) is self-describing, allowing an
application to interpret the structure and contents of a file with
no outside information. One HDF file can hold a mix of related objects
which can be accessed as a group or as individual objects.
In order to add another DataFrame or Series to an existing HDF file
        please use append mode and a different key.
For more information see the :ref:`user guide <io.hdf5>`.
Parameters
----------
path_or_buf : str or pandas.HDFStore
File path or HDFStore object.
key : str
Identifier for the group in the store.
mode : {'a', 'w', 'r+'}, default 'a'
Mode to open file:
- 'w': write, a new file is created (an existing file with
the same name would be deleted).
- 'a': append, an existing file is opened for reading and
writing, and if the file does not exist it is created.
- 'r+': similar to 'a', but the file must already exist.
complevel : {0-9}, optional
Specifies a compression level for data.
A value of 0 disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
            Specifying a compression library which is not available raises
            a ValueError.
append : bool, default False
For Table formats, append the input data to the existing.
format : {'fixed', 'table', None}, default 'fixed'
Possible values:
- 'fixed': Fixed format. Fast writing/reading. Not-appendable,
nor searchable.
- 'table': Table format. Write as a PyTables Table structure
which may perform worse but allow more flexible operations
like searching / selecting subsets of the data.
- If None, pd.get_option('io.hdf.default_format') is checked,
followed by fallback to "fixed"
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
encoding : str, default "UTF-8"
min_itemsize : dict or int, optional
Map column names to minimum string sizes for columns.
nan_rep : Any, optional
How to represent null values as str.
Not allowed with append=True.
data_columns : list of columns or True, optional
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See :ref:`io.hdf5-query-data-columns`.
Applicable only to format='table'.
See Also
--------
DataFrame.read_hdf : Read from HDF file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_feather : Write out feather-format for DataFrames.
DataFrame.to_csv : Write out to a csv file.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
... index=['a', 'b', 'c'])
>>> df.to_hdf('data.h5', key='df', mode='w')
We can add another object to the same file:
>>> s = pd.Series([1, 2, 3, 4])
>>> s.to_hdf('data.h5', key='s')
Reading from HDF file:
>>> pd.read_hdf('data.h5', 'df')
A B
a 1 4
b 2 5
c 3 6
>>> pd.read_hdf('data.h5', 's')
0 1
1 2
2 3
3 4
dtype: int64
Deleting file with data:
>>> import os
>>> os.remove('data.h5')
"""
from pandas.io import pytables
pytables.to_hdf(
path_or_buf,
key,
self,
mode=mode,
complevel=complevel,
complib=complib,
append=append,
format=format,
index=index,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
dropna=dropna,
data_columns=data_columns,
errors=errors,
encoding=encoding,
)
def to_sql(
self,
name: str,
con,
schema=None,
if_exists: str = "fail",
index: bool_t = True,
index_label=None,
chunksize=None,
dtype=None,
method=None,
) -> None:
"""
Write records stored in a DataFrame to a SQL database.
Databases supported by SQLAlchemy [1]_ are supported. Tables can be
newly created, appended to, or overwritten.
Parameters
----------
name : str
Name of SQL table.
con : sqlalchemy.engine.Engine or sqlite3.Connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. Legacy support is provided for sqlite3.Connection objects. The user
is responsible for engine disposal and connection closure for the SQLAlchemy
            connectable. See `here \
<https://docs.sqlalchemy.org/en/13/core/connections.html>`_.
schema : str, optional
Specify the schema (if database flavor supports this). If None, use
default schema.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
How to behave if the table already exists.
* fail: Raise a ValueError.
* replace: Drop the table before inserting new values.
* append: Insert new values to the existing table.
index : bool, default True
Write DataFrame index as a column. Uses `index_label` as the column
name in the table.
index_label : str or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, optional
Specify the number of rows in each batch to be written at a time.
By default, all rows will be written at once.
dtype : dict or scalar, optional
Specifying the datatype for columns. If a dictionary is used, the
keys should be the column names and the values should be the
SQLAlchemy types or strings for the sqlite3 legacy mode. If a
scalar is provided, it will be applied to all columns.
method : {None, 'multi', callable}, optional
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
* 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
.. versionadded:: 0.24.0
Raises
------
ValueError
When the table already exists and `if_exists` is 'fail' (the
default).
See Also
--------
read_sql : Read a DataFrame from a table.
Notes
-----
Timezone aware datetime columns will be written as
``Timestamp with timezone`` type with SQLAlchemy if supported by the
database. Otherwise, the datetimes will be stored as timezone unaware
timestamps local to the original timezone.
.. versionadded:: 0.24.0
References
----------
.. [1] https://docs.sqlalchemy.org
.. [2] https://www.python.org/dev/peps/pep-0249/
Examples
--------
Create an in-memory SQLite database.
>>> from sqlalchemy import create_engine
>>> engine = create_engine('sqlite://', echo=False)
Create a table from scratch with 3 rows.
>>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})
>>> df
name
0 User 1
1 User 2
2 User 3
>>> df.to_sql('users', con=engine)
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]
>>> df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})
>>> df1.to_sql('users', con=engine, if_exists='append')
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),
(0, 'User 4'), (1, 'User 5')]
Overwrite the table with just ``df1``.
>>> df1.to_sql('users', con=engine, if_exists='replace',
... index_label='id')
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 4'), (1, 'User 5')]
Specify the dtype (especially useful for integers with missing values).
Notice that while pandas is forced to store the data as floating point,
the database supports nullable integers. When fetching the data with
Python, we get back integer scalars.
>>> df = pd.DataFrame({"A": [1, None, 2]})
>>> df
A
0 1.0
1 NaN
2 2.0
>>> from sqlalchemy.types import Integer
>>> df.to_sql('integers', con=engine, index=False,
... dtype={"A": Integer()})
>>> engine.execute("SELECT * FROM integers").fetchall()
[(1,), (None,), (2,)]
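A callable can also be supplied via ``method`` to customize the insertion
clause. The following is only a sketch that mirrors the default row-wise
behaviour; ``chunked_insert`` is a hypothetical name, not part of pandas:
>>> def chunked_insert(pd_table, conn, keys, data_iter):
...     rows = [dict(zip(keys, row)) for row in data_iter]
...     conn.execute(pd_table.table.insert(), rows)
>>> df1.to_sql('users', con=engine, if_exists='append',
...            method=chunked_insert)  # doctest: +SKIP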
"""
from pandas.io import sql
sql.to_sql(
self,
name,
con,
schema=schema,
if_exists=if_exists,
index=index,
index_label=index_label,
chunksize=chunksize,
dtype=dtype,
method=method,
)
def to_pickle(
self,
path,
compression: Optional[str] = "infer",
protocol: int = pickle.HIGHEST_PROTOCOL,
) -> None:
"""
Pickle (serialize) object to file.
Parameters
----------
path : str
File path where the pickled object will be stored.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, \
default 'infer'
A string representing the compression to use in the output file. By
default, infers from the file extension in specified path.
protocol : int
Int which indicates which protocol should be used by the pickler,
default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible
values are 0, 1, 2, 3, 4. A negative value for the protocol
parameter is equivalent to setting its value to HIGHEST_PROTOCOL.
.. [1] https://docs.python.org/3/library/pickle.html
.. versionadded:: 0.21.0
See Also
--------
read_pickle : Load pickled pandas object (or any object) from file.
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_sql : Write DataFrame to a SQL database.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Examples
--------
>>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)})
>>> original_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> original_df.to_pickle("./dummy.pkl")
>>> unpickled_df = pd.read_pickle("./dummy.pkl")
>>> unpickled_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> import os
>>> os.remove("./dummy.pkl")
"""
from pandas.io.pickle import to_pickle
to_pickle(self, path, compression=compression, protocol=protocol)
def to_clipboard(
self, excel: bool_t = True, sep: Optional[str] = None, **kwargs
) -> None:
r"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
Parameters
----------
excel : bool, default True
Produce output in a csv format for easy pasting into excel.
- True, use the provided separator for csv pasting.
- False, write a string representation of the object to the clipboard.
sep : str, default ``'\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
See Also
--------
DataFrame.to_csv : Write a DataFrame to a comma-separated values
(csv) file.
read_clipboard : Read text from clipboard and pass to read_table.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `PyQt4` modules)
- Windows : none
- OS X : none
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
>>> df.to_clipboard(sep=',')
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
We can omit the index by passing the keyword `index` and setting
it to false.
>>> df.to_clipboard(sep=',', index=False)
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
"""
from pandas.io import clipboards
clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
def to_xarray(self):
"""
Return an xarray object from the pandas object.
Returns
-------
xarray.DataArray or xarray.Dataset
Data in the pandas structure converted to Dataset if the object is
a DataFrame, or a DataArray if the object is a Series.
See Also
--------
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Notes
-----
See the `xarray docs <https://xarray.pydata.org/en/stable/>`__
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),
... ('parrot', 'bird', 24.0, 2),
... ('lion', 'mammal', 80.5, 4),
... ('monkey', 'mammal', np.nan, 4)],
... columns=['name', 'class', 'max_speed',
... 'num_legs'])
>>> df
name class max_speed num_legs
0 falcon bird 389.0 2
1 parrot bird 24.0 2
2 lion mammal 80.5 4
3 monkey mammal NaN 4
>>> df.to_xarray()
<xarray.Dataset>
Dimensions: (index: 4)
Coordinates:
* index (index) int64 0 1 2 3
Data variables:
name (index) object 'falcon' 'parrot' 'lion' 'monkey'
class (index) object 'bird' 'bird' 'mammal' 'mammal'
max_speed (index) float64 389.0 24.0 80.5 nan
num_legs (index) int64 2 2 4 4
>>> df['max_speed'].to_xarray()
<xarray.DataArray 'max_speed' (index: 4)>
array([389. , 24. , 80.5, nan])
Coordinates:
* index (index) int64 0 1 2 3
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',
... '2018-01-02', '2018-01-02'])
>>> df_multiindex = pd.DataFrame({'date': dates,
... 'animal': ['falcon', 'parrot',
... 'falcon', 'parrot'],
... 'speed': [350, 18, 361, 15]})
>>> df_multiindex = df_multiindex.set_index(['date', 'animal'])
>>> df_multiindex
speed
date animal
2018-01-01 falcon 350
parrot 18
2018-01-02 falcon 361
parrot 15
>>> df_multiindex.to_xarray()
<xarray.Dataset>
Dimensions: (animal: 2, date: 2)
Coordinates:
* date (date) datetime64[ns] 2018-01-01 2018-01-02
* animal (animal) object 'falcon' 'parrot'
Data variables:
speed (date, animal) int64 350 18 361 15
"""
xarray = import_optional_dependency("xarray")
if self.ndim == 1:
return xarray.DataArray.from_series(self)
else:
return xarray.Dataset.from_dataframe(self)
@Substitution(returns=fmt.return_docstring)
def to_latex(
self,
buf=None,
columns=None,
col_space=None,
header=True,
index=True,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
index_names=True,
bold_rows=False,
column_format=None,
longtable=None,
escape=None,
encoding=None,
decimal=".",
multicolumn=None,
multicolumn_format=None,
multirow=None,
caption=None,
label=None,
):
r"""
Render object to a LaTeX tabular, longtable, or nested table/tabular.
Requires ``\usepackage{booktabs}``. The output can be copy/pasted
into a main LaTeX document or read from an external file
with ``\input{table.tex}``.
.. versionchanged:: 0.20.2
Added to Series.
.. versionchanged:: 1.0.0
Added caption and label arguments.
Parameters
----------
buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given,
it is assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default 'NaN'
Missing data representation.
formatters : list of functions or dict of {str: function}, optional
Formatter functions to apply to columns' elements by position or
name. The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function or str, optional, default None
Formatter for floating point numbers. For example
``float_format="%%.2f"`` and ``float_format="{:0.2f}".format`` will
both result in 0.1234 being formatted as 0.12.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row. By default, the value will be
read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
columns. By default, 'l' will be used for all columns except
columns of numbers, which default to 'r'.
longtable : bool, optional
By default, the value will be read from the pandas config
module. Use a longtable environment instead of tabular. Requires
adding a \usepackage{longtable} to your LaTeX preamble.
escape : bool, optional
By default, the value will be read from the pandas config
module. When set to False, prevents escaping of LaTeX special
characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
multicolumn : bool, default True
Use \multicolumn to enhance MultiIndex columns.
The default will be read from the config module.
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to `column_format`
The default will be read from the config module.
multirow : bool, default False
Use \multirow to enhance MultiIndex rows. Requires adding a
\usepackage{multirow} to your LaTeX preamble. Will print
centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read
from the pandas config module.
caption : str, optional
The LaTeX caption to be placed inside ``\caption{}`` in the output.
.. versionadded:: 1.0.0
label : str, optional
The LaTeX label to be placed inside ``\label{}`` in the output.
This is used with ``\ref{}`` in the main ``.tex`` file.
.. versionadded:: 1.0.0
%(returns)s
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
>>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE
\begin{tabular}{lll}
\toprule
name & mask & weapon \\
\midrule
Raphael & red & sai \\
Donatello & purple & bo staff \\
\bottomrule
\end{tabular}
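The ``caption`` and ``label`` arguments (added in 1.0.0) can be combined
with the call above; the rendered table is then wrapped in a table
environment (shown here only as a sketch, output omitted):
>>> print(df.to_latex(index=False, caption='Turtles',
...                   label='tab:turtles'))  # doctest: +SKIP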
"""
# Get defaults from the pandas config
if self.ndim == 1:
self = self.to_frame()
if longtable is None:
longtable = config.get_option("display.latex.longtable")
if escape is None:
escape = config.get_option("display.latex.escape")
if multicolumn is None:
multicolumn = config.get_option("display.latex.multicolumn")
if multicolumn_format is None:
multicolumn_format = config.get_option("display.latex.multicolumn_format")
if multirow is None:
multirow = config.get_option("display.latex.multirow")
formatter = DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
header=header,
index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
index_names=index_names,
escape=escape,
decimal=decimal,
)
return formatter.to_latex(
buf=buf,
column_format=column_format,
longtable=longtable,
encoding=encoding,
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow,
caption=caption,
label=label,
)
def to_csv(
self,
path_or_buf: Optional[FilePathOrBuffer] = None,
sep: str = ",",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Sequence[Label]] = None,
header: Union[bool_t, List[str]] = True,
index: bool_t = True,
index_label: Optional[Union[bool_t, str, Sequence[Label]]] = None,
mode: str = "w",
encoding: Optional[str] = None,
compression: Optional[Union[str, Mapping[str, str]]] = "infer",
quoting: Optional[int] = None,
quotechar: str = '"',
line_terminator: Optional[str] = None,
chunksize: Optional[int] = None,
date_format: Optional[str] = None,
doublequote: bool_t = True,
escapechar: Optional[str] = None,
decimal: Optional[str] = ".",
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. versionchanged:: 0.24.0
The order of arguments for Series was changed.
Parameters
----------
path_or_buf : str or file handle, default None
File path or object, if None is provided the result is returned as
a string. If a file object is passed it should be opened with
`newline=''`, disabling universal newlines.
.. versionchanged:: 0.24.0
Was previously named "path" for Series.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
float_format : str, default None
Format string for floating point numbers.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
.. versionchanged:: 0.24.0
Previously defaulted to False for Series.
index : bool, default True
Write row names (index).
index_label : str or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the object uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R.
mode : str
Python write mode, default 'w'.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
compression : str or dict, default 'infer'
If str, represents compression mode. If dict, value at 'method' is
the compression mode. Compression mode may be any of the following
possible values: {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If
compression mode is 'infer' and `path_or_buf` is path-like, then
detect compression mode from the following extensions: '.gz',
'.bz2', '.zip' or '.xz'. (otherwise no compression). If dict given
and mode is 'zip' or inferred as 'zip', other entries passed as
additional compression options.
.. versionchanged:: 1.0.0
May now be a dict with key 'method' as compression mode
and other entries as additional compression options if
compression mode is 'zip'.
quoting : optional constant from csv module
Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
will treat them as non-numeric.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
line_terminator : str, optional
The newline character or character sequence to use in the output
file. Defaults to `os.linesep`, which depends on the OS in which
this method is called (e.g. '\n' for Linux, '\r\n' for Windows).
.. versionchanged:: 0.24.0
chunksize : int or None
Rows to write at a time.
date_format : str, default None
Format string for datetime objects.
doublequote : bool, default True
Control quoting of `quotechar` inside a field.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
decimal : str, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data.
Returns
-------
None or str
If path_or_buf is None, returns the resulting csv format as a
string. Otherwise returns None.
See Also
--------
read_csv : Load a CSV file into a DataFrame.
to_excel : Write DataFrame to an Excel file.
Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
>>> df.to_csv(index=False)
'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n'
Create 'out.zip' containing 'out.csv'
>>> compression_opts = dict(method='zip',
... archive_name='out.csv') # doctest: +SKIP
>>> df.to_csv('out.zip', index=False,
... compression=compression_opts) # doctest: +SKIP
"""
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
from pandas.io.formats.csvs import CSVFormatter
formatter = CSVFormatter(
df,
path_or_buf,
line_terminator=line_terminator,
sep=sep,
encoding=encoding,
compression=compression,
quoting=quoting,
na_rep=na_rep,
float_format=float_format,
cols=columns,
header=header,
index=index,
index_label=index_label,
mode=mode,
chunksize=chunksize,
quotechar=quotechar,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar,
decimal=decimal,
)
formatter.save()
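# When no target was given, CSVFormatter wrote into an in-memory buffer;
# hand its accumulated text back so the caller receives the CSV as a string.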
if path_or_buf is None:
return formatter.path_or_buf.getvalue()
return None
# ----------------------------------------------------------------------
# Lookup Caching
def _set_as_cached(self, item, cacher) -> None:
"""
Set the _cacher attribute on the calling object with a weakref to
cacher.
"""
self._cacher = (item, weakref.ref(cacher))
def _reset_cacher(self) -> None:
"""
Reset the cacher.
"""
if hasattr(self, "_cacher"):
del self._cacher
def _maybe_cache_changed(self, item, value) -> None:
"""
The object has called back to us saying maybe it has changed.
"""
self._data.set(item, value)
@property
def _is_cached(self) -> bool_t:
"""Return boolean indicating if self is cached or not."""
return getattr(self, "_cacher", None) is not None
def _get_cacher(self):
"""return my cacher or None"""
cacher = getattr(self, "_cacher", None)
if cacher is not None:
cacher = cacher[1]()
return cacher
def _maybe_update_cacher(
self, clear: bool_t = False, verify_is_copy: bool_t = True
) -> None:
"""
See if we need to update our parent cacher if clear, then clear our
cache.
Parameters
----------
clear : bool, default False
Clear the item cache.
verify_is_copy : bool, default True
Provide is_copy checks.
"""
cacher = getattr(self, "_cacher", None)
if cacher is not None:
ref = cacher[1]()
# we are trying to reference a dead referant, hence
# a copy
if ref is None:
del self._cacher
else:
# Note: we need to call ref._maybe_cache_changed even in the
# case where it will raise (the reason for this is not fully clear).
try:
ref._maybe_cache_changed(cacher[0], self)
except AssertionError:
# ref._data.setitem can raise
# AssertionError because of shape mismatch
pass
if verify_is_copy:
self._check_setitem_copy(stacklevel=5, t="referant")
if clear:
self._clear_item_cache()
def _clear_item_cache(self) -> None:
self._item_cache.clear()
# ----------------------------------------------------------------------
# Indexing Methods
def take(
self: FrameOrSeries, indices, axis=0, is_copy: Optional[bool_t] = None, **kwargs
) -> FrameOrSeries:
"""
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
axis : {0 or 'index', 1 or 'columns', None}, default 0
The axis on which to select elements. ``0`` means that we are
selecting rows, ``1`` means that we are selecting columns.
is_copy : bool
Before pandas 1.0, ``is_copy=False`` can be specified to ensure
that the return value is an actual copy. Starting with pandas 1.0,
``take`` always returns a copy, and the keyword is therefore
deprecated.
.. deprecated:: 1.0.0
**kwargs
For compatibility with :meth:`numpy.take`. Has no effect on the
output.
Returns
-------
taken : same type as caller
An array-like containing the elements taken from the object.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by labels.
DataFrame.iloc : Select a subset of a DataFrame by positions.
numpy.take : Take elements from an array along an axis.
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=['name', 'class', 'max_speed'],
... index=[0, 2, 3, 1])
>>> df
name class max_speed
0 falcon bird 389.0
2 parrot bird 24.0
3 lion mammal 80.5
1 monkey mammal NaN
Take elements at positions 0 and 3 along the axis 0 (default).
Note how the actual indices selected (0 and 1) do not correspond to
our selected indices 0 and 3. That's because we are selecting the 0th
and 3rd rows, not rows whose indices equal 0 and 3.
>>> df.take([0, 3])
name class max_speed
0 falcon bird 389.0
1 monkey mammal NaN
Take elements at indices 1 and 2 along the axis 1 (column selection).
>>> df.take([1, 2], axis=1)
class max_speed
0 bird 389.0
2 bird 24.0
3 mammal 80.5
1 mammal NaN
We may take elements using negative integers for positive indices,
starting from the end of the object, just like with Python lists.
>>> df.take([-1, -2])
name class max_speed
1 monkey mammal NaN
3 lion mammal 80.5
"""
if is_copy is not None:
warnings.warn(
"is_copy is deprecated and will be removed in a future version. "
"'take' always returns a copy, so there is no need to specify this.",
FutureWarning,
stacklevel=2,
)
nv.validate_take(tuple(), kwargs)
self._consolidate_inplace()
new_data = self._data.take(
indices, axis=self._get_block_manager_axis(axis), verify=True
)
return self._constructor(new_data).__finalize__(self)
def _take_with_is_copy(self: FrameOrSeries, indices, axis=0) -> FrameOrSeries:
"""
Internal version of the `take` method that sets the `_is_copy`
attribute to keep track of the parent dataframe (using in indexing
for the SettingWithCopyWarning).
See the docstring of `take` for full explanation of the parameters.
"""
result = self.take(indices=indices, axis=axis)
# Maybe set copy if we didn't actually change the index.
if not result._get_axis(axis).equals(self._get_axis(axis)):
result._set_is_copy(self)
return result
def xs(self, key, axis=0, level=None, drop_level: bool_t = True):
"""
Return cross-section from the Series/DataFrame.
This method takes a `key` argument to select data at a particular
level of a MultiIndex.
Parameters
----------
key : label or tuple of label
Label contained in the index, or partially in a MultiIndex.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis to retrieve cross-section on.
level : object, defaults to first n levels (n=1 or len(key))
In case of a key partially contained in a MultiIndex, indicate
which levels are used. Levels can be referred by label or position.
drop_level : bool, default True
If False, returns object with same levels as self.
Returns
-------
Series or DataFrame
Cross-section from the original Series or DataFrame
corresponding to the selected index levels.
See Also
--------
DataFrame.loc : Access a group of rows and columns
by label(s) or a boolean array.
DataFrame.iloc : Purely integer-location based indexing
for selection by position.
Notes
-----
`xs` can not be used to set values.
MultiIndex Slicers is a generic way to get/set values on
any level or levels.
It is a superset of `xs` functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`.
Examples
--------
>>> d = {'num_legs': [4, 4, 2, 2],
... 'num_wings': [0, 0, 2, 2],
... 'class': ['mammal', 'mammal', 'mammal', 'bird'],
... 'animal': ['cat', 'dog', 'bat', 'penguin'],
... 'locomotion': ['walks', 'walks', 'flies', 'walks']}
>>> df = pd.DataFrame(data=d)
>>> df = df.set_index(['class', 'animal', 'locomotion'])
>>> df
num_legs num_wings
class animal locomotion
mammal cat walks 4 0
dog walks 4 0
bat flies 2 2
bird penguin walks 2 2
Get values at specified index
>>> df.xs('mammal')
num_legs num_wings
animal locomotion
cat walks 4 0
dog walks 4 0
bat flies 2 2
Get values at several indexes
>>> df.xs(('mammal', 'dog'))
num_legs num_wings
locomotion
walks 4 0
Get values at specified index and level
>>> df.xs('cat', level=1)
num_legs num_wings
class locomotion
mammal walks 4 0
Get values at several indexes and levels
>>> df.xs(('bird', 'walks'),
... level=[0, 'locomotion'])
num_legs num_wings
animal
penguin 2 2
Get values at specified column and axis
>>> df.xs('num_wings', axis=1)
class animal locomotion
mammal cat walks 0
dog walks 0
bat flies 2
bird penguin walks 2
Name: num_wings, dtype: int64
"""
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if level is not None:
loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)
# create the tuple of the indexer
_indexer = [slice(None)] * self.ndim
_indexer[axis] = loc
indexer = tuple(_indexer)
result = self.iloc[indexer]
setattr(result, result._get_axis_name(axis), new_ax)
return result
if axis == 1:
return self[key]
self._consolidate_inplace()
index = self.index
if isinstance(index, MultiIndex):
loc, new_index = self.index.get_loc_level(key, drop_level=drop_level)
else:
loc = self.index.get_loc(key)
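# get_loc may return an integer position, a slice, or (for duplicate
# labels) a boolean/integer ndarray; each shape is handled below.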
if isinstance(loc, np.ndarray):
if loc.dtype == np.bool_:
(inds,) = loc.nonzero()
return self._take_with_is_copy(inds, axis=axis)
else:
return self._take_with_is_copy(loc, axis=axis)
if not is_scalar(loc):
new_index = self.index[loc]
if is_scalar(loc):
# In this case loc should be an integer
if self.ndim == 1:
# if we encounter an array-like and we only have 1 dim
# that means that there are list/ndarrays inside the Series!
# so just return them (GH 6394)
return self._values[loc]
new_values = self._data.fast_xs(loc)
result = self._constructor_sliced(
new_values,
index=self.columns,
name=self.index[loc],
dtype=new_values.dtype,
)
else:
result = self.iloc[loc]
result.index = new_index
# this could be a view
# but only in a single-dtyped view sliceable case
result._set_is_copy(self, copy=not result._is_view)
return result
_xs: Callable = xs
def __getitem__(self, item):
raise AbstractMethodError(self)
def _get_item_cache(self, item):
"""Return the cached item, item represents a label indexer."""
cache = self._item_cache
res = cache.get(item)
if res is None:
values = self._data.get(item)
res = self._box_item_values(item, values)
cache[item] = res
res._set_as_cached(item, self)
# for a chain
res._is_copy = self._is_copy
return res
def _box_item_values(self, key, values):
raise AbstractMethodError(self)
def _slice(self: FrameOrSeries, slobj: slice, axis=0) -> FrameOrSeries:
"""
Construct a slice of this container.
Slicing with this method is *always* positional.
"""
assert isinstance(slobj, slice), type(slobj)
axis = self._get_block_manager_axis(axis)
result = self._constructor(self._data.get_slice(slobj, axis=axis))
result = result.__finalize__(self)
# this could be a view
# but only in a single-dtyped view sliceable case
is_copy = axis != 0 or result._is_view
result._set_is_copy(self, copy=is_copy)
return result
def _set_item(self, key, value) -> None:
self._data.set(key, value)
self._clear_item_cache()
def _set_is_copy(self, ref, copy: bool_t = True) -> None:
if not copy:
self._is_copy = None
else:
assert ref is not None
self._is_copy = weakref.ref(ref)
def _check_is_chained_assignment_possible(self) -> bool_t:
"""
Check if we are a view, have a cacher, and are of mixed type.
If so, then force a setitem_copy check.
Should be called just near setting a value
Will return a boolean if it we are a view and are cached, but a
single-dtype meaning that the cacher should be updated following
setting.
"""
if self._is_view and self._is_cached:
ref = self._get_cacher()
if ref is not None and ref._is_mixed_type:
self._check_setitem_copy(stacklevel=4, t="referant", force=True)
return True
elif self._is_copy:
self._check_setitem_copy(stacklevel=4, t="referant")
return False
def _check_setitem_copy(self, stacklevel=4, t="setting", force=False):
"""
Validate if we are doing a setitem on a chained copy.
Parameters
----------
stacklevel : int, default 4
The level of the stack to show when the error is output.
t : str, default 'setting'
The type of setting error.
force : bool, default False
If True, then force showing an error.
Notes
-----
If you call this function, be sure to set the stacklevel such that the
user will see the error *at the level of setting*.
It is technically possible to figure out that we are setting on
a copy even WITH a multi-dtyped pandas object. In other words, some
blocks may be views while others are not. Currently _is_view will ALWAYS
return False for multi-blocks to avoid having to handle this case.
df = DataFrame(np.arange(0,9), columns=['count'])
df['group'] = 'b'
# This technically need not raise SettingWithCopy if both are views
# (which is not generally guaranteed but is usually True). However,
# this is in general not a good practice and we recommend using .loc.
df.iloc[0:5]['group'] = 'a'
"""
# return early if the check is not needed
if not (force or self._is_copy):
return
value = config.get_option("mode.chained_assignment")
if value is None:
return
# see if the copy is not actually referred; if so, then dissolve
# the copy weakref
if self._is_copy is not None and not isinstance(self._is_copy, str):
r = self._is_copy()
if not gc.get_referents(r) or r.shape == self.shape:
self._is_copy = None
return
# a custom message
if isinstance(self._is_copy, str):
t = self._is_copy
elif t == "referant":
t = (
"\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame\n\n"
"See the caveats in the documentation: "
"https://pandas.pydata.org/pandas-docs/stable/user_guide/"
"indexing.html#returning-a-view-versus-a-copy"
)
else:
t = (
"\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame.\n"
"Try using .loc[row_indexer,col_indexer] = value "
"instead\n\nSee the caveats in the documentation: "
"https://pandas.pydata.org/pandas-docs/stable/user_guide/"
"indexing.html#returning-a-view-versus-a-copy"
)
if value == "raise":
raise com.SettingWithCopyError(t)
elif value == "warn":
warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel)
def __delitem__(self, key) -> None:
"""
Delete item
"""
deleted = False
maybe_shortcut = False
if self.ndim == 2 and isinstance(self.columns, MultiIndex):
try:
maybe_shortcut = key not in self.columns._engine
except TypeError:
pass
if maybe_shortcut:
# Allow shorthand to delete all columns whose first len(key)
# elements match key:
if not isinstance(key, tuple):
key = (key,)
for col in self.columns:
if isinstance(col, tuple) and col[: len(key)] == key:
del self[col]
deleted = True
if not deleted:
# If the above loop ran and didn't delete anything because
# there was no match, this call should raise the appropriate
# exception:
self._data.delete(key)
# delete from the caches
try:
del self._item_cache[key]
except KeyError:
pass
# ----------------------------------------------------------------------
# Unsorted
def get(self, key, default=None):
"""
Get item from object for given key (ex: DataFrame column).
Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
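Examples
--------
A minimal illustration (hypothetical frame):
>>> df = pd.DataFrame({'A': [1, 2]})
>>> df.get('A')
0    1
1    2
Name: A, dtype: int64
>>> df.get('Z', default='missing')
'missing'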
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
@property
def _is_view(self) -> bool_t:
"""Return boolean indicating if self is view of another array """
return self._data.is_view
def reindex_like(
self: FrameOrSeries,
other,
method: Optional[str] = None,
copy: bool_t = True,
limit=None,
tolerance=None,
) -> FrameOrSeries:
"""
Return an object with matching indices as other object.
Conform the object to the same index on all axes. Optional
filling logic, placing NaN in locations having no value
in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.
Parameters
----------
other : Object of the same data type
Its row and column indices are used to define the new indices
of this object.
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap.
copy : bool, default True
Return a new object, even if the passed indexes are the same.
limit : int, default None
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
Series or DataFrame
Same type as caller, but with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.
Examples
--------
>>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],
... [31, 87.8, 'high'],
... [22, 71.6, 'medium'],
... [35, 95, 'medium']],
... columns=['temp_celsius', 'temp_fahrenheit',
... 'windspeed'],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'))
>>> df1
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df2 = pd.DataFrame([[28, 'low'],
... [30, 'low'],
... [35.1, 'medium']],
... columns=['temp_celsius', 'windspeed'],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']))
>>> df2
temp_celsius windspeed
2014-02-12 28.0 low
2014-02-13 30.0 low
2014-02-15 35.1 medium
>>> df2.reindex_like(df1)
temp_celsius temp_fahrenheit windspeed
2014-02-12 28.0 NaN low
2014-02-13 30.0 NaN low
2014-02-14 NaN NaN NaN
2014-02-15 35.1 NaN medium
"""
d = other._construct_axes_dict(
axes=self._AXIS_ORDERS,
method=method,
copy=copy,
limit=limit,
tolerance=tolerance,
)
return self.reindex(**d)
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace: bool_t = False,
errors: str = "raise",
):
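# Note: the user-facing docstring is provided by the DataFrame/Series
# overrides, which delegate to this shared implementation.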
inplace = validate_bool_kwarg(inplace, "inplace")
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
axis_name = self._get_axis_name(axis)
axes = {axis_name: labels}
elif index is not None or columns is not None:
axes, _ = self._construct_axes_from_arguments((index, columns), {})
else:
raise ValueError(
"Need to specify at least one of 'labels', 'index' or 'columns'"
)
obj = self
for axis, labels in axes.items():
if labels is not None:
obj = obj._drop_axis(labels, axis, level=level, errors=errors)
if inplace:
self._update_inplace(obj)
else:
return obj
def _drop_axis(
self: FrameOrSeries, labels, axis, level=None, errors: str = "raise"
) -> FrameOrSeries:
"""
Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are dropped.
"""
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis = self._get_axis(axis)
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
result = self.reindex(**{axis_name: new_axis})
# Case for non-unique axis
else:
labels = ensure_object(com.index_labels_to_array(labels))
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
indexer = ~axis.get_level_values(level).isin(labels)
# GH 18561 MultiIndex.drop should raise if label is absent
if errors == "raise" and indexer.all():
raise KeyError(f"{labels} not found in axis")
else:
indexer = ~axis.isin(labels)
# Check if label doesn't exist along axis
labels_missing = (axis.get_indexer_for(labels) == -1).any()
if errors == "raise" and labels_missing:
raise KeyError(f"{labels} not found in axis")
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.loc[tuple(slicer)]
return result
def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None:
"""
Replace self internals with result.
Parameters
----------
verify_is_copy : bool, default True
Provide is_copy checks.
"""
# NOTE: This does *not* call __finalize__ and that's an explicit
# decision that we may revisit in the future.
self._reset_cache()
self._clear_item_cache()
self._data = getattr(result, "_data", result)
self._maybe_update_cacher(verify_is_copy=verify_is_copy)
def add_prefix(self: FrameOrSeries, prefix: str) -> FrameOrSeries:
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_prefix('item_')
item_0 1
item_1 2
item_2 3
item_3 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial("{prefix}{}".format, prefix=prefix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper) # type: ignore
def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries:
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_suffix('_item')
0_item 1
1_item 2
2_item 3
3_item 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial("{}{suffix}".format, suffix=suffix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper) # type: ignore
def sort_values(
self,
axis=0,
ascending=True,
inplace: bool_t = False,
kind: str = "quicksort",
na_position: str = "last",
ignore_index: bool_t = False,
):
"""
Sort by the values along either axis.
Parameters
----------%(optional_by)s
axis : %(axes_single_arg)s, default 0
Axis to be sorted.
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the
end.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
Returns
-------
sorted_obj : DataFrame or None
DataFrame with sorted values if inplace=False, None otherwise.
Examples
--------
>>> df = pd.DataFrame({
... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... })
>>> df
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
3 NaN 8 4
4 D 7 2
5 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort by multiple columns
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
3 NaN 8 4
Putting NAs first
>>> df.sort_values(by='col1', ascending=False, na_position='first')
col1 col2 col3
3 NaN 8 4
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
"""
raise AbstractMethodError(self)
def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries:
"""
Conform %(klass)s to new index with optional filling logic.
Places NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
``copy=False``.
Parameters
----------
%(optional_labels)s
%(axes)s : array-like, optional
New labels / index to conform to, should be specified using
keywords. Preferably an Index object to avoid duplicating data.
%(optional_axis)s
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: Propagate last valid observation forward to next
valid.
* backfill / bfill: Use next valid observation to fill gap.
* nearest: Use nearest valid observations to fill gap.
copy : bool, default True
Return a new object, even if the passed indexes are the same.
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
limit : int, default None
Maximum number of consecutive elements to forward or backward fill.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
%(klass)s with changed index.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
``DataFrame.reindex`` supports two calling conventions
* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Create a dataframe with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = pd.DataFrame({'http_status': [200, 200, 404, 404, 301],
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
... index=index)
>>> df
http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
IE10 404 0.08
Konqueror 301 1.00
Create a new index and reindex the dataframe. By default
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.
>>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> df.reindex(new_index)
http_status response_time
Safari 404.0 0.07
Iceweasel NaN NaN
Comodo Dragon NaN NaN
IE10 404.0 0.08
Chrome 200.0 0.02
We can fill in the missing values by passing a value to
the keyword ``fill_value``. Because the index is not monotonically
increasing or decreasing, we cannot use arguments to the keyword
``method`` to fill the ``NaN`` values.
>>> df.reindex(new_index, fill_value=0)
http_status response_time
Safari 404 0.07
Iceweasel 0 0.00
Comodo Dragon 0 0.00
IE10 404 0.08
Chrome 200 0.02
>>> df.reindex(new_index, fill_value='missing')
http_status response_time
Safari 404 0.07
Iceweasel missing missing
Comodo Dragon missing missing
IE10 404 0.08
Chrome 200 0.02
We can also reindex the columns.
>>> df.reindex(columns=['http_status', 'user_agent'])
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
Or we can use "axis-style" keyword arguments
>>> df.reindex(['http_status', 'user_agent'], axis="columns")
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
To further illustrate the filling functionality in
``reindex``, we will create a dataframe with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> df2 = pd.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
... index=date_index)
>>> df2
prices
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
Suppose we decide to expand the dataframe to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> df2.reindex(date_index2)
prices
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
The index entries that did not have a value in the original data frame
(for example, '2009-12-29') are by default filled with ``NaN``.
If desired, we can fill in the missing values using one of several
options.
For example, to fill the ``NaN`` values by propagating the next valid
observation backwards, pass ``bfill`` as an argument to the ``method`` keyword.
>>> df2.reindex(date_index2, method='bfill')
prices
2009-12-29 100.0
2009-12-30 100.0
2009-12-31 100.0
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
Please note that the ``NaN`` value present in the original dataframe
(at index value 2010-01-03) will not be filled by any of the
value propagation schemes. This is because filling while reindexing
does not look at dataframe values, but only compares the original and
desired indexes. If you do want to fill in the ``NaN`` values present
in the original dataframe, use the ``fillna()`` method.
See the :ref:`user guide <basics.reindexing>` for more.
"""
# TODO: Decide if we care about having different examples for different
# kinds
# construct the args
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
method = missing.clean_reindex_fill_method(kwargs.pop("method", None))
level = kwargs.pop("level", None)
copy = kwargs.pop("copy", True)
limit = kwargs.pop("limit", None)
tolerance = kwargs.pop("tolerance", None)
fill_value = kwargs.pop("fill_value", None)
# Series.reindex doesn't use / need the axis kwarg
# We pop and ignore it here, to make writing Series/Frame generic code
# easier
kwargs.pop("axis", None)
if kwargs:
raise TypeError(
"reindex() got an unexpected keyword "
f'argument "{list(kwargs.keys())[0]}"'
)
self._consolidate_inplace()
# if all axes that are requested to reindex are equal, then only copy
# if indicated must have index names equal here as well as values
if all(
self._get_axis(axis).identical(ax)
for axis, ax in axes.items()
if ax is not None
):
if copy:
return self.copy()
return self
# check if we are a multi reindex
if self._needs_reindex_multi(axes, method, level):
return self._reindex_multi(axes, copy, fill_value)
# perform the reindex on the axes
return self._reindex_axes(
axes, level, limit, tolerance, method, fill_value, copy
).__finalize__(self)
def _reindex_axes(
self: FrameOrSeries, axes, level, limit, tolerance, method, fill_value, copy
) -> FrameOrSeries:
"""Perform the reindex for all the axes."""
obj = self
for a in self._AXIS_ORDERS:
labels = axes[a]
if labels is None:
continue
ax = self._get_axis(a)
new_index, indexer = ax.reindex(
labels, level=level, limit=limit, tolerance=tolerance, method=method
)
axis = self._get_axis_number(a)
obj = obj._reindex_with_indexers(
{axis: [new_index, indexer]},
fill_value=fill_value,
copy=copy,
allow_dups=False,
)
return obj
def _needs_reindex_multi(self, axes, method, level) -> bool_t:
"""Check if we do need a multi reindex."""
return (
(com.count_not_none(*axes.values()) == self._AXIS_LEN)
and method is None
and level is None
and not self._is_mixed_type
)
def _reindex_multi(self, axes, copy, fill_value):
raise AbstractMethodError(self)
def _reindex_with_indexers(
self: FrameOrSeries,
reindexers,
fill_value=None,
copy: bool_t = False,
allow_dups: bool_t = False,
) -> FrameOrSeries:
"""allow_dups indicates an internal call here """
# reindex doing multiple operations on different axes if indicated
new_data = self._data
for axis in sorted(reindexers.keys()):
index, indexer = reindexers[axis]
baxis = self._get_block_manager_axis(axis)
if index is None:
continue
index = ensure_index(index)
if indexer is not None:
indexer = ensure_int64(indexer)
# TODO: speed up on homogeneous DataFrame objects
new_data = new_data.reindex_indexer(
index,
indexer,
axis=baxis,
fill_value=fill_value,
allow_dups=allow_dups,
copy=copy,
)
if copy and new_data is self._data:
new_data = new_data.copy()
return self._constructor(new_data).__finalize__(self)
def filter(
self: FrameOrSeries,
items=None,
like: Optional[str] = None,
regex: Optional[str] = None,
axis=None,
) -> FrameOrSeries:
"""
Subset the dataframe rows or columns according to the specified index labels.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
Keep labels from axis which are in items.
like : str
Keep labels from axis for which "like in label == True".
regex : str (regular expression)
Keep labels from axis for which re.search(regex, label) == True.
axis : {0 or 'index', 1 or 'columns', None}, default None
The axis to filter on, expressed either as an index (int)
or axis name (str). By default this is the info axis,
'index' for Series, 'columns' for DataFrame.
Returns
-------
same type as input object
See Also
--------
DataFrame.loc : Access a group of rows and columns
by label(s) or a boolean array.
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
Examples
--------
>>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
... index=['mouse', 'rabbit'],
... columns=['one', 'two', 'three'])
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
"""
nkw = com.count_not_none(items, like, regex)
if nkw > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` "
"are mutually exclusive"
)
if axis is None:
axis = self._info_axis_name
labels = self._get_axis(axis)
if items is not None:
name = self._get_axis_name(axis)
return self.reindex(**{name: [r for r in items if r in labels]})
elif like:
def f(x):
return like in ensure_str(x)
values = labels.map(f)
return self.loc(axis=axis)[values]
elif regex:
def f(x):
return matcher.search(ensure_str(x)) is not None
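# `matcher` is assigned below, after `f` is defined; this is safe because
# `f` only runs inside labels.map once the name is bound.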
matcher = re.compile(regex)
values = labels.map(f)
return self.loc(axis=axis)[values]
else:
raise TypeError("Must pass either `items`, `like`, or `regex`")
def head(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
For negative values of `n`, this function returns all rows except
the last `n` rows, equivalent to ``df[:-n]``.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
same type as caller
The first `n` rows of the caller object.
See Also
--------
DataFrame.tail: Returns the last `n` rows.
Examples
--------
>>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
For negative values of `n`
>>> df.head(-3)
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
"""
return self.iloc[:n]
def tail(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
"""
Return the last `n` rows.
This function returns last `n` rows from the object based on
position. It is useful for quickly verifying data, for example,
after sorting or appending rows.
For negative values of `n`, this function returns all rows except
the first `n` rows, equivalent to ``df[n:]``.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
type of caller
The last `n` rows of the caller object.
See Also
--------
DataFrame.head : The first `n` rows of the caller object.
Examples
--------
>>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last 5 lines
>>> df.tail()
animal
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last `n` lines (three in this case)
>>> df.tail(3)
animal
6 shark
7 whale
8 zebra
For negative values of `n`
>>> df.tail(-3)
animal
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
"""
if n == 0:
return self.iloc[0:0]
return self.iloc[-n:]
def sample(
self: FrameOrSeries,
n=None,
frac=None,
replace=False,
weights=None,
random_state=None,
axis=None,
) -> FrameOrSeries:
"""
Return a random sample of items from an axis of object.
You can use `random_state` for reproducibility.
Parameters
----------
n : int, optional
Number of items from axis to return. Cannot be used with `frac`.
Default = 1 if `frac` = None.
frac : float, optional
Fraction of axis items to return. Cannot be used with `n`.
replace : bool, default False
Allow or disallow sampling of the same row more than once.
weights : str or ndarray-like, optional
Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index. Index
values in weights not found in sampled object will be ignored and
index values in sampled object not in weights will be assigned
weights of zero.
If called on a DataFrame, will accept the name of a column
when axis = 0.
Unless weights are a Series, weights must be same length as axis
being sampled.
If weights do not sum to 1, they will be normalized to sum to 1.
Missing values in the weights column will be treated as zero.
Infinite values not allowed.
random_state : int or numpy.random.RandomState, optional
Seed for the random number generator (if int), or numpy RandomState
object.
axis : {0 or 'index', 1 or 'columns', None}, default None
Axis to sample. Accepts axis number or name. Default is stat axis
for given data type (0 for Series and DataFrames).
Returns
-------
Series or DataFrame
A new object of same type as caller containing `n` items randomly
sampled from the caller object.
See Also
--------
numpy.random.choice: Generates a random sample from a given 1-D numpy
array.
Notes
-----
If `frac` > 1, `replace` should be set to `True`.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],
... 'num_wings': [2, 0, 0, 0],
... 'num_specimen_seen': [10, 2, 1, 8]},
... index=['falcon', 'dog', 'spider', 'fish'])
>>> df
num_legs num_wings num_specimen_seen
falcon 2 2 10
dog 4 0 2
spider 8 0 1
fish 0 0 8
Extract 3 random elements from the ``Series`` ``df['num_legs']``:
Note that we use `random_state` to ensure the reproducibility of
the examples.
>>> df['num_legs'].sample(n=3, random_state=1)
fish 0
spider 8
falcon 2
Name: num_legs, dtype: int64
A random 50% sample of the ``DataFrame`` with replacement:
>>> df.sample(frac=0.5, replace=True, random_state=1)
num_legs num_wings num_specimen_seen
dog 4 0 2
fish 0 0 8
An upsample sample of the ``DataFrame`` with replacement:
Note that the `replace` parameter has to be `True` for `frac` > 1.
>>> df.sample(frac=2, replace=True, random_state=1)
num_legs num_wings num_specimen_seen
dog 4 0 2
fish 0 0 8
falcon 2 2 10
falcon 2 2 10
fish 0 0 8
dog 4 0 2
fish 0 0 8
dog 4 0 2
Using a DataFrame column as weights. Rows with larger value in the
`num_specimen_seen` column are more likely to be sampled.
>>> df.sample(n=2, weights='num_specimen_seen', random_state=1)
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
axis_length = self.shape[axis]
# Process random_state argument
rs = com.random_state(random_state)
# Check weights for compliance
if weights is not None:
# If a series, align with frame
if isinstance(weights, ABCSeries):
weights = weights.reindex(self.axes[axis])
# Strings acceptable if a dataframe and axis = 0
if isinstance(weights, str):
if isinstance(self, ABCDataFrame):
if axis == 0:
try:
weights = self[weights]
except KeyError as err:
raise KeyError(
"String passed to weights not a valid column"
) from err
else:
raise ValueError(
"Strings can only be passed to "
"weights when sampling from rows on "
"a DataFrame"
)
else:
raise ValueError(
"Strings cannot be passed as weights "
"when sampling from a Series."
)
weights = pd.Series(weights, dtype="float64")
if len(weights) != axis_length:
raise ValueError(
"Weights and axis to be sampled must be of same length"
)
if (weights == np.inf).any() or (weights == -np.inf).any():
raise ValueError("weight vector may not include `inf` values")
if (weights < 0).any():
raise ValueError("weight vector many not include negative values")
# If has nan, set to zero.
weights = weights.fillna(0)
# Renormalize if don't sum to 1
if weights.sum() != 1:
if weights.sum() != 0:
weights = weights / weights.sum()
else:
raise ValueError("Invalid weights: weights sum to zero")
weights = weights.values
# If no frac or n, default to n=1.
if n is None and frac is None:
n = 1
elif frac is not None and frac > 1 and not replace:
raise ValueError(
"Replace has to be set to `True` when "
"upsampling the population `frac` > 1."
)
elif n is not None and frac is None and n % 1 != 0:
raise ValueError("Only integers accepted as `n` values")
elif n is None and frac is not None:
n = int(round(frac * axis_length))
elif n is not None and frac is not None:
raise ValueError("Please enter a value for `frac` OR `n`, not both")
# Check for negative sizes
if n < 0:
raise ValueError(
"A negative number of rows requested. Please provide positive value."
)
locs = rs.choice(axis_length, size=n, replace=replace, p=weights)
return self.take(locs, axis=axis)
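# Illustrative sketch (not part of the public docstring): the weight handling
# above normalises any non-zero weight vector before it is passed to
# ``rs.choice``, so only the ratios matter, e.g. for a 4-row frame
# ``df.sample(n=2, weights=[1, 1, 1, 97], random_state=1)`` behaves the same
# as passing ``weights=[0.01, 0.01, 0.01, 0.97]``.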
_shared_docs[
"pipe"
] = r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
Function to apply to the %(klass)s.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the %(klass)s.
args : iterable, optional
Positional arguments passed into ``func``.
kwargs : mapping, optional
A dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
See Also
--------
DataFrame.apply : Apply a function along input axis of DataFrame.
DataFrame.applymap : Apply a function elementwise on a whole DataFrame.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. Instead of writing
>>> f(g(h(df), arg1=a), arg2=b, arg3=c)
You can write
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe(f, arg2=b, arg3=c)
... )
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``arg2``:
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe((f, 'arg2'), arg1=a, arg3=c)
... )
"""
@Appender(_shared_docs["pipe"] % _shared_doc_kwargs)
def pipe(self, func, *args, **kwargs):
return com.pipe(self, func, *args, **kwargs)
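# Hedged sketch of what ``com.pipe`` is expected to do with the tuple form
# documented above (the actual dispatch lives in pandas.core.common): when
# ``func`` is ``(callable, data_keyword)``, the object is injected as a
# keyword argument, i.e. ``callable(*args, **{data_keyword: self}, **kwargs)``;
# otherwise it is simply ``func(self, *args, **kwargs)``.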
_shared_docs["aggregate"] = dedent(
"""
Aggregate using one or more operations over the specified axis.
%(versionadded)s
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.sum, 'mean']``
- dict of axis labels -> functions, function names or list of such.
%(axis)s
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
scalar, Series or DataFrame
The return can be:
* scalar : when Series.agg is called with single function
* Series : when DataFrame.agg is called with a single function
* DataFrame : when DataFrame.agg is called with several functions
Return scalar, Series or DataFrame.
%(see_also)s
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
A passed user-defined-function will be passed a Series for evaluation.
%(examples)s"""
)
_shared_docs[
"transform"
] = """
Call ``func`` on self producing a %(klass)s with transformed values.
Produced %(klass)s will have same axis length as self.
Parameters
----------
func : function, str, list or dict
Function to use for transforming the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.exp, 'sqrt']``
- dict of axis labels -> functions, function names or list of such.
%(axis)s
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
%(klass)s
A %(klass)s that must have the same length as self.
Raises
------
ValueError : If the returned %(klass)s has a different length than self.
See Also
--------
%(klass)s.agg : Only perform aggregating type operations.
%(klass)s.apply : Invoke function on a %(klass)s.
Examples
--------
>>> df = pd.DataFrame({'A': range(3), 'B': range(1, 4)})
>>> df
A B
0 0 1
1 1 2
2 2 3
>>> df.transform(lambda x: x + 1)
A B
0 1 2
1 2 3
2 3 4
Even though the resulting %(klass)s must have the same length as the
input %(klass)s, it is possible to provide several input functions:
>>> s = pd.Series(range(3))
>>> s
0 0
1 1
2 2
dtype: int64
>>> s.transform([np.sqrt, np.exp])
sqrt exp
0 0.000000 1.000000
1 1.000000 2.718282
2 1.414214 7.389056
"""
# ----------------------------------------------------------------------
# Attribute access
def __finalize__(
self: FrameOrSeries, other, method=None, **kwargs
) -> FrameOrSeries:
"""
Propagate metadata from other to self.
Parameters
----------
other : the object from which to get the attributes that we are going
to propagate
method : optional, a passed method name; possibly to take different
types of propagation actions based on this
"""
if isinstance(other, NDFrame):
for name in other.attrs:
self.attrs[name] = other.attrs[name]
# For subclasses using _metadata.
for name in self._metadata:
assert isinstance(name, str)
object.__setattr__(self, name, getattr(other, name, None))
return self
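# Minimal illustration of the ``attrs`` propagation performed above: because
# constructors such as ``copy``/``astype`` call ``__finalize__(self)``, a
# frame's ``attrs`` dict follows it through those operations, e.g.
#   df = pd.DataFrame({"a": [1]}); df.attrs["source"] = "sensor"
#   df.copy().attrs  ->  {'source': 'sensor'}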
def __getattr__(self, name: str):
"""
After regular attribute access, try looking up the name
This allows simpler access to columns for interactive use.
"""
# Note: obj.x will always call obj.__getattribute__('x') prior to
# calling obj.__getattr__('x').
if (
name in self._internal_names_set
or name in self._metadata
or name in self._accessors
):
return object.__getattribute__(self, name)
else:
if self._info_axis._can_hold_identifiers_and_holds_name(name):
return self[name]
return object.__getattribute__(self, name)
def __setattr__(self, name: str, value) -> None:
"""
After regular attribute access, try setting the name
This allows simpler access to columns for interactive use.
"""
# first try regular attribute access via __getattribute__, so that
# e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify
# the same attribute.
try:
object.__getattribute__(self, name)
return object.__setattr__(self, name, value)
except AttributeError:
pass
# if this fails, go on to more involved attribute setting
# (note that this matches __getattr__, above).
if name in self._internal_names_set:
object.__setattr__(self, name, value)
elif name in self._metadata:
object.__setattr__(self, name, value)
else:
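# The name is neither an internal attribute nor registered metadata:
# decide below between updating an existing column (when the name lives
# on the info axis) and setting a plain instance attribute.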
try:
existing = getattr(self, name)
if isinstance(existing, Index):
object.__setattr__(self, name, value)
elif name in self._info_axis:
self[name] = value
else:
object.__setattr__(self, name, value)
except (AttributeError, TypeError):
if isinstance(self, ABCDataFrame) and (is_list_like(value)):
warnings.warn(
"Pandas doesn't allow columns to be "
"created via a new attribute name - see "
"https://pandas.pydata.org/pandas-docs/"
"stable/indexing.html#attribute-access",
stacklevel=2,
)
object.__setattr__(self, name, value)
def _dir_additions(self):
"""
add the string-like attributes from the info_axis.
If info_axis is a MultiIndex, its first-level values are used.
"""
additions = {
c
for c in self._info_axis.unique(level=0)[:100]
if isinstance(c, str) and c.isidentifier()
}
return super()._dir_additions().union(additions)
# ----------------------------------------------------------------------
# Consolidation of internals
def _protect_consolidate(self, f):
"""
Consolidate _data -- if the blocks have changed, then clear the
cache
"""
blocks_before = len(self._data.blocks)
result = f()
if len(self._data.blocks) != blocks_before:
self._clear_item_cache()
return result
def _consolidate_inplace(self) -> None:
"""Consolidate data in place and return None"""
def f():
self._data = self._data.consolidate()
self._protect_consolidate(f)
def _consolidate(self, inplace: bool_t = False):
"""
Compute NDFrame with "consolidated" internals (data of each dtype
grouped together in a single ndarray).
Parameters
----------
inplace : bool, default False
If False return new object, otherwise modify existing object.
Returns
-------
consolidated : same type as caller
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if inplace:
self._consolidate_inplace()
else:
f = lambda: self._data.consolidate()
cons_data = self._protect_consolidate(f)
return self._constructor(cons_data).__finalize__(self)
@property
def _is_mixed_type(self) -> bool_t:
f = lambda: self._data.is_mixed_type
return self._protect_consolidate(f)
@property
def _is_numeric_mixed_type(self) -> bool_t:
f = lambda: self._data.is_numeric_mixed_type
return self._protect_consolidate(f)
def _check_inplace_setting(self, value) -> bool_t:
""" check whether we allow in-place setting with this type of value """
if self._is_mixed_type:
if not self._is_numeric_mixed_type:
# allow an actual np.nan thru
if is_float(value) and np.isnan(value):
return True
raise TypeError(
"Cannot do inplace boolean setting on "
"mixed-types with a non np.nan value"
)
return True
def _get_numeric_data(self):
return self._constructor(self._data.get_numeric_data()).__finalize__(self)
def _get_bool_data(self):
return self._constructor(self._data.get_bool_data()).__finalize__(self)
# ----------------------------------------------------------------------
# Internal Interface Methods
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame.
.. warning::
We recommend using :meth:`DataFrame.to_numpy` instead.
Only the values in the DataFrame will be returned, the axes labels
will be removed.
Returns
-------
numpy.ndarray
The values of the DataFrame.
See Also
--------
DataFrame.to_numpy : Recommended alternative to this method.
DataFrame.index : Retrieve the index labels.
DataFrame.columns : Retrieving the column names.
Notes
-----
The dtype will be a lower-common-denominator dtype (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen. Use this
with care if you are not dealing with the blocks.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. If dtypes are int32 and uint8, dtype will be upcast to
int32. By :func:`numpy.find_common_type` convention, mixing int64
and uint64 will result in a float64 dtype.
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results
in an array of the same type.
>>> df = pd.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]], dtype=int64)
A DataFrame with mixed type columns (e.g., str/object, int64, float32)
results in an ndarray of the broadest type that accommodates these
mixed types (e.g., object).
>>> df2 = pd.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 1),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 1],
['monkey', nan, None]], dtype=object)
"""
self._consolidate_inplace()
return self._data.as_array(transpose=self._AXIS_REVERSED)
@property
def _values(self) -> np.ndarray:
"""internal implementation"""
return self.values
def _internal_get_values(self) -> np.ndarray:
"""
Return an ndarray after converting sparse values to dense.
This is the same as ``.values`` for non-sparse data. For sparse
data contained in a `SparseArray`, the data are first
converted to a dense representation.
Returns
-------
numpy.ndarray
Numpy representation of DataFrame.
See Also
--------
values : Numpy representation of DataFrame.
SparseArray : Container for sparse data.
"""
return self.values
@property
def dtypes(self):
"""
Return the dtypes in the DataFrame.
This returns a Series with the data type of each column.
The result's index is the original DataFrame's columns. Columns
with mixed types are stored with the ``object`` dtype. See
:ref:`the User Guide <basics.dtypes>` for more.
Returns
-------
pandas.Series
The data type of each column.
Examples
--------
>>> df = pd.DataFrame({'float': [1.0],
... 'int': [1],
... 'datetime': [pd.Timestamp('20180310')],
... 'string': ['foo']})
>>> df.dtypes
float float64
int int64
datetime datetime64[ns]
string object
dtype: object
"""
from pandas import Series
return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_)
def _to_dict_of_blocks(self, copy: bool_t = True):
"""
Return a dict of dtype -> Constructor Types that
each is a homogeneous dtype.
Internal ONLY
"""
return {
k: self._constructor(v).__finalize__(self)
for k, v in self._data.to_dict(copy=copy).items()
}
def astype(
self: FrameOrSeries, dtype, copy: bool_t = True, errors: str = "raise"
) -> FrameOrSeries:
"""
Cast a pandas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
copy : bool, default True
Return a copy when ``copy=True`` (be very careful setting
``copy=False`` as changes to values then may propagate to other
pandas objects).
errors : {'raise', 'ignore'}, default 'raise'
Control raising of exceptions on invalid data for provided dtype.
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to a numeric type.
numpy.ndarray.astype : Cast a numpy array to a specified type.
Examples
--------
Create a DataFrame:
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df.dtypes
col1 int64
col2 int64
dtype: object
Cast all columns to int32:
>>> df.astype('int32').dtypes
col1 int32
col2 int32
dtype: object
Cast col1 to int32 using a dictionary:
>>> df.astype({'col1': 'int32'}).dtypes
col1 int32
col2 int64
dtype: object
Create a series:
>>> ser = pd.Series([1, 2], dtype='int32')
>>> ser
0 1
1 2
dtype: int32
>>> ser.astype('int64')
0 1
1 2
dtype: int64
Convert to categorical type:
>>> ser.astype('category')
0 1
1 2
dtype: category
Categories (2, int64): [1, 2]
Convert to ordered categorical type with custom ordering:
>>> cat_dtype = pd.api.types.CategoricalDtype(
... categories=[2, 1], ordered=True)
>>> ser.astype(cat_dtype)
0 1
1 2
dtype: category
Categories (2, int64): [2 < 1]
Note that using ``copy=False`` and changing data on a new
pandas object may propagate changes:
>>> s1 = pd.Series([1, 2])
>>> s2 = s1.astype('int64', copy=False)
>>> s2[0] = 10
>>> s1 # note that s1[0] has changed too
0 10
1 2
dtype: int64
"""
if is_dict_like(dtype):
if self.ndim == 1: # i.e. Series
if len(dtype) > 1 or self.name not in dtype:
raise KeyError(
"Only the Series name can be used for "
"the key in Series dtype mappings."
)
new_type = dtype[self.name]
return self.astype(new_type, copy, errors)
for col_name in dtype.keys():
if col_name not in self:
raise KeyError(
"Only a column name can be used for the "
"key in a dtype mappings argument."
)
results = []
for col_name, col in self.items():
if col_name in dtype:
results.append(
col.astype(dtype=dtype[col_name], copy=copy, errors=errors)
)
else:
results.append(col.copy() if copy else col)
elif is_extension_array_dtype(dtype) and self.ndim > 1:
# GH 18099/22869: columnwise conversion to extension dtype
# GH 24704: use iloc to handle duplicate column names
results = [
self.iloc[:, i].astype(dtype, copy=copy)
for i in range(len(self.columns))
]
else:
# else, only a single dtype is given
new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors)
return self._constructor(new_data).__finalize__(self)
# GH 19920: retain column metadata after concat
result = pd.concat(results, axis=1, copy=False)
result.columns = self.columns
return result
def copy(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
"""
Make a copy of this object's indices and data.
When ``deep=True`` (default), a new object will be created with a
copy of the calling object's data and indices. Modifications to
the data or indices of the copy will not be reflected in the
original object (see notes below).
When ``deep=False``, a new object will be created without copying
the calling object's data or index (only references to the data
and index are copied). Any changes to the data of the original
will be reflected in the shallow copy (and vice versa).
Parameters
----------
deep : bool, default True
Make a deep copy, including a copy of the data and the indices.
With ``deep=False`` neither the indices nor the data are copied.
Returns
-------
copy : Series or DataFrame
Object type matches caller.
Notes
-----
When ``deep=True``, data is copied but actual Python objects
will not be copied recursively, only the reference to the object.
This is in contrast to `copy.deepcopy` in the Standard Library,
which recursively copies object data (see examples below).
While ``Index`` objects are copied when ``deep=True``, the underlying
numpy array is not copied for performance reasons. Since ``Index`` is
immutable, the underlying data can be safely shared and a copy
is not needed.
Examples
--------
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> s
a 1
b 2
dtype: int64
>>> s_copy = s.copy()
>>> s_copy
a 1
b 2
dtype: int64
**Shallow copy versus default (deep) copy:**
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> deep = s.copy()
>>> shallow = s.copy(deep=False)
Shallow copy shares data and index with original.
>>> s is shallow
False
>>> s.values is shallow.values and s.index is shallow.index
True
Deep copy has own copy of data and index.
>>> s is deep
False
>>> s.values is deep.values or s.index is deep.index
False
Updates to the data shared by shallow copy and original are reflected
in both; the deep copy remains unchanged.
>>> s[0] = 3
>>> shallow[1] = 4
>>> s
a 3
b 4
dtype: int64
>>> shallow
a 3
b 4
dtype: int64
>>> deep
a 1
b 2
dtype: int64
Note that when copying an object containing Python objects, a deep copy
will copy the data, but will not do so recursively. Updating a nested
data object will be reflected in the deep copy.
>>> s = pd.Series([[1, 2], [3, 4]])
>>> deep = s.copy()
>>> s[0][0] = 10
>>> s
0 [10, 2]
1 [3, 4]
dtype: object
>>> deep
0 [10, 2]
1 [3, 4]
dtype: object
"""
data = self._data.copy(deep=deep)
return self._constructor(data).__finalize__(self)
def __copy__(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
return self.copy(deep=deep)
def __deepcopy__(self: FrameOrSeries, memo=None) -> FrameOrSeries:
"""
Parameters
----------
memo, default None
Standard signature. Unused
"""
return self.copy(deep=True)
def _convert(
self: FrameOrSeries,
datetime: bool_t = False,
numeric: bool_t = False,
timedelta: bool_t = False,
coerce: bool_t = False,
copy: bool_t = True,
) -> FrameOrSeries:
"""
Attempt to infer better dtype for object columns
Parameters
----------
datetime : bool, default False
If True, convert to date where possible.
numeric : bool, default False
If True, attempt to convert to numbers (including strings), with
unconvertible values becoming NaN.
timedelta : bool, default False
If True, convert to timedelta where possible.
coerce : bool, default False
If True, force conversion with unconvertible values converted to
nulls (NaN or NaT).
copy : bool, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
Returns
-------
converted : same as input object
"""
validate_bool_kwarg(datetime, "datetime")
validate_bool_kwarg(numeric, "numeric")
validate_bool_kwarg(timedelta, "timedelta")
validate_bool_kwarg(coerce, "coerce")
validate_bool_kwarg(copy, "copy")
return self._constructor(
self._data.convert(
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=copy,
)
).__finalize__(self)
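# Note: ``infer_objects`` below is effectively this helper invoked with
# datetime=True, timedelta=True, numeric=False and coerce=False, i.e. a
# soft conversion that never forces unconvertible values to NaN/NaT.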
def infer_objects(self: FrameOrSeries) -> FrameOrSeries:
"""
Attempt to infer better dtypes for object columns.
Attempts soft conversion of object-dtyped
columns, leaving non-object and unconvertible
columns unchanged. The inference rules are the
same as during normal Series/DataFrame construction.
.. versionadded:: 0.21.0
Returns
-------
converted : same type as input object
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type.
convert_dtypes : Convert argument to best possible dtype.
Examples
--------
>>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})
>>> df = df.iloc[1:]
>>> df
A
1 1
2 2
3 3
>>> df.dtypes
A object
dtype: object
>>> df.infer_objects().dtypes
A int64
dtype: object
"""
# numeric=False necessary to only soft convert;
# python objects will still be converted to
# native numpy numeric types
return self._constructor(
self._data.convert(
datetime=True, numeric=False, timedelta=True, coerce=False, copy=True
)
).__finalize__(self)
def convert_dtypes(
self: FrameOrSeries,
infer_objects: bool_t = True,
convert_string: bool_t = True,
convert_integer: bool_t = True,
convert_boolean: bool_t = True,
) -> FrameOrSeries:
"""
Convert columns to best possible dtypes using dtypes supporting ``pd.NA``.
.. versionadded:: 1.0.0
Parameters
----------
infer_objects : bool, default True
Whether object dtypes should be converted to the best possible types.
convert_string : bool, default True
Whether object dtypes should be converted to ``StringDtype()``.
convert_integer : bool, default True
Whether, if possible, conversion can be done to integer extension types.
convert_boolean : bool, default True
Whether object dtypes should be converted to ``BooleanDtype()``.
Returns
-------
Series or DataFrame
Copy of input object with new dtype.
See Also
--------
infer_objects : Infer dtypes of objects.
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to a numeric type.
Notes
-----
By default, ``convert_dtypes`` will attempt to convert a Series (or each
Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options
``convert_string``, ``convert_integer``, and ``convert_boolean``, it is
possible to turn off individual conversions to ``StringDtype``, the integer
extension types or ``BooleanDtype``, respectively.
For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference
rules as during normal Series/DataFrame construction. Then, if possible,
convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer extension
type, otherwise leave as ``object``.
If the dtype is integer, convert to an appropriate integer extension type.
If the dtype is numeric, and consists of all integers, convert to an
appropriate integer extension type.
In the future, as new dtypes are added that support ``pd.NA``, the results
of this method will change to support those new dtypes.
Examples
--------
>>> df = pd.DataFrame(
... {
... "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
... "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")),
... "c": pd.Series([True, False, np.nan], dtype=np.dtype("O")),
... "d": pd.Series(["h", "i", np.nan], dtype=np.dtype("O")),
... "e": pd.Series([10, np.nan, 20], dtype=np.dtype("float")),
... "f": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")),
... }
... )
Start with a DataFrame with default dtypes.
>>> df
a b c d e f
0 1 x True h 10.0 NaN
1 2 y False i NaN 100.5
2 3 z NaN NaN 20.0 200.0
>>> df.dtypes
a int32
b object
c object
d object
e float64
f float64
dtype: object
Convert the DataFrame to use best possible dtypes.
>>> dfn = df.convert_dtypes()
>>> dfn
a b c d e f
0 1 x True h 10 NaN
1 2 y False i <NA> 100.5
2 3 z <NA> <NA> 20 200.0
>>> dfn.dtypes
a Int32
b string
c boolean
d string
e Int64
f float64
dtype: object
Start with a Series of strings and missing data represented by ``np.nan``.
>>> s = pd.Series(["a", "b", np.nan])
>>> s
0 a
1 b
2 NaN
dtype: object
Obtain a Series with dtype ``StringDtype``.
>>> s.convert_dtypes()
0 a
1 b
2 <NA>
dtype: string
"""
if self.ndim == 1:
return self._convert_dtypes(
infer_objects, convert_string, convert_integer, convert_boolean
)
else:
results = [
col._convert_dtypes(
infer_objects, convert_string, convert_integer, convert_boolean
)
for col_name, col in self.items()
]
result = pd.concat(results, axis=1, copy=False)
return result
# ----------------------------------------------------------------------
# Filling NA's
@doc(**_shared_doc_kwargs)
def fillna(
self: FrameOrSeries,
value=None,
method=None,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series, or DataFrame
Value to use to fill holes (e.g. 0), alternately a
dict/Series/DataFrame of values specifying which value to use for
each index (for a Series) or column (for a DataFrame). Values not
in the dict/Series/DataFrame will not be filled. This value cannot
be a list.
method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use next valid observation to fill gap.
axis : {axes_single_arg}
Axis along which to fill missing values.
inplace : bool, default False
If True, fill in-place. Note: this will modify any
other views on this object (e.g., a no-copy slice for a column in a
DataFrame).
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
downcast : dict, default is None
A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible).
Returns
-------
{klass} or None
Object with missing values filled or None if ``inplace=True``.
See Also
--------
interpolate : Fill NaN values using interpolation.
reindex : Conform object to new index.
asfreq : Convert TimeSeries to specified frequency.
Examples
--------
>>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],
... [3, 4, np.nan, 1],
... [np.nan, np.nan, np.nan, 5],
... [np.nan, 3, np.nan, 4]],
... columns=list('ABCD'))
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 NaN 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 0.0 4
We can also propagate non-null values forward or backward.
>>> df.fillna(method='ffill')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 NaN 4
Replace all NaN elements in columns 'A', 'B', 'C', and 'D', with 0, 1,
2, and 3 respectively.
>>> values = {{'A': 0, 'B': 1, 'C': 2, 'D': 3}}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 2.0 4
Only replace the first NaN element.
>>> df.fillna(value=values, limit=1)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 NaN 1
2 NaN 1.0 NaN 5
3 NaN 3.0 NaN 4
"""
inplace = validate_bool_kwarg(inplace, "inplace")
value, method = validate_fillna_kwargs(value, method)
self._consolidate_inplace()
# set the default here, so functions examining the signature
# can detect if something was set (e.g. in groupby) (GH9221)
if axis is None:
axis = 0
axis = self._get_axis_number(axis)
if value is None:
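# No explicit fill value: filling is done by propagating existing values
# (method='pad'/'backfill') through the block-level interpolate machinery
# with ``coerce=True``.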
if self._is_mixed_type and axis == 1:
if inplace:
raise NotImplementedError()
result = self.T.fillna(method=method, limit=limit).T
# need to downcast here because of all of the transposes
result._data = result._data.downcast()
return result
new_data = self._data.interpolate(
method=method,
axis=axis,
limit=limit,
inplace=inplace,
coerce=True,
downcast=downcast,
)
else:
if len(self._get_axis(axis)) == 0:
return self
if self.ndim == 1:
if isinstance(value, (dict, ABCSeries)):
value = create_series_with_explicit_dtype(
value, dtype_if_empty=object
)
elif not is_list_like(value):
pass
else:
raise TypeError(
'"value" parameter must be a scalar, dict '
"or Series, but you passed a "
f'"{type(value).__name__}"'
)
new_data = self._data.fillna(
value=value, limit=limit, inplace=inplace, downcast=downcast
)
elif isinstance(value, (dict, ABCSeries)):
if axis == 1:
raise NotImplementedError(
"Currently only can fill "
"with dict/Series column "
"by column"
)
result = self if inplace else self.copy()
for k, v in value.items():
if k not in result:
continue
obj = result[k]
obj.fillna(v, limit=limit, inplace=True, downcast=downcast)
return result if not inplace else None
elif not is_list_like(value):
new_data = self._data.fillna(
value=value, limit=limit, inplace=inplace, downcast=downcast
)
elif isinstance(value, ABCDataFrame) and self.ndim == 2:
new_data = self.where(self.notna(), value)
else:
raise ValueError(f"invalid fill value with a {type(value)}")
if inplace:
self._update_inplace(new_data)
return None
else:
return self._constructor(new_data).__finalize__(self)
def ffill(
self: FrameOrSeries,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
"""
Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.
Returns
-------
%(klass)s or None
Object with missing values filled or None if ``inplace=True``.
"""
return self.fillna(
method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
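# Usage sketch (not part of the rendered docs): ``pd.Series([1, np.nan, 3]).ffill()``
# yields ``[1.0, 1.0, 3.0]`` because the last valid observation is carried forward.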
def bfill(
self: FrameOrSeries,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
"""
Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.
Returns
-------
%(klass)s or None
Object with missing values filled or None if ``inplace=True``.
"""
return self.fillna(
method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
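# Usage sketch (not part of the rendered docs): ``pd.Series([1, np.nan, 3]).bfill()``
# yields ``[1.0, 3.0, 3.0]`` because the next valid observation is propagated backwards.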
_shared_docs[
"replace"
] = """
Replace values given in `to_replace` with `value`.
Values of the %(klass)s are replaced with other values dynamically.
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.
Parameters
----------
to_replace : str, regex, list, dict, Series, int, float, or None
How to find the values that will be replaced.
* numeric, str or regex:
- numeric: numeric values equal to `to_replace` will be
replaced with `value`
- str: string exactly matching `to_replace` will be replaced
with `value`
- regex: regexs matching `to_replace` will be replaced with
`value`
* list of str, regex, or numeric:
- First, if `to_replace` and `value` are both lists, they
**must** be the same length.
- Second, if ``regex=True`` then all of the strings in **both**
lists will be interpreted as regexs otherwise they will match
directly. This doesn't matter much for `value` since there
are only a few possible substitution regexes you can use.
- str, regex and numeric rules apply as above.
* dict:
- Dicts can be used to specify different replacement values
for different existing values. For example,
``{'a': 'b', 'y': 'z'}`` replaces the value 'a' with 'b' and
'y' with 'z'. To use a dict in this way the `value`
parameter should be `None`.
- For a DataFrame a dict can specify that different values
should be replaced in different columns. For example,
``{'a': 1, 'b': 'z'}`` looks for the value 1 in column 'a'
and the value 'z' in column 'b' and replaces these values
with whatever is specified in `value`. The `value` parameter
should not be ``None`` in this case. You can treat this as a
special case of passing two lists except that you are
specifying the column to search in.
- For a DataFrame nested dictionaries, e.g.,
``{'a': {'b': np.nan}}``, are read as follows: look in column
'a' for the value 'b' and replace it with NaN. The `value`
parameter should be ``None`` to use a nested dict in this
way. You can nest regular expressions as well. Note that
column names (the top-level dictionary keys in a nested
dictionary) **cannot** be regular expressions.
* None:
- This means that the `regex` argument must be a string,
compiled regular expression, or list, dict, ndarray or
Series of such elements. If `value` is also ``None`` then
this **must** be a nested dictionary or Series.
See the examples section for examples of each of these.
value : scalar, dict, list, str, regex, default None
Value to replace any values matching `to_replace` with.
For a DataFrame a dict of values can be used to specify which
value to use for each column (columns not in the dict will not be
filled). Regular expressions, strings and lists or dicts of such
objects are also allowed.
inplace : bool, default False
If True, performs the replacement in place. Note: this will modify any
other views on this object (e.g. a column from a DataFrame).
Returns the caller if this is True.
limit : int, default None
Maximum size gap to forward or backward fill.
regex : bool or same types as `to_replace`, default False
Whether to interpret `to_replace` and/or `value` as regular
expressions. If this is ``True`` then `to_replace` *must* be a
string. Alternatively, this could be a regular expression or a
list, dict, or array of regular expressions in which case
`to_replace` must be ``None``.
method : {'pad', 'ffill', 'bfill', `None`}
The method to use for replacement when `to_replace` is a
scalar, list or tuple and `value` is ``None``.
.. versionchanged:: 0.23.0
Added to DataFrame.
Returns
-------
%(klass)s
Object after replacement.
Raises
------
AssertionError
* If `regex` is not a ``bool`` and `to_replace` is not
``None``.
TypeError
* If `to_replace` is not a scalar, array-like, ``dict``, or ``None``
* If `to_replace` is a ``dict`` and `value` is not a ``list``,
``dict``, ``ndarray``, or ``Series``
* If `to_replace` is ``None`` and `regex` is not compilable
into a regular expression or is a list, dict, ndarray, or
Series.
* When replacing multiple ``bool`` or ``datetime64`` objects and
the arguments to `to_replace` does not match the type of the
value being replaced
ValueError
* If a ``list`` or an ``ndarray`` is passed to `to_replace` and
`value` but they are not the same length.
See Also
--------
%(klass)s.fillna : Fill NA values.
%(klass)s.where : Replace values based on boolean condition.
Series.str.replace : Simple string replacement.
Notes
-----
* Regex substitution is performed under the hood with ``re.sub``. The
rules for substitution for ``re.sub`` are the same.
* Regular expressions will only substitute on strings, meaning you
cannot provide, for example, a regular expression matching floating
point numbers and expect the columns in your frame that have a
numeric dtype to be matched. However, if those floating point
numbers *are* strings, then you can do this.
* This method has *a lot* of options. You are encouraged to experiment
and play with this method to gain intuition about how it works.
* When a dict is used as the `to_replace` value, the dict's
key(s) are treated as the values to look for and its
value(s) as the `value` parameter.
Examples
--------
**Scalar `to_replace` and `value`**
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.replace(0, 5)
0 5
1 1
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame({'A': [0, 1, 2, 3, 4],
... 'B': [5, 6, 7, 8, 9],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> df.replace(0, 5)
A B C
0 5 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
**List-like `to_replace`**
>>> df.replace([0, 1, 2, 3], 4)
A B C
0 4 5 a
1 4 6 b
2 4 7 c
3 4 8 d
4 4 9 e
>>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])
A B C
0 4 5 a
1 3 6 b
2 2 7 c
3 1 8 d
4 4 9 e
>>> s.replace([1, 2], method='bfill')
0 0
1 3
2 3
3 3
4 4
dtype: int64
**dict-like `to_replace`**
>>> df.replace({0: 10, 1: 100})
A B C
0 10 5 a
1 100 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': 0, 'B': 5}, 100)
A B C
0 100 100 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': {0: 100, 4: 400}})
A B C
0 100 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 400 9 e
**Regular expression `to_replace`**
>>> df = pd.DataFrame({'A': ['bat', 'foo', 'bait'],
... 'B': ['abc', 'bar', 'xyz']})
>>> df.replace(to_replace=r'^ba.$', value='new', regex=True)
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace({'A': r'^ba.$'}, {'A': 'new'}, regex=True)
A B
0 new abc
1 foo bar
2 bait xyz
>>> df.replace(regex=r'^ba.$', value='new')
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'})
A B
0 new abc
1 xyz new
2 bait xyz
>>> df.replace(regex=[r'^ba.$', 'foo'], value='new')
A B
0 new abc
1 new new
2 bait xyz
Note that when replacing multiple ``bool`` or ``datetime64`` objects,
the data types in the `to_replace` parameter must match the data
type of the value being replaced:
>>> df = pd.DataFrame({'A': [True, False, True],
... 'B': [False, True, False]})
>>> df.replace({'a string': 'new value', True: False}) # raises
Traceback (most recent call last):
...
TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str'
This raises a ``TypeError`` because one of the ``dict`` keys is not of
the correct type for replacement.
Compare the behavior of ``s.replace({'a': None})`` and
``s.replace('a', None)`` to understand the peculiarities
of the `to_replace` parameter:
>>> s = pd.Series([10, 'a', 'a', 'b', 'a'])
When one uses a dict as the `to_replace` value, it is like the
value(s) in the dict are equal to the `value` parameter.
``s.replace({'a': None})`` is equivalent to
``s.replace(to_replace={'a': None}, value=None, method=None)``:
>>> s.replace({'a': None})
0 10
1 None
2 None
3 b
4 None
dtype: object
When ``value=None`` and `to_replace` is a scalar, list or
tuple, `replace` uses the method parameter (default 'pad') to do the
replacement. This is why the 'a' values are replaced by 10
in rows 1 and 2 and by 'b' in row 4 in this case.
The command ``s.replace('a', None)`` is actually equivalent to
``s.replace(to_replace='a', value=None, method='pad')``:
>>> s.replace('a', None)
0 10
1 10
2 10
3 b
4 b
dtype: object
"""
@Appender(_shared_docs["replace"] % _shared_doc_kwargs)
def replace(
self,
to_replace=None,
value=None,
inplace=False,
limit=None,
regex=False,
method="pad",
):
if not (
is_scalar(to_replace)
or isinstance(to_replace, pd.Series)
or is_re_compilable(to_replace)
or is_list_like(to_replace)
):
raise TypeError(
"Expecting 'to_replace' to be either a scalar, array-like, "
"dict or None, got invalid type "
f"{repr(type(to_replace).__name__)}"
)
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_bool(regex) and to_replace is not None:
raise AssertionError("'to_replace' must be 'None' if 'regex' is not a bool")
self._consolidate_inplace()
if value is None:
# passing a single value that is scalar like
# when value is None (GH5319), for compat
if not is_dict_like(to_replace) and not is_dict_like(regex):
to_replace = [to_replace]
if isinstance(to_replace, (tuple, list)):
if isinstance(self, ABCDataFrame):
return self.apply(
_single_replace, args=(to_replace, method, inplace, limit)
)
return _single_replace(self, to_replace, method, inplace, limit)
if not is_dict_like(to_replace):
if not is_dict_like(regex):
raise TypeError(
'If "to_replace" and "value" are both None '
'and "to_replace" is not a list, then '
"regex must be a mapping"
)
to_replace = regex
regex = True
items = list(to_replace.items())
keys, values = zip(*items) if items else ([], [])
are_mappings = [is_dict_like(v) for v in values]
if any(are_mappings):
if not all(are_mappings):
raise TypeError(
"If a nested mapping is passed, all values "
"of the top level mapping must be mappings"
)
# passed a nested dict/Series
to_rep_dict = {}
value_dict = {}
for k, v in items:
keys, values = list(zip(*v.items())) or ([], [])
to_rep_dict[k] = list(keys)
value_dict[k] = list(values)
to_replace, value = to_rep_dict, value_dict
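# e.g. a nested mapping like {'A': {0: 100}} has been split into
# to_replace={'A': [0]} and value={'A': [100]} for the recursive call below.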
else:
to_replace, value = keys, values
return self.replace(
to_replace, value, inplace=inplace, limit=limit, regex=regex
)
else:
# need a non-zero len on all axes
if not self.size:
return self
new_data = self._data
if is_dict_like(to_replace):
if is_dict_like(value): # {'A' : NA} -> {'A' : 0}
res = self if inplace else self.copy()
for c, src in to_replace.items():
if c in value and c in self:
# object conversion is handled in
# series.replace which is called recursively
res[c] = res[c].replace(
to_replace=src,
value=value[c],
inplace=False,
regex=regex,
)
return None if inplace else res
# {'A': NA} -> 0
elif not is_list_like(value):
keys = [(k, src) for k, src in to_replace.items() if k in self]
keys_len = len(keys) - 1
for i, (k, src) in enumerate(keys):
convert = i == keys_len
new_data = new_data.replace(
to_replace=src,
value=value,
filter=[k],
inplace=inplace,
regex=regex,
convert=convert,
)
else:
raise TypeError("value argument must be scalar, dict, or Series")
elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing']
if is_list_like(value):
if len(to_replace) != len(value):
raise ValueError(
f"Replacement lists must match in length. "
f"Expecting {len(to_replace)} got {len(value)} "
)
new_data = self._data.replace_list(
src_list=to_replace,
dest_list=value,
inplace=inplace,
regex=regex,
)
else: # [NA, ''] -> 0
new_data = self._data.replace(
to_replace=to_replace, value=value, inplace=inplace, regex=regex
)
elif to_replace is None:
if not (
is_re_compilable(regex)
or is_list_like(regex)
or is_dict_like(regex)
):
raise TypeError(
f"'regex' must be a string or a compiled regular expression "
f"or a list or dict of strings or regular expressions, "
f"you passed a {repr(type(regex).__name__)}"
)
return self.replace(
regex, value, inplace=inplace, limit=limit, regex=True
)
else:
# dest iterable dict-like
if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1}
new_data = self._data
for k, v in value.items():
if k in self:
new_data = new_data.replace(
to_replace=to_replace,
value=v,
filter=[k],
inplace=inplace,
regex=regex,
)
elif not is_list_like(value): # NA -> 0
new_data = self._data.replace(
to_replace=to_replace, value=value, inplace=inplace, regex=regex
)
else:
raise TypeError(
f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}'
)
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
_shared_docs[
"interpolate"
] = """
Please note that only ``method='linear'`` is supported for
DataFrame/Series with a MultiIndex.
Parameters
----------
method : str, default 'linear'
Interpolation technique to use. One of:
* 'linear': Ignore the index and treat the values as equally
spaced. This is the only method supported on MultiIndexes.
* 'time': Works on daily and higher resolution data to interpolate
given length of interval.
* 'index', 'values': use the actual numerical values of the index.
* 'pad': Fill in NaNs using existing values.
* 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline',
'barycentric', 'polynomial': Passed to
`scipy.interpolate.interp1d`. These methods use the numerical
values of the index. Both 'polynomial' and 'spline' require that
you also specify an `order` (int), e.g.
``df.interpolate(method='polynomial', order=5)``.
* 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima':
Wrappers around the SciPy interpolation methods of similar
names. See `Notes`.
* 'from_derivatives': Refers to
`scipy.interpolate.BPoly.from_derivatives` which
replaces 'piecewise_polynomial' interpolation method in
scipy 0.18.
axis : {0 or 'index', 1 or 'columns', None}, default None
Axis to interpolate along.
limit : int, optional
Maximum number of consecutive NaNs to fill. Must be greater than
0.
inplace : bool, default False
Update the data in place if possible.
limit_direction : {'forward', 'backward', 'both'}, default 'forward'
If limit is specified, consecutive NaNs will be filled in this
direction.
limit_area : {`None`, 'inside', 'outside'}, default None
If limit is specified, consecutive NaNs will be filled with this
restriction.
* ``None``: No fill restriction.
* 'inside': Only fill NaNs surrounded by valid values
(interpolate).
* 'outside': Only fill NaNs outside valid values (extrapolate).
.. versionadded:: 0.23.0
downcast : optional, 'infer' or None, defaults to None
Downcast dtypes if possible.
**kwargs
Keyword arguments to pass on to the interpolating function.
Returns
-------
Series or DataFrame
Returns the same object type as the caller, interpolated at
some or all ``NaN`` values.
See Also
--------
fillna : Fill missing values using different methods.
scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials
(Akima interpolator).
scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the
Bernstein basis.
scipy.interpolate.interp1d : Interpolate a 1-D function.
scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh
interpolator).
scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic
interpolation.
scipy.interpolate.CubicSpline : Cubic spline data interpolator.
Notes
-----
The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'
methods are wrappers around the respective SciPy implementations of
similar names. These use the actual numerical values of the index.
For more information on their behavior, see the
`SciPy documentation
<https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__
and `SciPy tutorial
<https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__.
Examples
--------
Filling in ``NaN`` in a :class:`~pandas.Series` via linear
interpolation.
>>> s = pd.Series([0, 1, np.nan, 3])
>>> s
0 0.0
1 1.0
2 NaN
3 3.0
dtype: float64
>>> s.interpolate()
0 0.0
1 1.0
2 2.0
3 3.0
dtype: float64
Filling in ``NaN`` in a Series by padding, but filling at most two
consecutive ``NaN`` at a time.
>>> s = pd.Series([np.nan, "single_one", np.nan,
... "fill_two_more", np.nan, np.nan, np.nan,
... 4.71, np.nan])
>>> s
0 NaN
1 single_one
2 NaN
3 fill_two_more
4 NaN
5 NaN
6 NaN
7 4.71
8 NaN
dtype: object
>>> s.interpolate(method='pad', limit=2)
0 NaN
1 single_one
2 single_one
3 fill_two_more
4 fill_two_more
5 fill_two_more
6 NaN
7 4.71
8 4.71
dtype: object
Filling in ``NaN`` in a Series via polynomial interpolation or splines:
Both 'polynomial' and 'spline' methods require that you also specify
an ``order`` (int).
>>> s = pd.Series([0, 2, np.nan, 8])
>>> s.interpolate(method='polynomial', order=2)
0 0.000000
1 2.000000
2 4.666667
3 8.000000
dtype: float64
Fill the DataFrame forward (that is, going down) along each column
using linear interpolation.
Note how the last entry in column 'a' is interpolated differently,
because there is no entry after it to use for interpolation.
Note how the first entry in column 'b' remains ``NaN``, because there
is no entry before it to use for interpolation.
>>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),
... (np.nan, 2.0, np.nan, np.nan),
... (2.0, 3.0, np.nan, 9.0),
... (np.nan, 4.0, -4.0, 16.0)],
... columns=list('abcd'))
>>> df
a b c d
0 0.0 NaN -1.0 1.0
1 NaN 2.0 NaN NaN
2 2.0 3.0 NaN 9.0
3 NaN 4.0 -4.0 16.0
>>> df.interpolate(method='linear', limit_direction='forward', axis=0)
a b c d
0 0.0 NaN -1.0 1.0
1 1.0 2.0 -2.0 5.0
2 2.0 3.0 -3.0 9.0
3 2.0 4.0 -4.0 16.0
Using polynomial interpolation.
>>> df['d'].interpolate(method='polynomial', order=2)
0 1.0
1 4.0
2 9.0
3 16.0
Name: d, dtype: float64
"""
@Appender(_shared_docs["interpolate"] % _shared_doc_kwargs)
def interpolate(
self,
method="linear",
axis=0,
limit=None,
inplace=False,
limit_direction="forward",
limit_area=None,
downcast=None,
**kwargs,
):
"""
Interpolate values according to different methods.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
if axis == 0:
ax = self._info_axis_name
_maybe_transposed_self = self
elif axis == 1:
_maybe_transposed_self = self.T
ax = 1
ax = _maybe_transposed_self._get_axis_number(ax)
if _maybe_transposed_self.ndim == 2:
alt_ax = 1 - ax
else:
alt_ax = ax
if isinstance(_maybe_transposed_self.index, MultiIndex) and method != "linear":
raise ValueError(
"Only `method=linear` interpolation is supported on MultiIndexes."
)
if _maybe_transposed_self._data.get_dtype_counts().get("object") == len(
_maybe_transposed_self.T
):
raise TypeError(
"Cannot interpolate with all object-dtype columns "
"in the DataFrame. Try setting at least one "
"column to a numeric dtype."
)
# create/use the index
if method == "linear":
# prior default
index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax)))
else:
index = _maybe_transposed_self._get_axis(alt_ax)
methods = {"index", "values", "nearest", "time"}
is_numeric_or_datetime = (
is_numeric_dtype(index)
or is_datetime64_any_dtype(index)
or is_timedelta64_dtype(index)
)
if method not in methods and not is_numeric_or_datetime:
raise ValueError(
"Index column must be numeric or datetime type when "
f"using {method} method other than linear. "
"Try setting a numeric or datetime index column before "
"interpolating."
)
if isna(index).any():
raise NotImplementedError(
"Interpolation with NaNs in the index "
"has not been implemented. Try filling "
"those NaNs before interpolating."
)
data = _maybe_transposed_self._data
new_data = data.interpolate(
method=method,
axis=ax,
index=index,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
inplace=inplace,
downcast=downcast,
**kwargs,
)
if inplace:
if axis == 1:
new_data = self._constructor(new_data).T._data
self._update_inplace(new_data)
else:
res = self._constructor(new_data).__finalize__(self)
if axis == 1:
res = res.T
return res
# ----------------------------------------------------------------------
# Timeseries Methods
def asof(self, where, subset=None):
"""
Return the last row(s) without any NaNs before `where`.
The last row (for each element in `where`, if list) without any
NaN is taken.
In case of a :class:`~pandas.DataFrame`, the last row without NaN
considering only the subset of columns (if not `None`) is taken.
If there is no good value, NaN is returned for a Series or
a Series of NaN values for a DataFrame.
Parameters
----------
where : date or array-like of dates
Date(s) before which the last row(s) are returned.
subset : str or array-like of str, default `None`
For DataFrame, if not `None`, only use these columns to
check for NaNs.
Returns
-------
scalar, Series, or DataFrame
The return can be:
* scalar : when `self` is a Series and `where` is a scalar
* Series: when `self` is a Series and `where` is an array-like,
or when `self` is a DataFrame and `where` is a scalar
* DataFrame : when `self` is a DataFrame and `where` is an
array-like
Return scalar, Series, or DataFrame.
See Also
--------
merge_asof : Perform an asof merge. Similar to left join.
Notes
-----
Dates are assumed to be sorted. Raises if this is not the case.
Examples
--------
A Series and a scalar `where`.
>>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])
>>> s
10 1.0
20 2.0
30 NaN
40 4.0
dtype: float64
>>> s.asof(20)
2.0
For a sequence `where`, a Series is returned. The first value is
NaN, because the first element of `where` is before the first
index value.
>>> s.asof([5, 20])
5 NaN
20 2.0
dtype: float64
Missing values are not considered. The following is ``2.0``, not
NaN, even though NaN is at the index location for ``30``.
>>> s.asof(30)
2.0
Take all columns into consideration
>>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50],
... 'b': [None, None, None, None, 500]},
... index=pd.DatetimeIndex(['2018-02-27 09:01:00',
... '2018-02-27 09:02:00',
... '2018-02-27 09:03:00',
... '2018-02-27 09:04:00',
... '2018-02-27 09:05:00']))
>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
... '2018-02-27 09:04:30']))
a b
2018-02-27 09:03:30 NaN NaN
2018-02-27 09:04:30 NaN NaN
Take a single column into consideration
>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
... '2018-02-27 09:04:30']),
... subset=['a'])
a b
2018-02-27 09:03:30 30.0 NaN
2018-02-27 09:04:30 40.0 NaN
"""
if isinstance(where, str):
where = Timestamp(where)
if not self.index.is_monotonic:
raise ValueError("asof requires a sorted index")
is_series = isinstance(self, ABCSeries)
if is_series:
if subset is not None:
raise ValueError("subset is not valid for Series")
else:
if subset is None:
subset = self.columns
if not is_list_like(subset):
subset = [subset]
is_list = is_list_like(where)
if not is_list:
start = self.index[0]
if isinstance(self.index, PeriodIndex):
where = Period(where, freq=self.index.freq)
if where < start:
if not is_series:
from pandas import Series
return Series(index=self.columns, name=where, dtype=np.float64)
return np.nan
# It's always much faster to use a *while* loop here for
# Series than pre-computing all the NAs. However a
# *while* loop is extremely expensive for DataFrame
# so we later pre-compute all the NAs and use the same
# code path whether *where* is a scalar or list.
# See PR: https://github.com/pandas-dev/pandas/pull/14476
if is_series:
loc = self.index.searchsorted(where, side="right")
if loc > 0:
loc -= 1
values = self._values
while loc > 0 and isna(values[loc]):
loc -= 1
return values[loc]
if not isinstance(where, Index):
where = Index(where) if is_list else Index([where])
nulls = self.isna() if is_series else self[subset].isna().any(1)
if nulls.all():
if is_series:
return self._constructor(np.nan, index=where, name=self.name)
elif is_list:
from pandas import DataFrame
return DataFrame(np.nan, index=where, columns=self.columns)
else:
from pandas import Series
return Series(np.nan, index=self.columns, name=where[0])
locs = self.index.asof_locs(where, ~(nulls.values))
# mask the missing
missing = locs == -1
data = self.take(locs)
data.index = where
data.loc[missing] = np.nan
return data if is_list else data.iloc[-1]
# ----------------------------------------------------------------------
# Action Methods
_shared_docs[
"isna"
] = """
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as None or :attr:`numpy.NaN`, get mapped to True
values.
Everything else gets mapped to False values. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is not an NA value.
See Also
--------
%(klass)s.isnull : Alias of isna.
%(klass)s.notna : Boolean inverse of isna.
%(klass)s.dropna : Omit axes labels with missing values.
isna : Top-level isna.
Examples
--------
Show which entries in a DataFrame are NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.isna()
age born name toy
0 False True False True
1 False False False False
2 True False False False
Show which entries in a Series are NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.isna()
0 False
1 False
2 True
dtype: bool
"""
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isna(self: FrameOrSeries) -> FrameOrSeries:
return isna(self).__finalize__(self)
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isnull(self: FrameOrSeries) -> FrameOrSeries:
return isna(self).__finalize__(self)
_shared_docs[
"notna"
] = """
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. Values such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to False
values.
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is not an NA value.
See Also
--------
%(klass)s.notnull : Alias of notna.
%(klass)s.isna : Boolean inverse of notna.
%(klass)s.dropna : Omit axes labels with missing values.
notna : Top-level notna.
Examples
--------
Show which entries in a DataFrame are not NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.notna()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are not NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
dtype: bool
"""
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notna(self: FrameOrSeries) -> FrameOrSeries:
return notna(self).__finalize__(self)
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notnull(self: FrameOrSeries) -> FrameOrSeries:
return notna(self).__finalize__(self)
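# Illustrative sketch (assumes the ``use_inf_as_na`` option; the toy Series
# is not taken from the docstrings above): by default ``inf`` is not treated
# as NA, but enabling the option makes ``isna``/``notna`` report it as
# missing.
# >>> s = pd.Series([1.0, np.inf])
# >>> s.isna()
# 0    False
# 1    False
# dtype: bool
# >>> with pd.option_context('mode.use_inf_as_na', True):
# ...     print(s.isna())
# 0    False
# 1     True
# dtype: bool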
def _clip_with_scalar(self, lower, upper, inplace: bool_t = False):
if (lower is not None and np.any(isna(lower))) or (
upper is not None and np.any(isna(upper))
):
raise ValueError("Cannot use an NA value as a clip threshold")
result = self
mask = isna(self.values)
with np.errstate(all="ignore"):
if upper is not None:
subset = self.to_numpy() <= upper
result = result.where(subset, upper, axis=None, inplace=False)
if lower is not None:
subset = self.to_numpy() >= lower
result = result.where(subset, lower, axis=None, inplace=False)
if np.any(mask):
result[mask] = np.nan
if inplace:
self._update_inplace(result)
else:
return result
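# Illustrative sketch (toy Series assumed): the mask computed above means
# NaN entries pass through ``clip`` unchanged instead of being clipped to a
# bound.
# >>> pd.Series([1.0, np.nan, 10.0]).clip(2, 5)
# 0    2.0
# 1    NaN
# 2    5.0
# dtype: float64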
def _clip_with_one_bound(self, threshold, method, axis, inplace):
if axis is not None:
axis = self._get_axis_number(axis)
# method is self.le for upper bound and self.ge for lower bound
if is_scalar(threshold) and is_number(threshold):
if method.__name__ == "le":
return self._clip_with_scalar(None, threshold, inplace=inplace)
return self._clip_with_scalar(threshold, None, inplace=inplace)
subset = method(threshold, axis=axis) | isna(self)
# GH #15390
# In order for where method to work, the threshold must
# be transformed to NDFrame from other array like structure.
if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):
if isinstance(self, ABCSeries):
threshold = self._constructor(threshold, index=self.index)
else:
threshold = _align_method_FRAME(self, threshold, axis, flex=None)[1]
return self.where(subset, threshold, axis=axis, inplace=inplace)
def clip(
self: FrameOrSeries,
lower=None,
upper=None,
axis=None,
inplace: bool_t = False,
*args,
**kwargs,
) -> FrameOrSeries:
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values. Thresholds
can be singular values or array-like, and in the latter case
the clipping is performed element-wise in the specified axis.
Parameters
----------
lower : float or array_like, default None
Minimum threshold value. All values below this
threshold will be set to it.
upper : float or array_like, default None
Maximum threshold value. All values above this
threshold will be set to it.
axis : int or str axis name, optional
Align object with lower and upper along the given axis.
inplace : bool, default False
Whether to perform the operation in place on the data.
.. versionadded:: 0.21.0
*args, **kwargs
Additional keywords have no effect but might be accepted
for compatibility with numpy.
Returns
-------
Series or DataFrame
Same type as calling object with the values outside the
clip boundaries replaced.
Examples
--------
>>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}
>>> df = pd.DataFrame(data)
>>> df
col_0 col_1
0 9 -2
1 -3 -7
2 0 6
3 -1 8
4 5 -5
Clips per column using lower and upper thresholds:
>>> df.clip(-4, 6)
col_0 col_1
0 6 -2
1 -3 -4
2 0 6
3 -1 6
4 5 -4
Clips using specific lower and upper thresholds per column element:
>>> t = pd.Series([2, -4, -1, 6, 3])
>>> t
0 2
1 -4
2 -1
3 6
4 3
dtype: int64
>>> df.clip(t, t + 4, axis=0)
col_0 col_1
0 6 2
1 -3 -4
2 0 3
3 6 8
4 5 3
"""
inplace = validate_bool_kwarg(inplace, "inplace")
axis = nv.validate_clip_with_axis(axis, args, kwargs)
if axis is not None:
axis = self._get_axis_number(axis)
# GH 17276
# numpy doesn't like NaN as a clip value
# so ignore
# GH 19992
# numpy doesn't drop a list-like bound containing NaN
if not is_list_like(lower) and np.any(isna(lower)):
lower = None
if not is_list_like(upper) and np.any(isna(upper)):
upper = None
# GH 2747 (arguments were reversed)
if lower is not None and upper is not None:
if is_scalar(lower) and is_scalar(upper):
lower, upper = min(lower, upper), max(lower, upper)
# fast-path for scalars
if (lower is None or (is_scalar(lower) and is_number(lower))) and (
upper is None or (is_scalar(upper) and is_number(upper))
):
return self._clip_with_scalar(lower, upper, inplace=inplace)
result = self
if lower is not None:
result = result._clip_with_one_bound(
lower, method=self.ge, axis=axis, inplace=inplace
)
if upper is not None:
if inplace:
result = self
result = result._clip_with_one_bound(
upper, method=self.le, axis=axis, inplace=inplace
)
return result
_shared_docs[
"groupby"
] = """
Group %(klass)s using a mapper or by a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : mapping, function, label, or list of labels
Used to determine the groups for the groupby.
If ``by`` is a function, it's called on each value of the object's
index. If a dict or Series is passed, the Series or dict VALUES
will be used to determine the groups (the Series' values are first
aligned; see ``.align()`` method). If an ndarray is passed, the
values are used as-is to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``. Notice
that a tuple is interpreted as a (single) key.
axis : {0 or 'index', 1 or 'columns'}, default 0
Split along rows (0) or columns (1).
level : int, level name, or sequence of such, default None
If the axis is a MultiIndex (hierarchical), group by a particular
level or levels.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
sort : bool, default True
Sort group keys. Get better performance by turning this off.
Note this does not influence the order of observations within each
group. Groupby preserves the order of rows within each group.
group_keys : bool, default True
When calling apply, add group keys to index to identify pieces.
squeeze : bool, default False
Reduce the dimensionality of the return type if possible,
otherwise return a consistent type.
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. versionadded:: 0.23.0
Returns
-------
%(klass)sGroupBy
Returns a groupby object that contains information about the groups.
See Also
--------
resample : Convenience method for frequency conversion and resampling
of time series.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.
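Examples
--------
A minimal illustrative example (a small toy DataFrame is assumed; the
grouped means shown are indicative):
>>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon', 'Parrot', 'Parrot'],
...                    'Max Speed': [380., 370., 24., 26.]})
>>> df.groupby(['Animal']).mean()
        Max Speed
Animal
Falcon      375.0
Parrot       25.0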
"""
def asfreq(
self: FrameOrSeries,
freq,
method=None,
how: Optional[str] = None,
normalize: bool_t = False,
fill_value=None,
) -> FrameOrSeries:
"""
Convert TimeSeries to specified frequency.
Optionally provide filling method to pad/backfill missing values.
Returns the original data conformed to a new index with the specified
frequency. ``resample`` is more appropriate if an operation, such as
summarization, is necessary to represent the data at the new frequency.
Parameters
----------
freq : DateOffset or str
method : {'backfill'/'bfill', 'pad'/'ffill'}, default None
Method to use for filling holes in reindexed Series (note this
does not fill NaNs that already were present):
* 'pad' / 'ffill': propagate last valid observation forward to next
valid
* 'backfill' / 'bfill': use NEXT valid observation to fill.
how : {'start', 'end'}, default end
For PeriodIndex only (see PeriodIndex.asfreq).
normalize : bool, default False
Whether to reset output index to midnight.
fill_value : scalar, optional
Value to use for missing values, applied during upsampling (note
this does not fill NaNs that already were present).
Returns
-------
converted : same type as caller
See Also
--------
reindex
Notes
-----
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
Start by creating a series with 4 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=4, freq='T')
>>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)
>>> df = pd.DataFrame({'s':series})
>>> df
s
2000-01-01 00:00:00 0.0
2000-01-01 00:01:00 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:03:00 3.0
Upsample the series into 30 second bins.
>>> df.asfreq(freq='30S')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 NaN
2000-01-01 00:03:00 3.0
Upsample again, providing a ``fill_value``.
>>> df.asfreq(freq='30S', fill_value=9.0)
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 9.0
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 9.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 9.0
2000-01-01 00:03:00 3.0
Upsample again, providing a ``method``.
>>> df.asfreq(freq='30S', method='bfill')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 2.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 3.0
2000-01-01 00:03:00 3.0
"""
from pandas.core.resample import asfreq
return asfreq(
self,
freq,
method=method,
how=how,
normalize=normalize,
fill_value=fill_value,
)
def at_time(
self: FrameOrSeries, time, asof: bool_t = False, axis=None
) -> FrameOrSeries:
"""
Select values at particular time of day (e.g., 9:30AM).
Parameters
----------
time : datetime.time or str
axis : {0 or 'index', 1 or 'columns'}, default 0
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
between_time : Select values between particular times of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_at_time : Get just the index locations for
values at particular time of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='12H')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-09 12:00:00 2
2018-04-10 00:00:00 3
2018-04-10 12:00:00 4
>>> ts.at_time('12:00')
A
2018-04-09 12:00:00 2
2018-04-10 12:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
try:
indexer = index.indexer_at_time(time, asof=asof)
except AttributeError as err:
raise TypeError("Index must be DatetimeIndex") from err
return self._take_with_is_copy(indexer, axis=axis)
def between_time(
self: FrameOrSeries,
start_time,
end_time,
include_start: bool_t = True,
include_end: bool_t = True,
axis=None,
) -> FrameOrSeries:
"""
Select values between particular times of the day (e.g., 9:00-9:30 AM).
By setting ``start_time`` to be later than ``end_time``,
you can get the times that are *not* between the two times.
Parameters
----------
start_time : datetime.time or str
Initial time as a time filter limit.
end_time : datetime.time or str
End time as a time filter limit.
include_start : bool, default True
Whether the start time needs to be included in the result.
include_end : bool, default True
Whether the end time needs to be included in the result.
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine range time on index or columns value.
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Data from the original object filtered to the specified dates range.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
at_time : Select values at a particular time of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_between_time : Get just the index locations for
values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
2018-04-12 01:00:00 4
>>> ts.between_time('0:15', '0:45')
A
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
You get the times that are *not* between two times by setting
``start_time`` later than ``end_time``:
>>> ts.between_time('0:45', '0:15')
A
2018-04-09 00:00:00 1
2018-04-12 01:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
try:
indexer = index.indexer_between_time(
start_time,
end_time,
include_start=include_start,
include_end=include_end,
)
except AttributeError as err:
raise TypeError("Index must be DatetimeIndex") from err
return self._take_with_is_copy(indexer, axis=axis)
def resample(
self,
rule,
axis=0,
closed: Optional[str] = None,
label: Optional[str] = None,
convention: str = "start",
kind: Optional[str] = None,
loffset=None,
base: int = 0,
on=None,
level=None,
) -> "Resampler":
"""
Resample time-series data.
Convenience method for frequency conversion and resampling of time
series. Object must have a datetime-like index (`DatetimeIndex`,
`PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values
to the `on` or `level` keyword.
Parameters
----------
rule : DateOffset, Timedelta or str
The offset string or object representing target conversion.
axis : {0 or 'index', 1 or 'columns'}, default 0
Which axis to use for up- or down-sampling. For `Series` this
will default to 0, i.e. along the rows. Must be
`DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.
closed : {'right', 'left'}, default None
Which side of bin interval is closed. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
label : {'right', 'left'}, default None
Which bin edge label to label bucket with. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
convention : {'start', 'end', 's', 'e'}, default 'start'
For `PeriodIndex` only, controls whether to use the start or
end of `rule`.
kind : {'timestamp', 'period'}, optional, default None
Pass 'timestamp' to convert the resulting index to a
`DateTimeIndex` or 'period' to convert it to a `PeriodIndex`.
By default the input representation is retained.
loffset : timedelta, default None
Adjust the resampled time labels.
base : int, default 0
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0.
on : str, optional
For a DataFrame, column to use instead of index for resampling.
Column must be datetime-like.
level : str or int, optional
For a MultiIndex, level (name or number) to use for
resampling. `level` must be datetime-like.
Returns
-------
Resampler object
See Also
--------
groupby : Group by mapping, function, label, or list of labels.
Series.resample : Resample a Series.
DataFrame.resample: Resample a DataFrame.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`_
for more.
To learn more about the offset strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__.
Examples
--------
Start by creating a series with 9 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=9, freq='T')
>>> series = pd.Series(range(9), index=index)
>>> series
2000-01-01 00:00:00 0
2000-01-01 00:01:00 1
2000-01-01 00:02:00 2
2000-01-01 00:03:00 3
2000-01-01 00:04:00 4
2000-01-01 00:05:00 5
2000-01-01 00:06:00 6
2000-01-01 00:07:00 7
2000-01-01 00:08:00 8
Freq: T, dtype: int64
Downsample the series into 3 minute bins and sum the values
of the timestamps falling into a bin.
>>> series.resample('3T').sum()
2000-01-01 00:00:00 3
2000-01-01 00:03:00 12
2000-01-01 00:06:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but label each
bin using the right edge instead of the left. Please note that the
value in the bucket used as the label is not included in the bucket
it labels. For example, in the original series the
bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed
value in the resampled bucket with the label ``2000-01-01 00:03:00``
does not include 3 (if it did, the summed value would be 6, not 3).
To include this value close the right side of the bin interval as
illustrated in the example below this one.
>>> series.resample('3T', label='right').sum()
2000-01-01 00:03:00 3
2000-01-01 00:06:00 12
2000-01-01 00:09:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but close the right
side of the bin interval.
>>> series.resample('3T', label='right', closed='right').sum()
2000-01-01 00:00:00 0
2000-01-01 00:03:00 6
2000-01-01 00:06:00 15
2000-01-01 00:09:00 15
Freq: 3T, dtype: int64
Upsample the series into 30 second bins.
>>> series.resample('30S').asfreq()[0:5] # Select first 5 rows
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 1.0
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
Freq: 30S, dtype: float64
Upsample the series into 30 second bins and fill the ``NaN``
values using the ``pad`` method.
>>> series.resample('30S').pad()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 0
2000-01-01 00:01:00 1
2000-01-01 00:01:30 1
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Upsample the series into 30 second bins and fill the
``NaN`` values using the ``bfill`` method.
>>> series.resample('30S').bfill()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 1
2000-01-01 00:01:00 1
2000-01-01 00:01:30 2
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Pass a custom function via ``apply``
>>> def custom_resampler(array_like):
... return np.sum(array_like) + 5
...
>>> series.resample('3T').apply(custom_resampler)
2000-01-01 00:00:00 8
2000-01-01 00:03:00 17
2000-01-01 00:06:00 26
Freq: 3T, dtype: int64
For a Series with a PeriodIndex, the keyword `convention` can be
used to control whether to use the start or end of `rule`.
Resample a year by quarter using 'start' `convention`. Values are
assigned to the first quarter of the period.
>>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',
... freq='A',
... periods=2))
>>> s
2012 1
2013 2
Freq: A-DEC, dtype: int64
>>> s.resample('Q', convention='start').asfreq()
2012Q1 1.0
2012Q2 NaN
2012Q3 NaN
2012Q4 NaN
2013Q1 2.0
2013Q2 NaN
2013Q3 NaN
2013Q4 NaN
Freq: Q-DEC, dtype: float64
Resample quarters by month using 'end' `convention`. Values are
assigned to the last month of the period.
>>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01',
... freq='Q',
... periods=4))
>>> q
2018Q1 1
2018Q2 2
2018Q3 3
2018Q4 4
Freq: Q-DEC, dtype: int64
>>> q.resample('M', convention='end').asfreq()
2018-03 1.0
2018-04 NaN
2018-05 NaN
2018-06 2.0
2018-07 NaN
2018-08 NaN
2018-09 3.0
2018-10 NaN
2018-11 NaN
2018-12 4.0
Freq: M, dtype: float64
For DataFrame objects, the keyword `on` can be used to specify the
column instead of the index for resampling.
>>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
>>> df = pd.DataFrame(d)
>>> df['week_starting'] = pd.date_range('01/01/2018',
... periods=8,
... freq='W')
>>> df
price volume week_starting
0 10 50 2018-01-07
1 11 60 2018-01-14
2 9 40 2018-01-21
3 13 100 2018-01-28
4 14 50 2018-02-04
5 18 100 2018-02-11
6 17 40 2018-02-18
7 19 50 2018-02-25
>>> df.resample('M', on='week_starting').mean()
price volume
week_starting
2018-01-31 10.75 62.5
2018-02-28 17.00 60.0
For a DataFrame with MultiIndex, the keyword `level` can be used to
specify on which level the resampling needs to take place.
>>> days = pd.date_range('1/1/2000', periods=4, freq='D')
>>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
>>> df2 = pd.DataFrame(d2,
... index=pd.MultiIndex.from_product([days,
... ['morning',
... 'afternoon']]
... ))
>>> df2
price volume
2000-01-01 morning 10 50
afternoon 11 60
2000-01-02 morning 9 40
afternoon 13 100
2000-01-03 morning 14 50
afternoon 18 100
2000-01-04 morning 17 40
afternoon 19 50
>>> df2.resample('D', level=0).sum()
price volume
2000-01-01 21 110
2000-01-02 22 140
2000-01-03 32 150
2000-01-04 36 90
"""
from pandas.core.resample import get_resampler
axis = self._get_axis_number(axis)
return get_resampler(
self,
freq=rule,
label=label,
closed=closed,
axis=axis,
kind=kind,
loffset=loffset,
convention=convention,
base=base,
key=on,
level=level,
)
def first(self: FrameOrSeries, offset) -> FrameOrSeries:
"""
Method to subset initial periods of time series data based on a date offset.
Parameters
----------
offset : str, DateOffset, dateutil.relativedelta
Returns
-------
subset : same type as caller
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
last : Select final periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the first 3 days:
>>> ts.first('3D')
A
2018-04-09 1
2018-04-11 2
Notice that the data for the first 3 calendar days was returned, not the
first 3 days observed in the dataset, and therefore data for 2018-04-13 was
not returned.
"""
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'first' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
end_date = end = self.index[0] + offset
# Tick-like, e.g. 3 weeks
if not offset.is_anchored() and hasattr(offset, "_inc"):
if end_date in self.index:
end = self.index.searchsorted(end_date, side="left")
return self.iloc[:end]
return self.loc[:end]
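# Illustrative sketch (mirrors the toy frame from the docstring above): for a
# tick-like offset whose end point lands exactly on an index label, that end
# point is excluded by the positional slice, whereas the ``.loc`` fallback
# above is end-inclusive.
# >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
# >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
# >>> ts.first('2D')
#             A
# 2018-04-09  1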
def last(self: FrameOrSeries, offset) -> FrameOrSeries:
"""
Method to subset final periods of time series data based on a date offset.
Parameters
----------
offset : str, DateOffset, dateutil.relativedelta
Returns
-------
subset : same type as caller
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
first : Select initial periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the last 3 days:
>>> ts.last('3D')
A
2018-04-13 3
2018-04-15 4
Notice that the data for the last 3 calendar days was returned, not the
last 3 observed days in the dataset, and therefore data for 2018-04-11 was
not returned.
"""
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'last' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
start_date = self.index[-1] - offset
start = self.index.searchsorted(start_date, side="right")
return self.iloc[start:]
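# Illustrative sketch (mirrors the toy frame from the docstring above): the
# cut-off ``self.index[-1] - offset`` is itself excluded because of the
# ``side="right"`` search.
# >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
# >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
# >>> ts.last('2D')
#             A
# 2018-04-15  4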
def rank(
self: FrameOrSeries,
axis=0,
method: str = "average",
numeric_only: Optional[bool_t] = None,
na_option: str = "keep",
ascending: bool_t = True,
pct: bool_t = False,
) -> FrameOrSeries:
"""
Compute numerical data ranks (1 through n) along axis.
By default, equal values are assigned a rank that is the average of the
ranks of those values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Index to direct ranking.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
How to rank the group of records that have the same value (i.e. ties):
* average: average rank of the group
* min: lowest rank in the group
* max: highest rank in the group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups.
numeric_only : bool, optional
For DataFrame objects, rank only numeric columns if set to True.
na_option : {'keep', 'top', 'bottom'}, default 'keep'
How to rank NaN values:
* keep: assign NaN rank to NaN values
* top: assign smallest rank to NaN values if ascending
* bottom: assign highest rank to NaN values if ascending.
ascending : bool, default True
Whether or not the elements should be ranked in ascending order.
pct : bool, default False
Whether or not to display the returned rankings in percentile
form.
Returns
-------
same type as caller
Return a Series or DataFrame with data ranks as values.
See Also
--------
core.groupby.GroupBy.rank : Rank of values within each group.
Examples
--------
>>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog',
... 'spider', 'snake'],
... 'Number_legs': [4, 2, 4, 8, np.nan]})
>>> df
Animal Number_legs
0 cat 4.0
1 penguin 2.0
2 dog 4.0
3 spider 8.0
4 snake NaN
The following example shows how the method behaves with the above
parameters:
* default_rank: this is the default behaviour obtained without using
any parameter.
* max_rank: setting ``method = 'max'`` the records that have the
same values are ranked using the highest rank (e.g.: since 'cat'
and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.)
* NA_bottom: choosing ``na_option = 'bottom'``, if there are records
with NaN values they are placed at the bottom of the ranking.
* pct_rank: when setting ``pct = True``, the ranking is expressed as
percentile rank.
>>> df['default_rank'] = df['Number_legs'].rank()
>>> df['max_rank'] = df['Number_legs'].rank(method='max')
>>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom')
>>> df['pct_rank'] = df['Number_legs'].rank(pct=True)
>>> df
Animal Number_legs default_rank max_rank NA_bottom pct_rank
0 cat 4.0 2.5 3.0 2.5 0.625
1 penguin 2.0 1.0 1.0 1.0 0.250
2 dog 4.0 2.5 3.0 2.5 0.625
3 spider 8.0 4.0 4.0 4.0 1.000
4 snake NaN NaN NaN 5.0 NaN
"""
axis = self._get_axis_number(axis)
if na_option not in {"keep", "top", "bottom"}:
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
raise ValueError(msg)
def ranker(data):
ranks = algos.rank(
data.values,
axis=axis,
method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
ranks = self._constructor(ranks, **data._construct_axes_dict())
return ranks.__finalize__(self)
# if numeric_only is None, and we can't get anything, we try with
# numeric_only=True
if numeric_only is None:
try:
return ranker(self)
except TypeError:
numeric_only = True
if numeric_only:
data = self._get_numeric_data()
else:
data = self
return ranker(data)
_shared_docs[
"align"
] = """
Align two objects on their axes with the specified join method.
Join method is specified for each axis Index.
Parameters
----------
other : DataFrame or Series
join : {'outer', 'inner', 'left', 'right'}, default 'outer'
axis : allowed axis of the other object, default None
Align on index (0), columns (1), or both (None).
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level.
copy : bool, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series:
- pad / ffill: propagate last valid observation forward to next valid.
- backfill / bfill: use NEXT valid observation to fill gap.
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
fill_axis : %(axes_single_arg)s, default 0
Filling axis, method and limit.
broadcast_axis : %(axes_single_arg)s, default None
Broadcast values along this axis, if aligning two objects of
different dimensions.
Returns
-------
(left, right) : (%(klass)s, type of other)
Aligned objects.
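Examples
--------
A minimal illustrative example (two small toy Series are assumed; the outer
join produces the union of the two indexes):
>>> s1 = pd.Series([1, 2, 3], index=['a', 'b', 'c'])
>>> s2 = pd.Series([10, 20], index=['b', 'd'])
>>> left, right = s1.align(s2, join='outer')
>>> left
a    1.0
b    2.0
c    3.0
d    NaN
dtype: float64
>>> right
a     NaN
b    10.0
c     NaN
d    20.0
dtype: float64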
"""
@Appender(_shared_docs["align"] % _shared_doc_kwargs)
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
):
method = missing.clean_fill_method(method)
if broadcast_axis == 1 and self.ndim != other.ndim:
if isinstance(self, ABCSeries):
# this means other is a DataFrame, and we need to broadcast
# self
cons = self._constructor_expanddim
df = cons(
{c: self for c in other.columns}, **other._construct_axes_dict()
)
return df._align_frame(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
elif isinstance(other, ABCSeries):
# this means self is a DataFrame, and we need to broadcast
# other
cons = other._constructor_expanddim
df = cons(
{c: other for c in self.columns}, **self._construct_axes_dict()
)
return self._align_frame(
df,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
if axis is not None:
axis = self._get_axis_number(axis)
if isinstance(other, ABCDataFrame):
return self._align_frame(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
elif isinstance(other, ABCSeries):
return self._align_series(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
def _align_frame(
self,
other,
join="outer",
axis=None,
level=None,
copy: bool_t = True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
):
# defaults
join_index, join_columns = None, None
ilidx, iridx = None, None
clidx, cridx = None, None
is_series = isinstance(self, ABCSeries)
if axis is None or axis == 0:
if not self.index.equals(other.index):
join_index, ilidx, iridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
if axis is None or axis == 1:
if not is_series and not self.columns.equals(other.columns):
join_columns, clidx, cridx = self.columns.join(
other.columns, how=join, level=level, return_indexers=True
)
if is_series:
reindexers = {0: [join_index, ilidx]}
else:
reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}
left = self._reindex_with_indexers(
reindexers, copy=copy, fill_value=fill_value, allow_dups=True
)
# other must be always DataFrame
right = other._reindex_with_indexers(
{0: [join_index, iridx], 1: [join_columns, cridx]},
copy=copy,
fill_value=fill_value,
allow_dups=True,
)
if method is not None:
left = self._ensure_type(
left.fillna(method=method, axis=fill_axis, limit=limit)
)
right = right.fillna(method=method, axis=fill_axis, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return left.__finalize__(self), right.__finalize__(other)
def _align_series(
self,
other,
join="outer",
axis=None,
level=None,
copy: bool_t = True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
):
is_series = isinstance(self, ABCSeries)
# series/series compat, other must always be a Series
if is_series:
if axis:
raise ValueError("cannot align series to a series other than axis 0")
# equal
if self.index.equals(other.index):
join_index, lidx, ridx = None, None, None
else:
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
left = self._reindex_indexer(join_index, lidx, copy)
right = other._reindex_indexer(join_index, ridx, copy)
else:
# one has > 1 ndim
fdata = self._data
if axis == 0:
join_index = self.index
lidx, ridx = None, None
if not self.index.equals(other.index):
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=1)
elif axis == 1:
join_index = self.columns
lidx, ridx = None, None
if not self.columns.equals(other.index):
join_index, lidx, ridx = self.columns.join(
other.index, how=join, level=level, return_indexers=True
)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=0)
else:
raise ValueError("Must specify axis=0 or 1")
if copy and fdata is self._data:
fdata = fdata.copy()
left = self._constructor(fdata)
if ridx is None:
right = other
else:
right = other.reindex(join_index, level=level)
# fill
fill_na = notna(fill_value) or (method is not None)
if fill_na:
left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis)
right = right.fillna(fill_value, method=method, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_series or (not is_series and axis == 0):
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return left.__finalize__(self), right.__finalize__(other)
def _where(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
"""
Equivalent to public method `where`, except that `other` is not
applied as a function even if callable. Used in __setitem__.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# align the cond to same shape as myself
cond = com.apply_if_callable(cond, self)
if isinstance(cond, NDFrame):
cond, _ = cond.align(self, join="right", broadcast_axis=1)
else:
if not hasattr(cond, "shape"):
cond = np.asanyarray(cond)
if cond.shape != self.shape:
raise ValueError("Array conditional must be same shape as self")
cond = self._constructor(cond, **self._construct_axes_dict())
# make sure we are boolean
fill_value = bool(inplace)
cond = cond.fillna(fill_value)
msg = "Boolean array expected for the condition, not {dtype}"
if not isinstance(cond, ABCDataFrame):
# This is a single-dimensional object.
if not is_bool_dtype(cond):
raise ValueError(msg.format(dtype=cond.dtype))
elif not cond.empty:
for dt in cond.dtypes:
if not is_bool_dtype(dt):
raise ValueError(msg.format(dtype=dt))
cond = -cond if inplace else cond
# try to align with other
try_quick = True
if hasattr(other, "align"):
# align with me
if other.ndim <= self.ndim:
_, other = self.align(
other, join="left", axis=axis, level=level, fill_value=np.nan
)
# if we are NOT aligned, raise as we cannot where index
if axis is None and not all(
other._get_axis(i).equals(ax) for i, ax in enumerate(self.axes)
):
raise InvalidIndexError
# slice me out of the other
else:
raise NotImplementedError(
"cannot align with a higher dimensional NDFrame"
)
if isinstance(other, np.ndarray):
if other.shape != self.shape:
if self.ndim == 1:
icond = cond.values
# GH 2745 / GH 4192
# treat like a scalar
if len(other) == 1:
other = np.array(other[0])
# GH 3235
# match True cond to other
elif len(cond[icond]) == len(other):
# try to not change dtype at first (if try_quick)
if try_quick:
new_other = np.asarray(self)
new_other = new_other.copy()
new_other[icond] = other
other = new_other
else:
raise ValueError(
"Length of replacements must equal series length"
)
else:
raise ValueError(
"other must be the same shape as self when an ndarray"
)
# we are the same shape, so create an actual object for alignment
else:
other = self._constructor(other, **self._construct_axes_dict())
if axis is None:
axis = 0
if self.ndim == getattr(other, "ndim", 0):
align = True
else:
align = self._get_axis_number(axis) == 1
block_axis = self._get_block_manager_axis(axis)
if inplace:
# we may have different type blocks come out of putmask, so
# reconstruct the block manager
self._check_inplace_setting(other)
new_data = self._data.putmask(
mask=cond,
new=other,
align=align,
inplace=True,
axis=block_axis,
transpose=self._AXIS_REVERSED,
)
self._update_inplace(new_data)
else:
new_data = self._data.where(
other=other,
cond=cond,
align=align,
errors=errors,
try_cast=try_cast,
axis=block_axis,
)
return self._constructor(new_data).__finalize__(self)
_shared_docs[
"where"
] = """
Replace values where the condition is %(cond_rev)s.
Parameters
----------
cond : bool %(klass)s, array-like, or callable
Where `cond` is %(cond)s, keep the original value. Where
%(cond_rev)s, replace with corresponding value from `other`.
If `cond` is callable, it is computed on the %(klass)s and
should return boolean %(klass)s or array. The callable must
not change input %(klass)s (though pandas doesn't check it).
other : scalar, %(klass)s, or callable
Entries where `cond` is %(cond_rev)s are replaced with
corresponding value from `other`.
If other is callable, it is computed on the %(klass)s and
should return scalar or %(klass)s. The callable must not
change input %(klass)s (though pandas doesn't check it).
inplace : bool, default False
Whether to perform the operation in place on the data.
axis : int, default None
Alignment axis if needed.
level : int, default None
Alignment level if needed.
errors : str, {'raise', 'ignore'}, default 'raise'
Note that currently this parameter won't affect
the results and will always coerce to a suitable dtype.
- 'raise' : allow exceptions to be raised.
- 'ignore' : suppress exceptions. On error return original object.
try_cast : bool, default False
Try to cast the result back to the input type (if possible).
Returns
-------
Same type as caller
See Also
--------
:func:`DataFrame.%(name_other)s` : Return an object of same shape as
self.
Notes
-----
The %(name)s method is an application of the if-then idiom. For each
element in the calling DataFrame, if ``cond`` is ``%(cond)s`` the
element is used; otherwise the corresponding element from the DataFrame
``other`` is used.
The signature for :func:`DataFrame.where` differs from
:func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to
``np.where(m, df1, df2)``.
For further details and examples see the ``%(name)s`` documentation in
:ref:`indexing <indexing.where_mask>`.
Examples
--------
>>> s = pd.Series(range(5))
>>> s.where(s > 0)
0 NaN
1 1.0
2 2.0
3 3.0
4 4.0
dtype: float64
>>> s.mask(s > 0)
0 0.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
>>> s.where(s > 1, 10)
0 10
1 10
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])
>>> df
A B
0 0 1
1 2 3
2 4 5
3 6 7
4 8 9
>>> m = df %% 3 == 0
>>> df.where(m, -df)
A B
0 0 -1
1 -2 3
2 -4 -5
3 6 -7
4 -8 9
>>> df.where(m, -df) == np.where(m, df, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
>>> df.where(m, -df) == df.mask(~m, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
"""
@Appender(
_shared_docs["where"]
% dict(
_shared_doc_kwargs,
cond="True",
cond_rev="False",
name="where",
name_other="mask",
)
)
def where(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
other = com.apply_if_callable(other, self)
return self._where(
cond, other, inplace, axis, level, errors=errors, try_cast=try_cast
)
@Appender(
_shared_docs["where"]
% dict(
_shared_doc_kwargs,
cond="False",
cond_rev="True",
name="mask",
name_other="where",
)
)
def mask(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
inplace = validate_bool_kwarg(inplace, "inplace")
cond = com.apply_if_callable(cond, self)
# see gh-21891
if not hasattr(cond, "__invert__"):
cond = np.array(cond)
return self.where(
~cond,
other=other,
inplace=inplace,
axis=axis,
level=level,
try_cast=try_cast,
errors=errors,
)
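# Illustrative sketch (toy Series assumed): a plain Python list also works as
# the condition, because it is wrapped in ``np.array`` above before being
# inverted.
# >>> pd.Series([1, 2, 3]).mask([True, False, True], 0)
# 0    0
# 1    2
# 2    0
# dtype: int64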
_shared_docs[
"shift"
] = """
Shift index by desired number of periods with an optional time `freq`.
When `freq` is not passed, shift the index without realigning the data.
If `freq` is passed (in this case, the index must be date or datetime,
or it will raise a `NotImplementedError`), the index will be
increased using the periods and the `freq`.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
freq : DateOffset, tseries.offsets, timedelta, or str, optional
Offset to use from the tseries module or time rule (e.g. 'EOM').
If `freq` is specified then the index values are shifted but the
data is not realigned. That is, use `freq` if you would like to
extend the index when shifting and preserve the original data.
axis : {0 or 'index', 1 or 'columns', None}, default None
Shift direction.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default depends on the dtype of `self`.
For numeric data, ``np.nan`` is used.
For datetime, timedelta, or period data, etc. :attr:`NaT` is used.
For extension dtypes, ``self.dtype.na_value`` is used.
.. versionchanged:: 0.24.0
Returns
-------
%(klass)s
Copy of input object, shifted.
See Also
--------
Index.shift : Shift values of Index.
DatetimeIndex.shift : Shift values of DatetimeIndex.
PeriodIndex.shift : Shift values of PeriodIndex.
tshift : Shift the time index, using the index's frequency if
available.
Examples
--------
>>> df = pd.DataFrame({'Col1': [10, 20, 15, 30, 45],
... 'Col2': [13, 23, 18, 33, 48],
... 'Col3': [17, 27, 22, 37, 52]})
>>> df.shift(periods=3)
Col1 Col2 Col3
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 10.0 13.0 17.0
4 20.0 23.0 27.0
>>> df.shift(periods=1, axis='columns')
Col1 Col2 Col3
0 NaN 10.0 13.0
1 NaN 20.0 23.0
2 NaN 15.0 18.0
3 NaN 30.0 33.0
4 NaN 45.0 48.0
>>> df.shift(periods=3, fill_value=0)
Col1 Col2 Col3
0 0 0 0
1 0 0 0
2 0 0 0
3 10 13 17
4 20 23 27
"""
@Appender(_shared_docs["shift"] % _shared_doc_kwargs)
def shift(
self: FrameOrSeries, periods=1, freq=None, axis=0, fill_value=None
) -> FrameOrSeries:
if periods == 0:
return self.copy()
block_axis = self._get_block_manager_axis(axis)
if freq is None:
new_data = self._data.shift(
periods=periods, axis=block_axis, fill_value=fill_value
)
else:
return self.tshift(periods, freq)
return self._constructor(new_data).__finalize__(self)
def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries:
"""
Equivalent to `shift` without copying data.
The shifted data will not include the dropped periods and the
shifted axis will be smaller than the original.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative.
Returns
-------
shifted : same type as caller
Notes
-----
While `slice_shift` is faster than `shift`, you may pay for it
later during alignment.
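Examples
--------
A minimal illustrative example (a small integer Series is assumed; note that
the result is one row shorter than the input):
>>> s = pd.Series([1, 2, 3, 4])
>>> s.slice_shift(1)
1    1
2    2
3    3
dtype: int64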
"""
if periods == 0:
return self
if periods > 0:
vslicer = slice(None, -periods)
islicer = slice(periods, None)
else:
vslicer = slice(-periods, None)
islicer = slice(None, periods)
new_obj = self._slice(vslicer, axis=axis)
shifted_axis = self._get_axis(axis)[islicer]
new_obj.set_axis(shifted_axis, axis=axis, inplace=True)
return new_obj.__finalize__(self)
def tshift(
self: FrameOrSeries, periods: int = 1, freq=None, axis=0
) -> FrameOrSeries:
"""
Shift the time index, using the index's frequency if available.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative.
freq : DateOffset, timedelta, or str, default None
Increment to use from the tseries module
or time rule expressed as a string (e.g. 'EOM').
axis : {0 or 'index', 1 or 'columns', None}, default 0
Corresponds to the axis that contains the Index.
Returns
-------
shifted : Series/DataFrame
Notes
-----
If `freq` is not specified, this method tries to use the `freq` or
`inferred_freq` attributes of the index. If neither of those attributes
exists, a ValueError is raised.
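Examples
--------
A minimal illustrative example (a small monthly PeriodIndex Series is
assumed; the index is shifted while the data stay in place):
>>> idx = pd.period_range('2020-01', periods=3, freq='M')
>>> s = pd.Series([1, 2, 3], index=idx)
>>> s.tshift(1)
2020-02    1
2020-03    2
2020-04    3
Freq: M, dtype: int64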
"""
index = self._get_axis(axis)
if freq is None:
freq = getattr(index, "freq", None)
if freq is None:
freq = getattr(index, "inferred_freq", None)
if freq is None:
msg = "Freq was not given and was not set in the index"
raise ValueError(msg)
if periods == 0:
return self
if isinstance(freq, str):
freq = to_offset(freq)
block_axis = self._get_block_manager_axis(axis)
if isinstance(index, PeriodIndex):
orig_freq = to_offset(index.freq)
if freq == orig_freq:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods)
elif orig_freq is not None:
raise ValueError(
f"Given freq {freq.rule_code} does not match "
f"PeriodIndex freq {orig_freq.rule_code}"
)
else:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods, freq)
return self._constructor(new_data).__finalize__(self)
def truncate(
self: FrameOrSeries, before=None, after=None, axis=None, copy: bool_t = True
) -> FrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default True
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Notes
-----
If the index being truncated contains only datetime values,
`before` and `after` may be specified as strings instead of
Timestamps.
Examples
--------
>>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
The index values in ``truncate`` can be datetimes or string
dates.
>>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')
>>> df = pd.DataFrame(index=dates, data={'A': 1})
>>> df.tail()
A
2016-01-31 23:59:56 1
2016-01-31 23:59:57 1
2016-01-31 23:59:58 1
2016-01-31 23:59:59 1
2016-02-01 00:00:00 1
>>> df.truncate(before=pd.Timestamp('2016-01-05'),
... after=pd.Timestamp('2016-01-10')).tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Because the index is a DatetimeIndex containing only dates, we can
specify `before` and `after` as strings. They will be coerced to
Timestamps before truncation.
>>> df.truncate('2016-01-05', '2016-01-10').tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Note that ``truncate`` assumes a 0 value for any unspecified time
component (midnight). This differs from partial string slicing, which
returns any partially matching dates.
>>> df.loc['2016-01-05':'2016-01-10', :].tail()
A
2016-01-10 23:59:55 1
2016-01-10 23:59:56 1
2016-01-10 23:59:57 1
2016-01-10 23:59:58 1
2016-01-10 23:59:59 1
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
# GH 17935
# Check that index is sorted
if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
# if we have a date index, convert to dates, otherwise
# treat like a slice
if ax.is_all_dates:
from pandas.core.tools.datetimes import to_datetime
before = to_datetime(before)
after = to_datetime(after)
if before is not None and after is not None:
if before > after:
raise ValueError(f"Truncate: {after} must be after {before}")
slicer = [slice(None, None)] * self._AXIS_LEN
slicer[axis] = slice(before, after)
result = self.loc[tuple(slicer)]
if isinstance(ax, MultiIndex):
setattr(result, self._get_axis_name(axis), ax.truncate(before, after))
if copy:
result = result.copy()
return result
def tz_convert(
self: FrameOrSeries, tz, axis=0, level=None, copy: bool_t = True
) -> FrameOrSeries:
"""
Convert tz-aware axis to target time zone.
Parameters
----------
tz : str or tzinfo object
axis : the axis to convert
level : int, str, default None
If axis is a MultiIndex, convert a specific level. Otherwise
must be None.
copy : bool, default True
Also make a copy of the underlying data.
Returns
-------
%(klass)s
Object with time zone converted axis.
Raises
------
TypeError
If the axis is tz-naive.
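Examples
--------
A minimal illustrative example (a single UTC-localized timestamp is
assumed):
>>> s = pd.Series([1],
...               index=pd.DatetimeIndex(['2020-03-01 09:00'], tz='UTC'))
>>> s.tz_convert('US/Eastern')
2020-03-01 04:00:00-05:00    1
dtype: int64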
"""
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_convert(ax, tz):
if not hasattr(ax, "tz_convert"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_convert(tz)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_convert(ax.levels[level], tz)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError(f"The level {level} is not valid")
ax = _tz_convert(ax, tz)
result = self._constructor(self._data, copy=copy)
result = result.set_axis(ax, axis=axis, inplace=False)
return result.__finalize__(self)
def tz_localize(
self: FrameOrSeries,
tz,
axis=0,
level=None,
copy: bool_t = True,
ambiguous="raise",
nonexistent: str = "raise",
) -> FrameOrSeries:
"""
Localize tz-naive index of a Series or DataFrame to target time zone.
This operation localizes the Index. To localize the values in a
timezone-naive Series, use :meth:`Series.dt.tz_localize`.
Parameters
----------
tz : str or tzinfo
axis : the axis to localize
level : int, str, default None
If axis is a MultiIndex, localize a specific level. Otherwise
must be None.
copy : bool, default True
Also make a copy of the underlying data.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from
03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
`ambiguous` parameter dictates how ambiguous times should be
handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False designates
a non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
nonexistent : str, default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST. Valid values are:
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise a NonExistentTimeError if there are
nonexistent times.
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Same type as the input.
Raises
------
TypeError
If the TimeSeries is tz-aware and tz is not None.
Examples
--------
Localize local times:
>>> s = pd.Series([1],
... index=pd.DatetimeIndex(['2018-09-15 01:30:00']))
>>> s.tz_localize('CET')
2018-09-15 01:30:00+02:00 1
dtype: int64
Be careful with DST changes. When there is sequential data, pandas
can infer the DST time:
>>> s = pd.Series(range(7),
... index=pd.DatetimeIndex(['2018-10-28 01:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 03:00:00',
... '2018-10-28 03:30:00']))
>>> s.tz_localize('CET', ambiguous='infer')
2018-10-28 01:30:00+02:00 0
2018-10-28 02:00:00+02:00 1
2018-10-28 02:30:00+02:00 2
2018-10-28 02:00:00+01:00 3
2018-10-28 02:30:00+01:00 4
2018-10-28 03:00:00+01:00 5
2018-10-28 03:30:00+01:00 6
dtype: int64
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly
>>> s = pd.Series(range(3),
... index=pd.DatetimeIndex(['2018-10-28 01:20:00',
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.tz_localize('CET', ambiguous=np.array([True, True, False]))
2018-10-28 01:20:00+02:00 0
2018-10-28 02:36:00+02:00 1
2018-10-28 03:46:00+01:00 2
dtype: int64
If the DST transition causes nonexistent times, you can shift these
dates forward or backwards with a timedelta object or `'shift_forward'`
or `'shift_backward'`.
>>> s = pd.Series(range(2),
... index=pd.DatetimeIndex(['2015-03-29 02:30:00',
... '2015-03-29 03:30:00']))
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
2015-03-29 03:00:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
2015-03-29 01:59:59.999999999+01:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
2015-03-29 03:30:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
"""
nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, timedelta
):
raise ValueError(
"The nonexistent argument must be one of 'raise', "
"'NaT', 'shift_forward', 'shift_backward' or "
"a timedelta object"
)
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_localize(ax, tz, ambiguous, nonexistent):
if not hasattr(ax, "tz_localize"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError(f"The level {level} is not valid")
ax = _tz_localize(ax, tz, ambiguous, nonexistent)
result = self._constructor(self._data, copy=copy)
result = result.set_axis(ax, axis=axis, inplace=False)
return result.__finalize__(self)
# ----------------------------------------------------------------------
# Numeric Methods
def abs(self: FrameOrSeries) -> FrameOrSeries:
"""
Return a Series/DataFrame with absolute numeric value of each element.
This function only applies to elements that are all numeric.
Returns
-------
abs
Series/DataFrame containing the absolute value of each element.
See Also
--------
numpy.absolute : Calculate the absolute value element-wise.
Notes
-----
For ``complex`` inputs such as ``1.2 + 1j``, the absolute value is
:math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
Absolute numeric values in a Series.
>>> s = pd.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a Series with complex numbers.
>>> s = pd.Series([1.2 + 1j])
>>> s.abs()
0 1.56205
dtype: float64
Absolute numeric values in a Series with a Timedelta element.
>>> s = pd.Series([pd.Timedelta('1 days')])
>>> s.abs()
0 1 days
dtype: timedelta64[ns]
Select rows with data closest to certain value using argsort (from
`StackOverflow <https://stackoverflow.com/a/17758115>`__).
>>> df = pd.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... })
>>> df
a b c
0 4 10 100
1 5 20 50
2 6 30 -30
3 7 40 -50
>>> df.loc[(df.c - 43).abs().argsort()]
a b c
1 5 20 50
0 4 10 100
2 6 30 -30
3 7 40 -50
"""
return np.abs(self)
def describe(
self: FrameOrSeries, percentiles=None, include=None, exclude=None
) -> FrameOrSeries:
"""
Generate descriptive statistics.
Descriptive statistics include those that summarize the central
tendency, dispersion and shape of a
dataset's distribution, excluding ``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list-like of numbers, optional
The percentiles to include in the output. All should
fall between 0 and 1. The default is
``[.25, .5, .75]``, which returns the 25th, 50th, and
75th percentiles.
include : 'all', list-like of dtypes or None (default), optional
A white list of data types to include in the result. Ignored
for ``Series``. Here are the options:
- 'all' : All columns of the input will be included in the output.
- A list-like of dtypes : Limits the results to the
provided data types.
To limit the result to numeric types submit
``numpy.number``. To limit it instead to object columns submit
the ``numpy.object`` data type. Strings
can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
select pandas categorical columns, use ``'category'``
- None (default) : The result will include all numeric columns.
exclude : list-like of dtypes or None (default), optional
A black list of data types to omit from the result. Ignored
for ``Series``. Here are the options:
- A list-like of dtypes : Excludes the provided data types
from the result. To exclude numeric types submit
``numpy.number``. To exclude object columns submit the data
type ``numpy.object``. Strings can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
exclude pandas categorical columns, use ``'category'``
- None (default) : The result will exclude nothing.
Returns
-------
Series or DataFrame
Summary statistics of the Series or Dataframe provided.
See Also
--------
DataFrame.count: Count number of non-NA/null observations.
DataFrame.max: Maximum of the values in the object.
DataFrame.min: Minimum of the values in the object.
DataFrame.mean: Mean of the values.
DataFrame.std: Standard deviation of the observations.
DataFrame.select_dtypes: Subset of a DataFrame including/excluding
columns based on their dtype.
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and
upper percentiles. By default the lower percentile is ``25`` and the
upper percentile is ``75``. The ``50`` percentile is the
same as the median.
For object data (e.g. strings or timestamps), the result's index
will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``
is the most common value. The ``freq`` is the most common value's
frequency. Timestamps also include the ``first`` and ``last`` items.
If multiple object values have the highest count, then the
``count`` and ``top`` results will be arbitrarily chosen from
among those with the highest count.
For mixed data types provided via a ``DataFrame``, the default is to
return only an analysis of numeric columns. If the dataframe consists
only of object and categorical data without any numeric columns, the
default is to return an analysis of both the object and categorical
columns. If ``include='all'`` is provided as an option, the result
will include a union of attributes of each type.
The `include` and `exclude` parameters can be used to limit
which columns in a ``DataFrame`` are analyzed for the output.
The parameters are ignored when analyzing a ``Series``.
Examples
--------
Describing a numeric ``Series``.
>>> s = pd.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
dtype: float64
Describing a categorical ``Series``.
>>> s = pd.Series(['a', 'a', 'b', 'c'])
>>> s.describe()
count 4
unique 3
top a
freq 2
dtype: object
Describing a timestamp ``Series``.
>>> s = pd.Series([
... np.datetime64("2000-01-01"),
... np.datetime64("2010-01-01"),
... np.datetime64("2010-01-01")
... ])
>>> s.describe()
count 3
unique 2
top 2010-01-01 00:00:00
freq 2
first 2000-01-01 00:00:00
last 2010-01-01 00:00:00
dtype: object
Describing a ``DataFrame``. By default only numeric fields
are returned.
>>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']),
... 'numeric': [1, 2, 3],
... 'object': ['a', 'b', 'c']
... })
>>> df.describe()
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Describing all columns of a ``DataFrame`` regardless of data type.
>>> df.describe(include='all')
categorical numeric object
count 3 3.0 3
unique 3 NaN 3
top f NaN c
freq 1 NaN 1
mean NaN 2.0 NaN
std NaN 1.0 NaN
min NaN 1.0 NaN
25% NaN 1.5 NaN
50% NaN 2.0 NaN
75% NaN 2.5 NaN
max NaN 3.0 NaN
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Name: numeric, dtype: float64
Including only numeric columns in a ``DataFrame`` description.
>>> df.describe(include=[np.number])
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Including only string columns in a ``DataFrame`` description.
>>> df.describe(include=[np.object])
object
count 3
unique 3
top c
freq 1
Including only categorical columns from a ``DataFrame`` description.
>>> df.describe(include=['category'])
categorical
count 3
unique 3
top f
freq 1
Excluding numeric columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.number])
categorical object
count 3 3
unique 3 3
top f c
freq 1 1
Excluding object columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.object])
categorical numeric
count 3 3.0
unique 3 NaN
top f NaN
freq 1 NaN
mean NaN 2.0
std NaN 1.0
min NaN 1.0
25% NaN 1.5
50% NaN 2.0
75% NaN 2.5
max NaN 3.0
"""
if self.ndim == 2 and self.columns.size == 0:
raise ValueError("Cannot describe a DataFrame without columns")
if percentiles is not None:
# explicit conversion of `percentiles` to list
percentiles = list(percentiles)
# get them all to be in [0, 1]
validate_percentile(percentiles)
# median should always be included
if 0.5 not in percentiles:
percentiles.append(0.5)
percentiles = np.asarray(percentiles)
else:
percentiles = np.array([0.25, 0.5, 0.75])
# sort and check for duplicates
unique_pcts = np.unique(percentiles)
if len(unique_pcts) < len(percentiles):
raise ValueError("percentiles cannot contain duplicates")
percentiles = unique_pcts
formatted_percentiles = format_percentiles(percentiles)
def describe_numeric_1d(series):
stat_index = (
["count", "mean", "std", "min"] + formatted_percentiles + ["max"]
)
d = (
[series.count(), series.mean(), series.std(), series.min()]
+ series.quantile(percentiles).tolist()
+ [series.max()]
)
return pd.Series(d, index=stat_index, name=series.name)
def describe_categorical_1d(data):
names = ["count", "unique"]
objcounts = data.value_counts()
count_unique = len(objcounts[objcounts != 0])
result = [data.count(), count_unique]
dtype = None
if result[1] > 0:
top, freq = objcounts.index[0], objcounts.iloc[0]
names += ["top", "freq"]
result += [top, freq]
# If the DataFrame is empty, set 'top' and 'freq' to None
# to maintain output shape consistency
else:
names += ["top", "freq"]
result += [np.nan, np.nan]
dtype = "object"
return pd.Series(result, index=names, name=data.name, dtype=dtype)
def describe_timestamp_1d(data):
# GH-30164
stat_index = ["count", "mean", "min"] + formatted_percentiles + ["max"]
d = (
[data.count(), data.mean(), data.min()]
+ data.quantile(percentiles).tolist()
+ [data.max()]
)
return pd.Series(d, index=stat_index, name=data.name)
def describe_1d(data):
if is_bool_dtype(data):
return describe_categorical_1d(data)
elif is_numeric_dtype(data):
return describe_numeric_1d(data)
elif is_datetime64_any_dtype(data):
return describe_timestamp_1d(data)
elif is_timedelta64_dtype(data):
return describe_numeric_1d(data)
else:
return describe_categorical_1d(data)
if self.ndim == 1:
return describe_1d(self)
elif (include is None) and (exclude is None):
# when some numerics are found, keep only numerics
data = self.select_dtypes(include=[np.number])
if len(data.columns) == 0:
data = self
elif include == "all":
if exclude is not None:
msg = "exclude must be None when include is 'all'"
raise ValueError(msg)
data = self
else:
data = self.select_dtypes(include=include, exclude=exclude)
ldesc = [describe_1d(s) for _, s in data.items()]
# set a convenient order for rows
names: List[Label] = []
ldesc_indexes = sorted((x.index for x in ldesc), key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
d = pd.concat([x.reindex(names, copy=False) for x in ldesc], axis=1, sort=False)
d.columns = data.columns.copy()
return d
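# Editor's note: a small illustrative check (not from the original source) of
# the percentile handling above -- the median (50%) is always appended when a
# custom ``percentiles`` list omits it. Assumes ``pd`` is pandas.
# >>> pd.Series([1, 2, 3, 4]).describe(percentiles=[0.1]).index.tolist()
# ['count', 'mean', 'std', 'min', '10%', '50%', 'max']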
_shared_docs[
"pct_change"
] = """
Percentage change between the current and a prior element.
Computes the percentage change from the immediately previous row by
default. This is useful in comparing the percentage of change in a time
series of elements.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
fill_method : str, default 'pad'
How to handle NAs before computing percent changes.
limit : int, default None
The number of consecutive NAs to fill before stopping.
freq : DateOffset, timedelta, or str, optional
Increment to use from time series API (e.g. 'M' or BDay()).
**kwargs
Additional keyword arguments are passed into
`DataFrame.shift` or `Series.shift`.
Returns
-------
chg : Series or DataFrame
The same type as the calling object.
See Also
--------
Series.diff : Compute the difference of two elements in a Series.
DataFrame.diff : Compute the difference of two elements in a DataFrame.
Series.shift : Shift the index by some number of periods.
DataFrame.shift : Shift the index by some number of periods.
Examples
--------
**Series**
>>> s = pd.Series([90, 91, 85])
>>> s
0 90
1 91
2 85
dtype: int64
>>> s.pct_change()
0 NaN
1 0.011111
2 -0.065934
dtype: float64
>>> s.pct_change(periods=2)
0 NaN
1 NaN
2 -0.055556
dtype: float64
See the percentage change in a Series where NAs are filled by carrying the
last valid observation forward to the next valid one.
>>> s = pd.Series([90, 91, None, 85])
>>> s
0 90.0
1 91.0
2 NaN
3 85.0
dtype: float64
>>> s.pct_change(fill_method='ffill')
0 NaN
1 0.011111
2 0.000000
3 -0.065934
dtype: float64
**DataFrame**
Percentage change in French franc, Deutsche Mark, and Italian lira from
1980-01-01 to 1980-03-01.
>>> df = pd.DataFrame({
... 'FR': [4.0405, 4.0963, 4.3149],
... 'GR': [1.7246, 1.7482, 1.8519],
... 'IT': [804.74, 810.01, 860.13]},
... index=['1980-01-01', '1980-02-01', '1980-03-01'])
>>> df
FR GR IT
1980-01-01 4.0405 1.7246 804.74
1980-02-01 4.0963 1.7482 810.01
1980-03-01 4.3149 1.8519 860.13
>>> df.pct_change()
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 0.013810 0.013684 0.006549
1980-03-01 0.053365 0.059318 0.061876
Percentage of change in GOOG and APPL stock volume. Shows computing
the percentage change between columns.
>>> df = pd.DataFrame({
... '2016': [1769950, 30586265],
... '2015': [1500923, 40912316],
... '2014': [1371819, 41403351]},
... index=['GOOG', 'APPL'])
>>> df
2016 2015 2014
GOOG 1769950 1500923 1371819
APPL 30586265 40912316 41403351
>>> df.pct_change(axis='columns')
2016 2015 2014
GOOG NaN -0.151997 -0.086016
APPL NaN 0.337604 0.012002
"""
@Appender(_shared_docs["pct_change"] % _shared_doc_kwargs)
def pct_change(
self: FrameOrSeries,
periods=1,
fill_method="pad",
limit=None,
freq=None,
**kwargs,
) -> FrameOrSeries:
# TODO: Not sure if above is correct - need someone to confirm.
axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name))
if fill_method is None:
data = self
else:
data = self._ensure_type(
self.fillna(method=fill_method, axis=axis, limit=limit)
)
rs = data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1
if freq is not None:
# Shift method is implemented differently when freq is not None
# We want to restore the original index
rs = rs.loc[~rs.index.duplicated()]
rs = rs.reindex_like(data)
return rs
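# Editor's note: an illustrative sketch (not part of the original source) of
# the core computation above -- pct_change is divide-by-shift minus one when no
# NA filling is needed. Assumes ``pd`` is pandas.
# >>> s = pd.Series([90, 91, 85])
# >>> (s / s.shift(1) - 1).equals(s.pct_change())
# True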
def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):
if axis is None:
raise ValueError("Must specify 'axis' when aggregating by level.")
grouped = self.groupby(level=level, axis=axis, sort=False)
if hasattr(grouped, name) and skipna:
return getattr(grouped, name)(**kwargs)
axis = self._get_axis_number(axis)
method = getattr(type(self), name)
applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)
return grouped.aggregate(applyf)
@classmethod
def _add_numeric_operations(cls):
"""
Add the operations to the cls; evaluate the doc strings again
"""
axis_descr, name1, name2 = _doc_parms(cls)
cls.any = _make_logical_function(
cls,
"any",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc=_any_desc,
func=nanops.nanany,
see_also=_any_see_also,
examples=_any_examples,
empty_value=False,
)
cls.all = _make_logical_function(
cls,
"all",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc=_all_desc,
func=nanops.nanall,
see_also=_all_see_also,
examples=_all_examples,
empty_value=True,
)
@Substitution(
desc="Return the mean absolute deviation of the values "
"for the requested axis.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also="",
examples="",
)
@Appender(_num_doc_mad)
def mad(self, axis=None, skipna=None, level=None):
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level("mad", axis=axis, level=level, skipna=skipna)
data = self._get_numeric_data()
if axis == 0:
demeaned = data - data.mean(axis=0)
else:
demeaned = data.sub(data.mean(axis=1), axis=0)
return np.abs(demeaned).mean(axis=axis, skipna=skipna)
cls.mad = mad
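# Editor's note: an illustrative check (not from the original source) that
# ``mad`` above is the mean of absolute deviations from the mean.
# >>> s = pd.Series([1.0, 2.0, 3.0, 4.0])
# >>> s.mad() == (s - s.mean()).abs().mean()
# True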
cls.sem = _make_stat_function_ddof(
cls,
"sem",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return unbiased standard error of the mean over requested "
"axis.\n\nNormalized by N-1 by default. This can be changed "
"using the ddof argument",
func=nanops.nansem,
)
cls.var = _make_stat_function_ddof(
cls,
"var",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return unbiased variance over requested axis.\n\nNormalized by "
"N-1 by default. This can be changed using the ddof argument",
func=nanops.nanvar,
)
cls.std = _make_stat_function_ddof(
cls,
"std",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return sample standard deviation over requested axis."
"\n\nNormalized by N-1 by default. This can be changed using the "
"ddof argument",
func=nanops.nanstd,
)
cls.cummin = _make_cum_function(
cls,
"cummin",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="minimum",
accum_func=np.minimum.accumulate,
accum_func_name="min",
mask_a=np.inf,
mask_b=np.nan,
examples=_cummin_examples,
)
cls.cumsum = _make_cum_function(
cls,
"cumsum",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="sum",
accum_func=np.cumsum,
accum_func_name="sum",
mask_a=0.0,
mask_b=np.nan,
examples=_cumsum_examples,
)
cls.cumprod = _make_cum_function(
cls,
"cumprod",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="product",
accum_func=np.cumprod,
accum_func_name="prod",
mask_a=1.0,
mask_b=np.nan,
examples=_cumprod_examples,
)
cls.cummax = _make_cum_function(
cls,
"cummax",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="maximum",
accum_func=np.maximum.accumulate,
accum_func_name="max",
mask_a=-np.inf,
mask_b=np.nan,
examples=_cummax_examples,
)
cls.sum = _make_min_count_stat_function(
cls,
"sum",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the sum of the values for the requested axis.\n\n"
"This is equivalent to the method ``numpy.sum``.",
func=nanops.nansum,
see_also=_stat_func_see_also,
examples=_sum_examples,
)
cls.mean = _make_stat_function(
cls,
"mean",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the mean of the values for the requested axis.",
func=nanops.nanmean,
)
cls.skew = _make_stat_function(
cls,
"skew",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return unbiased skew over requested axis.\n\nNormalized by N-1.",
func=nanops.nanskew,
)
cls.kurt = _make_stat_function(
cls,
"kurt",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return unbiased kurtosis over requested axis.\n\n"
"Kurtosis obtained using Fisher's definition of\n"
"kurtosis (kurtosis of normal == 0.0). Normalized "
"by N-1.",
func=nanops.nankurt,
)
cls.kurtosis = cls.kurt
cls.prod = _make_min_count_stat_function(
cls,
"prod",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the product of the values for the requested axis.",
func=nanops.nanprod,
examples=_prod_examples,
)
cls.product = cls.prod
cls.median = _make_stat_function(
cls,
"median",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the median of the values for the requested axis.",
func=nanops.nanmedian,
)
cls.max = _make_stat_function(
cls,
"max",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the maximum of the values for the requested axis.\n\n"
"If you want the *index* of the maximum, use ``idxmax``. This is"
"the equivalent of the ``numpy.ndarray`` method ``argmax``.",
func=nanops.nanmax,
see_also=_stat_func_see_also,
examples=_max_examples,
)
cls.min = _make_stat_function(
cls,
"min",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the minimum of the values for the requested axis.\n\n"
"If you want the *index* of the minimum, use ``idxmin``. This is"
"the equivalent of the ``numpy.ndarray`` method ``argmin``.",
func=nanops.nanmin,
see_also=_stat_func_see_also,
examples=_min_examples,
)
@classmethod
def _add_series_or_dataframe_operations(cls):
"""
Add the series or dataframe only operations to the cls; evaluate
the doc strings again.
"""
from pandas.core.window import EWM, Expanding, Rolling, Window
@Appender(Rolling.__doc__)
def rolling(
self,
window,
min_periods=None,
center=False,
win_type=None,
on=None,
axis=0,
closed=None,
):
axis = self._get_axis_number(axis)
if win_type is not None:
return Window(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
)
return Rolling(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
)
cls.rolling = rolling
@Appender(Expanding.__doc__)
def expanding(self, min_periods=1, center=False, axis=0):
axis = self._get_axis_number(axis)
return Expanding(self, min_periods=min_periods, center=center, axis=axis)
cls.expanding = expanding
@Appender(EWM.__doc__)
def ewm(
self,
com=None,
span=None,
halflife=None,
alpha=None,
min_periods=0,
adjust=True,
ignore_na=False,
axis=0,
):
axis = self._get_axis_number(axis)
return EWM(
self,
com=com,
span=span,
halflife=halflife,
alpha=alpha,
min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na,
axis=axis,
)
cls.ewm = ewm
@Appender(_shared_docs["transform"] % dict(axis="", **_shared_doc_kwargs))
def transform(self, func, *args, **kwargs):
result = self.agg(func, *args, **kwargs)
if is_scalar(result) or len(result) != len(self):
raise ValueError("transforms cannot produce aggregated results")
return result
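# Editor's note: a minimal sketch (not from the original source) of the length
# check above -- an element-wise function passes, a reducer raises.
# >>> pd.Series([1, 2, 3]).transform(lambda x: x + 1).tolist()
# [2, 3, 4]
# >>> pd.Series([1, 2, 3]).transform('sum')
# Traceback (most recent call last):
# ...
# ValueError: transforms cannot produce aggregated results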
# ----------------------------------------------------------------------
# Misc methods
_shared_docs[
"valid_index"
] = """
Return index for %(position)s non-NA/null value.
Returns
-------
scalar : type of index
Notes
-----
If all elements are non-NA/null, returns None.
Also returns None for empty %(klass)s.
"""
def _find_valid_index(self, how: str):
"""
Retrieves the index of the first valid value.
Parameters
----------
how : {'first', 'last'}
Use this parameter to change between the first or last valid index.
Returns
-------
idx_first_valid : type of index
"""
idxpos = find_valid_index(self._values, how)
if idxpos is None:
return None
return self.index[idxpos]
@Appender(
_shared_docs["valid_index"] % {"position": "first", "klass": "Series/DataFrame"}
)
def first_valid_index(self):
return self._find_valid_index("first")
@Appender(
_shared_docs["valid_index"] % {"position": "last", "klass": "Series/DataFrame"}
)
def last_valid_index(self):
return self._find_valid_index("last")
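# Editor's note: an illustrative example (not from the original source) of the
# two helpers above. Assumes ``pd``/``np`` are pandas/numpy.
# >>> s = pd.Series([np.nan, 2.0, np.nan, 4.0, np.nan])
# >>> s.first_valid_index(), s.last_valid_index()
# (1, 3)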
def _doc_parms(cls):
"""Return a tuple of the doc parms."""
axis_descr = (
f"{{{', '.join(f'{a} ({i})' for i, a in enumerate(cls._AXIS_ORDERS))}}}"
)
name = cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else "scalar"
name2 = cls.__name__
return axis_descr, name, name2
_num_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
Axis for the function to be applied on.
skipna : bool, default True
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
%(min_count)s\
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
%(name1)s or %(name2)s (if level specified)\
%(see_also)s\
%(examples)s
"""
_num_doc_mad = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
Axis for the function to be applied on.
skipna : bool, default None
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
Returns
-------
%(name1)s or %(name2)s (if level specified)\
%(see_also)s\
%(examples)s
"""
_num_ddof_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
%(name1)s or %(name2)s (if level specified)\n"""
_bool_doc = """
%(desc)s
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
* 1 / 'columns' : reduce the columns, return a Series whose index is the
original index.
* None : reduce all axes, return a scalar.
bool_only : bool, default None
Include only boolean columns. If None, will attempt to use everything,
then use only boolean data. Not implemented for Series.
skipna : bool, default True
Exclude NA/null values. If the entire row/column is NA and skipna is
True, then the result will be %(empty_value)s, as for an empty row/column.
If skipna is False, then NA are treated as True, because these are not
equal to zero.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(name1)s or %(name2)s
If level is specified, then, %(name2)s is returned; otherwise, %(name1)s
is returned.
%(see_also)s
%(examples)s"""
_all_desc = """\
Return whether all elements are True, potentially over an axis.
Returns True unless there is at least one element within a series or
along a Dataframe axis that is False or equivalent (e.g. zero or
empty)."""
_all_examples = """\
Examples
--------
**Series**
>>> pd.Series([True, True]).all()
True
>>> pd.Series([True, False]).all()
False
>>> pd.Series([]).all()
True
>>> pd.Series([np.nan]).all()
True
>>> pd.Series([np.nan]).all(skipna=False)
True
**DataFrames**
Create a dataframe from a dictionary.
>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})
>>> df
col1 col2
0 True True
1 True False
Default behaviour checks if column-wise values all return True.
>>> df.all()
col1 True
col2 False
dtype: bool
Specify ``axis='columns'`` to check if row-wise values all return True.
>>> df.all(axis='columns')
0 True
1 False
dtype: bool
Or ``axis=None`` for whether every value is True.
>>> df.all(axis=None)
False
"""
_all_see_also = """\
See Also
--------
Series.all : Return True if all elements are True.
DataFrame.any : Return True if one (or more) elements are True.
"""
_cnum_doc = """
Return cumulative %(desc)s over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative
%(desc)s.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The index or the name of the axis. 0 is equivalent to None or 'index'.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
*args, **kwargs :
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(name1)s or %(name2)s
See Also
--------
core.window.Expanding.%(accum_func_name)s : Similar functionality
but ignores ``NaN`` values.
%(name2)s.%(accum_func_name)s : Return the %(desc)s over
%(name2)s axis.
%(name2)s.cummax : Return cumulative maximum over %(name2)s axis.
%(name2)s.cummin : Return cumulative minimum over %(name2)s axis.
%(name2)s.cumsum : Return cumulative sum over %(name2)s axis.
%(name2)s.cumprod : Return cumulative product over %(name2)s axis.
%(examples)s"""
_cummin_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummin()
0 2.0
1 NaN
2 2.0
3 -1.0
4 -1.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummin(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
To iterate over columns and find the minimum in each row,
use ``axis=1``
>>> df.cummin(axis=1)
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
"""
_cumsum_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumsum()
0 2.0
1 NaN
2 7.0
3 6.0
4 6.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumsum(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
To iterate over columns and find the sum in each row,
use ``axis=1``
>>> df.cumsum(axis=1)
A B
0 2.0 3.0
1 3.0 NaN
2 1.0 1.0
"""
_cumprod_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumprod()
0 2.0
1 NaN
2 10.0
3 -10.0
4 -0.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumprod(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the product
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 6.0 0.0
To iterate over columns and find the product in each row,
use ``axis=1``
>>> df.cumprod(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 0.0
"""
_cummax_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummax()
0 2.0
1 NaN
2 5.0
3 5.0
4 5.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummax(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
To iterate over columns and find the maximum in each row,
use ``axis=1``
>>> df.cummax(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 1.0
"""
_any_see_also = """\
See Also
--------
numpy.any : Numpy version of this method.
Series.any : Return whether any element is True.
Series.all : Return whether all elements are True.
DataFrame.any : Return whether any element is True over requested axis.
DataFrame.all : Return whether all elements are True over requested axis.
"""
_any_desc = """\
Return whether any element is True, potentially over an axis.
Returns False unless there is at least one element within a series or
along a Dataframe axis that is True or equivalent (e.g. non-zero or
non-empty)."""
_any_examples = """\
Examples
--------
**Series**
For Series input, the output is a scalar indicating whether any element
is True.
>>> pd.Series([False, False]).any()
False
>>> pd.Series([True, False]).any()
True
>>> pd.Series([]).any()
False
>>> pd.Series([np.nan]).any()
False
>>> pd.Series([np.nan]).any(skipna=False)
True
**DataFrame**
Whether each column contains at least one True element (the default).
>>> df = pd.DataFrame({"A": [1, 2], "B": [0, 2], "C": [0, 0]})
>>> df
A B C
0 1 0 0
1 2 2 0
>>> df.any()
A True
B True
C False
dtype: bool
Aggregating over the columns.
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 2]})
>>> df
A B
0 True 1
1 False 2
>>> df.any(axis='columns')
0 True
1 True
dtype: bool
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 0]})
>>> df
A B
0 True 1
1 False 0
>>> df.any(axis='columns')
0 True
1 False
dtype: bool
Aggregating over the entire DataFrame with ``axis=None``.
>>> df.any(axis=None)
True
`any` for an empty DataFrame is an empty Series.
>>> pd.DataFrame([]).any()
Series([], dtype: bool)
"""
_shared_docs[
"stat_func_example"
] = """
Examples
--------
>>> idx = pd.MultiIndex.from_arrays([
... ['warm', 'warm', 'cold', 'cold'],
... ['dog', 'falcon', 'fish', 'spider']],
... names=['blooded', 'animal'])
>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)
>>> s
blooded animal
warm dog 4
falcon 2
cold fish 0
spider 8
Name: legs, dtype: int64
>>> s.{stat_func}()
{default_output}
{verb} using level names, as well as indices.
>>> s.{stat_func}(level='blooded')
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64
>>> s.{stat_func}(level=0)
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64"""
_sum_examples = _shared_docs["stat_func_example"].format(
stat_func="sum", verb="Sum", default_output=14, level_output_0=6, level_output_1=8
)
_sum_examples += """
By default, the sum of an empty or all-NA Series is ``0``.
>>> pd.Series([]).sum() # min_count=0 is the default
0.0
This can be controlled with the ``min_count`` parameter. For example, if
you'd like the sum of an empty series to be NaN, pass ``min_count=1``.
>>> pd.Series([]).sum(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).sum()
0.0
>>> pd.Series([np.nan]).sum(min_count=1)
nan"""
_max_examples = _shared_docs["stat_func_example"].format(
stat_func="max", verb="Max", default_output=8, level_output_0=4, level_output_1=8
)
_min_examples = _shared_docs["stat_func_example"].format(
stat_func="min", verb="Min", default_output=0, level_output_0=2, level_output_1=0
)
_stat_func_see_also = """
See Also
--------
Series.sum : Return the sum.
Series.min : Return the minimum.
Series.max : Return the maximum.
Series.idxmin : Return the index of the minimum.
Series.idxmax : Return the index of the maximum.
DataFrame.sum : Return the sum over the requested axis.
DataFrame.min : Return the minimum over the requested axis.
DataFrame.max : Return the maximum over the requested axis.
DataFrame.idxmin : Return the index of the minimum over the requested axis.
DataFrame.idxmax : Return the index of the maximum over the requested axis."""
_prod_examples = """
Examples
--------
By default, the product of an empty or all-NA Series is ``1``
>>> pd.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> pd.Series([]).prod(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).prod()
1.0
>>> pd.Series([np.nan]).prod(min_count=1)
nan"""
_min_count_stub = """\
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
.. versionadded:: 0.22.0
Added with the default being 0. This means the sum of an all-NA
or empty Series is 0, and the product of an all-NA or empty
Series is 1.
"""
def _make_min_count_stat_function(
cls,
name: str,
name1: str,
name2: str,
axis_descr: str,
desc: str,
func: Callable,
see_also: str = "",
examples: str = "",
) -> Callable:
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count=_min_count_stub,
see_also=see_also,
examples=examples,
)
@Appender(_num_doc)
def stat_func(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=0,
**kwargs,
):
if name == "sum":
nv.validate_sum(tuple(), kwargs)
elif name == "prod":
nv.validate_prod(tuple(), kwargs)
else:
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(
name, axis=axis, level=level, skipna=skipna, min_count=min_count
)
return self._reduce(
func,
name=name,
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
return set_function_name(stat_func, name, cls)
def _make_stat_function(
cls,
name: str,
name1: str,
name2: str,
axis_descr: str,
desc: str,
func: Callable,
see_also: str = "",
examples: str = "",
) -> Callable:
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also=see_also,
examples=examples,
)
@Appender(_num_doc)
def stat_func(
self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs
):
if name == "median":
nv.validate_median(tuple(), kwargs)
else:
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
return self._reduce(
func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only
)
return set_function_name(stat_func, name, cls)
def _make_stat_function_ddof(
cls, name: str, name1: str, name2: str, axis_descr: str, desc: str, func: Callable
) -> Callable:
@Substitution(desc=desc, name1=name1, name2=name2, axis_descr=axis_descr)
@Appender(_num_ddof_doc)
def stat_func(
self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
):
nv.validate_stat_ddof_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(
name, axis=axis, level=level, skipna=skipna, ddof=ddof
)
return self._reduce(
func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof
)
return set_function_name(stat_func, name, cls)
def _make_cum_function(
cls,
name: str,
name1: str,
name2: str,
axis_descr: str,
desc: str,
accum_func: Callable,
accum_func_name: str,
mask_a: float,
mask_b: float,
examples: str,
) -> Callable:
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
accum_func_name=accum_func_name,
examples=examples,
)
@Appender(_cnum_doc)
def cum_func(self, axis=None, skipna=True, *args, **kwargs):
skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)
if axis is None:
axis = self._stat_axis_number
else:
axis = self._get_axis_number(axis)
if axis == 1:
return cum_func(self.T, axis=0, skipna=skipna, *args, **kwargs).T
def na_accum_func(blk_values):
# We will be applying this function to block values
if blk_values.dtype.kind in ["m", "M"]:
# GH#30460, GH#29058
# numpy 1.18 started sorting NaTs at the end instead of beginning,
# so we need to work around to maintain backwards-consistency.
orig_dtype = blk_values.dtype
# We need to define mask before masking NaTs
mask = isna(blk_values)
if accum_func == np.minimum.accumulate:
# Note: the accum_func comparison fails as an "is" comparison
y = blk_values.view("i8")
y[mask] = np.iinfo(np.int64).max
changed = True
else:
y = blk_values
changed = False
result = accum_func(y.view("i8"), axis)
if skipna:
np.putmask(result, mask, iNaT)
elif accum_func == np.minimum.accumulate:
# Restore NaTs that we masked previously
nz = (~np.asarray(mask)).nonzero()[0]
if len(nz):
# everything up to the first non-na entry stays NaT
result[: nz[0]] = iNaT
if changed:
# restore NaT elements
y[mask] = iNaT # TODO: could try/finally for this?
if isinstance(blk_values, np.ndarray):
result = result.view(orig_dtype)
else:
# DatetimeArray
result = type(blk_values)._from_sequence(result, dtype=orig_dtype)
elif skipna and not issubclass(
blk_values.dtype.type, (np.integer, np.bool_)
):
vals = blk_values.copy().T
mask = isna(vals)
np.putmask(vals, mask, mask_a)
result = accum_func(vals, axis)
np.putmask(result, mask, mask_b)
else:
result = accum_func(blk_values.T, axis)
# transpose back for ndarray, not for EA
return result.T if hasattr(result, "T") else result
result = self._data.apply(na_accum_func)
d = self._construct_axes_dict()
d["copy"] = False
return self._constructor(result, **d).__finalize__(self)
return set_function_name(cum_func, name, cls)
def _make_logical_function(
cls,
name: str,
name1: str,
name2: str,
axis_descr: str,
desc: str,
func: Callable,
see_also: str,
examples: str,
empty_value: bool,
) -> Callable:
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
see_also=see_also,
examples=examples,
empty_value=empty_value,
)
@Appender(_bool_doc)
def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
nv.validate_logical_func(tuple(), kwargs, fname=name)
if level is not None:
if bool_only is not None:
raise NotImplementedError(
"Option bool_only is not implemented with option level."
)
return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
return self._reduce(
func,
name=name,
axis=axis,
skipna=skipna,
numeric_only=bool_only,
filter_type="bool",
)
return set_function_name(logical_func, name, cls)
# -*- coding: utf-8 -*-
# Copyright 2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from iconservice import *
TAG = 'Governance'
DEBUG = False
CURRENT = 'current'
NEXT = 'next'
STATUS = 'status'
DEPLOY_TX_HASH = 'deployTxHash'
AUDIT_TX_HASH = 'auditTxHash'
VALID_STATUS_KEYS = [STATUS, DEPLOY_TX_HASH, AUDIT_TX_HASH]
STATUS_PENDING = 'pending'
STATUS_ACTIVE = 'active'
STATUS_INACTIVE = 'inactive'
STATUS_REJECTED = 'rejected'
STEP_TYPE_DEFAULT = 'default'
STEP_TYPE_CONTRACT_CALL = 'contractCall'
STEP_TYPE_CONTRACT_CREATE = 'contractCreate'
STEP_TYPE_CONTRACT_UPDATE = 'contractUpdate'
STEP_TYPE_CONTRACT_DESTRUCT = 'contractDestruct'
STEP_TYPE_CONTRACT_SET = 'contractSet'
STEP_TYPE_GET = 'get'
STEP_TYPE_SET = 'set'
STEP_TYPE_REPLACE = 'replace'
STEP_TYPE_DELETE = 'delete'
STEP_TYPE_INPUT = 'input'
STEP_TYPE_EVENT_LOG = 'eventLog'
STEP_TYPE_API_CALL = 'apiCall'
INITIAL_STEP_COST_KEYS = [STEP_TYPE_DEFAULT,
STEP_TYPE_CONTRACT_CALL, STEP_TYPE_CONTRACT_CREATE, STEP_TYPE_CONTRACT_UPDATE,
STEP_TYPE_CONTRACT_DESTRUCT, STEP_TYPE_CONTRACT_SET,
STEP_TYPE_GET, STEP_TYPE_SET, STEP_TYPE_REPLACE, STEP_TYPE_DELETE, STEP_TYPE_INPUT,
STEP_TYPE_EVENT_LOG, STEP_TYPE_API_CALL]
CONTEXT_TYPE_INVOKE = 'invoke'
CONTEXT_TYPE_QUERY = 'query'
class StepCosts:
"""
DB for stepCosts management.
It is combined DictDB and ArrayDB in order to iterate items.
"""
_STEP_TYPES = 'step_types'
_STEP_COSTS = 'step_costs'
def __init__(self, db: IconScoreDatabase):
self._step_types = ArrayDB(self._STEP_TYPES, db, value_type=str)
self._step_costs = DictDB(self._STEP_COSTS, db, value_type=int)
def __setitem__(self, step_type: str, cost: int):
if step_type not in self._step_costs:
self._step_types.put(step_type)
self._step_costs[step_type] = cost
def __getitem__(self, step_type: str):
return self._step_costs[step_type]
def __delitem__(self, step_type: str):
# delete does not actually do delete but set zero
if step_type in self._step_costs:
self._step_costs[step_type] = 0
def __contains__(self, step_type: str):
return step_type in self._step_costs
def __iter__(self):
return self._step_types.__iter__()
def __len__(self):
return self._step_types.__len__()
def items(self):
for step_type in self._step_types:
yield (step_type, self._step_costs[step_type])
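# Editor's note: a pure-Python sketch (not part of this SCORE) of the pattern
# above -- a dict for lookups paired with an insertion-ordered key list so the
# mapping stays iterable. The class and attribute names here are illustrative.
# class _IterableCosts:
#     def __init__(self):
#         self._types, self._costs = [], {}
#     def __setitem__(self, step_type, cost):
#         if step_type not in self._costs:
#             self._types.append(step_type)  # remember first-seen order
#         self._costs[step_type] = cost
#     def items(self):
#         for step_type in self._types:
#             yield step_type, self._costs[step_type]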
class Governance(IconSystemScoreBase):
_SCORE_STATUS = 'score_status' # legacy
_AUDITOR_LIST = 'auditor_list'
_DEPLOYER_LIST = 'deployer_list'
_SCORE_BLACK_LIST = 'score_black_list'
_STEP_PRICE = 'step_price'
_MAX_STEP_LIMITS = 'max_step_limits'
_VERSION = 'version'
_IMPORT_WHITE_LIST = 'import_white_list'
_IMPORT_WHITE_LIST_KEYS = 'import_white_list_keys'
_SERVICE_CONFIG = 'service_config'
_AUDIT_STATUS = 'audit_status'
_REJECT_STATUS = 'reject_status'
_REVISION_CODE = 'revision_code'
_REVISION_NAME = 'revision_name'
@eventlog(indexed=1)
def Accepted(self, txHash: str):
pass
@eventlog(indexed=1)
def Rejected(self, txHash: str, reason: str):
pass
@eventlog(indexed=1)
def StepPriceChanged(self, stepPrice: int):
pass
@eventlog(indexed=1)
def StepCostChanged(self, stepType: str, cost: int):
pass
@eventlog(indexed=1)
def MaxStepLimitChanged(self, contextType: str, value: int):
pass
@eventlog(indexed=0)
def AddImportWhiteListLog(self, addList: str, addCount: int):
pass
@eventlog(indexed=0)
def RemoveImportWhiteListLog(self, removeList: str, removeCount: int):
pass
@eventlog(indexed=0)
def UpdateServiceConfigLog(self, serviceFlag: int):
pass
@property
def import_white_list_cache(self) -> dict:
return self._get_import_white_list()
@property
def service_config(self) -> int:
return self._service_config.get()
@property
def revision_code(self) -> int:
return self._revision_code.get()
def __init__(self, db: IconScoreDatabase) -> None:
super().__init__(db)
# self._score_status = DictDB(self._SCORE_STATUS, db, value_type=bytes, depth=3)
self._auditor_list = ArrayDB(self._AUDITOR_LIST, db, value_type=Address)
self._deployer_list = ArrayDB(self._DEPLOYER_LIST, db, value_type=Address)
self._score_black_list = ArrayDB(self._SCORE_BLACK_LIST, db, value_type=Address)
self._step_price = VarDB(self._STEP_PRICE, db, value_type=int)
self._step_costs = StepCosts(db)
self._max_step_limits = DictDB(self._MAX_STEP_LIMITS, db, value_type=int)
self._version = VarDB(self._VERSION, db, value_type=str)
self._import_white_list = DictDB(self._IMPORT_WHITE_LIST, db, value_type=str)
self._import_white_list_keys = ArrayDB(self._IMPORT_WHITE_LIST_KEYS, db, value_type=str)
self._service_config = VarDB(self._SERVICE_CONFIG, db, value_type=int)
self._audit_status = DictDB(self._AUDIT_STATUS, db, value_type=bytes)
self._reject_status = DictDB(self._REJECT_STATUS, db, value_type=bytes)
self._revision_code = VarDB(self._REVISION_CODE, db, value_type=int)
self._revision_name = VarDB(self._REVISION_NAME, db, value_type=str)
def on_install(self, stepPrice: int = 10 ** 10) -> None:
super().on_install()
# add owner into initial auditor list
Logger.debug(f'on_install: owner = "{self.owner}"', TAG)
self._auditor_list.put(self.owner)
# add owner into initial deployer list
self._deployer_list.put(self.owner)
# set initial step price
self._step_price.set(stepPrice)
# set initial step costs
self._set_initial_step_costs()
# set initial max step limits
self._set_initial_max_step_limits()
# set initial import white list
self._set_initial_import_white_list()
# set initial service config
self._set_initial_service_config()
def on_update(self) -> None:
super().on_update()
if self.is_less_than_target_version('0.0.2'):
self._migrate_v0_0_2()
if self.is_less_than_target_version('0.0.3'):
self._migrate_v0_0_3()
if self.is_less_than_target_version('0.0.4'):
self._migrate_v0_0_4()
if self.is_less_than_target_version('0.0.5'):
self._migrate_v0_0_5()
self._version.set('0.0.5')
def is_less_than_target_version(self, target_version: str) -> bool:
last_version = self._version.get()
return self._versions(last_version) < self._versions(target_version)
def _migrate_v0_0_2(self):
"""
This migration updates the step costs and max step limits
"""
if len(self._step_costs) == 0:
# migrates from old DB of step_costs.
for step_type in INITIAL_STEP_COST_KEYS:
if step_type in self._step_costs:
self._step_costs._step_types.put(step_type)
self._set_initial_step_costs()
self._set_initial_max_step_limits()
def _migrate_v0_0_3(self):
# set initial import white list
self._set_initial_import_white_list()
self._set_initial_service_config()
self._set_initial_max_step_limits()
self._set_initial_revision()
def _migrate_v0_0_4(self):
pass
def _migrate_v0_0_5(self):
self._set_initial_revision()
@staticmethod
def _versions(version: str):
parts = []
if version is not None:
for part in version.split("."):
try:
parts.append(int(part))
except ValueError:
pass
return tuple(parts)
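# Editor's note: an illustrative check (not part of this SCORE) of the tuple
# comparison above -- numeric parts compare element-wise, so '0.0.10' sorts
# after '0.0.2', and a missing version yields an empty tuple.
# >>> Governance._versions('0.0.2') < Governance._versions('0.0.10')
# True
# >>> Governance._versions(None)
# ()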
@external(readonly=True)
def getScoreStatus(self, address: Address) -> dict:
# Governance
if self.is_builtin_score(address):
deploy_info = self.get_deploy_info(address)
result = {
CURRENT: {
STATUS: STATUS_ACTIVE
}
}
if deploy_info.current_tx_hash is not None:
result[CURRENT][DEPLOY_TX_HASH] = deploy_info.current_tx_hash
return result
deploy_info = self.get_deploy_info(address)
if deploy_info is None:
self.revert('SCORE not found')
current_tx_hash = deploy_info.current_tx_hash
next_tx_hash = deploy_info.next_tx_hash
active = self.is_score_active(address)
# install audit
if current_tx_hash is None and next_tx_hash and active is False:
reject_tx_hash = self._reject_status[next_tx_hash]
if reject_tx_hash:
result = {
NEXT: {
STATUS: STATUS_REJECTED,
DEPLOY_TX_HASH: next_tx_hash,
AUDIT_TX_HASH: reject_tx_hash
}}
else:
result = {
NEXT: {
STATUS: STATUS_PENDING,
DEPLOY_TX_HASH: next_tx_hash
}}
elif current_tx_hash and next_tx_hash is None and active is True:
audit_tx_hash = self._audit_status[current_tx_hash]
result = {
CURRENT: {
STATUS: STATUS_ACTIVE,
DEPLOY_TX_HASH: current_tx_hash
}}
if audit_tx_hash:
result[CURRENT][AUDIT_TX_HASH] = audit_tx_hash
else:
# update audit
if current_tx_hash and next_tx_hash and active is True:
current_audit_tx_hash = self._audit_status[current_tx_hash]
next_reject_tx_hash = self._reject_status[next_tx_hash]
if next_reject_tx_hash:
result = {
CURRENT: {
STATUS: STATUS_ACTIVE,
DEPLOY_TX_HASH: current_tx_hash,
AUDIT_TX_HASH: current_audit_tx_hash
},
NEXT: {
STATUS: STATUS_REJECTED,
DEPLOY_TX_HASH: next_tx_hash,
AUDIT_TX_HASH: next_reject_tx_hash
}}
else:
result = {
CURRENT: {
STATUS: STATUS_ACTIVE,
DEPLOY_TX_HASH: current_tx_hash,
AUDIT_TX_HASH: current_audit_tx_hash
},
NEXT: {
STATUS: STATUS_PENDING,
DEPLOY_TX_HASH: next_tx_hash
}}
else:
result = {}
return result
@external(readonly=True)
def getStepPrice(self) -> int:
return self._step_price.get()
@external
def setStepPrice(self, stepPrice: int):
# only owner can set new step price
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if stepPrice > 0:
self._step_price.set(stepPrice)
self.StepPriceChanged(stepPrice)
@external
def acceptScore(self, txHash: bytes):
# check message sender
Logger.debug(f'acceptScore: msg.sender = "{self.msg.sender}"', TAG)
if self.msg.sender not in self._auditor_list:
self.revert('Invalid sender: no permission')
# check txHash
tx_params = self.get_deploy_tx_params(txHash)
if tx_params is None:
self.revert('Invalid txHash: None')
deploy_score_addr = tx_params.score_address
deploy_info = self.get_deploy_info(deploy_score_addr)
if txHash != deploy_info.next_tx_hash:
self.revert('Invalid txHash: mismatch')
next_audit_tx_hash = self._audit_status[txHash]
if next_audit_tx_hash:
self.revert('Invalid txHash: already accepted')
next_reject_tx_hash = self._reject_status[txHash]
if next_reject_tx_hash:
self.revert('Invalid txHash: already rejected')
self._deploy(txHash, deploy_score_addr)
Logger.debug(f'acceptScore: score_address = "{tx_params.score_address}"', TAG)
self._audit_status[txHash] = self.tx.hash
self.Accepted('0x' + txHash.hex())
def _deploy(self, tx_hash: bytes, score_addr: Address):
owner = self.get_owner(score_addr)
tmp_sender = self.msg.sender
self.msg.sender = owner
try:
self._context.deploy(tx_hash)
finally:
self.msg.sender = tmp_sender
@external
def rejectScore(self, txHash: bytes, reason: str):
# check message sender
Logger.debug(f'rejectScore: msg.sender = "{self.msg.sender}"', TAG)
if self.msg.sender not in self._auditor_list:
self.revert('Invalid sender: no permission')
# check txHash
tx_params = self.get_deploy_tx_params(txHash)
if tx_params is None:
self.revert('Invalid txHash')
next_audit_tx_hash = self._audit_status[txHash]
if next_audit_tx_hash:
self.revert('Invalid txHash: already accepted')
next_reject_tx_hash = self._reject_status[txHash]
if next_reject_tx_hash:
self.revert('Invalid txHash: already rejected')
Logger.debug(f'rejectScore: score_address = "{tx_params.score_address}", reason = {reason}', TAG)
self._reject_status[txHash] = self.tx.hash
self.Rejected('0x' + txHash.hex(), reason)
@external
def addAuditor(self, address: Address):
if address.is_contract:
self.revert(f'Invalid EOA Address: {address}')
# check message sender, only owner can add new auditor
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if address not in self._auditor_list:
self._auditor_list.put(address)
else:
self.revert(f'Invalid address: already auditor')
if DEBUG is True:
self._print_auditor_list('addAuditor')
@external
def removeAuditor(self, address: Address):
if address.is_contract:
self.revert(f'Invalid EOA Address: {address}')
if address not in self._auditor_list:
self.revert('Invalid address: not in list')
# check message sender
if self.msg.sender != self.owner:
if self.msg.sender != address:
self.revert('Invalid sender: not yourself')
# get the topmost value
top = self._auditor_list.pop()
if top != address:
for i in range(len(self._auditor_list)):
if self._auditor_list[i] == address:
self._auditor_list[i] = top
if DEBUG is True:
self._print_auditor_list('removeAuditor')
def _print_auditor_list(self, header: str):
Logger.debug(f'{header}: list len = {len(self._auditor_list)}', TAG)
for auditor in self._auditor_list:
Logger.debug(f' --- {auditor}', TAG)
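# Editor's note: a plain-list sketch (not part of this SCORE) of the removal
# pattern used in removeAuditor/removeDeployer -- pop the last element and
# overwrite the removed slot, so the ArrayDB never needs shifting.
# >>> items = ['a', 'b', 'c', 'd']
# >>> top = items.pop()
# >>> if top != 'b':
# ...     items[items.index('b')] = top
# >>> items
# ['a', 'd', 'c']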
@external
def addDeployer(self, address: Address):
if address.is_contract:
self.revert(f'Invalid EOA Address: {address}')
# check message sender, only owner can add new deployer
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if address not in self._deployer_list:
self._deployer_list.put(address)
else:
self.revert(f'Invalid address: already deployer')
if DEBUG is True:
self._print_deployer_list('addDeployer')
@external
def removeDeployer(self, address: Address):
if address.is_contract:
self.revert(f'Invalid EOA Address: {address}')
if address not in self._deployer_list:
self.revert('Invalid address: not in list')
# check message sender
if self.msg.sender != self.owner:
if self.msg.sender != address:
self.revert('Invalid sender: not yourself')
# get the topmost value
top = self._deployer_list.pop()
if top != address:
for i in range(len(self._deployer_list)):
if self._deployer_list[i] == address:
self._deployer_list[i] = top
if DEBUG is True:
self._print_deployer_list('removeDeployer')
@external(readonly=True)
def isDeployer(self, address: Address) -> bool:
Logger.debug(f'isDeployer address: {address}', TAG)
return address in self._deployer_list
def _print_deployer_list(self, header: str):
Logger.debug(f'{header}: list len = {len(self._deployer_list)}', TAG)
for deployer in self._deployer_list:
Logger.debug(f' --- {deployer}', TAG)
@external
def addToScoreBlackList(self, address: Address):
if not address.is_contract:
self.revert(f'Invalid SCORE Address: {address}')
# check message sender, only owner can add new blacklist
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if self.address == address:
self.revert("can't add myself")
if address not in self._score_black_list:
self._score_black_list.put(address)
else:
self.revert('Invalid address: already SCORE blacklist')
if DEBUG is True:
self._print_black_list('addScoreToBlackList')
@external
def removeFromScoreBlackList(self, address: Address):
if not address.is_contract:
self.revert(f'Invalid SCORE Address: {address}')
# check message sender, only owner can remove from blacklist
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if address not in self._score_black_list:
self.revert('Invalid address: not in list')
# get the topmost value
top = self._score_black_list.pop()
if top != address:
for i in range(len(self._score_black_list)):
if self._score_black_list[i] == address:
self._score_black_list[i] = top
if DEBUG is True:
self._print_black_list('removeScoreFromBlackList')
@external(readonly=True)
def isInScoreBlackList(self, address: Address) -> bool:
Logger.debug(f'isInBlackList address: {address}', TAG)
return address in self._score_black_list
def _print_black_list(self, header: str):
Logger.debug(f'{header}: list len = {len(self._score_black_list)}', TAG)
for addr in self._score_black_list:
Logger.debug(f' --- {addr}', TAG)
def _set_initial_step_costs(self):
initial_costs = {
STEP_TYPE_DEFAULT: 100_000,
STEP_TYPE_CONTRACT_CALL: 25_000,
STEP_TYPE_CONTRACT_CREATE: 1_000_000_000,
STEP_TYPE_CONTRACT_UPDATE: 1_600_000_000,
STEP_TYPE_CONTRACT_DESTRUCT: -70_000,
STEP_TYPE_CONTRACT_SET: 30_000,
STEP_TYPE_GET: 0,
STEP_TYPE_SET: 320,
STEP_TYPE_REPLACE: 80,
STEP_TYPE_DELETE: -240,
STEP_TYPE_INPUT: 200,
STEP_TYPE_EVENT_LOG: 100,
STEP_TYPE_API_CALL: 0
}
for key, value in initial_costs.items():
self._step_costs[key] = value
def _set_initial_max_step_limits(self):
self._max_step_limits[CONTEXT_TYPE_INVOKE] = 2_500_000_000
self._max_step_limits[CONTEXT_TYPE_QUERY] = 50_000_000
def _set_initial_revision(self):
self._revision_code.set(2)
self._revision_name.set("1.1.0")
@external(readonly=True)
def getStepCosts(self) -> dict:
result = {}
for key, value in self._step_costs.items():
result[key] = value
return result
@external
def setStepCost(self, stepType: str, cost: int):
# only owner can set new step cost
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if cost < 0:
if stepType != STEP_TYPE_CONTRACT_DESTRUCT and \
stepType != STEP_TYPE_DELETE:
self.revert(f'Invalid step cost: {stepType}, {cost}')
self._step_costs[stepType] = cost
self.StepCostChanged(stepType, cost)
@external(readonly=True)
def getMaxStepLimit(self, contextType: str) -> int:
return self._max_step_limits[contextType]
@external
def setMaxStepLimit(self, contextType: str, value: int):
# only owner can set new context type value
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if value < 0:
self.revert('Invalid value: negative number')
if contextType == CONTEXT_TYPE_INVOKE or contextType == CONTEXT_TYPE_QUERY:
self._max_step_limits[contextType] = value
self.MaxStepLimitChanged(contextType, value)
else:
self.revert("Invalid context type")
@external(readonly=True)
def getVersion(self) -> str:
return self._version.get()
def _set_initial_import_white_list(self):
key = "iconservice"
        # if "iconservice" has no value yet, allow ALL imports from it
if self._import_white_list[key] == "":
self._import_white_list[key] = "*"
self._import_white_list_keys.put(key)
@external
def addImportWhiteList(self, importStmt: str):
# only owner can add import white list
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
import_stmt_dict = {}
try:
import_stmt_dict: dict = self._check_import_stmt(importStmt)
except Exception as e:
self.revert(f'Invalid import statement: {e}')
# add to import white list
log_entry = []
for key, value in import_stmt_dict.items():
old_value: str = self._import_white_list[key]
if old_value == "*":
# no need to add
continue
if len(value) == 0:
# set import white list as ALL
self._import_white_list[key] = "*"
# add to import white list keys
if old_value == "":
self._import_white_list_keys.put(key)
# make added item list for eventlog
log_entry.append((key, value))
elif old_value == "":
# set import white list
self._import_white_list[key] = ','.join(value)
# add to import white list keys
self._import_white_list_keys.put(key)
# make added item list for eventlog
log_entry.append((key, value))
else:
old_value_list = old_value.split(',')
new_value = []
for v in value:
if v not in old_value_list:
new_value.append(v)
# set import white list
                self._import_white_list[key] = f'{old_value},{",".join(new_value)}'
# make added item list for eventlog
log_entry.append((key, new_value))
# make eventlog
if len(log_entry):
self.AddImportWhiteListLog(str(log_entry), len(log_entry))
if DEBUG is True:
Logger.debug(f'checking added item ({importStmt}): {self.isInImportWhiteList(importStmt)}')
@external
def removeImportWhiteList(self, importStmt: str):
        # only owner can remove from the import white list
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
import_stmt_dict = {}
try:
import_stmt_dict: dict = self._check_import_stmt(importStmt)
except Exception as e:
self.revert(f'Invalid import statement: {e}')
# remove from import white list
log_entry = []
for key, value in import_stmt_dict.items():
old_value: str = self._import_white_list[key]
if old_value == "*":
if len(value) == 0:
# remove import white list
self._remove_import_white_list(key)
                    # make removed item list for eventlog
log_entry.append((key, value))
continue
if len(value) == 0:
# remove import white list
self._remove_import_white_list(key)
                # make removed item list for eventlog
log_entry.append((key, value))
# add to import white list keys
self._import_white_list_keys.put(key)
else:
old_value_list = old_value.split(',')
remove_value = []
new_value = []
for v in old_value_list:
if v in value:
remove_value.append(v)
else:
new_value.append(v)
# set import white list
if len(new_value):
                    self._import_white_list[key] = f'{",".join(new_value)}'
else:
self._remove_import_white_list(key)
                # make removed item list for eventlog
log_entry.append((key, remove_value))
if len(log_entry):
# make eventlog
self.AddImportWhiteListLog(str(log_entry), len(log_entry))
if DEBUG is True:
Logger.debug(f'checking removed item ({importStmt}): {self.isInImportWhiteList(importStmt)}')
@external(readonly=True)
def isInImportWhiteList(self, importStmt: str) -> bool:
try:
import_stmt_dict: dict = self._check_import_stmt(importStmt)
except Exception as e:
raise ValueError(f'{e}')
cache_import_white_list = self._get_import_white_list()
for key, value in import_stmt_dict.items():
old_value: list = cache_import_white_list.get(key, None)
if old_value is None:
return False
if old_value[0] == "*":
# import white list has ALL. See next key
continue
if len(value) == 0:
# input is ALL
return False
for v in value:
if v not in old_value:
return False
if DEBUG is True:
Logger.debug(f'({importStmt}) is in import white list')
return True
@staticmethod
def _check_import_stmt(import_stmt: str) -> dict:
Logger.debug(f'check_import_stmt: {import_stmt}')
import_stmt_dict: dict = json_loads(import_stmt.replace("\'", "\""))
for key, value in import_stmt_dict.items():
if not isinstance(key, str):
raise TypeError("Key must be of type `str`")
if not isinstance(value, list):
raise TypeError("Value must be of type `list`")
else:
for v in value:
if not isinstance(v, str):
raise TypeError("Element of value must be of type `str`")
Logger.debug(f'check_import_stmt_dict: {import_stmt_dict}')
return import_stmt_dict
def _get_import_white_list(self) -> dict:
whitelist = {}
for v in self._import_white_list_keys:
values: str = self._import_white_list[v]
whitelist[v] = values.split(',')
return whitelist
def _remove_import_white_list(self, key: str):
# remove from import white list
self._import_white_list.remove(key)
# remove from import white list keys
top = self._import_white_list_keys.pop()
if top != key:
for i in range(len(self._import_white_list_keys)):
if self._import_white_list_keys[i] == key:
self._import_white_list_keys[i] = top
def _set_initial_service_config(self):
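        # start from the service flags already enabled on the node and force the
        # IconServiceFlag bit with value 8 on top of them (assumption: in current
        # iconservice releases this bit is the scorePackageValidator flag)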
self._service_config.set(self.get_icon_service_flag() | 8)
@external
def updateServiceConfig(self, serviceFlag: int):
        # only owner can update the service config
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if serviceFlag < 0:
self.revert(f'updateServiceConfig: serviceFlag({serviceFlag}) < 0')
max_flag = 0
for flag in IconServiceFlag:
max_flag |= flag
if serviceFlag > max_flag:
self.revert(f'updateServiceConfig: serviceFlag({serviceFlag}) > max_flag({max_flag})')
prev_service_config = self._service_config.get()
if prev_service_config != serviceFlag:
self._service_config.set(serviceFlag)
self.UpdateServiceConfigLog(serviceFlag)
if DEBUG is True:
Logger.debug(f'updateServiceConfig (prev: {prev_service_config} flag: {serviceFlag})')
else:
if DEBUG is True:
Logger.debug(f'updateServiceConfig not update ({serviceFlag})')
@external(readonly=True)
def getServiceConfig(self) -> dict:
table = {}
service_flag = self._service_config.get()
for flag in IconServiceFlag:
if service_flag & flag == flag:
table[flag.name] = True
else:
table[flag.name] = False
return table
@external
def setRevision(self, code: int, name: str):
        # only owner can set the revision
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
prev_code = self._revision_code.get()
if code < prev_code:
self.revert(f"can't decrease code")
self._revision_code.set(code)
self._revision_name.set(name)
@external(readonly=True)
def getRevision(self) -> dict:
return {'code': self._revision_code.get(), 'name': self._revision_name.get()}
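# Illustrative sketch (not part of the original contract): _check_import_stmt() expects
# a dict literal serialized with single quotes, e.g. "{'iconservice': ['Logger']}"; the
# single quotes are rewritten to double quotes and parsed with json_loads, so keys must
# be strings and values lists of strings. The helper below is hypothetical and only
# demonstrates the expected round trip.
def _example_import_stmt_roundtrip() -> dict:
    stmt = "{'iconservice': ['Logger', 'ArrayDB']}"
    parsed = Governance._check_import_stmt(stmt)
    # parsed == {'iconservice': ['Logger', 'ArrayDB']}
    return parsed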
|
# -*- coding: utf-8 -*-
# Copyright 2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from iconservice import *
TAG = 'Governance'
DEBUG = False
CURRENT = 'current'
NEXT = 'next'
STATUS = 'status'
DEPLOY_TX_HASH = 'deployTxHash'
AUDIT_TX_HASH = 'auditTxHash'
VALID_STATUS_KEYS = [STATUS, DEPLOY_TX_HASH, AUDIT_TX_HASH]
STATUS_PENDING = 'pending'
STATUS_ACTIVE = 'active'
STATUS_INACTIVE = 'inactive'
STATUS_REJECTED = 'rejected'
STEP_TYPE_DEFAULT = 'default'
STEP_TYPE_CONTRACT_CALL = 'contractCall'
STEP_TYPE_CONTRACT_CREATE = 'contractCreate'
STEP_TYPE_CONTRACT_UPDATE = 'contractUpdate'
STEP_TYPE_CONTRACT_DESTRUCT = 'contractDestruct'
STEP_TYPE_CONTRACT_SET = 'contractSet'
STEP_TYPE_GET = 'get'
STEP_TYPE_SET = 'set'
STEP_TYPE_REPLACE = 'replace'
STEP_TYPE_DELETE = 'delete'
STEP_TYPE_INPUT = 'input'
STEP_TYPE_EVENT_LOG = 'eventLog'
STEP_TYPE_API_CALL = 'apiCall'
INITIAL_STEP_COST_KEYS = [STEP_TYPE_DEFAULT,
STEP_TYPE_CONTRACT_CALL, STEP_TYPE_CONTRACT_CREATE, STEP_TYPE_CONTRACT_UPDATE,
STEP_TYPE_CONTRACT_DESTRUCT, STEP_TYPE_CONTRACT_SET,
STEP_TYPE_GET, STEP_TYPE_SET, STEP_TYPE_REPLACE, STEP_TYPE_DELETE, STEP_TYPE_INPUT,
STEP_TYPE_EVENT_LOG, STEP_TYPE_API_CALL]
CONTEXT_TYPE_INVOKE = 'invoke'
CONTEXT_TYPE_QUERY = 'query'
class StepCosts:
"""
DB for stepCosts management.
It is combined DictDB and ArrayDB in order to iterate items.
"""
_STEP_TYPES = 'step_types'
_STEP_COSTS = 'step_costs'
def __init__(self, db: IconScoreDatabase):
self._step_types = ArrayDB(self._STEP_TYPES, db, value_type=str)
self._step_costs = DictDB(self._STEP_COSTS, db, value_type=int)
def __setitem__(self, step_type: str, cost: int):
if step_type not in self._step_costs:
self._step_types.put(step_type)
self._step_costs[step_type] = cost
def __getitem__(self, step_type: str):
return self._step_costs[step_type]
def __delitem__(self, step_type: str):
# delete does not actually do delete but set zero
if step_type in self._step_costs:
self._step_costs[step_type] = 0
def __contains__(self, step_type: str):
return step_type in self._step_costs
def __iter__(self):
return self._step_types.__iter__()
def __len__(self):
return self._step_types.__len__()
def items(self):
for step_type in self._step_types:
yield (step_type, self._step_costs[step_type])
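# Illustrative sketch (not part of the original SCORE): StepCosts pairs an ArrayDB of
# step-type names with a DictDB of costs because a bare DictDB cannot be iterated; the
# ArrayDB remembers which keys exist and items() walks it to look each cost up. The
# function below is hypothetical and only shows that combined usage; `db` is assumed to
# be an IconScoreDatabase supplied by the caller.
def _example_step_costs_usage(db: IconScoreDatabase) -> dict:
    costs = StepCosts(db)
    costs['default'] = 100_000   # first assignment also registers the key in the ArrayDB
    costs['default'] = 120_000   # later assignments only update the DictDB value
    return {step_type: cost for step_type, cost in costs.items()}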
class Governance(IconSystemScoreBase):
_SCORE_STATUS = 'score_status' # legacy
_AUDITOR_LIST = 'auditor_list'
_DEPLOYER_LIST = 'deployer_list'
_SCORE_BLACK_LIST = 'score_black_list'
_STEP_PRICE = 'step_price'
_MAX_STEP_LIMITS = 'max_step_limits'
_VERSION = 'version'
_IMPORT_WHITE_LIST = 'import_white_list'
_IMPORT_WHITE_LIST_KEYS = 'import_white_list_keys'
_SERVICE_CONFIG = 'service_config'
_AUDIT_STATUS = 'audit_status'
_REJECT_STATUS = 'reject_status'
_REVISION_CODE = 'revision_code'
_REVISION_NAME = 'revision_name'
@eventlog(indexed=1)
def Accepted(self, txHash: str):
pass
@eventlog(indexed=1)
def Rejected(self, txHash: str, reason: str):
pass
@eventlog(indexed=1)
def StepPriceChanged(self, stepPrice: int):
pass
@eventlog(indexed=1)
def StepCostChanged(self, stepType: str, cost: int):
pass
@eventlog(indexed=1)
def MaxStepLimitChanged(self, contextType: str, value: int):
pass
@eventlog(indexed=0)
def AddImportWhiteListLog(self, addList: str, addCount: int):
pass
@eventlog(indexed=0)
def RemoveImportWhiteListLog(self, removeList: str, removeCount: int):
pass
@eventlog(indexed=0)
def UpdateServiceConfigLog(self, serviceFlag: int):
pass
@property
def import_white_list_cache(self) -> dict:
return self._get_import_white_list()
@property
def service_config(self) -> int:
return self._service_config.get()
@property
def revision_code(self) -> int:
return self._revision_code.get()
def __init__(self, db: IconScoreDatabase) -> None:
super().__init__(db)
# self._score_status = DictDB(self._SCORE_STATUS, db, value_type=bytes, depth=3)
self._auditor_list = ArrayDB(self._AUDITOR_LIST, db, value_type=Address)
self._deployer_list = ArrayDB(self._DEPLOYER_LIST, db, value_type=Address)
self._score_black_list = ArrayDB(self._SCORE_BLACK_LIST, db, value_type=Address)
self._step_price = VarDB(self._STEP_PRICE, db, value_type=int)
self._step_costs = StepCosts(db)
self._max_step_limits = DictDB(self._MAX_STEP_LIMITS, db, value_type=int)
self._version = VarDB(self._VERSION, db, value_type=str)
self._import_white_list = DictDB(self._IMPORT_WHITE_LIST, db, value_type=str)
self._import_white_list_keys = ArrayDB(self._IMPORT_WHITE_LIST_KEYS, db, value_type=str)
self._service_config = VarDB(self._SERVICE_CONFIG, db, value_type=int)
self._audit_status = DictDB(self._AUDIT_STATUS, db, value_type=bytes)
self._reject_status = DictDB(self._REJECT_STATUS, db, value_type=bytes)
self._revision_code = VarDB(self._REVISION_CODE, db, value_type=int)
self._revision_name = VarDB(self._REVISION_NAME, db, value_type=str)
def on_install(self, stepPrice: int = 10 ** 10) -> None:
super().on_install()
# add owner into initial auditor list
Logger.debug(f'on_install: owner = "{self.owner}"', TAG)
self._auditor_list.put(self.owner)
# add owner into initial deployer list
self._deployer_list.put(self.owner)
# set initial step price
self._step_price.set(stepPrice)
# set initial step costs
self._set_initial_step_costs()
# set initial max step limits
self._set_initial_max_step_limits()
# set initial import white list
self._set_initial_import_white_list()
# set initial service config
self._set_initial_service_config()
def on_update(self) -> None:
super().on_update()
if self.is_less_than_target_version('0.0.2'):
self._migrate_v0_0_2()
if self.is_less_than_target_version('0.0.3'):
self._migrate_v0_0_3()
if self.is_less_than_target_version('0.0.4'):
self._migrate_v0_0_4()
if self.is_less_than_target_version('0.0.5'):
self._migrate_v0_0_5()
self._version.set('0.0.5')
def is_less_than_target_version(self, target_version: str) -> bool:
last_version = self._version.get()
return self._versions(last_version) < self._versions(target_version)
def _migrate_v0_0_2(self):
"""
This migration updates the step costs and max step limits
"""
if len(self._step_costs) == 0:
# migrates from old DB of step_costs.
for step_type in INITIAL_STEP_COST_KEYS:
if step_type in self._step_costs:
self._step_costs._step_types.put(step_type)
self._set_initial_step_costs()
self._set_initial_max_step_limits()
def _migrate_v0_0_3(self):
# set initial import white list
self._set_initial_import_white_list()
self._set_initial_service_config()
self._set_initial_max_step_limits()
self._set_initial_revision()
def _migrate_v0_0_4(self):
pass
def _migrate_v0_0_5(self):
self._set_initial_revision()
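    # _versions() turns a dotted version string into a tuple of ints, e.g.
    # _versions('0.0.2') -> (0, 0, 2); tuple comparison then gives
    # is_less_than_target_version() a correct ordering ('0.0.10' sorts after '0.0.2').
    # Non-numeric parts are skipped and a missing version yields an empty tuple.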
@staticmethod
def _versions(version: str):
parts = []
if version is not None:
for part in version.split("."):
try:
parts.append(int(part))
except ValueError:
pass
return tuple(parts)
@external(readonly=True)
def getScoreStatus(self, address: Address) -> dict:
# Governance
if self.is_builtin_score(address):
deploy_info = self.get_deploy_info(address)
result = {
CURRENT: {
STATUS: STATUS_ACTIVE
}
}
if deploy_info.current_tx_hash is not None:
result[CURRENT][DEPLOY_TX_HASH] = deploy_info.current_tx_hash
return result
deploy_info = self.get_deploy_info(address)
if deploy_info is None:
self.revert('SCORE not found')
current_tx_hash = deploy_info.current_tx_hash
next_tx_hash = deploy_info.next_tx_hash
active = self.is_score_active(address)
# install audit
if current_tx_hash is None and next_tx_hash and active is False:
reject_tx_hash = self._reject_status[next_tx_hash]
if reject_tx_hash:
result = {
NEXT: {
STATUS: STATUS_REJECTED,
DEPLOY_TX_HASH: next_tx_hash,
AUDIT_TX_HASH: reject_tx_hash
}}
else:
result = {
NEXT: {
STATUS: STATUS_PENDING,
DEPLOY_TX_HASH: next_tx_hash
}}
elif current_tx_hash and next_tx_hash is None and active is True:
audit_tx_hash = self._audit_status[current_tx_hash]
result = {
CURRENT: {
STATUS: STATUS_ACTIVE,
DEPLOY_TX_HASH: current_tx_hash
}}
if audit_tx_hash:
result[CURRENT][AUDIT_TX_HASH] = audit_tx_hash
else:
# update audit
if current_tx_hash and next_tx_hash and active is True:
current_audit_tx_hash = self._audit_status[current_tx_hash]
next_reject_tx_hash = self._reject_status[next_tx_hash]
if next_reject_tx_hash:
result = {
CURRENT: {
STATUS: STATUS_ACTIVE,
DEPLOY_TX_HASH: current_tx_hash,
AUDIT_TX_HASH: current_audit_tx_hash
},
NEXT: {
STATUS: STATUS_REJECTED,
DEPLOY_TX_HASH: next_tx_hash,
AUDIT_TX_HASH: next_reject_tx_hash
}}
else:
result = {
CURRENT: {
STATUS: STATUS_ACTIVE,
DEPLOY_TX_HASH: current_tx_hash,
AUDIT_TX_HASH: current_audit_tx_hash
},
NEXT: {
STATUS: STATUS_PENDING,
DEPLOY_TX_HASH: next_tx_hash
}}
else:
result = {}
return result
@external(readonly=True)
def getStepPrice(self) -> int:
return self._step_price.get()
@external
def setStepPrice(self, stepPrice: int):
# only owner can set new step price
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if stepPrice > 0:
self._step_price.set(stepPrice)
self.StepPriceChanged(stepPrice)
@external
def acceptScore(self, txHash: bytes):
# check message sender
Logger.debug(f'acceptScore: msg.sender = "{self.msg.sender}"', TAG)
if self.msg.sender not in self._auditor_list:
self.revert('Invalid sender: no permission')
# check txHash
tx_params = self.get_deploy_tx_params(txHash)
if tx_params is None:
self.revert('Invalid txHash: None')
deploy_score_addr = tx_params.score_address
deploy_info = self.get_deploy_info(deploy_score_addr)
if txHash != deploy_info.next_tx_hash:
self.revert('Invalid txHash: mismatch')
next_audit_tx_hash = self._audit_status[txHash]
if next_audit_tx_hash:
self.revert('Invalid txHash: already accepted')
next_reject_tx_hash = self._reject_status[txHash]
if next_reject_tx_hash:
self.revert('Invalid txHash: already rejected')
self._deploy(txHash, deploy_score_addr)
Logger.debug(f'acceptScore: score_address = "{tx_params.score_address}"', TAG)
self._audit_status[txHash] = self.tx.hash
self.Accepted('0x' + txHash.hex())
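    # _deploy() performs the audited deployment as if it were sent by the SCORE's own
    # owner: msg.sender is temporarily swapped to that owner for the context deploy call
    # and restored afterwards, even if the deploy raises.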
def _deploy(self, tx_hash: bytes, score_addr: Address):
owner = self.get_owner(score_addr)
tmp_sender = self.msg.sender
self.msg.sender = owner
try:
self._context.deploy(tx_hash)
finally:
self.msg.sender = tmp_sender
@external
def rejectScore(self, txHash: bytes, reason: str):
# check message sender
Logger.debug(f'rejectScore: msg.sender = "{self.msg.sender}"', TAG)
if self.msg.sender not in self._auditor_list:
self.revert('Invalid sender: no permission')
# check txHash
tx_params = self.get_deploy_tx_params(txHash)
if tx_params is None:
self.revert('Invalid txHash')
next_audit_tx_hash = self._audit_status[txHash]
if next_audit_tx_hash:
self.revert('Invalid txHash: already accepted')
next_reject_tx_hash = self._reject_status[txHash]
if next_reject_tx_hash:
self.revert('Invalid txHash: already rejected')
Logger.debug(f'rejectScore: score_address = "{tx_params.score_address}", reason = {reason}', TAG)
self._reject_status[txHash] = self.tx.hash
self.Rejected('0x' + txHash.hex(), reason)
@external
def addAuditor(self, address: Address):
if address.is_contract:
self.revert(f'Invalid EOA Address: {address}')
# check message sender, only owner can add new auditor
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if address not in self._auditor_list:
self._auditor_list.put(address)
else:
self.revert(f'Invalid address: already auditor')
if DEBUG is True:
self._print_auditor_list('addAuditor')
@external
def removeAuditor(self, address: Address):
if address.is_contract:
self.revert(f'Invalid EOA Address: {address}')
if address not in self._auditor_list:
self.revert('Invalid address: not in list')
# check message sender
if self.msg.sender != self.owner:
if self.msg.sender != address:
self.revert('Invalid sender: not yourself')
# get the topmost value
top = self._auditor_list.pop()
if top != address:
for i in range(len(self._auditor_list)):
if self._auditor_list[i] == address:
self._auditor_list[i] = top
if DEBUG is True:
self._print_auditor_list('removeAuditor')
def _print_auditor_list(self, header: str):
Logger.debug(f'{header}: list len = {len(self._auditor_list)}', TAG)
for auditor in self._auditor_list:
Logger.debug(f' --- {auditor}', TAG)
@external
def addDeployer(self, address: Address):
if address.is_contract:
self.revert(f'Invalid EOA Address: {address}')
# check message sender, only owner can add new deployer
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if address not in self._deployer_list:
self._deployer_list.put(address)
else:
self.revert(f'Invalid address: already deployer')
if DEBUG is True:
self._print_deployer_list('addDeployer')
@external
def removeDeployer(self, address: Address):
if address.is_contract:
self.revert(f'Invalid EOA Address: {address}')
if address not in self._deployer_list:
self.revert('Invalid address: not in list')
# check message sender
if self.msg.sender != self.owner:
if self.msg.sender != address:
self.revert('Invalid sender: not yourself')
# get the topmost value
top = self._deployer_list.pop()
if top != address:
for i in range(len(self._deployer_list)):
if self._deployer_list[i] == address:
self._deployer_list[i] = top
if DEBUG is True:
self._print_deployer_list('removeDeployer')
@external(readonly=True)
def isDeployer(self, address: Address) -> bool:
Logger.debug(f'isDeployer address: {address}', TAG)
return address in self._deployer_list
def _print_deployer_list(self, header: str):
Logger.debug(f'{header}: list len = {len(self._deployer_list)}', TAG)
for deployer in self._deployer_list:
Logger.debug(f' --- {deployer}', TAG)
@external
def addToScoreBlackList(self, address: Address):
if not address.is_contract:
self.revert(f'Invalid SCORE Address: {address}')
# check message sender, only owner can add new blacklist
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if self.address == address:
self.revert("can't add myself")
if address not in self._score_black_list:
self._score_black_list.put(address)
else:
self.revert('Invalid address: already SCORE blacklist')
if DEBUG is True:
self._print_black_list('addScoreToBlackList')
@external
def removeFromScoreBlackList(self, address: Address):
if not address.is_contract:
self.revert(f'Invalid SCORE Address: {address}')
# check message sender, only owner can remove from blacklist
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if address not in self._score_black_list:
self.revert('Invalid address: not in list')
# get the topmost value
top = self._score_black_list.pop()
if top != address:
for i in range(len(self._score_black_list)):
if self._score_black_list[i] == address:
self._score_black_list[i] = top
if DEBUG is True:
self._print_black_list('removeScoreFromBlackList')
@external(readonly=True)
def isInScoreBlackList(self, address: Address) -> bool:
Logger.debug(f'isInBlackList address: {address}', TAG)
return address in self._score_black_list
def _print_black_list(self, header: str):
Logger.debug(f'{header}: list len = {len(self._score_black_list)}', TAG)
for addr in self._score_black_list:
Logger.debug(f' --- {addr}', TAG)
def _set_initial_step_costs(self):
initial_costs = {
STEP_TYPE_DEFAULT: 100_000,
STEP_TYPE_CONTRACT_CALL: 25_000,
STEP_TYPE_CONTRACT_CREATE: 1_000_000_000,
STEP_TYPE_CONTRACT_UPDATE: 1_600_000_000,
STEP_TYPE_CONTRACT_DESTRUCT: -70_000,
STEP_TYPE_CONTRACT_SET: 30_000,
STEP_TYPE_GET: 0,
STEP_TYPE_SET: 320,
STEP_TYPE_REPLACE: 80,
STEP_TYPE_DELETE: -240,
STEP_TYPE_INPUT: 200,
STEP_TYPE_EVENT_LOG: 100,
STEP_TYPE_API_CALL: 0
}
for key, value in initial_costs.items():
self._step_costs[key] = value
def _set_initial_max_step_limits(self):
self._max_step_limits[CONTEXT_TYPE_INVOKE] = 2_500_000_000
self._max_step_limits[CONTEXT_TYPE_QUERY] = 50_000_000
def _set_initial_revision(self):
self._revision_code.set(2)
self._revision_name.set("1.1.0")
@external(readonly=True)
def getStepCosts(self) -> dict:
result = {}
for key, value in self._step_costs.items():
result[key] = value
return result
@external
def setStepCost(self, stepType: str, cost: int):
# only owner can set new step cost
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if cost < 0:
if stepType != STEP_TYPE_CONTRACT_DESTRUCT and \
stepType != STEP_TYPE_DELETE:
self.revert(f'Invalid step cost: {stepType}, {cost}')
self._step_costs[stepType] = cost
self.StepCostChanged(stepType, cost)
@external(readonly=True)
def getMaxStepLimit(self, contextType: str) -> int:
return self._max_step_limits[contextType]
@external
def setMaxStepLimit(self, contextType: str, value: int):
# only owner can set new context type value
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if value < 0:
self.revert('Invalid value: negative number')
if contextType == CONTEXT_TYPE_INVOKE or contextType == CONTEXT_TYPE_QUERY:
self._max_step_limits[contextType] = value
self.MaxStepLimitChanged(contextType, value)
else:
self.revert("Invalid context type")
@external(readonly=True)
def getVersion(self) -> str:
return self._version.get()
def _set_initial_import_white_list(self):
key = "iconservice"
        # if "iconservice" has no value yet, allow ALL imports from it
if self._import_white_list[key] == "":
self._import_white_list[key] = "*"
self._import_white_list_keys.put(key)
@external
def addImportWhiteList(self, importStmt: str):
# only owner can add import white list
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
import_stmt_dict = {}
try:
import_stmt_dict: dict = self._check_import_stmt(importStmt)
except Exception as e:
self.revert(f'Invalid import statement: {e}')
# add to import white list
log_entry = []
for key, value in import_stmt_dict.items():
old_value: str = self._import_white_list[key]
if old_value == "*":
# no need to add
continue
if len(value) == 0:
# set import white list as ALL
self._import_white_list[key] = "*"
# add to import white list keys
if old_value == "":
self._import_white_list_keys.put(key)
# make added item list for eventlog
log_entry.append((key, value))
elif old_value == "":
# set import white list
self._import_white_list[key] = ','.join(value)
# add to import white list keys
self._import_white_list_keys.put(key)
# make added item list for eventlog
log_entry.append((key, value))
else:
old_value_list = old_value.split(',')
new_value = []
for v in value:
if v not in old_value_list:
new_value.append(v)
# set import white list
self._import_white_list[key] = f'{old_value},{",".join(new_value)}'
# make added item list for eventlog
log_entry.append((key, new_value))
# make eventlog
if len(log_entry):
self.AddImportWhiteListLog(str(log_entry), len(log_entry))
if DEBUG is True:
Logger.debug(f'checking added item ({importStmt}): {self.isInImportWhiteList(importStmt)}')
@external
def removeImportWhiteList(self, importStmt: str):
        # only owner can remove from the import white list
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
import_stmt_dict = {}
try:
import_stmt_dict: dict = self._check_import_stmt(importStmt)
except Exception as e:
self.revert(f'Invalid import statement: {e}')
# remove from import white list
log_entry = []
for key, value in import_stmt_dict.items():
old_value: str = self._import_white_list[key]
if old_value == "*":
if len(value) == 0:
# remove import white list
self._remove_import_white_list(key)
                    # make removed item list for eventlog
log_entry.append((key, value))
continue
if len(value) == 0:
# remove import white list
self._remove_import_white_list(key)
                # make removed item list for eventlog
log_entry.append((key, value))
# add to import white list keys
self._import_white_list_keys.put(key)
else:
old_value_list = old_value.split(',')
remove_value = []
new_value = []
for v in old_value_list:
if v in value:
remove_value.append(v)
else:
new_value.append(v)
# set import white list
if len(new_value):
self._import_white_list[key] = f'{",".join(new_value)}'
else:
self._remove_import_white_list(key)
                # make removed item list for eventlog
log_entry.append((key, remove_value))
if len(log_entry):
# make eventlog
self.AddImportWhiteListLog(str(log_entry), len(log_entry))
if DEBUG is True:
Logger.debug(f'checking removed item ({importStmt}): {self.isInImportWhiteList(importStmt)}')
@external(readonly=True)
def isInImportWhiteList(self, importStmt: str) -> bool:
try:
import_stmt_dict: dict = self._check_import_stmt(importStmt)
except Exception as e:
raise ValueError(f'{e}')
cache_import_white_list = self._get_import_white_list()
for key, value in import_stmt_dict.items():
old_value: list = cache_import_white_list.get(key, None)
if old_value is None:
return False
if old_value[0] == "*":
# import white list has ALL. See next key
continue
if len(value) == 0:
# input is ALL
return False
for v in value:
if v not in old_value:
return False
if DEBUG is True:
Logger.debug(f'({importStmt}) is in import white list')
return True
@staticmethod
def _check_import_stmt(import_stmt: str) -> dict:
Logger.debug(f'check_import_stmt: {import_stmt}')
import_stmt_dict: dict = json_loads(import_stmt.replace("\'", "\""))
for key, value in import_stmt_dict.items():
if not isinstance(key, str):
raise TypeError("Key must be of type `str`")
if not isinstance(value, list):
raise TypeError("Value must be of type `list`")
else:
for v in value:
if not isinstance(v, str):
raise TypeError("Element of value must be of type `str`")
Logger.debug(f'check_import_stmt_dict: {import_stmt_dict}')
return import_stmt_dict
def _get_import_white_list(self) -> dict:
whitelist = {}
for v in self._import_white_list_keys:
values: str = self._import_white_list[v]
whitelist[v] = values.split(',')
return whitelist
def _remove_import_white_list(self, key: str):
# remove from import white list
self._import_white_list.remove(key)
# remove from import white list keys
top = self._import_white_list_keys.pop()
if top != key:
for i in range(len(self._import_white_list_keys)):
if self._import_white_list_keys[i] == key:
self._import_white_list_keys[i] = top
def _set_initial_service_config(self):
self._service_config.set(self.get_icon_service_flag() | 8)
@external
def updateServiceConfig(self, serviceFlag: int):
        # only owner can update the service config
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if serviceFlag < 0:
self.revert(f'updateServiceConfig: serviceFlag({serviceFlag}) < 0')
max_flag = 0
for flag in IconServiceFlag:
max_flag |= flag
if serviceFlag > max_flag:
self.revert(f'updateServiceConfig: serviceFlag({serviceFlag}) > max_flag({max_flag})')
prev_service_config = self._service_config.get()
if prev_service_config != serviceFlag:
self._service_config.set(serviceFlag)
self.UpdateServiceConfigLog(serviceFlag)
if DEBUG is True:
Logger.debug(f'updateServiceConfig (prev: {prev_service_config} flag: {serviceFlag})')
else:
if DEBUG is True:
Logger.debug(f'updateServiceConfig not update ({serviceFlag})')
@external(readonly=True)
def getServiceConfig(self) -> dict:
table = {}
service_flag = self._service_config.get()
for flag in IconServiceFlag:
if service_flag & flag == flag:
table[flag.name] = True
else:
table[flag.name] = False
return table
@external
def setRevision(self, code: int, name: str):
        # only owner can set the revision
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
prev_code = self._revision_code.get()
if code < prev_code:
self.revert(f"can't decrease code")
self._revision_code.set(code)
self._revision_name.set(name)
@external(readonly=True)
def getRevision(self) -> dict:
return {'code': self._revision_code.get(), 'name': self._revision_name.get()}
|
# encoding=utf-8
# Author: Yu-Lun Chiang
# Description: Test NewsCrawler
import logging
import pytest
from collections import namedtuple
from src.crawler.media import bcc
from src.utils.struct import NewsStruct
logger = logging.getLogger(__name__)
TEST_DATA = namedtuple(
typename="TEST_DATA",
field_names=[
"name",
"link",
"expected_output",
],
)
TEST_DATA_1 = TEST_DATA(
name="中國廣播公司_1",
link="https://www.bcc.com.tw/newsView.6473942",
expected_output=NewsStruct(
title="「這家超商」6/23開賣快篩試劑 雙北2門市限量100盒",
content="\r\n 為了方便民眾居家檢測新冠肺炎,食藥署在19日公布核准5款家用快篩試劑,可就近到藥局、醫療器材販售業者,如藥妝店、醫療器材行、便利商店等商家選購。萊爾富位於雙北的2家門市明(23)日起將首度開賣家用快篩試劑,每店限量100盒,售完為止。萊爾富首度引進國產泰博科技的「福爾威創家用新型冠狀病毒抗原快速檢驗套組」,明天下午3點起,將在台北市迪化店、北縣五工店限量開賣,每盒5入售價1700元,每店限量100盒,不拆售。根據食藥署公布的指引,如果快篩陽性,居家檢疫或隔離者須先與衛生單位聯繫,一般民眾則到社區採檢院所採檢確認;如果是陰性,民眾仍要遵循防疫規範,做好個人防護,持續自我健康管理。(快篩試劑資料照)\r\n ",
keywords=None,
category=None,
media="中國廣播公司",
datetime="2021/06/22 18:49 報導",
link="https://www.bcc.com.tw/newsView.6473942",
),
)
TEST_DATA_2 = TEST_DATA(
name="中國廣播公司_2",
link="https://www.bcc.com.tw/newsView.4839712",
expected_output=NewsStruct(
title="台積電衝關未成 聯電ADR爆漲股價再登新高",
content="\r\n 半導體類股正當紅,台積電今天(24日)早盤衝關500元短暫達標後拉回,聯電延續昨天的強勢,在ADR飆漲超過20%助威下,股價漲幅超過7%,最高攻至39.7元,市值擠下股王大立光,繼續成為台股人氣王。因為聯電的狂飆,大盤儘管稍事休息,拉回的幅度也很有限。(張佳琪報導)台股週一的兩大支柱台積電、聯電,週二股價兩樣情,台積電挑戰500元大關,早盤開盤隨即攻頂,但是衝高後買盤追價謹慎,導致股價翻黑呈現小跌。聯電因週一股價漲停板鎖住,美國ADR強漲20.24%,帶動股價開盤後強勢走高,隨即衝過39元一路向上,攻至39.7元,股價又改寫18年新高,且追價買單積極,漲幅超過7%,市值擠下股王大立光。讓股價瞬間點火爆衝的關鍵是美系外資分析師最新出具的報告大力看好聯電。理由是受惠於5G、AI、高速運算等發展,聯電產用率將提高至90%到95%,因此,8吋晶圓價格調漲、12吋晶圓產用率提升,以及28奈米拓展有成,推估聯電明後年資本支出將達12億美元,重申「買進」評等,目標價由32元上調至54.5元。分析師表示,三大法人週一同步大買聯電,週二的漲勢,內外資應都有貢獻。至於是否漲到外資報告訂下的目標價,分析師認為,以今年聯電EPS預估2.25元推算,如果漲到54.5元,本益比落在24倍,雖然高但不至於離譜,因此認為如果外資買盤力道夠強,目標價就可能達標。(圖:雅虎奇摩)\r\n ",
keywords=None,
category=None,
media="中國廣播公司",
datetime="2020/11/24 11:26 報導",
link="https://www.bcc.com.tw/newsView.4839712",
),
)
TEST_DATA_LIST = [TEST_DATA_1, TEST_DATA_2]
@pytest.fixture(scope="module")
def newsCrawler():
logger.warning("Init News Crawler ...")
return bcc.BCCNewsCrawler()
@pytest.mark.parametrize(
argnames="name, link, expected_output",
argvalues=[tuple(t) for t in TEST_DATA_LIST],
ids=[
f"{t.name}, {t.link[:50]+"..." if len(t.link) > 50 else t.link}"
for t in TEST_DATA_LIST
],
)
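# readable pytest ids: the crawler name plus the link, truncated to 50 characters
# when the link is longer than that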
def test_get_info(
newsCrawler,
name,
link,
expected_output,
):
output = newsCrawler.getInfo(link=link)
assert NewsStruct.__2dict__(output) == NewsStruct.__2dict__(expected_output)
|
# encoding=utf-8
# Author: Yu-Lun Chiang
# Description: Test NewsCrawler
import logging
import pytest
from collections import namedtuple
from src.crawler.media import bcc
from src.utils.struct import NewsStruct
logger = logging.getLogger(__name__)
TEST_DATA = namedtuple(
typename="TEST_DATA",
field_names=[
"name",
"link",
"expected_output",
],
)
TEST_DATA_1 = TEST_DATA(
name="中國廣播公司_1",
link="https://www.bcc.com.tw/newsView.6473942",
expected_output=NewsStruct(
title="「這家超商」6/23開賣快篩試劑 雙北2門市限量100盒",
content="\r\n 為了方便民眾居家檢測新冠肺炎,食藥署在19日公布核准5款家用快篩試劑,可就近到藥局、醫療器材販售業者,如藥妝店、醫療器材行、便利商店等商家選購。萊爾富位於雙北的2家門市明(23)日起將首度開賣家用快篩試劑,每店限量100盒,售完為止。萊爾富首度引進國產泰博科技的「福爾威創家用新型冠狀病毒抗原快速檢驗套組」,明天下午3點起,將在台北市迪化店、北縣五工店限量開賣,每盒5入售價1700元,每店限量100盒,不拆售。根據食藥署公布的指引,如果快篩陽性,居家檢疫或隔離者須先與衛生單位聯繫,一般民眾則到社區採檢院所採檢確認;如果是陰性,民眾仍要遵循防疫規範,做好個人防護,持續自我健康管理。(快篩試劑資料照)\r\n ",
keywords=None,
category=None,
media="中國廣播公司",
datetime="2021/06/22 18:49 報導",
link="https://www.bcc.com.tw/newsView.6473942",
),
)
TEST_DATA_2 = TEST_DATA(
name="中國廣播公司_2",
link="https://www.bcc.com.tw/newsView.4839712",
expected_output=NewsStruct(
title="台積電衝關未成 聯電ADR爆漲股價再登新高",
content="\r\n 半導體類股正當紅,台積電今天(24日)早盤衝關500元短暫達標後拉回,聯電延續昨天的強勢,在ADR飆漲超過20%助威下,股價漲幅超過7%,最高攻至39.7元,市值擠下股王大立光,繼續成為台股人氣王。因為聯電的狂飆,大盤儘管稍事休息,拉回的幅度也很有限。(張佳琪報導)台股週一的兩大支柱台積電、聯電,週二股價兩樣情,台積電挑戰500元大關,早盤開盤隨即攻頂,但是衝高後買盤追價謹慎,導致股價翻黑呈現小跌。聯電因週一股價漲停板鎖住,美國ADR強漲20.24%,帶動股價開盤後強勢走高,隨即衝過39元一路向上,攻至39.7元,股價又改寫18年新高,且追價買單積極,漲幅超過7%,市值擠下股王大立光。讓股價瞬間點火爆衝的關鍵是美系外資分析師最新出具的報告大力看好聯電。理由是受惠於5G、AI、高速運算等發展,聯電產用率將提高至90%到95%,因此,8吋晶圓價格調漲、12吋晶圓產用率提升,以及28奈米拓展有成,推估聯電明後年資本支出將達12億美元,重申「買進」評等,目標價由32元上調至54.5元。分析師表示,三大法人週一同步大買聯電,週二的漲勢,內外資應都有貢獻。至於是否漲到外資報告訂下的目標價,分析師認為,以今年聯電EPS預估2.25元推算,如果漲到54.5元,本益比落在24倍,雖然高但不至於離譜,因此認為如果外資買盤力道夠強,目標價就可能達標。(圖:雅虎奇摩)\r\n ",
keywords=None,
category=None,
media="中國廣播公司",
datetime="2020/11/24 11:26 報導",
link="https://www.bcc.com.tw/newsView.4839712",
),
)
TEST_DATA_LIST = [TEST_DATA_1, TEST_DATA_2]
@pytest.fixture(scope="module")
def newsCrawler():
logger.warning("Init News Crawler ...")
return bcc.BCCNewsCrawler()
@pytest.mark.parametrize(
argnames="name, link, expected_output",
argvalues=[tuple(t) for t in TEST_DATA_LIST],
ids=[
f"{t.name}, {t.link[:50]+'...' if len(t.link) > 50 else t.link}"
for t in TEST_DATA_LIST
],
)
def test_get_info(
newsCrawler,
name,
link,
expected_output,
):
output = newsCrawler.getInfo(link=link)
assert NewsStruct.__2dict__(output) == NewsStruct.__2dict__(expected_output)
|
# Recommendation : Only use this if your computer/phone is powerful enough.
# Author : Kiny
# Pix : (61) 9603-5417
# Github : https://github.com/Kiny-Kiny
# WhatsApp : http://wa.me/552179180533
# Telegram : @K_iny
# Instagram : @parziovanni
# Twitter : @KinyBruno
############################################
'''Modules'''
from itertools import product;
from sys import argv,stdout;
from time import sleep;
from os import system;
############################################
'''Colors'''
global R,B,C,G
R='\033[1;31m';
B='\033[1;34m';
C='\033[1;37m';
G='\033[1;32m';
############################################
'''Functions'''
def slow(msg):
for i in msg: stdout.write(i);sleep(0.007);stdout.flush();
def clear(): system('cls||clear');
############################################
'''Banner'''
logo=B+''' __ __ __ __ __ __ __
/\ \/ / /\ \ /\ "-.\ \ /\ \_\ \
\ \ _"-. \ \ \ \ \ \-. \ \ \____ \
\ \_\ \_\ \ \_\ \ \_\\"\_\ \/\_____\
\/_/\/_/ \/_/ \/_/ \/_/ \/_____/ \n'''+C
############################################
'''Wordlist creation'''
def wordlist(i):
msg='';res = product('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_1234567890', repeat=i);
for g in res:
senha=''
for i in g: senha+=i
msg+=f'{senha}\n'
return msg
def main(min,max):
lis=[]
slow(
f'[{G}!{C}] Criando a WordList...\n'
)
for i in range(int(min),int(max)): lis.append(str(wordlist(i)));
msg='';
for i in lis: msg+=i
file=open('KingCrimson.txt','w+');
file.write(msg);
file.close();
clear();
slow(
f'{logo}\n[{G}Wordlist Criada!{C}] A wordlist foi criada e salva no arquivo KingCrimson.txt\n'
);
############################################
if int(len(argv)) < 3:
slow(
str(logo) + f'\n{G}- {C}Modo de Uso{G} : {C}python3 '+ str(argv[0]) + G+' {'+C+'Quantidade mínima'+G+'} {' +C+'Quantidade Máxima'+G+'}\n'+C
);exit();
try: int(argv[1]);int(argv[2]);
except: slow(
f'{logo}\n[{R}Error{C}] Use apenas números inteiros! (ex: 7)\n'
);exit();
if __name__=='__main__':
clear()
if int(argv[1]) == int(argv[2]):
slow(
f'{logo}\n[{R}Error{C}] A quantidade mínima não pode ser igual a quantidade máxima.\n'
);
elif int(argv[1]) > int(argv[2]):
slow(
f'{logo}\n[{R}Error{C}] A quantidade mínima não pode ser maior que a quantidade máxima.\n'
);
else:
try:
main(int(argv[1]),int(argv[2]));
except:
clear();
slow(
f'{logo}[{R}Error{C}] Erro Desconhecido.\n'
);
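# Illustrative sketch (not part of the original script): wordlist(i) enumerates every
# string of length i over the 63-character alphabet used above, so its output has
# len(alphabet) ** i lines and memory use grows just as fast, which is why the header
# warns about machine requirements. The helper below is hypothetical and only makes
# that growth explicit.
def _example_wordlist_size(length: int) -> int:
    alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_1234567890'
    return len(alphabet) ** length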
|
# Recommendation : Only use this if your computer/phone is powerful enough.
# Author : Kiny
# Pix : (61) 9603-5417
# Github : https://github.com/Kiny-Kiny
# WhatsApp : http://wa.me/552179180533
# Telegram : @K_iny
# Instagram : @parziovanni
# Twitter : @KinyBruno
############################################
'''Modules'''
from itertools import product;
from sys import argv,stdout;
from time import sleep;
from os import system;
############################################
'''Colors'''
global R,B,C,G
R='\033[1;31m';
B='\033[1;34m';
C='\033[1;37m';
G='\033[1;32m';
############################################
'''Functions'''
def slow(msg):
for i in msg: stdout.write(i);sleep(0.007);stdout.flush();
def clear(): system('cls||clear');
############################################
'''Banner'''
logo=B+''' __ __ __ __ __ __ __
/\ \/ / /\ \ /\ "-.\ \ /\ \_\ \
\ \ _"-. \ \ \ \ \ \-. \ \ \____ \
\ \_\ \_\ \ \_\ \ \_\\"\_\ \/\_____\
\/_/\/_/ \/_/ \/_/ \/_/ \/_____/ \n'''+C
############################################
'''Wordlist creation'''
def wordlist(i):
msg='';res = product('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_1234567890', repeat=i);
for g in res:
senha=''
for i in g: senha+=i
msg+=f'{senha}\n'
return msg
def main(min,max):
lis=[]
slow(
f'[{G}!{C}] Criando a WordList...\n'
)
for i in range(int(min),int(max)): lis.append(str(wordlist(i)));
msg='';
for i in lis: msg+=i
file=open('KingCrimson.txt','w+');
file.write(msg);
file.close();
clear();
slow(
f'{logo}\n[{G}Wordlist Criada!{C}] A wordlist foi criada e salva no arquivo KingCrimson.txt\n'
);
############################################
if int(len(argv)) < 3:
slow(
str(logo) + f'\n{G}- {C}Modo de Uso{G} : {C}python3 '+ str(argv[0]) + G+' {'+C+'Quantidade mínima'+G+'} {' +C+'Quantidade Máxima'+G+'}\n'+C
);exit();
try: int(argv[1]);int(argv[2]);
except: slow(
f'{logo}\n[{R}Error{C}] Use apenas números inteiros! (ex: 7)\n'
);exit();
if __name__=='__main__':
clear()
if int(argv[1]) == int(argv[2]):
slow(
f'{logo}\n[{R}Error{C}] A quantidade mínima não pode ser igual a quantidade máxima.\n'
);
elif int(argv[1]) > int(argv[2]):
slow(
f'{logo}\n[{R}Error{C}] A quantidade mínima não pode ser maior que a quantidade máxima.\n'
);
else:
try:
main(int(argv[1]),int(argv[2]));
except:
clear();
slow(
f'{logo}[{R}Error{C}] Erro Desconhecido.\n'
);
|
from abc import ABCMeta
from uuid import UUID
import jsonschema
from dateutil.parser import parse as dateparse
from uptimer.events import SCHEMATA_PATH
from uptimer.events.cache import schema_cache
from uptimer.helpers import to_bool, to_none
class EventDefinitionError(ValueError):
pass
class EventMeta(ABCMeta, metaclass=ABCMeta):
schema_path: str = f"file:///{SCHEMATA_PATH}"
"""Base-URL at which the schema resolver will look up schema references."""
def __new__(cls, name, bases, attrs, **kwargs):
super_new = super().__new__
schema = attrs.pop("schema", None)
# `table` can be a valid None, so use False as placeholder of missing property
table = attrs.pop("table", False)
if not schema:
raise EventDefinitionError(f"Class {name} did not declare a JSON schema.")
if table is False:
raise EventDefinitionError(
f"Class {name} did not declare a database table mapping."
)
# Now resolve and parse the JSON schema for additional properties; generating
# useful representations, the proper schema resolver for validation, etc.
# Inserting them in the `attrs` dictionary will cause them to become regular
# class variables, available in every instantiated class object.
schema_spec = schema_cache[schema]
if schema_spec["title"] != name:
raise EventDefinitionError(
f"Name of class {name} must be equal to "
f"JSON schema title '{schema_spec["title"]}'"
)
properties_dict = cls._collect_properties(schema_spec)
properties = list(properties_dict.keys())
property_cast_mapping = {
prop: cls.property_to_python(spec) for prop, spec in properties_dict.items()
}
resolver = jsonschema.RefResolver(cls.schema_path, schema_spec)
attrs.update(
dict(
schema=schema,
table=table,
schema_spec=schema_spec,
properties_dict=properties_dict,
properties=properties,
property_cast_mapping=property_cast_mapping,
_resolver=resolver,
)
)
return super_new(cls, name, bases, attrs, **kwargs)
@staticmethod
def _collect_properties(schema):
"""Collects a list of all (including nested and conditional) properties."""
props = dict()
array_iter = []
if isinstance(schema, list):
array_iter = enumerate(schema)
elif isinstance(schema, dict):
array_iter = schema.items()
for key, value in array_iter:
if key == "properties":
props.update(value)
elif key == "required":
continue
else:
props.update(EventMeta._collect_properties(value))
return props
@staticmethod
def property_to_python(property_spec):
"""
Returns a list of appropriate python-native datatypes for a schema property.
        Based on the event class's schema, a list of callables is returned that a
value might be tried against. The list is ordered from most to least strict
as to prevent falsely casting values as a less strict type.
Possible types taken from JSON schema validation specification
http://json-schema.org/latest/json-schema-validation.html#rfc.section.6.1.1
"""
propformat = property_spec.get("format")
if propformat == "date-time":
return [dateparse]
if propformat == "uuid":
return [UUID]
proptypes = property_spec.get("type")
if not proptypes:
return []
if not isinstance(proptypes, list):
proptypes = [proptypes]
callables = []
if "null" in proptypes:
callables.append(to_none)
if "boolean" in proptypes:
callables.append(to_bool)
if "integer" in proptypes:
callables.append(int)
if "number" in proptypes:
callables.append(float)
return callables
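# Illustrative sketch (not part of the original module): the callables returned by
# property_to_python() are meant to be tried in order, most strict first, until one
# accepts the raw value. The helper below is hypothetical and only demonstrates that
# intended usage for a nullable integer property; real consumers would use the
# per-class property_cast_mapping assembled in EventMeta.__new__.
def _example_coerce_nullable_int(raw):
    casts = EventMeta.property_to_python({"type": ["null", "integer"]})  # [to_none, int]
    for cast in casts:
        try:
            return cast(raw)
        except (TypeError, ValueError):
            continue
    return raw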
|
from abc import ABCMeta
from uuid import UUID
import jsonschema
from dateutil.parser import parse as dateparse
from uptimer.events import SCHEMATA_PATH
from uptimer.events.cache import schema_cache
from uptimer.helpers import to_bool, to_none
class EventDefinitionError(ValueError):
pass
class EventMeta(ABCMeta, metaclass=ABCMeta):
schema_path: str = f"file:///{SCHEMATA_PATH}"
"""Base-URL at which the schema resolver will look up schema references."""
def __new__(cls, name, bases, attrs, **kwargs):
super_new = super().__new__
schema = attrs.pop("schema", None)
# `table` can be a valid None, so use False as placeholder of missing property
table = attrs.pop("table", False)
if not schema:
raise EventDefinitionError(f"Class {name} did not declare a JSON schema.")
if table is False:
raise EventDefinitionError(
f"Class {name} did not declare a database table mapping."
)
# Now resolve and parse the JSON schema for additional properties; generating
# useful representations, the proper schema resolver for validation, etc.
# Inserting them in the `attrs` dictionary will cause them to become regular
# class variables, available in every instantiated class object.
schema_spec = schema_cache[schema]
if schema_spec["title"] != name:
raise EventDefinitionError(
f"Name of class {name} must be equal to "
f"JSON schema title '{schema_spec['title']}'"
)
properties_dict = cls._collect_properties(schema_spec)
properties = list(properties_dict.keys())
property_cast_mapping = {
prop: cls.property_to_python(spec) for prop, spec in properties_dict.items()
}
resolver = jsonschema.RefResolver(cls.schema_path, schema_spec)
attrs.update(
dict(
schema=schema,
table=table,
schema_spec=schema_spec,
properties_dict=properties_dict,
properties=properties,
property_cast_mapping=property_cast_mapping,
_resolver=resolver,
)
)
return super_new(cls, name, bases, attrs, **kwargs)
@staticmethod
def _collect_properties(schema):
"""Collects a list of all (including nested and conditional) properties."""
props = dict()
array_iter = []
if isinstance(schema, list):
array_iter = enumerate(schema)
elif isinstance(schema, dict):
array_iter = schema.items()
for key, value in array_iter:
if key == "properties":
props.update(value)
elif key == "required":
continue
else:
props.update(EventMeta._collect_properties(value))
return props
@staticmethod
def property_to_python(property_spec):
"""
Returns a list of appropriate python-native datatypes for a schema property.
        Based on the event class's schema, a list of callables is returned that a
value might be tried against. The list is ordered from most to least strict
as to prevent falsely casting values as a less strict type.
Possible types taken from JSON schema validation specification
http://json-schema.org/latest/json-schema-validation.html#rfc.section.6.1.1
"""
propformat = property_spec.get("format")
if propformat == "date-time":
return [dateparse]
if propformat == "uuid":
return [UUID]
proptypes = property_spec.get("type")
if not proptypes:
return []
if not isinstance(proptypes, list):
proptypes = [proptypes]
callables = []
if "null" in proptypes:
callables.append(to_none)
if "boolean" in proptypes:
callables.append(to_bool)
if "integer" in proptypes:
callables.append(int)
if "number" in proptypes:
callables.append(float)
return callables
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Configuration base class and utilities."""
import copy
import json
import os
import warnings
from typing import Any, Dict, Tuple, Union
from . import __version__
from .file_utils import (
CONFIG_NAME,
PushToHubMixin,
cached_path,
copy_func,
hf_bucket_url,
is_offline_mode,
is_remote_url,
is_torch_available,
)
from .utils import logging
logger = logging.get_logger(__name__)
class PretrainedConfig(PushToHubMixin):
r"""
Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as
methods for loading/downloading/saving configurations.
Note:
A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to
initialize a model does **not** load the model weights. It only affects the model's configuration.
Class attributes (overridden by derived classes)
- **model_type** (:obj:`str`) -- An identifier for the model type, serialized into the JSON file, and used to
recreate the correct object in :class:`~transformers.AutoConfig`.
- **is_composition** (:obj:`bool`) -- Whether the config class is composed of multiple sub-configs. In this
case the config has to be initialized from two or more configs of type
:class:`~transformers.PretrainedConfig` like: :class:`~transformers.EncoderDecoderConfig` or
:class:`~RagConfig`.
- **keys_to_ignore_at_inference** (:obj:`List[str]`) -- A list of keys to ignore by default when looking at
dictionary outputs of the model during inference.
- **attribute_map** (:obj:`Dict[str, str]`) -- A dict that maps model specific attribute names to the
standardized naming of attributes.
Common attributes (present in all subclasses)
- **vocab_size** (:obj:`int`) -- The number of tokens in the vocabulary, which is also the first dimension of
the embeddings matrix (this attribute may be missing for models that don't have a text modality like ViT).
- **hidden_size** (:obj:`int`) -- The hidden size of the model.
- **num_attention_heads** (:obj:`int`) -- The number of attention heads used in the multi-head attention layers
of the model.
- **num_hidden_layers** (:obj:`int`) -- The number of blocks in the model.
Args:
name_or_path (:obj:`str`, `optional`, defaults to :obj:`""`):
Store the string that was passed to :func:`~transformers.PreTrainedModel.from_pretrained` or
:func:`~transformers.TFPreTrainedModel.from_pretrained` as ``pretrained_model_name_or_path`` if the
configuration was created with such a method.
output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the model should return all hidden-states.
output_attentions (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not the model should return all attentions.
return_dict (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should return a :class:`~transformers.file_utils.ModelOutput` instead of a plain
tuple.
is_encoder_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether the model is used as an encoder/decoder or not.
is_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether the model is used as decoder or not (in which case it's used as an encoder).
add_cross_attention (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether cross-attention layers should be added to the model. Note, this option is only relevant for models
            that can be used as decoder models within the :class:`~transformers.EncoderDecoderModel` class, which
consists of all models in ``AUTO_MODELS_FOR_CAUSAL_LM``.
tie_encoder_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether all encoder weights should be tied to their equivalent decoder weights. This requires the encoder
and decoder model to have the exact same parameter names.
prune_heads (:obj:`Dict[int, List[int]]`, `optional`, defaults to :obj:`{}`):
Pruned heads of the model. The keys are the selected layer indices and the associated values, the list of
heads to prune in said layer.
For instance ``{1: [0, 2], 2: [2, 3]}`` will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
chunk_size_feed_forward (:obj:`int`, `optional`, defaults to :obj:`0`):
The chunk size of all feed forward layers in the residual attention blocks. A chunk size of :obj:`0` means
that the feed forward layer is not chunked. A chunk size of n means that the feed forward layer processes
:obj:`n` < sequence_length embeddings at a time. For more information on feed forward chunking, see `How
does Feed Forward Chunking work? <../glossary.html#feed-forward-chunking>`__ .
Parameters for sequence generation
- **max_length** (:obj:`int`, `optional`, defaults to 20) -- Maximum length that will be used by default in the
:obj:`generate` method of the model.
- **min_length** (:obj:`int`, `optional`, defaults to 10) -- Minimum length that will be used by default in the
:obj:`generate` method of the model.
- **do_sample** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Flag that will be used by default in the
          :obj:`generate` method of the model. Whether or not to use sampling; use greedy decoding otherwise.
- **early_stopping** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Flag that will be used by default
in the :obj:`generate` method of the model. Whether to stop the beam search when at least ``num_beams``
sentences are finished per batch or not.
- **num_beams** (:obj:`int`, `optional`, defaults to 1) -- Number of beams for beam search that will be used by
default in the :obj:`generate` method of the model. 1 means no beam search.
- **num_beam_groups** (:obj:`int`, `optional`, defaults to 1) -- Number of groups to divide :obj:`num_beams`
into in order to ensure diversity among different groups of beams that will be used by default in the
:obj:`generate` method of the model. 1 means no group beam search.
- **diversity_penalty** (:obj:`float`, `optional`, defaults to 0.0) -- Value to control diversity for group
beam search. that will be used by default in the :obj:`generate` method of the model. 0 means no diversity
penalty. The higher the penalty, the more diverse are the outputs.
        - **temperature** (:obj:`float`, `optional`, defaults to 1) -- The value used to modulate the next token
probabilities that will be used by default in the :obj:`generate` method of the model. Must be strictly
positive.
- **top_k** (:obj:`int`, `optional`, defaults to 50) -- Number of highest probability vocabulary tokens to keep
for top-k-filtering that will be used by default in the :obj:`generate` method of the model.
- **top_p** (:obj:`float`, `optional`, defaults to 1) -- Value that will be used by default in the
:obj:`generate` method of the model for ``top_p``. If set to float < 1, only the most probable tokens with
probabilities that add up to ``top_p`` or higher are kept for generation.
- **repetition_penalty** (:obj:`float`, `optional`, defaults to 1) -- Parameter for repetition penalty that
will be used by default in the :obj:`generate` method of the model. 1.0 means no penalty.
- **length_penalty** (:obj:`float`, `optional`, defaults to 1) -- Exponential penalty to the length that will
be used by default in the :obj:`generate` method of the model.
- **no_repeat_ngram_size** (:obj:`int`, `optional`, defaults to 0) -- Value that will be used by default in the
:obj:`generate` method of the model for ``no_repeat_ngram_size``. If set to int > 0, all ngrams of that size
can only occur once.
- **encoder_no_repeat_ngram_size** (:obj:`int`, `optional`, defaults to 0) -- Value that will be used by
default in the :obj:`generate` method of the model for ``encoder_no_repeat_ngram_size``. If set to int > 0,
all ngrams of that size that occur in the ``encoder_input_ids`` cannot occur in the ``decoder_input_ids``.
- **bad_words_ids** (:obj:`List[int]`, `optional`) -- List of token ids that are not allowed to be generated
that will be used by default in the :obj:`generate` method of the model. In order to get the tokens of the
words that should not appear in the generated text, use :obj:`tokenizer.encode(bad_word,
add_prefix_space=True)`.
- **num_return_sequences** (:obj:`int`, `optional`, defaults to 1) -- Number of independently computed returned
sequences for each element in the batch that will be used by default in the :obj:`generate` method of the
model.
- **output_scores** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether the model should return the
logits when used for generation
- **return_dict_in_generate** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether the model should
return a :class:`~transformers.file_utils.ModelOutput` instead of a :obj:`torch.LongTensor`
- **forced_bos_token_id** (:obj:`int`, `optional`) -- The id of the token to force as the first generated token
after the :obj:`decoder_start_token_id`. Useful for multilingual models like :doc:`mBART
<../model_doc/mbart>` where the first generated token needs to be the target language token.
- **forced_eos_token_id** (:obj:`int`, `optional`) -- The id of the token to force as the last generated token
when :obj:`max_length` is reached.
- **remove_invalid_values** (:obj:`bool`, `optional`) -- Whether to remove possible `nan` and `inf` outputs of
the model to prevent the generation method to crash. Note that using ``remove_invalid_values`` can slow down
generation.
Parameters for fine-tuning tasks
- **architectures** (:obj:`List[str]`, `optional`) -- Model architectures that can be used with the model
pretrained weights.
- **finetuning_task** (:obj:`str`, `optional`) -- Name of the task used to fine-tune the model. This can be
used when converting from an original (TensorFlow or PyTorch) checkpoint.
- **id2label** (:obj:`Dict[int, str]`, `optional`) -- A map from index (for instance prediction index, or
target index) to label.
- **label2id** (:obj:`Dict[str, int]`, `optional`) -- A map from label to index for the model.
- **num_labels** (:obj:`int`, `optional`) -- Number of labels to use in the last layer added to the model,
typically for a classification task.
- **task_specific_params** (:obj:`Dict[str, Any]`, `optional`) -- Additional keyword arguments to store for the
current task.
- **problem_type** (:obj:`str`, `optional`) -- Problem type for :obj:`XxxForSequenceClassification` models. Can
be one of (:obj:`"regression"`, :obj:`"single_label_classification"`, :obj:`"multi_label_classification"`).
Please note that this parameter is only available in the following models: `AlbertForSequenceClassification`,
`BertForSequenceClassification`, `BigBirdForSequenceClassification`, `ConvBertForSequenceClassification`,
`DistilBertForSequenceClassification`, `ElectraForSequenceClassification`, `FunnelForSequenceClassification`,
`LongformerForSequenceClassification`, `MobileBertForSequenceClassification`,
`ReformerForSequenceClassification`, `RobertaForSequenceClassification`,
`SqueezeBertForSequenceClassification`, `XLMForSequenceClassification` and `XLNetForSequenceClassification`.
Parameters linked to the tokenizer
- **tokenizer_class** (:obj:`str`, `optional`) -- The name of the associated tokenizer class to use (if none is
set, will use the tokenizer associated to the model by default).
- **prefix** (:obj:`str`, `optional`) -- A specific prompt that should be added at the beginning of each text
before calling the model.
- **bos_token_id** (:obj:`int`, `optional`) -- The id of the `beginning-of-stream` token.
- **pad_token_id** (:obj:`int`, `optional`) -- The id of the `padding` token.
- **eos_token_id** (:obj:`int`, `optional`) -- The id of the `end-of-stream` token.
- **decoder_start_token_id** (:obj:`int`, `optional`) -- If an encoder-decoder model starts decoding with a
different token than `bos`, the id of that token.
- **sep_token_id** (:obj:`int`, `optional`) -- The id of the `separation` token.
PyTorch specific parameters
- **torchscript** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether or not the model should be
used with Torchscript.
- **tie_word_embeddings** (:obj:`bool`, `optional`, defaults to :obj:`True`) -- Whether the model's input and
output word embeddings should be tied. Note that this is only relevant if the model has an output word
embedding layer.
- **torch_dtype** (:obj:`str`, `optional`) -- The :obj:`dtype` of the weights. This attribute can be used to
initialize the model to a non-default ``dtype`` (which is normally ``float32``) and thus allow for optimal
storage allocation. For example, if the saved model is ``float16``, ideally we want to load it back using the
minimal amount of memory needed to load ``float16`` weights. Since the config object is stored in plain text,
this attribute contains just the floating type string without the ``torch.`` prefix. For example, for
``torch.float16`` ``torch_dtype`` is the ``"float16"`` string.
This attribute is currently not used at model loading time, but this may change in future versions. We can,
however, already start preparing for that by saving the dtype with ``save_pretrained``.
TensorFlow specific parameters
- **use_bfloat16** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether or not the model should use
BFloat16 scalars (only used by some TensorFlow models).
"""
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
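# `attribute_map` lets a subclass expose alternative attribute names: `__setattr__` and
# `__getattribute__` below transparently redirect any mapped name to its canonical counterpart.
# For example, a subclass with a (hypothetical) attribute_map = {"hidden_dim": "hidden_size"}
# would make `config.hidden_dim` read and write `config.hidden_size`.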
def __setattr__(self, key, value):
if key in super().__getattribute__("attribute_map"):
key = super().__getattribute__("attribute_map")[key]
super().__setattr__(key, value)
def __getattribute__(self, key):
if key != "attribute_map" and key in super().__getattribute__("attribute_map"):
key = super().__getattribute__("attribute_map")[key]
return super().__getattribute__(key)
def __init__(self, **kwargs):
# Attributes with defaults
self.return_dict = kwargs.pop("return_dict", True)
self.output_hidden_states = kwargs.pop("output_hidden_states", False)
self.output_attentions = kwargs.pop("output_attentions", False)
self.torchscript = kwargs.pop("torchscript", False) # Only used by PyTorch models
self.torch_dtype = kwargs.pop("torch_dtype", None) # Only used by PyTorch models
self.use_bfloat16 = kwargs.pop("use_bfloat16", False)
self.pruned_heads = kwargs.pop("pruned_heads", {})
self.tie_word_embeddings = kwargs.pop(
"tie_word_embeddings", True
) # Whether input and output word embeddings should be tied for all MLM, LM and Seq2Seq models.
# Is decoder is used in encoder-decoder models to differentiate encoder from decoder
self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False)
self.is_decoder = kwargs.pop("is_decoder", False)
self.add_cross_attention = kwargs.pop("add_cross_attention", False)
self.tie_encoder_decoder = kwargs.pop("tie_encoder_decoder", False)
# Parameters for sequence generation
self.max_length = kwargs.pop("max_length", 20)
self.min_length = kwargs.pop("min_length", 0)
self.do_sample = kwargs.pop("do_sample", False)
self.early_stopping = kwargs.pop("early_stopping", False)
self.num_beams = kwargs.pop("num_beams", 1)
self.num_beam_groups = kwargs.pop("num_beam_groups", 1)
self.diversity_penalty = kwargs.pop("diversity_penalty", 0.0)
self.temperature = kwargs.pop("temperature", 1.0)
self.top_k = kwargs.pop("top_k", 50)
self.top_p = kwargs.pop("top_p", 1.0)
self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
self.length_penalty = kwargs.pop("length_penalty", 1.0)
self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
self.encoder_no_repeat_ngram_size = kwargs.pop("encoder_no_repeat_ngram_size", 0)
self.bad_words_ids = kwargs.pop("bad_words_ids", None)
self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
self.chunk_size_feed_forward = kwargs.pop("chunk_size_feed_forward", 0)
self.output_scores = kwargs.pop("output_scores", False)
self.return_dict_in_generate = kwargs.pop("return_dict_in_generate", False)
self.forced_bos_token_id = kwargs.pop("forced_bos_token_id", None)
self.forced_eos_token_id = kwargs.pop("forced_eos_token_id", None)
self.remove_invalid_values = kwargs.pop("remove_invalid_values", False)
# Fine-tuning task arguments
self.architectures = kwargs.pop("architectures", None)
self.finetuning_task = kwargs.pop("finetuning_task", None)
self.id2label = kwargs.pop("id2label", None)
self.label2id = kwargs.pop("label2id", None)
if self.id2label is not None:
kwargs.pop("num_labels", None)
self.id2label = dict((int(key), value) for key, value in self.id2label.items())
# Keys are always strings in JSON so convert ids to int here.
else:
self.num_labels = kwargs.pop("num_labels", 2)
if self.torch_dtype is not None and isinstance(self.torch_dtype, str):
# we will start using self.torch_dtype in v5, but to be consistent with
# from_pretrained's torch_dtype arg convert it to an actual torch.dtype object
if is_torch_available():
import torch
self.torch_dtype = getattr(torch, self.torch_dtype)
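# At this point a plain string such as "float16" (as stored in config.json) has been
# converted into the corresponding torch.dtype object, e.g. torch.float16.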
# Tokenizer arguments TODO: eventually tokenizer and models should share the same config
self.tokenizer_class = kwargs.pop("tokenizer_class", None)
self.prefix = kwargs.pop("prefix", None)
self.bos_token_id = kwargs.pop("bos_token_id", None)
self.pad_token_id = kwargs.pop("pad_token_id", None)
self.eos_token_id = kwargs.pop("eos_token_id", None)
self.sep_token_id = kwargs.pop("sep_token_id", None)
self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
# task specific arguments
self.task_specific_params = kwargs.pop("task_specific_params", None)
# regression / multi-label classification
self.problem_type = kwargs.pop("problem_type", None)
allowed_problem_types = ("regression", "single_label_classification", "multi_label_classification")
if self.problem_type is not None and self.problem_type not in allowed_problem_types:
raise ValueError(
f"The config parameter `problem_type` was not understood: received {self.problem_type}"
"but only 'regression', 'single_label_classification' and 'multi_label_classification' are valid."
)
# TPU arguments
if kwargs.pop("xla_device", None) is not None:
logger.warning(
"The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can "
"safely remove it from your `config.json` file."
)
# Name or path to the pretrained checkpoint
self._name_or_path = str(kwargs.pop("name_or_path", ""))
# Drop the transformers version info
self.transformers_version = kwargs.pop("transformers_version", None)
# Deal with gradient checkpointing
if kwargs.get("gradient_checkpointing", False):
warnings.warn(
"Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 "
"Transformers. Using `model.gradient_checkpointing_enable()` instead, or if you are using the "
"`Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`."
)
# Additional attributes without default values
for key, value in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
logger.error(f"Can't set {key} with value {value} for {self}")
raise err
@property
def name_or_path(self) -> str:
return self._name_or_path
@name_or_path.setter
def name_or_path(self, value):
self._name_or_path = str(value) # Make sure that name_or_path is a string (for JSON encoding)
@property
def use_return_dict(self) -> bool:
"""
:obj:`bool`: Whether or not to return :class:`~transformers.file_utils.ModelOutput` instead of tuples.
"""
# If torchscript is set, force `return_dict=False` to avoid jit errors
return self.return_dict and not self.torchscript
@property
def num_labels(self) -> int:
"""
:obj:`int`: The number of labels for classification models.
"""
return len(self.id2label)
@num_labels.setter
def num_labels(self, num_labels: int):
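# Only rebuild the label maps when no mapping exists yet or its size differs from the
# requested number of labels; an existing id2label of the right size is left untouched.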
if not hasattr(self, "id2label") or self.id2label is None or len(self.id2label) != num_labels:
self.id2label = {i: f"LABEL_{i}" for i in range(num_labels)}
self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))
def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
"""
Save a configuration object to the directory ``save_directory``, so that it can be re-loaded using the
:func:`~transformers.PretrainedConfig.from_pretrained` class method.
Args:
save_directory (:obj:`str` or :obj:`os.PathLike`):
Directory where the configuration JSON file will be saved (will be created if it does not exist).
push_to_hub (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to push your model to the Hugging Face model hub after saving it.
.. warning::
Using :obj:`push_to_hub=True` will synchronize the repository you are pushing to with
:obj:`save_directory`, which requires :obj:`save_directory` to be a local clone of the repo you are
pushing to if it's an existing folder. Pass along :obj:`temp_dir=True` to use a temporary directory
instead.
kwargs:
Additional keyword arguments passed along to the
:meth:`~transformers.file_utils.PushToHubMixin.push_to_hub` method.
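Example (illustrative sketch; the directory path and the ``BertConfig`` subclass are only
placeholders for any concrete configuration class)::

    from transformers import BertConfig

    config = BertConfig(num_labels=4)
    config.save_pretrained("./my_config_dir/")  # writes ./my_config_dir/config.json
    reloaded = BertConfig.from_pretrained("./my_config_dir/")
    assert reloaded.num_labels == 4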
"""
if os.path.isfile(save_directory):
raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
if push_to_hub:
commit_message = kwargs.pop("commit_message", None)
repo = self._create_or_get_repo(save_directory, **kwargs)
os.makedirs(save_directory, exist_ok=True)
# If we save using the predefined names, we can load using `from_pretrained`
output_config_file = os.path.join(save_directory, CONFIG_NAME)
self.to_json_file(output_config_file, use_diff=True)
logger.info(f"Configuration saved in {output_config_file}")
if push_to_hub:
url = self._push_to_hub(repo, commit_message=commit_message)
logger.info(f"Configuration pushed to the hub in this commit: {url}")
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
r"""
Instantiate a :class:`~transformers.PretrainedConfig` (or a derived class) from a pretrained model
configuration.
Args:
pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`):
This can be either:
- a string, the `model id` of a pretrained model configuration hosted inside a model repo on
huggingface.co. Valid model ids can be located at the root-level, like ``bert-base-uncased``, or
namespaced under a user or organization name, like ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing a configuration file saved using the
:func:`~transformers.PretrainedConfig.save_pretrained` method, e.g., ``./my_model_directory/``.
- a path or url to a saved configuration JSON `file`, e.g.,
``./my_model_directory/configuration.json``.
cache_dir (:obj:`str` or :obj:`os.PathLike`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force to (re-)download the configuration files and override the cached versions if
they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete an incompletely received file. Attempts to resume the download if such a file
exists.
proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
use_auth_token (:obj:`str` or `bool`, `optional`):
The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`).
revision (:obj:`str`, `optional`, defaults to :obj:`"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
return_unused_kwargs (:obj:`bool`, `optional`, defaults to :obj:`False`):
If :obj:`False`, then this function returns just the final configuration object.
If :obj:`True`, then this function returns a :obj:`Tuple(config, unused_kwargs)` where `unused_kwargs`
is a dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e.,
the part of ``kwargs`` which has not been used to update ``config`` and is otherwise ignored.
kwargs (:obj:`Dict[str, Any]`, `optional`):
The values in kwargs of any keys which are configuration attributes will be used to override the loaded
values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
by the ``return_unused_kwargs`` keyword parameter.
.. note::
Passing :obj:`use_auth_token=True` is required when you want to use a private model.
Returns:
:class:`PretrainedConfig`: The configuration object instantiated from this pretrained model.
Examples::
# We can't instantiate directly the base class `PretrainedConfig` so let's show the examples on a
# derived class: BertConfig
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from huggingface.co and cache.
config = BertConfig.from_pretrained('./test/saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')`
config = BertConfig.from_pretrained('./test/saved_model/my_configuration.json')
config = BertConfig.from_pretrained('bert-base-uncased', output_attentions=True, foo=False)
assert config.output_attentions == True
config, unused_kwargs = BertConfig.from_pretrained('bert-base-uncased', output_attentions=True,
foo=False, return_unused_kwargs=True)
assert config.output_attentions == True
assert unused_kwargs == {'foo': False}
"""
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
logger.warn(
f"You are using a model of type {config_dict["model_type"]} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
)
return cls.from_dict(config_dict, **kwargs)
@classmethod
def get_config_dict(
cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""
From a ``pretrained_model_name_or_path``, resolve to a dictionary of parameters, to be used for instantiating a
:class:`~transformers.PretrainedConfig` using ``from_dict``.
Parameters:
pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`):
The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
Returns:
:obj:`Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the configuration object.
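Example (illustrative; downloading the dictionary requires network access to huggingface.co)::

    config_dict, unused_kwargs = BertConfig.get_config_dict("bert-base-uncased")
    assert config_dict["model_type"] == "bert"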
"""
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
use_auth_token = kwargs.pop("use_auth_token", None)
local_files_only = kwargs.pop("local_files_only", False)
revision = kwargs.pop("revision", None)
from_pipeline = kwargs.pop("_from_pipeline", None)
from_auto_class = kwargs.pop("_from_auto", False)
user_agent = {"file_type": "config", "from_auto_class": from_auto_class}
if from_pipeline is not None:
user_agent["using_pipeline"] = from_pipeline
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
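# Resolve where the configuration lives: a local directory (use its config.json),
# an explicit local file or remote URL, or otherwise a download URL on the hub.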
if os.path.isdir(pretrained_model_name_or_path):
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
config_file = pretrained_model_name_or_path
else:
config_file = hf_bucket_url(
pretrained_model_name_or_path, filename=CONFIG_NAME, revision=revision, mirror=None
)
try:
# Load from URL or cache if already cached
resolved_config_file = cached_path(
config_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
user_agent=user_agent,
)
# Load config dict
config_dict = cls._dict_from_json_file(resolved_config_file)
except EnvironmentError as err:
logger.error(err)
msg = (
f"Can't load config for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a {CONFIG_NAME} file\n\n"
)
if revision is not None:
msg += f"- or '{revision}' is a valid git identifier (branch name, a tag name, or a commit id) that exists for this model name as listed on its model page on 'https://huggingface.co/models'\n\n"
raise EnvironmentError(msg)
except (json.JSONDecodeError, UnicodeDecodeError):
msg = (
f"Couldn't reach server at '{config_file}' to download configuration file or "
"configuration file is not a valid JSON file. "
f"Please check network or file content here: {resolved_config_file}."
)
raise EnvironmentError(msg)
if resolved_config_file == config_file:
logger.info(f"loading configuration file {config_file}")
else:
logger.info(f"loading configuration file {config_file} from cache at {resolved_config_file}")
return config_dict, kwargs
@classmethod
def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "PretrainedConfig":
"""
Instantiates a :class:`~transformers.PretrainedConfig` from a Python dictionary of parameters.
Args:
config_dict (:obj:`Dict[str, Any]`):
Dictionary that will be used to instantiate the configuration object. Such a dictionary can be
retrieved from a pretrained checkpoint by leveraging the
:func:`~transformers.PretrainedConfig.get_config_dict` method.
kwargs (:obj:`Dict[str, Any]`):
Additional parameters from which to initialize the configuration object.
Returns:
:class:`PretrainedConfig`: The configuration object instantiated from those parameters.
"""
return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
config = cls(**config_dict)
if hasattr(config, "pruned_heads"):
config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items())
# Update config with kwargs if needed
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
if key != "torch_dtype":
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
logger.info(f"Model config {config}")
if return_unused_kwargs:
return config, kwargs
else:
return config
@classmethod
def from_json_file(cls, json_file: Union[str, os.PathLike]) -> "PretrainedConfig":
"""
Instantiates a :class:`~transformers.PretrainedConfig` from the path to a JSON file of parameters.
Args:
json_file (:obj:`str` or :obj:`os.PathLike`):
Path to the JSON file containing the parameters.
Returns:
:class:`PretrainedConfig`: The configuration object instantiated from that JSON file.
"""
config_dict = cls._dict_from_json_file(json_file)
return cls(**config_dict)
@classmethod
def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return json.loads(text)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return f"{self.__class__.__name__} {self.to_json_string()}"
def to_diff_dict(self) -> Dict[str, Any]:
"""
Removes all attributes from this configuration that correspond to the default config attributes, for better
readability, and serializes it to a Python dictionary.
Returns:
:obj:`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
config_dict = self.to_dict()
# get the default config dict
default_config_dict = PretrainedConfig().to_dict()
# get class specific config dict
class_config_dict = self.__class__().to_dict() if not self.is_composition else {}
serializable_config_dict = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if (
key not in default_config_dict
or key == "transformers_version"
or value != default_config_dict[key]
or (key in class_config_dict and value != class_config_dict[key])
):
serializable_config_dict[key] = value
self.dict_torch_dtype_to_str(serializable_config_dict)
return serializable_config_dict
def to_dict(self) -> Dict[str, Any]:
"""
Serializes this instance to a Python dictionary.
Returns:
:obj:`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
output = copy.deepcopy(self.__dict__)
if hasattr(self.__class__, "model_type"):
output["model_type"] = self.__class__.model_type
# Transformers version when serializing the model
output["transformers_version"] = __version__
self.dict_torch_dtype_to_str(output)
return output
def to_json_string(self, use_diff: bool = True) -> str:
"""
Serializes this instance to a JSON string.
Args:
use_diff (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to ``True``, only the difference between the config instance and the default
``PretrainedConfig()`` is serialized to JSON string.
Returns:
:obj:`str`: String containing all the attributes that make up this configuration instance in JSON format.
"""
if use_diff is True:
config_dict = self.to_diff_dict()
else:
config_dict = self.to_dict()
return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True):
"""
Save this instance to a JSON file.
Args:
json_file_path (:obj:`str` or :obj:`os.PathLike`):
Path to the JSON file in which this configuration instance's parameters will be saved.
use_diff (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to ``True``, only the difference between the config instance and the default
``PretrainedConfig()`` is serialized to JSON file.
"""
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string(use_diff=use_diff))
def update(self, config_dict: Dict[str, Any]):
"""
Updates attributes of this class with attributes from ``config_dict``.
Args:
config_dict (:obj:`Dict[str, Any]`): Dictionary of attributes that should be updated for this class.
"""
for key, value in config_dict.items():
setattr(self, key, value)
def update_from_string(self, update_str: str):
"""
Updates attributes of this class with attributes from ``update_str``.
The expected format is ints, floats and strings as is, and for booleans use ``true`` or ``false``. For example:
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
The keys to change have to already exist in the config object.
Args:
update_str (:obj:`str`): String with attributes that should be updated for this class.
"""
d = dict(x.split("=") for x in update_str.split(","))
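# Each "key=value" pair is cast below to the type of the existing attribute; only int,
# float, bool and str attributes are supported by this simple comma-separated parser.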
for k, v in d.items():
if not hasattr(self, k):
raise ValueError(f"key {k} isn't in the original config dict")
old_v = getattr(self, k)
if isinstance(old_v, bool):
if v.lower() in ["true", "1", "y", "yes"]:
v = True
elif v.lower() in ["false", "0", "n", "no"]:
v = False
else:
raise ValueError(f"can't derive true or false from {v} (key {k})")
elif isinstance(old_v, int):
v = int(v)
elif isinstance(old_v, float):
v = float(v)
elif not isinstance(old_v, str):
raise ValueError(
f"You can only update int, float, bool or string values in the config, got {v} for key {k}"
)
setattr(self, k, v)
def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:
"""
Checks whether the passed dictionary has a `torch_dtype` key and if it's not None, converts torch.dtype to a
string of just the type. For example, :obj:`torch.float32` get converted into `"float32"` string, which can
then be stored in the json format.
"""
if d.get("torch_dtype", None) is not None and not isinstance(d["torch_dtype"], str):
d["torch_dtype"] = str(d["torch_dtype"]).split(".")[1]
PretrainedConfig.push_to_hub = copy_func(PretrainedConfig.push_to_hub)
PretrainedConfig.push_to_hub.__doc__ = PretrainedConfig.push_to_hub.__doc__.format(
object="config", object_class="AutoConfig", object_files="configuration file"
)
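# A minimal, self-contained usage sketch (illustrative only; it exercises the generic
# `PretrainedConfig` API defined above rather than any specific model configuration).
if __name__ == "__main__":
    config = PretrainedConfig(num_labels=3)
    # Override a couple of generation defaults from a CLI-style string.
    config.update_from_string("num_beams=4,do_sample=true")
    assert config.num_beams == 4 and config.do_sample is True
    # Round-trip through the JSON diff representation and back to an equivalent object.
    restored = PretrainedConfig.from_dict(json.loads(config.to_json_string()))
    assert restored.num_beams == 4 and restored.num_labels == 3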
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Configuration base class and utilities."""
import copy
import json
import os
import warnings
from typing import Any, Dict, Tuple, Union
from . import __version__
from .file_utils import (
CONFIG_NAME,
PushToHubMixin,
cached_path,
copy_func,
hf_bucket_url,
is_offline_mode,
is_remote_url,
is_torch_available,
)
from .utils import logging
logger = logging.get_logger(__name__)
class PretrainedConfig(PushToHubMixin):
r"""
Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as
methods for loading/downloading/saving configurations.
Note:
A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to
initialize a model does **not** load the model weights. It only affects the model's configuration.
Class attributes (overridden by derived classes)
- **model_type** (:obj:`str`) -- An identifier for the model type, serialized into the JSON file, and used to
recreate the correct object in :class:`~transformers.AutoConfig`.
- **is_composition** (:obj:`bool`) -- Whether the config class is composed of multiple sub-configs. In this
case the config has to be initialized from two or more configs of type
:class:`~transformers.PretrainedConfig` like: :class:`~transformers.EncoderDecoderConfig` or
:class:`~RagConfig`.
- **keys_to_ignore_at_inference** (:obj:`List[str]`) -- A list of keys to ignore by default when looking at
dictionary outputs of the model during inference.
- **attribute_map** (:obj:`Dict[str, str]`) -- A dict that maps model specific attribute names to the
standardized naming of attributes.
Common attributes (present in all subclasses)
- **vocab_size** (:obj:`int`) -- The number of tokens in the vocabulary, which is also the first dimension of
the embeddings matrix (this attribute may be missing for models that don't have a text modality like ViT).
- **hidden_size** (:obj:`int`) -- The hidden size of the model.
- **num_attention_heads** (:obj:`int`) -- The number of attention heads used in the multi-head attention layers
of the model.
- **num_hidden_layers** (:obj:`int`) -- The number of blocks in the model.
Args:
name_or_path (:obj:`str`, `optional`, defaults to :obj:`""`):
Store the string that was passed to :func:`~transformers.PreTrainedModel.from_pretrained` or
:func:`~transformers.TFPreTrainedModel.from_pretrained` as ``pretrained_model_name_or_path`` if the
configuration was created with such a method.
output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the model should return all hidden-states.
output_attentions (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the model should return all attentions.
return_dict (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should return a :class:`~transformers.file_utils.ModelOutput` instead of a plain
tuple.
is_encoder_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether the model is used as an encoder/decoder or not.
is_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether the model is used as decoder or not (in which case it's used as an encoder).
add_cross_attention (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether cross-attention layers should be added to the model. Note, this option is only relevant for models
that can be used as decoder models within the :class:`~transformers.EncoderDecoderModel` class, which
consists of all models in ``AUTO_MODELS_FOR_CAUSAL_LM``.
tie_encoder_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether all encoder weights should be tied to their equivalent decoder weights. This requires the encoder
and decoder model to have the exact same parameter names.
pruned_heads (:obj:`Dict[int, List[int]]`, `optional`, defaults to :obj:`{}`):
Pruned heads of the model. The keys are the selected layer indices and the associated values, the list of
heads to prune in said layer.
For instance ``{1: [0, 2], 2: [2, 3]}`` will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
chunk_size_feed_forward (:obj:`int`, `optional`, defaults to :obj:`0`):
The chunk size of all feed forward layers in the residual attention blocks. A chunk size of :obj:`0` means
that the feed forward layer is not chunked. A chunk size of n means that the feed forward layer processes
:obj:`n` < sequence_length embeddings at a time. For more information on feed forward chunking, see `How
does Feed Forward Chunking work? <../glossary.html#feed-forward-chunking>`__ .
Parameters for sequence generation
- **max_length** (:obj:`int`, `optional`, defaults to 20) -- Maximum length that will be used by default in the
:obj:`generate` method of the model.
- **min_length** (:obj:`int`, `optional`, defaults to 0) -- Minimum length that will be used by default in the
:obj:`generate` method of the model.
- **do_sample** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Flag that will be used by default in the
:obj:`generate` method of the model. Whether or not to use sampling; use greedy decoding otherwise.
- **early_stopping** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Flag that will be used by default
in the :obj:`generate` method of the model. Whether to stop the beam search when at least ``num_beams``
sentences are finished per batch or not.
- **num_beams** (:obj:`int`, `optional`, defaults to 1) -- Number of beams for beam search that will be used by
default in the :obj:`generate` method of the model. 1 means no beam search.
- **num_beam_groups** (:obj:`int`, `optional`, defaults to 1) -- Number of groups to divide :obj:`num_beams`
into in order to ensure diversity among different groups of beams that will be used by default in the
:obj:`generate` method of the model. 1 means no group beam search.
- **diversity_penalty** (:obj:`float`, `optional`, defaults to 0.0) -- Value to control diversity for group
beam search that will be used by default in the :obj:`generate` method of the model. 0 means no diversity
penalty. The higher the penalty, the more diverse are the outputs.
- **temperature** (:obj:`float`, `optional`, defaults to 1) -- The value used to modulate the next token
probabilities that will be used by default in the :obj:`generate` method of the model. Must be strictly
positive.
- **top_k** (:obj:`int`, `optional`, defaults to 50) -- Number of highest probability vocabulary tokens to keep
for top-k-filtering that will be used by default in the :obj:`generate` method of the model.
- **top_p** (:obj:`float`, `optional`, defaults to 1) -- Value that will be used by default in the
:obj:`generate` method of the model for ``top_p``. If set to float < 1, only the most probable tokens with
probabilities that add up to ``top_p`` or higher are kept for generation.
- **repetition_penalty** (:obj:`float`, `optional`, defaults to 1) -- Parameter for repetition penalty that
will be used by default in the :obj:`generate` method of the model. 1.0 means no penalty.
- **length_penalty** (:obj:`float`, `optional`, defaults to 1) -- Exponential penalty to the length that will
be used by default in the :obj:`generate` method of the model.
- **no_repeat_ngram_size** (:obj:`int`, `optional`, defaults to 0) -- Value that will be used by default in the
:obj:`generate` method of the model for ``no_repeat_ngram_size``. If set to int > 0, all ngrams of that size
can only occur once.
- **encoder_no_repeat_ngram_size** (:obj:`int`, `optional`, defaults to 0) -- Value that will be used by
default in the :obj:`generate` method of the model for ``encoder_no_repeat_ngram_size``. If set to int > 0,
all ngrams of that size that occur in the ``encoder_input_ids`` cannot occur in the ``decoder_input_ids``.
- **bad_words_ids** (:obj:`List[int]`, `optional`) -- List of token ids that are not allowed to be generated
that will be used by default in the :obj:`generate` method of the model. In order to get the tokens of the
words that should not appear in the generated text, use :obj:`tokenizer.encode(bad_word,
add_prefix_space=True)`.
- **num_return_sequences** (:obj:`int`, `optional`, defaults to 1) -- Number of independently computed returned
sequences for each element in the batch that will be used by default in the :obj:`generate` method of the
model.
- **output_scores** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether the model should return the
logits when used for generation.
- **return_dict_in_generate** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether the model should
return a :class:`~transformers.file_utils.ModelOutput` instead of a :obj:`torch.LongTensor`.
- **forced_bos_token_id** (:obj:`int`, `optional`) -- The id of the token to force as the first generated token
after the :obj:`decoder_start_token_id`. Useful for multilingual models like :doc:`mBART
<../model_doc/mbart>` where the first generated token needs to be the target language token.
- **forced_eos_token_id** (:obj:`int`, `optional`) -- The id of the token to force as the last generated token
when :obj:`max_length` is reached.
- **remove_invalid_values** (:obj:`bool`, `optional`) -- Whether to remove possible ``nan`` and ``inf`` outputs of
the model to prevent the generation method from crashing. Note that using ``remove_invalid_values`` can slow down
generation.
Parameters for fine-tuning tasks
- **architectures** (:obj:`List[str]`, `optional`) -- Model architectures that can be used with the model
pretrained weights.
- **finetuning_task** (:obj:`str`, `optional`) -- Name of the task used to fine-tune the model. This can be
used when converting from an original (TensorFlow or PyTorch) checkpoint.
- **id2label** (:obj:`Dict[int, str]`, `optional`) -- A map from index (for instance prediction index, or
target index) to label.
- **label2id** (:obj:`Dict[str, int]`, `optional`) -- A map from label to index for the model.
- **num_labels** (:obj:`int`, `optional`) -- Number of labels to use in the last layer added to the model,
typically for a classification task.
- **task_specific_params** (:obj:`Dict[str, Any]`, `optional`) -- Additional keyword arguments to store for the
current task.
- **problem_type** (:obj:`str`, `optional`) -- Problem type for :obj:`XxxForSequenceClassification` models. Can
be one of (:obj:`"regression"`, :obj:`"single_label_classification"`, :obj:`"multi_label_classification"`).
Please note that this parameter is only available in the following models: `AlbertForSequenceClassification`,
`BertForSequenceClassification`, `BigBirdForSequenceClassification`, `ConvBertForSequenceClassification`,
`DistilBertForSequenceClassification`, `ElectraForSequenceClassification`, `FunnelForSequenceClassification`,
`LongformerForSequenceClassification`, `MobileBertForSequenceClassification`,
`ReformerForSequenceClassification`, `RobertaForSequenceClassification`,
`SqueezeBertForSequenceClassification`, `XLMForSequenceClassification` and `XLNetForSequenceClassification`.
Parameters linked to the tokenizer
- **tokenizer_class** (:obj:`str`, `optional`) -- The name of the associated tokenizer class to use (if none is
set, will use the tokenizer associated to the model by default).
- **prefix** (:obj:`str`, `optional`) -- A specific prompt that should be added at the beginning of each text
before calling the model.
- **bos_token_id** (:obj:`int`, `optional`) -- The id of the `beginning-of-stream` token.
- **pad_token_id** (:obj:`int`, `optional`) -- The id of the `padding` token.
- **eos_token_id** (:obj:`int`, `optional`) -- The id of the `end-of-stream` token.
- **decoder_start_token_id** (:obj:`int`, `optional`) -- If an encoder-decoder model starts decoding with a
different token than `bos`, the id of that token.
- **sep_token_id** (:obj:`int`, `optional`) -- The id of the `separation` token.
PyTorch specific parameters
- **torchscript** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether or not the model should be
used with Torchscript.
- **tie_word_embeddings** (:obj:`bool`, `optional`, defaults to :obj:`True`) -- Whether the model's input and
output word embeddings should be tied. Note that this is only relevant if the model has an output word
embedding layer.
- **torch_dtype** (:obj:`str`, `optional`) -- The :obj:`dtype` of the weights. This attribute can be used to
initialize the model to a non-default ``dtype`` (which is normally ``float32``) and thus allow for optimal
storage allocation. For example, if the saved model is ``float16``, ideally we want to load it back using the
minimal amount of memory needed to load ``float16`` weights. Since the config object is stored in plain text,
this attribute contains just the floating type string without the ``torch.`` prefix. For example, for
``torch.float16`` ``torch_dtype`` is the ``"float16"`` string.
This attribute is currently not used at model loading time, but this may change in future versions. We can,
however, already start preparing for that by saving the dtype with ``save_pretrained``.
TensorFlow specific parameters
- **use_bfloat16** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether or not the model should use
BFloat16 scalars (only used by some TensorFlow models).
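Example (illustrative sketch; ``BertConfig`` stands in for any concrete subclass)::

    from transformers import BertConfig

    config = BertConfig(num_labels=3, output_attentions=True)
    assert config.num_labels == 3
    assert len(config.id2label) == 3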
"""
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
def __setattr__(self, key, value):
if key in super().__getattribute__("attribute_map"):
key = super().__getattribute__("attribute_map")[key]
super().__setattr__(key, value)
def __getattribute__(self, key):
if key != "attribute_map" and key in super().__getattribute__("attribute_map"):
key = super().__getattribute__("attribute_map")[key]
return super().__getattribute__(key)
def __init__(self, **kwargs):
# Attributes with defaults
self.return_dict = kwargs.pop("return_dict", True)
self.output_hidden_states = kwargs.pop("output_hidden_states", False)
self.output_attentions = kwargs.pop("output_attentions", False)
self.torchscript = kwargs.pop("torchscript", False) # Only used by PyTorch models
self.torch_dtype = kwargs.pop("torch_dtype", None) # Only used by PyTorch models
self.use_bfloat16 = kwargs.pop("use_bfloat16", False)
self.pruned_heads = kwargs.pop("pruned_heads", {})
self.tie_word_embeddings = kwargs.pop(
"tie_word_embeddings", True
) # Whether input and output word embeddings should be tied for all MLM, LM and Seq2Seq models.
# Is decoder is used in encoder-decoder models to differentiate encoder from decoder
self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False)
self.is_decoder = kwargs.pop("is_decoder", False)
self.add_cross_attention = kwargs.pop("add_cross_attention", False)
self.tie_encoder_decoder = kwargs.pop("tie_encoder_decoder", False)
# Parameters for sequence generation
self.max_length = kwargs.pop("max_length", 20)
self.min_length = kwargs.pop("min_length", 0)
self.do_sample = kwargs.pop("do_sample", False)
self.early_stopping = kwargs.pop("early_stopping", False)
self.num_beams = kwargs.pop("num_beams", 1)
self.num_beam_groups = kwargs.pop("num_beam_groups", 1)
self.diversity_penalty = kwargs.pop("diversity_penalty", 0.0)
self.temperature = kwargs.pop("temperature", 1.0)
self.top_k = kwargs.pop("top_k", 50)
self.top_p = kwargs.pop("top_p", 1.0)
self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
self.length_penalty = kwargs.pop("length_penalty", 1.0)
self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
self.encoder_no_repeat_ngram_size = kwargs.pop("encoder_no_repeat_ngram_size", 0)
self.bad_words_ids = kwargs.pop("bad_words_ids", None)
self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
self.chunk_size_feed_forward = kwargs.pop("chunk_size_feed_forward", 0)
self.output_scores = kwargs.pop("output_scores", False)
self.return_dict_in_generate = kwargs.pop("return_dict_in_generate", False)
self.forced_bos_token_id = kwargs.pop("forced_bos_token_id", None)
self.forced_eos_token_id = kwargs.pop("forced_eos_token_id", None)
self.remove_invalid_values = kwargs.pop("remove_invalid_values", False)
# Fine-tuning task arguments
self.architectures = kwargs.pop("architectures", None)
self.finetuning_task = kwargs.pop("finetuning_task", None)
self.id2label = kwargs.pop("id2label", None)
self.label2id = kwargs.pop("label2id", None)
if self.id2label is not None:
kwargs.pop("num_labels", None)
self.id2label = dict((int(key), value) for key, value in self.id2label.items())
# Keys are always strings in JSON so convert ids to int here.
else:
self.num_labels = kwargs.pop("num_labels", 2)
if self.torch_dtype is not None and isinstance(self.torch_dtype, str):
# we will start using self.torch_dtype in v5, but to be consistent with
# from_pretrained's torch_dtype arg convert it to an actual torch.dtype object
if is_torch_available():
import torch
self.torch_dtype = getattr(torch, self.torch_dtype)
# Tokenizer arguments TODO: eventually tokenizer and models should share the same config
self.tokenizer_class = kwargs.pop("tokenizer_class", None)
self.prefix = kwargs.pop("prefix", None)
self.bos_token_id = kwargs.pop("bos_token_id", None)
self.pad_token_id = kwargs.pop("pad_token_id", None)
self.eos_token_id = kwargs.pop("eos_token_id", None)
self.sep_token_id = kwargs.pop("sep_token_id", None)
self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
# task specific arguments
self.task_specific_params = kwargs.pop("task_specific_params", None)
# regression / multi-label classification
self.problem_type = kwargs.pop("problem_type", None)
allowed_problem_types = ("regression", "single_label_classification", "multi_label_classification")
if self.problem_type is not None and self.problem_type not in allowed_problem_types:
raise ValueError(
f"The config parameter `problem_type` was not understood: received {self.problem_type}"
"but only 'regression', 'single_label_classification' and 'multi_label_classification' are valid."
)
# TPU arguments
if kwargs.pop("xla_device", None) is not None:
logger.warning(
"The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can "
"safely remove it from your `config.json` file."
)
# Name or path to the pretrained checkpoint
self._name_or_path = str(kwargs.pop("name_or_path", ""))
# Drop the transformers version info
self.transformers_version = kwargs.pop("transformers_version", None)
# Deal with gradient checkpointing
if kwargs.get("gradient_checkpointing", False):
warnings.warn(
"Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 "
"Transformers. Using `model.gradient_checkpointing_enable()` instead, or if you are using the "
"`Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`."
)
# Additional attributes without default values
for key, value in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
logger.error(f"Can't set {key} with value {value} for {self}")
raise err
@property
def name_or_path(self) -> str:
return self._name_or_path
@name_or_path.setter
def name_or_path(self, value):
self._name_or_path = str(value) # Make sure that name_or_path is a string (for JSON encoding)
@property
def use_return_dict(self) -> bool:
"""
:obj:`bool`: Whether or not to return :class:`~transformers.file_utils.ModelOutput` instead of tuples.
"""
# If torchscript is set, force `return_dict=False` to avoid jit errors
return self.return_dict and not self.torchscript
@property
def num_labels(self) -> int:
"""
:obj:`int`: The number of labels for classification models.
"""
return len(self.id2label)
@num_labels.setter
def num_labels(self, num_labels: int):
if not hasattr(self, "id2label") or self.id2label is None or len(self.id2label) != num_labels:
self.id2label = {i: f"LABEL_{i}" for i in range(num_labels)}
self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))
def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
"""
Save a configuration object to the directory ``save_directory``, so that it can be re-loaded using the
:func:`~transformers.PretrainedConfig.from_pretrained` class method.
Args:
save_directory (:obj:`str` or :obj:`os.PathLike`):
Directory where the configuration JSON file will be saved (will be created if it does not exist).
push_to_hub (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to push your model to the Hugging Face model hub after saving it.
.. warning::
Using :obj:`push_to_hub=True` will synchronize the repository you are pushing to with
:obj:`save_directory`, which requires :obj:`save_directory` to be a local clone of the repo you are
pushing to if it's an existing folder. Pass along :obj:`temp_dir=True` to use a temporary directory
instead.
kwargs:
Additional keyword arguments passed along to the
:meth:`~transformers.file_utils.PushToHubMixin.push_to_hub` method.
"""
if os.path.isfile(save_directory):
raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
if push_to_hub:
commit_message = kwargs.pop("commit_message", None)
repo = self._create_or_get_repo(save_directory, **kwargs)
os.makedirs(save_directory, exist_ok=True)
# If we save using the predefined names, we can load using `from_pretrained`
output_config_file = os.path.join(save_directory, CONFIG_NAME)
self.to_json_file(output_config_file, use_diff=True)
logger.info(f"Configuration saved in {output_config_file}")
if push_to_hub:
url = self._push_to_hub(repo, commit_message=commit_message)
logger.info(f"Configuration pushed to the hub in this commit: {url}")
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
r"""
Instantiate a :class:`~transformers.PretrainedConfig` (or a derived class) from a pretrained model
configuration.
Args:
pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`):
This can be either:
- a string, the `model id` of a pretrained model configuration hosted inside a model repo on
huggingface.co. Valid model ids can be located at the root-level, like ``bert-base-uncased``, or
namespaced under a user or organization name, like ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing a configuration file saved using the
:func:`~transformers.PretrainedConfig.save_pretrained` method, e.g., ``./my_model_directory/``.
- a path or url to a saved configuration JSON `file`, e.g.,
``./my_model_directory/configuration.json``.
cache_dir (:obj:`str` or :obj:`os.PathLike`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force to (re-)download the configuration files and override the cached versions if
they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete an incompletely received file. Attempts to resume the download if such a file
exists.
proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
use_auth_token (:obj:`str` or `bool`, `optional`):
The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`).
revision (:obj:`str`, `optional`, defaults to :obj:`"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
return_unused_kwargs (:obj:`bool`, `optional`, defaults to :obj:`False`):
If :obj:`False`, then this function returns just the final configuration object.
If :obj:`True`, then this function returns a :obj:`Tuple(config, unused_kwargs)` where `unused_kwargs`
is a dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e.,
the part of ``kwargs`` which has not been used to update ``config`` and is otherwise ignored.
kwargs (:obj:`Dict[str, Any]`, `optional`):
The values in kwargs of any keys which are configuration attributes will be used to override the loaded
values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
by the ``return_unused_kwargs`` keyword parameter.
.. note::
Passing :obj:`use_auth_token=True` is required when you want to use a private model.
Returns:
:class:`PretrainedConfig`: The configuration object instantiated from this pretrained model.
Examples::
# We can't instantiate directly the base class `PretrainedConfig` so let's show the examples on a
# derived class: BertConfig
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from huggingface.co and cache.
config = BertConfig.from_pretrained('./test/saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')`
config = BertConfig.from_pretrained('./test/saved_model/my_configuration.json')
config = BertConfig.from_pretrained('bert-base-uncased', output_attentions=True, foo=False)
assert config.output_attentions == True
config, unused_kwargs = BertConfig.from_pretrained('bert-base-uncased', output_attentions=True,
foo=False, return_unused_kwargs=True)
assert config.output_attentions == True
assert unused_kwargs == {'foo': False}
"""
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
logger.warn(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
)
return cls.from_dict(config_dict, **kwargs)
@classmethod
def get_config_dict(
cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""
From a ``pretrained_model_name_or_path``, resolve to a dictionary of parameters, to be used for instantiating a
:class:`~transformers.PretrainedConfig` using ``from_dict``.
Parameters:
pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`):
The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
Returns:
:obj:`Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the configuration object.
"""
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
use_auth_token = kwargs.pop("use_auth_token", None)
local_files_only = kwargs.pop("local_files_only", False)
revision = kwargs.pop("revision", None)
from_pipeline = kwargs.pop("_from_pipeline", None)
from_auto_class = kwargs.pop("_from_auto", False)
user_agent = {"file_type": "config", "from_auto_class": from_auto_class}
if from_pipeline is not None:
user_agent["using_pipeline"] = from_pipeline
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
if os.path.isdir(pretrained_model_name_or_path):
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
config_file = pretrained_model_name_or_path
else:
config_file = hf_bucket_url(
pretrained_model_name_or_path, filename=CONFIG_NAME, revision=revision, mirror=None
)
try:
# Load from URL or cache if already cached
resolved_config_file = cached_path(
config_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
user_agent=user_agent,
)
# Load config dict
config_dict = cls._dict_from_json_file(resolved_config_file)
except EnvironmentError as err:
logger.error(err)
msg = (
f"Can't load config for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a {CONFIG_NAME} file\n\n"
)
if revision is not None:
msg += f"- or '{revision}' is a valid git identifier (branch name, a tag name, or a commit id) that exists for this model name as listed on its model page on 'https://huggingface.co/models'\n\n"
raise EnvironmentError(msg)
except (json.JSONDecodeError, UnicodeDecodeError):
msg = (
f"Couldn't reach server at '{config_file}' to download configuration file or "
"configuration file is not a valid JSON file. "
f"Please check network or file content here: {resolved_config_file}."
)
raise EnvironmentError(msg)
if resolved_config_file == config_file:
logger.info(f"loading configuration file {config_file}")
else:
logger.info(f"loading configuration file {config_file} from cache at {resolved_config_file}")
return config_dict, kwargs
@classmethod
def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "PretrainedConfig":
"""
Instantiates a :class:`~transformers.PretrainedConfig` from a Python dictionary of parameters.
Args:
config_dict (:obj:`Dict[str, Any]`):
Dictionary that will be used to instantiate the configuration object. Such a dictionary can be
retrieved from a pretrained checkpoint by leveraging the
:func:`~transformers.PretrainedConfig.get_config_dict` method.
kwargs (:obj:`Dict[str, Any]`):
Additional parameters from which to initialize the configuration object.
Returns:
:class:`PretrainedConfig`: The configuration object instantiated from those parameters.
"""
return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
config = cls(**config_dict)
if hasattr(config, "pruned_heads"):
config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items())
# Update config with kwargs if needed
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
if key != "torch_dtype":
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
logger.info(f"Model config {config}")
if return_unused_kwargs:
return config, kwargs
else:
return config
@classmethod
def from_json_file(cls, json_file: Union[str, os.PathLike]) -> "PretrainedConfig":
"""
Instantiates a :class:`~transformers.PretrainedConfig` from the path to a JSON file of parameters.
Args:
json_file (:obj:`str` or :obj:`os.PathLike`):
Path to the JSON file containing the parameters.
Returns:
:class:`PretrainedConfig`: The configuration object instantiated from that JSON file.
"""
config_dict = cls._dict_from_json_file(json_file)
return cls(**config_dict)
@classmethod
def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return json.loads(text)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return f"{self.__class__.__name__} {self.to_json_string()}"
def to_diff_dict(self) -> Dict[str, Any]:
"""
Removes all attributes from config which correspond to the default config attributes for better readability and
serializes to a Python dictionary.
Returns:
            :obj:`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
config_dict = self.to_dict()
# get the default config dict
default_config_dict = PretrainedConfig().to_dict()
# get class specific config dict
class_config_dict = self.__class__().to_dict() if not self.is_composition else {}
serializable_config_dict = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if (
key not in default_config_dict
or key == "transformers_version"
or value != default_config_dict[key]
or (key in class_config_dict and value != class_config_dict[key])
):
serializable_config_dict[key] = value
self.dict_torch_dtype_to_str(serializable_config_dict)
return serializable_config_dict
def to_dict(self) -> Dict[str, Any]:
"""
Serializes this instance to a Python dictionary.
Returns:
:obj:`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
output = copy.deepcopy(self.__dict__)
if hasattr(self.__class__, "model_type"):
output["model_type"] = self.__class__.model_type
# Transformers version when serializing the model
output["transformers_version"] = __version__
self.dict_torch_dtype_to_str(output)
return output
def to_json_string(self, use_diff: bool = True) -> str:
"""
Serializes this instance to a JSON string.
Args:
use_diff (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to ``True``, only the difference between the config instance and the default
``PretrainedConfig()`` is serialized to JSON string.
Returns:
:obj:`str`: String containing all the attributes that make up this configuration instance in JSON format.
"""
if use_diff is True:
config_dict = self.to_diff_dict()
else:
config_dict = self.to_dict()
return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True):
"""
Save this instance to a JSON file.
Args:
json_file_path (:obj:`str` or :obj:`os.PathLike`):
Path to the JSON file in which this configuration instance's parameters will be saved.
use_diff (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to ``True``, only the difference between the config instance and the default
``PretrainedConfig()`` is serialized to JSON file.
"""
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string(use_diff=use_diff))
def update(self, config_dict: Dict[str, Any]):
"""
Updates attributes of this class with attributes from ``config_dict``.
Args:
config_dict (:obj:`Dict[str, Any]`): Dictionary of attributes that should be updated for this class.
"""
for key, value in config_dict.items():
setattr(self, key, value)
def update_from_string(self, update_str: str):
"""
Updates attributes of this class with attributes from ``update_str``.
The expected format is ints, floats and strings as is, and for booleans use ``true`` or ``false``. For example:
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
The keys to change have to already exist in the config object.
Args:
update_str (:obj:`str`): String with attributes that should be updated for this class.
"""
d = dict(x.split("=") for x in update_str.split(","))
for k, v in d.items():
if not hasattr(self, k):
raise ValueError(f"key {k} isn't in the original config dict")
old_v = getattr(self, k)
if isinstance(old_v, bool):
if v.lower() in ["true", "1", "y", "yes"]:
v = True
elif v.lower() in ["false", "0", "n", "no"]:
v = False
else:
raise ValueError(f"can't derive true or false from {v} (key {k})")
elif isinstance(old_v, int):
v = int(v)
elif isinstance(old_v, float):
v = float(v)
elif not isinstance(old_v, str):
raise ValueError(
f"You can only update int, float, bool or string values in the config, got {v} for key {k}"
)
setattr(self, k, v)
def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:
"""
Checks whether the passed dictionary has a `torch_dtype` key and if it's not None, converts torch.dtype to a
string of just the type. For example, :obj:`torch.float32` get converted into `"float32"` string, which can
then be stored in the json format.
"""
if d.get("torch_dtype", None) is not None and not isinstance(d["torch_dtype"], str):
d["torch_dtype"] = str(d["torch_dtype"]).split(".")[1]
PretrainedConfig.push_to_hub = copy_func(PretrainedConfig.push_to_hub)
PretrainedConfig.push_to_hub.__doc__ = PretrainedConfig.push_to_hub.__doc__.format(
object="config", object_class="AutoConfig", object_files="configuration file"
)
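# Usage sketch (illustrative only; appended here for documentation and guarded so it never
# runs on import). It shows how the serialization helpers above fit together:
# to_diff_dict() keeps only non-default attributes, update_from_string() coerces
# "key=value" pairs onto existing attributes, and from_dict() rebuilds a config from a plain dict.
if __name__ == "__main__":
    _cfg = PretrainedConfig(output_hidden_states=True)
    _cfg.update_from_string("return_dict=false")  # booleans are parsed from "true"/"false"
    print(_cfg.to_diff_dict())  # only the attributes that differ from the defaults
    print(PretrainedConfig.from_dict(_cfg.to_dict()))  # round-trip through a plain dict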
|
print("String example")
s = "this is a test String"
print(f"String: {s}")
print(f"String Capitalized: {s.capitalize()}")
print(f"String Finding index: {s.find("e")}")
print(f"String Lowercase: {s.lower()}")
print(f"String Uppercase: {s.upper()}")
print(f"String Length: {len(s)}")
print(f"String Replace: {s.replace("this", "THIS")}")
print(f"String Swapcase: {s.swapcase()}")
print(f"String Title: {s.title()}")
print()
print("List examples")
L = ['C++', 'Java', 'Python']
print(f"List: {L}")
print(f"List slicing: {L[1:]}")
print(f"List slicing: {L[::-1]}")
print(f"List slicing: {L[0:2]}")
L = [1, 2, 3, 4, 5, 6, 7, 8, 9]
print(f"List: {L}")
L.append(10)
print(f"List Appending:{L}")
print(f"List Popping:{L.pop()}")
L.insert(4, 20)
print(f"List Inserting : {L}") # position, value
L.reverse()
print(f"List Reversed: {L}")
L.sort()
reversed_list = reversed(L)
print("Reversed list: {}".format(reversed_list))
for i in reversed_list:
print(i)
print(f"List Sorted: {L}")
print("\nTuple example")
tup1 = ('physics', 'chemistry', 1997, 2000)
tup2 = (1, 2, 3, 4, 5, 6, 7)
print(f"tup1[0]: {tup1[0]}")
print(f"tup2[1:5]: {tup2[1:5]}")
tup3 = tup1 + tup2
print(f"Creating new from existing: tup3: {tup3}")
print("\nDictionary examples")
d = {'Name': 'Test', 'Age': 99, 'Class': 'failed'}
print(f"Dicstionary d: {d}")
d['Age'] = 0 # update existing entry
d['School'] = "Under a tree" # Add new entry
print(f"Updating d['Age']: {d["Age"]}")
print(f"Updating d['School']: {d["School"]}")
print(f"Dictionary d: {d}")
print(f"Get Qualification : {d.get("Qualification", "NA")}")
print(f"Dictionary items: {d.items()}")
print(f"Dictionary keys: {d.keys()}")
print(f"Dictionary values: {d.values()}")
print("\nSets example")
my_set = {1, 3}
print(my_set)
my_set.add(2) # add an element
print(my_set)
my_set.update([2, 3, 4]) # add multiple elements
print(my_set)
my_set.update([4, 5], {1, 6, 8}) # add list and set
print(my_set)
my_set.remove(6)
print(my_set)
my_set.pop() # pop another random element
print(my_set)
A = {1, 2, 3, 4, 5}
B = {4, 5, 6, 7, 8}
print(A | B) # Union or A.union(B)
print(A & B) # Intersection or A.intersection(B)
print(A - B) # Difference or A.difference(B)
A = frozenset([1, 2, 3, 4])
B = frozenset([3, 4, 5, 6])
print(A.difference(B))
print(A | B)
print(A.add(3)) # Error
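# Follow-up sketch (illustrative only): the previous line raises AttributeError because
# frozensets are immutable, so running this file top-to-bottom stops there. Catching the
# error shows the same behaviour without aborting:
try:
    frozenset([1, 2, 3]).add(4)
except AttributeError as err:
    print(f"frozenset is immutable: {err}")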
|
print("String example")
s = "this is a test String"
print(f"String: {s}")
print(f"String Capitalized: {s.capitalize()}")
print(f"String Finding index: {s.find('e')}")
print(f"String Lowercase: {s.lower()}")
print(f"String Uppercase: {s.upper()}")
print(f"String Length: {len(s)}")
print(f"String Replace: {s.replace('this', 'THIS')}")
print(f"String Swapcase: {s.swapcase()}")
print(f"String Title: {s.title()}")
print()
print("List examples")
L = ['C++', 'Java', 'Python']
print(f"List: {L}")
print(f"List slicing: {L[1:]}")
print(f"List slicing: {L[::-1]}")
print(f"List slicing: {L[0:2]}")
L = [1, 2, 3, 4, 5, 6, 7, 8, 9]
print(f"List: {L}")
L.append(10)
print(f"List Appending:{L}")
print(f"List Popping:{L.pop()}")
L.insert(4, 20)
print(f"List Inserting : {L}") # position, value
L.reverse()
print(f"List Reversed: {L}")
L.sort()
reversed_list = reversed(L)
print("Reversed list: {}".format(reversed_list))
for i in reversed_list:
print(i)
print(f"List Sorted: {L}")
print("\nTuple example")
tup1 = ('physics', 'chemistry', 1997, 2000)
tup2 = (1, 2, 3, 4, 5, 6, 7)
print(f"tup1[0]: {tup1[0]}")
print(f"tup2[1:5]: {tup2[1:5]}")
tup3 = tup1 + tup2
print(f"Creating new from existing: tup3: {tup3}")
print("\nDictionary examples")
d = {'Name': 'Test', 'Age': 99, 'Class': 'failed'}
print(f"Dicstionary d: {d}")
d['Age'] = 0 # update existing entry
d['School'] = "Under a tree" # Add new entry
print(f"Updating d['Age']: {d['Age']}")
print(f"Updating d['School']: {d['School']}")
print(f"Dictionary d: {d}")
print(f"Get Qualification : {d.get('Qualification', 'NA')}")
print(f"Dictionary items: {d.items()}")
print(f"Dictionary keys: {d.keys()}")
print(f"Dictionary values: {d.values()}")
print("\nSets example")
my_set = {1, 3}
print(my_set)
my_set.add(2) # add an element
print(my_set)
my_set.update([2, 3, 4]) # add multiple elements
print(my_set)
my_set.update([4, 5], {1, 6, 8}) # add list and set
print(my_set)
my_set.remove(6)
print(my_set)
my_set.pop() # pop another random element
print(my_set)
A = {1, 2, 3, 4, 5}
B = {4, 5, 6, 7, 8}
print(A | B) # Union or A.union(B)
print(A & B) # Intersection or A.intersection(B)
print(A - B) # Difference or A.difference(B)
A = frozenset([1, 2, 3, 4])
B = frozenset([3, 4, 5, 6])
print(A.difference(B))
print(A | B)
print(A.add(3)) # Error
|
# Copyright (c) 2013, Blue Lynx and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
import math
from frappe.utils import getdate, get_time, flt
from datetime import datetime, timedelta, date, time
import calendar
def execute(filters=None):
columns, data = [], []
if filters:
columns = get_column()
data = get_data(filters)
return columns, data
def get_column():
columns = [
{
"label": "Staff Name",
"fieldname": "staff_name",
"fieldtype": "Data",
"width": 120
},
{
"label": "PT Count (Hours)",
"fieldname": "pt_count",
"fieldtype": "Data",
"width": 120
},
{
"label": "GX Count (Hours)",
"fieldname": "gx_count",
"fieldtype": "Data",
"width": 120
},
{
"label": "Others (Hours)",
"fieldname": "ot_count",
"fieldtype": "Data",
"width": 120
},
{
"label": "PT Commissions",
"fieldname": "pt_commission",
"fieldtype": "Currency",
"width": 150
},
{
"label": "GX Commissions",
"fieldname": "gc_commission",
"fieldtype": "Currency",
"width": 150
},
{
"label": "Other Commissions",
"fieldname": "other_commission",
"fieldtype": "Currency",
"width": 150,
"default": 0.0
},
{
"label": "Total Commission",
"fieldname": "total_commission",
"fieldtype": "Currency",
"width": 150
}
]
return columns
def get_data(filters):
data = []
final_data = []
year = int(filters['year'])
if 'date_range' in filters:
if filters['date_range'] == "Month":
month = filters['month']
month_number = int(datetime.strptime(month, '%B').month)
last_day = calendar.monthrange(year, month_number)[1]
start_date = datetime(year, month_number, 1)
start = start_date.date()
end_date = datetime(year, month_number, last_day)
end = end_date.date()
elif filters['date_range'] == "Custom Range":
start = getdate(filters['from_date'])
end = getdate( filters['to_date'])
if 'service_staff' in filters:
staff_list = frappe.get_all('Service Staff', filters={'name': filters['service_staff']})
else:
staff_list = frappe.db.get_list('Service Staff', filters=[['fitness_check', '=', 1]], fields=['name'])
settings = frappe.get_doc('Fitness Training Settings')
if staff_list:
for staff in staff_list:
pt_count = 0.0
ot_count = 0.0
other_commission = 0.0
service_staff = frappe.get_doc('Service Staff', staff.name)
if service_staff.fitness_service_assignment:
for services in service_staff.fitness_service_assignment:
if services.commission_applicable:
appointments_list = frappe.db.get_list('Fitness Training Appointment', filters=[['fitness_service', '=', services.fitness_package], ['appointment_date', 'between', [start, end]], ['payment_status', '=', 'Paid'], ['service_staff', '=', staff.name], ['appointment_status', 'in', {'Completed', 'No Show'}]], fields=['name', 'fitness_service'])
if services.commission_type == "Standard":
if appointments_list:
for appointments in appointments_list:
pt_service = frappe.get_doc('Fitness Services', appointments.fitness_service)
if pt_service.session_for == "Single":
pt_count += settings.single_session
elif pt_service.session_for == "Couple":
pt_count += settings.couple_session
elif services.commission_type == "Custom":
if appointments_list:
for appointments in appointments_list:
other_commission += services.commission_amount
ot_count += 1
staff['staff_name']= staff.name
staff['pt_count'] = pt_count
staff['ot_count'] = ot_count
staff['other_commission'] = other_commission
gc = []
gc_list = frappe.db.get_list('Group Class', filters=[['class_date', 'between', [start, end]], ['trainer_name', '=', staff.name], ['class_status', '=', 'Completed']], fields=['count(name) as gx_count'], group_by='trainer_name')
if gc_list:
for group_class in gc_list:
group_class_attendee = frappe.get_all('Group Class Attendees', filters={'group_class': group_class.name, 'attendee_status': 'Complete' })
if group_class_attendee:
if len(group_class_attendee) >= 3:
gc.append(group_class)
staff['gx_count'] = len(gc)
data.append(staff)
for row in data:
row['gc_commission'] = float(row['gx_count']) * float(settings.group_class_rate)
pt = calculate_pt(row['pt_count'], row['gx_count'])
row['pt_commission'] = pt
row['total_commission'] = row['gc_commission'] + row['pt_commission'] + row['other_commission']
final_data.append(row)
return final_data
def month():
year = 2021
months = 'July'
month_number = datetime.strptime(months, '%B').month
last_day = calendar.monthrange(year, month_number)[1]
start_date = datetime(year, month_number, 1)
start = start_date.date()
end_date = datetime(year, month_number, last_day)
end = end_date.date()
staff_list = frappe.db.get_list('Service Staff', filters=[['fitness_check', '=', 1]], fields=['name'])
for staff in staff_list:
gc_list = frappe.db.get_list('Group Class', filters=[['class_date', 'between', [start, end]], ['trainer_name', '=', 'Jatinder'], ['class_status', '=', 'Completed']], fields=['count(name) as gc_count'], group_by='trainer_name')
for gc in gc_list:
return type(gc.gc_count)
@frappe.whitelist()
def calculate_pt(pt_count, gx_count):
total_count = pt_count + gx_count
scale = {(0, 30): 40, (30, 60): 60, (60, 90): 80, (90, 120): 100, (120, 150): 120, (150, math.inf): 140}
hours_worked = total_count
decimal_rate = next(rate for (lower, upper), rate in scale.items() if lower <= hours_worked and upper >= hours_worked)
decimal_end = hours_worked - int(hours_worked)
end_pay = decimal_end * decimal_rate
# Use an integer for ease of calculation
hours_worked = int(hours_worked)
hours_paid_for = 0
# Beginning total pay is just the decimal "ending"
total_pay = end_pay
while hours_paid_for < hours_worked:
# Find the rate for the current bucket of hours
rate_filter = (rate for (lower, upper), rate in scale.items() if lower <= hours_paid_for and hours_paid_for < upper)
current_level = next(rate_filter)
total_pay += current_level
hours_paid_for += 1
total_session = total_pay
scale_1 = {(0, 30): 40, (30, 60): 60, (60, 90): 80, (90, 120): 100, (120, 150): 120, (150, math.inf): 140}
hours_worked_1 = gx_count
decimal_rate_1 = next(rate for (lower, upper), rate in scale_1.items() if lower <= hours_worked_1 and upper >= hours_worked_1)
decimal_end_1 = hours_worked_1 - int(hours_worked_1)
end_pay_1 = decimal_end_1 * decimal_rate_1
# Use an integer for ease of calculation
hours_worked_1 = int(hours_worked_1)
hours_paid_for_1 = 0
# Beginning total pay is just the decimal "ending"
total_pay_1 = end_pay_1
while hours_paid_for_1 < hours_worked_1:
# Find the rate for the current bucket of hours
rate_filter = (rate for (lower, upper), rate in scale_1.items() if lower <= hours_paid_for_1 and hours_paid_for_1 < upper)
current_level = next(rate_filter)
total_pay_1 += current_level
hours_paid_for_1 += 1
total_gc = total_pay_1
commission_from_pt = total_session - total_gc
return commission_from_pt
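# Illustrative sketch only (not called by the report): it mirrors the tiered scale that
# calculate_pt applies above. Each whole hour is paid at the rate of the bracket it falls
# in, and the fractional remainder is paid at the rate of the bracket holding the total.
def _tiered_pay_example(hours):
    scale = {(0, 30): 40, (30, 60): 60, (60, 90): 80, (90, 120): 100, (120, 150): 120, (150, math.inf): 140}
    fractional = hours - int(hours)
    total = fractional * next(rate for (lo, hi), rate in scale.items() if lo <= hours <= hi)
    for hour in range(int(hours)):
        total += next(rate for (lo, hi), rate in scale.items() if lo <= hour < hi)
    return total
# Example: _tiered_pay_example(31.5) == 30 * 40 + 1 * 60 + 0.5 * 60 == 1290.0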
|
from __future__ import annotations
import base64
import hashlib
import inspect
import json
import os
import sys
import traceback
from collections import defaultdict
from logging import Logger
from multiprocessing import Pipe, Process
from multiprocessing.connection import Connection
from multiprocessing.pool import ThreadPool
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
Iterator,
List,
Mapping,
Optional,
cast,
)
import boto3
import grapl_analyzerlib.counters # noqa: F401
from analyzer_executor_lib.redis_cache import EitherCache, construct_redis_client
from grapl_analyzerlib.analyzer import Analyzer
from grapl_analyzerlib.execution import ExecutionComplete, ExecutionFailed, ExecutionHit
from grapl_analyzerlib.grapl_client import GraphClient
from grapl_analyzerlib.nodes.base import BaseView
from grapl_analyzerlib.plugin_retriever import load_plugins
from grapl_analyzerlib.queryable import Queryable
from grapl_analyzerlib.subgraph_view import SubgraphView
from grapl_common.env_helpers import S3ResourceFactory
from grapl_common.grapl_logger import get_module_grapl_logger
from grapl_common.metrics.metric_reporter import MetricReporter, TagPair
from grapl_common.sqs.sqs_types import S3PutRecordDict, SQSMessageBody
from python_proto.pipeline import Metadata, OldEnvelope
if TYPE_CHECKING:
from mypy_boto3_s3 import S3ServiceResource
# Set up logger (this is for the whole file, including static methods)
LOGGER = get_module_grapl_logger()
# Set up plugins dir for models
MODEL_PLUGINS_DIR = os.getenv("MODEL_PLUGINS_DIR", "/tmp")
sys.path.insert(0, MODEL_PLUGINS_DIR)
# Ensure plugins dir exists
try:
directory = Path(MODEL_PLUGINS_DIR + "/model_plugins/")
directory.mkdir(parents=True, exist_ok=True)
except Exception as e:
LOGGER.error("Failed to create model plugins directory", e)
def verbose_cast_to_int(input: Optional[str]) -> Optional[int]:
if not input:
return None
try:
return int(input)
except (TypeError, ValueError):
raise ValueError(f"Couldn't cast this env variable into an int: {input}")
class AnalyzerExecutor:
# constants
CHUNK_SIZE_RETRY: int = 10
CHUNK_SIZE_DEFAULT: int = 100
def __init__(
self,
model_plugins_bucket: str,
analyzers_bucket: str,
analyzer_matched_subgraphs_bucket: str,
message_cache: EitherCache,
hit_cache: EitherCache,
chunk_size: int,
logger: Logger,
metric_reporter: MetricReporter,
) -> None:
self.model_plugins_bucket = model_plugins_bucket
self.analyzers_bucket = analyzers_bucket
self.analyzer_matched_subgraphs_bucket = analyzer_matched_subgraphs_bucket
self.message_cache = message_cache
self.hit_cache = hit_cache
self.chunk_size = chunk_size
self.logger = logger
self.metric_reporter = metric_reporter
@classmethod
def from_env(cls, env: Optional[Mapping[str, str]] = None) -> AnalyzerExecutor:
env = env or os.environ
# If we're retrying, change the chunk size
is_retry = bool(env.get("IS_RETRY", False))
if is_retry:
chunk_size = cls.CHUNK_SIZE_RETRY
else:
chunk_size = cls.CHUNK_SIZE_DEFAULT
# Set up message cache
messagecache_addr = env.get("MESSAGECACHE_ADDR")
messagecache_port: Optional[int] = verbose_cast_to_int(
env.get("MESSAGECACHE_PORT")
)
message_cache = construct_redis_client(messagecache_addr, messagecache_port)
# Set up hit cache
hitcache_addr = env.get("HITCACHE_ADDR")
hitcache_port: Optional[int] = verbose_cast_to_int(env.get("HITCACHE_PORT"))
hit_cache = construct_redis_client(hitcache_addr, hitcache_port)
metric_reporter = MetricReporter.create("analyzer-executor")
model_plugins_bucket = env["GRAPL_MODEL_PLUGINS_BUCKET"]
analyzers_bucket = env["GRAPL_ANALYZERS_BUCKET"]
analyzer_matched_subgraphs_bucket = env[
"GRAPL_ANALYZER_MATCHED_SUBGRAPHS_BUCKET"
]
return AnalyzerExecutor(
model_plugins_bucket=model_plugins_bucket,
analyzers_bucket=analyzers_bucket,
analyzer_matched_subgraphs_bucket=analyzer_matched_subgraphs_bucket,
message_cache=message_cache,
hit_cache=hit_cache,
chunk_size=chunk_size,
logger=LOGGER,
metric_reporter=metric_reporter,
)
def check_caches(
self, file_hash: str, msg_id: str, node_key: str, analyzer_name: str
) -> bool:
with self.metric_reporter.histogram_ctx("analyzer-executor.check_caches"):
if self.check_msg_cache(file_hash, node_key, msg_id):
self.logger.debug("cache hit - already processed")
return True
if self.check_hit_cache(analyzer_name, node_key):
self.logger.debug("cache hit - already matched")
return True
return False
def to_event_hash(self, components: Iterable[str]) -> str:
joined = ",".join(components)
event_hash = hashlib.sha256(joined.encode()).hexdigest()
return event_hash
def check_msg_cache(self, file: str, node_key: str, msg_id: str) -> bool:
event_hash = self.to_event_hash((file, node_key, msg_id))
return bool(self.message_cache.get(event_hash))
def update_msg_cache(self, file: str, node_key: str, msg_id: str) -> None:
event_hash = self.to_event_hash((file, node_key, msg_id))
self.message_cache.set(event_hash, "1")
def check_hit_cache(self, file: str, node_key: str) -> bool:
event_hash = self.to_event_hash((file, node_key))
return bool(self.hit_cache.get(event_hash))
def update_hit_cache(self, file: str, node_key: str) -> None:
event_hash = self.to_event_hash((file, node_key))
self.hit_cache.set(event_hash, "1")
async def handle_events(self, events: SQSMessageBody, context: Any) -> None:
# Parse sns message
self.logger.debug(f"handling events: {events} context: {context}")
client = GraphClient()
s3 = S3ResourceFactory(boto3).from_env()
load_plugins(
self.model_plugins_bucket,
s3.meta.client,
os.path.abspath(MODEL_PLUGINS_DIR),
)
for event in events["Records"]:
data = parse_s3_event(s3, event)
# FIXME: this code assumes inner_message is json
envelope = OldEnvelope.deserialize(data)
message = json.loads(envelope.inner_message)
            LOGGER.info(f'Executing Analyzer: {message["key"]}')
with self.metric_reporter.histogram_ctx(
"analyzer-executor.download_s3_file"
):
analyzer = download_s3_file(
s3,
self.analyzers_bucket,
message["key"],
).decode("utf8")
analyzer_name = message["key"].split("/")[-2]
subgraph = SubgraphView.from_proto(client, bytes(message["subgraph"]))
# TODO: Validate signature of S3 file
LOGGER.info(f"event {event} {envelope.metadata}")
rx: Connection
tx: Connection
rx, tx = Pipe(duplex=False)
p = Process(
target=self.execute_file,
args=(analyzer_name, analyzer, subgraph, tx, "", self.chunk_size),
)
p.start()
for exec_hit in self.poll_process(rx=rx, analyzer_name=analyzer_name):
with self.metric_reporter.histogram_ctx(
"analyzer-executor.emit_event.ms",
(TagPair("analyzer_name", exec_hit.analyzer_name),),
):
emit_event(
self.analyzer_matched_subgraphs_bucket,
s3,
exec_hit,
envelope.metadata,
)
self.update_msg_cache(analyzer, exec_hit.root_node_key, message["key"])
self.update_hit_cache(analyzer_name, exec_hit.root_node_key)
p.join()
def poll_process(
self,
rx: Connection,
analyzer_name: str,
) -> Iterator[ExecutionHit]:
"""
Keep polling the spawned Process, and yield any ExecutionHits.
(This will probably disappear if Analyzers move to Docker images.)
"""
t = 0
while True:
p_res = rx.poll(timeout=5)
if not p_res:
t += 1
LOGGER.info(
f"Analyzer {analyzer_name} polled for for {t * 5} seconds without result"
)
continue
result: Optional[Any] = rx.recv()
if isinstance(result, ExecutionComplete):
self.logger.info(f"Analyzer {analyzer_name} execution complete")
return
# emit any hits to an S3 bucket
if isinstance(result, ExecutionHit):
self.logger.info(
f"Analyzer {analyzer_name} emitting event for:"
f"{result.analyzer_name} {result.root_node_key}"
)
yield result
assert not isinstance(
result, ExecutionFailed
), f"Analyzer {analyzer_name} failed."
def exec_analyzers(
self,
dg_client: GraphClient,
file: str,
msg_id: str,
nodes: List[BaseView],
analyzers: Dict[str, Analyzer],
sender: Any,
) -> None:
if not analyzers:
self.logger.warning("Received empty dict of analyzers")
return
if not nodes:
self.logger.warning("Received empty array of nodes")
for node in nodes:
querymap: Dict[str, List[Queryable]] = defaultdict(list)
for an_name, analyzer in analyzers.items():
if self.check_caches(file, msg_id, node.node_key, an_name):
continue
queries = analyzer.get_queries()
if isinstance(queries, list) or isinstance(queries, tuple):
querymap[an_name].extend(queries)
else:
querymap[an_name].append(queries)
for an_name, queries in querymap.items():
analyzer = analyzers[an_name]
for query in queries:
# TODO: Whether it was a hit or not is a good Tag
tags = (TagPair("analyzer_name", an_name),)
with self.metric_reporter.histogram_ctx(
"analyzer-executor.query_first.ms", tags
):
response = query.query_first(
dg_client, contains_node_key=node.node_key
)
if response:
self.logger.debug(
f"Analyzer '{an_name}' received a hit, executing on_response()"
)
with self.metric_reporter.histogram_ctx(
"analyzer-executor.on_response.ms", tags
):
analyzer.on_response(response, sender)
def execute_file(
self,
name: str,
file: str,
graph: SubgraphView,
sender: Connection,
msg_id: str,
chunk_size: int,
) -> None:
try:
pool = ThreadPool(processes=4)
exec(file, globals())
client = GraphClient()
analyzers = get_analyzer_objects(client)
if not analyzers:
self.logger.warning(f"Got no analyzers for file: {name}")
self.logger.info(f"Executing analyzers: {[an for an in analyzers.keys()]}")
for nodes in chunker([n for n in graph.node_iter()], chunk_size):
self.logger.info(f"Querying {len(nodes)} nodes")
def exec_analyzer(
nodes: List[BaseView], sender: Connection
) -> List[BaseView]:
try:
self.exec_analyzers(
client, file, msg_id, nodes, analyzers, sender
)
return nodes
except Exception as e:
self.logger.error(traceback.format_exc())
self.logger.error(
f"Execution of {name} failed with {e} {e.args}"
)
sender.send(ExecutionFailed())
raise
pool.apply_async(exec_analyzer, args=(nodes, sender))
pool.close()
pool.join()
sender.send(ExecutionComplete())
except Exception as e:
self.logger.error(traceback.format_exc())
self.logger.error(f"Execution of {name} failed with {e} {e.args}")
sender.send(ExecutionFailed())
raise
def parse_s3_event(s3: S3ServiceResource, event: S3PutRecordDict) -> bytes:
try:
bucket = event["s3"]["bucket"]["name"]
key = event["s3"]["object"]["key"]
except KeyError:
LOGGER.error("Could not parse s3 event: {}", exc_info=True)
raise
return download_s3_file(s3, bucket, key)
def download_s3_file(s3: S3ServiceResource, bucket: str, key: str) -> bytes:
obj = s3.Object(bucket, key)
return cast(bytes, obj.get()["Body"].read())
def is_analyzer(analyzer_name: str, analyzer_cls: type) -> bool:
if analyzer_name == "Analyzer": # This is the base class
return False
return (
hasattr(analyzer_cls, "get_queries")
and hasattr(analyzer_cls, "build")
and hasattr(analyzer_cls, "on_response")
)
def get_analyzer_objects(dgraph_client: GraphClient) -> Dict[str, Analyzer]:
clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
return {
an[0]: an[1].build(dgraph_client)
for an in clsmembers
if is_analyzer(an[0], an[1])
}
def chunker(seq: List[BaseView], size: int) -> List[List[BaseView]]:
return [seq[pos : pos + size] for pos in range(0, len(seq), size)]
def emit_event(
analyzer_matched_subgraphs_bucket: str,
s3: S3ServiceResource,
event: ExecutionHit,
metadata: Metadata,
) -> None:
LOGGER.info(f"emitting event for: {event.analyzer_name, event.nodes}")
meta_dict = {
"trace_id": str(metadata.trace_id),
}
event_s = json.dumps(
{
"nodes": json.loads(event.nodes),
"edges": json.loads(event.edges),
"analyzer_name": event.analyzer_name,
"risk_score": event.risk_score,
"lenses": event.lenses,
"risky_node_keys": event.risky_node_keys,
"metadata": meta_dict,
}
)
event_hash = hashlib.sha256(event_s.encode())
key = base64.urlsafe_b64encode(event_hash.digest()).decode("utf-8")
obj = s3.Object(analyzer_matched_subgraphs_bucket, key)
obj.put(Body=event_s.encode("utf-8"))
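# Minimal sketch of the key scheme emit_event uses above (illustrative; the payload shown
# in the comment is a made-up stand-in, not a real ExecutionHit): the S3 key is the
# urlsafe base64 of the SHA-256 digest of the serialized event, so identical hits land on
# the same object and writes are effectively idempotent.
def _example_event_key(event_json: str) -> str:
    digest = hashlib.sha256(event_json.encode()).digest()
    return base64.urlsafe_b64encode(digest).decode("utf-8")
# _example_event_key(json.dumps({"analyzer_name": "demo", "nodes": "[]", "edges": "[]"}))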
|
from __future__ import annotations
import base64
import hashlib
import inspect
import json
import os
import sys
import traceback
from collections import defaultdict
from logging import Logger
from multiprocessing import Pipe, Process
from multiprocessing.connection import Connection
from multiprocessing.pool import ThreadPool
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
Iterator,
List,
Mapping,
Optional,
cast,
)
import boto3
import grapl_analyzerlib.counters # noqa: F401
from analyzer_executor_lib.redis_cache import EitherCache, construct_redis_client
from grapl_analyzerlib.analyzer import Analyzer
from grapl_analyzerlib.execution import ExecutionComplete, ExecutionFailed, ExecutionHit
from grapl_analyzerlib.grapl_client import GraphClient
from grapl_analyzerlib.nodes.base import BaseView
from grapl_analyzerlib.plugin_retriever import load_plugins
from grapl_analyzerlib.queryable import Queryable
from grapl_analyzerlib.subgraph_view import SubgraphView
from grapl_common.env_helpers import S3ResourceFactory
from grapl_common.grapl_logger import get_module_grapl_logger
from grapl_common.metrics.metric_reporter import MetricReporter, TagPair
from grapl_common.sqs.sqs_types import S3PutRecordDict, SQSMessageBody
from python_proto.pipeline import Metadata, OldEnvelope
if TYPE_CHECKING:
from mypy_boto3_s3 import S3ServiceResource
# Set up logger (this is for the whole file, including static methods)
LOGGER = get_module_grapl_logger()
# Set up plugins dir for models
MODEL_PLUGINS_DIR = os.getenv("MODEL_PLUGINS_DIR", "/tmp")
sys.path.insert(0, MODEL_PLUGINS_DIR)
# Ensure plugins dir exists
try:
directory = Path(MODEL_PLUGINS_DIR + "/model_plugins/")
directory.mkdir(parents=True, exist_ok=True)
except Exception as e:
LOGGER.error("Failed to create model plugins directory", e)
def verbose_cast_to_int(input: Optional[str]) -> Optional[int]:
if not input:
return None
try:
return int(input)
except (TypeError, ValueError):
raise ValueError(f"Couldn't cast this env variable into an int: {input}")
class AnalyzerExecutor:
# constants
CHUNK_SIZE_RETRY: int = 10
CHUNK_SIZE_DEFAULT: int = 100
def __init__(
self,
model_plugins_bucket: str,
analyzers_bucket: str,
analyzer_matched_subgraphs_bucket: str,
message_cache: EitherCache,
hit_cache: EitherCache,
chunk_size: int,
logger: Logger,
metric_reporter: MetricReporter,
) -> None:
self.model_plugins_bucket = model_plugins_bucket
self.analyzers_bucket = analyzers_bucket
self.analyzer_matched_subgraphs_bucket = analyzer_matched_subgraphs_bucket
self.message_cache = message_cache
self.hit_cache = hit_cache
self.chunk_size = chunk_size
self.logger = logger
self.metric_reporter = metric_reporter
@classmethod
def from_env(cls, env: Optional[Mapping[str, str]] = None) -> AnalyzerExecutor:
env = env or os.environ
# If we're retrying, change the chunk size
is_retry = bool(env.get("IS_RETRY", False))
if is_retry:
chunk_size = cls.CHUNK_SIZE_RETRY
else:
chunk_size = cls.CHUNK_SIZE_DEFAULT
# Set up message cache
messagecache_addr = env.get("MESSAGECACHE_ADDR")
messagecache_port: Optional[int] = verbose_cast_to_int(
env.get("MESSAGECACHE_PORT")
)
message_cache = construct_redis_client(messagecache_addr, messagecache_port)
# Set up hit cache
hitcache_addr = env.get("HITCACHE_ADDR")
hitcache_port: Optional[int] = verbose_cast_to_int(env.get("HITCACHE_PORT"))
hit_cache = construct_redis_client(hitcache_addr, hitcache_port)
metric_reporter = MetricReporter.create("analyzer-executor")
model_plugins_bucket = env["GRAPL_MODEL_PLUGINS_BUCKET"]
analyzers_bucket = env["GRAPL_ANALYZERS_BUCKET"]
analyzer_matched_subgraphs_bucket = env[
"GRAPL_ANALYZER_MATCHED_SUBGRAPHS_BUCKET"
]
return AnalyzerExecutor(
model_plugins_bucket=model_plugins_bucket,
analyzers_bucket=analyzers_bucket,
analyzer_matched_subgraphs_bucket=analyzer_matched_subgraphs_bucket,
message_cache=message_cache,
hit_cache=hit_cache,
chunk_size=chunk_size,
logger=LOGGER,
metric_reporter=metric_reporter,
)
def check_caches(
self, file_hash: str, msg_id: str, node_key: str, analyzer_name: str
) -> bool:
with self.metric_reporter.histogram_ctx("analyzer-executor.check_caches"):
if self.check_msg_cache(file_hash, node_key, msg_id):
self.logger.debug("cache hit - already processed")
return True
if self.check_hit_cache(analyzer_name, node_key):
self.logger.debug("cache hit - already matched")
return True
return False
def to_event_hash(self, components: Iterable[str]) -> str:
joined = ",".join(components)
event_hash = hashlib.sha256(joined.encode()).hexdigest()
return event_hash
def check_msg_cache(self, file: str, node_key: str, msg_id: str) -> bool:
event_hash = self.to_event_hash((file, node_key, msg_id))
return bool(self.message_cache.get(event_hash))
def update_msg_cache(self, file: str, node_key: str, msg_id: str) -> None:
event_hash = self.to_event_hash((file, node_key, msg_id))
self.message_cache.set(event_hash, "1")
def check_hit_cache(self, file: str, node_key: str) -> bool:
event_hash = self.to_event_hash((file, node_key))
return bool(self.hit_cache.get(event_hash))
def update_hit_cache(self, file: str, node_key: str) -> None:
event_hash = self.to_event_hash((file, node_key))
self.hit_cache.set(event_hash, "1")
async def handle_events(self, events: SQSMessageBody, context: Any) -> None:
# Parse sns message
self.logger.debug(f"handling events: {events} context: {context}")
client = GraphClient()
s3 = S3ResourceFactory(boto3).from_env()
load_plugins(
self.model_plugins_bucket,
s3.meta.client,
os.path.abspath(MODEL_PLUGINS_DIR),
)
for event in events["Records"]:
data = parse_s3_event(s3, event)
# FIXME: this code assumes inner_message is json
envelope = OldEnvelope.deserialize(data)
message = json.loads(envelope.inner_message)
LOGGER.info(f'Executing Analyzer: {message["key"]}')
with self.metric_reporter.histogram_ctx(
"analyzer-executor.download_s3_file"
):
analyzer = download_s3_file(
s3,
self.analyzers_bucket,
message["key"],
).decode("utf8")
analyzer_name = message["key"].split("/")[-2]
subgraph = SubgraphView.from_proto(client, bytes(message["subgraph"]))
# TODO: Validate signature of S3 file
LOGGER.info(f"event {event} {envelope.metadata}")
rx: Connection
tx: Connection
rx, tx = Pipe(duplex=False)
p = Process(
target=self.execute_file,
args=(analyzer_name, analyzer, subgraph, tx, "", self.chunk_size),
)
p.start()
for exec_hit in self.poll_process(rx=rx, analyzer_name=analyzer_name):
with self.metric_reporter.histogram_ctx(
"analyzer-executor.emit_event.ms",
(TagPair("analyzer_name", exec_hit.analyzer_name),),
):
emit_event(
self.analyzer_matched_subgraphs_bucket,
s3,
exec_hit,
envelope.metadata,
)
self.update_msg_cache(analyzer, exec_hit.root_node_key, message["key"])
self.update_hit_cache(analyzer_name, exec_hit.root_node_key)
p.join()
def poll_process(
self,
rx: Connection,
analyzer_name: str,
) -> Iterator[ExecutionHit]:
"""
Keep polling the spawned Process, and yield any ExecutionHits.
(This will probably disappear if Analyzers move to Docker images.)
"""
t = 0
while True:
p_res = rx.poll(timeout=5)
if not p_res:
t += 1
LOGGER.info(
f"Analyzer {analyzer_name} polled for for {t * 5} seconds without result"
)
continue
result: Optional[Any] = rx.recv()
if isinstance(result, ExecutionComplete):
self.logger.info(f"Analyzer {analyzer_name} execution complete")
return
# emit any hits to an S3 bucket
if isinstance(result, ExecutionHit):
self.logger.info(
f"Analyzer {analyzer_name} emitting event for:"
f"{result.analyzer_name} {result.root_node_key}"
)
yield result
assert not isinstance(
result, ExecutionFailed
), f"Analyzer {analyzer_name} failed."
def exec_analyzers(
self,
dg_client: GraphClient,
file: str,
msg_id: str,
nodes: List[BaseView],
analyzers: Dict[str, Analyzer],
sender: Any,
) -> None:
if not analyzers:
self.logger.warning("Received empty dict of analyzers")
return
if not nodes:
self.logger.warning("Received empty array of nodes")
for node in nodes:
querymap: Dict[str, List[Queryable]] = defaultdict(list)
for an_name, analyzer in analyzers.items():
if self.check_caches(file, msg_id, node.node_key, an_name):
continue
queries = analyzer.get_queries()
if isinstance(queries, list) or isinstance(queries, tuple):
querymap[an_name].extend(queries)
else:
querymap[an_name].append(queries)
for an_name, queries in querymap.items():
analyzer = analyzers[an_name]
for query in queries:
# TODO: Whether it was a hit or not is a good Tag
tags = (TagPair("analyzer_name", an_name),)
with self.metric_reporter.histogram_ctx(
"analyzer-executor.query_first.ms", tags
):
response = query.query_first(
dg_client, contains_node_key=node.node_key
)
if response:
self.logger.debug(
f"Analyzer '{an_name}' received a hit, executing on_response()"
)
with self.metric_reporter.histogram_ctx(
"analyzer-executor.on_response.ms", tags
):
analyzer.on_response(response, sender)
def execute_file(
self,
name: str,
file: str,
graph: SubgraphView,
sender: Connection,
msg_id: str,
chunk_size: int,
) -> None:
try:
pool = ThreadPool(processes=4)
exec(file, globals())
client = GraphClient()
analyzers = get_analyzer_objects(client)
if not analyzers:
self.logger.warning(f"Got no analyzers for file: {name}")
self.logger.info(f"Executing analyzers: {[an for an in analyzers.keys()]}")
for nodes in chunker([n for n in graph.node_iter()], chunk_size):
self.logger.info(f"Querying {len(nodes)} nodes")
def exec_analyzer(
nodes: List[BaseView], sender: Connection
) -> List[BaseView]:
try:
self.exec_analyzers(
client, file, msg_id, nodes, analyzers, sender
)
return nodes
except Exception as e:
self.logger.error(traceback.format_exc())
self.logger.error(
f"Execution of {name} failed with {e} {e.args}"
)
sender.send(ExecutionFailed())
raise
pool.apply_async(exec_analyzer, args=(nodes, sender))
pool.close()
pool.join()
sender.send(ExecutionComplete())
except Exception as e:
self.logger.error(traceback.format_exc())
self.logger.error(f"Execution of {name} failed with {e} {e.args}")
sender.send(ExecutionFailed())
raise
def parse_s3_event(s3: S3ServiceResource, event: S3PutRecordDict) -> bytes:
try:
bucket = event["s3"]["bucket"]["name"]
key = event["s3"]["object"]["key"]
except KeyError:
LOGGER.error("Could not parse s3 event: {}", exc_info=True)
raise
return download_s3_file(s3, bucket, key)
def download_s3_file(s3: S3ServiceResource, bucket: str, key: str) -> bytes:
obj = s3.Object(bucket, key)
return cast(bytes, obj.get()["Body"].read())
def is_analyzer(analyzer_name: str, analyzer_cls: type) -> bool:
if analyzer_name == "Analyzer": # This is the base class
return False
return (
hasattr(analyzer_cls, "get_queries")
and hasattr(analyzer_cls, "build")
and hasattr(analyzer_cls, "on_response")
)
def get_analyzer_objects(dgraph_client: GraphClient) -> Dict[str, Analyzer]:
clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
return {
an[0]: an[1].build(dgraph_client)
for an in clsmembers
if is_analyzer(an[0], an[1])
}
def chunker(seq: List[BaseView], size: int) -> List[List[BaseView]]:
return [seq[pos : pos + size] for pos in range(0, len(seq), size)]
def emit_event(
analyzer_matched_subgraphs_bucket: str,
s3: S3ServiceResource,
event: ExecutionHit,
metadata: Metadata,
) -> None:
LOGGER.info(f"emitting event for: {event.analyzer_name, event.nodes}")
meta_dict = {
"trace_id": str(metadata.trace_id),
}
event_s = json.dumps(
{
"nodes": json.loads(event.nodes),
"edges": json.loads(event.edges),
"analyzer_name": event.analyzer_name,
"risk_score": event.risk_score,
"lenses": event.lenses,
"risky_node_keys": event.risky_node_keys,
"metadata": meta_dict,
}
)
event_hash = hashlib.sha256(event_s.encode())
key = base64.urlsafe_b64encode(event_hash.digest()).decode("utf-8")
obj = s3.Object(analyzer_matched_subgraphs_bucket, key)
obj.put(Body=event_s.encode("utf-8"))
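# Tiny demonstration (guarded so it only runs when this file is executed directly, which
# the service itself never does) of the chunking that execute_file uses to feed nodes to
# the thread pool. chunker is annotated for BaseView but is duck-typed, so plain ints work.
if __name__ == "__main__":
    print(chunker(list(range(7)), 3))  # -> [[0, 1, 2], [3, 4, 5], [6]]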
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' execute.py '''
import contextlib
import os
import subprocess
import shlex
import tarfile
import tempfile
import traceback
from heron.common.src.python.utils.log import Log
from heron.tools.cli.src.python.result import SimpleResult, ProcessResult, Status
from heron.common.src.python import pex_loader
from heron.tools.cli.src.python import opts
from heron.tools.cli.src.python import jars
from heron.tools.common.src.python.utils import config
################################################################################
def heron_class(class_name, lib_jars, extra_jars=None, args=None, java_defines=None):
'''
Execute a heron class given the args and the jars needed for class path
:param class_name:
:param lib_jars:
:param extra_jars:
:param args:
:param java_defines:
:return:
'''
# default optional params to empty list if not provided
if extra_jars is None:
extra_jars = []
if args is None:
args = []
if java_defines is None:
java_defines = []
# Format all java -D options that need to be passed while running
# the class locally.
java_opts = ['-D' + opt for opt in java_defines]
java_path = config.get_java_path()
if java_path is None:
err_context = "Unable to find java command"
return SimpleResult(Status.InvocationError, err_context)
# Construct the command line for the sub process to run
# Because of the way Python execute works,
# the java opts must be passed as part of the list
all_args = [java_path, "-client", "-Xmx1g"] + \
java_opts + \
["-cp", config.get_classpath(extra_jars + lib_jars)]
all_args += [class_name] + list(args)
# set heron_config environment variable
heron_env = os.environ.copy()
heron_env['HERON_OPTIONS'] = opts.get_heron_config()
# print the verbose message
Log.debug("Invoking class using command: `%s`", ' '.join(shlex.quote(a) for a in all_args))
Log.debug("Heron options: {%s}", str(heron_env["HERON_OPTIONS"]))
# invoke the command with subprocess and print error message, if any
# pylint: disable=consider-using-with
process = subprocess.Popen(all_args, env=heron_env, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=True, bufsize=1)
# stdout message has the information Java program sends back
# stderr message has extra information, such as debugging message
return ProcessResult(process)
def heron_tar(class_name, topology_tar, arguments, tmpdir_root, java_defines):
'''
:param class_name:
:param topology_tar:
:param arguments:
:param tmpdir_root:
:param java_defines:
:return:
'''
# Extract tar to a tmp folder.
tmpdir = tempfile.mkdtemp(dir=tmpdir_root, prefix='tmp')
with contextlib.closing(tarfile.open(topology_tar)) as tar:
tar.extractall(path=tmpdir)
# A tar generated by pants has all dependency jars under libs/
# in addition to the topology jar at top level. Pants keeps
# filename for jar and tar the same except for extension.
topology_jar = os.path.basename(topology_tar).replace(".tar.gz", "").replace(".tar", "") + ".jar"
extra_jars = [
os.path.join(tmpdir, topology_jar),
os.path.join(tmpdir, "*"),
os.path.join(tmpdir, "libs/*")
]
lib_jars = config.get_heron_libs(jars.topology_jars())
# Now execute the class
return heron_class(class_name, lib_jars, extra_jars, arguments, java_defines)
def heron_pex(topology_pex, topology_class_name, args=None):
"""Use a topology defined in a PEX."""
Log.debug("Importing %s from %s", topology_class_name, topology_pex)
if topology_class_name == '-':
# loading topology by running its main method (if __name__ == "__main__")
heron_env = os.environ.copy()
heron_env['HERON_OPTIONS'] = opts.get_heron_config()
cmd = [topology_pex]
if args is not None:
cmd.extend(args)
Log.debug("Invoking class using command: ``%s''", ' '.join(cmd))
Log.debug('Heron options: {%s}', str(heron_env['HERON_OPTIONS']))
# invoke the command with subprocess and print error message, if any
# pylint: disable=consider-using-with
process = subprocess.Popen(cmd, env=heron_env, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=True, bufsize=1)
# pylint: disable=fixme
# todo(rli): improve python topology submission workflow
return ProcessResult(process)
try:
# loading topology from Topology's subclass (no main method)
# to support specifying the name of topology
Log.debug("args: %s", args)
if args is not None and isinstance(args, (list, tuple)) and len(args) > 0:
opts.set_config('cmdline.topology.name', args[0])
os.environ["HERON_OPTIONS"] = opts.get_heron_config()
Log.debug("Heron options: {%s}", os.environ["HERON_OPTIONS"])
pex_loader.load_pex(topology_pex)
topology_class = pex_loader.import_and_get_class(topology_pex, topology_class_name)
topology_class.write()
return SimpleResult(Status.Ok)
except Exception as ex:
Log.debug(traceback.format_exc())
err_context = f"Topology {topology_class_name} failed to be loaded from the given pex: {ex}"
return SimpleResult(Status.HeronError, err_context)
return None
# pylint: disable=superfluous-parens
def heron_cpp(topology_binary, args=None):
Log.debug("Executing %s", topology_binary)
heron_env = os.environ.copy()
heron_env['HERON_OPTIONS'] = opts.get_heron_config()
cmd = [topology_binary]
if args is not None:
cmd.extend(args)
Log.debug("Invoking binary using command: ``%s''", ' '.join(cmd))
Log.debug('Heron options: {%s}', str(heron_env['HERON_OPTIONS']))
print(f"""Invoking class using command: ``{" ".join(cmd)}''""")
print(f"Heron options: {str(heron_env["HERON_OPTIONS"])}")
# invoke the command with subprocess and print error message, if any
# pylint: disable=consider-using-with
proc = subprocess.Popen(cmd, env=heron_env, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=True, bufsize=1)
return ProcessResult(proc)
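# Illustrative helper, not used by the CLI: it spells out the naming convention heron_tar
# relies on above, where the topology jar inside the tar shares the tar's base name minus
# the .tar/.tar.gz extension. The sample path in the comment is made up.
def _example_topology_jar_name(topology_tar):
  base = os.path.basename(topology_tar)
  return base.replace(".tar.gz", "").replace(".tar", "") + ".jar"
# _example_topology_jar_name("/tmp/ExampleTopology.tar.gz") -> "ExampleTopology.jar"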
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' execute.py '''
import contextlib
import os
import subprocess
import shlex
import tarfile
import tempfile
import traceback
from heron.common.src.python.utils.log import Log
from heron.tools.cli.src.python.result import SimpleResult, ProcessResult, Status
from heron.common.src.python import pex_loader
from heron.tools.cli.src.python import opts
from heron.tools.cli.src.python import jars
from heron.tools.common.src.python.utils import config
################################################################################
def heron_class(class_name, lib_jars, extra_jars=None, args=None, java_defines=None):
'''
Execute a heron class given the args and the jars needed for class path
:param class_name:
:param lib_jars:
:param extra_jars:
:param args:
:param java_defines:
:return:
'''
# default optional params to empty list if not provided
if extra_jars is None:
extra_jars = []
if args is None:
args = []
if java_defines is None:
java_defines = []
# Format all java -D options that need to be passed while running
# the class locally.
java_opts = ['-D' + opt for opt in java_defines]
java_path = config.get_java_path()
if java_path is None:
err_context = "Unable to find java command"
return SimpleResult(Status.InvocationError, err_context)
# Construct the command line for the sub process to run
# Because of the way Python execute works,
# the java opts must be passed as part of the list
all_args = [java_path, "-client", "-Xmx1g"] + \
java_opts + \
["-cp", config.get_classpath(extra_jars + lib_jars)]
all_args += [class_name] + list(args)
# set heron_config environment variable
heron_env = os.environ.copy()
heron_env['HERON_OPTIONS'] = opts.get_heron_config()
# print the verbose message
Log.debug("Invoking class using command: `%s`", ' '.join(shlex.quote(a) for a in all_args))
Log.debug("Heron options: {%s}", str(heron_env["HERON_OPTIONS"]))
# invoke the command with subprocess and print error message, if any
# pylint: disable=consider-using-with
process = subprocess.Popen(all_args, env=heron_env, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=True, bufsize=1)
# stdout message has the information Java program sends back
# stderr message has extra information, such as debugging message
return ProcessResult(process)
def heron_tar(class_name, topology_tar, arguments, tmpdir_root, java_defines):
'''
Execute a heron class from a topology tar archive.
:param class_name: fully qualified name of the Java class to run
:param topology_tar: path to the topology .tar or .tar.gz archive
:param arguments: command line arguments passed on to the Java class
:param tmpdir_root: directory under which a temporary extraction folder is created
:param java_defines: java -D definitions, given without the leading "-D"
:return: the result of running the class via heron_class
'''
# Extract tar to a tmp folder.
tmpdir = tempfile.mkdtemp(dir=tmpdir_root, prefix='tmp')
with contextlib.closing(tarfile.open(topology_tar)) as tar:
tar.extractall(path=tmpdir)
# A tar generated by pants has all dependency jars under libs/
# in addition to the topology jar at top level. Pants keeps
# filename for jar and tar the same except for extension.
topology_jar = os.path.basename(topology_tar).replace(".tar.gz", "").replace(".tar", "") + ".jar"
extra_jars = [
os.path.join(tmpdir, topology_jar),
os.path.join(tmpdir, "*"),
os.path.join(tmpdir, "libs/*")
]
lib_jars = config.get_heron_libs(jars.topology_jars())
# Now execute the class
return heron_class(class_name, lib_jars, extra_jars, arguments, java_defines)
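# For reference, heron_tar() above expects an archive laid out as described in the
# pants-related comment inside the function (file names here are placeholders):
#
#   my_topology.tar.gz
#     my_topology.jar    <- topology jar at the top level, same basename as the tar
#     libs/*.jar         <- dependency jars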
def heron_pex(topology_pex, topology_class_name, args=None):
"""Use a topology defined in a PEX."""
Log.debug("Importing %s from %s", topology_class_name, topology_pex)
if topology_class_name == '-':
# loading topology by running its main method (if __name__ == "__main__")
heron_env = os.environ.copy()
heron_env['HERON_OPTIONS'] = opts.get_heron_config()
cmd = [topology_pex]
if args is not None:
cmd.extend(args)
Log.debug("Invoking class using command: ``%s''", ' '.join(cmd))
Log.debug('Heron options: {%s}', str(heron_env['HERON_OPTIONS']))
# invoke the command with subprocess and print error message, if any
# pylint: disable=consider-using-with
process = subprocess.Popen(cmd, env=heron_env, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=True, bufsize=1)
# pylint: disable=fixme
# todo(rli): improve python topology submission workflow
return ProcessResult(process)
try:
# loading topology from Topology's subclass (no main method)
# to support specifying the name of topology
Log.debug("args: %s", args)
if args is not None and isinstance(args, (list, tuple)) and len(args) > 0:
opts.set_config('cmdline.topology.name', args[0])
os.environ["HERON_OPTIONS"] = opts.get_heron_config()
Log.debug("Heron options: {%s}", os.environ["HERON_OPTIONS"])
pex_loader.load_pex(topology_pex)
topology_class = pex_loader.import_and_get_class(topology_pex, topology_class_name)
topology_class.write()
return SimpleResult(Status.Ok)
except Exception as ex:
Log.debug(traceback.format_exc())
err_context = f"Topology {topology_class_name} failed to be loaded from the given pex: {ex}"
return SimpleResult(Status.HeronError, err_context)
return None
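# Illustrative usage sketch only; the pex path, module path and topology name below
# are placeholders.
#
#   # Run the pex's own main method (topology defines if __name__ == "__main__"):
#   heron_pex("dist/my_topology.pex", "-", args=["--some-flag"])
#
#   # Load a Topology subclass and write it out; args[0] becomes the topology name:
#   heron_pex("dist/my_topology.pex", "mypackage.topology.MyTopology", ["my-topology"])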
# pylint: disable=superfluous-parens
def heron_cpp(topology_binary, args=None):
Log.debug("Executing %s", topology_binary)
heron_env = os.environ.copy()
heron_env['HERON_OPTIONS'] = opts.get_heron_config()
cmd = [topology_binary]
if args is not None:
cmd.extend(args)
Log.debug("Invoking binary using command: ``%s''", ' '.join(cmd))
Log.debug('Heron options: {%s}', str(heron_env['HERON_OPTIONS']))
print(f"""Invoking class using command: ``{' '.join(cmd)}''""")
print(f"Heron options: {str(heron_env['HERON_OPTIONS'])}")
# invoke the command with subprocess and print error message, if any
# pylint: disable=consider-using-with
proc = subprocess.Popen(cmd, env=heron_env, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=True, bufsize=1)
return ProcessResult(proc)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script runs tests on the system to check for compliance against different CIS Benchmarks.
No changes are made to system files by this script. Audit only.
License: MIT
"""
from argparse import ArgumentParser
from datetime import datetime
from time import sleep
import json, subprocess, pathlib
__author__ = 'Florian Köhler'
__version__ = '0.1.0'
__license__ = 'MIT'
def parse_cli():
parser = ArgumentParser(description='This script runs tests on the system to check for compliance against different CIS Benchmarks. No changes are made to system files by this script. Audit only.')
parser.add_argument('--version', action='version', version=__version__)
parser.add_argument('--benchmark', '-b', type=pathlib.Path, required=True, metavar='FILE', help='Path to benchmark file (Required).')
parser.add_argument('--level', '-l', type=int, choices=[1, 2], default=None, help='Run tests for the specified level only. Defaults to both.')
parser.add_argument('--include', '-i', default=None, metavar='LIST', help='Comma separated list of tests to include.')
parser.add_argument('--exclude', '-e', default=None, metavar='LIST', help='Comma separated list of tests to exclude.')
return parser.parse_args()
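# Example invocation (illustrative only; the script and benchmark file names are placeholders):
#   python3 cis_audit.py --benchmark benchmarks/example.json --level 1 --exclude 2.1.1,2.1.2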
class Recommendation:
def __init__(self, id, description, scored, level, type, test_input, expected_output):
self.id = id
self.description = description
self.scored = scored
self.level = level
self.type = type
self.test_input = test_input
self.expected_output = expected_output
self.error = None
self.passed = None
def match(self, level, include, exclude):
if not level or level in self.level:
return True
class Benchmark:
def __init__(self, args):
self.start_time = datetime.now()
self.recommendations = []
self.filtered_tests = []
self._total_number_tests = None
self._number_passed_tests = None
self._number_of_errors = None
self.test_types = {
"output_contains": self.test_output_contains,
}
print("Loading benchmark data...\n")
with open(args.benchmark, 'r') as benchmark_file:
data = benchmark_file.read()
benchmark_data = json.loads(data)
self.name = benchmark_data['name']
self.version = benchmark_data['version']
for recommendation in benchmark_data['recommendations']:
self.add_recommendation(recommendation)
for recommendation in self.recommendations:
if recommendation.type != 'chapter' and recommendation.scored:
if recommendation.match(args.level, args.include, args.exclude):
self.filtered_tests.append(recommendation)
self.execute_tests()
def add_recommendation(self, recommendation):
self.recommendations.append(Recommendation(recommendation['id'], recommendation['description'], recommendation['scored'], recommendation['level'], recommendation['type'], recommendation['test_input'], recommendation['expected_output']))
@property
def total_number_tests(self):
if not self._total_number_tests:
self._total_number_tests = len(self.filtered_tests)
return self._total_number_tests
@property
def number_passed_tests(self):
if not self._number_passed_tests:
passed_tests = []
for test in self.filtered_tests:
if test.passed:
passed_tests.append(test)
self._number_passed_tests = len(passed_tests)
return self._number_passed_tests
@property
def number_of_errors(self):
if not self._number_of_errors:
error_tests = []
for test in self.filtered_tests:
if test.error:
error_tests.append(test)
self._number_of_errors = len(error_tests)
return self._number_of_errors
def execute_tests(self):
print(f"0 of {self.total_number_tests} tests completed.", end="\r")
for index, test in enumerate(self.filtered_tests, start=1):
execute_test = self.test_types.get(test.type)
execute_test(test)
if index < self.total_number_tests:
print(f"{index} of {self.total_number_tests} tests completed.", end="\r")
else:
print(f"{index} of {self.total_number_tests} tests completed.\n")
self.output_results()
def output_results(self):
heading = f"CIS {self.name} Benchmark v{self.version} Results"
heading_separator = '-' * len(heading)
id_padding = len(max([str(test.id) for test in self.filtered_tests], key = len))
desc_padding = len(max([test.description for test in self.filtered_tests], key = len))
result_heading = 'ID'.ljust(id_padding, ' ') + ' ' + 'Description'.ljust(desc_padding, ' ') + ' Scored' + ' Level' + ' Result'
result_separator = '--'.ljust(id_padding, ' ') + ' ' + '-----------'.ljust(desc_padding, ' ') + ' ------' + ' -----' + ' ------'
print(heading)
print(heading_separator)
print(result_heading)
print(result_separator)
print("")
for test in self.filtered_tests:
print(f"{test.id.ljust(id_padding, ' ')} {test.description.ljust(desc_padding, ' ')} {'Yes ' if test.scored else 'No '} {'1, 2 ' if len(test.level) == 2 else str(test.level[0]).ljust(5, ' ')} {'Error ' if test.error else 'Pass ' if test.passed else 'Fail '}")
print("")
print(f"Passed {self.number_passed_tests} of {self.total_number_tests} tests in x seconds (x Skipped, {self.number_of_errors} Errors)")
def test_output_contains(self, test):
command = test.test_input.split(' ')
try:
output = subprocess.check_output(command, universal_newlines=True)
if test.expected_output in output:
test.passed = True
else:
test.passed = False
except:
test.error = True
test.passed = False
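# Shape of the benchmark JSON consumed by Benchmark above, inferred from the fields read
# in __init__/add_recommendation; all values are illustrative only:
#
#   {
#     "name": "Example Distribution",
#     "version": "1.0.0",
#     "recommendations": [
#       {
#         "id": "1.1.1",
#         "description": "Ensure example kernel parameter is set",
#         "scored": true,
#         "level": [1, 2],
#         "type": "output_contains",
#         "test_input": "sysctl kernel.example",
#         "expected_output": "kernel.example = 1"
#       }
#     ]
#   }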
if __name__ == '__main__':
args = parse_cli()
Benchmark(args)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script runs tests on the system to check for compliance against different CIS Benchmarks.
No changes are made to system files by this script. Audit only.
License: MIT
"""
from argparse import ArgumentParser
from datetime import datetime
from time import sleep
import json, subprocess, pathlib
__author__ = 'Florian Köhler'
__version__ = '0.1.0'
__license__ = 'MIT'
def parse_cli():
parser = ArgumentParser(description='This script runs tests on the system to check for compliance against different CIS Benchmarks. No changes are made to system files by this script. Audit only.')
parser.add_argument('--version', action='version', version=__version__)
parser.add_argument('--benchmark', '-b', type=pathlib.Path, required=True, metavar='FILE', help='Path to benchmark file (Required).')
parser.add_argument('--level', '-l', type=int, choices=[1, 2], default=None, help='Run tests for the specified level only. Defaults to both.')
parser.add_argument('--include', '-i', default=None, metavar='LIST', help='Comma separated list of tests to include.')
parser.add_argument('--exclude', '-e', default=None, metavar='LIST', help='Comma separated list of tests to exclude.')
return parser.parse_args()
class Recommendation:
def __init__(self, id, description, scored, level, type, test_input, expected_output):
self.id = id
self.description = description
self.scored = scored
self.level = level
self.type = type
self.test_input = test_input
self.expected_output = expected_output
self.error = None
self.passed = None
def match(self, level, include, exclude):
if not level or level in self.level:
return True
class Benchmark:
def __init__(self, args):
self.start_time = datetime.now()
self.recommendations = []
self.filtered_tests = []
self._total_number_tests = None
self._number_passed_tests = None
self._number_of_errors = None
self.test_types = {
"output_contains": self.test_output_contains,
}
print("Loading benchmark data...\n")
with open(args.benchmark, 'r') as benchmark_file:
data = benchmark_file.read()
benchmark_data = json.loads(data)
self.name = benchmark_data['name']
self.version = benchmark_data['version']
for recommendation in benchmark_data['recommendations']:
self.add_recommendation(recommendation)
for recommendation in self.recommendations:
if recommendation.type != 'chapter' and recommendation.scored:
if recommendation.match(args.level, args.include, args.exclude):
self.filtered_tests.append(recommendation)
self.execute_tests()
def add_recommendation(self, recommendation):
self.recommendations.append(Recommendation(recommendation['id'], recommendation['description'], recommendation['scored'], recommendation['level'], recommendation['type'], recommendation['test_input'], recommendation['expected_output']))
@property
def total_number_tests(self):
if not self._total_number_tests:
self._total_number_tests = len(self.filtered_tests)
return self._total_number_tests
@property
def number_passed_tests(self):
if not self._number_passed_tests:
passed_tests = []
for test in self.filtered_tests:
if test.passed:
passed_tests.append(test)
self._number_passed_tests = len(passed_tests)
return self._number_passed_tests
@property
def number_of_errors(self):
if not self._number_of_errors:
error_tests = []
for test in self.filtered_tests:
if test.error:
error_tests.append(test)
self._number_of_errors = len(error_tests)
return self._number_of_errors
def execute_tests(self):
print(f"0 of {self.total_number_tests} tests completed.", end="\r")
for index, test in enumerate(self.filtered_tests, start=1):
execute_test = self.test_types.get(test.type)
execute_test(test)
if index < self.total_number_tests:
print(f"{index} of {self.total_number_tests} tests completed.", end="\r")
else:
print(f"{index} of {self.total_number_tests} tests completed.\n")
self.output_results()
def output_results(self):
heading = f"CIS {self.name} Benchmark v{self.version} Results"
heading_separator = '-' * len(heading)
id_padding = len(max([str(test.id) for test in self.filtered_tests], key = len))
desc_padding = len(max([test.description for test in self.filtered_tests], key = len))
result_heading = 'ID'.ljust(id_padding, ' ') + ' ' + 'Description'.ljust(desc_padding, ' ') + ' Scored' + ' Level' + ' Result'
result_separator = '--'.ljust(id_padding, ' ') + ' ' + '-----------'.ljust(desc_padding, ' ') + ' ------' + ' -----' + ' ------'
print(heading)
print(heading_separator)
print(result_heading)
print(result_separator)
print("")
for test in self.filtered_tests:
print(f"{test.id.ljust(id_padding, ' ')} {test.description.ljust(desc_padding, ' ')} {'Yes ' if test.scored else 'No '} {'1, 2 ' if len(test.level) == 2 else str(test.level[0]).ljust(5, ' ')} {'Error ' if test.error else 'Pass ' if test.passed else 'Fail '}")
print("")
print(f"Passed {self.number_passed_tests} of {self.total_number_tests} tests in x seconds (x Skipped, {self.number_of_errors} Errors)")
def test_output_contains(self, test):
command = test.test_input.split(' ')
try:
output = subprocess.check_output(command, universal_newlines=True)
if test.expected_output in output:
test.passed = True
else:
test.passed = False
except:
test.error = True
test.passed = False
if __name__ == '__main__':
args = parse_cli()
Benchmark(args)
|
#!/usr/bin/env python
"""Provenance post processing script for OSA pipeline."""
import copy
import logging
import shutil
import sys
from pathlib import Path, PurePath
import yaml
from osa.configs import options
from osa.configs.config import cfg
from osa.provenance.capture import get_activity_id, get_file_hash
from osa.provenance.io import provdoc2graph, provdoc2json, provlist2provdoc, read_prov
from osa.provenance.utils import get_log_config
from osa.utils.cliopts import provprocessparsing
from osa.utils.logging import myLogger
__all__ = ["copy_used_file", "parse_lines_log", "parse_lines_run", "produce_provenance"]
log = myLogger(logging.getLogger())
provconfig = yaml.safe_load(get_log_config())
LOG_FILENAME = provconfig["handlers"]["provHandler"]["filename"]
PROV_PREFIX = provconfig["PREFIX"]
PATH_DL1 = cfg.get("LST1", "DL1_DIR")
PATH_DL2 = cfg.get("LST1", "DL2_DIR")
def copy_used_file(src, outdir):
"""
Copy file used in process.
Parameters
----------
src
outdir
"""
# check src file exists
if not Path(src).is_file():
log.warning(f"{src} file cannot be accessed")
hash_src = get_file_hash(src, buffer="content")
filename = PurePath(src).name
destpath = Path(outdir) / filename
hash_out = ""
# get hash and new name
if destpath.exists():
hash_out = get_file_hash(str(destpath), buffer="content")
filename = filename + "_"
destpath = Path(outdir) / filename
# try copy file
if hash_src != hash_out:
try:
shutil.copyfile(src, str(destpath))
log.info(f"copying {destpath}")
except Exception as ex:
log.warning(f"could not copy {src} file into {destpath}: {ex}")
def parse_lines_log(filter_cut, calib_runs, run_number):
"""
Filter content in log file to produce a run/process wise session log.
Parameters
----------
filter_cut
calib_runs
run_number
Returns
-------
filtered
"""
filtered = []
if not filter_cut:
filter_cut = "all"
cuts = {
"calibration": ["drs4_pedestal", "calibrate_charge"],
"r0_to_dl1": ["r0_to_dl1", "dl1ab"],
"dl1_to_dl2": ["dl1_datacheck", "dl1_to_dl2"],
}
cuts["all"] = cuts["calibration"] + cuts["r0_to_dl1"] + cuts["dl1_to_dl2"]
with open(LOG_FILENAME, "r") as f:
for line in f.readlines():
ll = line.split(PROV_PREFIX)
if len(ll) != 3:
log.warning(
f"format {PROV_PREFIX} mismatch in log file {LOG_FILENAME}\n{line}"
)
continue
prov_str = ll.pop()
prov_dict = yaml.safe_load(prov_str)
keep = False
session_tag = prov_dict.get("session_tag", "0:0")
session_id = prov_dict.get("session_id", False)
tag_activity, tag_run = session_tag.split(":")
# filter by run and calib runs
if tag_run in [run_number, calib_runs]:
keep = True
# filter by activity
if tag_activity not in cuts[filter_cut]:
keep = False
# only keep first session start
if session_id and (tag_run in [run_number, calib_runs]):
keep = True
# make session starts with calibration
if session_id and filter_cut == "all" and not filtered:
prov_dict["session_id"] = f"{options.date}{run_number}"
prov_dict["name"] = run_number
prov_dict["observation_run"] = run_number
line = f"{ll[0]}{PROV_PREFIX}{ll[1]}{PROV_PREFIX}{prov_dict}\n"
# remove parallel sessions
if session_id and filtered:
keep = False
if keep:
filtered.append(line)
return filtered
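# Illustrative shape of a line in LOG_FILENAME as parsed above: three chunks joined by
# PROV_PREFIX, the last one being a YAML mapping ("<PREFIX>", "<timestamp>", "<origin>"
# and all payload values are placeholders):
#
#   <timestamp><PREFIX><origin><PREFIX>{"session_tag": "r0_to_dl1:01234", "session_id": "...", ...}
#
# "session_tag" is "<activity>:<run>", and lines carrying "session_id" mark a session start.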
def parse_lines_run(filter_step, prov_lines, out):
"""
Process provenance info to reduce session at run/process wise scope.
Parameters
----------
filter_step
prov_lines
out
Returns
-------
working_lines
"""
size = 0
container = {}
working_lines = []
r0filepath_str = ""
dl1filepath_str = ""
dl2filepath_str = ""
mufilepath_str = ""
ckfilepath_str = ""
id_activity_run = ""
end_time_line = ""
osa_config_copied = False
for line in prov_lines:
# get info
remove = False
endTime = line.get("endTime", "")
session_id = line.get("session_id", "")
activity_id = line.get("activity_id", "")
filepath = line.get("filepath", "")
used_role = line.get("used_role", "")
generated_role = line.get("generated_role", "")
parameters = line.get("parameters", "")
name = line.get("name", "")
content_type = line.get("contentType", "")
used_id = line.get("used_id", "")
osa_cfg = line.get("config_file", "")
# filter grain
session_tag = line.get("session_tag", "0:0")
tag_activity, _ = session_tag.split(":")
if tag_activity != filter_step and not session_id:
continue
# remove subruns info
if name == "DL1CheckSubrunDataset":
ckfilepath_str = filepath
elif name == "DL1SubrunDataset":
dl1filepath_str = filepath
elif name == "DL2SubrunDataset":
dl2filepath_str = filepath
elif name == "MuonsSubrunDataset":
mufilepath_str = filepath
elif name == "R0SubrunDataset":
r0filepath_str = filepath
if "Subrun" in name or "subrun" in used_role or "subrun" in generated_role:
remove = True
if parameters and "ObservationSubRun" in parameters:
del line["parameters"]["ObservationSubRun"]
# remove sub-runs activities and info
if name == filter_step and not id_activity_run:
id_activity_run = get_activity_id()
if name in container or used_id in container:
remove = True
if parameters and "parameters" in container:
remove = True
if name:
container[name] = True
if used_id:
container[used_id] = True
if parameters:
container["parameters"] = True
if endTime:
remove = True
end_time_line = line
size += 1
# remove duplicated produced files
if generated_role in container:
remove = True
if name == "DL2MergedFile":
container[name] = True
if "merged" in generated_role:
container[generated_role] = True
if name == "DL1CheckHDF5File":
container[name] = True
if "DL1Check HDF5 file" in generated_role:
container[generated_role] = True
if name == "DL1CheckPDFFile":
container[name] = True
if "DL1Check PDF file" in generated_role:
container[generated_role] = True
# replace with new run-wise activity_id
if activity_id:
line["activity_id"] = id_activity_run
# copy used files not subruns not RFs not mergedDL2
if (
filepath
and content_type != "application/x-spss-sav"
and name != "DL2MergedFile"
and not name.startswith("DL1Check")
and not remove
):
copy_used_file(filepath, out)
if session_id and osa_cfg and not osa_config_copied:
copy_used_file(osa_cfg, out)
osa_config_copied = True
if not remove:
working_lines.append(line)
# append collections used and generated at endtime line of last activity
if end_time_line:
working_lines.append(end_time_line)
if r0filepath_str and filter_step == "r0_to_dl1":
r0_entity_id = get_file_hash(r0filepath_str + "r0", buffer="path")
r0filepath_str = r0filepath_str.replace(PurePath(r0filepath_str).name, "")
used = {"entity_id": r0_entity_id}
used.update({"name": "R0Collection"})
used.update({"type": "SetCollection"})
used.update({"size": size})
used.update({"filepath": r0filepath_str})
working_lines.append(used)
used = {"activity_id": id_activity_run}
used.update({"used_id": r0_entity_id})
used.update({"used_role": "R0 Collection"})
working_lines.append(used)
if dl1filepath_str:
dl1filepath_str = dl1filepath_str.replace(PurePath(dl1filepath_str).name, "")
dl1_entity_id = get_file_hash(dl1filepath_str + "dl1", buffer="path")
dl1 = {"entity_id": dl1_entity_id}
dl1.update({"name": "DL1Collection"})
dl1.update({"type": "SetCollection"})
dl1.update({"size": size})
dl1.update({"filepath": dl1filepath_str})
working_lines.append(dl1)
if mufilepath_str:
mufilepath_str = mufilepath_str.replace(PurePath(mufilepath_str).name, "")
mu_entity_id = get_file_hash(mufilepath_str + "muons", buffer="path")
muons = {"entity_id": mu_entity_id}
muons.update({"name": "MuonsCollection"})
muons.update({"type": "SetCollection"})
muons.update({"size": size})
muons.update({"filepath": mufilepath_str})
working_lines.append(muons)
if mufilepath_str and filter_step == "r0_to_dl1":
generated = {"activity_id": id_activity_run}
generated.update({"generated_id": mu_entity_id})
generated.update({"generated_role": "Muons Collection"})
working_lines.append(generated)
if dl1filepath_str and filter_step in ["r0_to_dl1", "dl1ab"]:
generated = {"activity_id": id_activity_run}
generated.update({"generated_id": dl1_entity_id})
generated.update({"generated_role": "DL1 Collection"})
working_lines.append(generated)
if dl1filepath_str and filter_step in ["dl1_to_dl2", "dl1ab"]:
used = {"activity_id": id_activity_run}
used.update({"used_id": dl1_entity_id})
used.update({"used_role": "DL1 Collection"})
working_lines.append(used)
if dl1filepath_str and filter_step == "dl1_datacheck":
used = {"activity_id": id_activity_run}
used.update({"used_id": dl1_entity_id})
used.update({"used_role": "DL1 Collection"})
working_lines.append(used)
if mufilepath_str and filter_step == "dl1_datacheck":
used = {"activity_id": id_activity_run}
used.update({"used_id": mu_entity_id})
used.update({"used_role": "Muons Collection"})
working_lines.append(used)
if ckfilepath_str and filter_step == "dl1_datacheck":
ckfilepath_str = ckfilepath_str.replace(PurePath(ckfilepath_str).name, "")
chk_entity_id = get_file_hash(ckfilepath_str + "check", buffer="path")
dl1check = {"entity_id": chk_entity_id}
dl1check.update({"name": "DL1CheckCollection"})
dl1check.update({"type": "SetCollection"})
dl1check.update({"size": size})
dl1check.update({"filepath": ckfilepath_str})
working_lines.append(dl1check)
generated = {"activity_id": id_activity_run}
generated.update({"generated_id": chk_entity_id})
generated.update({"generated_role": "DL1Checks Collection"})
working_lines.append(generated)
if dl2filepath_str and filter_step == "dl1_to_dl2":
dl2_entity_id = get_file_hash(dl2filepath_str + "dl2", buffer="path")
dl2filepath_str = dl2filepath_str.replace(PurePath(dl2filepath_str).name, "")
used = {"entity_id": dl2_entity_id}
used.update({"name": "DL2Collection"})
used.update({"type": "SetCollection"})
used.update({"size": size})
used.update({"filepath": dl2filepath_str})
working_lines.append(used)
used = {"activity_id": id_activity_run}
used.update({"generated_id": dl2_entity_id})
used.update({"generated_role": "DL2 Collection"})
working_lines.append(used)
else:
working_lines = []
return working_lines
def define_paths(grain, start_path, end_path, base_filename):
"""Define target folders according to granularity."""
paths = {}
# check destination folder exists
step_path = Path(start_path) / options.date / options.prod_id / end_path
if not step_path.exists():
log.error(f"Path {step_path} does not exist")
# make folder log/ if does not exist
paths["out_path"] = step_path / "log"
paths["out_path"].mkdir(parents=True, exist_ok=True)
# define paths for prov products
paths["log_path"] = paths["out_path"] / f"{grain}_{base_filename}.log"
paths["json_filepath"] = paths["out_path"] / f"{grain}_{base_filename}.json"
paths["graph_filepath"] = paths["out_path"] / f"{grain}_{base_filename}.pdf"
return paths
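# Example of the layout produced above for grain "r0_to_dl1" (every path component is a
# placeholder resolved from options/cfg at runtime):
#
#   <DL1_DIR>/<date>/<prod_id>/<dl1_prod_id>/log/r0_to_dl1_<run>_prov.log
#   <DL1_DIR>/<date>/<prod_id>/<dl1_prod_id>/log/r0_to_dl1_<run>_prov.json
#   <DL1_DIR>/<date>/<prod_id>/<dl1_prod_id>/log/r0_to_dl1_<run>_prov.pdf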
def produce_provenance_files(processed_lines, paths):
"""Create provenance products as JSON logs and graphs."""
with open(paths["log_path"], "w") as f:
for line in processed_lines:
f.write(f"{line}\n")
log.info(f"creating {paths['log_path']}")
provdoc = provlist2provdoc(processed_lines)
# make json
try:
provdoc2json(provdoc, str(paths["json_filepath"]))
log.info(f"creating {paths['json_filepath']}")
except Exception as ex:
log.exception(f"problem while creating json: {ex}")
# make graph
try:
provdoc2graph(provdoc, str(paths["graph_filepath"]), "pdf")
log.info(f"creating {paths['graph_filepath']}")
except Exception as ex:
log.exception(f"problem while creating graph: {ex}")
def produce_provenance(session_log_filename, base_filename):
"""
Create run-wise provenance products as JSON logs
and graphs according to granularity.
"""
if options.filter == "calibration" or not options.filter:
paths_calibration = define_paths(
"calibration_to_dl1", PATH_DL1, options.dl1_prod_id, base_filename
)
plines_drs4 = parse_lines_run(
"drs4_pedestal",
read_prov(filename=session_log_filename),
str(paths_calibration["out_path"]),
)
plines_calib = parse_lines_run(
"calibrate_charge",
read_prov(filename=session_log_filename),
str(paths_calibration["out_path"]),
)
calibration_lines = plines_drs4 + plines_calib[1:]
# TODO
# create calibration prov files only if filtering
if options.filter == "calibration":
pass
if options.filter == "r0_to_dl1" or not options.filter:
paths_r0_dl1 = define_paths(
"r0_to_dl1", PATH_DL1, options.dl1_prod_id, base_filename
)
plines_r0 = parse_lines_run(
"r0_to_dl1",
read_prov(filename=session_log_filename),
str(paths_r0_dl1["out_path"]),
)
plines_ab = parse_lines_run(
"dl1ab",
read_prov(filename=session_log_filename),
str(paths_r0_dl1["out_path"]),
)
dl1_lines = plines_r0 + plines_ab[1:]
# create r0_to_dl1 prov files only if filtering
if options.filter == "r0_to_dl1":
produce_provenance_files(plines_r0 + plines_ab[1:], paths_r0_dl1)
if options.filter == "dl1_to_dl2" or not options.filter:
paths_dl1_dl2 = define_paths(
"dl1_to_dl2", PATH_DL2, options.dl2_prod_id, base_filename
)
plines_check = parse_lines_run(
"dl1_datacheck",
read_prov(filename=session_log_filename),
str(paths_dl1_dl2["out_path"]),
)
plines_dl2 = parse_lines_run(
"dl1_to_dl2",
read_prov(filename=session_log_filename),
str(paths_dl1_dl2["out_path"]),
)
dl1_dl2_lines = plines_check + plines_dl2[1:]
# create dl1_to_dl2 prov files only if filtering
if options.filter == "dl1_to_dl2":
produce_provenance_files(plines_check + plines_dl2[1:], paths_dl1_dl2)
# create calibration_to_dl1 and calibration_to_dl2 prov files
if not options.filter:
calibration_to_dl1 = define_paths(
"calibration_to_dl1", PATH_DL1, options.dl1_prod_id, base_filename
)
calibration_to_dl2 = define_paths(
"calibration_to_dl2", PATH_DL2, options.dl2_prod_id, base_filename
)
calibration_to_dl1_lines = calibration_lines + dl1_lines[1:]
lines_dl1 = copy.deepcopy(calibration_to_dl1_lines)
calibration_to_dl2_lines = calibration_to_dl1_lines + dl1_dl2_lines[1:]
lines_dl2 = copy.deepcopy(calibration_to_dl2_lines)
produce_provenance_files(lines_dl1, calibration_to_dl1)
produce_provenance_files(lines_dl2, calibration_to_dl2)
def main():
"""Extract the provenance information."""
provprocessparsing()
# Logging
if options.verbose:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
# check LOG_FILENAME exists
if not Path(LOG_FILENAME).exists():
log.error(f"file {LOG_FILENAME} does not exist")
# check LOG_FILENAME is not empty
if not Path(LOG_FILENAME).stat().st_size:
log.warning(f"file {LOG_FILENAME} is empty")
sys.exit(1)
# build base_filename
base_filename = f"{options.run}_prov"
session_log_filename = f"{base_filename}.log"
# parse LOG_FILENAME content for a specific run / process
calib_runs = f"{options.drs4_pedestal_run_id}-{options.pedcal_run_id}"
parsed_content = parse_lines_log(options.filter, calib_runs, options.run)
# create temporal session log file
with open(session_log_filename, "w") as f:
for line in parsed_content:
f.write(line)
try:
# create run-wise JSON logs and graphs for each
produce_provenance(session_log_filename, base_filename)
finally:
# remove temporal session log file
remove_session_log_file = Path(session_log_filename)
remove_session_log_file.unlink()
# remove LOG_FILENAME
if options.quit:
remove_log_file = Path(LOG_FILENAME)
remove_log_file.unlink()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
"""Provenance post processing script for OSA pipeline."""
import copy
import logging
import shutil
import sys
from pathlib import Path, PurePath
import yaml
from osa.configs import options
from osa.configs.config import cfg
from osa.provenance.capture import get_activity_id, get_file_hash
from osa.provenance.io import provdoc2graph, provdoc2json, provlist2provdoc, read_prov
from osa.provenance.utils import get_log_config
from osa.utils.cliopts import provprocessparsing
from osa.utils.logging import myLogger
__all__ = ["copy_used_file", "parse_lines_log", "parse_lines_run", "produce_provenance"]
log = myLogger(logging.getLogger())
provconfig = yaml.safe_load(get_log_config())
LOG_FILENAME = provconfig["handlers"]["provHandler"]["filename"]
PROV_PREFIX = provconfig["PREFIX"]
PATH_DL1 = cfg.get("LST1", "DL1_DIR")
PATH_DL2 = cfg.get("LST1", "DL2_DIR")
def copy_used_file(src, outdir):
"""
Copy file used in process.
Parameters
----------
src
outdir
"""
# check src file exists
if not Path(src).is_file():
log.warning(f"{src} file cannot be accessed")
hash_src = get_file_hash(src, buffer="content")
filename = PurePath(src).name
destpath = Path(outdir) / filename
hash_out = ""
# get hash and new name
if destpath.exists():
hash_out = get_file_hash(str(destpath), buffer="content")
filename = filename + "_"
destpath = Path(outdir) / filename
# try copy file
if hash_src != hash_out:
try:
shutil.copyfile(src, str(destpath))
log.info(f"copying {destpath}")
except Exception as ex:
log.warning(f"could not copy {src} file into {destpath}: {ex}")
def parse_lines_log(filter_cut, calib_runs, run_number):
"""
Filter content in log file to produce a run/process wise session log.
Parameters
----------
filter_cut
calib_runs
run_number
Returns
-------
filtered
"""
filtered = []
if not filter_cut:
filter_cut = "all"
cuts = {
"calibration": ["drs4_pedestal", "calibrate_charge"],
"r0_to_dl1": ["r0_to_dl1", "dl1ab"],
"dl1_to_dl2": ["dl1_datacheck", "dl1_to_dl2"],
}
cuts["all"] = cuts["calibration"] + cuts["r0_to_dl1"] + cuts["dl1_to_dl2"]
with open(LOG_FILENAME, "r") as f:
for line in f.readlines():
ll = line.split(PROV_PREFIX)
if len(ll) != 3:
log.warning(
f"format {PROV_PREFIX} mismatch in log file {LOG_FILENAME}\n{line}"
)
continue
prov_str = ll.pop()
prov_dict = yaml.safe_load(prov_str)
keep = False
session_tag = prov_dict.get("session_tag", "0:0")
session_id = prov_dict.get("session_id", False)
tag_activity, tag_run = session_tag.split(":")
# filter by run and calib runs
if tag_run in [run_number, calib_runs]:
keep = True
# filter by activity
if tag_activity not in cuts[filter_cut]:
keep = False
# only keep first session start
if session_id and (tag_run in [run_number, calib_runs]):
keep = True
# make session starts with calibration
if session_id and filter_cut == "all" and not filtered:
prov_dict["session_id"] = f"{options.date}{run_number}"
prov_dict["name"] = run_number
prov_dict["observation_run"] = run_number
line = f"{ll[0]}{PROV_PREFIX}{ll[1]}{PROV_PREFIX}{prov_dict}\n"
# remove parallel sessions
if session_id and filtered:
keep = False
if keep:
filtered.append(line)
return filtered
def parse_lines_run(filter_step, prov_lines, out):
"""
Process provenance info to reduce session at run/process wise scope.
Parameters
----------
filter_step
prov_lines
out
Returns
-------
working_lines
"""
size = 0
container = {}
working_lines = []
r0filepath_str = ""
dl1filepath_str = ""
dl2filepath_str = ""
mufilepath_str = ""
ckfilepath_str = ""
id_activity_run = ""
end_time_line = ""
osa_config_copied = False
for line in prov_lines:
# get info
remove = False
endTime = line.get("endTime", "")
session_id = line.get("session_id", "")
activity_id = line.get("activity_id", "")
filepath = line.get("filepath", "")
used_role = line.get("used_role", "")
generated_role = line.get("generated_role", "")
parameters = line.get("parameters", "")
name = line.get("name", "")
content_type = line.get("contentType", "")
used_id = line.get("used_id", "")
osa_cfg = line.get("config_file", "")
# filter grain
session_tag = line.get("session_tag", "0:0")
tag_activity, _ = session_tag.split(":")
if tag_activity != filter_step and not session_id:
continue
# remove subruns info
if name == "DL1CheckSubrunDataset":
ckfilepath_str = filepath
elif name == "DL1SubrunDataset":
dl1filepath_str = filepath
elif name == "DL2SubrunDataset":
dl2filepath_str = filepath
elif name == "MuonsSubrunDataset":
mufilepath_str = filepath
elif name == "R0SubrunDataset":
r0filepath_str = filepath
if "Subrun" in name or "subrun" in used_role or "subrun" in generated_role:
remove = True
if parameters and "ObservationSubRun" in parameters:
del line["parameters"]["ObservationSubRun"]
# remove sub-runs activities and info
if name == filter_step and not id_activity_run:
id_activity_run = get_activity_id()
if name in container or used_id in container:
remove = True
if parameters and "parameters" in container:
remove = True
if name:
container[name] = True
if used_id:
container[used_id] = True
if parameters:
container["parameters"] = True
if endTime:
remove = True
end_time_line = line
size += 1
# remove duplicated produced files
if generated_role in container:
remove = True
if name == "DL2MergedFile":
container[name] = True
if "merged" in generated_role:
container[generated_role] = True
if name == "DL1CheckHDF5File":
container[name] = True
if "DL1Check HDF5 file" in generated_role:
container[generated_role] = True
if name == "DL1CheckPDFFile":
container[name] = True
if "DL1Check PDF file" in generated_role:
container[generated_role] = True
# replace with new run-wise activity_id
if activity_id:
line["activity_id"] = id_activity_run
# copy used files not subruns not RFs not mergedDL2
if (
filepath
and content_type != "application/x-spss-sav"
and name != "DL2MergedFile"
and not name.startswith("DL1Check")
and not remove
):
copy_used_file(filepath, out)
if session_id and osa_cfg and not osa_config_copied:
copy_used_file(osa_cfg, out)
osa_config_copied = True
if not remove:
working_lines.append(line)
# append collections used and generated at endtime line of last activity
if end_time_line:
working_lines.append(end_time_line)
if r0filepath_str and filter_step == "r0_to_dl1":
r0_entity_id = get_file_hash(r0filepath_str + "r0", buffer="path")
r0filepath_str = r0filepath_str.replace(PurePath(r0filepath_str).name, "")
used = {"entity_id": r0_entity_id}
used.update({"name": "R0Collection"})
used.update({"type": "SetCollection"})
used.update({"size": size})
used.update({"filepath": r0filepath_str})
working_lines.append(used)
used = {"activity_id": id_activity_run}
used.update({"used_id": r0_entity_id})
used.update({"used_role": "R0 Collection"})
working_lines.append(used)
if dl1filepath_str:
dl1filepath_str = dl1filepath_str.replace(PurePath(dl1filepath_str).name, "")
dl1_entity_id = get_file_hash(dl1filepath_str + "dl1", buffer="path")
dl1 = {"entity_id": dl1_entity_id}
dl1.update({"name": "DL1Collection"})
dl1.update({"type": "SetCollection"})
dl1.update({"size": size})
dl1.update({"filepath": dl1filepath_str})
working_lines.append(dl1)
if mufilepath_str:
mufilepath_str = mufilepath_str.replace(PurePath(mufilepath_str).name, "")
mu_entity_id = get_file_hash(mufilepath_str + "muons", buffer="path")
muons = {"entity_id": mu_entity_id}
muons.update({"name": "MuonsCollection"})
muons.update({"type": "SetCollection"})
muons.update({"size": size})
muons.update({"filepath": mufilepath_str})
working_lines.append(muons)
if mufilepath_str and filter_step == "r0_to_dl1":
generated = {"activity_id": id_activity_run}
generated.update({"generated_id": mu_entity_id})
generated.update({"generated_role": "Muons Collection"})
working_lines.append(generated)
if dl1filepath_str and filter_step in ["r0_to_dl1", "dl1ab"]:
generated = {"activity_id": id_activity_run}
generated.update({"generated_id": dl1_entity_id})
generated.update({"generated_role": "DL1 Collection"})
working_lines.append(generated)
if dl1filepath_str and filter_step in ["dl1_to_dl2", "dl1ab"]:
used = {"activity_id": id_activity_run}
used.update({"used_id": dl1_entity_id})
used.update({"used_role": "DL1 Collection"})
working_lines.append(used)
if dl1filepath_str and filter_step == "dl1_datacheck":
used = {"activity_id": id_activity_run}
used.update({"used_id": dl1_entity_id})
used.update({"used_role": "DL1 Collection"})
working_lines.append(used)
if mufilepath_str and filter_step == "dl1_datacheck":
used = {"activity_id": id_activity_run}
used.update({"used_id": mu_entity_id})
used.update({"used_role": "Muons Collection"})
working_lines.append(used)
if ckfilepath_str and filter_step == "dl1_datacheck":
ckfilepath_str = ckfilepath_str.replace(PurePath(ckfilepath_str).name, "")
chk_entity_id = get_file_hash(ckfilepath_str + "check", buffer="path")
dl1check = {"entity_id": chk_entity_id}
dl1check.update({"name": "DL1CheckCollection"})
dl1check.update({"type": "SetCollection"})
dl1check.update({"size": size})
dl1check.update({"filepath": ckfilepath_str})
working_lines.append(dl1check)
generated = {"activity_id": id_activity_run}
generated.update({"generated_id": chk_entity_id})
generated.update({"generated_role": "DL1Checks Collection"})
working_lines.append(generated)
if dl2filepath_str and filter_step == "dl1_to_dl2":
dl2_entity_id = get_file_hash(dl2filepath_str + "dl2", buffer="path")
dl2filepath_str = dl2filepath_str.replace(PurePath(dl2filepath_str).name, "")
used = {"entity_id": dl2_entity_id}
used.update({"name": "DL2Collection"})
used.update({"type": "SetCollection"})
used.update({"size": size})
used.update({"filepath": dl2filepath_str})
working_lines.append(used)
used = {"activity_id": id_activity_run}
used.update({"generated_id": dl2_entity_id})
used.update({"generated_role": "DL2 Collection"})
working_lines.append(used)
else:
working_lines = []
return working_lines
def define_paths(grain, start_path, end_path, base_filename):
"""Define target folders according to granularity."""
paths = {}
# check destination folder exists
step_path = Path(start_path) / options.date / options.prod_id / end_path
if not step_path.exists():
log.error(f"Path {step_path} does not exist")
# make folder log/ if does not exist
paths["out_path"] = step_path / "log"
paths["out_path"].mkdir(parents=True, exist_ok=True)
# define paths for prov products
paths["log_path"] = paths["out_path"] / f"{grain}_{base_filename}.log"
paths["json_filepath"] = paths["out_path"] / f"{grain}_{base_filename}.json"
paths["graph_filepath"] = paths["out_path"] / f"{grain}_{base_filename}.pdf"
return paths
def produce_provenance_files(processed_lines, paths):
"""Create provenance products as JSON logs and graphs."""
with open(paths["log_path"], "w") as f:
for line in processed_lines:
f.write(f"{line}\n")
log.info(f"creating {paths['log_path']}")
provdoc = provlist2provdoc(processed_lines)
# make json
try:
provdoc2json(provdoc, str(paths["json_filepath"]))
log.info(f"creating {paths['json_filepath']}")
except Exception as ex:
log.exception(f"problem while creating json: {ex}")
# make graph
try:
provdoc2graph(provdoc, str(paths["graph_filepath"]), "pdf")
log.info(f"creating {paths['graph_filepath']}")
except Exception as ex:
log.exception(f"problem while creating graph: {ex}")
def produce_provenance(session_log_filename, base_filename):
"""
Create run-wise provenance products as JSON logs
and graphs according to granularity.
"""
if options.filter == "calibration" or not options.filter:
paths_calibration = define_paths(
"calibration_to_dl1", PATH_DL1, options.dl1_prod_id, base_filename
)
plines_drs4 = parse_lines_run(
"drs4_pedestal",
read_prov(filename=session_log_filename),
str(paths_calibration["out_path"]),
)
plines_calib = parse_lines_run(
"calibrate_charge",
read_prov(filename=session_log_filename),
str(paths_calibration["out_path"]),
)
calibration_lines = plines_drs4 + plines_calib[1:]
# TODO
# create calibration prov files only if filtering
if options.filter == "calibration":
pass
if options.filter == "r0_to_dl1" or not options.filter:
paths_r0_dl1 = define_paths(
"r0_to_dl1", PATH_DL1, options.dl1_prod_id, base_filename
)
plines_r0 = parse_lines_run(
"r0_to_dl1",
read_prov(filename=session_log_filename),
str(paths_r0_dl1["out_path"]),
)
plines_ab = parse_lines_run(
"dl1ab",
read_prov(filename=session_log_filename),
str(paths_r0_dl1["out_path"]),
)
dl1_lines = plines_r0 + plines_ab[1:]
# create r0_to_dl1 prov files only if filtering
if options.filter == "r0_to_dl1":
produce_provenance_files(plines_r0 + plines_ab[1:], paths_r0_dl1)
if options.filter == "dl1_to_dl2" or not options.filter:
paths_dl1_dl2 = define_paths(
"dl1_to_dl2", PATH_DL2, options.dl2_prod_id, base_filename
)
plines_check = parse_lines_run(
"dl1_datacheck",
read_prov(filename=session_log_filename),
str(paths_dl1_dl2["out_path"]),
)
plines_dl2 = parse_lines_run(
"dl1_to_dl2",
read_prov(filename=session_log_filename),
str(paths_dl1_dl2["out_path"]),
)
dl1_dl2_lines = plines_check + plines_dl2[1:]
# create dl1_to_dl2 prov files only if filtering
if options.filter == "dl1_to_dl2":
produce_provenance_files(plines_check + plines_dl2[1:], paths_dl1_dl2)
# create calibration_to_dl1 and calibration_to_dl2 prov files
if not options.filter:
calibration_to_dl1 = define_paths(
"calibration_to_dl1", PATH_DL1, options.dl1_prod_id, base_filename
)
calibration_to_dl2 = define_paths(
"calibration_to_dl2", PATH_DL2, options.dl2_prod_id, base_filename
)
calibration_to_dl1_lines = calibration_lines + dl1_lines[1:]
lines_dl1 = copy.deepcopy(calibration_to_dl1_lines)
calibration_to_dl2_lines = calibration_to_dl1_lines + dl1_dl2_lines[1:]
lines_dl2 = copy.deepcopy(calibration_to_dl2_lines)
produce_provenance_files(lines_dl1, calibration_to_dl1)
produce_provenance_files(lines_dl2, calibration_to_dl2)
def main():
"""Extract the provenance information."""
provprocessparsing()
# Logging
if options.verbose:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
# check LOG_FILENAME exists
if not Path(LOG_FILENAME).exists():
log.error(f"file {LOG_FILENAME} does not exist")
# check LOG_FILENAME is not empty
if not Path(LOG_FILENAME).stat().st_size:
log.warning(f"file {LOG_FILENAME} is empty")
sys.exit(1)
# build base_filename
base_filename = f"{options.run}_prov"
session_log_filename = f"{base_filename}.log"
# parse LOG_FILENAME content for a specific run / process
calib_runs = f"{options.drs4_pedestal_run_id}-{options.pedcal_run_id}"
parsed_content = parse_lines_log(options.filter, calib_runs, options.run)
# create temporal session log file
with open(session_log_filename, "w") as f:
for line in parsed_content:
f.write(line)
try:
# create run-wise JSON logs and graphs for each
produce_provenance(session_log_filename, base_filename)
finally:
# remove temporal session log file
remove_session_log_file = Path(session_log_filename)
remove_session_log_file.unlink()
# remove LOG_FILENAME
if options.quit:
remove_log_file = Path(LOG_FILENAME)
remove_log_file.unlink()
if __name__ == "__main__":
main()
|
from __future__ import annotations
import copy
from sys import getsizeof
import re
from typing import Dict, Iterable, List, Tuple, Union, overload
from api.errors import InvalidBlockException
from utils import Int
class Block:
"""
Class to handle data about various blockstates and allow for extra blocks to be created and interacted with.
.. important::
Creating version specific block objects via the `Block()` constructor instead of using
:meth:`api.world.World.get_block_instance` is supported but not encouraged. To avoid possible caveats of doing this,
make sure to either only instantiate blocks with Amulet blockstate data or use
:meth:`api.world.World.get_block_instance` instead
Here are a few examples of how to create a Block object with extra blocks:
Creating a new Block object with the base of ``stone`` and has an extra block of ``water[level=1]``:
>>> stone = Block(blockstate="minecraft:stone")
>>> water_level_1 = Block(blockstate="minecraft:water[level=1]")
>>> stone_with_extra_block = stone + water_level_1
>>> repr(stone_with_extra_block)
'Block(minecraft:stone, minecraft:water[level=1])'
Creating a new Block object using the namespace and base_name:
>>> granite = Block(namespace="minecraft", base_name="granite")
Creating a new Block object with another layer of extra blocks:
>>> stone_water_granite = stone_with_extra_block + granite # Doesn't modify any of the other objects
>>> repr(stone_water_granite)
'Block(minecraft:stone, minecraft:water[level=1], minecraft:granite)'
Creating a new Block object by removing an extra block from all layers:
*Note: This removes all instances of the Block object from extra blocks*
>>> stone_granite = stone_water_granite - water_level_1 # Doesn't modify any of the other objects either
>>> repr(stone_granite)
'Block(minecraft:stone, minecraft:granite)'
Creating a new Block object by removing a specific layer:
>>> oak_log_axis_x = Block(blockstate="minecraft:oak_log[axis=x]")
>>> stone_water_granite_water_oak_log = stone_water_granite + water_level_1 + oak_log_axis_x
>>> repr(stone_water_granite_water_oak_log)
'Block(minecraft:stone, minecraft:water[level=1], minecraft:granite, minecraft:water[level=1], minecraft:oak_log[axis=x])'
>>> stone_granite_water_oak_log = stone_water_granite_water_oak_log.remove_layer(0)
>>> repr(stone_granite_water_oak_log)
'Block(minecraft:stone, minecraft:granite, minecraft:water[level=1], minecraft:oak_log[axis=x])'
"""
__slots__ = (
"_namespace",
"_base_name",
"_properties",
"_extra_blocks",
"_blockstate",
) # Reduces memory footprint
blockstate_regex = re.compile(
r"(?:(?P<namespace>[a-z0-9_.-]+):)?(?P<base_name>[a-z0-9/._-]+)(?:\[(?P<property_name>[a-z0-9_]+)=(?P<property_value>[a-z0-9_]+)(?P<properties>.*)\])?"
)
parameters_regex = re.compile(r"(?:,(?P<name>[a-z0-9_]+)=(?P<value>[a-z0-9_]+))")
def __init__(
self,
blockstate: str = None,
namespace: str = None,
base_name: str = None,
properties: Dict[str, Union[str, bool, int]] = None,
extra_blocks: Union[Block, Iterable[Block]] = None,
):
self._blockstate = blockstate
self._namespace = namespace
self._base_name = base_name
if namespace is not None and base_name is not None and properties is None:
properties = {}
self._properties = properties
self._extra_blocks = ()
if extra_blocks:
if isinstance(extra_blocks, Block):
extra_blocks = [extra_blocks]
self._extra_blocks = tuple(extra_blocks)
if blockstate:
self._gen_blockstate()
@property
def namespace(self) -> str:
"""
The namespace of the blockstate represented by the Block object (IE: `minecraft`)
:return: The namespace of the blockstate
"""
if self._namespace is None:
self._parse_blockstate_string()
return self._namespace
@property
def base_name(self) -> str:
"""
The base name of the blockstate represented by the Block object (IE: `stone`, `dirt`)
:return: The base name of the blockstate
"""
if self._base_name is None:
self._parse_blockstate_string()
return self._base_name
@property
def properties(self) -> Dict[str, Union[str, bool, int]]:
"""
The mapping of properties of the blockstate represented by the Block object (IE: `{"level": "1"}`)
:return: A dictionary of the properties of the blockstate
"""
if self._properties is None:
self._parse_blockstate_string()
return copy.deepcopy(self._properties)
@property
def blockstate(self) -> str:
"""
The full blockstate string of the blockstate represented by the Block object (IE: `minecraft:stone`, `minecraft:oak_log[axis=x]`)
:return: The blockstate string
"""
if self._blockstate is None:
self._gen_blockstate()
return self._blockstate
@property
def extra_blocks(self) -> Union[Tuple, Tuple[Block]]:
"""
Returns a tuple of the extra blocks contained in the Block instance
:return: A tuple of Block objects
"""
return self._extra_blocks
def _gen_blockstate(self):
self._blockstate = f"{self.namespace}:{self.base_name}"
if self.properties:
props = [f"{key}={value}" for key, value in sorted(self.properties.items())]
self._blockstate = f"{self._blockstate}[{','.join(props)}]"
@staticmethod
def parse_blockstate_string(blockstate: str) -> Tuple[str, str, Dict[str, str]]:
match = Block.blockstate_regex.match(blockstate)
namespace = match.group("namespace") or "minecraft"
base_name = match.group("base_name")
if match.group("property_name") is not None:
properties = {match.group("property_name"): match.group("property_value")}
else:
properties = {}
properties_string = match.group("properties")
if properties_string is not None:
properties_match = Block.parameters_regex.finditer(properties_string)
for match in properties_match:
properties[match.group("name")] = match.group("value")
return namespace, base_name, {k: v for k, v in sorted(properties.items())}
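# Illustrative examples of what parse_blockstate_string returns, derived from the
# regexes defined on this class:
#
#   >>> Block.parse_blockstate_string("minecraft:oak_log[axis=x,waterlogged=false]")
#   ('minecraft', 'oak_log', {'axis': 'x', 'waterlogged': 'false'})
#   >>> Block.parse_blockstate_string("stone")
#   ('minecraft', 'stone', {})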
def _parse_blockstate_string(self):
self._namespace, self._base_name, self._properties = self.parse_blockstate_string(
self._blockstate
)
def __str__(self) -> str:
"""
:return: The base blockstate string of the Block object
"""
return self.blockstate
def __repr__(self) -> str:
"""
:return: The base blockstate string of the Block object along with the blockstate strings of included extra blocks
"""
return f"Block({', '.join([str(b) for b in (self, *self.extra_blocks)])})"
def __len__(self):
return len(self._extra_blocks) + 1
def _compare_extra_blocks(self, other: Block) -> bool:
if len(self.extra_blocks) != len(other.extra_blocks):
return False
if len(self.extra_blocks) == 0:
return True
for our_extra_block, their_extra_block in zip(
self.extra_blocks, other.extra_blocks
):
if our_extra_block != their_extra_block:
return False
return True
def __eq__(self, other: Block) -> bool:
"""
Checks the equality of this Block object to another Block object
:param other: The Block object to check against
:return: True if the Blocks objects are equal, False otherwise
"""
if self.__class__ != other.__class__:
return False
return self.blockstate == other.blockstate and self._compare_extra_blocks(other)
def __hash__(self) -> int:
"""
Hashes the Block object
:return: A hash of the Block object
"""
current_hash = hash(self.blockstate)
if self.extra_blocks:
current_hash = current_hash + hash(self.extra_blocks)
return current_hash
def __add__(self, other: Block) -> Block:
"""
Allows for other Block objects to be added to this Block object's ``extra_blocks``
:param other: The Block object to add to the end of this Block object's `extra_blocks`
:return: A new Block object with the same data but with an additional Block at the end of ``extra_blocks``
"""
if not isinstance(other, Block):
return NotImplemented
if (
len(other.extra_blocks) == 0
): # Reduces the amount of extra objects/references created
other_cpy = other
else:
other_cpy = Block(
namespace=other.namespace,
base_name=other.base_name,
properties=other.properties,
)
other_extras = []
for eb in other.extra_blocks:
if (
len(eb.extra_blocks) == 0
): # Reduces the amount of extra objects/references created
other_extras.append(eb)
else:
other_extras.append(
Block(
namespace=eb.namespace,
base_name=eb.base_name,
properties=eb.properties,
)
)
return Block(
namespace=self.namespace,
base_name=self.base_name,
properties=self.properties,
extra_blocks=[*self.extra_blocks, other_cpy, *other_extras],
)
def __sub__(self, other: Block) -> Block:
"""
Allows for other Block objects to be subtracted from this Block object's ``extra_blocks``
:param other: The Block object to subtract from this Block objects' ``extra_blocks``
:return: A new Block object without any instances of the subtracted block in ``extra_blocks``
"""
if not isinstance(other, Block):
return NotImplemented
if (
len(other.extra_blocks) == 0
): # Reduces the amount of extra objects/references created
other_cpy = other
else:
other_cpy = Block(
namespace=other.namespace,
base_name=other.base_name,
properties=other.properties,
)
other_extras = []
for eb in other.extra_blocks:
if len(eb.extra_blocks) == 0:
other_extras.append(eb)
else:
other_extras.append(
Block(
namespace=eb.namespace,
base_name=eb.base_name,
properties=eb.properties,
)
)
# Sets are unordered, so a regular set subtraction doesn't always return the order we want (it sometimes will!)
# So we loop through all of our extra blocks and only append those to the new_extras list if they aren't in
# extra_blocks_to_remove
new_extras = []
extra_blocks_to_remove = (other_cpy, *other_extras)
for eb in self.extra_blocks:
if eb not in extra_blocks_to_remove:
new_extras.append(eb)
return Block(
namespace=self.namespace,
base_name=self.base_name,
properties=self.properties,
extra_blocks=new_extras,
)
def remove_layer(self, layer: int) -> Block:
"""
Removes the Block object from the specified layer and returns the resulting new Block object
:param layer: The layer of extra block to remove
:return: A new instance of Block with the same data but with the extra block at specified layer removed
:raises `InvalidBlockException`: Raised when you remove the base block from a Block with no other extra blocks
"""
if (
layer == 0
and len(self.extra_blocks) > 0
and layer <= len(self.extra_blocks)
):
new_base = self._extra_blocks[0]
return Block(
namespace=new_base.namespace,
base_name=new_base.base_name,
properties=new_base.properties,
extra_blocks=[*self._extra_blocks[1:]],
)
elif layer > len(self.extra_blocks):
raise InvalidBlockException("You cannot remove a non-existant layer")
elif layer == 0:
raise InvalidBlockException(
"Removing the base block with no extra blocks is not supported"
)
return Block(
namespace=self.namespace,
base_name=self.base_name,
properties=self.properties,
extra_blocks=[*self.extra_blocks[: layer - 1], *self.extra_blocks[layer:]],
)
def __sizeof__(self):
size = (
getsizeof(self.namespace)
+ getsizeof(self.base_name)
+ getsizeof(self.properties)
+ getsizeof(self.blockstate)
)
for eb in self.extra_blocks:
size += getsizeof(eb)
return size
class BlockManager:
"""
Class to handle the mappings between Block objects and their index-based internal IDs
"""
def __init__(self):
"""
Creates a new BlockManager object
"""
self._index_to_block: List[Block] = []
self._block_to_index_map: Dict[Block, int] = {}
def __len__(self):
return len(self._index_to_block)
def __contains__(self, item: Block) -> bool:
return item in self._block_to_index_map
@overload
def __getitem__(self, item: Block) -> int:
...
@overload
def __getitem__(self, item: Int) -> Block:
...
def __getitem__(self, item):
"""
If a Block object is passed to this function, it'll return the internal ID/index of the
blockstate. If an int is given, this method will return the Block object at that specified index.
:param item: The Block object or int to get the mapping data of
:return: An int if a Block object was supplied, a Block object if an int was supplied
"""
try:
if isinstance(item, Block):
return self._block_to_index_map[item]
return self._index_to_block[item]
except (KeyError, IndexError):
raise KeyError(
f"There is no {item} in the BlockManager. "
f"You might want to use the `add_block` function for your blocks before accessing them."
)
def get_add_block(self, block: Block) -> int:
"""
Adds a Block object to the internal Block object/ID mappings. If the Block already exists in the mappings,
then the existing ID is returned
:param block: The Block to add to the manager
:return: The internal ID of the Block
"""
if block in self._block_to_index_map:
return self._block_to_index_map[block]
self._block_to_index_map[block] = i = len(self._block_to_index_map)
self._index_to_block.append(block)
return i
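# Minimal usage sketch for BlockManager (illustrative only):
#
#   manager = BlockManager()
#   stone = Block(blockstate="minecraft:stone")
#   stone_id = manager.get_add_block(stone) # first call registers the block and returns 0
#   assert manager[stone] == stone_id # Block -> internal ID
#   assert manager[stone_id] == stone # internal ID -> Block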
|
from __future__ import annotations
import copy
from sys import getsizeof
import re
from typing import Dict, Iterable, List, Tuple, Union, overload
from api.errors import InvalidBlockException
from utils import Int
class Block:
"""
Class to handle data about various blockstates and allow for extra blocks to be created and interacted with.
.. important::
Creating version specific block objects via the `Block()` constructor instead of using
:meth:`api.world.World.get_block_instance` is supported but not encouraged. To avoid possible caveats of doing this,
make sure to either only instantiate blocks with Amulet blockstate data or use
:meth:`api.world.World.get_block_instance` instead
Here are a few examples of how to create a Block object with extra blocks:
Creating a new Block object with the base of ``stone`` and has an extra block of ``water[level=1]``:
>>> stone = Block(blockstate="minecraft:stone")
>>> water_level_1 = Block(blockstate="minecraft:water[level=1]")
>>> stone_with_extra_block = stone + water_level_1
>>> repr(stone_with_extra_block)
'Block(minecraft:stone, minecraft:water[level=1])'
Creating a new Block object using the namespace and base_name:
>>> granite = Block(namespace="minecraft", base_name="granite")
Creating a new Block object with another layer of extra blocks:
>>> stone_water_granite = stone_with_extra_block + granite # Doesn't modify any of the other objects
>>> repr(stone_water_granite)
'Block(minecraft:stone, minecraft:water[level=1], minecraft:granite)'
Creating a new Block object by removing an extra block from all layers:
*Note: This removes all instances of the Block object from extra blocks*
>>> stone_granite = stone_water_granite - water_level_1 # Doesn't modify any of the other objects either
>>> repr(stone_granite)
'Block(minecraft:stone, minecraft:granite)'
Creating a new Block object by removing a specific layer:
>>> oak_log_axis_x = Block(blockstate="minecraft:oak_log[axis=x]")
>>> stone_water_granite_water_oak_log = stone_water_granite + water_level_1 + oak_log_axis_x
>>> repr(stone_water_granite_water_oak_log)
'Block(minecraft:stone, minecraft:water[level=1], minecraft:granite, minecraft:water[level=1], minecraft:oak_log[axis=x])'
>>> stone_granite_water_oak_log = stone_water_granite_water_oak_log.remove_layer(0)
>>> repr(stone_granite_water_oak_log)
'Block(minecraft:stone, minecraft:granite, minecraft:water[level=1], minecraft:oak_log[axis=x])'
"""
__slots__ = (
"_namespace",
"_base_name",
"_properties",
"_extra_blocks",
"_blockstate",
) # Reduces memory footprint
blockstate_regex = re.compile(
r"(?:(?P<namespace>[a-z0-9_.-]+):)?(?P<base_name>[a-z0-9/._-]+)(?:\[(?P<property_name>[a-z0-9_]+)=(?P<property_value>[a-z0-9_]+)(?P<properties>.*)\])?"
)
parameters_regex = re.compile(r"(?:,(?P<name>[a-z0-9_]+)=(?P<value>[a-z0-9_]+))")
def __init__(
self,
blockstate: str = None,
namespace: str = None,
base_name: str = None,
properties: Dict[str, Union[str, bool, int]] = None,
extra_blocks: Union[Block, Iterable[Block]] = None,
):
self._blockstate = blockstate
self._namespace = namespace
self._base_name = base_name
if namespace is not None and base_name is not None and properties is None:
properties = {}
self._properties = properties
self._extra_blocks = ()
if extra_blocks:
if isinstance(extra_blocks, Block):
extra_blocks = [extra_blocks]
self._extra_blocks = tuple(extra_blocks)
if blockstate:
self._gen_blockstate()
@property
def namespace(self) -> str:
"""
The namespace of the blockstate represented by the Block object (IE: `minecraft`)
:return: The namespace of the blockstate
"""
if self._namespace is None:
self._parse_blockstate_string()
return self._namespace
@property
def base_name(self) -> str:
"""
The base name of the blockstate represented by the Block object (IE: `stone`, `dirt`)
:return: The base name of the blockstate
"""
if self._base_name is None:
self._parse_blockstate_string()
return self._base_name
@property
def properties(self) -> Dict[str, Union[str, bool, int]]:
"""
The mapping of properties of the blockstate represented by the Block object (IE: `{"level": "1"}`)
:return: A dictionary of the properties of the blockstate
"""
if self._properties is None:
self._parse_blockstate_string()
return copy.deepcopy(self._properties)
@property
def blockstate(self) -> str:
"""
The full blockstate string of the blockstate represented by the Block object (IE: `minecraft:stone`, `minecraft:oak_log[axis=x]`)
:return: The blockstate string
"""
if self._blockstate is None:
self._gen_blockstate()
return self._blockstate
@property
def extra_blocks(self) -> Union[Tuple, Tuple[Block]]:
"""
Returns a tuple of the extra blocks contained in the Block instance
:return: A tuple of Block objects
"""
return self._extra_blocks
def _gen_blockstate(self):
self._blockstate = f"{self.namespace}:{self.base_name}"
if self.properties:
props = [f"{key}={value}" for key, value in sorted(self.properties.items())]
self._blockstate = f"{self._blockstate}[{','.join(props)}]"
@staticmethod
def parse_blockstate_string(blockstate: str) -> Tuple[str, str, Dict[str, str]]:
match = Block.blockstate_regex.match(blockstate)
namespace = match.group("namespace") or "minecraft"
base_name = match.group("base_name")
if match.group("property_name") is not None:
properties = {match.group("property_name"): match.group("property_value")}
else:
properties = {}
properties_string = match.group("properties")
if properties_string is not None:
properties_match = Block.parameters_regex.finditer(properties_string)
for match in properties_match:
properties[match.group("name")] = match.group("value")
return namespace, base_name, {k: v for k, v in sorted(properties.items())}
def _parse_blockstate_string(self):
self._namespace, self._base_name, self._properties = self.parse_blockstate_string(
self._blockstate
)
def __str__(self) -> str:
"""
:return: The base blockstate string of the Block object
"""
return self.blockstate
def __repr__(self) -> str:
"""
:return: The base blockstate string of the Block object along with the blockstate strings of included extra blocks
"""
return f"Block({', '.join([str(b) for b in (self, *self.extra_blocks)])})"
def __len__(self):
return len(self._extra_blocks) + 1
def _compare_extra_blocks(self, other: Block) -> bool:
if len(self.extra_blocks) != len(other.extra_blocks):
return False
if len(self.extra_blocks) == 0:
return True
for our_extra_block, their_extra_block in zip(
self.extra_blocks, other.extra_blocks
):
if our_extra_block != their_extra_block:
return False
return True
def __eq__(self, other: Block) -> bool:
"""
Checks the equality of this Block object to another Block object
:param other: The Block object to check against
:return: True if the Blocks objects are equal, False otherwise
"""
if self.__class__ != other.__class__:
return False
return self.blockstate == other.blockstate and self._compare_extra_blocks(other)
def __hash__(self) -> int:
"""
Hashes the Block object
:return: A hash of the Block object
"""
current_hash = hash(self.blockstate)
if self.extra_blocks:
current_hash = current_hash + hash(self.extra_blocks)
return current_hash
def __add__(self, other: Block) -> Block:
"""
Allows for other Block objects to be added to this Block object's ``extra_blocks``
:param other: The Block object to add to the end of this Block object's `extra_blocks`
:return: A new Block object with the same data but with an additional Block at the end of ``extra_blocks``
"""
if not isinstance(other, Block):
return NotImplemented
if (
len(other.extra_blocks) == 0
): # Reduces the amount of extra objects/references created
other_cpy = other
else:
other_cpy = Block(
namespace=other.namespace,
base_name=other.base_name,
properties=other.properties,
)
other_extras = []
for eb in other.extra_blocks:
if (
len(eb.extra_blocks) == 0
): # Reduces the amount of extra objects/references created
other_extras.append(eb)
else:
other_extras.append(
Block(
namespace=eb.namespace,
base_name=eb.base_name,
properties=eb.properties,
)
)
return Block(
namespace=self.namespace,
base_name=self.base_name,
properties=self.properties,
extra_blocks=[*self.extra_blocks, other_cpy, *other_extras],
)
def __sub__(self, other: Block) -> Block:
"""
Allows for other Block objects to be subtracted from this Block object's ``extra_blocks``
:param other: The Block object to subtract from this Block objects' ``extra_blocks``
:return: A new Block object without any instances of the subtracted block in ``extra_blocks``
"""
if not isinstance(other, Block):
return NotImplemented
if (
len(other.extra_blocks) == 0
): # Reduces the amount of extra objects/references created
other_cpy = other
else:
other_cpy = Block(
namespace=other.namespace,
base_name=other.base_name,
properties=other.properties,
)
other_extras = []
for eb in other.extra_blocks:
if len(eb.extra_blocks) == 0:
other_extras.append(eb)
else:
other_extras.append(
Block(
namespace=eb.namespace,
base_name=eb.base_name,
properties=eb.properties,
)
)
# Sets are unordered, so a regular set subtraction doesn't always return the order we want (it sometimes will!)
# So we loop through all of our extra blocks and only append those to the new_extras list if they aren't in
# extra_blocks_to_remove
new_extras = []
extra_blocks_to_remove = (other_cpy, *other_extras)
for eb in self.extra_blocks:
if eb not in extra_blocks_to_remove:
new_extras.append(eb)
return Block(
namespace=self.namespace,
base_name=self.base_name,
properties=self.properties,
extra_blocks=new_extras,
)
def remove_layer(self, layer: int) -> Block:
"""
Removes the Block object from the specified layer and returns the resulting new Block object
:param layer: The layer of extra block to remove
:return: A new instance of Block with the same data but with the extra block at specified layer removed
:raises `InvalidBlockException`: Raised when you remove the base block from a Block with no other extra blocks
"""
if (
layer == 0
and len(self.extra_blocks) > 0
and layer <= len(self.extra_blocks)
):
new_base = self._extra_blocks[0]
return Block(
namespace=new_base.namespace,
base_name=new_base.base_name,
properties=new_base.properties,
extra_blocks=[*self._extra_blocks[1:]],
)
elif layer > len(self.extra_blocks):
raise InvalidBlockException("You cannot remove a non-existant layer")
elif layer == 0:
raise InvalidBlockException(
"Removing the base block with no extra blocks is not supported"
)
return Block(
namespace=self.namespace,
base_name=self.base_name,
properties=self.properties,
extra_blocks=[*self.extra_blocks[: layer - 1], *self.extra_blocks[layer:]],
)
def __sizeof__(self):
size = (
getsizeof(self.namespace)
+ getsizeof(self.base_name)
+ getsizeof(self.properties)
+ getsizeof(self.blockstate)
)
for eb in self.extra_blocks:
size += getsizeof(eb)
return size
class BlockManager:
"""
Class to handle the mappings between Block objects and their index-based internal IDs
"""
def __init__(self):
"""
Creates a new BlockManager object
"""
self._index_to_block: List[Block] = []
self._block_to_index_map: Dict[Block, int] = {}
def __len__(self):
return len(self._index_to_block)
def __contains__(self, item: Block) -> bool:
return item in self._block_to_index_map
@overload
def __getitem__(self, item: Block) -> int:
...
@overload
def __getitem__(self, item: Int) -> Block:
...
def __getitem__(self, item):
"""
If a Block object is passed to this function, it'll return the internal ID/index of the
blockstate. If an int is given, this method will return the Block object at that specified index.
:param item: The Block object or int to get the mapping data of
:return: An int if a Block object was supplied, a Block object if an int was supplied
"""
try:
if isinstance(item, Block):
return self._block_to_index_map[item]
return self._index_to_block[item]
except (KeyError, IndexError):
raise KeyError(
f"There is no {item} in the BlockManager. "
f"You might want to use the `add_block` function for your blocks before accessing them."
)
def get_add_block(self, block: Block) -> int:
"""
Adds a Block object to the internal Block object/ID mappings. If the Block already exists in the mappings,
then the existing ID is returned
:param block: The Block to add to the manager
:return: The internal ID of the Block
"""
if block in self._block_to_index_map:
return self._block_to_index_map[block]
self._block_to_index_map[block] = i = len(self._block_to_index_map)
self._index_to_block.append(block)
return i
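# --- Hedged usage sketch (not part of the original module). Demonstrates how
# Block.parse_blockstate_string splits a blockstate string into namespace, base
# name and properties, and how Block.blockstate reassembles the same canonical
# form (properties sorted by key, namespace defaulting to "minecraft").
def _demo_parse_blockstate():
    namespace, base_name, props = Block.parse_blockstate_string("minecraft:oak_log[axis=x]")
    assert (namespace, base_name, props) == ("minecraft", "oak_log", {"axis": "x"})
    assert Block.parse_blockstate_string("stone")[0] == "minecraft"
    assert Block(blockstate="minecraft:oak_log[axis=x]").blockstate == "minecraft:oak_log[axis=x]"
    return props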
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import argparse
import logging
import math
import os
import time
import warnings
from benchmark_dataset import BenchmarkLMDataset, collate_sentences_lm
import torch
from torch.distributed import rpc
import torch.multiprocessing as mp
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
import torchtext
from torchtext.data.utils import get_tokenizer
from fairscale.nn import Pipe
from fairscale.nn.model_parallel import initialize_model_parallel
from fairscale.nn.model_parallel.initialize import get_data_parallel_group, get_pipeline_parallel_group
from fairscale.nn.pipe import LazyModule, pipe
from fairscale.optim import GradScaler
from fairscale.optim.oss import OSS
from fairscale.utils.testing import dist_init, get_worker_map
try:
from fairscale.optim import Adam # type: ignore
can_benchmark = True
except ImportError:
from torch.optim import Adam # type: ignore
can_benchmark = False
def init_random_seed(seed: int):
import numpy
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
numpy.random.seed(seed)
PIPE_CHUNKS = 2
iteration_count = 0
class EmbeddingLayer(nn.Embedding):
def __init__(self, ntoken, ninp, initrange):
super().__init__(ntoken, ninp)
self.ninp = ninp
self.weight.data.uniform_(-initrange, initrange)
def forward(self, src):
return super().forward(src) * math.sqrt(self.ninp)
class PositionalEncodingLayer(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncodingLayer, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer("pe", pe)
def forward(self, x):
x = x + self.pe[: x.size(0), :]
return self.dropout(x)
class TransformerDecoderLayer(nn.TransformerEncoderLayer):
"""Though this class inherits from torch.nn.TransformerEncoderLayer,
it functions as a decoder in this model"""
    def __init__(self, ninp, nhead, nhid, dropout):
        super().__init__(ninp, nhead, nhid, dropout)
self.src_mask = None
def _generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float("-inf")).masked_fill(mask == 1, float(0.0))
return mask
def forward(self, src):
global iteration_count
iteration_count += 1
# if iteration_count == 196:
# dump_cuda_tensors()
if self.src_mask is None or self.src_mask.size(0) != len(src):
device = src.device
mask = self._generate_square_subsequent_mask(len(src)).to(device)
self.src_mask = mask
return super().forward(src, self.src_mask)
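# --- Hedged usage sketch (not part of the original benchmark); the layer sizes
# below are arbitrary small values chosen only for illustration. Shows the causal
# mask produced by _generate_square_subsequent_mask: 0.0 on and below the
# diagonal, -inf above it, so each position can only attend backwards.
def _demo_causal_mask(size: int = 3):
    layer = TransformerDecoderLayer(16, 2, 32, 0.1)
    mask = layer._generate_square_subsequent_mask(size)
    # For size == 3 the mask looks like:
    # [[0., -inf, -inf],
    #  [0.,   0., -inf],
    #  [0.,   0.,   0.]]
    assert mask.shape == (size, size)
    return mask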
class LinearLayer(nn.Linear):
def __init__(self, ninp, ntoken, initrange):
super().__init__(ninp, ntoken)
self.bias.data.zero_()
self.weight.data.uniform_(-initrange, initrange)
class TransformerLMSequential(nn.Sequential):
"""A small language model based on the design of GPT-2 using nn.Sequential
    for compatibility with Pipe"""
def __init__(self, ntokens, ninp, nhead, nhid, dropout, initrange, ndecoder):
layers = [
EmbeddingLayer(ntokens, ninp, initrange),
PositionalEncodingLayer(ninp, dropout),
]
for _ in range(ndecoder):
layers.append(TransformerDecoderLayer(ninp, nhead, nhid, dropout))
layers.append(LinearLayer(ninp, ntokens, initrange))
super(TransformerLMSequential, self).__init__(*layers)
def get_data(device):
    with warnings.catch_warnings(record=True):
TEXT = torchtext.data.Field(
tokenize=get_tokenizer("basic_english"), init_token="<sos>", eos_token="<eos>", lower=True
)
train_txt, val_txt, test_txt = torchtext.datasets.WikiText2.splits(TEXT)
TEXT.build_vocab(train_txt)
ntokens = len(TEXT.vocab.stoi)
batch_size = 20
eval_batch_size = 10
train_data = batchify(train_txt, batch_size, TEXT, device)
val_data = batchify(val_txt, eval_batch_size, TEXT, device)
test_data = batchify(test_txt, eval_batch_size, TEXT, device)
return ntokens, train_data, val_data, test_data
def batchify(data, bsz, TEXT, device):
data = TEXT.numericalize([data.examples[0].text])
nbatch = data.size(0) // bsz
data = data.narrow(0, 0, nbatch * bsz)
data = data.view(bsz, -1).t().contiguous()
return data.to(device)
def get_batch(source, i, bptt):
seq_len = min(bptt, len(source) - 1 - i)
data = source[i : i + seq_len]
target = source[i + 1 : i + 1 + seq_len].view(-1)
return data, target
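# --- Hedged usage sketch (not part of the original benchmark); the toy tensor
# below stands in for batchify() output. Shows how get_batch windows a
# (seq_len, batch) source: data is the bptt-long slice starting at i, and target
# is the same window shifted by one token and flattened, matching what the
# CrossEntropyLoss calls in train/evaluate expect.
def _demo_get_batch(bptt: int = 4):
    source = torch.arange(24).view(2, 12).t().contiguous()  # shape (12, 2), fake token ids
    data, target = get_batch(source, 0, bptt)
    assert data.shape == (bptt, 2)
    assert target.shape == (bptt * 2,)
    return data, target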
def make_model(args, device, ntokens):
ninp = 2048 # embedding dimension
nhid = 2048 # the dimension of the feedforward network model in nn.TransformerEncoder
nhead = 32 # the number of heads in the multiheadattention models
dropout = 0
initrange = 0.1
ndecoder = args.num_decoder_layers
if args.lazy_construction:
layers = [
LazyModule(lambda: EmbeddingLayer(ntokens, ninp, initrange)),
LazyModule(lambda: PositionalEncodingLayer(ninp, dropout)),
]
for _ in range(ndecoder):
layers.append(LazyModule(lambda: TransformerDecoderLayer(ninp, nhead, nhid, dropout)))
layers.append(LazyModule(lambda: LinearLayer(ninp, ntokens, initrange)))
model = layers
else:
model = TransformerLMSequential(ntokens, ninp, nhead, nhid, dropout, initrange, ndecoder).to(device)
criterion = nn.CrossEntropyLoss()
lr = 0.01 # learning rate
def make_adam(model):
if args.ddp_zero:
return OSS(params=model.parameters(), optim=Adam, group=get_data_parallel_group(), lr=lr)
else:
return Adam(model.parameters(), lr=lr)
optimizer = make_adam
scaler = GradScaler()
return model, criterion, optimizer, scaler
def get_tensors_by_size_bucket():
from collections import defaultdict
import gc
size_buckets = defaultdict(int)
for obj in gc.get_objects():
if not isinstance(obj, torch.Tensor):
continue
if obj.device.type == "cuda":
size_buckets[(*obj.size(),) + (obj.element_size(),)] += 1
return size_buckets
def dump_size_buckets(size_buckets, prefix=""):
from functools import reduce
import operator
total = 0
for key, value in size_buckets.items():
this = reduce(operator.mul, key) * value
total += this
print(prefix + f"{key} : {value}, {this}")
print(prefix + f"total = {total}")
last_size_buckets = None
once = True
def safe_rank():
try:
return torch.distributed.get_rank()
except AssertionError:
return 0
def check_size_buckets():
global last_size_buckets
global once
size_buckets = get_tensors_by_size_bucket()
if last_size_buckets is not None:
if size_buckets != last_size_buckets:
print(f"difference is oustanding tensors: {safe-rank()}")
dump_size_buckets(last_size_buckets, "old: ")
dump_size_buckets(size_buckets, "new: ")
if once:
print(f"dumping buckets for: {safe_rank()}")
dump_size_buckets(last_size_buckets, "old: ")
dump_size_buckets(size_buckets, "new: ")
once = False
else:
print(f"size buckets none on {safe_rank()}")
last_size_buckets = size_buckets
def dump_cuda_tensors():
print(f"dumping cuda tensors...")
from functools import reduce
import gc
    import operator
    from collections import defaultdict
    size_buckets = defaultdict(int)  # bucket counts keyed by (tensor shape, element size)
    for obj in gc.get_objects():
if not isinstance(obj, torch.Tensor):
continue
if obj.device.type == "cuda":
size_buckets[(*obj.size(),) + (obj.element_size(),)] += 1
print(f"outstanding cuda tensors:")
total = 0
for key, value in size_buckets.items():
this = reduce(operator.mul, key) * value
total += this
print(f"{key} : {value}, {this}")
print(f"total size = {total}")
import pprint
pprint.pprint(torch.cuda.memory_stats())
def train(lm_dataloader, model, criterion, optimizer, vocab_size, args):
model.train()
from functools import reduce
import operator
num_params = reduce(operator.add, (reduce(operator.mul, x.size()) for x in model.parameters()))
if model.group:
total = torch.Tensor([num_params])
if torch.cuda.is_available():
total = total.cuda()
torch.distributed.all_reduce(total, group=model.group)
        logging.info(
            f"training model, #params = {num_params}, group: {model.group.rank()}, grank:"
            f" {torch.distributed.get_rank()}, sizes {model.group.size()}"
        )
        torch.distributed.barrier()
        if model.group.rank() == 0:
            logging.info(f"total #params = {total.item()}")
    else:
        logging.info(f"training model, #params = {num_params}")
vocab_size = 10000 # FIXME
total_loss = 0.0
start_time = time.time()
word_counter = 0
optimizer = optimizer(model)
def get_first_device(model):
if isinstance(model, DDP):
model = model.module
if not torch.cuda.is_available():
return torch.device("cpu")
if model.devices:
return model.devices[0]
else:
return torch.cuda.current_device()
def get_last_device(model):
if isinstance(model, DDP):
model = model.module
if not torch.cuda.is_available():
return torch.device("cpu")
if model.devices:
return model.devices[-1]
else:
return torch.cuda.current_device()
pipe_group = model.group
if args.ddp_zero:
model = DDP(
model,
device_ids=[torch.cuda.current_device()],
process_group=get_data_parallel_group(),
find_unused_parameters=False,
)
if pipe_group and pipe_group.rank() != 0 and pipe_group.rank() != (pipe_group.size() - 1):
thing = {"input": torch.zeros(args.batch_size)}
class FakeDataset:
def __getitem__(self, index):
return thing
def __len__(self):
return len(lm_dataloader)
lm_dataloader = FakeDataset()
for i, batch in enumerate(lm_dataloader):
bi = batch["input"]
if args.max_batch and i > args.max_batch:
break
optimizer.zero_grad()
try:
if (pipe_group is None or pipe_group.rank() == 0) and not args.ddp_zero:
tmp = batch["input"].to(get_first_device(model))
output = model(tmp)
else:
output = model(batch["input"])
except Exception as e:
raise RuntimeError(f"training failed on {torch.distributed.get_rank()}") from e
if pipe_group is None or pipe_group.rank() == pipe_group.size() - 1:
target = batch["target"].to(get_last_device(model))
output = output.to(target.device)
loss = criterion(output.view(-1, vocab_size), target.view(-1))
if args.ddp_zero:
ddp_group = get_data_parallel_group()
torch.distributed.all_reduce(loss, op=torch.distributed.ReduceOp.SUM, group=ddp_group)
loss /= ddp_group.size()
loss.backward()
del target
else:
if args.ddp_zero:
model.module.back_helper(output)
else:
model.back_helper(output)
del output
torch.nn.utils.clip_grad_value_(model.parameters(), 0.05)
optimizer.step()
if pipe_group is None or pipe_group.rank() == pipe_group.size() - 1:
total_loss += loss.item()
log_interval = 1
word_counter += batch["ntokens"]
if i % log_interval == 0 and i > 0:
cur_loss = total_loss / log_interval
elapsed = time.time() - start_time
print(
"| batch {:5d} | wps {:5.2f} | loss {:5.2f} | ppl {:8.2f}".format(
i, word_counter / elapsed, cur_loss, math.exp(cur_loss)
)
)
word_counter = 0
total_loss = 0
start_time = time.time()
# if i >= 10:
# break
# torch.cuda.empty_cache()
# check_size_buckets()
def evaluate(eval_model, data_source, criterion, bptt, ntokens):
eval_model.eval()
total_loss = 0.0
with torch.no_grad():
for i in range(0, data_source.size(0) - 1, bptt):
data, targets = get_batch(data_source, i, bptt)
output = eval_model(data)
output = output.to(targets.device)
output_flat = output.view(-1, ntokens)
total_loss += len(data) * criterion(output_flat, targets).item()
return total_loss / (len(data_source) - 1)
def get_number_of_words(data):
return data.size()[0] * data.size()[1]
def benchmark_language_model(train_data, val_data, test_data, model, criterion, optimizer, ntokens, args):
epoch = 1
bptt = 35
start_time = time.time()
print("-" * 110)
print("| start of epoch {:1d}".format(epoch))
print("-" * 110)
epoch_start_time = time.time()
    train(train_data, model, criterion, optimizer, ntokens, args)  # ntokens doubles as the vocab_size expected by train()
val_loss = 1 # evaluate(model, val_data, criterion, bptt, ntokens)
print("-" * 89)
print(
"| end of epoch {:1d} | time: {:5.2f}s | valid loss {:5.2f} ".format(
epoch, (time.time() - epoch_start_time), val_loss
)
)
print("-" * 110)
elapsed_time = time.time() - start_time
nwords = get_number_of_words(train_data) + get_number_of_words(val_data)
wps = nwords / elapsed_time
test_loss = 1 # evaluate(model, test_data, criterion, bptt, ntokens)
print("=" * 89)
print(
"| end of training | test loss {:5.2f} \n| time: {:5.2f}s | words: {:3d} | wps: {:5.2f}".format(
test_loss, elapsed_time, nwords, wps
)
)
print("=" * 110)
if can_benchmark and len(model.balance) == 4:
# Assert that words per second is within 3 standard deviations of the average
# of six golden runs
assert wps > 36954.4 - (3 * 116.825)
print("Peak allocated bytes on cuda:0: {:1d}".format(torch.cuda.memory_stats(0)["allocated_bytes.all.peak"]))
print("Peak allocated bytes on cuda:1: {:1d}".format(torch.cuda.memory_stats(1)["allocated_bytes.all.peak"]))
print("Peak allocated bytes on cuda:2: {:1d}".format(torch.cuda.memory_stats(2)["allocated_bytes.all.peak"]))
print("Peak allocated bytes on cuda:3: {:1d}".format(torch.cuda.memory_stats(3)["allocated_bytes.all.peak"]))
# Assert that memory usage on each GPU is within 10% of golden run
# Right-hand-side is golden run bytes * 110%
assert torch.cuda.memory_stats(0)["allocated_bytes.all.peak"] < 4061909504 * 1.1
assert torch.cuda.memory_stats(1)["allocated_bytes.all.peak"] < 4050944 * 1.1
assert torch.cuda.memory_stats(2)["allocated_bytes.all.peak"] < 10427392 * 1.1
assert torch.cuda.memory_stats(3)["allocated_bytes.all.peak"] < 2031824896 * 1.1
print("No regression detected")
def generate_balance_weighted(num_devices, num_layers, fraction=0.5):
balance = []
layers_assigned = 0
average_count = num_layers / num_devices
last_layers = int(average_count * fraction)
balance = generate_balance(num_devices - 1, num_layers - last_layers)
balance.append(last_layers)
return balance
def generate_balance(num_devices, num_layers):
balance = []
layers_assigned = 0
for i in range(num_devices):
x = (num_layers - layers_assigned) / (num_devices - i)
if x.is_integer():
balance.append(int(x))
layers_assigned += x
else:
balance.append(math.ceil(x))
layers_assigned += math.ceil(x)
return balance
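# --- Hedged usage sketch (not part of the original benchmark). Illustrates the
# layer-partitioning helpers: generate_balance spreads num_layers as evenly as
# possible across devices (ceiling first), while generate_balance_weighted gives
# the last device only a fraction of the average share.
def _demo_generate_balance():
    assert generate_balance(4, 10) == [3, 3, 2, 2]
    assert sum(generate_balance(3, 8)) == 8
    assert generate_balance_weighted(4, 10, fraction=0.5) == [3, 3, 3, 1]
    return generate_balance(4, 10)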
def make_model_and_data(args, device, new_data: bool = True):
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
if new_data:
vocab_size = 10000
model, criterion, optimizer, scaler = make_model(args, device, vocab_size)
lm_dataset = BenchmarkLMDataset()
lm_dataloader = DataLoader(
lm_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0, collate_fn=collate_sentences_lm
)
return {
"model": model,
"criterion": criterion,
"optimizer": optimizer,
"data": lm_dataloader,
"vocab_size": vocab_size,
}
else:
data = get_data(device)
ntokens, train_data, val_data, test_data = data
model, criterion, optimizer, scaler = make_model(args, device, ntokens)
return {
"model": model,
"criterion": criterion,
"optimizer": optimizer,
"data": data,
}
def bench_single_process(args):
num_devices = torch.cuda.device_count() if torch.cuda.is_available() else 1
assert num_devices > 0
init_random_seed(0)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
new_data = True
blob = make_model_and_data(args, None, new_data=new_data)
model = blob["model"]
balance = generate_balance(min(num_devices, 4), len(model))
p = pipe.Pipe(
model, balance, chunks=args.chunks, pipelined_backward=args.pipelined_backward, checkpoint=args.checkpoint
)
del model
del blob["model"]
if new_data:
train(blob["data"], p, blob["criterion"], blob["optimizer"], blob["vocab_size"], args)
else:
ntokens, train_data, val_data, test_data = blob["data"]
        benchmark_language_model(train_data, val_data, test_data, p, blob["criterion"], blob["optimizer"], ntokens, args)
def run_mp_worker(args, available_workers):
new_data = True
blob = make_model_and_data(args, None, new_data=new_data)
model = blob["model"]
balance = generate_balance_weighted(get_pipeline_parallel_group().size(), len(model), 0.8)
p = pipe.Pipe(
model,
balance,
style=Pipe.AsyncSchedule,
chunks=args.chunks,
worker_map=get_worker_map(),
input_device=torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu"),
pipelined_backward=args.pipelined_backward,
checkpoint=args.checkpoint,
# loss_fn=blob["criterion"],
)
if torch.cuda.is_available():
p = p.cuda()
if args.all_at_once and p.pipeline:
print(f"running all at once")
p.pipeline.all_at_once = True
if new_data:
train(blob["data"], p, blob["criterion"], blob["optimizer"], blob["vocab_size"], args)
else:
ntokens, train_data, val_data, test_data = blob["data"]
        benchmark_language_model(train_data, val_data, test_data, p, blob["criterion"], blob["optimizer"], ntokens, args)
def run_worker(rank, world_size, args):
if args.world_size != 0:
world_size = args.world_size
dist_init(rank + args.rank_base, world_size, hostname=args.host)
initialize_model_parallel(1, world_size)
init_random_seed(0)
run_mp_worker(args, world_size)
rpc.shutdown()
torch.distributed.destroy_process_group()
def bench_multi_process(args, all_at_once=False):
if args.local_world_size != 0:
world_size = args.local_world_size
else:
world_size = min(torch.cuda.device_count(), 2)
mp.spawn(run_worker, args=(world_size, args), nprocs=world_size, join=True)
best_device_map = {
0: "mlx5_0:1",
1: "mlx5_0:1",
2: "mlx5_1:1",
3: "mlx5_1:1",
4: "mlx5_2:1",
5: "mlx5_2:1",
6: "mlx5_3:1",
7: "mlx5_3:1",
}
def bench_mpi(args):
guess_rank = int(os.environ["OMPI_COMM_WORLD_RANK"])
world_size = int(os.environ["OMPI_COMM_WORLD_SIZE"])
local_rank = int(os.environ["OMPI_COMM_WORLD_LOCAL_RANK"])
os.environ["UCX_NET_DEVICES"] = best_device_map[local_rank]
os.environ["MASTER_ADDR"] = args.host
os.environ["MASTER_PORT"] = "10638"
if args.socket_name:
os.environ["GLOO_SOCKET_IFNAME"] = args.socket_name
os.environ["TP_SOCKET_IFNAME"] = args.socket_name
torch.distributed.init_process_group(backend="gloo", rank=guess_rank, world_size=world_size)
os.environ["MASTER_ADDR"] = args.host
os.environ["MASTER_PORT"] = "10639"
init_method = f"tcp://{os.environ["MASTER_ADDR"]}:{os.environ["MASTER_PORT"]}"
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
torch.cuda.set_device(local_rank % torch.cuda.device_count())
rpc.init_rpc(
f"Test{rank}",
rank=rank,
world_size=world_size,
backend=rpc.BackendType.PROCESS_GROUP,
rpc_backend_options=rpc.ProcessGroupRpcBackendOptions(rpc_timeout=20, init_method=init_method),
)
backends = {"model_parallel_backend": "nccl", "pipeline_backend": "mpi", "ddp_backend": "nccl"}
if args.ddp_zero:
initialize_model_parallel(1, 4, **backends)
else:
initialize_model_parallel(1, world_size, **backends)
init_random_seed(0)
run_mp_worker(args, world_size)
rpc.shutdown()
torch.distributed.destroy_process_group()
parser = argparse.ArgumentParser(description="benchmark")
parser.add_argument("--local-world-size", "-l", type=int, default=0, help="local world size")
parser.add_argument("--world-size", "-w", type=int, default=0, help="world size")
parser.add_argument("--rank-base", "-r", type=int, help="rank base", default=0)
parser.add_argument("--host", "-o", type=str, default="localhost", help="hostname")
parser.add_argument("--no-mpi", action="store_true", default=False, help="disable mpi")
parser.add_argument("--chunks", type=int, default=1, help="number of microbatches per batch")
parser.add_argument("--batch-size", type=int, default=8, help="size of a batch")
parser.add_argument("--all-at-once", action="store_true", default=False, help="do backward pass on whole batch at once")
parser.add_argument("--max-batch", type=int, default=4, help="Max number of batches")
parser.add_argument("--socket-name", type=str, default=None, help="socket ifname for gloo/tp")
parser.add_argument("--num-decoder-layers", type=int, default=10, help="Number of decoder layers in the model")
parser.add_argument("--ddp-zero", action="store_true", default=False, help="enable ddp")
parser.add_argument(
"--lazy-construction", action="store_true", default=False, help="Number of decoder layers in the model"
)
parser.add_argument(
"--checkpoint", default="never", choices=["always", "except_last", "never"], help="Checkpointing strategy for pipe"
)
parser.add_argument(
"--pipelined-backward", dest="pipelined_backward", action="store_true", help="Pipelined backward pass"
)
parser.add_argument(
"--no-pipelined-backward", dest="pipelined_backward", action="store_false", help="Pipelined backward pass"
)
parser.set_defaults(pipelined_backward=True)
if __name__ == "__main__":
args = parser.parse_args()
# bench_multi_process(args, all_at_once=True)
if args.no_mpi or "OMPI_COMM_WORLD_RANK" not in os.environ:
print(f"Running benchmark with args: {args}")
bench_single_process(args)
else:
if os.environ["OMPI_COMM_WORLD_RANK"] == "0":
print(f"Running benchmark with args: {args}")
bench_mpi(args)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import argparse
import logging
import math
import os
import time
import warnings
from benchmark_dataset import BenchmarkLMDataset, collate_sentences_lm
import torch
from torch.distributed import rpc
import torch.multiprocessing as mp
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
import torchtext
from torchtext.data.utils import get_tokenizer
from fairscale.nn import Pipe
from fairscale.nn.model_parallel import initialize_model_parallel
from fairscale.nn.model_parallel.initialize import get_data_parallel_group, get_pipeline_parallel_group
from fairscale.nn.pipe import LazyModule, pipe
from fairscale.optim import GradScaler
from fairscale.optim.oss import OSS
from fairscale.utils.testing import dist_init, get_worker_map
try:
from fairscale.optim import Adam # type: ignore
can_benchmark = True
except ImportError:
from torch.optim import Adam # type: ignore
can_benchmark = False
def init_random_seed(seed: int):
import numpy
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
numpy.random.seed(seed)
PIPE_CHUNKS = 2
iteration_count = 0
class EmbeddingLayer(nn.Embedding):
def __init__(self, ntoken, ninp, initrange):
super().__init__(ntoken, ninp)
self.ninp = ninp
self.weight.data.uniform_(-initrange, initrange)
def forward(self, src):
return super().forward(src) * math.sqrt(self.ninp)
class PositionalEncodingLayer(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncodingLayer, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer("pe", pe)
def forward(self, x):
x = x + self.pe[: x.size(0), :]
return self.dropout(x)
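# --- Hedged usage sketch (not part of the original benchmark); the dimensions
# below are arbitrary. Shows the tensor layout PositionalEncodingLayer expects:
# inputs are (seq_len, batch, d_model) and the precomputed sinusoidal table pe,
# of shape (max_len, 1, d_model), is broadcast-added over the batch axis.
def _demo_positional_encoding():
    layer = PositionalEncodingLayer(d_model=16, dropout=0.0, max_len=32)
    x = torch.zeros(10, 4, 16)  # (seq_len, batch, d_model)
    out = layer(x)
    assert layer.pe.shape == (32, 1, 16)
    assert out.shape == (10, 4, 16)
    return out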
class TransformerDecoderLayer(nn.TransformerEncoderLayer):
"""Though this class inherits from torch.nn.TransformerEncoderLayer,
it functions as a decoder in this model"""
    def __init__(self, ninp, nhead, nhid, dropout):
        super().__init__(ninp, nhead, nhid, dropout)
self.src_mask = None
def _generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float("-inf")).masked_fill(mask == 1, float(0.0))
return mask
def forward(self, src):
global iteration_count
iteration_count += 1
# if iteration_count == 196:
# dump_cuda_tensors()
if self.src_mask is None or self.src_mask.size(0) != len(src):
device = src.device
mask = self._generate_square_subsequent_mask(len(src)).to(device)
self.src_mask = mask
return super().forward(src, self.src_mask)
class LinearLayer(nn.Linear):
def __init__(self, ninp, ntoken, initrange):
super().__init__(ninp, ntoken)
self.bias.data.zero_()
self.weight.data.uniform_(-initrange, initrange)
class TransformerLMSequential(nn.Sequential):
"""A small language model based on the design of GPT-2 using nn.Sequential
    for compatibility with Pipe"""
def __init__(self, ntokens, ninp, nhead, nhid, dropout, initrange, ndecoder):
layers = [
EmbeddingLayer(ntokens, ninp, initrange),
PositionalEncodingLayer(ninp, dropout),
]
for _ in range(ndecoder):
layers.append(TransformerDecoderLayer(ninp, nhead, nhid, dropout))
layers.append(LinearLayer(ninp, ntokens, initrange))
super(TransformerLMSequential, self).__init__(*layers)
def get_data(device):
    with warnings.catch_warnings(record=True):
TEXT = torchtext.data.Field(
tokenize=get_tokenizer("basic_english"), init_token="<sos>", eos_token="<eos>", lower=True
)
train_txt, val_txt, test_txt = torchtext.datasets.WikiText2.splits(TEXT)
TEXT.build_vocab(train_txt)
ntokens = len(TEXT.vocab.stoi)
batch_size = 20
eval_batch_size = 10
train_data = batchify(train_txt, batch_size, TEXT, device)
val_data = batchify(val_txt, eval_batch_size, TEXT, device)
test_data = batchify(test_txt, eval_batch_size, TEXT, device)
return ntokens, train_data, val_data, test_data
def batchify(data, bsz, TEXT, device):
data = TEXT.numericalize([data.examples[0].text])
nbatch = data.size(0) // bsz
data = data.narrow(0, 0, nbatch * bsz)
data = data.view(bsz, -1).t().contiguous()
return data.to(device)
def get_batch(source, i, bptt):
seq_len = min(bptt, len(source) - 1 - i)
data = source[i : i + seq_len]
target = source[i + 1 : i + 1 + seq_len].view(-1)
return data, target
def make_model(args, device, ntokens):
ninp = 2048 # embedding dimension
nhid = 2048 # the dimension of the feedforward network model in nn.TransformerEncoder
nhead = 32 # the number of heads in the multiheadattention models
dropout = 0
initrange = 0.1
ndecoder = args.num_decoder_layers
if args.lazy_construction:
layers = [
LazyModule(lambda: EmbeddingLayer(ntokens, ninp, initrange)),
LazyModule(lambda: PositionalEncodingLayer(ninp, dropout)),
]
for _ in range(ndecoder):
layers.append(LazyModule(lambda: TransformerDecoderLayer(ninp, nhead, nhid, dropout)))
layers.append(LazyModule(lambda: LinearLayer(ninp, ntokens, initrange)))
model = layers
else:
model = TransformerLMSequential(ntokens, ninp, nhead, nhid, dropout, initrange, ndecoder).to(device)
criterion = nn.CrossEntropyLoss()
lr = 0.01 # learning rate
def make_adam(model):
if args.ddp_zero:
return OSS(params=model.parameters(), optim=Adam, group=get_data_parallel_group(), lr=lr)
else:
return Adam(model.parameters(), lr=lr)
optimizer = make_adam
scaler = GradScaler()
return model, criterion, optimizer, scaler
def get_tensors_by_size_bucket():
from collections import defaultdict
import gc
size_buckets = defaultdict(int)
for obj in gc.get_objects():
if not isinstance(obj, torch.Tensor):
continue
if obj.device.type == "cuda":
size_buckets[(*obj.size(),) + (obj.element_size(),)] += 1
return size_buckets
def dump_size_buckets(size_buckets, prefix=""):
from functools import reduce
import operator
total = 0
for key, value in size_buckets.items():
this = reduce(operator.mul, key) * value
total += this
print(prefix + f"{key} : {value}, {this}")
print(prefix + f"total = {total}")
last_size_buckets = None
once = True
def safe_rank():
try:
return torch.distributed.get_rank()
except AssertionError:
return 0
def check_size_buckets():
global last_size_buckets
global once
size_buckets = get_tensors_by_size_bucket()
if last_size_buckets is not None:
if size_buckets != last_size_buckets:
print(f"difference is oustanding tensors: {safe-rank()}")
dump_size_buckets(last_size_buckets, "old: ")
dump_size_buckets(size_buckets, "new: ")
if once:
print(f"dumping buckets for: {safe_rank()}")
dump_size_buckets(last_size_buckets, "old: ")
dump_size_buckets(size_buckets, "new: ")
once = False
else:
print(f"size buckets none on {safe_rank()}")
last_size_buckets = size_buckets
def dump_cuda_tensors():
print(f"dumping cuda tensors...")
from functools import reduce
import gc
    import operator
    from collections import defaultdict
    size_buckets = defaultdict(int)  # bucket counts keyed by (tensor shape, element size)
    for obj in gc.get_objects():
if not isinstance(obj, torch.Tensor):
continue
if obj.device.type == "cuda":
size_buckets[(*obj.size(),) + (obj.element_size(),)] += 1
print(f"outstanding cuda tensors:")
total = 0
for key, value in size_buckets.items():
this = reduce(operator.mul, key) * value
total += this
print(f"{key} : {value}, {this}")
print(f"total size = {total}")
import pprint
pprint.pprint(torch.cuda.memory_stats())
def train(lm_dataloader, model, criterion, optimizer, vocab_size, args):
model.train()
from functools import reduce
import operator
num_params = reduce(operator.add, (reduce(operator.mul, x.size()) for x in model.parameters()))
if model.group:
total = torch.Tensor([num_params])
if torch.cuda.is_available():
total = total.cuda()
torch.distributed.all_reduce(total, group=model.group)
        logging.info(
            f"training model, #params = {num_params}, group: {model.group.rank()}, grank:"
            f" {torch.distributed.get_rank()}, sizes {model.group.size()}"
        )
        torch.distributed.barrier()
        if model.group.rank() == 0:
            logging.info(f"total #params = {total.item()}")
    else:
        logging.info(f"training model, #params = {num_params}")
vocab_size = 10000 # FIXME
total_loss = 0.0
start_time = time.time()
word_counter = 0
optimizer = optimizer(model)
def get_first_device(model):
if isinstance(model, DDP):
model = model.module
if not torch.cuda.is_available():
return torch.device("cpu")
if model.devices:
return model.devices[0]
else:
return torch.cuda.current_device()
def get_last_device(model):
if isinstance(model, DDP):
model = model.module
if not torch.cuda.is_available():
return torch.device("cpu")
if model.devices:
return model.devices[-1]
else:
return torch.cuda.current_device()
pipe_group = model.group
if args.ddp_zero:
model = DDP(
model,
device_ids=[torch.cuda.current_device()],
process_group=get_data_parallel_group(),
find_unused_parameters=False,
)
if pipe_group and pipe_group.rank() != 0 and pipe_group.rank() != (pipe_group.size() - 1):
thing = {"input": torch.zeros(args.batch_size)}
class FakeDataset:
def __getitem__(self, index):
return thing
def __len__(self):
return len(lm_dataloader)
lm_dataloader = FakeDataset()
for i, batch in enumerate(lm_dataloader):
bi = batch["input"]
if args.max_batch and i > args.max_batch:
break
optimizer.zero_grad()
try:
if (pipe_group is None or pipe_group.rank() == 0) and not args.ddp_zero:
tmp = batch["input"].to(get_first_device(model))
output = model(tmp)
else:
output = model(batch["input"])
except Exception as e:
raise RuntimeError(f"training failed on {torch.distributed.get_rank()}") from e
if pipe_group is None or pipe_group.rank() == pipe_group.size() - 1:
target = batch["target"].to(get_last_device(model))
output = output.to(target.device)
loss = criterion(output.view(-1, vocab_size), target.view(-1))
if args.ddp_zero:
ddp_group = get_data_parallel_group()
torch.distributed.all_reduce(loss, op=torch.distributed.ReduceOp.SUM, group=ddp_group)
loss /= ddp_group.size()
loss.backward()
del target
else:
if args.ddp_zero:
model.module.back_helper(output)
else:
model.back_helper(output)
del output
torch.nn.utils.clip_grad_value_(model.parameters(), 0.05)
optimizer.step()
if pipe_group is None or pipe_group.rank() == pipe_group.size() - 1:
total_loss += loss.item()
log_interval = 1
word_counter += batch["ntokens"]
if i % log_interval == 0 and i > 0:
cur_loss = total_loss / log_interval
elapsed = time.time() - start_time
print(
"| batch {:5d} | wps {:5.2f} | loss {:5.2f} | ppl {:8.2f}".format(
i, word_counter / elapsed, cur_loss, math.exp(cur_loss)
)
)
word_counter = 0
total_loss = 0
start_time = time.time()
# if i >= 10:
# break
# torch.cuda.empty_cache()
# check_size_buckets()
def evaluate(eval_model, data_source, criterion, bptt, ntokens):
eval_model.eval()
total_loss = 0.0
with torch.no_grad():
for i in range(0, data_source.size(0) - 1, bptt):
data, targets = get_batch(data_source, i, bptt)
output = eval_model(data)
output = output.to(targets.device)
output_flat = output.view(-1, ntokens)
total_loss += len(data) * criterion(output_flat, targets).item()
return total_loss / (len(data_source) - 1)
def get_number_of_words(data):
return data.size()[0] * data.size()[1]
def benchmark_language_model(train_data, val_data, test_data, model, criterion, optimizer, ntokens, args):
epoch = 1
bptt = 35
start_time = time.time()
print("-" * 110)
print("| start of epoch {:1d}".format(epoch))
print("-" * 110)
epoch_start_time = time.time()
    train(train_data, model, criterion, optimizer, ntokens, args)  # ntokens doubles as the vocab_size expected by train()
val_loss = 1 # evaluate(model, val_data, criterion, bptt, ntokens)
print("-" * 89)
print(
"| end of epoch {:1d} | time: {:5.2f}s | valid loss {:5.2f} ".format(
epoch, (time.time() - epoch_start_time), val_loss
)
)
print("-" * 110)
elapsed_time = time.time() - start_time
nwords = get_number_of_words(train_data) + get_number_of_words(val_data)
wps = nwords / elapsed_time
test_loss = 1 # evaluate(model, test_data, criterion, bptt, ntokens)
print("=" * 89)
print(
"| end of training | test loss {:5.2f} \n| time: {:5.2f}s | words: {:3d} | wps: {:5.2f}".format(
test_loss, elapsed_time, nwords, wps
)
)
print("=" * 110)
if can_benchmark and len(model.balance) == 4:
# Assert that words per second is within 3 standard deviations of the average
# of six golden runs
assert wps > 36954.4 - (3 * 116.825)
print("Peak allocated bytes on cuda:0: {:1d}".format(torch.cuda.memory_stats(0)["allocated_bytes.all.peak"]))
print("Peak allocated bytes on cuda:1: {:1d}".format(torch.cuda.memory_stats(1)["allocated_bytes.all.peak"]))
print("Peak allocated bytes on cuda:2: {:1d}".format(torch.cuda.memory_stats(2)["allocated_bytes.all.peak"]))
print("Peak allocated bytes on cuda:3: {:1d}".format(torch.cuda.memory_stats(3)["allocated_bytes.all.peak"]))
# Assert that memory usage on each GPU is within 10% of golden run
# Right-hand-side is golden run bytes * 110%
assert torch.cuda.memory_stats(0)["allocated_bytes.all.peak"] < 4061909504 * 1.1
assert torch.cuda.memory_stats(1)["allocated_bytes.all.peak"] < 4050944 * 1.1
assert torch.cuda.memory_stats(2)["allocated_bytes.all.peak"] < 10427392 * 1.1
assert torch.cuda.memory_stats(3)["allocated_bytes.all.peak"] < 2031824896 * 1.1
print("No regression detected")
def generate_balance_weighted(num_devices, num_layers, fraction=0.5):
balance = []
layers_assigned = 0
average_count = num_layers / num_devices
last_layers = int(average_count * fraction)
balance = generate_balance(num_devices - 1, num_layers - last_layers)
balance.append(last_layers)
return balance
def generate_balance(num_devices, num_layers):
balance = []
layers_assigned = 0
for i in range(num_devices):
x = (num_layers - layers_assigned) / (num_devices - i)
if x.is_integer():
balance.append(int(x))
layers_assigned += x
else:
balance.append(math.ceil(x))
layers_assigned += math.ceil(x)
return balance
def make_model_and_data(args, device, new_data: bool = True):
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
if new_data:
vocab_size = 10000
model, criterion, optimizer, scaler = make_model(args, device, vocab_size)
lm_dataset = BenchmarkLMDataset()
lm_dataloader = DataLoader(
lm_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0, collate_fn=collate_sentences_lm
)
return {
"model": model,
"criterion": criterion,
"optimizer": optimizer,
"data": lm_dataloader,
"vocab_size": vocab_size,
}
else:
data = get_data(device)
ntokens, train_data, val_data, test_data = data
model, criterion, optimizer, scaler = make_model(args, device, ntokens)
return {
"model": model,
"criterion": criterion,
"optimizer": optimizer,
"data": data,
}
def bench_single_process(args):
num_devices = torch.cuda.device_count() if torch.cuda.is_available() else 1
assert num_devices > 0
init_random_seed(0)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
new_data = True
blob = make_model_and_data(args, None, new_data=new_data)
model = blob["model"]
balance = generate_balance(min(num_devices, 4), len(model))
p = pipe.Pipe(
model, balance, chunks=args.chunks, pipelined_backward=args.pipelined_backward, checkpoint=args.checkpoint
)
del model
del blob["model"]
if new_data:
train(blob["data"], p, blob["criterion"], blob["optimizer"], blob["vocab_size"], args)
else:
ntokens, train_data, val_data, test_data = blob["data"]
        benchmark_language_model(train_data, val_data, test_data, p, blob["criterion"], blob["optimizer"], ntokens, args)
def run_mp_worker(args, available_workers):
new_data = True
blob = make_model_and_data(args, None, new_data=new_data)
model = blob["model"]
balance = generate_balance_weighted(get_pipeline_parallel_group().size(), len(model), 0.8)
p = pipe.Pipe(
model,
balance,
style=Pipe.AsyncSchedule,
chunks=args.chunks,
worker_map=get_worker_map(),
input_device=torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu"),
pipelined_backward=args.pipelined_backward,
checkpoint=args.checkpoint,
# loss_fn=blob["criterion"],
)
if torch.cuda.is_available():
p = p.cuda()
if args.all_at_once and p.pipeline:
print(f"running all at once")
p.pipeline.all_at_once = True
if new_data:
train(blob["data"], p, blob["criterion"], blob["optimizer"], blob["vocab_size"], args)
else:
ntokens, train_data, val_data, test_data = blob["data"]
        benchmark_language_model(train_data, val_data, test_data, p, blob["criterion"], blob["optimizer"], ntokens, args)
def run_worker(rank, world_size, args):
if args.world_size != 0:
world_size = args.world_size
dist_init(rank + args.rank_base, world_size, hostname=args.host)
initialize_model_parallel(1, world_size)
init_random_seed(0)
run_mp_worker(args, world_size)
rpc.shutdown()
torch.distributed.destroy_process_group()
def bench_multi_process(args, all_at_once=False):
if args.local_world_size != 0:
world_size = args.local_world_size
else:
world_size = min(torch.cuda.device_count(), 2)
mp.spawn(run_worker, args=(world_size, args), nprocs=world_size, join=True)
best_device_map = {
0: "mlx5_0:1",
1: "mlx5_0:1",
2: "mlx5_1:1",
3: "mlx5_1:1",
4: "mlx5_2:1",
5: "mlx5_2:1",
6: "mlx5_3:1",
7: "mlx5_3:1",
}
def bench_mpi(args):
guess_rank = int(os.environ["OMPI_COMM_WORLD_RANK"])
world_size = int(os.environ["OMPI_COMM_WORLD_SIZE"])
local_rank = int(os.environ["OMPI_COMM_WORLD_LOCAL_RANK"])
os.environ["UCX_NET_DEVICES"] = best_device_map[local_rank]
os.environ["MASTER_ADDR"] = args.host
os.environ["MASTER_PORT"] = "10638"
if args.socket_name:
os.environ["GLOO_SOCKET_IFNAME"] = args.socket_name
os.environ["TP_SOCKET_IFNAME"] = args.socket_name
torch.distributed.init_process_group(backend="gloo", rank=guess_rank, world_size=world_size)
os.environ["MASTER_ADDR"] = args.host
os.environ["MASTER_PORT"] = "10639"
init_method = f"tcp://{os.environ['MASTER_ADDR']}:{os.environ['MASTER_PORT']}"
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
torch.cuda.set_device(local_rank % torch.cuda.device_count())
rpc.init_rpc(
f"Test{rank}",
rank=rank,
world_size=world_size,
backend=rpc.BackendType.PROCESS_GROUP,
rpc_backend_options=rpc.ProcessGroupRpcBackendOptions(rpc_timeout=20, init_method=init_method),
)
backends = {"model_parallel_backend": "nccl", "pipeline_backend": "mpi", "ddp_backend": "nccl"}
if args.ddp_zero:
initialize_model_parallel(1, 4, **backends)
else:
initialize_model_parallel(1, world_size, **backends)
init_random_seed(0)
run_mp_worker(args, world_size)
rpc.shutdown()
torch.distributed.destroy_process_group()
parser = argparse.ArgumentParser(description="benchmark")
parser.add_argument("--local-world-size", "-l", type=int, default=0, help="local world size")
parser.add_argument("--world-size", "-w", type=int, default=0, help="world size")
parser.add_argument("--rank-base", "-r", type=int, help="rank base", default=0)
parser.add_argument("--host", "-o", type=str, default="localhost", help="hostname")
parser.add_argument("--no-mpi", action="store_true", default=False, help="disable mpi")
parser.add_argument("--chunks", type=int, default=1, help="number of microbatches per batch")
parser.add_argument("--batch-size", type=int, default=8, help="size of a batch")
parser.add_argument("--all-at-once", action="store_true", default=False, help="do backward pass on whole batch at once")
parser.add_argument("--max-batch", type=int, default=4, help="Max number of batches")
parser.add_argument("--socket-name", type=str, default=None, help="socket ifname for gloo/tp")
parser.add_argument("--num-decoder-layers", type=int, default=10, help="Number of decoder layers in the model")
parser.add_argument("--ddp-zero", action="store_true", default=False, help="enable ddp")
parser.add_argument(
"--lazy-construction", action="store_true", default=False, help="Number of decoder layers in the model"
)
parser.add_argument(
"--checkpoint", default="never", choices=["always", "except_last", "never"], help="Checkpointing strategy for pipe"
)
parser.add_argument(
"--pipelined-backward", dest="pipelined_backward", action="store_true", help="Pipelined backward pass"
)
parser.add_argument(
"--no-pipelined-backward", dest="pipelined_backward", action="store_false", help="Pipelined backward pass"
)
parser.set_defaults(pipelined_backward=True)
if __name__ == "__main__":
args = parser.parse_args()
# bench_multi_process(args, all_at_once=True)
if args.no_mpi or "OMPI_COMM_WORLD_RANK" not in os.environ:
print(f"Running benchmark with args: {args}")
bench_single_process(args)
else:
if os.environ["OMPI_COMM_WORLD_RANK"] == "0":
print(f"Running benchmark with args: {args}")
bench_mpi(args)
|
import re
from typing import Tuple
from duty.utils import att_parse, format_response
from duty.objects import MySignalEvent, dp
def delete_template(name: str, templates: list) -> Tuple[list, bool]:
for template in templates:
if template['name'].lower() == name:
templates.remove(template)
return templates, True
return templates, False
def get_template_list(event: MySignalEvent, templates: list):
    page = 0
    if len(event.args) > 1 and (
        event.args[-1].isdigit()
        or (event.args[-1].startswith('-') and event.args[-1][1:].isdigit())
    ):
        page = int(event.args.pop(-1))
        if page > 0:
            page -= 1
category = ' '.join(event.args).lower()
template_list = None
if not category:
cats = {}
for t in templates:
cats[t['cat']] = cats.get(t['cat'], 0) + 1
message = "📚 Категории {name_genitive}:"
for cat in cats:
message += f"\n-- {cat} ({cats[cat]})"
else:
if category == 'все':
message = '📃 Список всех {name_genitive}:'
category = None
else:
message = f'📖 {{name_accusative_cap}} категории "{category}":'
message += list_by_page(templates, page, category)
if '\n' not in message:
if templates == []:
message = '{no_templates}'
else:
message = '⚠️ {name_accusative_cap} по указанному запросу не найдены'
return message
def list_by_page(templates, page, category) -> str:
if len(templates) > 40:
if page >= 0:
message = f'(страница #{page+1})'
else:
message = f'(страница #{abs(page)} с конца)'
else:
message = ''
shift = page*40
sliced_list = templates[shift:shift+40] if shift >= 0 else templates[shift-1:shift+39]
if page < 0:
try:
sliced_list.append(templates[shift+39])
except IndexError:
pass
offset = (shift+1) if shift >= 0 else (len(templates)+shift)
for i, t in enumerate(sliced_list, offset):
if category:
if t['cat'] != category:
continue
message += f'\n-- {t['name']}'
else:
message += f'\n{i}. {t['name']} | {t['cat']}'
if '\n' not in message:
return ''
return '\n' + message
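# --- Hedged usage sketch (not part of the original module); the fake template
# records are made up for illustration. Shows the paging rule in list_by_page:
# 40 templates per page, non-negative pages counted from the start of the list
# and negative pages counted from its end.
def _demo_list_by_page():
    templates = [{"name": f"tpl{i}", "cat": "demo"} for i in range(90)]
    first_page = list_by_page(templates, 0, None)
    last_page = list_by_page(templates, -1, None)
    assert "(страница #1)" in first_page
    assert "(страница #1 с конца)" in last_page
    assert "tpl0" in first_page and "tpl89" in last_page
    return first_page, last_page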
@dp.longpoll_event_register('+шаб')
@dp.my_signal_event_register('+шаб')
def template_create(event: MySignalEvent) -> str:
name = re.findall(r"([^|]+)\|?([^|]*)", ' '.join(event.args))
if not name:
event.msg_op(2, "❗ Не указано название")
return "ok"
category = name[0][1].lower().strip() or 'без категории'
name = name[0][0].lower().strip()
if category == 'все':
event.msg_op(2, '❗ Невозможно создать шаблон с категорией "все"')
return "ok"
if not (event.payload or event.attachments or event.reply_message):
event.msg_op(2, "❗ Нет данных")
return "ok"
if event.reply_message:
data = event.reply_message['text']
event.attachments = att_parse(event.reply_message['attachments'])
if event.attachments:
if event.attachments[0].startswith('audio_message'):
event.msg_op(2, '⚠️ Для сохранения ГС используй команду "+гс"')
return "ok"
else:
data = event.payload
event.db.templates, exist = delete_template(name, event.db.templates)
event.db.templates.append({
"name": name,
"payload": data,
"cat": category,
"attachments": event.attachments
})
event.msg_op(2, f'✅ Шаблон "{name}" ' +
("перезаписан" if exist else "сохранен"), delete=2)
return "ok"
@dp.longpoll_event_register('шабы')
@dp.my_signal_event_register('шабы')
def template_list(event: MySignalEvent) -> str:
message = get_template_list(event, event.db.templates)
event.msg_op(2, format_response(message,
name_genitive='шаблонов',
name_accusative='шаблоны',
name_accusative_cap='Шаблоны',
no_templates='👀 Нет ни одного шаблона... Для создания используй команду "+шаб"'
))
return "ok"
def get_name(event: MySignalEvent) -> Tuple[MySignalEvent, str]:
return event, ' '.join(event.args).lower()
@dp.longpoll_event_register('-шаб')
@dp.my_signal_event_register('-шаб')
@dp.wrap_handler(get_name)
def template_delete(event: MySignalEvent, name: str) -> str:
event.db.templates, exist = delete_template(name, event.db.templates)
if exist:
msg = f'✅ Шаблон "{name}" удален'
else:
msg = f'⚠️ Шаблон "{name}" не найден'
event.msg_op(2, msg, delete=1)
return "ok"
@dp.longpoll_event_register('шаб')
@dp.my_signal_event_register('шаб')
@dp.wrap_handler(get_name)
def template_show(event: MySignalEvent, name: str) -> str:
template = None
for temp in event.db.templates:
if temp['name'] == name:
template = temp
break
if template:
atts = template['attachments']
atts.extend(event.attachments)
event.msg_op(2, temp['payload'] + '\n' + event.payload,
keep_forward_messages=1, attachment=','.join(atts))
else:
event.msg_op(2, f'❗ Шаблон "{name}" не найден')
return "ok"
|
import re
from typing import Tuple
from duty.utils import att_parse, format_response
from duty.objects import MySignalEvent, dp
def delete_template(name: str, templates: list) -> Tuple[list, bool]:
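    # Remove the first template whose (lower-cased) name matches; the returned
    # boolean tells callers whether such a template already existed.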
for template in templates:
if template['name'].lower() == name:
templates.remove(template)
return templates, True
return templates, False
def get_template_list(event: MySignalEvent, templates: list):
if len(event.args) > 1:
if event.args[-1].isdigit() or (event.args[-1].startswith('-') and event.args[-1][1:].isdigit()):
page = int(event.args.pop(-1))
if page > 0:
page -= 1
else:
page = 0
category = ' '.join(event.args).lower()
template_list = None
if not category:
cats = {}
for t in templates:
cats[t['cat']] = cats.get(t['cat'], 0) + 1
message = "📚 Категории {name_genitive}:"
for cat in cats:
message += f"\n-- {cat} ({cats[cat]})"
else:
if category == 'все':
message = '📃 Список всех {name_genitive}:'
category = None
else:
message = f'📖 {{name_accusative_cap}} категории "{category}":'
message += list_by_page(templates, page, category)
if '\n' not in message:
if templates == []:
message = '{no_templates}'
else:
message = '⚠️ {name_accusative_cap} по указанному запросу не найдены'
return message
def list_by_page(templates, page, category) -> str:
if len(templates) > 40:
if page >= 0:
message = f'(страница #{page+1})'
else:
message = f'(страница #{abs(page)} с конца)'
else:
message = ''
shift = page*40
sliced_list = templates[shift:shift+40] if shift >= 0 else templates[shift-1:shift+39]
if page < 0:
try:
sliced_list.append(templates[shift+39])
except IndexError:
pass
offset = (shift+1) if shift >= 0 else (len(templates)+shift)
for i, t in enumerate(sliced_list, offset):
if category:
if t['cat'] != category:
continue
message += f'\n-- {t["name"]}'
else:
message += f'\n{i}. {t["name"]} | {t["cat"]}'
if '\n' not in message:
return ''
return '\n' + message
@dp.longpoll_event_register('+шаб')
@dp.my_signal_event_register('+шаб')
def template_create(event: MySignalEvent) -> str:
name = re.findall(r"([^|]+)\|?([^|]*)", ' '.join(event.args))
if not name:
event.msg_op(2, "❗ Не указано название")
return "ok"
category = name[0][1].lower().strip() or 'без категории'
name = name[0][0].lower().strip()
if category == 'все':
event.msg_op(2, '❗ Невозможно создать шаблон с категорией "все"')
return "ok"
if not (event.payload or event.attachments or event.reply_message):
event.msg_op(2, "❗ Нет данных")
return "ok"
if event.reply_message:
data = event.reply_message['text']
event.attachments = att_parse(event.reply_message['attachments'])
if event.attachments:
if event.attachments[0].startswith('audio_message'):
event.msg_op(2, '⚠️ Для сохранения ГС используй команду "+гс"')
return "ok"
else:
data = event.payload
event.db.templates, exist = delete_template(name, event.db.templates)
event.db.templates.append({
"name": name,
"payload": data,
"cat": category,
"attachments": event.attachments
})
event.msg_op(2, f'✅ Шаблон "{name}" ' +
("перезаписан" if exist else "сохранен"), delete=2)
return "ok"
@dp.longpoll_event_register('шабы')
@dp.my_signal_event_register('шабы')
def template_list(event: MySignalEvent) -> str:
message = get_template_list(event, event.db.templates)
event.msg_op(2, format_response(message,
name_genitive='шаблонов',
name_accusative='шаблоны',
name_accusative_cap='Шаблоны',
no_templates='👀 Нет ни одного шаблона... Для создания используй команду "+шаб"'
))
return "ok"
def get_name(event: MySignalEvent) -> Tuple[MySignalEvent, str]:
return event, ' '.join(event.args).lower()
@dp.longpoll_event_register('-шаб')
@dp.my_signal_event_register('-шаб')
@dp.wrap_handler(get_name)
def template_delete(event: MySignalEvent, name: str) -> str:
event.db.templates, exist = delete_template(name, event.db.templates)
if exist:
msg = f'✅ Шаблон "{name}" удален'
else:
msg = f'⚠️ Шаблон "{name}" не найден'
event.msg_op(2, msg, delete=1)
return "ok"
@dp.longpoll_event_register('шаб')
@dp.my_signal_event_register('шаб')
@dp.wrap_handler(get_name)
def template_show(event: MySignalEvent, name: str) -> str:
template = None
for temp in event.db.templates:
if temp['name'] == name:
template = temp
break
if template:
atts = template['attachments']
atts.extend(event.attachments)
event.msg_op(2, temp['payload'] + '\n' + event.payload,
keep_forward_messages=1, attachment=','.join(atts))
else:
event.msg_op(2, f'❗ Шаблон "{name}" не найден')
return "ok"
|
# Create a program that has a single tuple with product names and their
# respective prices, in sequence. At the end, show a price listing,
# organizing the data in tabular form.
listagem = ('Lápis', 1.75,
'Borracha', 2,
'Caderno', 15.90,
'Estojo', 10,
'Compasso', 7.95,
'Mochila', 150.45,
'Canetas', 22.30,
'Livro', 34.50)
print('-'*40)
print(f'{"LISTAGEM DE PREÇOS":^40}')
print('-'*40)
for i in range(0, len(listagem)):
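    # Even indices hold the product names; odd indices hold the price that follows each name.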
if i % 2 == 0:
print(f'{listagem[i]:.<30}', end='')
else:
print(f'R${listagem[i]:>7.2f}')
print('-'*40)
|
# Create a program that has a single tuple with product names and their
# respective prices, in sequence. At the end, show a price listing,
# organizing the data in tabular form.
listagem = ('Lápis', 1.75,
'Borracha', 2,
'Caderno', 15.90,
'Estojo', 10,
'Compasso', 7.95,
'Mochila', 150.45,
'Canetas', 22.30,
'Livro', 34.50)
print('-'*40)
print(f'{"LISTAGEM DE PREÇOS":^40}')
print('-'*40)
for i in range(0, len(listagem)):
if i % 2 == 0:
print(f'{listagem[i]:.<30}', end='')
else:
print(f'R${listagem[i]:>7.2f}')
print('-'*40)
|
# Copyright (c) 2018 The Pooch Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
"""
The classes that actually handle the downloads.
"""
import sys
import ftplib
import requests
from .utils import parse_url
try:
from tqdm import tqdm
except ImportError:
tqdm = None
try:
import paramiko
except ImportError:
paramiko = None
def choose_downloader(url):
"""
Choose the appropriate downloader for the given URL based on the protocol.
Parameters
----------
url : str
A URL (including protocol).
Returns
-------
downloader
A downloader class (either :class:`pooch.HTTPDownloader`,
        :class:`pooch.FTPDownloader`, or :class:`pooch.SFTPDownloader`).
Examples
--------
>>> downloader = choose_downloader("http://something.com")
>>> print(downloader.__class__.__name__)
HTTPDownloader
>>> downloader = choose_downloader("https://something.com")
>>> print(downloader.__class__.__name__)
HTTPDownloader
>>> downloader = choose_downloader("ftp://something.com")
>>> print(downloader.__class__.__name__)
FTPDownloader
"""
known_downloaders = {
"ftp": FTPDownloader,
"https": HTTPDownloader,
"http": HTTPDownloader,
"sftp": SFTPDownloader,
}
parsed_url = parse_url(url)
if parsed_url["protocol"] not in known_downloaders:
raise ValueError(
f"Unrecognized URL protocol '{parsed_url["protocol"]}' in '{url}'. "
f"Must be one of {known_downloaders.keys()}."
)
downloader = known_downloaders[parsed_url["protocol"]]()
return downloader
class HTTPDownloader: # pylint: disable=too-few-public-methods
"""
Download manager for fetching files over HTTP/HTTPS.
When called, downloads the given file URL into the specified local file.
Uses the :mod:`requests` library to manage downloads.
Use with :meth:`pooch.Pooch.fetch` or :func:`pooch.retrieve` to customize
the download of files (for example, to use authentication or print a
progress bar).
Parameters
----------
progressbar : bool
If True, will print a progress bar of the download to standard error
(stderr). Requires `tqdm <https://github.com/tqdm/tqdm>`__ to be
installed.
chunk_size : int
Files are streamed *chunk_size* bytes at a time instead of loading
        everything into memory at once. Usually doesn't need to be changed.
**kwargs
All keyword arguments given when creating an instance of this class
will be passed to :func:`requests.get`.
Examples
--------
Download one of the data files from the Pooch repository:
>>> import os
>>> from pooch import version, check_version
>>> url = "https://github.com/fatiando/pooch/raw/{}/data/tiny-data.txt"
>>> url = url.format(check_version(version.full_version))
>>> downloader = HTTPDownloader()
>>> # Not using with Pooch.fetch so no need to pass an instance of Pooch
>>> downloader(url=url, output_file="tiny-data.txt", pooch=None)
>>> os.path.exists("tiny-data.txt")
True
>>> with open("tiny-data.txt") as f:
... print(f.read().strip())
# A tiny data file for test purposes only
1 2 3 4 5 6
>>> os.remove("tiny-data.txt")
Authentication can be handled by passing a user name and password to
:func:`requests.get`. All arguments provided when creating an instance of
the class are forwarded to :func:`requests.get`. We'll use
``auth=(username, password)`` to use basic HTTPS authentication. The
    https://httpbin.org website allows us to make a fake login request using
whatever username and password we provide to it:
>>> user = "doggo"
>>> password = "goodboy"
>>> # httpbin will ask for the user and password we provide in the URL
>>> url = f"https://httpbin.org/basic-auth/{user}/{password}"
>>> # Trying without the login credentials causes an error
>>> downloader = HTTPDownloader()
>>> try:
... downloader(url=url, output_file="tiny-data.txt", pooch=None)
... except Exception:
... print("There was an error!")
There was an error!
>>> # Pass in the credentials to HTTPDownloader
>>> downloader = HTTPDownloader(auth=(user, password))
>>> downloader(url=url, output_file="tiny-data.txt", pooch=None)
>>> with open("tiny-data.txt") as f:
... for line in f:
... print(line.rstrip())
{
"authenticated": true,
"user": "doggo"
}
>>> os.remove("tiny-data.txt")
"""
def __init__(self, progressbar=False, chunk_size=1024, **kwargs):
self.kwargs = kwargs
self.progressbar = progressbar
self.chunk_size = chunk_size
if self.progressbar and tqdm is None:
raise ValueError("Missing package 'tqdm' required for progress bars.")
def __call__(self, url, output_file, pooch):
"""
Download the given URL over HTTP to the given output file.
Uses :func:`requests.get`.
Parameters
----------
url : str
The URL to the file you want to download.
output_file : str or file-like object
Path (and file name) to which the file will be downloaded.
pooch : :class:`~pooch.Pooch`
The instance of :class:`~pooch.Pooch` that is calling this method.
"""
kwargs = self.kwargs.copy()
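        # Default to streaming so large downloads are written chunk by chunk
        # instead of being held in memory; user-supplied kwargs still take precedence.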
kwargs.setdefault("stream", True)
ispath = not hasattr(output_file, "write")
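        # output_file may be a path or an open file-like object; only close it
        # later if this method opened it.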
if ispath:
output_file = open(output_file, "w+b")
try:
response = requests.get(url, **kwargs)
response.raise_for_status()
content = response.iter_content(chunk_size=self.chunk_size)
if self.progressbar:
total = int(response.headers.get("content-length", 0))
# Need to use ascii characters on Windows because there isn't
# always full unicode support
# (see https://github.com/tqdm/tqdm/issues/454)
use_ascii = bool(sys.platform == "win32")
progress = tqdm(
total=total,
ncols=79,
ascii=use_ascii,
unit="B",
unit_scale=True,
leave=True,
)
for chunk in content:
if chunk:
output_file.write(chunk)
output_file.flush()
if self.progressbar:
# Use the chunk size here because chunk may be much
# larger if the data are decompressed by requests after
# reading (happens with text files).
progress.update(self.chunk_size)
# Make sure the progress bar gets filled even if the actual number
            # of chunks is smaller than expected. This happens when streaming
# text files that are compressed by the server when sending (gzip).
# Binary files don't experience this.
if self.progressbar:
progress.reset()
progress.update(total)
progress.close()
finally:
if ispath:
output_file.close()
class FTPDownloader: # pylint: disable=too-few-public-methods
"""
Download manager for fetching files over FTP.
When called, downloads the given file URL into the specified local file.
Uses the :mod:`ftplib` module to manage downloads.
Use with :meth:`pooch.Pooch.fetch` or :func:`pooch.retrieve` to customize
the download of files (for example, to use authentication or print a
progress bar).
Parameters
----------
port : int
Port used for the FTP connection.
username : str
User name used to login to the server. Only needed if the server
requires authentication (i.e., no anonymous FTP).
password : str
Password used to login to the server. Only needed if the server
requires authentication (i.e., no anonymous FTP). Use the empty string
to indicate no password is required.
account : str
Some servers also require an "account" name for authentication.
timeout : int
Timeout in seconds for ftp socket operations, use None to mean no
timeout.
progressbar : bool
If True, will print a progress bar of the download to standard error
(stderr). Requires `tqdm <https://github.com/tqdm/tqdm>`__ to be
installed.
chunk_size : int
Files are streamed *chunk_size* bytes at a time instead of loading
        everything into memory at once. Usually doesn't need to be changed.
"""
def __init__(
self,
port=21,
username="anonymous",
password="",
account="",
timeout=None,
progressbar=False,
chunk_size=1024,
):
self.port = port
self.username = username
self.password = password
self.account = account
self.timeout = timeout
self.progressbar = progressbar
self.chunk_size = chunk_size
if self.progressbar and tqdm is None:
raise ValueError("Missing package 'tqdm' required for progress bars.")
def __call__(self, url, output_file, pooch):
"""
Download the given URL over FTP to the given output file.
Parameters
----------
url : str
The URL to the file you want to download.
output_file : str or file-like object
Path (and file name) to which the file will be downloaded.
pooch : :class:`~pooch.Pooch`
The instance of :class:`~pooch.Pooch` that is calling this method.
"""
parsed_url = parse_url(url)
ftp = ftplib.FTP(timeout=self.timeout)
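        # Connect explicitly instead of passing the host to ftplib.FTP so that
        # a non-default port can be used.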
ftp.connect(host=parsed_url["netloc"], port=self.port)
ispath = not hasattr(output_file, "write")
if ispath:
output_file = open(output_file, "w+b")
try:
ftp.login(user=self.username, passwd=self.password, acct=self.account)
command = f"RETR {parsed_url["path"]}"
if self.progressbar:
# Make sure the file is set to binary mode, otherwise we can't
# get the file size. See: https://stackoverflow.com/a/22093848
ftp.voidcmd("TYPE I")
size = int(ftp.size(parsed_url["path"]))
use_ascii = bool(sys.platform == "win32")
progress = tqdm(
total=size,
ncols=79,
ascii=use_ascii,
unit="B",
unit_scale=True,
leave=True,
)
with progress:
def callback(data):
"Update the progress bar and write to output"
progress.update(len(data))
output_file.write(data)
ftp.retrbinary(command, callback, blocksize=self.chunk_size)
else:
ftp.retrbinary(command, output_file.write, blocksize=self.chunk_size)
finally:
ftp.quit()
if ispath:
output_file.close()
class SFTPDownloader: # pylint: disable=too-few-public-methods
"""
Download manager for fetching files over SFTP.
When called, downloads the given file URL into the specified local file.
Requires `paramiko <https://github.com/paramiko/paramiko>`__ to be
installed.
Use with :meth:`pooch.Pooch.fetch` or :func:`pooch.retrieve` to customize
the download of files (for example, to use authentication or print a
progress bar).
Parameters
----------
port : int
Port used for the SFTP connection.
username : str
User name used to login to the server. Only needed if the server
requires authentication (i.e., no anonymous SFTP).
password : str
Password used to login to the server. Only needed if the server
requires authentication (i.e., no anonymous SFTP). Use the empty
string to indicate no password is required.
timeout : int
Timeout in seconds for sftp socket operations, use None to mean no
timeout.
progressbar : bool
If True, will print a progress bar of the download to standard
error (stderr). Requires `tqdm <https://github.com/tqdm/tqdm>`__ to
be installed.
"""
def __init__(
self,
port=22,
username="anonymous",
password="",
account="",
timeout=None,
progressbar=False,
):
self.port = port
self.username = username
self.password = password
self.account = account
self.timeout = timeout
self.progressbar = progressbar
# Collect errors and raise only once so that both missing packages are
# captured. Otherwise, the user is only warned of one of them at a
# time (and we can't test properly when they are both missing).
errors = []
if self.progressbar and tqdm is None:
errors.append("Missing package 'tqdm' required for progress bars.")
if paramiko is None:
errors.append("Missing package 'paramiko' required for SFTP downloads.")
if errors:
raise ValueError(" ".join(errors))
def __call__(self, url, output_file, pooch):
"""
Download the given URL over SFTP to the given output file.
The output file must be given as a string (file name/path) and not an
open file object! Otherwise, paramiko cannot save to that file.
Parameters
----------
url : str
The URL to the file you want to download.
output_file : str
Path (and file name) to which the file will be downloaded. **Cannot
be a file object**.
pooch : :class:`~pooch.Pooch`
The instance of :class:`~pooch.Pooch` that is calling this method.
"""
parsed_url = parse_url(url)
connection = paramiko.Transport(sock=(parsed_url["netloc"], self.port))
sftp = None
try:
connection.connect(username=self.username, password=self.password)
sftp = paramiko.SFTPClient.from_transport(connection)
            sftp.get_channel().settimeout(self.timeout)
if self.progressbar:
size = int(sftp.stat(parsed_url["path"]).st_size)
use_ascii = bool(sys.platform == "win32")
progress = tqdm(
total=size,
ncols=79,
ascii=use_ascii,
unit="B",
unit_scale=True,
leave=True,
)
with progress:
def callback(current, total):
"Update the progress bar and write to output"
progress.total = int(total)
progress.update(int(current - progress.n))
sftp.get(parsed_url["path"], output_file, callback=callback)
else:
sftp.get(parsed_url["path"], output_file)
finally:
connection.close()
if sftp is not None:
sftp.close()
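# A minimal usage sketch (not part of the module itself): the downloaders above
# are normally passed to pooch.retrieve or Pooch.fetch. The URL and hash below
# are placeholders rather than real data.
#
#     import pooch
#     fname = pooch.retrieve(
#         url="https://example.com/data.csv",           # hypothetical URL
#         known_hash=None,                               # skip checksum verification in this sketch
#         downloader=HTTPDownloader(progressbar=True),   # any downloader defined above
#     )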
|
# Copyright (c) 2018 The Pooch Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
"""
The classes that actually handle the downloads.
"""
import sys
import ftplib
import requests
from .utils import parse_url
try:
from tqdm import tqdm
except ImportError:
tqdm = None
try:
import paramiko
except ImportError:
paramiko = None
def choose_downloader(url):
"""
Choose the appropriate downloader for the given URL based on the protocol.
Parameters
----------
url : str
A URL (including protocol).
Returns
-------
downloader
A downloader class (either :class:`pooch.HTTPDownloader`,
        :class:`pooch.FTPDownloader`, or :class:`pooch.SFTPDownloader`).
Examples
--------
>>> downloader = choose_downloader("http://something.com")
>>> print(downloader.__class__.__name__)
HTTPDownloader
>>> downloader = choose_downloader("https://something.com")
>>> print(downloader.__class__.__name__)
HTTPDownloader
>>> downloader = choose_downloader("ftp://something.com")
>>> print(downloader.__class__.__name__)
FTPDownloader
"""
known_downloaders = {
"ftp": FTPDownloader,
"https": HTTPDownloader,
"http": HTTPDownloader,
"sftp": SFTPDownloader,
}
parsed_url = parse_url(url)
if parsed_url["protocol"] not in known_downloaders:
raise ValueError(
f"Unrecognized URL protocol '{parsed_url['protocol']}' in '{url}'. "
f"Must be one of {known_downloaders.keys()}."
)
downloader = known_downloaders[parsed_url["protocol"]]()
return downloader
class HTTPDownloader: # pylint: disable=too-few-public-methods
"""
Download manager for fetching files over HTTP/HTTPS.
When called, downloads the given file URL into the specified local file.
Uses the :mod:`requests` library to manage downloads.
Use with :meth:`pooch.Pooch.fetch` or :func:`pooch.retrieve` to customize
the download of files (for example, to use authentication or print a
progress bar).
Parameters
----------
progressbar : bool
If True, will print a progress bar of the download to standard error
(stderr). Requires `tqdm <https://github.com/tqdm/tqdm>`__ to be
installed.
chunk_size : int
Files are streamed *chunk_size* bytes at a time instead of loading
        everything into memory at once. Usually doesn't need to be changed.
**kwargs
All keyword arguments given when creating an instance of this class
will be passed to :func:`requests.get`.
Examples
--------
Download one of the data files from the Pooch repository:
>>> import os
>>> from pooch import version, check_version
>>> url = "https://github.com/fatiando/pooch/raw/{}/data/tiny-data.txt"
>>> url = url.format(check_version(version.full_version))
>>> downloader = HTTPDownloader()
>>> # Not using with Pooch.fetch so no need to pass an instance of Pooch
>>> downloader(url=url, output_file="tiny-data.txt", pooch=None)
>>> os.path.exists("tiny-data.txt")
True
>>> with open("tiny-data.txt") as f:
... print(f.read().strip())
# A tiny data file for test purposes only
1 2 3 4 5 6
>>> os.remove("tiny-data.txt")
Authentication can be handled by passing a user name and password to
:func:`requests.get`. All arguments provided when creating an instance of
the class are forwarded to :func:`requests.get`. We'll use
``auth=(username, password)`` to use basic HTTPS authentication. The
    https://httpbin.org website allows us to make a fake login request using
whatever username and password we provide to it:
>>> user = "doggo"
>>> password = "goodboy"
>>> # httpbin will ask for the user and password we provide in the URL
>>> url = f"https://httpbin.org/basic-auth/{user}/{password}"
>>> # Trying without the login credentials causes an error
>>> downloader = HTTPDownloader()
>>> try:
... downloader(url=url, output_file="tiny-data.txt", pooch=None)
... except Exception:
... print("There was an error!")
There was an error!
>>> # Pass in the credentials to HTTPDownloader
>>> downloader = HTTPDownloader(auth=(user, password))
>>> downloader(url=url, output_file="tiny-data.txt", pooch=None)
>>> with open("tiny-data.txt") as f:
... for line in f:
... print(line.rstrip())
{
"authenticated": true,
"user": "doggo"
}
>>> os.remove("tiny-data.txt")
"""
def __init__(self, progressbar=False, chunk_size=1024, **kwargs):
self.kwargs = kwargs
self.progressbar = progressbar
self.chunk_size = chunk_size
if self.progressbar and tqdm is None:
raise ValueError("Missing package 'tqdm' required for progress bars.")
def __call__(self, url, output_file, pooch):
"""
Download the given URL over HTTP to the given output file.
Uses :func:`requests.get`.
Parameters
----------
url : str
The URL to the file you want to download.
output_file : str or file-like object
Path (and file name) to which the file will be downloaded.
pooch : :class:`~pooch.Pooch`
The instance of :class:`~pooch.Pooch` that is calling this method.
"""
kwargs = self.kwargs.copy()
kwargs.setdefault("stream", True)
ispath = not hasattr(output_file, "write")
if ispath:
output_file = open(output_file, "w+b")
try:
response = requests.get(url, **kwargs)
response.raise_for_status()
content = response.iter_content(chunk_size=self.chunk_size)
if self.progressbar:
total = int(response.headers.get("content-length", 0))
# Need to use ascii characters on Windows because there isn't
# always full unicode support
# (see https://github.com/tqdm/tqdm/issues/454)
use_ascii = bool(sys.platform == "win32")
progress = tqdm(
total=total,
ncols=79,
ascii=use_ascii,
unit="B",
unit_scale=True,
leave=True,
)
for chunk in content:
if chunk:
output_file.write(chunk)
output_file.flush()
if self.progressbar:
# Use the chunk size here because chunk may be much
# larger if the data are decompressed by requests after
# reading (happens with text files).
progress.update(self.chunk_size)
# Make sure the progress bar gets filled even if the actual number
            # of chunks is smaller than expected. This happens when streaming
# text files that are compressed by the server when sending (gzip).
# Binary files don't experience this.
if self.progressbar:
progress.reset()
progress.update(total)
progress.close()
finally:
if ispath:
output_file.close()
class FTPDownloader: # pylint: disable=too-few-public-methods
"""
Download manager for fetching files over FTP.
When called, downloads the given file URL into the specified local file.
Uses the :mod:`ftplib` module to manage downloads.
Use with :meth:`pooch.Pooch.fetch` or :func:`pooch.retrieve` to customize
the download of files (for example, to use authentication or print a
progress bar).
Parameters
----------
port : int
Port used for the FTP connection.
username : str
User name used to login to the server. Only needed if the server
requires authentication (i.e., no anonymous FTP).
password : str
Password used to login to the server. Only needed if the server
requires authentication (i.e., no anonymous FTP). Use the empty string
to indicate no password is required.
account : str
Some servers also require an "account" name for authentication.
timeout : int
Timeout in seconds for ftp socket operations, use None to mean no
timeout.
progressbar : bool
If True, will print a progress bar of the download to standard error
(stderr). Requires `tqdm <https://github.com/tqdm/tqdm>`__ to be
installed.
chunk_size : int
Files are streamed *chunk_size* bytes at a time instead of loading
        everything into memory at once. Usually doesn't need to be changed.
"""
def __init__(
self,
port=21,
username="anonymous",
password="",
account="",
timeout=None,
progressbar=False,
chunk_size=1024,
):
self.port = port
self.username = username
self.password = password
self.account = account
self.timeout = timeout
self.progressbar = progressbar
self.chunk_size = chunk_size
if self.progressbar and tqdm is None:
raise ValueError("Missing package 'tqdm' required for progress bars.")
def __call__(self, url, output_file, pooch):
"""
Download the given URL over FTP to the given output file.
Parameters
----------
url : str
The URL to the file you want to download.
output_file : str or file-like object
Path (and file name) to which the file will be downloaded.
pooch : :class:`~pooch.Pooch`
The instance of :class:`~pooch.Pooch` that is calling this method.
"""
parsed_url = parse_url(url)
ftp = ftplib.FTP(timeout=self.timeout)
ftp.connect(host=parsed_url["netloc"], port=self.port)
ispath = not hasattr(output_file, "write")
if ispath:
output_file = open(output_file, "w+b")
try:
ftp.login(user=self.username, passwd=self.password, acct=self.account)
command = f"RETR {parsed_url['path']}"
if self.progressbar:
# Make sure the file is set to binary mode, otherwise we can't
# get the file size. See: https://stackoverflow.com/a/22093848
ftp.voidcmd("TYPE I")
size = int(ftp.size(parsed_url["path"]))
use_ascii = bool(sys.platform == "win32")
progress = tqdm(
total=size,
ncols=79,
ascii=use_ascii,
unit="B",
unit_scale=True,
leave=True,
)
with progress:
def callback(data):
"Update the progress bar and write to output"
progress.update(len(data))
output_file.write(data)
ftp.retrbinary(command, callback, blocksize=self.chunk_size)
else:
ftp.retrbinary(command, output_file.write, blocksize=self.chunk_size)
finally:
ftp.quit()
if ispath:
output_file.close()
class SFTPDownloader: # pylint: disable=too-few-public-methods
"""
Download manager for fetching files over SFTP.
When called, downloads the given file URL into the specified local file.
Requires `paramiko <https://github.com/paramiko/paramiko>`__ to be
installed.
Use with :meth:`pooch.Pooch.fetch` or :func:`pooch.retrieve` to customize
the download of files (for example, to use authentication or print a
progress bar).
Parameters
----------
port : int
Port used for the SFTP connection.
username : str
User name used to login to the server. Only needed if the server
requires authentication (i.e., no anonymous SFTP).
password : str
Password used to login to the server. Only needed if the server
requires authentication (i.e., no anonymous SFTP). Use the empty
string to indicate no password is required.
timeout : int
Timeout in seconds for sftp socket operations, use None to mean no
timeout.
progressbar : bool
If True, will print a progress bar of the download to standard
error (stderr). Requires `tqdm <https://github.com/tqdm/tqdm>`__ to
be installed.
"""
def __init__(
self,
port=22,
username="anonymous",
password="",
account="",
timeout=None,
progressbar=False,
):
self.port = port
self.username = username
self.password = password
self.account = account
self.timeout = timeout
self.progressbar = progressbar
# Collect errors and raise only once so that both missing packages are
# captured. Otherwise, the user is only warned of one of them at a
# time (and we can't test properly when they are both missing).
errors = []
if self.progressbar and tqdm is None:
errors.append("Missing package 'tqdm' required for progress bars.")
if paramiko is None:
errors.append("Missing package 'paramiko' required for SFTP downloads.")
if errors:
raise ValueError(" ".join(errors))
def __call__(self, url, output_file, pooch):
"""
Download the given URL over SFTP to the given output file.
The output file must be given as a string (file name/path) and not an
open file object! Otherwise, paramiko cannot save to that file.
Parameters
----------
url : str
The URL to the file you want to download.
output_file : str
Path (and file name) to which the file will be downloaded. **Cannot
be a file object**.
pooch : :class:`~pooch.Pooch`
The instance of :class:`~pooch.Pooch` that is calling this method.
"""
parsed_url = parse_url(url)
connection = paramiko.Transport(sock=(parsed_url["netloc"], self.port))
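        # paramiko.Transport opens the SSH connection; SFTP then runs as a
        # subsystem on top of it once authentication succeeds.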
sftp = None
try:
connection.connect(username=self.username, password=self.password)
sftp = paramiko.SFTPClient.from_transport(connection)
            sftp.get_channel().settimeout(self.timeout)
if self.progressbar:
size = int(sftp.stat(parsed_url["path"]).st_size)
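                # stat() on the remote path gives the file size, so the
                # progress bar can show a known total.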
use_ascii = bool(sys.platform == "win32")
progress = tqdm(
total=size,
ncols=79,
ascii=use_ascii,
unit="B",
unit_scale=True,
leave=True,
)
with progress:
def callback(current, total):
"Update the progress bar and write to output"
progress.total = int(total)
progress.update(int(current - progress.n))
sftp.get(parsed_url["path"], output_file, callback=callback)
else:
sftp.get(parsed_url["path"], output_file)
finally:
connection.close()
if sftp is not None:
sftp.close()
|
# -*- coding: utf-8 -*-
# Use hash instead of parameters for iterables folder names
# Otherwise path will be too long and generate OSError
from nipype import config
import clinica.pipelines.engine as cpe
cfg = dict(execution={"parameterize_dirs": False})
config.update_config(cfg)
class DeepLearningPrepareData(cpe.Pipeline):
"""Deeplearning prepare data - MRI in nifti format are transformed into
PyTorch tensors. The transformation is applied to: the whole volume, a
selection of 3D patches, or slices extracted from the 3D volume. By default
    it uses the cropped version of the MRI (see option "--use_uncropped_image")
Returns:
A clinica pipeline object containing the Deeplearning prepare data pipeline.
"""
def check_custom_dependencies(self):
"""Check dependencies that can not be listed in the `info.json` file."""
def get_input_fields(self):
"""Specify the list of possible inputs of this pipeline.
Returns:
A list of (string) input fields name.
"""
return ["input_nifti"]
def get_output_fields(self):
"""Specify the list of possible outputs of this pipeline.
Returns:
A list of (string) output fields name.
"""
return ["image_id"]
def build_input_node(self):
"""Build and connect an input node to the pipeline."""
from os import path
import nipype.interfaces.utility as nutil
import nipype.pipeline.engine as npe
from clinica.utils.exceptions import (
ClinicaBIDSError,
ClinicaCAPSError,
ClinicaException,
)
from clinica.utils.input_files import (
T1W_EXTENSIVE,
T1W_LINEAR,
T1W_LINEAR_CROPPED,
pet_linear_nii,
)
from clinica.utils.inputs import clinica_file_reader
from clinica.utils.stream import cprint
from clinica.utils.ux import print_images_to_process
from .deeplearning_prepare_data_utils import check_mask_list
# Select the correct filetype corresponding to modality
if self.parameters.get("modality") == "t1-linear":
if self.parameters.get("use_uncropped_image"):
FILE_TYPE = T1W_LINEAR
else:
FILE_TYPE = T1W_LINEAR_CROPPED
if self.parameters.get("modality") == "t1-extensive":
FILE_TYPE = T1W_EXTENSIVE
if self.parameters.get("modality") == "pet-linear":
FILE_TYPE = pet_linear_nii(
self.parameters.get("acq_label"),
self.parameters.get("suvr_reference_region"),
self.parameters.get("use_uncropped_image"),
)
if self.parameters.get("modality") == "custom":
FILE_TYPE = {
"pattern": f"*{self.parameters.get("custom_suffix")}",
"description": "Custom suffix",
}
# Input file:
try:
input_files = clinica_file_reader(
self.subjects, self.sessions, self.caps_directory, FILE_TYPE
)
except ClinicaException as e:
err = (
"Clinica faced error(s) while trying to read files in your CAPS directory.\n"
+ str(e)
)
raise ClinicaBIDSError(err)
if len(self.subjects):
print_images_to_process(self.subjects, self.sessions)
cprint("The pipeline will last approximately 30 seconds per image.")
if self.parameters.get("extract_method") == "slice":
self.slice_direction = self.parameters.get("slice_direction")
self.slice_mode = self.parameters.get("slice_mode")
else:
self.slice_direction = "axial"
self.slice_mode = "rgb"
if self.parameters.get("extract_method") == "patch":
self.patch_size = self.parameters.get("patch_size")
self.stride_size = self.parameters.get("stride_size")
else:
self.patch_size = 50
self.stride_size = 50
# Load the corresponding masks
if self.parameters.get("extract_method") == "roi":
self.roi_list = self.parameters.get("roi_list")
if self.parameters.get("modality") == "custom":
self.mask_pattern = self.parameters.get("custom_mask_pattern")
self.template = self.parameters.get("custom_template")
if not self.template:
raise ValueError(
"A custom template must be defined when the modality is set to custom."
)
else:
self.mask_pattern = None
from .deeplearning_prepare_data_utils import TEMPLATE_DICT
self.template = TEMPLATE_DICT[self.parameters.get("modality")]
self.masks_location = path.join(
self.caps_directory, "masks", f"tpl-{self.template}"
)
if not self.roi_list:
raise ValueError("A list of regions must be given.")
else:
check_mask_list(
self.masks_location,
self.roi_list,
self.mask_pattern,
not self.parameters.get("use_uncropped_image"),
)
else:
self.masks_location = ""
self.mask_pattern = None
self.roi_list = []
# The reading node
# -------------------------
read_node = npe.Node(
name="ReadingFiles",
iterables=[
("input_nifti", input_files),
],
synchronize=True,
interface=nutil.IdentityInterface(fields=self.get_input_fields()),
)
self.connect(
[
(read_node, self.input_node, [("input_nifti", "input_nifti")]),
]
)
def build_output_node(self):
"""Build and connect an output node to the pipeline."""
import nipype.interfaces.utility as nutil
import nipype.pipeline.engine as npe
from nipype.interfaces.io import DataSink
from clinica.utils.filemanip import get_subject_id
from clinica.utils.nipype import container_from_filename, fix_join
# Write node
# ----------------------
write_node = npe.Node(name="WriteCaps", interface=DataSink())
write_node.inputs.base_directory = self.caps_directory
write_node.inputs.parameterization = False
# Get subject ID node
# ----------------------
image_id_node = npe.Node(
interface=nutil.Function(
input_names=["bids_or_caps_file"],
output_names=["image_id"],
function=get_subject_id,
),
name="ImageID",
)
# Find container path from input filename
# ----------------------
container_path = npe.Node(
nutil.Function(
input_names=["bids_or_caps_filename"],
output_names=["container"],
function=container_from_filename,
),
name="ContainerPath",
)
# fmt: off
self.connect(
[
(self.input_node, image_id_node, [("input_nifti", "bids_or_caps_file")]),
(self.input_node, container_path, [("input_nifti", "bids_or_caps_filename")]),
# (image_id_node, write_node, [('image_id', '@image_id')]),
(image_id_node, write_node, [("image_id", "@image_id")]),
]
)
# fmt: on
subfolder = "image_based"
if self.parameters.get("extract_method") == "slice":
subfolder = "slice_based"
# fmt: off
self.connect(
[
(self.output_node, write_node, [("slices_rgb_output", "@slices_rgb_output")]),
(self.output_node, write_node, [("slices_original_output", "@slices_original_output")]),
]
)
# fmt: on
elif self.parameters.get("extract_method") == "patch":
subfolder = "patch_based"
# fmt: off
self.connect(
[
(self.output_node, write_node, [("patches_output", "@patches_output")])
]
)
# fmt: on
elif self.parameters.get("extract_method") == "roi":
subfolder = "roi_based"
# fmt: off
self.connect(
[
(self.output_node, write_node, [("roi_output", "@roi_output")])
]
)
# fmt: on
else:
# fmt: off
self.connect(
[
(self.output_node, write_node, [("output_pt_file", "@output_pt_file")])
]
)
# fmt: on
mod_subfolder = ""
if self.parameters.get("modality") == "t1-linear":
mod_subfolder = "t1_linear"
if self.parameters.get("modality") == "t1-extensive":
mod_subfolder = "t1_extensive"
if self.parameters.get("modality") == "pet-linear":
mod_subfolder = "pet_linear"
if self.parameters.get("modality") == "custom":
mod_subfolder = "custom"
# fmt: off
self.connect(
[
(container_path, write_node, [
(("container", fix_join, "deeplearning_prepare_data", subfolder, mod_subfolder), "container")]),
]
)
# fmt: on
def build_core_nodes(self):
"""Build and connect the core nodes of the pipeline."""
import nipype.interfaces.utility as nutil
import nipype.pipeline.engine as npe
from .deeplearning_prepare_data_utils import (
extract_patches,
extract_roi,
extract_slices,
save_as_pt,
)
# The processing nodes
# Node to save input in nii.gz format into pytorch .pt format
# ----------------------
save_as_pt = npe.MapNode(
name="save_as_pt",
iterfield=["input_img"],
interface=nutil.Function(
function=save_as_pt,
input_names=["input_img"],
output_names=["output_file"],
),
)
# Extract slices node (options: 3 directions, mode)
# ----------------------
extract_slices_node = npe.MapNode(
name="extract_slices",
iterfield=["input_tensor"],
interface=nutil.Function(
function=extract_slices,
input_names=["input_tensor", "slice_direction", "slice_mode"],
output_names=["output_file_rgb", "output_file_original"],
),
)
extract_slices_node.inputs.slice_direction = self.slice_direction
extract_slices_node.inputs.slice_mode = self.slice_mode
# Extract patches node (options, patch size and stride size)
# ----------------------
extract_patches_node = npe.MapNode(
name="extract_patches",
iterfield=["input_tensor"],
interface=nutil.Function(
function=extract_patches,
input_names=["input_tensor", "patch_size", "stride_size"],
output_names=["output_patch"],
),
)
extract_patches_node.inputs.patch_size = self.patch_size
extract_patches_node.inputs.stride_size = self.stride_size
        # Extract ROI node
extract_roi_node = npe.MapNode(
name="extract_ROI",
iterfield=["input_tensor"],
interface=nutil.Function(
function=extract_roi,
input_names=[
"input_tensor",
"masks_location",
"mask_pattern",
"cropped_input",
"roi_list",
"uncrop_output",
],
output_names=["output_roi"],
),
)
extract_roi_node.inputs.masks_location = self.masks_location
extract_roi_node.inputs.mask_pattern = self.mask_pattern
extract_roi_node.inputs.cropped_input = not self.parameters.get(
"use_uncropped_image"
)
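        # cropped_input tells the ROI extractor whether it receives the cropped
        # or the full-size registered images.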
extract_roi_node.inputs.roi_list = self.roi_list
extract_roi_node.inputs.uncrop_output = self.parameters.get("roi_uncrop_output")
# Connections
# ----------------------
# fmt: off
self.connect(
[
(self.input_node, save_as_pt, [("input_nifti", "input_img")]),
]
)
if self.parameters.get("extract_method") == "slice":
self.connect(
[
(save_as_pt, extract_slices_node, [("output_file", "input_tensor")]),
(extract_slices_node, self.output_node, [("output_file_rgb", "slices_rgb_output")]),
(extract_slices_node, self.output_node, [("output_file_original", "slices_original_output")]),
]
)
elif self.parameters.get("extract_method") == "patch":
self.connect(
[
(save_as_pt, extract_patches_node, [("output_file", "input_tensor")]),
(extract_patches_node, self.output_node, [("output_patch", "patches_output")]),
]
)
elif self.parameters.get("extract_method") == "roi":
self.connect(
[
(save_as_pt, extract_roi_node, [("output_file", "input_tensor")]),
(extract_roi_node, self.output_node, [("output_roi", "roi_output")]),
]
)
else:
self.connect(
[
(save_as_pt, self.output_node, [("output_file", "output_pt_file")]),
]
)
# fmt: on
|
# -*- coding: utf-8 -*-
# Use hash instead of parameters for iterables folder names
# Otherwise path will be too long and generate OSError
from nipype import config
import clinica.pipelines.engine as cpe
cfg = dict(execution={"parameterize_dirs": False})
config.update_config(cfg)
class DeepLearningPrepareData(cpe.Pipeline):
"""Deeplearning prepare data - MRI in nifti format are transformed into
PyTorch tensors. The transformation is applied to: the whole volume, a
selection of 3D patches, or slices extracted from the 3D volume. By default
    it uses the cropped version of the MRI (see option "--use_uncropped_image")
Returns:
A clinica pipeline object containing the Deeplearning prepare data pipeline.
"""
def check_custom_dependencies(self):
"""Check dependencies that can not be listed in the `info.json` file."""
def get_input_fields(self):
"""Specify the list of possible inputs of this pipeline.
Returns:
A list of (string) input fields name.
"""
return ["input_nifti"]
def get_output_fields(self):
"""Specify the list of possible outputs of this pipeline.
Returns:
A list of (string) output fields name.
"""
return ["image_id"]
def build_input_node(self):
"""Build and connect an input node to the pipeline."""
from os import path
import nipype.interfaces.utility as nutil
import nipype.pipeline.engine as npe
from clinica.utils.exceptions import (
ClinicaBIDSError,
ClinicaCAPSError,
ClinicaException,
)
from clinica.utils.input_files import (
T1W_EXTENSIVE,
T1W_LINEAR,
T1W_LINEAR_CROPPED,
pet_linear_nii,
)
from clinica.utils.inputs import clinica_file_reader
from clinica.utils.stream import cprint
from clinica.utils.ux import print_images_to_process
from .deeplearning_prepare_data_utils import check_mask_list
# Select the correct filetype corresponding to modality
if self.parameters.get("modality") == "t1-linear":
if self.parameters.get("use_uncropped_image"):
FILE_TYPE = T1W_LINEAR
else:
FILE_TYPE = T1W_LINEAR_CROPPED
if self.parameters.get("modality") == "t1-extensive":
FILE_TYPE = T1W_EXTENSIVE
if self.parameters.get("modality") == "pet-linear":
FILE_TYPE = pet_linear_nii(
self.parameters.get("acq_label"),
self.parameters.get("suvr_reference_region"),
self.parameters.get("use_uncropped_image"),
)
if self.parameters.get("modality") == "custom":
FILE_TYPE = {
"pattern": f"*{self.parameters.get('custom_suffix')}",
"description": "Custom suffix",
}
# Input file:
try:
input_files = clinica_file_reader(
self.subjects, self.sessions, self.caps_directory, FILE_TYPE
)
except ClinicaException as e:
err = (
"Clinica faced error(s) while trying to read files in your CAPS directory.\n"
+ str(e)
)
raise ClinicaBIDSError(err)
if len(self.subjects):
print_images_to_process(self.subjects, self.sessions)
cprint("The pipeline will last approximately 30 seconds per image.")
if self.parameters.get("extract_method") == "slice":
self.slice_direction = self.parameters.get("slice_direction")
self.slice_mode = self.parameters.get("slice_mode")
else:
self.slice_direction = "axial"
self.slice_mode = "rgb"
if self.parameters.get("extract_method") == "patch":
self.patch_size = self.parameters.get("patch_size")
self.stride_size = self.parameters.get("stride_size")
else:
self.patch_size = 50
self.stride_size = 50
# Load the corresponding masks
if self.parameters.get("extract_method") == "roi":
self.roi_list = self.parameters.get("roi_list")
if self.parameters.get("modality") == "custom":
self.mask_pattern = self.parameters.get("custom_mask_pattern")
self.template = self.parameters.get("custom_template")
if not self.template:
raise ValueError(
"A custom template must be defined when the modality is set to custom."
)
else:
self.mask_pattern = None
from .deeplearning_prepare_data_utils import TEMPLATE_DICT
self.template = TEMPLATE_DICT[self.parameters.get("modality")]
self.masks_location = path.join(
self.caps_directory, "masks", f"tpl-{self.template}"
)
if not self.roi_list:
raise ValueError("A list of regions must be given.")
else:
check_mask_list(
self.masks_location,
self.roi_list,
self.mask_pattern,
not self.parameters.get("use_uncropped_image"),
)
else:
self.masks_location = ""
self.mask_pattern = None
self.roi_list = []
# The reading node
# -------------------------
read_node = npe.Node(
name="ReadingFiles",
iterables=[
("input_nifti", input_files),
],
synchronize=True,
interface=nutil.IdentityInterface(fields=self.get_input_fields()),
)
self.connect(
[
(read_node, self.input_node, [("input_nifti", "input_nifti")]),
]
)
def build_output_node(self):
"""Build and connect an output node to the pipeline."""
import nipype.interfaces.utility as nutil
import nipype.pipeline.engine as npe
from nipype.interfaces.io import DataSink
from clinica.utils.filemanip import get_subject_id
from clinica.utils.nipype import container_from_filename, fix_join
# Write node
# ----------------------
write_node = npe.Node(name="WriteCaps", interface=DataSink())
write_node.inputs.base_directory = self.caps_directory
write_node.inputs.parameterization = False
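        # With parameterization disabled, DataSink writes directly into the
        # container path computed from the input filename instead of adding
        # per-iterable subfolders.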
# Get subject ID node
# ----------------------
image_id_node = npe.Node(
interface=nutil.Function(
input_names=["bids_or_caps_file"],
output_names=["image_id"],
function=get_subject_id,
),
name="ImageID",
)
# Find container path from input filename
# ----------------------
container_path = npe.Node(
nutil.Function(
input_names=["bids_or_caps_filename"],
output_names=["container"],
function=container_from_filename,
),
name="ContainerPath",
)
# fmt: off
self.connect(
[
(self.input_node, image_id_node, [("input_nifti", "bids_or_caps_file")]),
(self.input_node, container_path, [("input_nifti", "bids_or_caps_filename")]),
# (image_id_node, write_node, [('image_id', '@image_id')]),
(image_id_node, write_node, [("image_id", "@image_id")]),
]
)
# fmt: on
subfolder = "image_based"
if self.parameters.get("extract_method") == "slice":
subfolder = "slice_based"
# fmt: off
self.connect(
[
(self.output_node, write_node, [("slices_rgb_output", "@slices_rgb_output")]),
(self.output_node, write_node, [("slices_original_output", "@slices_original_output")]),
]
)
# fmt: on
elif self.parameters.get("extract_method") == "patch":
subfolder = "patch_based"
# fmt: off
self.connect(
[
(self.output_node, write_node, [("patches_output", "@patches_output")])
]
)
# fmt: on
elif self.parameters.get("extract_method") == "roi":
subfolder = "roi_based"
# fmt: off
self.connect(
[
(self.output_node, write_node, [("roi_output", "@roi_output")])
]
)
# fmt: on
else:
# fmt: off
self.connect(
[
(self.output_node, write_node, [("output_pt_file", "@output_pt_file")])
]
)
# fmt: on
mod_subfolder = ""
if self.parameters.get("modality") == "t1-linear":
mod_subfolder = "t1_linear"
if self.parameters.get("modality") == "t1-extensive":
mod_subfolder = "t1_extensive"
if self.parameters.get("modality") == "pet-linear":
mod_subfolder = "pet_linear"
if self.parameters.get("modality") == "custom":
mod_subfolder = "custom"
# fmt: off
self.connect(
[
(container_path, write_node, [
(("container", fix_join, "deeplearning_prepare_data", subfolder, mod_subfolder), "container")]),
]
)
# fmt: on
def build_core_nodes(self):
"""Build and connect the core nodes of the pipeline."""
import nipype.interfaces.utility as nutil
import nipype.pipeline.engine as npe
from .deeplearning_prepare_data_utils import (
extract_patches,
extract_roi,
extract_slices,
save_as_pt,
)
# The processing nodes
# Node to save input in nii.gz format into pytorch .pt format
# ----------------------
save_as_pt = npe.MapNode(
name="save_as_pt",
iterfield=["input_img"],
interface=nutil.Function(
function=save_as_pt,
input_names=["input_img"],
output_names=["output_file"],
),
)
# Extract slices node (options: 3 directions, mode)
# ----------------------
extract_slices_node = npe.MapNode(
name="extract_slices",
iterfield=["input_tensor"],
interface=nutil.Function(
function=extract_slices,
input_names=["input_tensor", "slice_direction", "slice_mode"],
output_names=["output_file_rgb", "output_file_original"],
),
)
extract_slices_node.inputs.slice_direction = self.slice_direction
extract_slices_node.inputs.slice_mode = self.slice_mode
# Extract patches node (options, patch size and stride size)
# ----------------------
extract_patches_node = npe.MapNode(
name="extract_patches",
iterfield=["input_tensor"],
interface=nutil.Function(
function=extract_patches,
input_names=["input_tensor", "patch_size", "stride_size"],
output_names=["output_patch"],
),
)
extract_patches_node.inputs.patch_size = self.patch_size
extract_patches_node.inputs.stride_size = self.stride_size
        # Extract ROI node
extract_roi_node = npe.MapNode(
name="extract_ROI",
iterfield=["input_tensor"],
interface=nutil.Function(
function=extract_roi,
input_names=[
"input_tensor",
"masks_location",
"mask_pattern",
"cropped_input",
"roi_list",
"uncrop_output",
],
output_names=["output_roi"],
),
)
extract_roi_node.inputs.masks_location = self.masks_location
extract_roi_node.inputs.mask_pattern = self.mask_pattern
extract_roi_node.inputs.cropped_input = not self.parameters.get(
"use_uncropped_image"
)
extract_roi_node.inputs.roi_list = self.roi_list
extract_roi_node.inputs.uncrop_output = self.parameters.get("roi_uncrop_output")
# Connections
# ----------------------
# fmt: off
self.connect(
[
(self.input_node, save_as_pt, [("input_nifti", "input_img")]),
]
)
if self.parameters.get("extract_method") == "slice":
self.connect(
[
(save_as_pt, extract_slices_node, [("output_file", "input_tensor")]),
(extract_slices_node, self.output_node, [("output_file_rgb", "slices_rgb_output")]),
(extract_slices_node, self.output_node, [("output_file_original", "slices_original_output")]),
]
)
elif self.parameters.get("extract_method") == "patch":
self.connect(
[
(save_as_pt, extract_patches_node, [("output_file", "input_tensor")]),
(extract_patches_node, self.output_node, [("output_patch", "patches_output")]),
]
)
elif self.parameters.get("extract_method") == "roi":
self.connect(
[
(save_as_pt, extract_roi_node, [("output_file", "input_tensor")]),
(extract_roi_node, self.output_node, [("output_roi", "roi_output")]),
]
)
else:
self.connect(
[
(save_as_pt, self.output_node, [("output_file", "output_pt_file")]),
]
)
# fmt: on
|
#!/usr/bin/env python
import codecs
import os
import re
import sys
from setuptools import setup
DESCRIPTION = 'UI-level acceptance test framework'
def load_requirements(*requirements_paths):
"""
Load all requirements from the specified requirements files.
Requirements will include any constraints from files specified
with -c in the requirements files.
Returns a list of requirement strings.
"""
# UPDATED VIA SEMGREP - if you need to remove/modify this method remove this line and add a comment specifying why.
# minor update to allow brackets in library names
requirements = {}
constraint_files = set()
# groups "my-package-name<=x.y.z,..." into ("my-package-name", "<=x.y.z,...")
requirement_line_regex = re.compile(r"([a-zA-Z0-9-_.\[\]]+)([<>=][^#\s]+)?")
def add_version_constraint_or_raise(current_line, current_requirements, add_if_not_present):
regex_match = requirement_line_regex.match(current_line)
if regex_match:
package = regex_match.group(1)
version_constraints = regex_match.group(2)
existing_version_constraints = current_requirements.get(package, None)
# it's fine to add constraints to an unconstrained package, but raise an error if there are already
# constraints in place
if existing_version_constraints and existing_version_constraints != version_constraints:
raise BaseException(f'Multiple constraint definitions found for {package}:'
f' "{existing_version_constraints}" and "{version_constraints}".'
f'Combine constraints into one location with {package}'
f'{existing_version_constraints},{version_constraints}.')
if add_if_not_present or package in current_requirements:
current_requirements[package] = version_constraints
# process .in files and store the path to any constraint files that are pulled in
for path in requirements_paths:
with open(path) as reqs:
for line in reqs:
if is_requirement(line):
add_version_constraint_or_raise(line, requirements, True)
if line and line.startswith('-c') and not line.startswith('-c http'):
constraint_files.add(os.path.dirname(path) + '/' + line.split('#')[0].replace('-c', '').strip())
# process constraint files and add any new constraints found to existing requirements
for constraint_file in constraint_files:
with open(constraint_file) as reader:
for line in reader:
if is_requirement(line):
add_version_constraint_or_raise(line, requirements, False)
# process back into list of pkg><=constraints strings
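    # e.g. a stored entry {"requests[security]": "<=2.28.0"} becomes the string
    # "requests[security]<=2.28.0"; unconstrained packages keep just their name.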
    constrained_requirements = [f'{pkg}{version or ""}' for (pkg, version) in sorted(requirements.items())]
return constrained_requirements
def is_requirement(line):
"""
Return True if the requirement line is a package requirement.
Returns:
bool: True if the line is not blank, a comment,
a URL, or an included file
"""
# UPDATED VIA SEMGREP - if you need to remove/modify this method remove this line and add a comment specifying why
return line and line.strip() and not line.startswith(('-r', '#', '-e', 'git+', '-c'))
def get_version(*file_paths):
    """
    Extract the version string from the file at the given relative path fragments.
    """
    filename = os.path.join(os.path.dirname(__file__), *file_paths)
    with open(filename, encoding='utf-8') as opened_file:
        version_file = opened_file.read()
        version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                                  version_file, re.M)
        if version_match:
            return version_match.group(1)
    raise RuntimeError('Unable to find version string.')
VERSION = get_version("bok_choy", "__init__.py")
if sys.argv[-1] == 'tag':
    print("Tagging the version on github:")
    os.system("git tag -a v%s -m 'v%s'" % (VERSION, VERSION))
    os.system("git push --tags")
    sys.exit()
with codecs.open('README.rst', 'r', 'utf-8') as f:
    LONG_DESCRIPTION = f.read()
setup(
name='bok_choy',
version=VERSION,
author='edX',
author_email='[email protected]',
url='http://github.com/edx/bok-choy',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
license='Apache 2.0',
classifiers=['Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Quality Assurance'],
packages=['bok_choy', 'bok_choy/a11y'],
package_data={'bok_choy': ['vendor/google/*.*', 'vendor/axe-core/*.*']},
install_requires=load_requirements('requirements/base.in'),
extras_require={
'visual_diff': ['needle']
}
)
|
#!/usr/bin/env python
import codecs
import os
import re
import sys
from setuptools import setup
DESCRIPTION = 'UI-level acceptance test framework'
def load_requirements(*requirements_paths):
"""
Load all requirements from the specified requirements files.
Requirements will include any constraints from files specified
with -c in the requirements files.
Returns a list of requirement strings.
"""
# UPDATED VIA SEMGREP - if you need to remove/modify this method remove this line and add a comment specifying why.
# minor update to allow brackets in library names
requirements = {}
constraint_files = set()
# groups "my-package-name<=x.y.z,..." into ("my-package-name", "<=x.y.z,...")
requirement_line_regex = re.compile(r"([a-zA-Z0-9-_.\[\]]+)([<>=][^#\s]+)?")
def add_version_constraint_or_raise(current_line, current_requirements, add_if_not_present):
regex_match = requirement_line_regex.match(current_line)
if regex_match:
package = regex_match.group(1)
version_constraints = regex_match.group(2)
existing_version_constraints = current_requirements.get(package, None)
# it's fine to add constraints to an unconstrained package, but raise an error if there are already
# constraints in place
if existing_version_constraints and existing_version_constraints != version_constraints:
raise BaseException(f'Multiple constraint definitions found for {package}:'
f' "{existing_version_constraints}" and "{version_constraints}".'
f'Combine constraints into one location with {package}'
f'{existing_version_constraints},{version_constraints}.')
if add_if_not_present or package in current_requirements:
current_requirements[package] = version_constraints
# process .in files and store the path to any constraint files that are pulled in
for path in requirements_paths:
with open(path) as reqs:
for line in reqs:
if is_requirement(line):
add_version_constraint_or_raise(line, requirements, True)
if line and line.startswith('-c') and not line.startswith('-c http'):
constraint_files.add(os.path.dirname(path) + '/' + line.split('#')[0].replace('-c', '').strip())
# process constraint files and add any new constraints found to existing requirements
for constraint_file in constraint_files:
with open(constraint_file) as reader:
for line in reader:
if is_requirement(line):
add_version_constraint_or_raise(line, requirements, False)
# process back into list of pkg><=constraints strings
constrained_requirements = [f'{pkg}{version or ""}' for (pkg, version) in sorted(requirements.items())]
return constrained_requirements
def is_requirement(line):
"""
Return True if the requirement line is a package requirement.
Returns:
bool: True if the line is not blank, a comment,
a URL, or an included file
"""
# UPDATED VIA SEMGREP - if you need to remove/modify this method remove this line and add a comment specifying why
return line and line.strip() and not line.startswith(('-r', '#', '-e', 'git+', '-c'))
if sys.argv[-1] == 'tag':
print("Tagging the version on github:")
os.system("git tag -a v%s -m 'v%s'" % (VERSION, VERSION))
os.system("git push --tags")
sys.exit()
with codecs.open('README.rst', 'r', 'utf-8') as f:
LONG_DESCRIPTION = f.read()
def get_version(*file_paths):
"""
Extract the version string from the file at the given relative path fragments.
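    For example, a file containing the line __version__ = '1.2.3' yields "1.2.3".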
"""
filename = os.path.join(os.path.dirname(__file__), *file_paths)
with open(filename, encoding='utf-8') as opened_file:
version_file = opened_file.read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError('Unable to find version string.')
VERSION = get_version("bok_choy", "__init__.py")
setup(
name='bok_choy',
version=VERSION,
author='edX',
author_email='[email protected]',
url='http://github.com/edx/bok-choy',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
license='Apache 2.0',
classifiers=['Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Quality Assurance'],
packages=['bok_choy', 'bok_choy/a11y'],
package_data={'bok_choy': ['vendor/google/*.*', 'vendor/axe-core/*.*']},
install_requires=load_requirements('requirements/base.in'),
extras_require={
'visual_diff': ['needle']
}
)
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from typing import Tuple
from pants.option.scope import GLOBAL_SCOPE
class OptionsError(Exception):
"""An options system-related error."""
# -----------------------------------------------------------------------
# Option registration errors
# -----------------------------------------------------------------------
class RegistrationError(OptionsError):
"""An error at option registration time."""
def __init__(self, scope: str, option: str, **msg_format_args) -> None:
scope_str = "global scope" if scope == GLOBAL_SCOPE else f"scope {scope}"
if self.__doc__ is None:
raise ValueError(
"Invalid RegistrationError definition. "
"Please specify the error message in the docstring."
)
docstring = self.__doc__.format(**msg_format_args)
super().__init__(f"{docstring} [option {option} in {scope_str}].")
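# Illustrative note (not part of the original file): with the docstring templating above,
# raising e.g. InvalidKwarg("my-scope", "--foo", kwarg="bar") produces roughly
#   "Invalid registration kwarg bar. [option --foo in scope my-scope]."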
class BooleanOptionNameWithNo(RegistrationError):
"""Boolean option names cannot start with --no."""
class DefaultValueType(RegistrationError):
"""Default value {value_type}({default_value!r}) does not match option type {option_type}."""
class DefaultMemberValueType(DefaultValueType):
"""Default member value type mismatch.
Member value {value_type}({member_value!r}) does not match list option type {member_type}.
"""
class HelpType(RegistrationError):
"""The `help=` argument must be a string, but was of type `{help_type}`."""
class ImplicitValIsNone(RegistrationError):
"""Implicit value cannot be None."""
class InvalidKwarg(RegistrationError):
"""Invalid registration kwarg {kwarg}."""
class InvalidKwargNonGlobalScope(RegistrationError):
"""Invalid registration kwarg {kwarg} on non-global scope."""
class InvalidMemberType(RegistrationError):
"""member_type {member_type} not allowed."""
class MemberTypeNotAllowed(RegistrationError):
"""member_type not allowed on option with type {type_}.
It may only be specified if type=list.
"""
class NoOptionNames(RegistrationError):
"""No option names provided."""
class OptionAlreadyRegistered(RegistrationError):
"""An option with this name was already registered on this scope."""
class OptionNameDash(RegistrationError):
"""Option name must begin with a dash."""
class OptionNameDoubleDash(RegistrationError):
"""Long option name must begin with a double-dash."""
class PassthroughType(RegistrationError):
"""Options marked passthrough must be typed as a string list."""
# -----------------------------------------------------------------------
# Flag parsing errors
# -----------------------------------------------------------------------
class ParseError(OptionsError):
"""An error at flag parsing time."""
class BooleanConversionError(ParseError):
"""Indicates a value other than 'True' or 'False' when attempting to parse a bool."""
class FromfileError(ParseError):
"""Indicates a problem reading a value @fromfile."""
class MutuallyExclusiveOptionError(ParseError):
"""Indicates that two options in the same mutually exclusive group were specified."""
class UnknownFlagsError(ParseError):
"""Indicates that unknown command-line flags were encountered in some scope."""
def __init__(self, flags: Tuple[str, ...], arg_scope: str):
self.flags = flags
self.arg_scope = arg_scope
scope = f"scope {self.arg_scope}" if self.arg_scope else "global scope"
        msg = f"Unknown flags {', '.join(self.flags)} on {scope}"
super().__init__(msg)
# -----------------------------------------------------------------------
# Config parsing errors
# -----------------------------------------------------------------------
class ConfigError(OptionsError):
"""An error encountered while parsing a config file."""
class ConfigValidationError(ConfigError):
"""A config file is invalid."""
class NoSectionError(ConfigError):
def __init__(self, section: str):
super().__init__(f"No section: {section}")
class NoOptionError(ConfigError):
def __init__(self, option: str, section: str):
super().__init__(f"No option {option} in section {section}")
class InterpolationMissingOptionError(ConfigError):
def __init__(self, option, section, rawval, reference):
super().__init__(
f"Bad value substitution: option {option} in section {section} contains an "
f"interpolation key {reference} which is not a valid option name. Raw value: {rawval}",
)
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from typing import Tuple
from pants.option.scope import GLOBAL_SCOPE
class OptionsError(Exception):
"""An options system-related error."""
# -----------------------------------------------------------------------
# Option registration errors
# -----------------------------------------------------------------------
class RegistrationError(OptionsError):
"""An error at option registration time."""
def __init__(self, scope: str, option: str, **msg_format_args) -> None:
scope_str = "global scope" if scope == GLOBAL_SCOPE else f"scope {scope}"
if self.__doc__ is None:
raise ValueError(
"Invalid RegistrationError definition. "
"Please specify the error message in the docstring."
)
docstring = self.__doc__.format(**msg_format_args)
super().__init__(f"{docstring} [option {option} in {scope_str}].")
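# Illustrative note (not part of the original file): with the docstring templating above,
# raising e.g. InvalidKwarg("my-scope", "--foo", kwarg="bar") produces roughly
#   "Invalid registration kwarg bar. [option --foo in scope my-scope]."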
class BooleanOptionNameWithNo(RegistrationError):
"""Boolean option names cannot start with --no."""
class DefaultValueType(RegistrationError):
"""Default value {value_type}({default_value!r}) does not match option type {option_type}."""
class DefaultMemberValueType(DefaultValueType):
"""Default member value type mismatch.
Member value {value_type}({member_value!r}) does not match list option type {member_type}.
"""
class HelpType(RegistrationError):
"""The `help=` argument must be a string, but was of type `{help_type}`."""
class ImplicitValIsNone(RegistrationError):
"""Implicit value cannot be None."""
class InvalidKwarg(RegistrationError):
"""Invalid registration kwarg {kwarg}."""
class InvalidKwargNonGlobalScope(RegistrationError):
"""Invalid registration kwarg {kwarg} on non-global scope."""
class InvalidMemberType(RegistrationError):
"""member_type {member_type} not allowed."""
class MemberTypeNotAllowed(RegistrationError):
"""member_type not allowed on option with type {type_}.
It may only be specified if type=list.
"""
class NoOptionNames(RegistrationError):
"""No option names provided."""
class OptionAlreadyRegistered(RegistrationError):
"""An option with this name was already registered on this scope."""
class OptionNameDash(RegistrationError):
"""Option name must begin with a dash."""
class OptionNameDoubleDash(RegistrationError):
"""Long option name must begin with a double-dash."""
class PassthroughType(RegistrationError):
"""Options marked passthrough must be typed as a string list."""
# -----------------------------------------------------------------------
# Flag parsing errors
# -----------------------------------------------------------------------
class ParseError(OptionsError):
"""An error at flag parsing time."""
class BooleanConversionError(ParseError):
"""Indicates a value other than 'True' or 'False' when attempting to parse a bool."""
class FromfileError(ParseError):
"""Indicates a problem reading a value @fromfile."""
class MutuallyExclusiveOptionError(ParseError):
"""Indicates that two options in the same mutually exclusive group were specified."""
class UnknownFlagsError(ParseError):
"""Indicates that unknown command-line flags were encountered in some scope."""
def __init__(self, flags: Tuple[str, ...], arg_scope: str):
self.flags = flags
self.arg_scope = arg_scope
scope = f"scope {self.arg_scope}" if self.arg_scope else "global scope"
msg = f"Unknown flags {', '.join(self.flags)} on {scope}"
super().__init__(msg)
# -----------------------------------------------------------------------
# Config parsing errors
# -----------------------------------------------------------------------
class ConfigError(OptionsError):
"""An error encountered while parsing a config file."""
class ConfigValidationError(ConfigError):
"""A config file is invalid."""
class NoSectionError(ConfigError):
def __init__(self, section: str):
super().__init__(f"No section: {section}")
class NoOptionError(ConfigError):
def __init__(self, option: str, section: str):
super().__init__(f"No option {option} in section {section}")
class InterpolationMissingOptionError(ConfigError):
def __init__(self, option, section, rawval, reference):
super().__init__(
f"Bad value substitution: option {option} in section {section} contains an "
f"interpolation key {reference} which is not a valid option name. Raw value: {rawval}",
)
|
import json
from time import sleep, strftime, localtime
from qbittorrent import Client
def load_configs():
with open('config.json', 'r', encoding="UTF-8") as file:
js = json.load(file)
global HOST, PORT, LOGIN, PASSWORD, MIN_SIZE, MAX_SIZE
HOST = js["HOST"]
PORT = js["PORT"]
LOGIN = js["LOGIN"]
PASSWORD = js["PASSWORD"]
MIN_SIZE = float(js["MIN_SIZE"])
MAX_SIZE = float(js["MAX_SIZE"])
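# A minimal example of the expected config.json (illustrative values only, not from the original repo):
# {
#     "HOST": "127.0.0.1",
#     "PORT": "8080",
#     "LOGIN": "admin",
#     "PASSWORD": "adminadmin",
#     "MIN_SIZE": "0.5",
#     "MAX_SIZE": "50"
# }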
def get_time():
return strftime("%Y-%m-%d %H:%M:%S", localtime())
def main():
try:
qb = Client(f'http://{HOST}:{PORT}/')
qb.login(LOGIN, PASSWORD)
try:
if qb is not None:
while True:
torrents = qb.torrents()
for torrent in torrents:
                        sleep(2)  # without a delay it may return "-1"
if (MIN_SIZE*1073741824 > torrent['size'] or torrent['size'] > MAX_SIZE*1073741824) and float(str(torrent["progress"])[0]) != 1:
if torrent['size'] != 0:
                                print(f'{get_time()}: Torrent "{torrent["name"]}" is out of size limit: {round(torrent["size"]/1073741824, 2)} GB. Deleting...')
qb.delete_permanently(torrent['hash'])
sleep(3)
if torrent['state'] == 'stalledDL' and float(str(torrent["progress"])[0:4]) > 0.98:
                            print(f'{get_time()}: Torrent "{torrent["name"]}" is stuck. Rechecking...')
qb.recheck(torrent['hash'])
qb.increase_priority(torrent['hash'])
                            sleep(300)  # after a recheck the torrent may briefly stay in the stalled state, so a delay is needed
except Exception as e:
print(f'{get_time()}: Failed to get torrent list or recheck stuck torrent: {e}')
except Exception as e:
print(f'{get_time()}: Failed to establish connection: {e}')
if __name__ == "__main__":
print(f'{get_time()}: Starting script...')
load_configs()
main()
|
import json
from time import sleep, strftime, localtime
from qbittorrent import Client
def load_configs():
with open('config.json', 'r', encoding="UTF-8") as file:
js = json.load(file)
global HOST, PORT, LOGIN, PASSWORD, MIN_SIZE, MAX_SIZE
HOST = js["HOST"]
PORT = js["PORT"]
LOGIN = js["LOGIN"]
PASSWORD = js["PASSWORD"]
MIN_SIZE = float(js["MIN_SIZE"])
MAX_SIZE = float(js["MAX_SIZE"])
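# A minimal example of the expected config.json (illustrative values only, not from the original repo):
# {
#     "HOST": "127.0.0.1",
#     "PORT": "8080",
#     "LOGIN": "admin",
#     "PASSWORD": "adminadmin",
#     "MIN_SIZE": "0.5",
#     "MAX_SIZE": "50"
# }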
def get_time():
return strftime("%Y-%m-%d %H:%M:%S", localtime())
def main():
try:
qb = Client(f'http://{HOST}:{PORT}/')
qb.login(LOGIN, PASSWORD)
try:
if qb is not None:
while True:
torrents = qb.torrents()
for torrent in torrents:
                        sleep(2)  # without a delay it may return "-1"
if (MIN_SIZE*1073741824 > torrent['size'] or torrent['size'] > MAX_SIZE*1073741824) and float(str(torrent["progress"])[0]) != 1:
if torrent['size'] != 0:
print(f'{get_time()}: Torrent "{torrent["name"]}" is out of size limit: {round(torrent["size"]/1073741824, 2)} GB. Deleting...')
qb.delete_permanently(torrent['hash'])
sleep(3)
if torrent['state'] == 'stalledDL' and float(str(torrent["progress"])[0:4]) > 0.98:
print(f'{get_time()}: Torrent "{torrent["name"]}" is stuck. Rechecking...')
qb.recheck(torrent['hash'])
qb.increase_priority(torrent['hash'])
                            sleep(300)  # after a recheck the torrent may briefly stay in the stalled state, so a delay is needed
except Exception as e:
print(f'{get_time()}: Failed to get torrent list or recheck stuck torrent: {e}')
except Exception as e:
print(f'{get_time()}: Failed to establish connection: {e}')
if __name__ == "__main__":
print(f'{get_time()}: Starting script...')
load_configs()
main()
|
"""This module provides different adapter classes that allow
for a smoother combination of Qt and the Deep Learning ToolBox.
"""
# standard imports
from typing import Iterator, Iterable, Any, Callable
import logging
# Qt imports
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QKeyEvent
from PyQt5.QtWidgets import QComboBox, QListWidget, QListWidgetItem
# GUI imports
from .utils import qtName, protect, QDebug
# logging
LOG = logging.getLogger(__name__)
class ItemAdapter(QDebug):
"""This class provides functionality that can be used by QWidgets that
allow to choose from lists of items, like `QComboBox` and
`QListWidget`. It acts as a translator mapping between the data
structures used in the Deep Learning ToolBox and the Qt widgets.
The QWidgets allow to store items and associated data in different
ways:
* The `QListWidget` uses `QListWidgetItem`s to represent the list items.
Such an item is not a QWidget, but holds some information specifying
display properties (like foreground and background color or icons),
      the text value of the item, and it allows storing additional
      associated user data by introducing specific roles.
    * The `QComboBox` does not use an explicit class to represent
list items, but it also allows to set display properties and
to store associated information for each item using roles.
    Both widgets have the following commonalities:
* New items can be registered with
`QComboBox.addItem(text, [icon], [userData])` and
`QListWidget.addItem(label=text)`
* Items can be accessed by index:
`QComboBox.itemText(index)` and `QListWidget.item(row).text()`
* Items can be accessed by text:
`QComboBox.findText(text)` gives a single index while
`QList.findItems(text)` returns a list of item objects.
* Items can be removed:
`QComboBox.removeItem(index)` and
`QListWidget.takeItem(QListWidget.item(index))
* There may be a current item (selected item). The numerical index
can be obtained by
`QComboBox.currentIndex()` and `QListWidget.currentRow()`
* The text of the current item can be obtained by
`QComboBox.currentText()` and `QListWidget.currentItem().text()`
* data associated with the current item can be obtained by
`QComboBox.currentData(role)` and `QListWidget.currentItem().data(role)`
"""
_itemToText: Callable[[Any], str] = str
def __init_subclass__(cls, itemType: type = None,
itemToText: Callable[[Any], str] = None,
**kwargs) -> None:
super().__init_subclass__(**kwargs)
if itemType is not None:
setattr(cls, qtName(itemType.__name__), cls._currentItem)
setattr(cls, qtName('set_' + itemType.__name__), cls._currentItem)
if itemToText is not None:
cls._itemToText = staticmethod(itemToText)
print(f"DEBUG1[{cls.__name__}]: itemToText:",
itemToText, cls._itemToText)
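    # Illustrative sketch (not from the original source) of how these hooks are meant to be
    # used; `Tool` and `QToolComboBox` are hypothetical names:
    #     class QToolComboBox(QAdaptedComboBox, itemType=Tool,
    #                         itemToText=lambda tool: tool.name):
    #         ...
    # __init_subclass__ then registers item-type-named accessor methods on the subclass
    # and the given function is used to render each item as text.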
def __init__(self, itemToText: Callable[[Any], str] = None,
**kwargs) -> None:
super().__init__(**kwargs)
self.setItemToText(itemToText)
#
# methods to be implemented by subclasses
#
def _items(self) -> Iterator[Any]:
"""An iterator for the items in this
:py:class:`ItemAdapter`.
"""
raise NotImplementedError("A 'ItemAdapter' has to implement "
"the _items() method")
def _addItem(self, item: Any) -> None:
"""Add an item to this :py:class:`ItemAdapter`.
It is assumed that the item is not yet contained in this
:py:class:`ItemAdapter`.
"""
raise NotImplementedError("A 'ItemAdapter' has to implement "
"the _addItem() method")
def _removeItem(self, item: Any) -> None:
"""Remove an item from this :py:class:`ItemAdapter`. It is
assumed that the item is contained in this
:py:class:`ItemAdapter`, otherwise a
:py:class:`ValueError` is raised.
"""
        raise NotImplementedError("A 'ItemAdapter' has to implement "
                                  "the _removeItem() method")
def _currentItem(self) -> Any:
"""Get the currently selected item.
        This may be `None` if no item is selected.
"""
raise NotImplementedError("A 'ItemAdapter' has to implement "
"the _currentItem() method")
def _setCurrentItem(self, item: Any) -> None:
"""Select the given entry in this :py:class:`ItemAdapter`.
Arguments
---------
item: Any
The item to become the current item. If the item is not
contained in this :py:class:`ItemAdapter` (e.g. if
`item` is `None`), the current will be set to `None`.
"""
raise NotImplementedError("A 'ItemAdapter' has to implement "
"the _setCurrentItem() method")
#
# Implemented methods
#
def _countItems(self) -> int:
"""Get the number of items in this :py:class:`ItemAdapter`.
"""
return sum(1 for _ in self._items())
    def _textForItem(self, item: Any) -> str:
        """Get the text to be displayed for a given item.
"""
return self._itemToText(item)
def _formatItem(self, item: Any) -> None:
"""May be implemented by a subclass to format an item.
This method is only called if the item is currently displayed
by this :py:class:`ItemAdapter` (has been added and was not removed),
but it may be called several times for the same item (to
trigger an update of this item).
The base implementation does nothing, but derived classes may
        override this method to allow for fancy formatting.
"""
def _getItemAt(self, index: int) -> Any:
"""
Raises
------
IndexError:
The index provided is invalid.
"""
try:
return next((x for i, x in enumerate(self._items()) if i == index))
except StopIteration:
raise IndexError(f"Index {index} beyond end of items.")
def _getTextAt(self, index: int) -> str:
"""
Raises
------
IndexError:
The index provided is invalid.
"""
return self._textForItem(self._getItemAt(index))
def _indexOfItem(self, item: Any) -> int:
"""
Raises
------
LookupError:
The given item is not found in this :py:class:`ItemAdapter`.
"""
try:
return next(i for i, x in enumerate(self._items()) if x == item)
except StopIteration:
raise LookupError(f"Item {item} not found.")
def _indexOfText(self, text: str) -> int:
"""
Raises
------
LookupError:
The given text is not found in this :py:class:`ItemAdapter`.
"""
try:
return next(i for i, t in enumerate(self._texts()) if t == text)
except StopIteration:
raise LookupError(f"Item with text '{text}' not found")
def _findItem(self, text: str) -> Any:
"""
Raises
------
LookupError:
The given text is not found in this :py:class:`ItemAdapter`.
"""
try:
return next(item for item in self._items()
if self._textForItem(item) == text)
except StopIteration:
raise LookupError(f"Item with text '{text}' not found.")
def _setCurrentText(self, text: str) -> None:
"""
"""
self._setCurrentItem(self._findItem(text))
def _texts(self) -> Iterator[str]:
"""An iterator for the texts presented by this
:py:class:`ItemAdapter`.
"""
for item in self._items():
yield self._textForItem(item)
def _removeText(self, text: str) -> None:
"""Remove the item with the given text. This may be
overwritten by subclasses when a more efficient implementation
is possible.
"""
self._removeItem(self._findItem(text))
def _removeItemAt(self, index: int) -> None:
"""Remove the item at the given index.
Raises
------
IndexError:
The index provided is invalid.
"""
self._removeItem(self._getItemAt(index))
def _removeAllItems(self) -> None:
"""Remove all items in this :py:class:`ItemAdapter`.
"""
try:
self._removeItemAt(0)
except IndexError:
pass # no item left to remove
def _formatAllItems(self) -> None:
"""
"""
for item in self._items():
self._formatItem(item)
def _updateAllItems(self) -> None:
"""Update the display of the list elements. This may be implemented by
subclasses that would like to adapt the style of display
depending on the state of the element.
This method will be called when the list has been updated
(e.g. by directly adding or removing elements, or by filling
the list from some iterable), but subclasses may also call this
        method proactively in response to notifications.
"""
#
# public interface
#
def setFromIterable(self, iterable: Iterable) -> None:
"""Set the items in this :py:class:`ItemAdapter` from an
iterable. This will first remove the old items and then
add the new items.
"""
self._removeAllItems()
for item in iterable:
self._addItem(item)
def updateFromIterable(self, iterable: Iterable) -> None:
"""Update the items in this :py:class:`ItemAdapter` from an iterable.
Items from the iterable, that are not yet contained in the
list are added, while items originally contained in this
:py:class:`ItemAdapter`, that are not iterated by the
iterable, are removed.
"""
# 1. Create a set containing the texts for items already contained
# in this list (this is used for bookkeeping).
bookkeeping = set(self._texts())
# 2. Iterate over entries from the iterable and add entries
# missing in this list.
for item in iterable:
text = self._textForItem(item)
if text in bookkeeping:
bookkeeping.remove(text)
else:
self._addItem(item)
# 3. Remove items from this list that are no longer present
for text in bookkeeping:
self._removeText(text)
def setItemToText(self, itemToText: Callable[[Any], str]) -> None:
"""Set the function to be used when converting items
to their textual presentation.
"""
if itemToText is None:
self.__dict__.pop('_itemToText', None)
else:
self._itemToText = itemToText
self._formatAllItems()
@protect
def keyPressEvent(self, event: QKeyEvent) -> None:
"""Process key events. The :py:class:`ItemAdapter` supports
the following keys:
C: clear the currently selected entry
Note: in a :py:class:`QComboBox` this event is only received
if the combobox is closed (not while currently selecting an entry).
"""
key = event.key()
LOG.debug("ItemAdapter[%s].keyPressEvent: key=%d",
type(self).__name__, key)
if key == Qt.Key_C: # clear
self._setCurrentItem(None)
elif key == Qt.Key_Y: # no itemToText function (inherit from super)
self.setItemToText(None)
elif key == Qt.Key_Z: # simple str() as itemToText function (debug)
self.setItemToText(str)
elif hasattr(super(), 'keyPressEvent'):
super().keyPressEvent(event)
else:
event.ignore()
    def debug(self) -> None:
        """Output debug information for this :py:class:`ItemAdapter`.
"""
if hasattr(super(), 'debug'):
super().debug()
print(f"debug: ItemAdapter[{type(self).__name__}]: "
f"with {self._countItems()} entries:")
for index, item in enumerate(self._items()):
            print(f"debug:{'**' if item is self._currentItem() else ' '}"
f"({index+1}) {self._textForItem(item)} "
f"[{repr(item)}]")
class QAdaptedComboBox(ItemAdapter, QComboBox):
"""A :py:class:`QComboBox` implementing the
:py:class:`ItemAdapter` interface.
"""
#
# methods to be implemented by subclasses
#
def _countItems(self) -> int:
"""Get the number of items in this :py:class:`QAdaptedComboBox`.
"""
return self.count()
def _items(self) -> Iterator[Any]:
"""An iterator for the items in this
:py:class:`QAdaptedComboBox`.
"""
for index in range(self.count()):
yield self.itemData(index)
def _texts(self) -> Iterator[str]:
"""An iterator for the texts presented by this
:py:class:`QAdaptedComboBox`.
"""
for index in range(self.count()):
yield self.itemText(index)
def _addItem(self, item: Any) -> None:
"""Add an item to this :py:class:`QAdaptedComboBox`.
It is assumed that the item is not yet contained in this
:py:class:`QAdaptedComboBox`.
"""
self.addItem(self._textForItem(item), item)
self._formatItem(item)
def _removeItem(self, item: Any) -> None:
"""Remove an item from this :py:class:`QAdaptedComboBox`.
It is assumed that the item is contained in this
:py:class:`QAdaptedComboBox`, otherwise a
:py:class:`ValueError` is raised.
"""
self._removeItemAt(self._indexOfItem(item))
def _removeItemAt(self, index: int) -> None:
"""Remove the item at the given index.
"""
self.removeItem(index)
def _removeText(self, text: str) -> None:
"""Remove the item with the given text. This may be
overwritten by subclasses when a more efficient implementation
is possible.
"""
self._removeItemAt(self._indexOfText(text))
def _formatItemAt(self, index: int) -> None:
"""Format the item at the given index to reflect
the state of the underlying item.
This method may be extended by subclasses.
"""
self.setItemText(index, self._textForItem(self.itemData(index)))
def _formatItem(self, item: Any) -> None:
"""Update the format of the item's presentation
in this :py:class:`QAdaptedComboBox`
to reflect its state.
"""
self._formatItemAt(self._indexOfItem(item))
def _formatAllItems(self) -> None:
"""Format all items in this :py:class:`QAdaptedComboBox`.
"""
for index in range(self.count()):
self._formatItemAt(index)
def _currentItem(self) -> Any:
"""Get the currently selected item.
        This may be `None` if no item is selected.
"""
return self.currentData()
def _setCurrentItem(self, item: Any) -> None:
"""Select the given entry in this :py:class:`QAdaptedComboBox`.
Arguments
---------
item: Any
The item to become the current item. If the item is not
contained in this :py:class:`QAdaptedComboBox` (e.g. if
`item` is `None`), the current will be set to `None`.
"""
try:
self.setCurrentIndex(self._indexOfItem(item))
except LookupError:
# For an empty QComboBox or a QComboBox in which no
# current entry is set, the index is -1 (which is also
# returned by QComboBox.findText if the entry is not found).
self.setCurrentIndex(-1)
class QAdaptedListWidget(ItemAdapter, QListWidget):
"""A :py:class:`QListWidget` implementing the
:py:class:`ItemAdapter` interface.
"""
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
self._formater = None
def setListWidgetItemFormater(self, formater:
Callable[[QListWidgetItem], None]) -> None:
"""Set a formater for the list items.
"""
self._formater = formater
self._formatAllItems()
def updateFormat(self) -> None:
"""Update the format of all items in this
:py:class:`QAdaptedListWidget`.
"""
self._formatAllItems()
#
# methods to be implemented by subclasses
#
def _countItems(self) -> int:
"""Get the number of items in this :py:class:`QAdaptedListWidget`.
"""
return self.count()
def _qitem(self, item: Any) -> QListWidgetItem:
"""Get the :py:class:`QListWidgetItem` that holds the given
item.
"""
return next((qitem for qitem in self._qitems()
if qitem.data(Qt.UserRole) is item), None)
def _qitems(self) -> Iterator[QListWidgetItem]:
"""An :py:class:`Iterator` for the :py:class:`QListWidgetItem`
in this :py:class:`QAdaptedListWidget`.
"""
for index in range(self.count()):
yield self.item(index)
def _formatQItem(self, qitem: QListWidgetItem) -> None:
"""Format the given :py:class:`QListWidgetItem` to reflect
the state of the underlying item.
This method may be extended by subclasses.
"""
qitem.setText(self._textForItem(qitem.data(Qt.UserRole)))
if self._formater is not None:
self._formater(qitem)
def _items(self) -> Iterator[Any]:
"""An iterator for the items in this
        :py:class:`QAdaptedListWidget`.
"""
for qitem in self._qitems():
yield qitem.data(Qt.UserRole)
def _texts(self) -> Iterator[str]:
"""An iterator for the texts presented by this
:py:class:`QAdaptedListWidget`.
"""
for qitem in self._qitems():
yield qitem.text()
    def _addItem(self, item: Any) -> None:
        """Add an item to this :py:class:`QAdaptedListWidget`.
It is assumed that the item is not yet contained in this
:py:class:`QAdaptedListWidget`.
"""
qitem = QListWidgetItem(self._textForItem(item))
qitem.setData(Qt.UserRole, item)
self.addItem(qitem)
self._formatQItem(qitem)
def _formatItem(self, item: Any) -> None:
"""Update the format of the item's presentation
in this :py:class:`QAdaptedListWidget`
to reflect its state.
"""
self._formatQItem(self._qitem(item))
def _formatAllItems(self) -> None:
"""Format all items in this :py:class:`QAdaptedListWidget`.
"""
for qitem in self._qitems():
self._formatQItem(qitem)
def _removeItem(self, item: Any) -> None:
"""Remove an item from this :py:class:`QAdaptedListWidget`.
It is assumed that the item is contained in this
        :py:class:`QAdaptedListWidget`, otherwise a
:py:class:`ValueError` is raised.
"""
qitem = self.takeItem(self._indexOfItem(item))
del qitem
def _currentItem(self) -> Any:
"""Get the currently selected item.
        This may be `None` if no item is selected.
"""
qitem = self.currentItem()
return None if qitem is None else qitem.data(Qt.UserRole)
def _setCurrentItem(self, item: Any) -> None:
"""Select the given entry in this :py:class:`QAdaptedListWidget`.
Arguments
---------
item: Any
The item to become the current item. If the item is not
contained in this :py:class:`QAdaptedListWidget` (e.g. if
`item` is `None`), the current will be set to `None`.
"""
try:
self.setCurrentRow(self._indexOfItem(item))
except LookupError:
self.setCurrentRow(-1)
|
"""This module provides different adapter classes that allow
for a smoother combination of Qt and the Deep Learning ToolBox.
"""
# standard imports
from typing import Iterator, Iterable, Any, Callable
import logging
# Qt imports
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QKeyEvent
from PyQt5.QtWidgets import QComboBox, QListWidget, QListWidgetItem
# GUI imports
from .utils import qtName, protect, QDebug
# logging
LOG = logging.getLogger(__name__)
class ItemAdapter(QDebug):
"""This class provides functionality that can be used by QWidgets that
allow to choose from lists of items, like `QComboBox` and
`QListWidget`. It acts as a translator mapping between the data
structures used in the Deep Learning ToolBox and the Qt widgets.
The QWidgets allow to store items and associated data in different
ways:
* The `QListWidget` uses `QListWidgetItem`s to represent the list items.
Such an item is not a QWidget, but holds some information specifying
display properties (like foreground and background color or icons),
      the text value of the item, and it allows storing additional
      associated user data by introducing specific roles.
    * The `QComboBox` does not use an explicit class to represent
list items, but it also allows to set display properties and
to store associated information for each item using roles.
    Both widgets have the following commonalities:
* New items can be registered with
`QComboBox.addItem(text, [icon], [userData])` and
`QListWidget.addItem(label=text)`
* Items can be accessed by index:
`QComboBox.itemText(index)` and `QListWidget.item(row).text()`
* Items can be accessed by text:
`QComboBox.findText(text)` gives a single index while
`QList.findItems(text)` returns a list of item objects.
* Items can be removed:
`QComboBox.removeItem(index)` and
`QListWidget.takeItem(QListWidget.item(index))
* There may be a current item (selected item). The numerical index
can be obtained by
`QComboBox.currentIndex()` and `QListWidget.currentRow()`
* The text of the current item can be obtained by
`QComboBox.currentText()` and `QListWidget.currentItem().text()`
* data associated with the current item can be obtained by
`QComboBox.currentData(role)` and `QListWidget.currentItem().data(role)`
"""
_itemToText: Callable[[Any], str] = str
def __init_subclass__(cls, itemType: type = None,
itemToText: Callable[[Any], str] = None,
**kwargs) -> None:
super().__init_subclass__(**kwargs)
if itemType is not None:
setattr(cls, qtName(itemType.__name__), cls._currentItem)
setattr(cls, qtName('set_' + itemType.__name__), cls._currentItem)
if itemToText is not None:
cls._itemToText = staticmethod(itemToText)
print(f"DEBUG1[{cls.__name__}]: itemToText:",
itemToText, cls._itemToText)
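    # Illustrative sketch (not from the original source) of how these hooks are meant to be
    # used; `Tool` and `QToolComboBox` are hypothetical names:
    #     class QToolComboBox(QAdaptedComboBox, itemType=Tool,
    #                         itemToText=lambda tool: tool.name):
    #         ...
    # __init_subclass__ then registers item-type-named accessor methods on the subclass
    # and the given function is used to render each item as text.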
def __init__(self, itemToText: Callable[[Any], str] = None,
**kwargs) -> None:
super().__init__(**kwargs)
self.setItemToText(itemToText)
#
# methods to be implemented by subclasses
#
def _items(self) -> Iterator[Any]:
"""An iterator for the items in this
:py:class:`ItemAdapter`.
"""
raise NotImplementedError("A 'ItemAdapter' has to implement "
"the _items() method")
def _addItem(self, item: Any) -> None:
"""Add an item to this :py:class:`ItemAdapter`.
It is assumed that the item is not yet contained in this
:py:class:`ItemAdapter`.
"""
raise NotImplementedError("A 'ItemAdapter' has to implement "
"the _addItem() method")
def _removeItem(self, item: Any) -> None:
"""Remove an item from this :py:class:`ItemAdapter`. It is
assumed that the item is contained in this
:py:class:`ItemAdapter`, otherwise a
:py:class:`ValueError` is raised.
"""
        raise NotImplementedError("A 'ItemAdapter' has to implement "
                                  "the _removeItem() method")
def _currentItem(self) -> Any:
"""Get the currently selected item.
        This may be `None` if no item is selected.
"""
raise NotImplementedError("A 'ItemAdapter' has to implement "
"the _currentItem() method")
def _setCurrentItem(self, item: Any) -> None:
"""Select the given entry in this :py:class:`ItemAdapter`.
Arguments
---------
item: Any
The item to become the current item. If the item is not
contained in this :py:class:`ItemAdapter` (e.g. if
`item` is `None`), the current will be set to `None`.
"""
raise NotImplementedError("A 'ItemAdapter' has to implement "
"the _setCurrentItem() method")
#
# Implemented methods
#
def _countItems(self) -> int:
"""Get the number of items in this :py:class:`ItemAdapter`.
"""
return sum(1 for _ in self._items())
    def _textForItem(self, item: Any) -> str:
        """Get the text to be displayed for a given item.
"""
return self._itemToText(item)
def _formatItem(self, item: Any) -> None:
"""May be implemented by a subclass to format an item.
This method is only called if the item is currently displayed
by this :py:class:`ItemAdapter` (has been added and was not removed),
but it may be called several times for the same item (to
trigger an update of this item).
The base implementation does nothing, but derived classes may
        override this method to allow for fancy formatting.
"""
def _getItemAt(self, index: int) -> Any:
"""
Raises
------
IndexError:
The index provided is invalid.
"""
try:
return next((x for i, x in enumerate(self._items()) if i == index))
except StopIteration:
raise IndexError(f"Index {index} beyond end of items.")
def _getTextAt(self, index: int) -> str:
"""
Raises
------
IndexError:
The index provided is invalid.
"""
return self._textForItem(self._getItemAt(index))
def _indexOfItem(self, item: Any) -> int:
"""
Raises
------
LookupError:
The given item is not found in this :py:class:`ItemAdapter`.
"""
try:
return next(i for i, x in enumerate(self._items()) if x == item)
except StopIteration:
raise LookupError(f"Item {item} not found.")
def _indexOfText(self, text: str) -> int:
"""
Raises
------
LookupError:
The given text is not found in this :py:class:`ItemAdapter`.
"""
try:
return next(i for i, t in enumerate(self._texts()) if t == text)
except StopIteration:
raise LookupError(f"Item with text '{text}' not found")
def _findItem(self, text: str) -> Any:
"""
Raises
------
LookupError:
The given text is not found in this :py:class:`ItemAdapter`.
"""
try:
return next(item for item in self._items()
if self._textForItem(item) == text)
except StopIteration:
raise LookupError(f"Item with text '{text}' not found.")
def _setCurrentText(self, text: str) -> None:
"""
"""
self._setCurrentItem(self._findItem(text))
def _texts(self) -> Iterator[str]:
"""An iterator for the texts presented by this
:py:class:`ItemAdapter`.
"""
for item in self._items():
yield self._textForItem(item)
def _removeText(self, text: str) -> None:
"""Remove the item with the given text. This may be
overwritten by subclasses when a more efficient implementation
is possible.
"""
self._removeItem(self._findItem(text))
def _removeItemAt(self, index: int) -> None:
"""Remove the item at the given index.
Raises
------
IndexError:
The index provided is invalid.
"""
self._removeItem(self._getItemAt(index))
def _removeAllItems(self) -> None:
"""Remove all items in this :py:class:`ItemAdapter`.
"""
try:
self._removeItemAt(0)
except IndexError:
pass # no item left to remove
def _formatAllItems(self) -> None:
"""
"""
for item in self._items():
self._formatItem(item)
def _updateAllItems(self) -> None:
"""Update the display of the list elements. This may be implemented by
subclasses that would like to adapt the style of display
depending on the state of the element.
This method will be called when the list has been updated
(e.g. by directly adding or removing elements, or by filling
the list from some iterable), but subclasses may also call this
        method proactively in response to notifications.
"""
#
# public interface
#
def setFromIterable(self, iterable: Iterable) -> None:
"""Set the items in this :py:class:`ItemAdapter` from an
iterable. This will first remove the old items and then
add the new items.
"""
self._removeAllItems()
for item in iterable:
self._addItem(item)
def updateFromIterable(self, iterable: Iterable) -> None:
"""Update the items in this :py:class:`ItemAdapter` from an iterable.
Items from the iterable, that are not yet contained in the
list are added, while items originally contained in this
:py:class:`ItemAdapter`, that are not iterated by the
iterable, are removed.
"""
# 1. Create a set containing the texts for items already contained
# in this list (this is used for bookkeeping).
bookkeeping = set(self._texts())
# 2. Iterate over entries from the iterable and add entries
# missing in this list.
for item in iterable:
text = self._textForItem(item)
if text in bookkeeping:
bookkeeping.remove(text)
else:
self._addItem(item)
# 3. Remove items from this list that are no longer present
for text in bookkeeping:
self._removeText(text)
def setItemToText(self, itemToText: Callable[[Any], str]) -> None:
"""Set the function to be used when converting items
to their textual presentation.
"""
if itemToText is None:
self.__dict__.pop('_itemToText', None)
else:
self._itemToText = itemToText
self._formatAllItems()
@protect
def keyPressEvent(self, event: QKeyEvent) -> None:
"""Process key events. The :py:class:`ItemAdapter` supports
the following keys:
C: clear the currently selected entry
Note: in a :py:class:`QComboBox` this event is only received
if the combobox is closed (not while currently selecting an entry).
"""
key = event.key()
LOG.debug("ItemAdapter[%s].keyPressEvent: key=%d",
type(self).__name__, key)
if key == Qt.Key_C: # clear
self._setCurrentItem(None)
elif key == Qt.Key_Y: # no itemToText function (inherit from super)
self.setItemToText(None)
elif key == Qt.Key_Z: # simple str() as itemToText function (debug)
self.setItemToText(str)
elif hasattr(super(), 'keyPressEvent'):
super().keyPressEvent(event)
else:
event.ignore()
    def debug(self) -> None:
        """Output debug information for this :py:class:`ItemAdapter`.
"""
if hasattr(super(), 'debug'):
super().debug()
print(f"debug: ItemAdapter[{type(self).__name__}]: "
f"with {self._countItems()} entries:")
for index, item in enumerate(self._items()):
print(f"debug:{'**' if item is self._currentItem() else ' '}"
f"({index+1}) {self._textForItem(item)} "
f"[{repr(item)}]")
class QAdaptedComboBox(ItemAdapter, QComboBox):
"""A :py:class:`QComboBox` implementing the
:py:class:`ItemAdapter` interface.
"""
#
# methods to be implemented by subclasses
#
def _countItems(self) -> int:
"""Get the number of items in this :py:class:`QAdaptedComboBox`.
"""
return self.count()
def _items(self) -> Iterator[Any]:
"""An iterator for the items in this
:py:class:`QAdaptedComboBox`.
"""
for index in range(self.count()):
yield self.itemData(index)
def _texts(self) -> Iterator[str]:
"""An iterator for the texts presented by this
:py:class:`QAdaptedComboBox`.
"""
for index in range(self.count()):
yield self.itemText(index)
def _addItem(self, item: Any) -> None:
"""Add an item to this :py:class:`QAdaptedComboBox`.
It is assumed that the item is not yet contained in this
:py:class:`QAdaptedComboBox`.
"""
self.addItem(self._textForItem(item), item)
self._formatItem(item)
def _removeItem(self, item: Any) -> None:
"""Remove an item from this :py:class:`QAdaptedComboBox`.
It is assumed that the item is contained in this
:py:class:`QAdaptedComboBox`, otherwise a
:py:class:`ValueError` is raised.
"""
self._removeItemAt(self._indexOfItem(item))
def _removeItemAt(self, index: int) -> None:
"""Remove the item at the given index.
"""
self.removeItem(index)
def _removeText(self, text: str) -> None:
"""Remove the item with the given text. This may be
overwritten by subclasses when a more efficient implementation
is possible.
"""
self._removeItemAt(self._indexOfText(text))
def _formatItemAt(self, index: int) -> None:
"""Format the item at the given index to reflect
the state of the underlying item.
This method may be extended by subclasses.
"""
self.setItemText(index, self._textForItem(self.itemData(index)))
def _formatItem(self, item: Any) -> None:
"""Update the format of the item's presentation
in this :py:class:`QAdaptedComboBox`
to reflect its state.
"""
self._formatItemAt(self._indexOfItem(item))
def _formatAllItems(self) -> None:
"""Format all items in this :py:class:`QAdaptedComboBox`.
"""
for index in range(self.count()):
self._formatItemAt(index)
def _currentItem(self) -> Any:
"""Get the currently selected item.
        This may be `None` if no item is selected.
"""
return self.currentData()
def _setCurrentItem(self, item: Any) -> None:
"""Select the given entry in this :py:class:`QAdaptedComboBox`.
Arguments
---------
item: Any
The item to become the current item. If the item is not
contained in this :py:class:`QAdaptedComboBox` (e.g. if
`item` is `None`), the current will be set to `None`.
"""
try:
self.setCurrentIndex(self._indexOfItem(item))
except LookupError:
# For an empty QComboBox or a QComboBox in which no
# current entry is set, the index is -1 (which is also
# returned by QComboBox.findText if the entry is not found).
self.setCurrentIndex(-1)
class QAdaptedListWidget(ItemAdapter, QListWidget):
"""A :py:class:`QListWidget` implementing the
:py:class:`ItemAdapter` interface.
"""
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
self._formater = None
def setListWidgetItemFormater(self, formater:
Callable[[QListWidgetItem], None]) -> None:
"""Set a formater for the list items.
"""
self._formater = formater
self._formatAllItems()
def updateFormat(self) -> None:
"""Update the format of all items in this
:py:class:`QAdaptedListWidget`.
"""
self._formatAllItems()
#
# methods to be implemented by subclasses
#
def _countItems(self) -> int:
"""Get the number of items in this :py:class:`QAdaptedListWidget`.
"""
return self.count()
def _qitem(self, item: Any) -> QListWidgetItem:
"""Get the :py:class:`QListWidgetItem` that holds the given
item.
"""
return next((qitem for qitem in self._qitems()
if qitem.data(Qt.UserRole) is item), None)
def _qitems(self) -> Iterator[QListWidgetItem]:
"""An :py:class:`Iterator` for the :py:class:`QListWidgetItem`
in this :py:class:`QAdaptedListWidget`.
"""
for index in range(self.count()):
yield self.item(index)
def _formatQItem(self, qitem: QListWidgetItem) -> None:
"""Format the given :py:class:`QListWidgetItem` to reflect
the state of the underlying item.
This method may be extended by subclasses.
"""
qitem.setText(self._textForItem(qitem.data(Qt.UserRole)))
if self._formater is not None:
self._formater(qitem)
def _items(self) -> Iterator[Any]:
"""An iterator for the items in this
        :py:class:`QAdaptedListWidget`.
"""
for qitem in self._qitems():
yield qitem.data(Qt.UserRole)
def _texts(self) -> Iterator[str]:
"""An iterator for the texts presented by this
:py:class:`QAdaptedListWidget`.
"""
for qitem in self._qitems():
yield qitem.text()
    def _addItem(self, item: Any) -> None:
        """Add an item to this :py:class:`QAdaptedListWidget`.
It is assumed that the item is not yet contained in this
:py:class:`QAdaptedListWidget`.
"""
qitem = QListWidgetItem(self._textForItem(item))
qitem.setData(Qt.UserRole, item)
self.addItem(qitem)
self._formatQItem(qitem)
def _formatItem(self, item: Any) -> None:
"""Update the format of the item's presentation
in this :py:class:`QAdaptedListWidget`
to reflect its state.
"""
self._formatQItem(self._qitem(item))
def _formatAllItems(self) -> None:
"""Format all items in this :py:class:`QAdaptedListWidget`.
"""
for qitem in self._qitems():
self._formatQItem(qitem)
def _removeItem(self, item: Any) -> None:
"""Remove an item from this :py:class:`QAdaptedListWidget`.
It is assumed that the item is contained in this
        :py:class:`QAdaptedListWidget`, otherwise a
:py:class:`ValueError` is raised.
"""
qitem = self.takeItem(self._indexOfItem(item))
del qitem
def _currentItem(self) -> Any:
"""Get the currently selected item.
        This may be `None` if no item is selected.
"""
qitem = self.currentItem()
return None if qitem is None else qitem.data(Qt.UserRole)
def _setCurrentItem(self, item: Any) -> None:
"""Select the given entry in this :py:class:`QAdaptedListWidget`.
Arguments
---------
item: Any
The item to become the current item. If the item is not
contained in this :py:class:`QAdaptedListWidget` (e.g. if
`item` is `None`), the current will be set to `None`.
"""
try:
self.setCurrentRow(self._indexOfItem(item))
except LookupError:
self.setCurrentRow(-1)
|
import binascii
import errno
import functools
import hashlib
import importlib
import logging
import multiprocessing
import os
import signal
import subprocess
import sys
import tempfile
import threading
import time
from typing import Optional, Sequence, Tuple, Any, Union, Dict
import uuid
import grpc
import warnings
try:
from grpc import aio as aiogrpc
except ImportError:
from grpc.experimental import aio as aiogrpc
import inspect
from inspect import signature
from pathlib import Path
import numpy as np
import ray
from ray.core.generated.gcs_pb2 import ErrorTableData
import ray.ray_constants as ray_constants
from ray._private.tls_utils import load_certs_from_env
# Import psutil after ray so the packaged version is used.
import psutil
pwd = None
if sys.platform != "win32":
import pwd
logger = logging.getLogger(__name__)
# Linux can bind child processes' lifetimes to that of their parents via prctl.
# prctl support is detected dynamically once, and assumed thereafter.
linux_prctl = None
# Windows can bind processes' lifetimes to that of kernel-level "job objects".
# We keep a global job object to tie its lifetime to that of our own process.
win32_job = None
win32_AssignProcessToJobObject = None
def get_user_temp_dir():
if "RAY_TMPDIR" in os.environ:
return os.environ["RAY_TMPDIR"]
elif sys.platform.startswith("linux") and "TMPDIR" in os.environ:
return os.environ["TMPDIR"]
elif sys.platform.startswith("darwin") or sys.platform.startswith("linux"):
        # Ideally we wouldn't need this fallback, but keep it for now
        # for compatibility
tempdir = os.path.join(os.sep, "tmp")
else:
tempdir = tempfile.gettempdir()
return tempdir
def get_ray_temp_dir():
return os.path.join(get_user_temp_dir(), "ray")
def _random_string():
id_hash = hashlib.shake_128()
id_hash.update(uuid.uuid4().bytes)
id_bytes = id_hash.digest(ray_constants.ID_SIZE)
assert len(id_bytes) == ray_constants.ID_SIZE
return id_bytes
def format_error_message(exception_message: str, task_exception: bool = False):
"""Improve the formatting of an exception thrown by a remote function.
This method takes a traceback from an exception and makes it nicer by
removing a few uninformative lines and adding some space to indent the
remaining lines nicely.
Args:
exception_message: A message generated by traceback.format_exc().
Returns:
A string of the formatted exception message.
"""
lines = exception_message.split("\n")
if task_exception:
# For errors that occur inside of tasks, remove lines 1 and 2 which are
# always the same, they just contain information about the worker code.
lines = lines[0:1] + lines[3:]
return "\n".join(lines)
def push_error_to_driver(
worker, error_type: str, message: str, job_id: Optional[str] = None
):
"""Push an error message to the driver to be printed in the background.
Args:
worker: The worker to use.
error_type: The type of the error.
message: The message that will be printed in the background
on the driver.
job_id: The ID of the driver to push the error message to. If this
is None, then the message will be pushed to all drivers.
"""
if job_id is None:
job_id = ray.JobID.nil()
assert isinstance(job_id, ray.JobID)
worker.core_worker.push_error(job_id, error_type, message, time.time())
def construct_error_message(job_id, error_type, message, timestamp):
"""Construct an ErrorTableData object.
Args:
job_id: The ID of the job that the error should go to. If this is
nil, then the error will go to all drivers.
error_type: The type of the error.
message: The error message.
timestamp: The time of the error.
Returns:
The ErrorTableData object.
"""
data = ErrorTableData()
data.job_id = job_id.binary()
data.type = error_type
data.error_message = message
data.timestamp = timestamp
return data
def publish_error_to_driver(
error_type: str,
message: str,
gcs_publisher,
job_id=None,
):
"""Push an error message to the driver to be printed in the background.
Normally the push_error_to_driver function should be used. However, in some
instances, the raylet client is not available, e.g., because the
error happens in Python before the driver or worker has connected to the
backend processes.
Args:
error_type: The type of the error.
message: The message that will be printed in the background
on the driver.
gcs_publisher: The GCS publisher to use.
job_id: The ID of the driver to push the error message to. If this
is None, then the message will be pushed to all drivers.
"""
if job_id is None:
job_id = ray.JobID.nil()
assert isinstance(job_id, ray.JobID)
error_data = construct_error_message(job_id, error_type, message, time.time())
try:
gcs_publisher.publish_error(job_id.hex().encode(), error_data)
except Exception:
logger.exception(f"Failed to publish error {error_data}")
def random_string():
"""Generate a random string to use as an ID.
Note that users may seed numpy, which could cause this function to generate
duplicate IDs. Therefore, we need to seed numpy ourselves, but we can't
interfere with the state of the user's random number generator, so we
extract the state of the random number generator and reset it after we are
done.
TODO(rkn): If we want to later guarantee that these are generated in a
deterministic manner, then we will need to make some changes here.
Returns:
A random byte string of length ray_constants.ID_SIZE.
"""
# Get the state of the numpy random number generator.
numpy_state = np.random.get_state()
# Try to use true randomness.
np.random.seed(None)
# Generate the random ID.
random_id = np.random.bytes(ray_constants.ID_SIZE)
# Reset the state of the numpy random number generator.
np.random.set_state(numpy_state)
return random_id
def decode(byte_str: str, allow_none: bool = False, encode_type: str = "utf-8"):
"""Make this unicode in Python 3, otherwise leave it as bytes.
Args:
byte_str: The byte string to decode.
allow_none: If true, then we will allow byte_str to be None in which
case we will return an empty string. TODO(rkn): Remove this flag.
This is only here to simplify upgrading to flatbuffers 1.10.0.
Returns:
A byte string in Python 2 and a unicode string in Python 3.
"""
if byte_str is None and allow_none:
return ""
if not isinstance(byte_str, bytes):
raise ValueError(f"The argument {byte_str} must be a bytes object.")
if sys.version_info >= (3, 0):
return byte_str.decode(encode_type)
else:
return byte_str
def ensure_str(s, encoding="utf-8", errors="strict"):
"""Coerce *s* to `str`.
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
if isinstance(s, str):
return s
else:
assert isinstance(s, bytes)
return s.decode(encoding, errors)
def binary_to_object_ref(binary_object_ref):
return ray.ObjectRef(binary_object_ref)
def binary_to_task_id(binary_task_id):
return ray.TaskID(binary_task_id)
def binary_to_hex(identifier):
hex_identifier = binascii.hexlify(identifier)
if sys.version_info >= (3, 0):
hex_identifier = hex_identifier.decode()
return hex_identifier
def hex_to_binary(hex_identifier):
return binascii.unhexlify(hex_identifier)
# TODO(qwang): Remove these helper functions
# once we separate `WorkerID` from `UniqueID`.
def compute_job_id_from_driver(driver_id):
assert isinstance(driver_id, ray.WorkerID)
return ray.JobID(driver_id.binary()[0 : ray.JobID.size()])
def compute_driver_id_from_job(job_id):
assert isinstance(job_id, ray.JobID)
rest_length = ray_constants.ID_SIZE - job_id.size()
driver_id_str = job_id.binary() + (rest_length * b"\xff")
return ray.WorkerID(driver_id_str)
def get_cuda_visible_devices():
"""Get the device IDs in the CUDA_VISIBLE_DEVICES environment variable.
Returns:
devices (List[str]): If CUDA_VISIBLE_DEVICES is set, returns a
list of strings representing the IDs of the visible GPUs.
If it is not set or is set to NoDevFiles, returns empty list.
"""
gpu_ids_str = os.environ.get("CUDA_VISIBLE_DEVICES", None)
if gpu_ids_str is None:
return None
if gpu_ids_str == "":
return []
if gpu_ids_str == "NoDevFiles":
return []
# GPU identifiers are given as strings representing integers or UUIDs.
return list(gpu_ids_str.split(","))
last_set_gpu_ids = None
def set_cuda_visible_devices(gpu_ids):
"""Set the CUDA_VISIBLE_DEVICES environment variable.
Args:
gpu_ids (List[str]): List of strings representing GPU IDs.
"""
if os.environ.get(ray_constants.NOSET_CUDA_VISIBLE_DEVICES_ENV_VAR):
return
global last_set_gpu_ids
if last_set_gpu_ids == gpu_ids:
return # optimization: already set
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join([str(i) for i in gpu_ids])
last_set_gpu_ids = gpu_ids
def resources_from_ray_options(options_dict: Dict[str, Any]) -> Dict[str, Any]:
"""Determine a task's resource requirements.
Args:
options_dict: The dictionary that contains resources requirements.
Returns:
A dictionary of the resource requirements for the task.
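        For example (illustrative), {"num_cpus": 2, "num_gpus": 1,
        "resources": {"custom": 1}} maps to {"custom": 1, "CPU": 2, "GPU": 1}.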
"""
resources = (options_dict.get("resources") or {}).copy()
if "CPU" in resources or "GPU" in resources:
raise ValueError(
"The resources dictionary must not contain the key 'CPU' or 'GPU'"
)
elif "memory" in resources or "object_store_memory" in resources:
raise ValueError(
"The resources dictionary must not "
"contain the key 'memory' or 'object_store_memory'"
)
num_cpus = options_dict.get("num_cpus")
num_gpus = options_dict.get("num_gpus")
memory = options_dict.get("memory")
object_store_memory = options_dict.get("object_store_memory")
accelerator_type = options_dict.get("accelerator_type")
if num_cpus is not None:
resources["CPU"] = num_cpus
if num_gpus is not None:
resources["GPU"] = num_gpus
if memory is not None:
resources["memory"] = ray_constants.to_memory_units(memory, round_up=True)
if object_store_memory is not None:
resources["object_store_memory"] = ray_constants.to_memory_units(
object_store_memory, round_up=True
)
if accelerator_type is not None:
resources[
f"{ray_constants.RESOURCE_CONSTRAINT_PREFIX}{accelerator_type}"
] = 0.001
return resources
class Unbuffered(object):
    """There's no "built-in" solution to programmatically disabling buffering of
text files. Ray expects stdout/err to be text files, so creating an
unbuffered binary file is unacceptable.
See
https://mail.python.org/pipermail/tutor/2003-November/026645.html.
https://docs.python.org/3/library/functions.html#open
"""
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def writelines(self, datas):
self.stream.writelines(datas)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
def open_log(path, unbuffered=False, **kwargs):
"""
Opens the log file at `path`, with the provided kwargs being given to
`open`.
"""
# Disable buffering, see test_advanced_3.py::test_logging_to_driver
kwargs.setdefault("buffering", 1)
kwargs.setdefault("mode", "a")
kwargs.setdefault("encoding", "utf-8")
stream = open(path, **kwargs)
if unbuffered:
return Unbuffered(stream)
else:
return stream
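# --- Illustrative sketch (added; not part of the original Ray module). ---
# open_log() opens an append-mode, line-buffered text file; here it is pointed
# at a throwaway temporary path instead of a real Ray log file.
def _example_open_log():
    import tempfile  # local import to keep the sketch self-contained
    path = os.path.join(tempfile.mkdtemp(), "example.log")
    with open_log(path) as f:
        f.write("hello from open_log\n")
    return path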
def get_system_memory(
# For cgroups v1:
memory_limit_filename="/sys/fs/cgroup/memory/memory.limit_in_bytes",
# For cgroups v2:
memory_limit_filename_v2="/sys/fs/cgroup/memory.max",
):
"""Return the total amount of system memory in bytes.
Returns:
The total amount of system memory in bytes.
"""
# Try to accurately figure out the memory limit if we are in a docker
# container. Note that this file is not specific to Docker and its value is
# often much larger than the actual amount of memory.
docker_limit = None
if os.path.exists(memory_limit_filename):
with open(memory_limit_filename, "r") as f:
docker_limit = int(f.read())
elif os.path.exists(memory_limit_filename_v2):
with open(memory_limit_filename_v2, "r") as f:
max_file = f.read()
if max_file.isnumeric():
docker_limit = int(max_file)
else:
# max_file is "max", i.e. is unset.
docker_limit = None
# Use psutil if it is available.
psutil_memory_in_bytes = psutil.virtual_memory().total
if docker_limit is not None:
# We take the min because the cgroup limit is very large if we aren't
# in Docker.
return min(docker_limit, psutil_memory_in_bytes)
return psutil_memory_in_bytes
def _get_docker_cpus(
cpu_quota_file_name="/sys/fs/cgroup/cpu/cpu.cfs_quota_us",
cpu_period_file_name="/sys/fs/cgroup/cpu/cpu.cfs_period_us",
cpuset_file_name="/sys/fs/cgroup/cpuset/cpuset.cpus",
cpu_max_file_name="/sys/fs/cgroup/cpu.max",
) -> Optional[float]:
# TODO (Alex): Don't implement this logic ourselves.
# Docker has 2 underlying ways of implementing CPU limits:
# https://docs.docker.com/config/containers/resource_constraints/#configure-the-default-cfs-scheduler
# 1. --cpuset-cpus 2. --cpus or --cpu-quota/--cpu-period (--cpu-shares is a
# soft limit so we don't worry about it). For Ray's purposes, if we use
# docker, the number of vCPUs on a machine is whichever is set (ties broken
# by smaller value).
cpu_quota = None
# See: https://bugs.openjdk.java.net/browse/JDK-8146115
if os.path.exists(cpu_quota_file_name) and os.path.exists(cpu_period_file_name):
try:
with open(cpu_quota_file_name, "r") as quota_file, open(
cpu_period_file_name, "r"
) as period_file:
cpu_quota = float(quota_file.read()) / float(period_file.read())
except Exception:
logger.exception("Unexpected error calculating docker cpu quota.")
# Look at cpu.max for cgroups v2
elif os.path.exists(cpu_max_file_name):
try:
max_file = open(cpu_max_file_name).read()
quota_str, period_str = max_file.split()
if quota_str.isnumeric() and period_str.isnumeric():
cpu_quota = float(quota_str) / float(period_str)
else:
# quota_str is "max" meaning the cpu quota is unset
cpu_quota = None
except Exception:
logger.exception("Unexpected error calculating docker cpu quota.")
if (cpu_quota is not None) and (cpu_quota < 0):
cpu_quota = None
elif cpu_quota == 0:
# Round up in case the cpu limit is less than 1.
cpu_quota = 1
cpuset_num = None
if os.path.exists(cpuset_file_name):
try:
with open(cpuset_file_name) as cpuset_file:
ranges_as_string = cpuset_file.read()
ranges = ranges_as_string.split(",")
cpu_ids = []
for num_or_range in ranges:
if "-" in num_or_range:
start, end = num_or_range.split("-")
cpu_ids.extend(list(range(int(start), int(end) + 1)))
else:
cpu_ids.append(int(num_or_range))
cpuset_num = len(cpu_ids)
except Exception:
logger.exception("Unexpected error calculating docker cpuset ids.")
# Possible to-do: Parse cgroups v2's cpuset.cpus.effective for the number
# of accessible CPUs.
if cpu_quota and cpuset_num:
return min(cpu_quota, cpuset_num)
return cpu_quota or cpuset_num
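# --- Illustrative sketch (added; not part of the original Ray module). ---
# _get_docker_cpus() is parameterized on the cgroup file paths, so it can be
# exercised against temporary files that mimic Docker's CFS quota/period
# settings. A 200000us quota over a 100000us period corresponds to 2 CPUs.
def _example_get_docker_cpus():
    import tempfile  # local import to keep the sketch self-contained
    with tempfile.TemporaryDirectory() as tmp:
        quota_path = os.path.join(tmp, "cpu.cfs_quota_us")
        period_path = os.path.join(tmp, "cpu.cfs_period_us")
        with open(quota_path, "w") as f:
            f.write("200000")
        with open(period_path, "w") as f:
            f.write("100000")
        # The cpuset and cpu.max paths are left nonexistent so that only the
        # v1 quota/period pair is considered; the result is 2.0.
        return _get_docker_cpus(
            cpu_quota_file_name=quota_path,
            cpu_period_file_name=period_path,
            cpuset_file_name=os.path.join(tmp, "missing"),
            cpu_max_file_name=os.path.join(tmp, "missing"),
        )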
def get_num_cpus() -> int:
cpu_count = multiprocessing.cpu_count()
if os.environ.get("RAY_USE_MULTIPROCESSING_CPU_COUNT"):
logger.info(
"Detected RAY_USE_MULTIPROCESSING_CPU_COUNT=1: Using "
"multiprocessing.cpu_count() to detect the number of CPUs. "
"This may be inconsistent when used inside docker. "
"To correctly detect CPUs, unset the env var: "
"`RAY_USE_MULTIPROCESSING_CPU_COUNT`."
)
return cpu_count
try:
# Not easy to get cpu count in docker, see:
# https://bugs.python.org/issue36054
docker_count = _get_docker_cpus()
if docker_count is not None and docker_count != cpu_count:
# Don't log this warning if we're on K8s or if the warning is
# explicitly disabled.
if (
"RAY_DISABLE_DOCKER_CPU_WARNING" not in os.environ
and "KUBERNETES_SERVICE_HOST" not in os.environ
):
logger.warning(
"Detecting docker specified CPUs. In "
"previous versions of Ray, CPU detection in containers "
"was incorrect. Please ensure that Ray has enough CPUs "
"allocated. As a temporary workaround to revert to the "
"prior behavior, set "
"`RAY_USE_MULTIPROCESSING_CPU_COUNT=1` as an env var "
"before starting Ray. Set the env var: "
"`RAY_DISABLE_DOCKER_CPU_WARNING=1` to mute this warning."
)
# TODO (Alex): We should probably add support for fractional cpus.
if int(docker_count) != float(docker_count):
logger.warning(
f"Ray currently does not support initializing Ray "
f"with fractional cpus. Your num_cpus will be "
f"truncated from {docker_count} to "
f"{int(docker_count)}."
)
docker_count = int(docker_count)
cpu_count = docker_count
except Exception:
# `nproc` and cgroup are Linux-only. Docker itself only runs natively on
# Linux (elsewhere it runs inside a Linux VM), so this is fine.
pass
return cpu_count
def get_used_memory():
"""Return the currently used system memory in bytes.
Returns:
The total amount of used memory in bytes.
"""
# Try to accurately figure out the memory usage if we are in a docker
# container.
docker_usage = None
# For cgroups v1:
memory_usage_filename = "/sys/fs/cgroup/memory/memory.usage_in_bytes"
# For cgroups v2:
memory_usage_filename_v2 = "/sys/fs/cgroup/memory.current"
if os.path.exists(memory_usage_filename):
with open(memory_usage_filename, "r") as f:
docker_usage = int(f.read())
elif os.path.exists(memory_usage_filename_v2):
with open(memory_usage_filename_v2, "r") as f:
docker_usage = int(f.read())
# Use psutil if it is available.
psutil_memory_in_bytes = psutil.virtual_memory().used
if docker_usage is not None:
# We take the min because the cgroup limit is very large if we aren't
# in Docker.
return min(docker_usage, psutil_memory_in_bytes)
return psutil_memory_in_bytes
def estimate_available_memory():
"""Return the currently available amount of system memory in bytes.
Returns:
The total amount of available memory in bytes. Based on the used
and total memory.
"""
return get_system_memory() - get_used_memory()
def get_shared_memory_bytes():
"""Get the size of the shared memory file system.
Returns:
The size of the shared memory file system in bytes.
"""
# Make sure this is only called on Linux.
assert sys.platform == "linux" or sys.platform == "linux2"
shm_fd = os.open("/dev/shm", os.O_RDONLY)
try:
shm_fs_stats = os.fstatvfs(shm_fd)
# The value shm_fs_stats.f_bsize is the block size and the
# value shm_fs_stats.f_bavail is the number of available
# blocks.
shm_avail = shm_fs_stats.f_bsize * shm_fs_stats.f_bavail
finally:
os.close(shm_fd)
return shm_avail
def check_oversized_function(
pickled: bytes, name: str, obj_type: str, worker: "ray.Worker"
) -> None:
"""Send a warning message if the pickled function is too large.
Args:
pickled: the pickled function.
name: name of the pickled object.
obj_type: type of the pickled object, can be 'function',
'remote function', or 'actor'.
worker: the worker used to send warning message. message will be logged
locally if None.
"""
length = len(pickled)
if length <= ray_constants.FUNCTION_SIZE_WARN_THRESHOLD:
return
elif length < ray_constants.FUNCTION_SIZE_ERROR_THRESHOLD:
warning_message = (
"The {} {} is very large ({} MiB). "
"Check that its definition is not implicitly capturing a large "
"array or other object in scope. Tip: use ray.put() to put large "
"objects in the Ray object store."
).format(obj_type, name, length // (1024 * 1024))
if worker:
push_error_to_driver(
worker,
ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR,
"Warning: " + warning_message,
job_id=worker.current_job_id,
)
else:
error = (
"The {} {} is too large ({} MiB > FUNCTION_SIZE_ERROR_THRESHOLD={}"
" MiB). Check that its definition is not implicitly capturing a "
"large array or other object in scope. Tip: use ray.put() to "
"put large objects in the Ray object store."
).format(
obj_type,
name,
length // (1024 * 1024),
ray_constants.FUNCTION_SIZE_ERROR_THRESHOLD // (1024 * 1024),
)
raise ValueError(error)
def is_main_thread():
return threading.current_thread().name == "MainThread"
def detect_fate_sharing_support_win32():
global win32_job, win32_AssignProcessToJobObject
if win32_job is None and sys.platform == "win32":
import ctypes
try:
from ctypes.wintypes import BOOL, DWORD, HANDLE, LPVOID, LPCWSTR
kernel32 = ctypes.WinDLL("kernel32")
kernel32.CreateJobObjectW.argtypes = (LPVOID, LPCWSTR)
kernel32.CreateJobObjectW.restype = HANDLE
sijo_argtypes = (HANDLE, ctypes.c_int, LPVOID, DWORD)
kernel32.SetInformationJobObject.argtypes = sijo_argtypes
kernel32.SetInformationJobObject.restype = BOOL
kernel32.AssignProcessToJobObject.argtypes = (HANDLE, HANDLE)
kernel32.AssignProcessToJobObject.restype = BOOL
kernel32.IsDebuggerPresent.argtypes = ()
kernel32.IsDebuggerPresent.restype = BOOL
except (AttributeError, TypeError, ImportError):
kernel32 = None
job = kernel32.CreateJobObjectW(None, None) if kernel32 else None
job = subprocess.Handle(job) if job else job
if job:
from ctypes.wintypes import DWORD, LARGE_INTEGER, ULARGE_INTEGER
class JOBOBJECT_BASIC_LIMIT_INFORMATION(ctypes.Structure):
_fields_ = [
("PerProcessUserTimeLimit", LARGE_INTEGER),
("PerJobUserTimeLimit", LARGE_INTEGER),
("LimitFlags", DWORD),
("MinimumWorkingSetSize", ctypes.c_size_t),
("MaximumWorkingSetSize", ctypes.c_size_t),
("ActiveProcessLimit", DWORD),
("Affinity", ctypes.c_size_t),
("PriorityClass", DWORD),
("SchedulingClass", DWORD),
]
class IO_COUNTERS(ctypes.Structure):
_fields_ = [
("ReadOperationCount", ULARGE_INTEGER),
("WriteOperationCount", ULARGE_INTEGER),
("OtherOperationCount", ULARGE_INTEGER),
("ReadTransferCount", ULARGE_INTEGER),
("WriteTransferCount", ULARGE_INTEGER),
("OtherTransferCount", ULARGE_INTEGER),
]
class JOBOBJECT_EXTENDED_LIMIT_INFORMATION(ctypes.Structure):
_fields_ = [
("BasicLimitInformation", JOBOBJECT_BASIC_LIMIT_INFORMATION),
("IoInfo", IO_COUNTERS),
("ProcessMemoryLimit", ctypes.c_size_t),
("JobMemoryLimit", ctypes.c_size_t),
("PeakProcessMemoryUsed", ctypes.c_size_t),
("PeakJobMemoryUsed", ctypes.c_size_t),
]
debug = kernel32.IsDebuggerPresent()
# Defined in <WinNT.h>; also available here:
# https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/nf-jobapi2-setinformationjobobject
JobObjectExtendedLimitInformation = 9
JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x00000800
JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION = 0x00000400
JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x00002000
buf = JOBOBJECT_EXTENDED_LIMIT_INFORMATION()
buf.BasicLimitInformation.LimitFlags = (
(0 if debug else JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE)
| JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION
| JOB_OBJECT_LIMIT_BREAKAWAY_OK
)
infoclass = JobObjectExtendedLimitInformation
if not kernel32.SetInformationJobObject(
job, infoclass, ctypes.byref(buf), ctypes.sizeof(buf)
):
job = None
win32_AssignProcessToJobObject = (
kernel32.AssignProcessToJobObject if kernel32 is not None else False
)
win32_job = job if job else False
return bool(win32_job)
def detect_fate_sharing_support_linux():
global linux_prctl
if linux_prctl is None and sys.platform.startswith("linux"):
try:
from ctypes import c_int, c_ulong, CDLL
prctl = CDLL(None).prctl
prctl.restype = c_int
prctl.argtypes = [c_int, c_ulong, c_ulong, c_ulong, c_ulong]
except (AttributeError, TypeError):
prctl = None
linux_prctl = prctl if prctl else False
return bool(linux_prctl)
def detect_fate_sharing_support():
result = None
if sys.platform == "win32":
result = detect_fate_sharing_support_win32()
elif sys.platform.startswith("linux"):
result = detect_fate_sharing_support_linux()
return result
def set_kill_on_parent_death_linux():
"""Ensures this process dies if its parent dies (fate-sharing).
Linux-only. Must be called in preexec_fn (i.e. by the child).
"""
if detect_fate_sharing_support_linux():
import signal
PR_SET_PDEATHSIG = 1
if linux_prctl(PR_SET_PDEATHSIG, signal.SIGKILL, 0, 0, 0) != 0:
import ctypes
raise OSError(ctypes.get_errno(), "prctl(PR_SET_PDEATHSIG) failed")
else:
assert False, "PR_SET_PDEATHSIG used despite being unavailable"
def set_kill_child_on_death_win32(child_proc):
"""Ensures the child process dies if this process dies (fate-sharing).
Windows-only. Must be called by the parent, after spawning the child.
Args:
child_proc: The subprocess.Popen or subprocess.Handle object.
"""
if isinstance(child_proc, subprocess.Popen):
child_proc = child_proc._handle
assert isinstance(child_proc, subprocess.Handle)
if detect_fate_sharing_support_win32():
if not win32_AssignProcessToJobObject(win32_job, int(child_proc)):
import ctypes
raise OSError(ctypes.get_last_error(), "AssignProcessToJobObject() failed")
else:
assert False, "AssignProcessToJobObject used despite being unavailable"
def set_sigterm_handler(sigterm_handler):
"""Registers a handler for SIGTERM in a platform-compatible manner."""
if sys.platform == "win32":
# Note that these signal handlers only work for console applications.
# TODO(mehrdadn): implement graceful process termination mechanism
# SIGINT is Ctrl+C, SIGBREAK is Ctrl+Break.
signal.signal(signal.SIGBREAK, sigterm_handler)
else:
signal.signal(signal.SIGTERM, sigterm_handler)
def try_make_directory_shared(directory_path):
try:
os.chmod(directory_path, 0o0777)
except OSError as e:
# Silently suppress the PermissionError that is thrown by the chmod.
# This is done because the user attempting to change the permissions
# on a directory may not own it. The chmod is attempted whether the
# directory is new or not to avoid race conditions.
# ray-project/ray/#3591
if e.errno in [errno.EACCES, errno.EPERM]:
pass
else:
raise
def try_to_create_directory(directory_path):
"""Attempt to create a directory that is globally readable/writable.
Args:
directory_path: The path of the directory to create.
"""
directory_path = os.path.expanduser(directory_path)
os.makedirs(directory_path, exist_ok=True)
# Change the log directory permissions so others can use it. This is
# important when multiple people are using the same machine.
try_make_directory_shared(directory_path)
def try_to_symlink(symlink_path, target_path):
"""Attempt to create a symlink.
If the symlink path exists and isn't a symlink, the symlink will not be
created. If a symlink already exists at the path, it will be removed and
replaced.
Args:
symlink_path: The path at which to create the symlink.
target_path: The path the symlink should point to.
"""
symlink_path = os.path.expanduser(symlink_path)
target_path = os.path.expanduser(target_path)
if os.path.exists(symlink_path):
if os.path.islink(symlink_path):
# Try to remove existing symlink.
try:
os.remove(symlink_path)
except OSError:
return
else:
# There's an existing non-symlink file, don't overwrite it.
return
try:
os.symlink(target_path, symlink_path)
except OSError:
return
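# --- Illustrative sketch (added; not part of the original Ray module). ---
# Creates a world-readable directory and points a "latest"-style symlink at it,
# similar to how a session directory might be linked. Paths are temporary
# placeholders.
def _example_directory_and_symlink():
    import tempfile  # local import to keep the sketch self-contained
    base = tempfile.mkdtemp()
    target = os.path.join(base, "session_2024")
    link = os.path.join(base, "session_latest")
    try_to_create_directory(target)
    try_to_symlink(link, target)
    # On platforms where symlink creation fails, try_to_symlink gives up
    # silently, so islink() may be False there.
    return os.path.islink(link)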
def get_user():
if pwd is None:
return ""
try:
return pwd.getpwuid(os.getuid()).pw_name
except Exception:
return ""
def get_function_args(callable):
all_parameters = frozenset(signature(callable).parameters)
return list(all_parameters)
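# --- Illustrative sketch (added; not part of the original Ray module). ---
# get_function_args() returns the parameter names of any callable; order is not
# guaranteed because a frozenset is used internally.
def _example_get_function_args():
    def f(a, b, *, c=None):
        return a, b, c
    assert sorted(get_function_args(f)) == ["a", "b", "c"]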
def get_conda_bin_executable(executable_name):
"""
Return path to the specified executable, assumed to be discoverable within
the 'bin' subdirectory of a conda installation. Adapted from
https://github.com/mlflow/mlflow.
"""
# Use CONDA_EXE as per https://github.com/conda/conda/issues/7126
if "CONDA_EXE" in os.environ:
conda_bin_dir = os.path.dirname(os.environ["CONDA_EXE"])
return os.path.join(conda_bin_dir, executable_name)
return executable_name
def get_conda_env_dir(env_name):
"""Find and validate the conda directory for a given conda environment.
For example, given the environment name `tf1`, this function checks
the existence of the corresponding conda directory, e.g.
`/Users/scaly/anaconda3/envs/tf1`, and returns it.
"""
conda_prefix = os.environ.get("CONDA_PREFIX")
if conda_prefix is None:
# The caller is neither in a conda env nor in the (base) env. This is rare
# because by default, new terminals start in (base), but we can still
# support this case.
conda_exe = os.environ.get("CONDA_EXE")
if conda_exe is None:
raise ValueError(
"Cannot find environment variables set by conda. "
"Please verify conda is installed."
)
# Example: CONDA_EXE=$HOME/anaconda3/bin/python
# Strip out /bin/python by going up two parent directories.
conda_prefix = str(Path(conda_exe).parent.parent)
# There are two cases:
# 1. We are in a conda (base) env: CONDA_DEFAULT_ENV=base and
# CONDA_PREFIX=$HOME/anaconda3
# 2. We are in a user-created conda env: CONDA_DEFAULT_ENV=$env_name and
# CONDA_PREFIX=$HOME/anaconda3/envs/$current_env_name
if os.environ.get("CONDA_DEFAULT_ENV") == "base":
# Caller's current environment is (base).
# Not recommended by conda, but we can still support it.
if env_name == "base":
# Desired environment is (base), located at e.g. $HOME/anaconda3
env_dir = conda_prefix
else:
# Desired environment is user-created, e.g.
# $HOME/anaconda3/envs/$env_name
env_dir = os.path.join(conda_prefix, "envs", env_name)
else:
# Now `conda_prefix` should be something like
# $HOME/anaconda3/envs/$current_env_name
# We want to replace the last component with the desired env name.
conda_envs_dir = os.path.split(conda_prefix)[0]
env_dir = os.path.join(conda_envs_dir, env_name)
if not os.path.isdir(env_dir):
raise ValueError(
"conda env "
+ env_name
+ " not found in conda envs directory. Run `conda env list` to "
+ "verify the name is correct."
)
return env_dir
def get_call_location(back: int = 1):
"""
Get the location (filename and line number) of a function caller, `back`
frames up the stack.
Args:
back: The number of frames to go up the stack, not including this
function.
"""
stack = inspect.stack()
try:
frame = stack[back + 1]
return f"{frame.filename}:{frame.lineno}"
except IndexError:
return "UNKNOWN"
# Used to only print a deprecation warning once for a given function if we
# don't wish to spam the caller.
_PRINTED_WARNING = set()
# The following is inspired by
# https://github.com/tensorflow/tensorflow/blob/dec8e0b11f4f87693b67e125e67dfbc68d26c205/tensorflow/python/util/deprecation.py#L274-L329
def deprecated(
instructions: Optional[str] = None,
removal_release: Optional[str] = None,
removal_date: Optional[str] = None,
warn_once: bool = True,
):
"""
Creates a decorator for marking functions as deprecated. The decorator
will log a deprecation warning on the first (or all, see `warn_once` arg)
invocations, and will otherwise leave the wrapped function unchanged.
Args:
instructions: Instructions for the caller to update their code.
removal_release: The release in which this deprecated function
will be removed. Only one of removal_release and removal_date
should be specified. If neither is specified, we'll warn that
the function will be removed "in a future release".
removal_date: The date on which this deprecated function will be
removed. Only one of removal_release and removal_date should be
specified. If neither is specified, we'll warn that
the function will be removed "in a future release".
warn_once: If true, the deprecation warning will only be logged
on the first invocation. Otherwise, the deprecation warning will
be logged on every invocation. Defaults to True.
Returns:
A decorator to be used for wrapping deprecated functions.
"""
if removal_release is not None and removal_date is not None:
raise ValueError(
"Only one of removal_release and removal_date should be specified."
)
def deprecated_wrapper(func):
@functools.wraps(func)
def new_func(*args, **kwargs):
global _PRINTED_WARNING
if func not in _PRINTED_WARNING:
if warn_once:
_PRINTED_WARNING.add(func)
msg = (
"From {}: {} (from {}) is deprecated and will ".format(
get_call_location(), func.__name__, func.__module__
)
+ "be removed "
+ (
f"in version {removal_release}."
if removal_release is not None
else f"after {removal_date}"
if removal_date is not None
else "in a future version"
)
+ (f" {instructions}" if instructions is not None else "")
)
warnings.warn(msg)
return func(*args, **kwargs)
return new_func
return deprecated_wrapper
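# --- Illustrative sketch (added; not part of the original Ray module). ---
# Wrapping a hypothetical helper with @deprecated logs a warning (via
# warnings.warn) on first use and then calls straight through. The release
# string "99.0.0" and the suggested replacement are placeholders.
@deprecated(instructions="Use a hypothetical new_helper() instead.", removal_release="99.0.0")
def _example_old_helper(x):
    return x + 1
# Calling _example_old_helper(1) emits the warning once (warn_once=True by
# default) and returns 2.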
def import_attr(full_path: str):
"""Given a full import path to a module attr, return the imported attr.
For example, the following are equivalent:
MyClass = import_attr("module.submodule:MyClass")
MyClass = import_attr("module.submodule.MyClass")
from module.submodule import MyClass
Returns:
Imported attr
"""
if full_path is None:
raise TypeError("import path cannot be None")
if ":" in full_path:
if full_path.count(":") > 1:
raise ValueError(
f'Got invalid import path "{full_path}". An '
"import path may have at most one colon."
)
module_name, attr_name = full_path.split(":")
else:
last_period_idx = full_path.rfind(".")
module_name = full_path[:last_period_idx]
attr_name = full_path[last_period_idx + 1 :]
module = importlib.import_module(module_name)
return getattr(module, attr_name)
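# --- Illustrative sketch (added; not part of the original Ray module). ---
# The two import-path spellings accepted by import_attr() resolve to the same
# attribute.
def _example_import_attr():
    join_a = import_attr("os.path:join")
    join_b = import_attr("os.path.join")
    assert join_a is join_b is os.path.join
    return join_a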
def get_wheel_filename(
sys_platform: str = sys.platform,
ray_version: str = ray.__version__,
py_version: str = f"{sys.version_info.major}{sys.version_info.minor}",
) -> str:
"""Returns the filename used for the nightly Ray wheel.
Args:
sys_platform: The platform as returned by sys.platform. Examples:
"darwin", "linux", "win32"
ray_version: The Ray version as returned by ray.__version__ or
`ray --version`. Examples: "3.0.0.dev0"
py_version (str):
The major and minor Python versions concatenated. Examples: "36",
"37", "38", "39"
Returns:
The wheel file name. Examples:
ray-3.0.0.dev0-cp38-cp38-manylinux2014_x86_64.whl
"""
assert py_version in ["36", "37", "38", "39"], py_version
os_strings = {
"darwin": "macosx_10_15_x86_64"
if py_version in ["38", "39"]
else "macosx_10_15_intel",
"linux": "manylinux2014_x86_64",
"win32": "win_amd64",
}
assert sys_platform in os_strings, sys_platform
wheel_filename = (
f"ray-{ray_version}-cp{py_version}-"
f"cp{py_version}{'m' if py_version in ['36', '37'] else ''}"
f"-{os_strings[sys_platform]}.whl"
)
return wheel_filename
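# --- Illustrative sketch (added; not part of the original Ray module). ---
# Builds the wheel filename for an explicit platform/Python combination rather
# than the running interpreter; the version strings are examples only.
def _example_get_wheel_filename():
    name = get_wheel_filename(
        sys_platform="linux", ray_version="3.0.0.dev0", py_version="38"
    )
    # -> "ray-3.0.0.dev0-cp38-cp38-manylinux2014_x86_64.whl"
    return name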
def get_master_wheel_url(
ray_commit: str = ray.__commit__,
sys_platform: str = sys.platform,
ray_version: str = ray.__version__,
py_version: str = f"{sys.version_info.major}{sys.version_info.minor}",
) -> str:
"""Return the URL for the wheel from a specific commit."""
filename = get_wheel_filename(
sys_platform=sys_platform, ray_version=ray_version, py_version=py_version
)
return (
f"https://s3-us-west-2.amazonaws.com/ray-wheels/master/"
f"{ray_commit}/{filename}"
)
def get_release_wheel_url(
ray_commit: str = ray.__commit__,
sys_platform: str = sys.platform,
ray_version: str = ray.__version__,
py_version: str = f"{sys.version_info.major}{sys.version_info.minor}",
) -> str:
"""Return the URL for the wheel for a specific release."""
filename = get_wheel_filename(
sys_platform=sys_platform, ray_version=ray_version, py_version=py_version
)
return (
f"https://ray-wheels.s3-us-west-2.amazonaws.com/releases/"
f"{ray_version}/{ray_commit}/{filename}"
)
# e.g. https://ray-wheels.s3-us-west-2.amazonaws.com/releases/1.4.0rc1/e7c7
# f6371a69eb727fa469e4cd6f4fbefd143b4c/ray-1.4.0rc1-cp36-cp36m-manylinux201
# 4_x86_64.whl
def validate_namespace(namespace: str):
if not isinstance(namespace, str):
raise TypeError("namespace must be None or a string.")
elif namespace == "":
raise ValueError(
'"" is not a valid namespace. ' "Pass None to not specify a namespace."
)
def init_grpc_channel(
address: str,
options: Optional[Sequence[Tuple[str, Any]]] = None,
asynchronous: bool = False,
):
grpc_module = aiogrpc if asynchronous else grpc
if os.environ.get("RAY_USE_TLS", "0").lower() in ("1", "true"):
server_cert_chain, private_key, ca_cert = load_certs_from_env()
credentials = grpc.ssl_channel_credentials(
certificate_chain=server_cert_chain,
private_key=private_key,
root_certificates=ca_cert,
)
channel = grpc_module.secure_channel(address, credentials, options=options)
else:
channel = grpc_module.insecure_channel(address, options=options)
return channel
def check_dashboard_dependencies_installed() -> bool:
"""Returns True if Ray Dashboard dependencies are installed.
Checks to see if we should start the dashboard agent or not based on the
Ray installation version the user has installed (ray vs. ray[default]).
Unfortunately there doesn't seem to be a cleaner way to detect this other
than just blindly importing the relevant packages.
"""
try:
import ray.dashboard.optional_deps # noqa: F401
return True
except ImportError:
return False
def internal_kv_list_with_retry(gcs_client, prefix, namespace, num_retries=20):
result = None
if isinstance(prefix, str):
prefix = prefix.encode()
if isinstance(namespace, str):
namespace = namespace.encode()
for _ in range(num_retries):
try:
result = gcs_client.internal_kv_keys(prefix, namespace)
except Exception as e:
if isinstance(e, grpc.RpcError) and e.code() in (
grpc.StatusCode.UNAVAILABLE,
grpc.StatusCode.UNKNOWN,
):
logger.warning(
f"Unable to connect to GCS at {gcs_client.address}. "
"Check that (1) Ray GCS with matching version started "
"successfully at the specified address, and (2) there is "
"no firewall setting preventing access."
)
else:
logger.exception("Internal KV List failed")
result = None
if result is not None:
break
else:
logger.debug(f"Fetched {prefix}=None from KV. Retrying.")
time.sleep(2)
if result is None:
raise RuntimeError(
f"Could not list '{prefix}' from GCS. Did GCS start successfully?"
)
return result
def internal_kv_get_with_retry(gcs_client, key, namespace, num_retries=20):
result = None
if isinstance(key, str):
key = key.encode()
for _ in range(num_retries):
try:
result = gcs_client.internal_kv_get(key, namespace)
except Exception as e:
if isinstance(e, grpc.RpcError) and e.code() in (
grpc.StatusCode.UNAVAILABLE,
grpc.StatusCode.UNKNOWN,
):
logger.warning(
f"Unable to connect to GCS at {gcs_client.address}. "
"Check that (1) Ray GCS with matching version started "
"successfully at the specified address, and (2) there is "
"no firewall setting preventing access."
)
else:
logger.exception("Internal KV Get failed")
result = None
if result is not None:
break
else:
logger.debug(f"Fetched {key}=None from KV. Retrying.")
time.sleep(2)
if not result:
raise RuntimeError(
f"Could not read '{key.decode()}' from GCS. Did GCS start successfully?"
)
return result
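# --- Illustrative sketch (added; not part of the original Ray module). ---
# The internal_kv_*_with_retry helpers only need an object exposing the
# corresponding internal_kv_* method, so a tiny in-memory stand-in is enough to
# show the call shape. The _FakeGcsClient class and its address are
# hypothetical.
def _example_internal_kv_get_with_retry():
    class _FakeGcsClient:
        address = "127.0.0.1:6379"  # placeholder address
        def internal_kv_get(self, key, namespace):
            return b"example-value"
    value = internal_kv_get_with_retry(
        _FakeGcsClient(), "example-key", None, num_retries=1
    )
    assert value == b"example-value"
    return value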
def internal_kv_put_with_retry(gcs_client, key, value, namespace, num_retries=20):
if isinstance(key, str):
key = key.encode()
if isinstance(value, str):
value = value.encode()
if isinstance(namespace, str):
namespace = namespace.encode()
error = None
for _ in range(num_retries):
try:
return gcs_client.internal_kv_put(
key, value, overwrite=True, namespace=namespace
)
except grpc.RpcError as e:
if e.code() in (
grpc.StatusCode.UNAVAILABLE,
grpc.StatusCode.UNKNOWN,
):
logger.warning(
f"Unable to connect to GCS at {gcs_client.address}. "
"Check that (1) Ray GCS with matching version started "
"successfully at the specified address, and (2) there is "
"no firewall setting preventing access."
)
else:
logger.exception("Internal KV Put failed")
time.sleep(2)
error = e
# Reraise the last grpc.RpcError.
raise error
def compute_version_info():
"""Compute the versions of Python, and Ray.
Returns:
A tuple containing the version information.
"""
ray_version = ray.__version__
python_version = ".".join(map(str, sys.version_info[:3]))
return ray_version, python_version
def get_directory_size_bytes(path: Union[str, Path] = ".") -> int:
"""Get the total size of a directory in bytes, including subdirectories."""
total_size_bytes = 0
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
# skip if it is a symbolic link or a .pyc file
if not os.path.islink(fp) and not f.endswith(".pyc"):
total_size_bytes += os.path.getsize(fp)
return total_size_bytes
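# --- Illustrative sketch (added; not part of the original Ray module). ---
# get_directory_size_bytes() sums regular files and skips symlinks and .pyc
# files, as shown with a throwaway temporary directory.
def _example_get_directory_size_bytes():
    import tempfile  # local import to keep the sketch self-contained
    with tempfile.TemporaryDirectory() as tmp:
        with open(os.path.join(tmp, "data.bin"), "wb") as f:
            f.write(b"x" * 1024)
        with open(os.path.join(tmp, "cache.pyc"), "wb") as f:
            f.write(b"y" * 4096)  # ignored by the .pyc filter
        assert get_directory_size_bytes(tmp) == 1024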
def check_version_info(cluster_metadata):
"""Check if the Python and Ray versions stored in GCS match this process.
Args:
cluster_metadata: Ray cluster metadata from GCS.
Raises:
Exception: An exception is raised if there is a version mismatch.
"""
cluster_version_info = (
cluster_metadata["ray_version"],
cluster_metadata["python_version"],
)
version_info = compute_version_info()
if version_info != cluster_version_info:
node_ip_address = ray._private.services.get_node_ip_address()
error_message = (
"Version mismatch: The cluster was started with:\n"
" Ray: " + cluster_version_info[0] + "\n"
" Python: " + cluster_version_info[1] + "\n"
"This process on node " + node_ip_address + " was started with:" + "\n"
" Ray: " + version_info[0] + "\n"
" Python: " + version_info[1] + "\n"
)
raise RuntimeError(error_message)
import binascii
import errno
import functools
import hashlib
import importlib
import logging
import multiprocessing
import os
import signal
import subprocess
import sys
import tempfile
import threading
import time
from typing import Optional, Sequence, Tuple, Any, Union, Dict
import uuid
import grpc
import warnings
try:
from grpc import aio as aiogrpc
except ImportError:
from grpc.experimental import aio as aiogrpc
import inspect
from inspect import signature
from pathlib import Path
import numpy as np
import ray
from ray.core.generated.gcs_pb2 import ErrorTableData
import ray.ray_constants as ray_constants
from ray._private.tls_utils import load_certs_from_env
# Import psutil after ray so the packaged version is used.
import psutil
pwd = None
if sys.platform != "win32":
import pwd
logger = logging.getLogger(__name__)
# Linux can bind child processes' lifetimes to that of their parents via prctl.
# prctl support is detected dynamically once, and assumed thereafter.
linux_prctl = None
# Windows can bind processes' lifetimes to that of kernel-level "job objects".
# We keep a global job object to tie its lifetime to that of our own process.
win32_job = None
win32_AssignProcessToJobObject = None
def get_user_temp_dir():
if "RAY_TMPDIR" in os.environ:
return os.environ["RAY_TMPDIR"]
elif sys.platform.startswith("linux") and "TMPDIR" in os.environ:
return os.environ["TMPDIR"]
elif sys.platform.startswith("darwin") or sys.platform.startswith("linux"):
# Ideally we wouldn't need this fallback, but keep it for now
# for compatibility.
tempdir = os.path.join(os.sep, "tmp")
else:
tempdir = tempfile.gettempdir()
return tempdir
def get_ray_temp_dir():
return os.path.join(get_user_temp_dir(), "ray")
def _random_string():
id_hash = hashlib.shake_128()
id_hash.update(uuid.uuid4().bytes)
id_bytes = id_hash.digest(ray_constants.ID_SIZE)
assert len(id_bytes) == ray_constants.ID_SIZE
return id_bytes
def format_error_message(exception_message: str, task_exception: bool = False):
"""Improve the formatting of an exception thrown by a remote function.
This method takes a traceback from an exception and makes it nicer by
removing a few uninformative lines and adding some space to indent the
remaining lines nicely.
Args:
exception_message: A message generated by traceback.format_exc().
task_exception: Whether the exception was raised inside a Ray task. If so,
the first two (boilerplate worker) frames of the traceback are dropped.
Returns:
A string of the formatted exception message.
"""
lines = exception_message.split("\n")
if task_exception:
# For errors that occur inside of tasks, remove lines 1 and 2, which are
# always the same; they just contain information about the worker code.
lines = lines[0:1] + lines[3:]
return "\n".join(lines)
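# --- Illustrative sketch (added; not part of the original Ray module). ---
# With task_exception=True, the two worker-frame lines that every task
# traceback starts with are dropped. The traceback text below is fabricated
# purely for illustration.
def _example_format_error_message():
    fake_traceback = "\n".join(
        [
            "Traceback (most recent call last):",
            '  File "worker.py", line 1, in main_loop',
            '  File "worker.py", line 2, in execute_task',
            '  File "my_task.py", line 3, in f',
            "ZeroDivisionError: division by zero",
        ]
    )
    formatted = format_error_message(fake_traceback, task_exception=True)
    # The second and third lines of the original message are removed.
    assert "execute_task" not in formatted
    return formatted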
def push_error_to_driver(
worker, error_type: str, message: str, job_id: Optional[str] = None
):
"""Push an error message to the driver to be printed in the background.
Args:
worker: The worker to use.
error_type: The type of the error.
message: The message that will be printed in the background
on the driver.
job_id: The ID of the driver to push the error message to. If this
is None, then the message will be pushed to all drivers.
"""
if job_id is None:
job_id = ray.JobID.nil()
assert isinstance(job_id, ray.JobID)
worker.core_worker.push_error(job_id, error_type, message, time.time())
def construct_error_message(job_id, error_type, message, timestamp):
"""Construct an ErrorTableData object.
Args:
job_id: The ID of the job that the error should go to. If this is
nil, then the error will go to all drivers.
error_type: The type of the error.
message: The error message.
timestamp: The time of the error.
Returns:
The ErrorTableData object.
"""
data = ErrorTableData()
data.job_id = job_id.binary()
data.type = error_type
data.error_message = message
data.timestamp = timestamp
return data
def publish_error_to_driver(
error_type: str,
message: str,
gcs_publisher,
job_id=None,
):
"""Push an error message to the driver to be printed in the background.
Normally the push_error_to_driver function should be used. However, in some
instances, the raylet client is not available, e.g., because the
error happens in Python before the driver or worker has connected to the
backend processes.
Args:
error_type: The type of the error.
message: The message that will be printed in the background
on the driver.
gcs_publisher: The GCS publisher to use.
job_id: The ID of the driver to push the error message to. If this
is None, then the message will be pushed to all drivers.
"""
if job_id is None:
job_id = ray.JobID.nil()
assert isinstance(job_id, ray.JobID)
error_data = construct_error_message(job_id, error_type, message, time.time())
try:
gcs_publisher.publish_error(job_id.hex().encode(), error_data)
except Exception:
logger.exception(f"Failed to publish error {error_data}")
def random_string():
"""Generate a random string to use as an ID.
Note that users may seed numpy, which could cause this function to generate
duplicate IDs. Therefore, we need to seed numpy ourselves, but we can't
interfere with the state of the user's random number generator, so we
extract the state of the random number generator and reset it after we are
done.
TODO(rkn): If we want to later guarantee that these are generated in a
deterministic manner, then we will need to make some changes here.
Returns:
A random byte string of length ray_constants.ID_SIZE.
"""
# Get the state of the numpy random number generator.
numpy_state = np.random.get_state()
# Try to use true randomness.
np.random.seed(None)
# Generate the random ID.
random_id = np.random.bytes(ray_constants.ID_SIZE)
# Reset the state of the numpy random number generator.
np.random.set_state(numpy_state)
return random_id
def decode(byte_str: str, allow_none: bool = False, encode_type: str = "utf-8"):
"""Make this unicode in Python 3, otherwise leave it as bytes.
Args:
byte_str: The byte string to decode.
allow_none: If true, then we will allow byte_str to be None in which
case we will return an empty string. TODO(rkn): Remove this flag.
This is only here to simplify upgrading to flatbuffers 1.10.0.
Returns:
A byte string in Python 2 and a unicode string in Python 3.
"""
if byte_str is None and allow_none:
return ""
if not isinstance(byte_str, bytes):
raise ValueError(f"The argument {byte_str} must be a bytes object.")
if sys.version_info >= (3, 0):
return byte_str.decode(encode_type)
else:
return byte_str
def ensure_str(s, encoding="utf-8", errors="strict"):
"""Coerce *s* to `str`.
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
if isinstance(s, str):
return s
else:
assert isinstance(s, bytes)
return s.decode(encoding, errors)
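# --- Illustrative sketch (added; not part of the original Ray module). ---
# decode() and ensure_str() both normalize byte strings (e.g. values read back
# from a key-value store) into text.
def _example_decode_and_ensure_str():
    assert decode(b"node:127.0.0.1") == "node:127.0.0.1"
    assert decode(None, allow_none=True) == ""
    assert ensure_str(b"hello") == "hello"
    assert ensure_str("hello") == "hello"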
def binary_to_object_ref(binary_object_ref):
return ray.ObjectRef(binary_object_ref)
def binary_to_task_id(binary_task_id):
return ray.TaskID(binary_task_id)
def binary_to_hex(identifier):
hex_identifier = binascii.hexlify(identifier)
if sys.version_info >= (3, 0):
hex_identifier = hex_identifier.decode()
return hex_identifier
def hex_to_binary(hex_identifier):
return binascii.unhexlify(hex_identifier)
# TODO(qwang): Remove these hepler functions
# once we separate `WorkerID` from `UniqueID`.
def compute_job_id_from_driver(driver_id):
assert isinstance(driver_id, ray.WorkerID)
return ray.JobID(driver_id.binary()[0 : ray.JobID.size()])
def compute_driver_id_from_job(job_id):
assert isinstance(job_id, ray.JobID)
rest_length = ray_constants.ID_SIZE - job_id.size()
driver_id_str = job_id.binary() + (rest_length * b"\xff")
return ray.WorkerID(driver_id_str)
def get_cuda_visible_devices():
"""Get the device IDs in the CUDA_VISIBLE_DEVICES environment variable.
Returns:
devices (List[str]): If CUDA_VISIBLE_DEVICES is set, returns a
list of strings representing the IDs of the visible GPUs.
If it is not set or is set to NoDevFiles, returns empty list.
"""
gpu_ids_str = os.environ.get("CUDA_VISIBLE_DEVICES", None)
if gpu_ids_str is None:
return None
if gpu_ids_str == "":
return []
if gpu_ids_str == "NoDevFiles":
return []
# GPU identifiers are given as strings representing integers or UUIDs.
return list(gpu_ids_str.split(","))
last_set_gpu_ids = None
def set_cuda_visible_devices(gpu_ids):
"""Set the CUDA_VISIBLE_DEVICES environment variable.
Args:
gpu_ids (List[str]): List of strings representing GPU IDs.
"""
if os.environ.get(ray_constants.NOSET_CUDA_VISIBLE_DEVICES_ENV_VAR):
return
global last_set_gpu_ids
if last_set_gpu_ids == gpu_ids:
return # optimization: already set
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join([str(i) for i in gpu_ids])
last_set_gpu_ids = gpu_ids
def resources_from_ray_options(options_dict: Dict[str, Any]) -> Dict[str, Any]:
"""Determine a task's resource requirements.
Args:
options_dict: The dictionary that contains resources requirements.
Returns:
A dictionary of the resource requirements for the task.
"""
resources = (options_dict.get("resources") or {}).copy()
if "CPU" in resources or "GPU" in resources:
raise ValueError(
"The resources dictionary must not contain the key 'CPU' or 'GPU'"
)
elif "memory" in resources or "object_store_memory" in resources:
raise ValueError(
"The resources dictionary must not "
"contain the key 'memory' or 'object_store_memory'"
)
num_cpus = options_dict.get("num_cpus")
num_gpus = options_dict.get("num_gpus")
memory = options_dict.get("memory")
object_store_memory = options_dict.get("object_store_memory")
accelerator_type = options_dict.get("accelerator_type")
if num_cpus is not None:
resources["CPU"] = num_cpus
if num_gpus is not None:
resources["GPU"] = num_gpus
if memory is not None:
resources["memory"] = ray_constants.to_memory_units(memory, round_up=True)
if object_store_memory is not None:
resources["object_store_memory"] = ray_constants.to_memory_units(
object_store_memory, round_up=True
)
if accelerator_type is not None:
resources[
f"{ray_constants.RESOURCE_CONSTRAINT_PREFIX}{accelerator_type}"
] = 0.001
return resources
class Unbuffered(object):
"""There's no "built-in" solution to programatically disabling buffering of
text files. Ray expects stdout/err to be text files, so creating an
unbuffered binary file is unacceptable.
See
https://mail.python.org/pipermail/tutor/2003-November/026645.html.
https://docs.python.org/3/library/functions.html#open
"""
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def writelines(self, datas):
self.stream.writelines(datas)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
def open_log(path, unbuffered=False, **kwargs):
"""
Opens the log file at `path`, with the provided kwargs being given to
`open`.
"""
# Disable buffering, see test_advanced_3.py::test_logging_to_driver
kwargs.setdefault("buffering", 1)
kwargs.setdefault("mode", "a")
kwargs.setdefault("encoding", "utf-8")
stream = open(path, **kwargs)
if unbuffered:
return Unbuffered(stream)
else:
return stream
def get_system_memory(
# For cgroups v1:
memory_limit_filename="/sys/fs/cgroup/memory/memory.limit_in_bytes",
# For cgroups v2:
memory_limit_filename_v2="/sys/fs/cgroup/memory.max",
):
"""Return the total amount of system memory in bytes.
Returns:
The total amount of system memory in bytes.
"""
# Try to accurately figure out the memory limit if we are in a docker
# container. Note that this file is not specific to Docker and its value is
# often much larger than the actual amount of memory.
docker_limit = None
if os.path.exists(memory_limit_filename):
with open(memory_limit_filename, "r") as f:
docker_limit = int(f.read())
elif os.path.exists(memory_limit_filename_v2):
with open(memory_limit_filename_v2, "r") as f:
max_file = f.read()
if max_file.isnumeric():
docker_limit = int(max_file)
else:
# max_file is "max", i.e. is unset.
docker_limit = None
# Use psutil if it is available.
psutil_memory_in_bytes = psutil.virtual_memory().total
if docker_limit is not None:
# We take the min because the cgroup limit is very large if we aren't
# in Docker.
return min(docker_limit, psutil_memory_in_bytes)
return psutil_memory_in_bytes
def _get_docker_cpus(
cpu_quota_file_name="/sys/fs/cgroup/cpu/cpu.cfs_quota_us",
cpu_period_file_name="/sys/fs/cgroup/cpu/cpu.cfs_period_us",
cpuset_file_name="/sys/fs/cgroup/cpuset/cpuset.cpus",
cpu_max_file_name="/sys/fs/cgroup/cpu.max",
) -> Optional[float]:
# TODO (Alex): Don't implement this logic oursleves.
# Docker has 2 underyling ways of implementing CPU limits:
# https://docs.docker.com/config/containers/resource_constraints/#configure-the-default-cfs-scheduler
# 1. --cpuset-cpus 2. --cpus or --cpu-quota/--cpu-period (--cpu-shares is a
# soft limit so we don't worry about it). For Ray's purposes, if we use
# docker, the number of vCPUs on a machine is whichever is set (ties broken
# by smaller value).
cpu_quota = None
# See: https://bugs.openjdk.java.net/browse/JDK-8146115
if os.path.exists(cpu_quota_file_name) and os.path.exists(cpu_period_file_name):
try:
with open(cpu_quota_file_name, "r") as quota_file, open(
cpu_period_file_name, "r"
) as period_file:
cpu_quota = float(quota_file.read()) / float(period_file.read())
except Exception:
logger.exception("Unexpected error calculating docker cpu quota.")
# Look at cpu.max for cgroups v2
elif os.path.exists(cpu_max_file_name):
try:
max_file = open(cpu_max_file_name).read()
quota_str, period_str = max_file.split()
if quota_str.isnumeric() and period_str.isnumeric():
cpu_quota = float(quota_str) / float(period_str)
else:
# quota_str is "max" meaning the cpu quota is unset
cpu_quota = None
except Exception:
logger.exception("Unexpected error calculating docker cpu quota.")
if (cpu_quota is not None) and (cpu_quota < 0):
cpu_quota = None
elif cpu_quota == 0:
# Round up in case the cpu limit is less than 1.
cpu_quota = 1
cpuset_num = None
if os.path.exists(cpuset_file_name):
try:
with open(cpuset_file_name) as cpuset_file:
ranges_as_string = cpuset_file.read()
ranges = ranges_as_string.split(",")
cpu_ids = []
for num_or_range in ranges:
if "-" in num_or_range:
start, end = num_or_range.split("-")
cpu_ids.extend(list(range(int(start), int(end) + 1)))
else:
cpu_ids.append(int(num_or_range))
cpuset_num = len(cpu_ids)
except Exception:
logger.exception("Unexpected error calculating docker cpuset ids.")
# Possible to-do: Parse cgroups v2's cpuset.cpus.effective for the number
# of accessible CPUs.
if cpu_quota and cpuset_num:
return min(cpu_quota, cpuset_num)
return cpu_quota or cpuset_num
def get_num_cpus() -> int:
cpu_count = multiprocessing.cpu_count()
if os.environ.get("RAY_USE_MULTIPROCESSING_CPU_COUNT"):
logger.info(
"Detected RAY_USE_MULTIPROCESSING_CPU_COUNT=1: Using "
"multiprocessing.cpu_count() to detect the number of CPUs. "
"This may be inconsistent when used inside docker. "
"To correctly detect CPUs, unset the env var: "
"`RAY_USE_MULTIPROCESSING_CPU_COUNT`."
)
return cpu_count
try:
# Not easy to get cpu count in docker, see:
# https://bugs.python.org/issue36054
docker_count = _get_docker_cpus()
if docker_count is not None and docker_count != cpu_count:
# Don't log this warning if we're on K8s or if the warning is
# explicitly disabled.
if (
"RAY_DISABLE_DOCKER_CPU_WARNING" not in os.environ
and "KUBERNETES_SERVICE_HOST" not in os.environ
):
logger.warning(
"Detecting docker specified CPUs. In "
"previous versions of Ray, CPU detection in containers "
"was incorrect. Please ensure that Ray has enough CPUs "
"allocated. As a temporary workaround to revert to the "
"prior behavior, set "
"`RAY_USE_MULTIPROCESSING_CPU_COUNT=1` as an env var "
"before starting Ray. Set the env var: "
"`RAY_DISABLE_DOCKER_CPU_WARNING=1` to mute this warning."
)
# TODO (Alex): We should probably add support for fractional cpus.
if int(docker_count) != float(docker_count):
logger.warning(
f"Ray currently does not support initializing Ray"
f"with fractional cpus. Your num_cpus will be "
f"truncated from {docker_count} to "
f"{int(docker_count)}."
)
docker_count = int(docker_count)
cpu_count = docker_count
except Exception:
# `nproc` and cgroup are linux-only. If docker only works on linux
# (will run in a linux VM on other platforms), so this is fine.
pass
return cpu_count
def get_used_memory():
"""Return the currently used system memory in bytes
Returns:
The total amount of used memory
"""
# Try to accurately figure out the memory usage if we are in a docker
# container.
docker_usage = None
# For cgroups v1:
memory_usage_filename = "/sys/fs/cgroup/memory/memory.usage_in_bytes"
# For cgroups v2:
memory_usage_filename_v2 = "/sys/fs/cgroup/memory.current"
if os.path.exists(memory_usage_filename):
with open(memory_usage_filename, "r") as f:
docker_usage = int(f.read())
elif os.path.exists(memory_usage_filename_v2):
with open(memory_usage_filename_v2, "r") as f:
docker_usage = int(f.read())
# Use psutil if it is available.
psutil_memory_in_bytes = psutil.virtual_memory().used
if docker_usage is not None:
# We take the min because the cgroup limit is very large if we aren't
# in Docker.
return min(docker_usage, psutil_memory_in_bytes)
return psutil_memory_in_bytes
def estimate_available_memory():
"""Return the currently available amount of system memory in bytes.
Returns:
The total amount of available memory in bytes. Based on the used
and total memory.
"""
return get_system_memory() - get_used_memory()
def get_shared_memory_bytes():
"""Get the size of the shared memory file system.
Returns:
The size of the shared memory file system in bytes.
"""
# Make sure this is only called on Linux.
assert sys.platform == "linux" or sys.platform == "linux2"
shm_fd = os.open("/dev/shm", os.O_RDONLY)
try:
shm_fs_stats = os.fstatvfs(shm_fd)
# The value shm_fs_stats.f_bsize is the block size and the
# value shm_fs_stats.f_bavail is the number of available
# blocks.
shm_avail = shm_fs_stats.f_bsize * shm_fs_stats.f_bavail
finally:
os.close(shm_fd)
return shm_avail
def check_oversized_function(
pickled: bytes, name: str, obj_type: str, worker: "ray.Worker"
) -> None:
"""Send a warning message if the pickled function is too large.
Args:
pickled: the pickled function.
name: name of the pickled object.
obj_type: type of the pickled object, can be 'function',
'remote function', or 'actor'.
worker: the worker used to send warning message. message will be logged
locally if None.
"""
length = len(pickled)
if length <= ray_constants.FUNCTION_SIZE_WARN_THRESHOLD:
return
elif length < ray_constants.FUNCTION_SIZE_ERROR_THRESHOLD:
warning_message = (
"The {} {} is very large ({} MiB). "
"Check that its definition is not implicitly capturing a large "
"array or other object in scope. Tip: use ray.put() to put large "
"objects in the Ray object store."
).format(obj_type, name, length // (1024 * 1024))
if worker:
push_error_to_driver(
worker,
ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR,
"Warning: " + warning_message,
job_id=worker.current_job_id,
)
else:
error = (
"The {} {} is too large ({} MiB > FUNCTION_SIZE_ERROR_THRESHOLD={}"
" MiB). Check that its definition is not implicitly capturing a "
"large array or other object in scope. Tip: use ray.put() to "
"put large objects in the Ray object store."
).format(
obj_type,
name,
length // (1024 * 1024),
ray_constants.FUNCTION_SIZE_ERROR_THRESHOLD // (1024 * 1024),
)
raise ValueError(error)
def is_main_thread():
return threading.current_thread().getName() == "MainThread"
def detect_fate_sharing_support_win32():
global win32_job, win32_AssignProcessToJobObject
if win32_job is None and sys.platform == "win32":
import ctypes
try:
from ctypes.wintypes import BOOL, DWORD, HANDLE, LPVOID, LPCWSTR
kernel32 = ctypes.WinDLL("kernel32")
kernel32.CreateJobObjectW.argtypes = (LPVOID, LPCWSTR)
kernel32.CreateJobObjectW.restype = HANDLE
sijo_argtypes = (HANDLE, ctypes.c_int, LPVOID, DWORD)
kernel32.SetInformationJobObject.argtypes = sijo_argtypes
kernel32.SetInformationJobObject.restype = BOOL
kernel32.AssignProcessToJobObject.argtypes = (HANDLE, HANDLE)
kernel32.AssignProcessToJobObject.restype = BOOL
kernel32.IsDebuggerPresent.argtypes = ()
kernel32.IsDebuggerPresent.restype = BOOL
except (AttributeError, TypeError, ImportError):
kernel32 = None
job = kernel32.CreateJobObjectW(None, None) if kernel32 else None
job = subprocess.Handle(job) if job else job
if job:
from ctypes.wintypes import DWORD, LARGE_INTEGER, ULARGE_INTEGER
class JOBOBJECT_BASIC_LIMIT_INFORMATION(ctypes.Structure):
_fields_ = [
("PerProcessUserTimeLimit", LARGE_INTEGER),
("PerJobUserTimeLimit", LARGE_INTEGER),
("LimitFlags", DWORD),
("MinimumWorkingSetSize", ctypes.c_size_t),
("MaximumWorkingSetSize", ctypes.c_size_t),
("ActiveProcessLimit", DWORD),
("Affinity", ctypes.c_size_t),
("PriorityClass", DWORD),
("SchedulingClass", DWORD),
]
class IO_COUNTERS(ctypes.Structure):
_fields_ = [
("ReadOperationCount", ULARGE_INTEGER),
("WriteOperationCount", ULARGE_INTEGER),
("OtherOperationCount", ULARGE_INTEGER),
("ReadTransferCount", ULARGE_INTEGER),
("WriteTransferCount", ULARGE_INTEGER),
("OtherTransferCount", ULARGE_INTEGER),
]
class JOBOBJECT_EXTENDED_LIMIT_INFORMATION(ctypes.Structure):
_fields_ = [
("BasicLimitInformation", JOBOBJECT_BASIC_LIMIT_INFORMATION),
("IoInfo", IO_COUNTERS),
("ProcessMemoryLimit", ctypes.c_size_t),
("JobMemoryLimit", ctypes.c_size_t),
("PeakProcessMemoryUsed", ctypes.c_size_t),
("PeakJobMemoryUsed", ctypes.c_size_t),
]
debug = kernel32.IsDebuggerPresent()
# Defined in <WinNT.h>; also available here:
# https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/nf-jobapi2-setinformationjobobject
JobObjectExtendedLimitInformation = 9
JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x00000800
JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION = 0x00000400
JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x00002000
buf = JOBOBJECT_EXTENDED_LIMIT_INFORMATION()
buf.BasicLimitInformation.LimitFlags = (
(0 if debug else JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE)
| JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION
| JOB_OBJECT_LIMIT_BREAKAWAY_OK
)
infoclass = JobObjectExtendedLimitInformation
if not kernel32.SetInformationJobObject(
job, infoclass, ctypes.byref(buf), ctypes.sizeof(buf)
):
job = None
win32_AssignProcessToJobObject = (
kernel32.AssignProcessToJobObject if kernel32 is not None else False
)
win32_job = job if job else False
return bool(win32_job)
def detect_fate_sharing_support_linux():
global linux_prctl
if linux_prctl is None and sys.platform.startswith("linux"):
try:
from ctypes import c_int, c_ulong, CDLL
prctl = CDLL(None).prctl
prctl.restype = c_int
prctl.argtypes = [c_int, c_ulong, c_ulong, c_ulong, c_ulong]
except (AttributeError, TypeError):
prctl = None
linux_prctl = prctl if prctl else False
return bool(linux_prctl)
def detect_fate_sharing_support():
result = None
if sys.platform == "win32":
result = detect_fate_sharing_support_win32()
elif sys.platform.startswith("linux"):
result = detect_fate_sharing_support_linux()
return result
def set_kill_on_parent_death_linux():
"""Ensures this process dies if its parent dies (fate-sharing).
Linux-only. Must be called in preexec_fn (i.e. by the child).
"""
if detect_fate_sharing_support_linux():
import signal
PR_SET_PDEATHSIG = 1
if linux_prctl(PR_SET_PDEATHSIG, signal.SIGKILL, 0, 0, 0) != 0:
import ctypes
raise OSError(ctypes.get_errno(), "prctl(PR_SET_PDEATHSIG) failed")
else:
assert False, "PR_SET_PDEATHSIG used despite being unavailable"
def set_kill_child_on_death_win32(child_proc):
"""Ensures the child process dies if this process dies (fate-sharing).
Windows-only. Must be called by the parent, after spawning the child.
Args:
child_proc: The subprocess.Popen or subprocess.Handle object.
"""
if isinstance(child_proc, subprocess.Popen):
child_proc = child_proc._handle
assert isinstance(child_proc, subprocess.Handle)
if detect_fate_sharing_support_win32():
if not win32_AssignProcessToJobObject(win32_job, int(child_proc)):
import ctypes
raise OSError(ctypes.get_last_error(), "AssignProcessToJobObject() failed")
else:
assert False, "AssignProcessToJobObject used despite being unavailable"
def set_sigterm_handler(sigterm_handler):
"""Registers a handler for SIGTERM in a platform-compatible manner."""
if sys.platform == "win32":
# Note that these signal handlers only work for console applications.
# TODO(mehrdadn): implement graceful process termination mechanism
# SIGINT is Ctrl+C, SIGBREAK is Ctrl+Break.
signal.signal(signal.SIGBREAK, sigterm_handler)
else:
signal.signal(signal.SIGTERM, sigterm_handler)
def try_make_directory_shared(directory_path):
try:
os.chmod(directory_path, 0o0777)
except OSError as e:
# Silently suppress the PermissionError that is thrown by the chmod.
# This is done because the user attempting to change the permissions
# on a directory may not own it. The chmod is attempted whether the
# directory is new or not to avoid race conditions.
# ray-project/ray/#3591
if e.errno in [errno.EACCES, errno.EPERM]:
pass
else:
raise
def try_to_create_directory(directory_path):
"""Attempt to create a directory that is globally readable/writable.
Args:
directory_path: The path of the directory to create.
"""
directory_path = os.path.expanduser(directory_path)
os.makedirs(directory_path, exist_ok=True)
# Change the log directory permissions so others can use it. This is
# important when multiple people are using the same machine.
try_make_directory_shared(directory_path)
def try_to_symlink(symlink_path, target_path):
"""Attempt to create a symlink.
If the symlink path exists and isn't a symlink, the symlink will not be
created. If a symlink exists in the path, it will be attempted to be
removed and replaced.
Args:
symlink_path: The path at which to create the symlink.
target_path: The path the symlink should point to.
"""
symlink_path = os.path.expanduser(symlink_path)
target_path = os.path.expanduser(target_path)
if os.path.exists(symlink_path):
if os.path.islink(symlink_path):
# Try to remove existing symlink.
try:
os.remove(symlink_path)
except OSError:
return
else:
# There's an existing non-symlink file, don't overwrite it.
return
try:
os.symlink(target_path, symlink_path)
except OSError:
return
def get_user():
if pwd is None:
return ""
try:
return pwd.getpwuid(os.getuid()).pw_name
except Exception:
return ""
def get_function_args(callable):
all_parameters = frozenset(signature(callable).parameters)
return list(all_parameters)
def get_conda_bin_executable(executable_name):
"""
Return path to the specified executable, assumed to be discoverable within
the 'bin' subdirectory of a conda installation. Adapted from
https://github.com/mlflow/mlflow.
"""
# Use CONDA_EXE as per https://github.com/conda/conda/issues/7126
if "CONDA_EXE" in os.environ:
conda_bin_dir = os.path.dirname(os.environ["CONDA_EXE"])
return os.path.join(conda_bin_dir, executable_name)
return executable_name
def get_conda_env_dir(env_name):
"""Find and validate the conda directory for a given conda environment.
For example, given the environment name `tf1`, this function checks
the existence of the corresponding conda directory, e.g.
`/Users/scaly/anaconda3/envs/tf1`, and returns it.
"""
conda_prefix = os.environ.get("CONDA_PREFIX")
if conda_prefix is None:
# The caller is neither in a conda env or in (base) env. This is rare
# because by default, new terminals start in (base), but we can still
# support this case.
conda_exe = os.environ.get("CONDA_EXE")
if conda_exe is None:
raise ValueError(
"Cannot find environment variables set by conda. "
"Please verify conda is installed."
)
# Example: CONDA_EXE=$HOME/anaconda3/bin/python
# Strip out /bin/python by going up two parent directories.
conda_prefix = str(Path(conda_exe).parent.parent)
# There are two cases:
# 1. We are in a conda (base) env: CONDA_DEFAULT_ENV=base and
# CONDA_PREFIX=$HOME/anaconda3
# 2. We are in a user-created conda env: CONDA_DEFAULT_ENV=$env_name and
# CONDA_PREFIX=$HOME/anaconda3/envs/$current_env_name
if os.environ.get("CONDA_DEFAULT_ENV") == "base":
        # Caller's current environment is (base).
# Not recommended by conda, but we can still support it.
if env_name == "base":
# Desired environment is (base), located at e.g. $HOME/anaconda3
env_dir = conda_prefix
else:
# Desired environment is user-created, e.g.
# $HOME/anaconda3/envs/$env_name
env_dir = os.path.join(conda_prefix, "envs", env_name)
else:
# Now `conda_prefix` should be something like
# $HOME/anaconda3/envs/$current_env_name
# We want to replace the last component with the desired env name.
conda_envs_dir = os.path.split(conda_prefix)[0]
env_dir = os.path.join(conda_envs_dir, env_name)
if not os.path.isdir(env_dir):
raise ValueError(
"conda env "
+ env_name
+ " not found in conda envs directory. Run `conda env list` to "
+ "verify the name is correct."
)
return env_dir
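# Usage sketch (editor's addition): resolving a conda environment directory and
# deriving its interpreter path with the helper above. The environment name
# "tf1" is a placeholder; get_conda_env_dir raises if no such env exists.
def _example_resolve_conda_python(env_name="tf1"):
    env_dir = get_conda_env_dir(env_name)
    # On Linux/macOS the interpreter lives under <env_dir>/bin/python.
    return os.path.join(env_dir, "bin", "python")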
def get_call_location(back: int = 1):
"""
Get the location (filename and line number) of a function caller, `back`
frames up the stack.
Args:
back: The number of frames to go up the stack, not including this
function.
"""
stack = inspect.stack()
try:
frame = stack[back + 1]
return f"{frame.filename}:{frame.lineno}"
except IndexError:
return "UNKNOWN"
# Used to only print a deprecation warning once for a given function if we
# don't wish to spam the caller.
_PRINTED_WARNING = set()
# The following is inspired by
# https://github.com/tensorflow/tensorflow/blob/dec8e0b11f4f87693b67e125e67dfbc68d26c205/tensorflow/python/util/deprecation.py#L274-L329
def deprecated(
instructions: Optional[str] = None,
removal_release: Optional[str] = None,
removal_date: Optional[str] = None,
warn_once: bool = True,
):
"""
Creates a decorator for marking functions as deprecated. The decorator
will log a deprecation warning on the first (or all, see `warn_once` arg)
invocations, and will otherwise leave the wrapped function unchanged.
Args:
instructions: Instructions for the caller to update their code.
        removal_release: The release in which this deprecated function
            will be removed. Only one of removal_release and removal_date
            should be specified. If neither is specified, we'll warn that
            the function will be removed "in a future release".
        removal_date: The date on which this deprecated function will be
            removed. Only one of removal_release and removal_date should be
            specified. If neither is specified, we'll warn that
            the function will be removed "in a future release".
warn_once: If true, the deprecation warning will only be logged
on the first invocation. Otherwise, the deprecation warning will
be logged on every invocation. Defaults to True.
Returns:
A decorator to be used for wrapping deprecated functions.
"""
if removal_release is not None and removal_date is not None:
raise ValueError(
"Only one of removal_release and removal_date should be specified."
)
def deprecated_wrapper(func):
@functools.wraps(func)
def new_func(*args, **kwargs):
global _PRINTED_WARNING
if func not in _PRINTED_WARNING:
if warn_once:
_PRINTED_WARNING.add(func)
msg = (
"From {}: {} (from {}) is deprecated and will ".format(
get_call_location(), func.__name__, func.__module__
)
+ "be removed "
+ (
f"in version {removal_release}."
if removal_release is not None
else f"after {removal_date}"
if removal_date is not None
else "in a future version"
)
+ (f" {instructions}" if instructions is not None else "")
)
warnings.warn(msg)
return func(*args, **kwargs)
return new_func
return deprecated_wrapper
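# Usage sketch (editor's addition): how the decorator above is typically applied.
# The function below is invented for illustration; calling it logs a single
# deprecation warning (warn_once=True by default) and then runs normally.
@deprecated(instructions="Use a hypothetical shiny_new_api() instead.",
            removal_release="3.0.0")
def _example_legacy_api():
    return 42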
def import_attr(full_path: str):
"""Given a full import path to a module attr, return the imported attr.
For example, the following are equivalent:
MyClass = import_attr("module.submodule:MyClass")
MyClass = import_attr("module.submodule.MyClass")
from module.submodule import MyClass
Returns:
Imported attr
"""
if full_path is None:
raise TypeError("import path cannot be None")
if ":" in full_path:
if full_path.count(":") > 1:
raise ValueError(
f'Got invalid import path "{full_path}". An '
"import path may have at most one colon."
)
module_name, attr_name = full_path.split(":")
else:
last_period_idx = full_path.rfind(".")
module_name = full_path[:last_period_idx]
attr_name = full_path[last_period_idx + 1 :]
module = importlib.import_module(module_name)
return getattr(module, attr_name)
def get_wheel_filename(
sys_platform: str = sys.platform,
ray_version: str = ray.__version__,
py_version: str = f"{sys.version_info.major}{sys.version_info.minor}",
) -> str:
"""Returns the filename used for the nightly Ray wheel.
Args:
sys_platform: The platform as returned by sys.platform. Examples:
"darwin", "linux", "win32"
ray_version: The Ray version as returned by ray.__version__ or
`ray --version`. Examples: "3.0.0.dev0"
py_version (str):
The major and minor Python versions concatenated. Examples: "36",
"37", "38", "39"
Returns:
The wheel file name. Examples:
ray-3.0.0.dev0-cp38-cp38-manylinux2014_x86_64.whl
"""
assert py_version in ["36", "37", "38", "39"], py_version
os_strings = {
"darwin": "macosx_10_15_x86_64"
if py_version in ["38", "39"]
else "macosx_10_15_intel",
"linux": "manylinux2014_x86_64",
"win32": "win_amd64",
}
assert sys_platform in os_strings, sys_platform
wheel_filename = (
f"ray-{ray_version}-cp{py_version}-"
f"cp{py_version}{'m' if py_version in ['36', '37'] else ''}"
f"-{os_strings[sys_platform]}.whl"
)
return wheel_filename
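# Example (editor's addition): a quick self-check of the naming scheme above,
# matching the docstring example.
def _example_wheel_filename():
    assert (
        get_wheel_filename("linux", "3.0.0.dev0", "38")
        == "ray-3.0.0.dev0-cp38-cp38-manylinux2014_x86_64.whl"
    )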
def get_master_wheel_url(
ray_commit: str = ray.__commit__,
sys_platform: str = sys.platform,
ray_version: str = ray.__version__,
py_version: str = f"{sys.version_info.major}{sys.version_info.minor}",
) -> str:
"""Return the URL for the wheel from a specific commit."""
filename = get_wheel_filename(
sys_platform=sys_platform, ray_version=ray_version, py_version=py_version
)
return (
f"https://s3-us-west-2.amazonaws.com/ray-wheels/master/"
f"{ray_commit}/{filename}"
)
def get_release_wheel_url(
ray_commit: str = ray.__commit__,
sys_platform: str = sys.platform,
ray_version: str = ray.__version__,
py_version: str = f"{sys.version_info.major}{sys.version_info.minor}",
) -> str:
"""Return the URL for the wheel for a specific release."""
filename = get_wheel_filename(
sys_platform=sys_platform, ray_version=ray_version, py_version=py_version
)
return (
f"https://ray-wheels.s3-us-west-2.amazonaws.com/releases/"
f"{ray_version}/{ray_commit}/{filename}"
)
# e.g. https://ray-wheels.s3-us-west-2.amazonaws.com/releases/1.4.0rc1/e7c7
# f6371a69eb727fa469e4cd6f4fbefd143b4c/ray-1.4.0rc1-cp36-cp36m-manylinux201
# 4_x86_64.whl
def validate_namespace(namespace: str):
if not isinstance(namespace, str):
raise TypeError("namespace must be None or a string.")
elif namespace == "":
raise ValueError(
'"" is not a valid namespace. ' "Pass None to not specify a namespace."
)
def init_grpc_channel(
address: str,
options: Optional[Sequence[Tuple[str, Any]]] = None,
asynchronous: bool = False,
):
grpc_module = aiogrpc if asynchronous else grpc
if os.environ.get("RAY_USE_TLS", "0").lower() in ("1", "true"):
server_cert_chain, private_key, ca_cert = load_certs_from_env()
credentials = grpc.ssl_channel_credentials(
certificate_chain=server_cert_chain,
private_key=private_key,
root_certificates=ca_cert,
)
channel = grpc_module.secure_channel(address, credentials, options=options)
else:
channel = grpc_module.insecure_channel(address, options=options)
return channel
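# Usage sketch (editor's addition): building a plain synchronous channel with a
# couple of standard gRPC channel options. The address is a placeholder; with
# RAY_USE_TLS unset this takes the insecure_channel branch above.
def _example_grpc_channel(address="127.0.0.1:6379"):
    return init_grpc_channel(
        address,
        options=[
            ("grpc.max_send_message_length", 512 * 1024 * 1024),
            ("grpc.max_receive_message_length", 512 * 1024 * 1024),
        ],
    )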
def check_dashboard_dependencies_installed() -> bool:
"""Returns True if Ray Dashboard dependencies are installed.
Checks to see if we should start the dashboard agent or not based on the
Ray installation version the user has installed (ray vs. ray[default]).
Unfortunately there doesn't seem to be a cleaner way to detect this other
than just blindly importing the relevant packages.
"""
try:
import ray.dashboard.optional_deps # noqa: F401
return True
except ImportError:
return False
def internal_kv_list_with_retry(gcs_client, prefix, namespace, num_retries=20):
result = None
if isinstance(prefix, str):
prefix = prefix.encode()
if isinstance(namespace, str):
namespace = namespace.encode()
for _ in range(num_retries):
try:
result = gcs_client.internal_kv_keys(prefix, namespace)
except Exception as e:
if isinstance(e, grpc.RpcError) and e.code() in (
grpc.StatusCode.UNAVAILABLE,
grpc.StatusCode.UNKNOWN,
):
logger.warning(
f"Unable to connect to GCS at {gcs_client.address}. "
"Check that (1) Ray GCS with matching version started "
"successfully at the specified address, and (2) there is "
"no firewall setting preventing access."
)
else:
logger.exception("Internal KV List failed")
result = None
if result is not None:
break
else:
logger.debug(f"Fetched {prefix}=None from KV. Retrying.")
time.sleep(2)
if result is None:
raise RuntimeError(
f"Could not list '{prefix}' from GCS. Did GCS start successfully?"
)
return result
def internal_kv_get_with_retry(gcs_client, key, namespace, num_retries=20):
result = None
if isinstance(key, str):
key = key.encode()
for _ in range(num_retries):
try:
result = gcs_client.internal_kv_get(key, namespace)
except Exception as e:
if isinstance(e, grpc.RpcError) and e.code() in (
grpc.StatusCode.UNAVAILABLE,
grpc.StatusCode.UNKNOWN,
):
logger.warning(
f"Unable to connect to GCS at {gcs_client.address}. "
"Check that (1) Ray GCS with matching version started "
"successfully at the specified address, and (2) there is "
"no firewall setting preventing access."
)
else:
logger.exception("Internal KV Get failed")
result = None
if result is not None:
break
else:
logger.debug(f"Fetched {key}=None from KV. Retrying.")
time.sleep(2)
if not result:
raise RuntimeError(
f"Could not read '{key.decode()}' from GCS. Did GCS start successfully?"
)
return result
def internal_kv_put_with_retry(gcs_client, key, value, namespace, num_retries=20):
if isinstance(key, str):
key = key.encode()
if isinstance(value, str):
value = value.encode()
if isinstance(namespace, str):
namespace = namespace.encode()
error = None
for _ in range(num_retries):
try:
return gcs_client.internal_kv_put(
key, value, overwrite=True, namespace=namespace
)
except grpc.RpcError as e:
if e.code() in (
grpc.StatusCode.UNAVAILABLE,
grpc.StatusCode.UNKNOWN,
):
logger.warning(
f"Unable to connect to GCS at {gcs_client.address}. "
"Check that (1) Ray GCS with matching version started "
"successfully at the specified address, and (2) there is "
"no firewall setting preventing access."
)
else:
logger.exception("Internal KV Put failed")
time.sleep(2)
error = e
# Reraise the last grpc.RpcError.
raise error
def compute_version_info():
"""Compute the versions of Python, and Ray.
Returns:
A tuple containing the version information.
"""
ray_version = ray.__version__
python_version = ".".join(map(str, sys.version_info[:3]))
return ray_version, python_version
def get_directory_size_bytes(path: Union[str, Path] = ".") -> int:
"""Get the total size of a directory in bytes, including subdirectories."""
total_size_bytes = 0
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
# skip if it is a symbolic link or a .pyc file
if not os.path.islink(fp) and not f.endswith(".pyc"):
total_size_bytes += os.path.getsize(fp)
return total_size_bytes
def check_version_info(cluster_metadata):
"""Check if the Python and Ray versions stored in GCS matches this process.
Args:
cluster_metadata: Ray cluster metadata from GCS.
Raises:
Exception: An exception is raised if there is a version mismatch.
"""
cluster_version_info = (
cluster_metadata["ray_version"],
cluster_metadata["python_version"],
)
version_info = compute_version_info()
if version_info != cluster_version_info:
node_ip_address = ray._private.services.get_node_ip_address()
error_message = (
"Version mismatch: The cluster was started with:\n"
" Ray: " + cluster_version_info[0] + "\n"
" Python: " + cluster_version_info[1] + "\n"
"This process on node " + node_ip_address + " was started with:" + "\n"
" Ray: " + version_info[0] + "\n"
" Python: " + version_info[1] + "\n"
)
raise RuntimeError(error_message)
|
# -*- coding: utf-8 -*-
import re
from cmyui.discord import Webhook
from cmyui.discord import Embed
from objects import glob
from objects.score import Score
from objects.score import Grade
from objects.player import Player
from objects.beatmap import Beatmap
from objects.match import Match
from objects.match import Slot
from constants.gamemodes import GameMode
CHAT_HOOK = glob.config.webhooks['chat-bridge']
SCORE_HOOK = glob.config.webhooks['score-log']
GRADE_EMOTES = {
Grade.XH: "<:grade_xh:833673474836660265>",
Grade.SH: "<:grade_sh:833673474277900318>",
Grade.X: "<:grade_x:833673474270167060>",
Grade.S: "<:grade_s:833673474022572032>",
Grade.A: "<:grade_a:833673433941934091>",
Grade.B: "<:grade_b:833673434122289172>",
Grade.C: "<:grade_c:833673433656721418>",
Grade.D: "<:grade_d:833673433408733194>",
Grade.F: "",
Grade.N: ""
}
GRADE_COLORS = {
Grade.XH: 0xE0F7FA, #Silver SS
Grade.SH: 0xE0F7FA, #Silver S
Grade.X: 0xFFEB3B, #SS
Grade.S: 0xFFEB3B, #S
Grade.A: 0x8BC34A,
Grade.B: 0x2196F3,
Grade.C: 0x9C27B0,
Grade.D: 0xF44336,
Grade.F: 0x212121,
Grade.N: 0x212121
}
MOD_EMOTES = {
'NF': "<:nf:833699841955201114>",
'EZ': "<:ez:833699842118647819>",
'TD': "TD",
'HD': "<:hd:833699841741422642>",
'HR': "<:hr:833699841644691456>",
'SD': "<:sd:833699840999424041>",
'DT': "<:dt:833699841741422645>",
'RX': "<:rx:833699841267597343>",
'HT': "<:ht:833699842022178847>",
'NC': "<:nc:833699841489895488>",
'FL': "<:fl:833699841510211588>",
'AU': "<:au:833699842269642762>",
'SO': "<:so:833699841287782441>",
'AP': "<:ap:833699842177368125>",
'PF': "<:pf:833699841510211585>",
'FI': "FI",
'RN': "RN",
'CN': "<:cn:833699841955201115>",
'TP': "<:tp:833699841288699944>",
'V2': "V2",
'MR': "MR",
'1K': "1K",
'2K': "2K",
'3K': "3K",
'4K': "4K",
'5K': "5K",
'6K': "6K",
'7K': "7K",
'8K': "8K",
'9K': "9K",
'CO': "CO",
}
MODE_EMOTES = {
GameMode.vn_std: "std",
GameMode.vn_taiko: "taiko",
GameMode.vn_catch: "catch",
GameMode.vn_mania: "mania",
GameMode.rx_std: "std (Rx)",
GameMode.rx_taiko: "taiko (Rx)",
GameMode.rx_catch: "catch (Rx)",
GameMode.ap_std: "std (Ap)",
}
def sanitize(m: str):
return m.replace("@", "[@]")
async def sendSubmitScore(s: Score):
wh = Webhook(url=SCORE_HOOK)
diff=[f'{s.sr:.2f}★']
if s.mods:
diff.insert(1, f'({''.join(map(lambda mod: MOD_EMOTES[mod], re.findall('..',repr(s.mods).replace('DTNC','NC'))))})')
e = Embed(title=s.bmap.full, url=f'https://osu.ppy.sh/b/{s.bmap.id}',color=GRADE_COLORS[s.grade])
e.set_author(name=f'{s.player.name} achieved #{s.rank} in {MODE_EMOTES[s.mode]}', url=f'https://osu.catgirl.moe/u/{s.player.id}', icon_url=f'https://a.osu.catgirl.moe/{s.player.id}')
e.add_field("Difficulty:", ' '.join(diff), True)
e.add_field("Accuracy:", f'{s.acc:.2f}% {GRADE_EMOTES[s.grade]} ({s.pp:,.2f}pp)', True)
e.add_field("Score:", f'{s.score:,} ({s.max_combo:,}/{s.bmap.max_combo:,}x)', True)
e.set_image(url=f'https://assets.ppy.sh/beatmaps/{s.bmap.set_id}/covers/cover.jpg')
wh.add_embed(e)
await wh.post(glob.http)
async def sendLogin(p: Player):
wh = Webhook(url=CHAT_HOOK, content=f'📥 **{sanitize(p.name)}** has joined the game.')
await wh.post(glob.http)
async def sendLogout(p: Player):
wh = Webhook(url=CHAT_HOOK, content=f'📤 **{sanitize(p.name)}** has left the game.')
await wh.post(glob.http)
async def sendRankMap(p: Player, b: Beatmap, s: str):
wh = Webhook(url=CHAT_HOOK)
e = Embed(title=b.full, url=f'https://osu.ppy.sh/b/{b.id}', color=0xE91E63)
e.set_author(name=f'{p.name} {s} a map', url=f'https://osu.catgirl.moe/u/{p.id}', icon_url=f'https://a.osu.catgirl.moe/{p.id}')
e.set_image(url=f'https://assets.ppy.sh/beatmaps/{b.set_id}/covers/cover.jpg')
wh.add_embed(e)
await wh.post(glob.http)
async def sendSendMessage(p: Player, m: str):
wh = Webhook(url=CHAT_HOOK, username=p.name, avatar_url=f'https://a.osu.catgirl.moe/{p.id}', content=sanitize(m))
await wh.post(glob.http)
async def sendMatchCreate(p: Player, m: Match):
wh = Webhook(url=CHAT_HOOK, content=f'⭐ **{sanitize(p.name)}** created lobby *"{sanitize(m.name)}"*.')
await wh.post(glob.http)
async def sendMatchJoin(p: Player, m: Match):
wh = Webhook(url=CHAT_HOOK, content=f'➡️ **{sanitize(p.name)}** joined lobby *"{sanitize(m.name)}"*.')
await wh.post(glob.http)
async def sendMatchPart(p: Player, m: Match):
wh = Webhook(url=CHAT_HOOK, content=f'⬅️ **{sanitize(p.name)}** left lobby *"{sanitize(m.name)}"*.')
await wh.post(glob.http)
async def sendMatchComplete(slots: list[Slot], m: Match):
submitted, not_submitted = await m.await_submissions(slots)
print(submitted)
print(not_submitted)
if submitted:
player_names = []
player_accuracy = []
player_scores = []
wh = Webhook(url=CHAT_HOOK)
bmap = next(iter(submitted)).recent_score.bmap
e = Embed(title=bmap.full, url=f'https://osu.ppy.sh/b/{bmap.id}',color=0xF44336)
for p, z in sorted(submitted.items(), key=lambda item: item[0].recent_score.score, reverse=True):
s = p.recent_score
player_names.append(p.name)
player_accuracy.append(f'{s.acc:.2f}% {GRADE_EMOTES[s.grade]} ({s.pp:,.2f}pp)')
player_scores.append(f'{s.score:,} ({s.max_combo:,}/{s.bmap.max_combo:,}x)')
e.set_author(name=f'Lobby "{sanitize(m.name)}" finished a map')
e.add_field("Players:", '\n'.join(player_names), True)
e.add_field("Accuracy:", '\n'.join(player_accuracy), True)
e.add_field("Score:", '\n'.join(player_scores), True)
e.set_image(url=f'https://assets.ppy.sh/beatmaps/{bmap.set_id}/covers/cover.jpg')
wh.add_embed(e)
await wh.post(glob.http)
|
# -*- coding: utf-8 -*-
import re
from cmyui.discord import Webhook
from cmyui.discord import Embed
from objects import glob
from objects.score import Score
from objects.score import Grade
from objects.player import Player
from objects.beatmap import Beatmap
from objects.match import Match
from objects.match import Slot
from constants.gamemodes import GameMode
CHAT_HOOK = glob.config.webhooks['chat-bridge']
SCORE_HOOK = glob.config.webhooks['score-log']
GRADE_EMOTES = {
Grade.XH: "<:grade_xh:833673474836660265>",
Grade.SH: "<:grade_sh:833673474277900318>",
Grade.X: "<:grade_x:833673474270167060>",
Grade.S: "<:grade_s:833673474022572032>",
Grade.A: "<:grade_a:833673433941934091>",
Grade.B: "<:grade_b:833673434122289172>",
Grade.C: "<:grade_c:833673433656721418>",
Grade.D: "<:grade_d:833673433408733194>",
Grade.F: "",
Grade.N: ""
}
GRADE_COLORS = {
Grade.XH: 0xE0F7FA, #Silver SS
Grade.SH: 0xE0F7FA, #Silver S
Grade.X: 0xFFEB3B, #SS
Grade.S: 0xFFEB3B, #S
Grade.A: 0x8BC34A,
Grade.B: 0x2196F3,
Grade.C: 0x9C27B0,
Grade.D: 0xF44336,
Grade.F: 0x212121,
Grade.N: 0x212121
}
MOD_EMOTES = {
'NF': "<:nf:833699841955201114>",
'EZ': "<:ez:833699842118647819>",
'TD': "TD",
'HD': "<:hd:833699841741422642>",
'HR': "<:hr:833699841644691456>",
'SD': "<:sd:833699840999424041>",
'DT': "<:dt:833699841741422645>",
'RX': "<:rx:833699841267597343>",
'HT': "<:ht:833699842022178847>",
'NC': "<:nc:833699841489895488>",
'FL': "<:fl:833699841510211588>",
'AU': "<:au:833699842269642762>",
'SO': "<:so:833699841287782441>",
'AP': "<:ap:833699842177368125>",
'PF': "<:pf:833699841510211585>",
'FI': "FI",
'RN': "RN",
'CN': "<:cn:833699841955201115>",
'TP': "<:tp:833699841288699944>",
'V2': "V2",
'MR': "MR",
'1K': "1K",
'2K': "2K",
'3K': "3K",
'4K': "4K",
'5K': "5K",
'6K': "6K",
'7K': "7K",
'8K': "8K",
'9K': "9K",
'CO': "CO",
}
MODE_EMOTES = {
GameMode.vn_std: "std",
GameMode.vn_taiko: "taiko",
GameMode.vn_catch: "catch",
GameMode.vn_mania: "mania",
GameMode.rx_std: "std (Rx)",
GameMode.rx_taiko: "taiko (Rx)",
GameMode.rx_catch: "catch (Rx)",
GameMode.ap_std: "std (Ap)",
}
def sanitize(m: str):
return m.replace("@", "[@]")
async def sendSubmitScore(s: Score):
wh = Webhook(url=SCORE_HOOK)
diff=[f'{s.sr:.2f}★']
if s.mods:
diff.insert(1, f'({"".join(map(lambda mod: MOD_EMOTES[mod], re.findall("..",repr(s.mods).replace("DTNC","NC"))))})')
e = Embed(title=s.bmap.full, url=f'https://osu.ppy.sh/b/{s.bmap.id}',color=GRADE_COLORS[s.grade])
e.set_author(name=f'{s.player.name} achieved #{s.rank} in {MODE_EMOTES[s.mode]}', url=f'https://osu.catgirl.moe/u/{s.player.id}', icon_url=f'https://a.osu.catgirl.moe/{s.player.id}')
e.add_field("Difficulty:", ' '.join(diff), True)
e.add_field("Accuracy:", f'{s.acc:.2f}% {GRADE_EMOTES[s.grade]} ({s.pp:,.2f}pp)', True)
e.add_field("Score:", f'{s.score:,} ({s.max_combo:,}/{s.bmap.max_combo:,}x)', True)
e.set_image(url=f'https://assets.ppy.sh/beatmaps/{s.bmap.set_id}/covers/cover.jpg')
wh.add_embed(e)
await wh.post(glob.http)
async def sendLogin(p: Player):
wh = Webhook(url=CHAT_HOOK, content=f'📥 **{sanitize(p.name)}** has joined the game.')
await wh.post(glob.http)
async def sendLogout(p: Player):
wh = Webhook(url=CHAT_HOOK, content=f'📤 **{sanitize(p.name)}** has left the game.')
await wh.post(glob.http)
async def sendRankMap(p: Player, b: Beatmap, s: str):
wh = Webhook(url=CHAT_HOOK)
e = Embed(title=b.full, url=f'https://osu.ppy.sh/b/{b.id}', color=0xE91E63)
e.set_author(name=f'{p.name} {s} a map', url=f'https://osu.catgirl.moe/u/{p.id}', icon_url=f'https://a.osu.catgirl.moe/{p.id}')
e.set_image(url=f'https://assets.ppy.sh/beatmaps/{b.set_id}/covers/cover.jpg')
wh.add_embed(e)
await wh.post(glob.http)
async def sendSendMessage(p: Player, m: str):
wh = Webhook(url=CHAT_HOOK, username=p.name, avatar_url=f'https://a.osu.catgirl.moe/{p.id}', content=sanitize(m))
await wh.post(glob.http)
async def sendMatchCreate(p: Player, m: Match):
wh = Webhook(url=CHAT_HOOK, content=f'⭐ **{sanitize(p.name)}** created lobby *"{sanitize(m.name)}"*.')
await wh.post(glob.http)
async def sendMatchJoin(p: Player, m: Match):
wh = Webhook(url=CHAT_HOOK, content=f'➡️ **{sanitize(p.name)}** joined lobby *"{sanitize(m.name)}"*.')
await wh.post(glob.http)
async def sendMatchPart(p: Player, m: Match):
wh = Webhook(url=CHAT_HOOK, content=f'⬅️ **{sanitize(p.name)}** left lobby *"{sanitize(m.name)}"*.')
await wh.post(glob.http)
async def sendMatchComplete(slots: list[Slot], m: Match):
submitted, not_submitted = await m.await_submissions(slots)
print(submitted)
print(not_submitted)
if submitted:
player_names = []
player_accuracy = []
player_scores = []
wh = Webhook(url=CHAT_HOOK)
bmap = next(iter(submitted)).recent_score.bmap
e = Embed(title=bmap.full, url=f'https://osu.ppy.sh/b/{bmap.id}',color=0xF44336)
for p, z in sorted(submitted.items(), key=lambda item: item[0].recent_score.score, reverse=True):
s = p.recent_score
player_names.append(p.name)
player_accuracy.append(f'{s.acc:.2f}% {GRADE_EMOTES[s.grade]} ({s.pp:,.2f}pp)')
player_scores.append(f'{s.score:,} ({s.max_combo:,}/{s.bmap.max_combo:,}x)')
e.set_author(name=f'Lobby "{sanitize(m.name)}" finished a map')
e.add_field("Players:", '\n'.join(player_names), True)
e.add_field("Accuracy:", '\n'.join(player_accuracy), True)
e.add_field("Score:", '\n'.join(player_scores), True)
e.set_image(url=f'https://assets.ppy.sh/beatmaps/{bmap.set_id}/covers/cover.jpg')
wh.add_embed(e)
await wh.post(glob.http)
|
#!/usr/bin/env python3
from time import time
import requests
class RuqqusClient:
def __init__(
self,
client_id,
client_secret,
code=None,
access_token=None,
refresh_token=None,
):
self.headers = {}
self.url = 'https://ruqqus.com'
self.client_id = client_id
self.client_secret = client_secret
self.access_token = access_token
self._refresh_token = refresh_token
self.code = code
self.user_agent = f'python-ruqqus-{self.client_id}'
self.token_expire_utc = 0
self.header = {}
self.refresh_headers()
if not self.client_id or not self.client_secret:
exit("You must provide both a 'client_id' and 'client_secret")
elif (
self.client_id
and self.client_secret
and not self.code
and not self.access_token
):
if refresh_token:
self.refresh_token()
else:
exit(
"You must provide either a 'code', 'access_token', "
"or a 'refresh_token'."
)
elif (
self.client_id
and self.client_secret
and self.code
and not self.access_token
):
if self._refresh_token:
self.refresh_token()
else:
self.get_access_token()
def admin(self):
raise NotImplementedError()
def mod(self):
raise NotImplementedError()
def identity(self):
self.refresh_headers()
return requests.get(
url=f'{self.url}/api/v1/identity', headers=self.headers
).json()
def user(self, username=None, type=None):
self.refresh_headers()
if not username:
return {'error': 'You must provide a username.'}
if type:
type = str(type).lower()
# Default to user
if not type or type == 'user':
return requests.get(
url=f'{self.url}/api/v1/user/{username}', headers=self.headers
).json()
elif type == 'is_available':
return requests.get(
url=f'{self.url}/api/v1/is_available/{username}',
headers=self.headers,
).json()
elif type == 'sub':
return requests.post(
url=f'{self.url}/api/follow/{username}', headers=self.headers
).json()
elif type == 'unsub':
return requests.post(
url=f'{self.url}/api/unfollow/{username}', headers=self.headers
).json()
else:
return {'error': 'Invalid Call'}
def guild(self, name=None, type=None):
self.refresh_headers()
if not name:
return {'error': 'You must provide a guildName.'}
if type:
type = str(type).lower()
        # Default to guild
        if not type or type == 'guild':
            return requests.get(
                url=f'{self.url}/api/v1/guild/{name}',
                headers=self.headers,
            ).json()
        elif type == 'is_available':
return requests.get(
url=f'{self.url}/api/v1/board_available/{name}',
headers=self.headers,
).json()
elif type == 'sub':
return requests.post(
url=f'{self.url}/api/v1/subscribe/{name}',
headers=self.headers,
).json()
elif type == 'unsub':
return requests.post(
url=f'{self.url}/api/v1/unsubscribe/{name}',
headers=self.headers,
).json()
else:
return {'error': 'Invalid Call'}
def submit_post(self, *, guild, title, url):
self.refresh_headers()
return requests.post(
url=f'{self.url}/api/v1/submit',
headers=self.headers,
data={
'board': guild,
'title': title,
'url': url,
},
).json()
def get_guild_posts(self, *, name, page=1, sort='hot'):
self.refresh_headers()
url = f'{self.url}/api/v1/guild/{name.lstrip('+')}/listing'
response = requests.get(
url=url,
params={'page': page, 'sort': sort},
headers=self.headers,
)
response.raise_for_status()
return response.json()
def get(
self,
type=None,
sort=None,
time=None,
guild_name=None,
username=None,
post_id=None,
comment_id=None,
):
self.refresh_headers()
if not type:
return {'error': "You must specify which 'type' of get to use"}
else:
type = str(type).lower()
if time:
time = str(time).lower()
if time not in ['day', 'week', 'month', 'year']:
return {'error': 'Invalid time parameter.'}
if sort:
sort = str(sort).lower()
if sort not in ['top', 'hot', 'disputed', 'activity', 'new']:
return {'error': 'Invalid sort parameter.'}
if type == 'front':
if sort:
if time:
return requests.get(
url=(
f'{self.url}/api/v1/front/listing'
f'?sort={sort}&time={time}'
),
headers=self.headers,
).json()
return requests.get(
url=f'{self.url}/api/v1/front/listing?sort={sort}',
headers=self.headers,
).json()
return requests.get(
url=f'{self.url}/api/v1/front/listing', headers=self.headers
).json()
elif type == 'guild':
if not guild_name:
return {'error': 'You must provide a guildName'}
else:
guild_name = str(guild_name)
if sort:
if time:
return requests.get(
url=(
f'{self.url}/api/v1/guild/{guild_name}/listing'
f'?sort={sort}&time={time}'
),
headers=self.headers,
).json()
return requests.get(
url=(
f'{self.url}/api/v1/guild/{guild_name}/listing'
f'?sort={sort}'
),
headers=self.headers,
).json()
return requests.get(
url=f'{self.url}/api/v1/guild/{guild_name}/listing',
headers=self.headers,
).json()
elif type == 'user':
if not username:
return {'error': 'You must provide a userName.'}
else:
username = str(username)
return requests.get(
url=f'{self.url}/api/v1/user/{username}', headers=self.headers
).json()
elif type == 'post':
if not post_id:
return {'error': 'You must provide a postId.'}
else:
post_id = str(post_id)
return requests.get(
url=f'{self.url}/api/v1/post/{post_id}', headers=self.headers
).json()
elif type == 'comment':
if not comment_id:
return {'error': 'You must provide a commentId.'}
else:
comment_id = str(comment_id)
return requests.get(
url=f'{self.url}/api/v1/comment/{comment_id}',
headers=self.headers,
).json()
else:
return {'error': 'Invalid Call'}
def refresh_headers(self, user_agent=None, access_token=None):
if self.access_token:
self.headers = {'Authorization': 'Bearer ' + self.access_token}
elif access_token:
self.headers = {'Authorization': 'Bearer ' + access_token}
else:
return {'error': 'You must provide an accessToken.'}
if user_agent:
self.header['user-agent'] = user_agent
self.user_agent = user_agent
elif self.user_agent:
self.header['user-agent'] = self.user_agent
else:
return {'error': 'You must provide a user-agent.'}
# refresh token 30 seconds before expiration
if self._refresh_token and self.token_expire_utc >= int(time() - 30):
self.refresh_token()
def refresh_token(self, refresh_token=None):
data = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'refresh',
}
if not self._refresh_token:
if refresh_token:
data['refresh_token'] = refresh_token
else:
data['refresh_token'] = self._refresh_token
r = requests.post(
url=f'{self.url}/oauth/grant', headers=self.headers, data=data
).json()
self.access_token = r['access_token']
return r
def get_access_token(self):
self.refresh_headers()
data = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'code',
'code': self.code,
}
r = requests.post(
url=f'{self.url}/oauth/grant', headers=self.headers, data=data
).json()
self.access_token = r['access_token']
self._refresh_token = r['refresh_token']
self.token_expire_utc = r['expires_at']
return r
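# Usage sketch (editor's addition): the credentials below are placeholders.
# With a stored refresh token the client refreshes its access token during
# construction, and the helper methods return parsed JSON dicts.
def _example_ruqqus_usage():
    client = RuqqusClient(
        client_id='YOUR_CLIENT_ID',
        client_secret='YOUR_CLIENT_SECRET',
        refresh_token='YOUR_REFRESH_TOKEN',
    )
    me = client.identity()
    posts = client.get_guild_posts(name='+ruqqus', page=1, sort='new')
    return me, posts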
|
#!/usr/bin/env python3
from time import time
import requests
class RuqqusClient:
def __init__(
self,
client_id,
client_secret,
code=None,
access_token=None,
refresh_token=None,
):
self.headers = {}
self.url = 'https://ruqqus.com'
self.client_id = client_id
self.client_secret = client_secret
self.access_token = access_token
self._refresh_token = refresh_token
self.code = code
self.user_agent = f'python-ruqqus-{self.client_id}'
self.token_expire_utc = 0
self.header = {}
self.refresh_headers()
if not self.client_id or not self.client_secret:
exit("You must provide both a 'client_id' and 'client_secret")
elif (
self.client_id
and self.client_secret
and not self.code
and not self.access_token
):
if refresh_token:
self.refresh_token()
else:
exit(
"You must provide either a 'code', 'access_token', "
"or a 'refresh_token'."
)
elif (
self.client_id
and self.client_secret
and self.code
and not self.access_token
):
if self._refresh_token:
self.refresh_token()
else:
self.get_access_token()
def admin(self):
raise NotImplementedError()
def mod(self):
raise NotImplementedError()
def identity(self):
self.refresh_headers()
return requests.get(
url=f'{self.url}/api/v1/identity', headers=self.headers
).json()
def user(self, username=None, type=None):
self.refresh_headers()
if not username:
return {'error': 'You must provide a username.'}
if type:
type = str(type).lower()
# Default to user
if not type or type == 'user':
return requests.get(
url=f'{self.url}/api/v1/user/{username}', headers=self.headers
).json()
elif type == 'is_available':
return requests.get(
url=f'{self.url}/api/v1/is_available/{username}',
headers=self.headers,
).json()
elif type == 'sub':
return requests.post(
url=f'{self.url}/api/follow/{username}', headers=self.headers
).json()
elif type == 'unsub':
return requests.post(
url=f'{self.url}/api/unfollow/{username}', headers=self.headers
).json()
else:
return {'error': 'Invalid Call'}
def guild(self, name=None, type=None):
self.refresh_headers()
if not name:
return {'error': 'You must provide a guildName.'}
if type:
type = str(type).lower()
        # Default to guild
        if not type or type == 'guild':
            return requests.get(
                url=f'{self.url}/api/v1/guild/{name}',
                headers=self.headers,
            ).json()
        elif type == 'is_available':
return requests.get(
url=f'{self.url}/api/v1/board_available/{name}',
headers=self.headers,
).json()
elif type == 'sub':
return requests.post(
url=f'{self.url}/api/v1/subscribe/{name}',
headers=self.headers,
).json()
elif type == 'unsub':
return requests.post(
url=f'{self.url}/api/v1/unsubscribe/{name}',
headers=self.headers,
).json()
else:
return {'error': 'Invalid Call'}
def submit_post(self, *, guild, title, url):
self.refresh_headers()
return requests.post(
url=f'{self.url}/api/v1/submit',
headers=self.headers,
data={
'board': guild,
'title': title,
'url': url,
},
).json()
def get_guild_posts(self, *, name, page=1, sort='hot'):
self.refresh_headers()
url = f'{self.url}/api/v1/guild/{name.lstrip("+")}/listing'
response = requests.get(
url=url,
params={'page': page, 'sort': sort},
headers=self.headers,
)
response.raise_for_status()
return response.json()
def get(
self,
type=None,
sort=None,
time=None,
guild_name=None,
username=None,
post_id=None,
comment_id=None,
):
self.refresh_headers()
if not type:
return {'error': "You must specify which 'type' of get to use"}
else:
type = str(type).lower()
if time:
time = str(time).lower()
if time not in ['day', 'week', 'month', 'year']:
return {'error': 'Invalid time parameter.'}
if sort:
sort = str(sort).lower()
if sort not in ['top', 'hot', 'disputed', 'activity', 'new']:
return {'error': 'Invalid sort parameter.'}
if type == 'front':
if sort:
if time:
return requests.get(
url=(
f'{self.url}/api/v1/front/listing'
f'?sort={sort}&time={time}'
),
headers=self.headers,
).json()
return requests.get(
url=f'{self.url}/api/v1/front/listing?sort={sort}',
headers=self.headers,
).json()
return requests.get(
url=f'{self.url}/api/v1/front/listing', headers=self.headers
).json()
elif type == 'guild':
if not guild_name:
return {'error': 'You must provide a guildName'}
else:
guild_name = str(guild_name)
if sort:
if time:
return requests.get(
url=(
f'{self.url}/api/v1/guild/{guild_name}/listing'
f'?sort={sort}&time={time}'
),
headers=self.headers,
).json()
return requests.get(
url=(
f'{self.url}/api/v1/guild/{guild_name}/listing'
f'?sort={sort}'
),
headers=self.headers,
).json()
return requests.get(
url=f'{self.url}/api/v1/guild/{guild_name}/listing',
headers=self.headers,
).json()
elif type == 'user':
if not username:
return {'error': 'You must provide a userName.'}
else:
username = str(username)
return requests.get(
url=f'{self.url}/api/v1/user/{username}', headers=self.headers
).json()
elif type == 'post':
if not post_id:
return {'error': 'You must provide a postId.'}
else:
post_id = str(post_id)
return requests.get(
url=f'{self.url}/api/v1/post/{post_id}', headers=self.headers
).json()
elif type == 'comment':
if not comment_id:
return {'error': 'You must provide a commentId.'}
else:
comment_id = str(comment_id)
return requests.get(
url=f'{self.url}/api/v1/comment/{comment_id}',
headers=self.headers,
).json()
else:
return {'error': 'Invalid Call'}
def refresh_headers(self, user_agent=None, access_token=None):
if self.access_token:
self.headers = {'Authorization': 'Bearer ' + self.access_token}
elif access_token:
self.headers = {'Authorization': 'Bearer ' + access_token}
else:
return {'error': 'You must provide an accessToken.'}
if user_agent:
self.header['user-agent'] = user_agent
self.user_agent = user_agent
elif self.user_agent:
self.header['user-agent'] = self.user_agent
else:
return {'error': 'You must provide a user-agent.'}
# refresh token 30 seconds before expiration
if self._refresh_token and self.token_expire_utc >= int(time() - 30):
self.refresh_token()
def refresh_token(self, refresh_token=None):
data = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'refresh',
}
if not self._refresh_token:
if refresh_token:
data['refresh_token'] = refresh_token
else:
data['refresh_token'] = self._refresh_token
r = requests.post(
url=f'{self.url}/oauth/grant', headers=self.headers, data=data
).json()
self.access_token = r['access_token']
return r
def get_access_token(self):
self.refresh_headers()
data = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'code',
'code': self.code,
}
r = requests.post(
url=f'{self.url}/oauth/grant', headers=self.headers, data=data
).json()
self.access_token = r['access_token']
self._refresh_token = r['refresh_token']
self.token_expire_utc = r['expires_at']
return r
|
"""NLP Dataset"""
import os
import re
from typing import List, Union, Dict, Tuple
import nltk
import unicodedata
import numpy as np
from dlex.configs import ModuleConfigs
from dlex.utils.logging import logger
# nltk.download('punkt')
# Turn a Unicode string to plain ASCII, thanks to
# https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
def load_tkn_to_idx(filename):
tkn_to_idx = {}
fo = open(filename, encoding='utf-8')
for line in fo:
line = line.strip()
if line == "":
continue
tkn_to_idx[line] = len(tkn_to_idx)
fo.close()
return tkn_to_idx
def normalize_lower(sentence: str):
return sentence.strip().lower()
def normalize_lower_alphanumeric(sentence: str):
s = sentence.strip().lower()
s = re.sub("[^a-z0-9\uAC00-\uD7A3]+", " ", s)
return s
def normalize_string_ascii(sentence):
"""
:param str sentence:
:return: normalized sentence, separated by space
:rtype str
"""
# x = re.sub("[^ a-zA-Z0-9\uAC00-\uD7A3]+", " ", x)
# x = re.sub("[\u3040-\u30FF]+", "\u3042", x) # convert Hiragana and Katakana to あ
# x = re.sub("[\u4E00-\u9FFF]+", "\u6F22", x) # convert CJK unified ideographs to 漢
sent = unicodeToAscii(sentence.lower().strip())
sent = re.sub(r"([.!?,])", r" \1", sent)
sent = re.sub(r"[^a-zA-Z.!?,]+", r" ", sent)
sent = re.sub(r"\s+", " ", sent)
sent = re.sub("^ | $", "", sent)
words = sent.split(' ')
ret = []
for word in words:
ret.append(normalize_word(word))
return ' '.join(ret)
def normalize_string(sentence):
"""
:param str sentence:
:return: normalized sentence, separated by space
:rtype str
"""
# x = re.sub("[^ a-zA-Z0-9\uAC00-\uD7A3]+", " ", x)
# x = re.sub("[\u3040-\u30FF]+", "\u3042", x) # convert Hiragana and Katakana to あ
# x = re.sub("[\u4E00-\u9FFF]+", "\u6F22", x) # convert CJK unified ideographs to 漢
sentence = re.sub(r"([\.!?,\";\(\)])\'", r" \1", sentence)
# sent = re.sub(r"[^a-zA-Z.!?,]+", r" ", sent)
sentence = re.sub(r"\s+", " ", sentence)
sentence = re.sub("^ | $", "", sentence)
words = sentence.split(' ')
ret = []
for word in words:
ret.append(normalize_word(word))
return ' '.join(ret)
def normalize_word(word):
punctuations = [',', '.', '-', '"', ':', '!', '(', ')', '...', '?']
if word in ',.!?':
return word
elif word in punctuations:
return '<punc>'
elif any('0' <= c <= '9' for c in word):
return '<non-word>'
else:
return word.lower()
def normalize_none(s):
return s
def nltk_tokenize(s):
return nltk.word_tokenize(s)
class Tokenizer:
def __init__(self, normalize_fn=None, tokenize_fn=None):
self.normalize_fn = normalize_fn
self.tokenize_fn = tokenize_fn
def process(self, s):
s = self.normalize_fn(s)
s = self.tokenize_fn(s)
return s
spacy_nlp = None
def spacy_tokenize(s):
import spacy
from spacy.symbols import ORTH
global spacy_nlp
if spacy_nlp is None:
# sputnik.install('spacy', spacy.about.__version__, 'en_default', data_path=ModuleConfigs.get_tmp_path())
spacy_nlp = spacy.load('en_core_web_sm', via=ModuleConfigs.get_tmp_path())
spacy_nlp.tokenizer.add_special_case('<eos>', [{ORTH: '<eos>'}])
spacy_nlp.tokenizer.add_special_case('<bos>', [{ORTH: '<bos>'}])
spacy_nlp.tokenizer.add_special_case('<unk>', [{ORTH: '<unk>'}])
return [_s.text for _s in spacy_nlp.tokenizer(s)]
def normalize_char(char):
return char.lower().replace(' ', '_')
def space_tokenize(s):
return s.split(' ')
def char_tokenize(s: str):
s = s.replace(" ", "_")
return list(s)
def mecab_tokenize(s):
import MeCab
wakati = MeCab.Tagger("-Owakati")
return wakati.parse(s).split()
def write_vocab(
text: Union[str, List[str], List[List[str]]],
output_path: str,
tokenizer: Tokenizer = None,
min_freq=0,
specials=None):
"""
:param text: text or list of sentences
:param output_path:
:param tokenizer: if tokenizer is None, tokens are separated by space
:param min_freq:
:param specials:
:return:
"""
if tokenizer is None:
tokenizer = Tokenizer(normalize_none, space_tokenize)
if specials is None:
specials = ['<pad>', '<sos>', '<eos>', '<oov>']
word_freqs = {}
if isinstance(text, str):
text = [text]
for sent in text:
if isinstance(sent, str):
# if normalize_fn is not None:
# s = normalize_fn(sent.replace('_', ' '))
# else:
# s = sent
# ls = char_tokenize(s) if token == 'char' else space_tokenize(s)
sent = tokenizer.process(sent)
for word in sent:
if word.strip() == '':
continue
if word in word_freqs:
word_freqs[word] += 1
else:
word_freqs[word] = 1
words = list([word for word in word_freqs if word_freqs[word] > min_freq])
words.sort(key=lambda word: word_freqs[word], reverse=True)
with open(output_path, "w", encoding='utf-8') as fo:
fo.write('\n'.join(specials) + '\n')
fo.write("\n".join(words))
logger.info("Vocab written to %s (%d tokens)", output_path, len(specials) + len(words))
def get_token_id(vocab, word):
"""
:type vocab: Vocab
:type word: str
:rtype: int
"""
if word in vocab:
return vocab[word]
else:
if '<oov>' in vocab:
return vocab['<oov>']
elif '<unk>' in vocab:
return vocab['<unk>']
else:
raise Exception("No out-of-vocabulary token found.")
class Vocab:
def __init__(self, index2token: List[str] = None, token2index: Dict[str, int] = None):
if index2token is None:
self._token2index = {}
self._index2token = []
else:
self._index2token = index2token
if token2index:
self._token2index = token2index
else:
self._token2index = {token: idx for idx, token in enumerate(index2token)}
self.embeddings = None
self.embedding_dim = None
@classmethod
def from_file(cls, file_name):
index2token = []
fo = open(file_name, encoding='utf-8')
for line in fo:
line = line.strip()
if line == "":
continue
index2token.append(line)
fo.close()
return cls(index2token)
def __getitem__(self, token: str) -> int:
return self._token2index[token] if token in self._token2index else self.oov_token_idx
def tolist(self) -> List[str]:
return self._index2token
def get_token_id(self, token):
return self[token] or self.oov_token_idx
def add_token(self, token: str):
if token not in self._token2index:
self._token2index[token] = len(self._token2index)
self._index2token.append(token)
def __len__(self):
return len(self._token2index)
def get_token(self, idx: int) -> str:
return self._index2token[idx]
def decode_idx_list(self, ls: List[int], ignore: List[int] = None, stop_at: int = None) -> List[str]:
ret = []
for idx in ls:
if stop_at and idx == stop_at:
break
elif ignore and idx in ignore:
continue
else:
ret.append(self.get_token(idx))
return ret
def encode_token_list(self, ls: List[str]) -> List[int]:
return [self.get_token_id(token) for token in ls]
@property
def sos_token_idx(self) -> int:
idx = self['<sos>'] or self['<s>']
assert idx is not None
return idx
@property
def eos_token_idx(self) -> int:
idx = self['<eos>'] or self['</s>']
assert idx is not None
return idx
@property
def blank_token_idx(self):
idx = self['<blank>'] or self['<pad>']
assert idx is not None
return idx
@property
def oov_token_idx(self) -> int:
if '<oov>' in self._token2index:
return self._token2index['<oov>']
elif '<unk>' in self._token2index:
return self._token2index['<unk>']
else:
raise Exception("<oov> token not found.")
def get_specials(self):
return [token for token in self._index2token if token.startswith('<')]
def init_pretrained_embeddings(
self,
pretrained: str,
emb_name: str = None,
dim: int = None) -> np.ndarray:
if pretrained == 'glove':
from torchtext.vocab import GloVe
dim = dim or 300
vocab = GloVe(
name=emb_name or '840B', dim=dim,
cache=os.path.join(ModuleConfigs.get_tmp_path(), "torchtext"))
elif pretrained == 'fasttext':
from torchtext.vocab import FastText
vocab = FastText()
else:
raise ValueError("Pre-trained embeddings not found.")
vectors = vocab.vectors
oovs = []
embeddings = np.zeros([len(self), dim])
for idx, t in enumerate(self._index2token):
_t = t.lower()
            if _t in vocab.stoi:
                embeddings[idx, :] = vectors[vocab.stoi[_t]].cpu().numpy()
            elif all(token in vocab.stoi for token in _t.split(' ')):
                # Multi-word token: use the element-wise sum of its word vectors.
                embeddings[idx, :] = np.sum(
                    [vectors[vocab.stoi[token]].cpu().numpy() for token in _t.split(' ')], axis=0)
            else:
                oovs.append(_t)
if oovs:
logger.warning(f"{len(oovs)} tokens not found in pre-trained embeddings: {", ".join(oovs)}")
logger.debug(f"Load embeddings: {pretrained} (no. embeddings: {len(self) - len(oovs):,})")
self.embedding_dim = dim
self.embeddings = embeddings
def get_token_embedding(self, token: str) -> np.ndarray:
if self.embeddings is None:
raise ValueError('Embeddings are not initialized')
return self.embeddings[self.get_token_id(token)]
def embed_token_list(self, ls):
emb = np.zeros(self.embedding_dim)
for token in ls:
emb += self.get_token_embedding(token)
return emb
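# Usage sketch (editor's addition): round-tripping tokens through the Vocab
# class above; the token list is invented for illustration.
def _example_vocab_roundtrip():
    vocab = Vocab(index2token=['<pad>', '<sos>', '<eos>', '<oov>', 'hello', 'world'])
    ids = vocab.encode_token_list(['hello', 'some_unknown_token'])  # -> [4, 3]; OOV maps to '<oov>'
    tokens = vocab.decode_idx_list(ids)  # -> ['hello', '<oov>']
    return ids, tokens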
def load_embeddings(
pretrained: str,
emb_name: str = None,
dim: int = None,
vocab_size: int = None,
tokens: List[str] = None,
specials: List[str] = None) -> Tuple[np.ndarray, Vocab]:
"""
Load pre-trained embedding defined in dataset.embeddings
:param tokens: if specified, only load embeddings of these tokens
:param specials: special tokens
:return:
"""
if not pretrained:
assert dim is not None
assert vocab_size is not None
return np.random.rand(vocab_size, dim), None
elif pretrained.lower() in ["glove", "fasttext"]:
if pretrained.lower() == 'glove':
from torchtext.vocab import GloVe
vocab = GloVe(
name=emb_name, dim=dim,
cache=os.path.join(ModuleConfigs.get_tmp_path(), "torchtext"))
elif pretrained.lower() == 'fasttext':
from torchtext.vocab import FastText
vocab = FastText()
else:
raise ValueError("Pre-trained embeddings not found.")
vectors = vocab.vectors
index2token = vocab.itos
token2index = None
if tokens: # limit vocabulary to list of tokens
num_oovs = 0
keep = []
index2token = []
token2index = {}
for t in tokens:
_t = t.lower()
if _t in token2index:
if t not in token2index:
token2index[t] = token2index[_t]
elif _t in vocab.stoi:
keep.append(vocab.stoi[_t.lower()])
token2index[_t] = len(index2token)
token2index[t] = len(index2token)
index2token.append(_t)
else:
num_oovs += 1
vectors = vectors[keep]
if num_oovs:
logger.warning(f"{num_oovs} tokens not found in pre-trained embeddings")
logger.debug(f"Load embeddings: {pretrained} (no. embeddings: {len(index2token):,})")
        if specials is not None:
            # Append each special token once and give it a random vector.
            import torch
            for s in specials:
                token2index[s] = len(index2token)
                index2token.append(s)
            vectors = torch.cat([vectors, torch.rand(len(specials), len(vectors[0]))])
# return nn.Embedding.from_pretrained(vectors, freeze=emb.freeze or True), Vocab(index2token, token2index)
return vectors, Vocab(index2token, token2index)
else:
raise ValueError(f"{pretrained} is not supported.")
|
"""NLP Dataset"""
import os
import re
from typing import List, Union, Dict, Tuple
import nltk
import unicodedata
import numpy as np
from dlex.configs import ModuleConfigs
from dlex.utils.logging import logger
# nltk.download('punkt')
# Turn a Unicode string to plain ASCII, thanks to
# https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
def load_tkn_to_idx(filename):
tkn_to_idx = {}
fo = open(filename, encoding='utf-8')
for line in fo:
line = line.strip()
if line == "":
continue
tkn_to_idx[line] = len(tkn_to_idx)
fo.close()
return tkn_to_idx
def normalize_lower(sentence: str):
return sentence.strip().lower()
def normalize_lower_alphanumeric(sentence: str):
s = sentence.strip().lower()
s = re.sub("[^a-z0-9\uAC00-\uD7A3]+", " ", s)
return s
def normalize_string_ascii(sentence):
"""
:param str sentence:
:return: normalized sentence, separated by space
:rtype str
"""
# x = re.sub("[^ a-zA-Z0-9\uAC00-\uD7A3]+", " ", x)
# x = re.sub("[\u3040-\u30FF]+", "\u3042", x) # convert Hiragana and Katakana to あ
# x = re.sub("[\u4E00-\u9FFF]+", "\u6F22", x) # convert CJK unified ideographs to 漢
sent = unicodeToAscii(sentence.lower().strip())
sent = re.sub(r"([.!?,])", r" \1", sent)
sent = re.sub(r"[^a-zA-Z.!?,]+", r" ", sent)
sent = re.sub(r"\s+", " ", sent)
sent = re.sub("^ | $", "", sent)
words = sent.split(' ')
ret = []
for word in words:
ret.append(normalize_word(word))
return ' '.join(ret)
def normalize_string(sentence):
"""
:param str sentence:
:return: normalized sentence, separated by space
:rtype str
"""
# x = re.sub("[^ a-zA-Z0-9\uAC00-\uD7A3]+", " ", x)
# x = re.sub("[\u3040-\u30FF]+", "\u3042", x) # convert Hiragana and Katakana to あ
# x = re.sub("[\u4E00-\u9FFF]+", "\u6F22", x) # convert CJK unified ideographs to 漢
sentence = re.sub(r"([\.!?,\";\(\)])\'", r" \1", sentence)
# sent = re.sub(r"[^a-zA-Z.!?,]+", r" ", sent)
sentence = re.sub(r"\s+", " ", sentence)
sentence = re.sub("^ | $", "", sentence)
words = sentence.split(' ')
ret = []
for word in words:
ret.append(normalize_word(word))
return ' '.join(ret)
def normalize_word(word):
punctuations = [',', '.', '-', '"', ':', '!', '(', ')', '...', '?']
if word in ',.!?':
return word
elif word in punctuations:
return '<punc>'
elif any('0' <= c <= '9' for c in word):
return '<non-word>'
else:
return word.lower()
def normalize_none(s):
return s
def nltk_tokenize(s):
return nltk.word_tokenize(s)
class Tokenizer:
def __init__(self, normalize_fn=None, tokenize_fn=None):
self.normalize_fn = normalize_fn
self.tokenize_fn = tokenize_fn
def process(self, s):
s = self.normalize_fn(s)
s = self.tokenize_fn(s)
return s
spacy_nlp = None
def spacy_tokenize(s):
import spacy
from spacy.symbols import ORTH
global spacy_nlp
if spacy_nlp is None:
# sputnik.install('spacy', spacy.about.__version__, 'en_default', data_path=ModuleConfigs.get_tmp_path())
spacy_nlp = spacy.load('en_core_web_sm', via=ModuleConfigs.get_tmp_path())
spacy_nlp.tokenizer.add_special_case('<eos>', [{ORTH: '<eos>'}])
spacy_nlp.tokenizer.add_special_case('<bos>', [{ORTH: '<bos>'}])
spacy_nlp.tokenizer.add_special_case('<unk>', [{ORTH: '<unk>'}])
return [_s.text for _s in spacy_nlp.tokenizer(s)]
def normalize_char(char):
return char.lower().replace(' ', '_')
def space_tokenize(s):
return s.split(' ')
def char_tokenize(s: str):
s = s.replace(" ", "_")
return list(s)
def mecab_tokenize(s):
import MeCab
wakati = MeCab.Tagger("-Owakati")
return wakati.parse(s).split()
def write_vocab(
text: Union[str, List[str], List[List[str]]],
output_path: str,
tokenizer: Tokenizer = None,
min_freq=0,
specials=None):
"""
:param text: text or list of sentences
:param output_path:
:param tokenizer: if tokenizer is None, tokens are separated by space
:param min_freq:
:param specials:
:return:
"""
if tokenizer is None:
tokenizer = Tokenizer(normalize_none, space_tokenize)
if specials is None:
specials = ['<pad>', '<sos>', '<eos>', '<oov>']
word_freqs = {}
if isinstance(text, str):
text = [text]
for sent in text:
if isinstance(sent, str):
# if normalize_fn is not None:
# s = normalize_fn(sent.replace('_', ' '))
# else:
# s = sent
# ls = char_tokenize(s) if token == 'char' else space_tokenize(s)
sent = tokenizer.process(sent)
for word in sent:
if word.strip() == '':
continue
if word in word_freqs:
word_freqs[word] += 1
else:
word_freqs[word] = 1
words = list([word for word in word_freqs if word_freqs[word] > min_freq])
words.sort(key=lambda word: word_freqs[word], reverse=True)
with open(output_path, "w", encoding='utf-8') as fo:
fo.write('\n'.join(specials) + '\n')
fo.write("\n".join(words))
logger.info("Vocab written to %s (%d tokens)", output_path, len(specials) + len(words))
def get_token_id(vocab, word):
"""
:type vocab: Vocab
:type word: str
:rtype: int
"""
if word in vocab:
return vocab[word]
else:
if '<oov>' in vocab:
return vocab['<oov>']
elif '<unk>' in vocab:
return vocab['<unk>']
else:
raise Exception("No out-of-vocabulary token found.")
class Vocab:
def __init__(self, index2token: List[str] = None, token2index: Dict[str, int] = None):
if index2token is None:
self._token2index = {}
self._index2token = []
else:
self._index2token = index2token
if token2index:
self._token2index = token2index
else:
self._token2index = {token: idx for idx, token in enumerate(index2token)}
self.embeddings = None
self.embedding_dim = None
@classmethod
def from_file(cls, file_name):
index2token = []
fo = open(file_name, encoding='utf-8')
for line in fo:
line = line.strip()
if line == "":
continue
index2token.append(line)
fo.close()
return cls(index2token)
def __getitem__(self, token: str) -> int:
return self._token2index[token] if token in self._token2index else self.oov_token_idx
def tolist(self) -> List[str]:
return self._index2token
def get_token_id(self, token):
return self[token] or self.oov_token_idx
def add_token(self, token: str):
if token not in self._token2index:
self._token2index[token] = len(self._token2index)
self._index2token.append(token)
def __len__(self):
return len(self._token2index)
def get_token(self, idx: int) -> str:
return self._index2token[idx]
def decode_idx_list(self, ls: List[int], ignore: List[int] = None, stop_at: int = None) -> List[str]:
ret = []
for idx in ls:
if stop_at and idx == stop_at:
break
elif ignore and idx in ignore:
continue
else:
ret.append(self.get_token(idx))
return ret
def encode_token_list(self, ls: List[str]) -> List[int]:
return [self.get_token_id(token) for token in ls]
@property
def sos_token_idx(self) -> int:
idx = self['<sos>'] or self['<s>']
assert idx is not None
return idx
@property
def eos_token_idx(self) -> int:
idx = self['<eos>'] or self['</s>']
assert idx is not None
return idx
@property
def blank_token_idx(self):
idx = self['<blank>'] or self['<pad>']
assert idx is not None
return idx
@property
def oov_token_idx(self) -> int:
if '<oov>' in self._token2index:
return self._token2index['<oov>']
elif '<unk>' in self._token2index:
return self._token2index['<unk>']
else:
raise Exception("<oov> token not found.")
def get_specials(self):
return [token for token in self._index2token if token.startswith('<')]
def init_pretrained_embeddings(
self,
pretrained: str,
emb_name: str = None,
dim: int = None) -> np.ndarray:
if pretrained == 'glove':
from torchtext.vocab import GloVe
dim = dim or 300
vocab = GloVe(
name=emb_name or '840B', dim=dim,
cache=os.path.join(ModuleConfigs.get_tmp_path(), "torchtext"))
elif pretrained == 'fasttext':
from torchtext.vocab import FastText
vocab = FastText()
else:
raise ValueError("Pre-trained embeddings not found.")
vectors = vocab.vectors
oovs = []
embeddings = np.zeros([len(self), dim])
for idx, t in enumerate(self._index2token):
_t = t.lower()
            if _t in vocab.stoi:
                embeddings[idx, :] = vectors[vocab.stoi[_t]].cpu().numpy()
            elif all(token in vocab.stoi for token in _t.split(' ')):
                # Multi-word token: use the element-wise sum of its word vectors.
                embeddings[idx, :] = np.sum(
                    [vectors[vocab.stoi[token]].cpu().numpy() for token in _t.split(' ')], axis=0)
            else:
                oovs.append(_t)
if oovs:
logger.warning(f"{len(oovs)} tokens not found in pre-trained embeddings: {', '.join(oovs)}")
logger.debug(f"Load embeddings: {pretrained} (no. embeddings: {len(self) - len(oovs):,})")
self.embedding_dim = dim
self.embeddings = embeddings
def get_token_embedding(self, token: str) -> np.ndarray:
if self.embeddings is None:
raise ValueError('Embeddings are not initialized')
return self.embeddings[self.get_token_id(token)]
def embed_token_list(self, ls):
emb = np.zeros(self.embedding_dim)
for token in ls:
emb += self.get_token_embedding(token)
return emb
def load_embeddings(
pretrained: str,
emb_name: str = None,
dim: int = None,
vocab_size: int = None,
tokens: List[str] = None,
specials: List[str] = None) -> Tuple[np.ndarray, Vocab]:
"""
Load pre-trained embedding defined in dataset.embeddings
:param tokens: if specified, only load embeddings of these tokens
:param specials: special tokens
:return:
"""
if not pretrained:
assert dim is not None
assert vocab_size is not None
return np.random.rand(vocab_size, dim), None
elif pretrained.lower() in ["glove", "fasttext"]:
if pretrained.lower() == 'glove':
from torchtext.vocab import GloVe
vocab = GloVe(
name=emb_name, dim=dim,
cache=os.path.join(ModuleConfigs.get_tmp_path(), "torchtext"))
elif pretrained.lower() == 'fasttext':
from torchtext.vocab import FastText
vocab = FastText()
else:
raise ValueError("Pre-trained embeddings not found.")
vectors = vocab.vectors
index2token = vocab.itos
token2index = None
if tokens: # limit vocabulary to list of tokens
num_oovs = 0
keep = []
index2token = []
token2index = {}
for t in tokens:
_t = t.lower()
if _t in token2index:
if t not in token2index:
token2index[t] = token2index[_t]
elif _t in vocab.stoi:
keep.append(vocab.stoi[_t.lower()])
token2index[_t] = len(index2token)
token2index[t] = len(index2token)
index2token.append(_t)
else:
num_oovs += 1
vectors = vectors[keep]
if num_oovs:
logger.warning(f"{num_oovs} tokens not found in pre-trained embeddings")
logger.debug(f"Load embeddings: {pretrained} (no. embeddings: {len(index2token):,})")
        if specials is not None:
            # Append each special token once and give it a random vector.
            import torch
            for s in specials:
                token2index[s] = len(index2token)
                index2token.append(s)
            vectors = torch.cat([vectors, torch.rand(len(specials), len(vectors[0]))])
# return nn.Embedding.from_pretrained(vectors, freeze=emb.freeze or True), Vocab(index2token, token2index)
return vectors, Vocab(index2token, token2index)
else:
raise ValueError(f"{pretrained} is not supported.")
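# Hedged usage sketch (not part of the original module): loading a reduced GloVe matrix for a
# small token list plus special symbols; the tokens and dimensions below are illustrative only.
def _example_load_embeddings():
    vectors, vocab = load_embeddings(
        pretrained='glove', emb_name='840B', dim=300,
        tokens=['the', 'cat', 'sat'], specials=['<pad>', '<oov>'])
    return vectors.shape, len(vocab)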
|
import os
from enb import icompression
from enb.config import get_options
options = get_options()
class HEVC(icompression.WrapperCodec, icompression.LosslessCodec):
def __init__(self, config_path=None, chroma_format="400"):
config_path = config_path if config_path is not None \
else os.path.join(os.path.dirname(os.path.abspath(__file__)),
f"hevc_lossless_{chroma_format}.cfg")
chroma_format = str(chroma_format)
assert chroma_format in ["400"], f"Chroma format {chroma_format} not supported."
icompression.WrapperCodec.__init__(
self,
compressor_path=os.path.join(os.path.dirname(os.path.abspath(__file__)), "TAppEncoderStatic"),
decompressor_path=os.path.join(os.path.dirname(os.path.abspath(__file__)), "TAppDecoderStatic"),
param_dict=dict(chroma_format=chroma_format))
self.config_path = config_path
def get_compression_params(self, original_path, compressed_path, original_file_info):
return f"-i {original_path} -c {self.config_path} -b {compressed_path} -wdt {original_file_info["width"]} " \
f"-hgt {original_file_info["height"]} -f {original_file_info["component_count"]} " \
f"-cf {self.param_dict["chroma_format"]} --InputChromaFormat={self.param_dict["chroma_format"]} " \
f"--InputBitDepth={8 * original_file_info["bytes_per_sample"]}"
def get_decompression_params(self, compressed_path, reconstructed_path, original_file_info):
return f"-b {compressed_path} -o {reconstructed_path} -d {8 * original_file_info["bytes_per_sample"]}"
@property
def label(self):
return "HEVC"
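# Hedged usage note (not part of the original module): a minimal sketch of how this wrapper is
# typically driven, assuming the TAppEncoderStatic/TAppDecoderStatic binaries sit next to this
# file and that `original_file_info` supplies the keys referenced above
# (width, height, component_count, bytes_per_sample), e.g.:
#   codec = HEVC(chroma_format="400")
#   info = dict(width=512, height=512, component_count=1, bytes_per_sample=1)
#   codec.get_compression_params("img.raw", "img.hevc", info)        # encoder CLI arguments
#   codec.get_decompression_params("img.hevc", "img_rec.raw", info)  # decoder CLI arguments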
|
import os
from enb import icompression
from enb.config import get_options
options = get_options()
class HEVC(icompression.WrapperCodec, icompression.LosslessCodec):
def __init__(self, config_path=None, chroma_format="400"):
config_path = config_path if config_path is not None \
else os.path.join(os.path.dirname(os.path.abspath(__file__)),
f"hevc_lossless_{chroma_format}.cfg")
chroma_format = str(chroma_format)
assert chroma_format in ["400"], f"Chroma format {chroma_format} not supported."
icompression.WrapperCodec.__init__(
self,
compressor_path=os.path.join(os.path.dirname(os.path.abspath(__file__)), "TAppEncoderStatic"),
decompressor_path=os.path.join(os.path.dirname(os.path.abspath(__file__)), "TAppDecoderStatic"),
param_dict=dict(chroma_format=chroma_format))
self.config_path = config_path
def get_compression_params(self, original_path, compressed_path, original_file_info):
return f"-i {original_path} -c {self.config_path} -b {compressed_path} -wdt {original_file_info['width']} " \
f"-hgt {original_file_info['height']} -f {original_file_info['component_count']} " \
f"-cf {self.param_dict['chroma_format']} --InputChromaFormat={self.param_dict['chroma_format']} " \
f"--InputBitDepth={8 * original_file_info['bytes_per_sample']}"
def get_decompression_params(self, compressed_path, reconstructed_path, original_file_info):
return f"-b {compressed_path} -o {reconstructed_path} -d {8 * original_file_info['bytes_per_sample']}"
@property
def label(self):
return "HEVC"
|
import logging
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env('DJANGO_SECRET_KEY')
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['geonlee.co'])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES['default'] = env.db('DATABASE_URL') # noqa F405
DATABASES['default']['ATOMIC_REQUESTS'] = True # noqa F405
DATABASES['default']['CONN_MAX_AGE'] = env.int('CONN_MAX_AGE', default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': f'{env('REDIS_URL', default='redis://127.0.0.1:6379')}/{0}',
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
            # Mimicking memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
'IGNORE_EXCEPTIONS': True,
}
}
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool('DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool('DJANGO_SECURE_HSTS_PRELOAD', default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool('DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = 'DENY'
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ['storages'] # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_AUTO_CREATE_BUCKET = True
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': f'max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate',
}
# STATIC
# ------------------------
STATICFILES_STORAGE = 'config.settings.production.StaticRootS3BotoStorage'
STATIC_URL = 'https://s3.amazonaws.com/%s/static/' % AWS_STORAGE_BUCKET_NAME
# MEDIA
# ------------------------------------------------------------------------------
# region http://stackoverflow.com/questions/10390244/
from storages.backends.s3boto3 import S3Boto3Storage
StaticRootS3BotoStorage = lambda: S3Boto3Storage(location='static') # noqa
MediaRootS3BotoStorage = lambda: S3Boto3Storage(location='media', file_overwrite=False) # noqa
# endregion
DEFAULT_FILE_STORAGE = 'config.settings.production.MediaRootS3BotoStorage'
MEDIA_URL = f'https://s3.amazonaws.com/{AWS_STORAGE_BUCKET_NAME}/media/'
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]['OPTIONS']['loaders'] = [ # noqa F405
(
'django.template.loaders.cached.Loader',
[
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
),
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
'DJANGO_DEFAULT_FROM_EMAIL',
default='Nomadgram <[email protected]>'
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[Nomadgram]')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ['anymail'] # noqa F405
EMAIL_BACKEND = 'anymail.backends.mailgun.EmailBackend'
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
'MAILGUN_API_KEY': env('MAILGUN_API_KEY'),
'MAILGUN_SENDER_DOMAIN': env('MAILGUN_DOMAIN')
}
# Gunicorn
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['gunicorn'] # noqa F405
# Collectfast
# ------------------------------------------------------------------------------
# https://github.com/antonagestam/collectfast#installation
INSTALLED_APPS = ['collectfast'] + INSTALLED_APPS # noqa F405
AWS_PRELOAD_METADATA = True
# LOGGING
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
# Your stuff...
# ------------------------------------------------------------------------------
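# Hedged deployment note (not part of the original settings): every secret above is read from
# the environment via env(). A minimal set of required variables (those called without a
# default) would look roughly like this; all values are placeholders.
#   DJANGO_SECRET_KEY=<long-random-string>
#   DATABASE_URL=postgres://user:password@host:5432/dbname
#   DJANGO_AWS_ACCESS_KEY_ID=... DJANGO_AWS_SECRET_ACCESS_KEY=... DJANGO_AWS_STORAGE_BUCKET_NAME=...
#   DJANGO_ADMIN_URL=<admin-url-regex>
#   MAILGUN_API_KEY=... MAILGUN_DOMAIN=...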
|
import logging
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env('DJANGO_SECRET_KEY')
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['geonlee.co'])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES['default'] = env.db('DATABASE_URL') # noqa F405
DATABASES['default']['ATOMIC_REQUESTS'] = True # noqa F405
DATABASES['default']['CONN_MAX_AGE'] = env.int('CONN_MAX_AGE', default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': f'{env("REDIS_URL", default="redis://127.0.0.1:6379")}/{0}',
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
            # Mimicking memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
'IGNORE_EXCEPTIONS': True,
}
}
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool('DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool('DJANGO_SECURE_HSTS_PRELOAD', default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool('DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = 'DENY'
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ['storages'] # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_AUTO_CREATE_BUCKET = True
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': f'max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate',
}
# STATIC
# ------------------------
STATICFILES_STORAGE = 'config.settings.production.StaticRootS3BotoStorage'
STATIC_URL = 'https://s3.amazonaws.com/%s/static/' % AWS_STORAGE_BUCKET_NAME
# MEDIA
# ------------------------------------------------------------------------------
# region http://stackoverflow.com/questions/10390244/
from storages.backends.s3boto3 import S3Boto3Storage
StaticRootS3BotoStorage = lambda: S3Boto3Storage(location='static') # noqa
MediaRootS3BotoStorage = lambda: S3Boto3Storage(location='media', file_overwrite=False) # noqa
# endregion
DEFAULT_FILE_STORAGE = 'config.settings.production.MediaRootS3BotoStorage'
MEDIA_URL = f'https://s3.amazonaws.com/{AWS_STORAGE_BUCKET_NAME}/media/'
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]['OPTIONS']['loaders'] = [ # noqa F405
(
'django.template.loaders.cached.Loader',
[
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
),
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
'DJANGO_DEFAULT_FROM_EMAIL',
default='Nomadgram <[email protected]>'
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[Nomadgram]')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ['anymail'] # noqa F405
EMAIL_BACKEND = 'anymail.backends.mailgun.EmailBackend'
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
'MAILGUN_API_KEY': env('MAILGUN_API_KEY'),
'MAILGUN_SENDER_DOMAIN': env('MAILGUN_DOMAIN')
}
# Gunicorn
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['gunicorn'] # noqa F405
# Collectfast
# ------------------------------------------------------------------------------
# https://github.com/antonagestam/collectfast#installation
INSTALLED_APPS = ['collectfast'] + INSTALLED_APPS # noqa F405
AWS_PRELOAD_METADATA = True
# LOGGING
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
# Your stuff...
# ------------------------------------------------------------------------------
|
def mostrar(alunos):
print('='*25)
for cont in range(3):
print(f' {cont+1} aluno {alunos['nomes'][cont]}')
print(f' notas {alunos['1nota'][cont]:4.2f}, {alunos['2nota'][cont]:4.2f}, {alunos['3nota'][cont]:4.2f}')
print('='*25)
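# Hedged usage sketch (not part of the original snippet): `mostrar` expects a dict with the keys
# used above and three entries per list; the names and grades below are made up.
#   alunos = {'nomes': ['Ana', 'Bruno', 'Carla'],
#             '1nota': [7.5, 8.0, 6.5],
#             '2nota': [9.0, 7.0, 8.5],
#             '3nota': [6.0, 9.5, 7.0]}
#   mostrar(alunos)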
|
def mostrar(alunos):
print('='*25)
for cont in range(3):
print(f' {cont+1} aluno {alunos["nomes"][cont]}')
print(f' notas {alunos["1nota"][cont]:4.2f}, {alunos["2nota"][cont]:4.2f}, {alunos["3nota"][cont]:4.2f}')
print('='*25)
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
"""
Jsonschema validation of cloud custodian config.
We start with a walkthrough of the various class registries
of resource types and assemble and generate the schema.
We do some specialization to reduce overall schema size
via reference usage, although in some cases we prefer
copies, due to issues with inheritance via reference (
allowedProperties and enum extension).
All filters and actions are annotated with schema typically using
the utils.type_schema function.
"""
from collections import Counter
import json
import inspect
import logging
from jsonschema import Draft7Validator as JsonSchemaValidator
from jsonschema.exceptions import best_match
from c7n.policy import execution
from c7n.provider import clouds
from c7n.query import sources
from c7n.resources import load_available
from c7n.resolver import ValuesFrom
from c7n.filters.core import (
ValueFilter,
EventFilter,
AgeFilter,
ReduceFilter,
OPERATORS,
VALUE_TYPES,
)
from c7n.structure import StructureParser # noqa
def validate(data, schema=None, resource_types=()):
if schema is None:
schema = generate(resource_types)
JsonSchemaValidator.check_schema(schema)
validator = JsonSchemaValidator(schema)
errors = list(validator.iter_errors(data))
if not errors:
return check_unique(data) or []
try:
resp = policy_error_scope(specific_error(errors[0]), data)
name = isinstance(
errors[0].instance,
dict) and errors[0].instance.get(
'name',
'unknown') or 'unknown'
return [resp, name]
except Exception:
logging.exception(
"specific_error failed, traceback, followed by fallback")
return list(filter(None, [
errors[0],
best_match(validator.iter_errors(data)),
]))
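# Hedged usage sketch (not part of the original module): validating a minimal policy document
# against a schema restricted to a single resource type; the policy content is illustrative.
def _example_validate():
    load_available()
    data = {'policies': [{'name': 'ec2-stopped', 'resource': 'aws.ec2'}]}
    return validate(data, resource_types=('aws.ec2',))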
def check_unique(data):
counter = Counter([p['name'] for p in data.get('policies', [])])
for k, v in list(counter.items()):
if v == 1:
counter.pop(k)
if counter:
return [ValueError(
"Only one policy with a given name allowed, duplicates: {}".format(counter)),
list(counter.keys())[0]]
def policy_error_scope(error, data):
"""Scope a schema error to its policy name and resource."""
err_path = list(error.absolute_path)
if err_path[0] != 'policies':
return error
pdata = data['policies'][err_path[1]]
pdata.get('name', 'unknown')
error.message = "Error on policy:{} resource:{}\n".format(
pdata.get('name', 'unknown'), pdata.get('resource', 'unknown')) + error.message
return error
def specific_error(error):
"""Try to find the best error for humans to resolve
The jsonschema.exceptions.best_match error is based purely on a
mix of a strong match (ie. not anyOf, oneOf) and schema depth,
this often yields odd results that are semantically confusing,
instead we can use a bit of structural knowledge of schema to
provide better results.
"""
if error.validator not in ('anyOf', 'oneOf'):
return error
r = t = None
if isinstance(error.instance, dict):
t = error.instance.get('type')
r = error.instance.get('resource')
if r is not None:
found = None
for idx, v in enumerate(error.validator_value):
if '$ref' in v and v['$ref'].rsplit('/', 2)[1].endswith(r):
found = idx
break
if found is not None:
# error context is a flat list of all validation
# failures, we have to index back to the policy
# of interest.
for e in error.context:
# resource policies have a fixed path from
# the top of the schema
if e.absolute_schema_path[4] == found:
return specific_error(e)
return specific_error(error.context[idx])
if t is not None:
found = None
for idx, v in enumerate(error.validator_value):
if ('$ref' in v and
v['$ref'].rsplit('/', 2)[-1].rsplit('.', 1)[-1] == t):
found = idx
break
elif 'type' in v and t in v['properties']['type']['enum']:
found = idx
break
if found is not None:
for e in error.context:
for el in reversed(e.absolute_schema_path):
if isinstance(el, int):
if el == found:
return e
break
return error
def generate(resource_types=()):
resource_defs = {}
definitions = {
'resources': resource_defs,
'string_dict': {
"type": "object",
"patternProperties": {
"": {"type": "string"},
},
},
'basic_dict': {
"type": "object",
"patternProperties": {
"": {
'oneOf': [
{"type": "string"},
{"type": "boolean"},
{"type": "number"},
],
}
},
},
'iam-statement': {
'additionalProperties': False,
'type': 'object',
'properties': {
'Sid': {'type': 'string'},
'Effect': {'type': 'string', 'enum': ['Allow', 'Deny']},
'Principal': {'anyOf': [
{'type': 'string'},
{'type': 'object'}, {'type': 'array'}]},
'NotPrincipal': {'anyOf': [{'type': 'object'}, {'type': 'array'}]},
'Action': {'anyOf': [{'type': 'string'}, {'type': 'array'}]},
'NotAction': {'anyOf': [{'type': 'string'}, {'type': 'array'}]},
'Resource': {'anyOf': [{'type': 'string'}, {'type': 'array'}]},
'NotResource': {'anyOf': [{'type': 'string'}, {'type': 'array'}]},
'Condition': {'type': 'object'}
},
'required': ['Sid', 'Effect'],
'oneOf': [
{'required': ['Principal', 'Action', 'Resource']},
{'required': ['NotPrincipal', 'Action', 'Resource']},
{'required': ['Principal', 'NotAction', 'Resource']},
{'required': ['NotPrincipal', 'NotAction', 'Resource']},
{'required': ['Principal', 'Action', 'NotResource']},
{'required': ['NotPrincipal', 'Action', 'NotResource']},
{'required': ['Principal', 'NotAction', 'NotResource']},
{'required': ['NotPrincipal', 'NotAction', 'NotResource']}
]
},
'actions': {},
'filters': {
'value': ValueFilter.schema,
'event': EventFilter.schema,
'age': AgeFilter.schema,
'reduce': ReduceFilter.schema,
# Shortcut form of value filter as k=v
'valuekv': {
'type': 'object',
'additionalProperties': {'oneOf': [{'type': 'number'}, {'type': 'null'},
{'type': 'array', 'maxItems': 0}, {'type': 'string'}, {'type': 'boolean'}]},
'minProperties': 1,
'maxProperties': 1},
},
'filters_common': {
'comparison_operators': {
'enum': list(OPERATORS.keys())},
'value_types': {'enum': VALUE_TYPES},
'value_from': ValuesFrom.schema,
'value': {'oneOf': [
{'type': 'array'},
{'type': 'string'},
{'type': 'boolean'},
{'type': 'number'},
{'type': 'null'}]},
},
'policy': {
'type': 'object',
'required': ['name', 'resource'],
'additionalProperties': False,
'properties': {
'name': {
'type': 'string',
'pattern': "^[A-z][A-z0-9]*(-*[A-z0-9]+)*$"},
'conditions': {
'type': 'array',
'items': {'anyOf': [
{'type': 'object', 'additionalProperties': False,
'properties': {'or': {
'$ref': '#/definitions/policy/properties/conditions'}}},
{'type': 'object', 'additionalProperties': False,
'properties': {'not': {
'$ref': '#/definitions/policy/properties/conditions'}}},
{'type': 'object', 'additionalProperties': False,
'properties': {'and': {
'$ref': '#/definitions/policy/properties/conditions'}}},
{'$ref': '#/definitions/filters/value'},
{'$ref': '#/definitions/filters/event'},
{'$ref': '#/definitions/filters/valuekv'}]}},
# these should be deprecated for conditions
'region': {'type': 'string'},
'tz': {'type': 'string'},
'start': {'format': 'date-time'},
'end': {'format': 'date-time'},
'resource': {'type': 'string'},
'max-resources': {'anyOf': [
{'type': 'integer', 'minimum': 1},
{'$ref': '#/definitions/max-resources-properties'}
]},
'max-resources-percent': {'type': 'number', 'minimum': 0, 'maximum': 100},
'comment': {'type': 'string'},
'title': {'type': 'string'},
'description': {'type': 'string'},
'tags': {'type': 'array', 'items': {'type': 'string'}},
'metadata': {'type': 'object'},
'mode': {'$ref': '#/definitions/policy-mode'},
'source': {'enum': list(sources.keys())},
'actions': {
'type': 'array',
},
'filters': {
'type': 'array'
},
'metrics': {
'type': 'array'
},
#
# TODO: source queries should really move under
# source. This was initially used for describe sources
# to expose server side query mechanisms, however its
# important to note it also prevents resource cache
# utilization between policies that have different
# queries.
'query': {
'type': 'array', 'items': {'type': 'object'}}
},
},
'policy-mode': {
'anyOf': [e.schema for _, e in execution.items()],
},
'max-resources-properties': {
'type': 'object',
'additionalProperties': False,
'properties': {
'amount': {"type": 'integer', 'minimum': 1},
'op': {'enum': ['or', 'and']},
'percent': {'type': 'number', 'minimum': 0, 'maximum': 100}
}
}
}
resource_refs = []
for cloud_name, cloud_type in sorted(clouds.items()):
for type_name, resource_type in sorted(cloud_type.resources.items()):
r_type_name = "%s.%s" % (cloud_name, type_name)
if resource_types and r_type_name not in resource_types:
if not resource_type.type_aliases:
continue
elif not {"%s.%s" % (cloud_name, ralias) for ralias
in resource_type.type_aliases}.intersection(
resource_types):
continue
aliases = []
if resource_type.type_aliases:
aliases.extend(["%s.%s" % (cloud_name, a) for a in resource_type.type_aliases])
# aws gets legacy aliases with no cloud prefix
if cloud_name == 'aws':
aliases.extend(resource_type.type_aliases)
# aws gets additional alias for default name
if cloud_name == 'aws':
aliases.append(type_name)
resource_refs.append(
process_resource(
r_type_name,
resource_type,
resource_defs,
aliases,
definitions,
cloud_name
))
schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
'id': 'http://schema.cloudcustodian.io/v0/custodian.json',
'definitions': definitions,
'type': 'object',
'required': ['policies'],
'additionalProperties': False,
'properties': {
'vars': {'type': 'object'},
'policies': {
'type': 'array',
'additionalItems': False,
'items': {'anyOf': resource_refs}
}
}
}
# allow empty policies with lazy load
if not resource_refs:
schema['properties']['policies']['items'] = {'type': 'object'}
return schema
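# Hedged usage sketch (not part of the original module): generating the schema for a subset of
# resource types keeps the document small; the resource names below are illustrative.
def _example_generate_subset():
    load_available()
    schema = generate(resource_types=('aws.ec2', 'aws.s3'))
    return sorted(schema['definitions']['resources'])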
def process_resource(
type_name, resource_type, resource_defs, aliases=None,
definitions=None, provider_name=None):
r = resource_defs.setdefault(type_name, {'actions': {}, 'filters': {}})
action_refs = []
for a in ElementSchema.elements(resource_type.action_registry):
action_name = a.type
if a.schema_alias:
action_alias = "%s.%s" % (provider_name, action_name)
if action_alias in definitions['actions']:
if definitions['actions'][action_alias] != a.schema: # NOQA
msg = "Schema mismatch on type:{} action:{} w/ schema alias ".format(
type_name, action_name)
raise SyntaxError(msg)
else:
definitions['actions'][action_alias] = a.schema
action_refs.append({'$ref': '#/definitions/actions/%s' % action_alias})
else:
r['actions'][action_name] = a.schema
action_refs.append(
{'$ref': '#/definitions/resources/%s/actions/%s' % (
type_name, action_name)})
# one word action shortcuts
action_refs.append(
{'enum': list(resource_type.action_registry.keys())})
filter_refs = []
for f in ElementSchema.elements(resource_type.filter_registry):
filter_name = f.type
if filter_name == 'value':
filter_refs.append({'$ref': '#/definitions/filters/value'})
filter_refs.append({'$ref': '#/definitions/filters/valuekv'})
elif filter_name == 'event':
filter_refs.append({'$ref': '#/definitions/filters/event'})
elif f.schema_alias:
filter_alias = "%s.%s" % (provider_name, filter_name)
if filter_alias in definitions['filters']:
assert definitions['filters'][filter_alias] == f.schema, "Schema mismatch on filter w/ schema alias" # NOQA
else:
definitions['filters'][filter_alias] = f.schema
filter_refs.append({'$ref': '#/definitions/filters/%s' % filter_alias})
continue
else:
r['filters'][filter_name] = f.schema
filter_refs.append(
{'$ref': '#/definitions/resources/%s/filters/%s' % (
type_name, filter_name)})
# one word filter shortcuts
filter_refs.append(
{'enum': list(resource_type.filter_registry.keys())})
block_fref = '#/definitions/resources/%s/policy/allOf/1/properties/filters' % (
type_name)
filter_refs.extend([
{'type': 'object', 'additionalProperties': False,
'properties': {'or': {'$ref': block_fref}}},
{'type': 'object', 'additionalProperties': False,
'properties': {'and': {'$ref': block_fref}}},
{'type': 'object', 'additionalProperties': False,
'properties': {'not': {'$ref': block_fref}}}])
resource_policy = {
'allOf': [
{'$ref': '#/definitions/policy'},
{'properties': {
'resource': {'enum': [type_name]},
'filters': {
'type': 'array',
'items': {'anyOf': filter_refs}},
'actions': {
'type': 'array',
'items': {'anyOf': action_refs}}}},
]
}
if aliases:
resource_policy['allOf'][1]['properties'][
'resource']['enum'].extend(aliases)
if type_name == 'ec2':
resource_policy['allOf'][1]['properties']['query'] = {}
r['policy'] = resource_policy
return {'$ref': '#/definitions/resources/%s/policy' % type_name}
def resource_outline(provider=None):
outline = {}
for cname, ctype in sorted(clouds.items()):
if provider and provider != cname:
continue
cresources = outline[cname] = {}
for rname, rtype in sorted(ctype.resources.items()):
cresources['%s.%s' % (cname, rname)] = rinfo = {}
rinfo['filters'] = sorted(rtype.filter_registry.keys())
rinfo['actions'] = sorted(rtype.action_registry.keys())
return outline
def resource_vocabulary(cloud_name=None, qualify_name=True, aliases=True):
vocabulary = {}
resources = {}
if aliases:
vocabulary['aliases'] = {}
for cname, ctype in clouds.items():
if cloud_name is not None and cloud_name != cname:
continue
for rname, rtype in ctype.resources.items():
if qualify_name:
resources['%s.%s' % (cname, rname)] = rtype
else:
resources[rname] = rtype
for type_name, resource_type in resources.items():
classes = {'actions': {}, 'filters': {}, 'resource': resource_type}
actions = []
for cls in ElementSchema.elements(resource_type.action_registry):
action_name = ElementSchema.name(cls)
actions.append(action_name)
classes['actions'][action_name] = cls
filters = []
for cls in ElementSchema.elements(resource_type.filter_registry):
filter_name = ElementSchema.name(cls)
filters.append(filter_name)
classes['filters'][filter_name] = cls
vocabulary[type_name] = {
'filters': sorted(filters),
'actions': sorted(actions),
'classes': classes,
}
if aliases and resource_type.type_aliases:
provider = type_name.split('.', 1)[0]
for type_alias in resource_type.type_aliases:
vocabulary['aliases'][
"{}.{}".format(provider, type_alias)] = vocabulary[type_name]
if provider == 'aws':
vocabulary['aliases'][type_alias] = vocabulary[type_name]
vocabulary[type_name]['resource_type'] = type_name
vocabulary["mode"] = {}
for mode_name, cls in execution.items():
vocabulary["mode"][mode_name] = cls
return vocabulary
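# Hedged usage sketch (not part of the original module): building the vocabulary for a single
# provider and printing a summary of its registered filters and actions ('aws' is an
# illustrative choice).
def _example_provider_summary():
    load_available()
    pprint_schema_summary(resource_vocabulary(cloud_name='aws'))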
class ElementSchema:
"""Utility functions for working with resource's filters and actions.
"""
@staticmethod
def elements(registry):
"""Given a resource registry return sorted de-aliased values.
"""
seen = {}
for k, v in registry.items():
if k in ('and', 'or', 'not'):
continue
if v in seen:
continue
else:
seen[ElementSchema.name(v)] = v
return [seen[k] for k in sorted(seen)]
@staticmethod
def resolve(vocabulary, schema_path):
"""Given a resource vocabulary and a dotted path, resolve an element.
"""
current = vocabulary
frag = None
if schema_path.startswith('.'):
            # The prepended '.' is an odd artifact
schema_path = schema_path[1:]
parts = schema_path.split('.')
while parts:
k = parts.pop(0)
if frag:
k = "%s.%s" % (frag, k)
frag = None
parts.insert(0, 'classes')
elif k in clouds:
frag = k
if len(parts) == 1:
parts.append('resource')
continue
if k not in current:
raise ValueError("Invalid schema path %s" % schema_path)
current = current[k]
return current
@staticmethod
def name(cls):
"""For a filter or action return its name."""
return cls.schema['properties']['type']['enum'][0]
@staticmethod
def doc(cls):
"""Return 'best' formatted doc string for a given class.
Walks up class hierarchy, skipping known bad. Returns
empty string if no suitable doc string found.
"""
# walk up class hierarchy for nearest
# good doc string, skip known
if cls.__doc__ is not None:
return inspect.cleandoc(cls.__doc__)
doc = None
for b in cls.__bases__:
if b in (ValueFilter, object):
continue
doc = b.__doc__ or ElementSchema.doc(b)
if doc is not None:
return inspect.cleandoc(doc)
return ""
@staticmethod
def schema(definitions, cls):
"""Return a pretty'ified version of an element schema."""
schema = isinstance(cls, type) and dict(cls.schema) or dict(cls)
schema.pop('type', None)
schema.pop('additionalProperties', None)
return ElementSchema._expand_schema(schema, definitions)
@staticmethod
def _expand_schema(schema, definitions):
"""Expand references in schema to their full schema"""
for k, v in list(schema.items()):
if k == '$ref':
# the value here is in the form of: '#/definitions/path/to/key'
parts = v.split('/')
if ['#', 'definitions'] != parts[0:2]:
raise ValueError("Invalid Ref %s" % v)
current = definitions
for p in parts[2:]:
if p not in current:
return None
current = current[p]
return ElementSchema._expand_schema(current, definitions)
elif isinstance(v, dict):
schema[k] = ElementSchema._expand_schema(v, definitions)
return schema
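# Hedged usage note (not part of the original module): `resolve` walks the vocabulary built by
# resource_vocabulary() with a dotted path; 'aws.ec2' below is an illustrative resource, e.g.:
#   vocab = resource_vocabulary()
#   ElementSchema.resolve(vocab, 'aws.ec2')               # the resource class
#   ElementSchema.resolve(vocab, 'aws.ec2.actions.stop')  # a single action class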
def pprint_schema_summary(vocabulary):
providers = {}
non_providers = {}
for type_name, rv in vocabulary.items():
if '.' not in type_name:
non_providers[type_name] = len(rv)
else:
provider, name = type_name.split('.', 1)
stats = providers.setdefault(provider, {
'resources': 0, 'actions': Counter(), 'filters': Counter()})
stats['resources'] += 1
for a in rv.get('actions'):
stats['actions'][a] += 1
for f in rv.get('filters'):
stats['filters'][f] += 1
for provider, stats in providers.items():
print("%s:" % provider)
print(" resource count: %d" % stats['resources'])
print(" actions: %d" % len(stats['actions']))
print(" filters: %d" % len(stats['filters']))
for non_providers_type, length in non_providers.items():
print("%s:" % non_providers_type)
print(" count: %d" % length)
def json_dump(resource=None):
load_available()
print(json.dumps(generate(resource), indent=2))
if __name__ == '__main__':
json_dump()
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
"""
Jsonschema validation of cloud custodian config.
We start with a walkthrough of the various class registries
of resource types and assemble and generate the schema.
We do some specialization to reduce overall schema size
via reference usage, although in some cases we prefer
copies, due to issues with inheritance via reference (
allowedProperties and enum extension).
All filters and actions are annotated with schema typically using
the utils.type_schema function.
"""
from collections import Counter
import json
import inspect
import logging
from jsonschema import Draft7Validator as JsonSchemaValidator
from jsonschema.exceptions import best_match
from c7n.policy import execution
from c7n.provider import clouds
from c7n.query import sources
from c7n.resources import load_available
from c7n.resolver import ValuesFrom
from c7n.filters.core import (
ValueFilter,
EventFilter,
AgeFilter,
ReduceFilter,
OPERATORS,
VALUE_TYPES,
)
from c7n.structure import StructureParser # noqa
def validate(data, schema=None, resource_types=()):
if schema is None:
schema = generate(resource_types)
JsonSchemaValidator.check_schema(schema)
validator = JsonSchemaValidator(schema)
errors = list(validator.iter_errors(data))
if not errors:
return check_unique(data) or []
try:
resp = policy_error_scope(specific_error(errors[0]), data)
name = isinstance(
errors[0].instance,
dict) and errors[0].instance.get(
'name',
'unknown') or 'unknown'
return [resp, name]
except Exception:
logging.exception(
"specific_error failed, traceback, followed by fallback")
return list(filter(None, [
errors[0],
best_match(validator.iter_errors(data)),
]))
def check_unique(data):
counter = Counter([p['name'] for p in data.get('policies', [])])
for k, v in list(counter.items()):
if v == 1:
counter.pop(k)
if counter:
return [ValueError(
"Only one policy with a given name allowed, duplicates: {}".format(counter)),
list(counter.keys())[0]]
def policy_error_scope(error, data):
"""Scope a schema error to its policy name and resource."""
err_path = list(error.absolute_path)
if err_path[0] != 'policies':
return error
pdata = data['policies'][err_path[1]]
pdata.get('name', 'unknown')
error.message = "Error on policy:{} resource:{}\n".format(
pdata.get('name', 'unknown'), pdata.get('resource', 'unknown')) + error.message
return error
def specific_error(error):
"""Try to find the best error for humans to resolve
The jsonschema.exceptions.best_match error is based purely on a
mix of a strong match (ie. not anyOf, oneOf) and schema depth,
this often yields odd results that are semantically confusing,
instead we can use a bit of structural knowledge of schema to
provide better results.
"""
if error.validator not in ('anyOf', 'oneOf'):
return error
r = t = None
if isinstance(error.instance, dict):
t = error.instance.get('type')
r = error.instance.get('resource')
if r is not None:
found = None
for idx, v in enumerate(error.validator_value):
if '$ref' in v and v['$ref'].rsplit('/', 2)[1].endswith(r):
found = idx
break
if found is not None:
# error context is a flat list of all validation
# failures, we have to index back to the policy
# of interest.
for e in error.context:
# resource policies have a fixed path from
# the top of the schema
if e.absolute_schema_path[4] == found:
return specific_error(e)
return specific_error(error.context[idx])
if t is not None:
found = None
for idx, v in enumerate(error.validator_value):
if ('$ref' in v and
v['$ref'].rsplit('/', 2)[-1].rsplit('.', 1)[-1] == t):
found = idx
break
elif 'type' in v and t in v['properties']['type']['enum']:
found = idx
break
if found is not None:
for e in error.context:
for el in reversed(e.absolute_schema_path):
if isinstance(el, int):
if el == found:
return e
break
return error
def generate(resource_types=()):
resource_defs = {}
definitions = {
'resources': resource_defs,
'string_dict': {
"type": "object",
"patternProperties": {
"": {"type": "string"},
},
},
'basic_dict': {
"type": "object",
"patternProperties": {
"": {
'oneOf': [
{"type": "string"},
{"type": "boolean"},
{"type": "number"},
],
}
},
},
'iam-statement': {
'additionalProperties': False,
'type': 'object',
'properties': {
'Sid': {'type': 'string'},
'Effect': {'type': 'string', 'enum': ['Allow', 'Deny']},
'Principal': {'anyOf': [
{'type': 'string'},
{'type': 'object'}, {'type': 'array'}]},
'NotPrincipal': {'anyOf': [{'type': 'object'}, {'type': 'array'}]},
'Action': {'anyOf': [{'type': 'string'}, {'type': 'array'}]},
'NotAction': {'anyOf': [{'type': 'string'}, {'type': 'array'}]},
'Resource': {'anyOf': [{'type': 'string'}, {'type': 'array'}]},
'NotResource': {'anyOf': [{'type': 'string'}, {'type': 'array'}]},
'Condition': {'type': 'object'}
},
'required': ['Sid', 'Effect'],
'oneOf': [
{'required': ['Principal', 'Action', 'Resource']},
{'required': ['NotPrincipal', 'Action', 'Resource']},
{'required': ['Principal', 'NotAction', 'Resource']},
{'required': ['NotPrincipal', 'NotAction', 'Resource']},
{'required': ['Principal', 'Action', 'NotResource']},
{'required': ['NotPrincipal', 'Action', 'NotResource']},
{'required': ['Principal', 'NotAction', 'NotResource']},
{'required': ['NotPrincipal', 'NotAction', 'NotResource']}
]
},
'actions': {},
'filters': {
'value': ValueFilter.schema,
'event': EventFilter.schema,
'age': AgeFilter.schema,
'reduce': ReduceFilter.schema,
# Shortcut form of value filter as k=v
'valuekv': {
'type': 'object',
'additionalProperties': {'oneOf': [{'type': 'number'}, {'type': 'null'},
{'type': 'array', 'maxItems': 0}, {'type': 'string'}, {'type': 'boolean'}]},
'minProperties': 1,
'maxProperties': 1},
},
'filters_common': {
'comparison_operators': {
'enum': list(OPERATORS.keys())},
'value_types': {'enum': VALUE_TYPES},
'value_from': ValuesFrom.schema,
'value': {'oneOf': [
{'type': 'array'},
{'type': 'string'},
{'type': 'boolean'},
{'type': 'number'},
{'type': 'null'}]},
},
'policy': {
'type': 'object',
'required': ['name', 'resource'],
'additionalProperties': False,
'properties': {
'name': {
'type': 'string',
'pattern': "^[A-z][A-z0-9]*(-*[A-z0-9]+)*$"},
'conditions': {
'type': 'array',
'items': {'anyOf': [
{'type': 'object', 'additionalProperties': False,
'properties': {'or': {
'$ref': '#/definitions/policy/properties/conditions'}}},
{'type': 'object', 'additionalProperties': False,
'properties': {'not': {
'$ref': '#/definitions/policy/properties/conditions'}}},
{'type': 'object', 'additionalProperties': False,
'properties': {'and': {
'$ref': '#/definitions/policy/properties/conditions'}}},
{'$ref': '#/definitions/filters/value'},
{'$ref': '#/definitions/filters/event'},
{'$ref': '#/definitions/filters/valuekv'}]}},
# these should be deprecated for conditions
'region': {'type': 'string'},
'tz': {'type': 'string'},
'start': {'format': 'date-time'},
'end': {'format': 'date-time'},
'resource': {'type': 'string'},
'max-resources': {'anyOf': [
{'type': 'integer', 'minimum': 1},
{'$ref': '#/definitions/max-resources-properties'}
]},
'max-resources-percent': {'type': 'number', 'minimum': 0, 'maximum': 100},
'comment': {'type': 'string'},
'title': {'type': 'string'},
'description': {'type': 'string'},
'tags': {'type': 'array', 'items': {'type': 'string'}},
'metadata': {'type': 'object'},
'mode': {'$ref': '#/definitions/policy-mode'},
'source': {'enum': list(sources.keys())},
'actions': {
'type': 'array',
},
'filters': {
'type': 'array'
},
'metrics': {
'type': 'array'
},
#
# TODO: source queries should really move under
# source. This was initially used for describe sources
# to expose server side query mechanisms, however its
# important to note it also prevents resource cache
# utilization between policies that have different
# queries.
'query': {
'type': 'array', 'items': {'type': 'object'}}
},
},
'policy-mode': {
'anyOf': [e.schema for _, e in execution.items()],
},
'max-resources-properties': {
'type': 'object',
'additionalProperties': False,
'properties': {
'amount': {"type": 'integer', 'minimum': 1},
'op': {'enum': ['or', 'and']},
'percent': {'type': 'number', 'minimum': 0, 'maximum': 100}
}
}
}
resource_refs = []
for cloud_name, cloud_type in sorted(clouds.items()):
for type_name, resource_type in sorted(cloud_type.resources.items()):
r_type_name = "%s.%s" % (cloud_name, type_name)
if resource_types and r_type_name not in resource_types:
if not resource_type.type_aliases:
continue
elif not {"%s.%s" % (cloud_name, ralias) for ralias
in resource_type.type_aliases}.intersection(
resource_types):
continue
aliases = []
if resource_type.type_aliases:
aliases.extend(["%s.%s" % (cloud_name, a) for a in resource_type.type_aliases])
# aws gets legacy aliases with no cloud prefix
if cloud_name == 'aws':
aliases.extend(resource_type.type_aliases)
# aws gets additional alias for default name
if cloud_name == 'aws':
aliases.append(type_name)
resource_refs.append(
process_resource(
r_type_name,
resource_type,
resource_defs,
aliases,
definitions,
cloud_name
))
schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
'id': 'http://schema.cloudcustodian.io/v0/custodian.json',
'definitions': definitions,
'type': 'object',
'required': ['policies'],
'additionalProperties': False,
'properties': {
'vars': {'type': 'object'},
'policies': {
'type': 'array',
'additionalItems': False,
'items': {'anyOf': resource_refs}
}
}
}
# allow empty policies with lazy load
if not resource_refs:
schema['properties']['policies']['items'] = {'type': 'object'}
return schema
def process_resource(
type_name, resource_type, resource_defs, aliases=None,
definitions=None, provider_name=None):
r = resource_defs.setdefault(type_name, {'actions': {}, 'filters': {}})
action_refs = []
for a in ElementSchema.elements(resource_type.action_registry):
action_name = a.type
if a.schema_alias:
action_alias = "%s.%s" % (provider_name, action_name)
if action_alias in definitions['actions']:
if definitions['actions'][action_alias] != a.schema: # NOQA
msg = "Schema mismatch on type:{} action:{} w/ schema alias ".format(
type_name, action_name)
raise SyntaxError(msg)
else:
definitions['actions'][action_alias] = a.schema
action_refs.append({'$ref': '#/definitions/actions/%s' % action_alias})
else:
r['actions'][action_name] = a.schema
action_refs.append(
{'$ref': '#/definitions/resources/%s/actions/%s' % (
type_name, action_name)})
# one word action shortcuts
action_refs.append(
{'enum': list(resource_type.action_registry.keys())})
filter_refs = []
for f in ElementSchema.elements(resource_type.filter_registry):
filter_name = f.type
if filter_name == 'value':
filter_refs.append({'$ref': '#/definitions/filters/value'})
filter_refs.append({'$ref': '#/definitions/filters/valuekv'})
elif filter_name == 'event':
filter_refs.append({'$ref': '#/definitions/filters/event'})
elif f.schema_alias:
filter_alias = "%s.%s" % (provider_name, filter_name)
if filter_alias in definitions['filters']:
assert definitions['filters'][filter_alias] == f.schema, "Schema mismatch on filter w/ schema alias" # NOQA
else:
definitions['filters'][filter_alias] = f.schema
filter_refs.append({'$ref': '#/definitions/filters/%s' % filter_alias})
continue
else:
r['filters'][filter_name] = f.schema
filter_refs.append(
{'$ref': '#/definitions/resources/%s/filters/%s' % (
type_name, filter_name)})
# one word filter shortcuts
filter_refs.append(
{'enum': list(resource_type.filter_registry.keys())})
block_fref = '#/definitions/resources/%s/policy/allOf/1/properties/filters' % (
type_name)
filter_refs.extend([
{'type': 'object', 'additionalProperties': False,
'properties': {'or': {'$ref': block_fref}}},
{'type': 'object', 'additionalProperties': False,
'properties': {'and': {'$ref': block_fref}}},
{'type': 'object', 'additionalProperties': False,
'properties': {'not': {'$ref': block_fref}}}])
resource_policy = {
'allOf': [
{'$ref': '#/definitions/policy'},
{'properties': {
'resource': {'enum': [type_name]},
'filters': {
'type': 'array',
'items': {'anyOf': filter_refs}},
'actions': {
'type': 'array',
'items': {'anyOf': action_refs}}}},
]
}
if aliases:
resource_policy['allOf'][1]['properties'][
'resource']['enum'].extend(aliases)
if type_name == 'ec2':
resource_policy['allOf'][1]['properties']['query'] = {}
r['policy'] = resource_policy
return {'$ref': '#/definitions/resources/%s/policy' % type_name}
def resource_outline(provider=None):
outline = {}
for cname, ctype in sorted(clouds.items()):
if provider and provider != cname:
continue
cresources = outline[cname] = {}
for rname, rtype in sorted(ctype.resources.items()):
cresources['%s.%s' % (cname, rname)] = rinfo = {}
rinfo['filters'] = sorted(rtype.filter_registry.keys())
rinfo['actions'] = sorted(rtype.action_registry.keys())
return outline
def resource_vocabulary(cloud_name=None, qualify_name=True, aliases=True):
vocabulary = {}
resources = {}
if aliases:
vocabulary['aliases'] = {}
for cname, ctype in clouds.items():
if cloud_name is not None and cloud_name != cname:
continue
for rname, rtype in ctype.resources.items():
if qualify_name:
resources['%s.%s' % (cname, rname)] = rtype
else:
resources[rname] = rtype
for type_name, resource_type in resources.items():
classes = {'actions': {}, 'filters': {}, 'resource': resource_type}
actions = []
for cls in ElementSchema.elements(resource_type.action_registry):
action_name = ElementSchema.name(cls)
actions.append(action_name)
classes['actions'][action_name] = cls
filters = []
for cls in ElementSchema.elements(resource_type.filter_registry):
filter_name = ElementSchema.name(cls)
filters.append(filter_name)
classes['filters'][filter_name] = cls
vocabulary[type_name] = {
'filters': sorted(filters),
'actions': sorted(actions),
'classes': classes,
}
if aliases and resource_type.type_aliases:
provider = type_name.split('.', 1)[0]
for type_alias in resource_type.type_aliases:
vocabulary['aliases'][
"{}.{}".format(provider, type_alias)] = vocabulary[type_name]
if provider == 'aws':
vocabulary['aliases'][type_alias] = vocabulary[type_name]
vocabulary[type_name]['resource_type'] = type_name
vocabulary["mode"] = {}
for mode_name, cls in execution.items():
vocabulary["mode"][mode_name] = cls
return vocabulary
class ElementSchema:
"""Utility functions for working with resource's filters and actions.
"""
@staticmethod
def elements(registry):
"""Given a resource registry return sorted de-aliased values.
"""
seen = {}
for k, v in registry.items():
if k in ('and', 'or', 'not'):
continue
if v in seen:
continue
else:
seen[ElementSchema.name(v)] = v
return [seen[k] for k in sorted(seen)]
@staticmethod
def resolve(vocabulary, schema_path):
"""Given a resource vocabulary and a dotted path, resolve an element.
"""
current = vocabulary
frag = None
if schema_path.startswith('.'):
            # The prepended '.' is an odd artifact
schema_path = schema_path[1:]
parts = schema_path.split('.')
while parts:
k = parts.pop(0)
if frag:
k = "%s.%s" % (frag, k)
frag = None
parts.insert(0, 'classes')
elif k in clouds:
frag = k
if len(parts) == 1:
parts.append('resource')
continue
if k not in current:
raise ValueError("Invalid schema path %s" % schema_path)
current = current[k]
return current
@staticmethod
def name(cls):
"""For a filter or action return its name."""
return cls.schema['properties']['type']['enum'][0]
@staticmethod
def doc(cls):
"""Return 'best' formatted doc string for a given class.
Walks up class hierarchy, skipping known bad. Returns
empty string if no suitable doc string found.
"""
# walk up class hierarchy for nearest
# good doc string, skip known
if cls.__doc__ is not None:
return inspect.cleandoc(cls.__doc__)
doc = None
for b in cls.__bases__:
if b in (ValueFilter, object):
continue
doc = b.__doc__ or ElementSchema.doc(b)
if doc is not None:
return inspect.cleandoc(doc)
return ""
@staticmethod
def schema(definitions, cls):
"""Return a pretty'ified version of an element schema."""
schema = isinstance(cls, type) and dict(cls.schema) or dict(cls)
schema.pop('type', None)
schema.pop('additionalProperties', None)
return ElementSchema._expand_schema(schema, definitions)
@staticmethod
def _expand_schema(schema, definitions):
"""Expand references in schema to their full schema"""
for k, v in list(schema.items()):
if k == '$ref':
# the value here is in the form of: '#/definitions/path/to/key'
parts = v.split('/')
if ['#', 'definitions'] != parts[0:2]:
raise ValueError("Invalid Ref %s" % v)
current = definitions
for p in parts[2:]:
if p not in current:
return None
current = current[p]
return ElementSchema._expand_schema(current, definitions)
elif isinstance(v, dict):
schema[k] = ElementSchema._expand_schema(v, definitions)
return schema
def pprint_schema_summary(vocabulary):
providers = {}
non_providers = {}
for type_name, rv in vocabulary.items():
if '.' not in type_name:
non_providers[type_name] = len(rv)
else:
provider, name = type_name.split('.', 1)
stats = providers.setdefault(provider, {
'resources': 0, 'actions': Counter(), 'filters': Counter()})
stats['resources'] += 1
for a in rv.get('actions'):
stats['actions'][a] += 1
for f in rv.get('filters'):
stats['filters'][f] += 1
for provider, stats in providers.items():
print("%s:" % provider)
print(" resource count: %d" % stats['resources'])
print(" actions: %d" % len(stats['actions']))
print(" filters: %d" % len(stats['filters']))
for non_providers_type, length in non_providers.items():
print("%s:" % non_providers_type)
print(" count: %d" % length)
def json_dump(resource=None):
load_available()
print(json.dumps(generate(resource), indent=2))
if __name__ == '__main__':
json_dump()
|
# 201005: rename/restructure .yml files for consistency with xtb-level data
# 201006: in read_conformer() fix error message when log files are missing
import os,re,itertools,time
#import pybel
#from openbabel import pybel
import numpy as np
import pandas as pd
import pathlib as pl
cwd = pl.Path.cwd()
import yaml
from yaml import CLoader as Loader
from yaml import CDumper as Dumper
from rdkit import Chem,Geometry
from rdkit.Chem import rdmolfiles, AllChem, rdMolAlign,rdmolops
from multiprocessing import Pool
import morfeus # Kjell Jorner
from PL_split_logs_201006 import split_log # TG
from PL_conformer_selection_200411 import mirror_mol, delete_element_from_rdkitmol, delete_haloalkane_halides # TG #changed from PL_conformer_selection_201019 5/17/21 by EP
import PL_gaussian_properties_201021 as gp # TG
import vmin4 as vmin # TG/Iris Guo
import P_int_200916 as P_int # Robert Pollice (,TG(,ML))
# import PL_visvol as visvol # Ellyn Peters
# covalent radii, from Pyykko and Atsumi, Chem. Eur. J. 15, 2009, 188-197
# values for metals decreased by 10% according to Robert Paton's Sterimol implementation
rcov = {
"H": 0.32,"He": 0.46,"Li": 1.2,"Be": 0.94,"B": 0.77,"C": 0.75,"N": 0.71,"O": 0.63,"F": 0.64,"Ne": 0.67,"Na": 1.4,"Mg": 1.25,"Al": 1.13,"Si": 1.04,"P": 1.1,"S": 1.02,"Cl": 0.99,"Ar": 0.96,"K": 1.76,"Ca": 1.54,"Sc": 1.33,"Ti": 1.22,"V": 1.21,"Cr": 1.1,"Mn": 1.07,"Fe": 1.04,"Co": 1.0,"Ni": 0.99,"Cu": 1.01,"Zn": 1.09,"Ga": 1.12,"Ge": 1.09,"As": 1.15,"Se": 1.1,"Br": 1.14,"Kr": 1.17,"Rb": 1.89,"Sr": 1.67,"Y": 1.47,"Zr": 1.39,"Nb": 1.32,"Mo": 1.24,"Tc": 1.15,"Ru": 1.13,"Rh": 1.13,"Pd": 1.08,"Ag": 1.15,"Cd": 1.23,"In": 1.28,"Sn": 1.26,"Sb": 1.26,"Te": 1.23,"I": 1.32,"Xe": 1.31,"Cs": 2.09,"Ba": 1.76,"La": 1.62,"Ce": 1.47,"Pr": 1.58,"Nd": 1.57,"Pm": 1.56,"Sm": 1.55,"Eu": 1.51,"Gd": 1.52,"Tb": 1.51,"Dy": 1.5,"Ho": 1.49,"Er": 1.49,"Tm": 1.48,"Yb": 1.53,"Lu": 1.46,"Hf": 1.37,"Ta": 1.31,"W": 1.23,"Re": 1.18,"Os": 1.16,"Ir": 1.11,"Pt": 1.12,"Au": 1.13,"Hg": 1.32,"Tl": 1.3,"Pb": 1.3,"Bi": 1.36,"Po": 1.31,"At": 1.38,"Rn": 1.42,"Fr": 2.01,"Ra": 1.81,"Ac": 1.67,"Th": 1.58,"Pa": 1.52,"U": 1.53,"Np": 1.54,"Pu": 1.55
}
# some constants
R = 0.0019872036 #kcal mol^-1 K^-1
T = 298.15 #K
hartree_kcalmol = 627.50947
periodictable = ["Bq","H","He","Li","Be","B","C","N","O","F","Ne","Na","Mg","Al","Si","P","S","Cl","Ar","K","Ca","Sc","Ti","V","Cr","Mn","Fe","Co","Ni","Cu","Zn","Ga","Ge","As","Se","Br","Kr","Rb","Sr","Y","Zr","Nb","Mo","Tc","Ru","Rh","Pd","Ag","Cd","In","Sn","Sb","Te","I","Xe","Cs","Ba","La","Ce","Pr","Nd","Pm","Sm","Eu","Gd","Tb","Dy","Ho","Er","Tm","Yb","Lu","Hf","Ta","W","Re","Os","Ir","Pt","Au","Hg","Tl","Pb","Bi","Po","At","Rn","Fr","Ra","Ac","Th","Pa","U","Np","Pu","Am","Cm","Bk","Cf","Es","Fm","Md","No","Lr","Rf","Db","Sg","Bh","Hs","Mt","Ds","Rg","Uub","Uut","Uuq","Uup","Uuh","Uus","Uuo","X"]
def get_conmat(elements, coords):
# partially based on code from Robert Paton's Sterimol script, which based this part on Grimme's D3 code
# elements is a list of strings, coords is a numpy array or nested list of shape N_atoms x 3
if type(coords) == list:
coords = np.asarray(coords)
natom = len(elements)
#max_elem = 94
k1 = 16.0
k2 = 4.0/3.0
conmat = np.zeros((natom,natom))
for i in range(0,natom):
if elements[i] not in rcov.keys():
continue
for iat in range(0,natom):
if elements[iat] not in rcov.keys():
continue
if iat != i:
dxyz = coords[iat]-coords[i]
r = np.linalg.norm(dxyz)
rco = rcov[elements[i]]+rcov[elements[iat]]
rco = rco*k2
rr=rco/r
                damp=1.0/(1.0+np.exp(-k1*(rr-1.0)))  # np.exp works on scalars; np.math was removed in NumPy 2.0
if damp > 0.85: #check if threshold is good enough for general purpose
conmat[i,iat],conmat[iat,i] = 1,1
return(conmat)
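# Hedged usage sketch for get_conmat(): the helper below is illustrative only and is never
# called by the workflow. It uses a rough, made-up pyramidal PH3 geometry (P-H ~1.42 A);
# with the covalent radii above, the connectivity matrix row sums come out as [3, 1, 1, 1]
# (P bonded to three H, each H bonded only to P).
def _example_get_conmat():
    elements = ["P", "H", "H", "H"]
    coords = [[ 0.0000,  0.0000,  0.00],
              [ 1.1930,  0.0000, -0.77],
              [-0.5965,  1.0332, -0.77],
              [-0.5965, -1.0332, -0.77]]
    conmat = get_conmat(elements, coords)
    assert list(conmat.sum(axis=1)) == [3.0, 1.0, 1.0, 1.0]  # P-H pairs pass the damping threshold, H-H pairs do not
    return conmat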
def add_valence(elements,coords,conmat,base_idx,add_element="Pd"):
# Adds a valence to base so that the angle to the previous substituents is maximized and reorders the coordinate output for convenience
# add_element: add any of the following elements:
distpx = {"O":1.5,"Se":2.12,"Pd":2.28,"X":1.8} # typical bond distances to P
if type(coords) == list:
coords = np.asarray(coords)
num_atoms = len(elements)
coord_base = coords[base_idx]
base_element = elements[base_idx]
vec = np.array([0.0,0.0,0.0])
bonded = []
for atom in range(num_atoms):
if conmat[base_idx][atom]:
bonded.append(atom)
vec += coord_base - coords[atom]
coordox = distpx[add_element]*vec/np.linalg.norm(vec) + coord_base
atoms = [x for x in range(num_atoms+1)]
coords_temp = np.vstack((coords,coordox))
if sum(get_conmat(elements+[add_element],coords_temp)[-1]) != 1.0:
print(" Warning: possible collision!")
# sort coordinates so that base is first, add_element is second, and the other atoms bonded to base are next
elements_new = [base_element,add_element]+[elements[a] for a in bonded] + [a for i,a in enumerate(elements) if i not in [base_idx]+bonded]
coords_new = np.vstack((coord_base, coordox, coords[bonded], coords[[i for i,a in enumerate(elements) if i not in [base_idx]+bonded]]))
return(elements_new, coords_new)
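# Hedged usage sketch for add_valence(): with the same made-up pyramidal PH3 geometry as above,
# the dummy "Pd" lands 2.28 A from P along the lone-pair direction (+z here), and the reordered
# output starts with P, then Pd, then the three H atoms. Illustrative only; never called.
def _example_add_valence():
    elements = ["P", "H", "H", "H"]
    coords = np.array([[ 0.0000,  0.0000,  0.00],
                       [ 1.1930,  0.0000, -0.77],
                       [-0.5965,  1.0332, -0.77],
                       [-0.5965, -1.0332, -0.77]])
    conmat = get_conmat(elements, coords)
    elements_pd, coords_pd = add_valence(elements, coords, conmat, 0, add_element="Pd")
    # elements_pd == ["P", "Pd", "H", "H", "H"]; np.linalg.norm(coords_pd[1] - coords_pd[0]) is ~2.28
    return elements_pd, coords_pd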
def write_xyz(elements,coords,filename):
with open(filename,"w") as f:
f.write(f"{len(elements)}\n\n")
for i,a in enumerate(elements):
f.write(f"{a.title():>3} " + " ".join([f"{coords[i][j]:15f}" for j in range(3)]) + "\n")
def rmsd_matrix(conformers):
molobjects = [rdmolfiles.MolFromMolFile(str(cwd/conformer/f"{conformer}_opt.sdf"),removeHs=False,strictParsing=False) for conformer in conformers]
molobjects = [Chem.RemoveHs(mol) for mol in molobjects] # Remove all H: optional but speeds up RMSD calculation
molobjects = [delete_haloalkane_halides(mol) for mol in molobjects] # Remove halides in perhaloalkyl moieties. Improves RMSD matching and timing
molobjects_inv = [mirror_mol(mol) for mol in molobjects] # create mirror images of each conformer
rmsd_mat = np.zeros((len(conformers),len(conformers)))
for i,j in itertools.product(range(len(conformers)),range(len(conformers))):
if i<j: continue
if i==j:
rmsd_mat[i,j] = 1
else:
rmsd_mat[i,j] = min((rdMolAlign.GetBestRMS(molobjects[i],molobjects[j]),rdMolAlign.GetBestRMS(molobjects[i],molobjects_inv[j])))
rmsd_mat[j,i] = rmsd_mat[i,j]
return(rmsd_mat)
def dict_key_rmsd(candidate_pair):
return float(rmsd_matrix(candidate_pair)[0,1])
# which energies to read from which log-file
energylogs = {
"e_dz":"freq",
"e_tz_gas":"nbo",
"e_tz_gas":"sp",
"e_tz_solv":"solv",
"e_tz_ra":"ra",
"e_tz_rc":"rc",
}
# which properties to read from which log-file
proplogs = {
"freq":["nimag","g","t"],
"sp" :["dipole","homo","qpole","t"],
"ra" :["homo","nbo","t"],
"rc" :["homo","nbo","t"],
"nbo" :["nbo","nborbsP","t"],
"nmr" :["nmr","t"],
"efg" :["efg","nuesp","t"],
"solv":["ecds","t"],
}
# assign names to each descriptor
propoutput = {
"freq_g": ["","g"],
"freq_nimag": ["nimag"],
"sp_dipole": ["dipolemoment",],
"sp_homo": ["fmo_e_homo","fmo_e_lumo","fmo_mu","fmo_eta","fmo_omega"],
"ra_homo":["somo_ra","","","",""],
"rc_homo":["somo_rc","","","",""],
"sp_qpole": ["qpole_amp","qpoletens_xx","qpoletens_yy","qpoletens_zz"],
"nbo_nbo": ["nbo_P"],
"ra_nbo": ["nbo_P_ra","spindens_P_ra"],
"rc_nbo": ["nbo_P_rc","spindens_P_rc"],
"nmr_nmr": ["nmr_P","nmrtens_sxx_P","nmrtens_syy_P","nmrtens_szz_P",],
"efg_efg": ["efg_amp_P","efgtens_xx_P","efgtens_yy_P","efgtens_zz_P"],
"efg_nuesp": ["nuesp_P",],
"solv_ecds": ["E_solv_cds"],
"nbo_dipole": ["dipolemoment",],
"nbo_homo": ["fmo_e_homo","fmo_e_lumo","fmo_mu","fmo_eta","fmo_omega"],
"nbo_qpole": ["qpole_amp","qpoletens_xx","qpoletens_yy","qpoletens_zz"],
}
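# Hedged reading example (made-up numbers): if the "homo" parser applied to the sp log returned
# [-0.250, -0.020, -0.135, 0.230, 0.040], the loop in gp_properties() would store them under the
# propoutput["sp_homo"] names, i.e. fmo_e_homo=-0.250, fmo_e_lumo=-0.020, fmo_mu=-0.135,
# fmo_eta=0.230, fmo_omega=0.040. Empty-string names (e.g. in "freq_g", "ra_homo") are placeholders
# for parsed values that are not kept; the resulting "" key is deleted at the end of gp_properties().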
boltzproperties = ['vmin_vmin','vmin_r','dipolemoment', 'fmo_e_homo', 'fmo_e_lumo', 'fmo_mu', 'fmo_eta', 'fmo_omega', 'somo_ra', 'somo_rc', 'qpole_amp', 'qpoletens_xx', 'qpoletens_yy', 'qpoletens_zz', 'nbo_P', 'nbo_P_ra', 'spindens_P_ra', 'nbo_P_rc', 'spindens_P_rc', 'nmr_P', 'nmrtens_sxx_P', 'nmrtens_syy_P', 'nmrtens_szz_P', 'efg_amp_P', 'efgtens_xx_P', 'efgtens_yy_P', 'efgtens_zz_P', 'nuesp_P', 'E_solv_cds', 'nbo_lp_P_percent_s', 'nbo_lp_P_occ', 'nbo_lp_P_e', 'nbo_bd_e_max', 'nbo_bd_e_avg', 'nbo_bds_e_min', 'nbo_bds_e_avg', 'nbo_bd_occ_min', 'nbo_bd_occ_avg', 'nbo_bds_occ_max', 'nbo_bds_occ_avg', 'E_solv_total', 'E_solv_elstat', 'E_oxidation', 'E_reduction', 'fukui_p', 'fukui_m', 'pyr_P', 'pyr_alpha', 'vbur_vbur', 'vbur_vtot', 'vbur_ratio_vbur_vtot', 'vbur_qvbur_min', 'vbur_qvbur_max', 'vbur_qvtot_min', 'vbur_qvtot_max', 'vbur_max_delta_qvbur', 'vbur_max_delta_qvtot', 'vbur_ovbur_min', 'vbur_ovbur_max', 'vbur_ovtot_min', 'vbur_ovtot_max', 'vbur_near_vbur', 'vbur_far_vbur', 'vbur_near_vtot', 'vbur_far_vtot', 'sterimol_B1', 'sterimol_B5', 'sterimol_L', 'sterimol_burB1', 'sterimol_burB5', 'sterimol_burL',"Pint_P_int","Pint_dP","Pint_P_min","Pint_P_max","volume","surface_area","sphericity"] # "vv_total_visible_volume","vv_proximal_visible_volume","vv_distal_visible_volume","vv_ratio_visible_total","vv_ratio_proxvis_total",
mmproperties = ['dipolemoment', 'qpole_amp', 'qpoletens_xx', 'qpoletens_yy', 'qpoletens_zz', 'pyr_P', 'pyr_alpha', 'vbur_vbur', 'vbur_vtot', 'vbur_qvbur_min', 'vbur_qvbur_max', 'vbur_qvtot_min', 'vbur_qvtot_max', 'vbur_max_delta_qvbur', 'vbur_max_delta_qvtot', 'vbur_ovbur_min', 'vbur_ovbur_max', 'vbur_ovtot_min', 'vbur_ovtot_max', 'vbur_near_vbur', 'vbur_far_vbur', 'vbur_near_vtot', 'vbur_far_vtot', 'sterimol_B1', 'sterimol_B5', 'sterimol_L', 'sterimol_burB1', 'sterimol_burB5', 'sterimol_burL'] # ,"vv_total_visible_volume","vv_proximal_visible_volume","vv_distal_visible_volume","vv_ratio_visible_total","vv_ratio_proxvis_total",
Pintresults = ["Pint_P_int","Pint_dP","Pint_P_min","Pint_P_max","volume","surface_area","sphericity"]
def morfeus_properties(elements,coordinates,confdata):
# Morfeus: Sterimol, Vbur, pyr
morfdict = {}
if "pyr_P" not in confdata.keys() and confdata["p_val"] == 3:
        # Pyramidalization - two equivalent measurements, P and alpha
pyr = morfeus.Pyramidalization(elements=elements,coordinates=coordinates,atom_index=1,excluded_atoms=[2]) # remove Pd
morfdict["pyr_P"] = float(pyr.P)
morfdict["pyr_alpha"] = float(pyr.alpha)
if "vbur_vbur" not in confdata.keys():
#Buried volume - get quadrant volumes and distal volume
        # iterate through the P-substituents, aligning the quadrants parallel to each substituent once (= xz_plane definition)
# Metal/point of reference should be 2.28 A away from P
# z_axis_atoms: P
# xz_plane_atoms: each of the substituents once
# keep lowest and highest quadrant and octant volume across all three orientations of the coordinate system
# keep highest difference of any neighboring quadrant volume
# keep volume in each of the two hemispheres
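        # Illustrative sketch (made-up numbers): if the quadrant buried volumes were
        #   orientation 1: [25.1, 30.4, 28.0, 26.5]
        #   orientation 2: [24.8, 31.0, 27.7, 26.6]
        #   orientation 3: [25.3, 30.1, 28.2, 26.4]
        # then qvbur_all collects all 12 values, so vbur_qvbur_min ~ 24.8 and vbur_qvbur_max ~ 31.0,
        # while vbur_max_delta_qvbur is the largest |difference| between (cyclically) neighbouring
        # quadrant volumes within one orientation, maximised over the three orientations
        # (here |31.0 - 24.8| = 6.2 from orientation 2).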
qvbur_all = np.array([])
qvdist_all = np.array([])
qvtot_all = np.array([])
max_delta_qvbur_all = []
max_delta_qvtot_all = []
ovbur_all = np.array([])
ovtot_all = np.array([])
for i in range(3):#confdata["p_val"]):
bv = morfeus.BuriedVolume(elements,coordinates,2,excluded_atoms=[2],z_axis_atoms=[1],xz_plane_atoms=[3+i])
bv.octant_analysis()
bv.compute_distal_volume(method="buried_volume",octants=True)
vbur = bv.buried_volume # these are identical for each iteration
vdist = bv.distal_volume #
vtot = vbur + vdist #
qvbur = np.asarray(list(bv.quadrants["buried_volume"].values()))
qvdist = np.asarray(list(bv.quadrants["distal_volume"].values()))
qvtot = qvbur + qvdist
qvbur_all = np.append(qvbur_all,qvbur)
qvtot_all = np.append(qvtot_all,qvtot)
max_delta_qvbur_all.append(max([abs(qvbur[j]-qvbur[j-1]) for j in range(4)]))
max_delta_qvtot_all.append(max([abs(qvtot[j]-qvtot[j-1]) for j in range(4)]))
ovbur = np.asarray(list(bv.octants["buried_volume"].values()))
ovdist = np.asarray(list(bv.octants["distal_volume"].values()))
ovtot = ovbur + ovdist
ovbur_all = np.append(ovbur_all,ovbur)
ovtot_all = np.append(ovtot_all,ovtot)
near_vbur = ovbur[4:].sum() # these are identical for each iteration
far_vbur = ovbur[:4].sum() #
near_vtot = ovtot[4:].sum() #
far_vtot = ovtot[:4].sum() #
morfdict["vbur_vbur"] = vbur
morfdict["vbur_vtot"] = float(vtot)
morfdict["vbur_ratio_vbur_vtot"] = float(vbur/vtot)
morfdict["vbur_qvbur_min"] = float(min(qvbur_all))
morfdict["vbur_qvbur_max"] = float(max(qvbur_all))
morfdict["vbur_qvtot_min"] = float(min(qvtot_all))
morfdict["vbur_qvtot_max"] = float(max(qvtot_all))
morfdict["vbur_max_delta_qvbur"] = float(max(max_delta_qvbur_all))
morfdict["vbur_max_delta_qvtot"] = float(max(max_delta_qvtot_all))
morfdict["vbur_ovbur_min"] = float(min(ovbur_all))
morfdict["vbur_ovbur_max"] = float(max(ovbur_all))
morfdict["vbur_ovtot_min"] = float(min(ovtot_all))
morfdict["vbur_ovtot_max"] = float(max(ovtot_all))
morfdict["vbur_near_vbur"] = float(near_vbur)
morfdict["vbur_far_vbur"] = float(far_vbur)
morfdict["vbur_near_vtot"] = float(near_vtot)
morfdict["vbur_far_vtot"] = float(far_vtot)
if "sterimol_B1" not in confdata.keys():
# Sterimol
# for Sterimol values matching Rob Paton's implementation:
patonradii = morfeus.helpers.get_radii(elements, radii_type="bondi")
patonradii = np.array(patonradii)
patonradii[patonradii == 1.2] = 1.09
sterimol = morfeus.Sterimol(elements, coordinates, 2, 1, radii=patonradii, n_rot_vectors=3600)
morfdict["sterimol_B1"] = float(sterimol.B_1_value)
morfdict["sterimol_B5"] = float(sterimol.B_5_value)
morfdict["sterimol_L"] = float(sterimol.L_value)
# buried Sterimol
sterimol_bur = morfeus.Sterimol(elements, coordinates, 2, 1,calculate=False,radii=patonradii, n_rot_vectors=3600)
sterimol_bur.bury(sphere_radius=5.5,method="delete",radii_scale=0.5)
# sterimol.bury(sphere_radius=4.5,method="delete",radii_scale=1)
morfdict["sterimol_burB1"] = float(sterimol_bur.B_1_value)
morfdict["sterimol_burB5"] = float(sterimol_bur.B_5_value)
morfdict["sterimol_burL"] = float(sterimol_bur.L_value)
return(morfdict)
def gp_properties(ligand,conformer,p_idx):
# reads gaussian log files
gpdict = {}
gpdict["properties"] = {}
contents = {
"streams":{},
"filecont":{},
}
# read energies
for e,log in energylogs.items():
contents["streams"][log] = gp.get_outstreams(cwd/conformer/f"{conformer}_{log}.log")
if contents["streams"][log] == "failed or incomplete job":
return({"error":True})
else:
gpdict[e] = gp.get_e_hf(contents["streams"][log])
gpdict["error"] = False
# going through each log file, get the relevant properties
for log in proplogs.keys():
contents["filecont"][log] = gp.get_filecont(cwd/conformer/f"{conformer}_{log}.log")
for prop in proplogs[log]:
gpresults = gp.jobtypes[prop][0](contents[gp.jobtypes[prop][1]][log],p_idx)
if prop == "nborbsP": # NBO orbital analysis returns a dictionary with the proper labels
gpdict["properties"].update(gpresults)
elif prop == "t": # subjob time
gpdict[f"{log}_t"] = gpresults
elif prop in ["e_dz","g","e_tz_gas","e_tz_solv","e_tz_ra","e_tz_rc","nimag"]:
gpdict.update({propoutput[f"{log}_{prop}"][i]: float(gpresults[i]) for i in range(len(gpresults))})
else: # all other functions return a list. This is assigned into a dict with proper names here
gpdict["properties"].update({propoutput[f"{log}_{prop}"][i]: float(gpresults[i]) for i in range(len(gpresults))})
gpdict["g_tz_gas"] = gpdict["g"] - gpdict["e_dz"] + gpdict["e_tz_gas"] # in Hartree
gpdict["g_tz_solv"] = gpdict["g"] - gpdict["e_dz"] + gpdict["e_tz_solv"] # in Hartree
gpdict["properties"]["E_solv_total"] = (gpdict["e_tz_solv"] - gpdict["e_tz_gas"]) * hartree_kcalmol # in kcal/mol
gpdict["properties"]["E_solv_elstat"] = gpdict["properties"]["E_solv_total"] - gpdict["properties"]["E_solv_cds"] # in kcal/mol
gpdict["properties"]["E_oxidation"] = gpdict["e_tz_rc"] - gpdict["e_tz_gas"] # in Hartree
gpdict["properties"]["E_reduction"] = gpdict["e_tz_ra"] - gpdict["e_tz_gas"] # in Hartree
gpdict["properties"]["fukui_p"] = gpdict["properties"]["nbo_P"]-gpdict["properties"]["nbo_P_ra"] # fukui electrophilicity
gpdict["properties"]["fukui_m"] = gpdict["properties"]["nbo_P_rc"]-gpdict["properties"]["nbo_P"] # fukui nucleophilicity
gpdict["t_total"] = sum([gpdict[f"{log}_t"] for log in proplogs.keys()])
if "" in gpdict.keys():
del gpdict[""]
if "" in gpdict["properties"].keys():
del gpdict["properties"][""]
return(gpdict)
def read_conformer(cwd, ligand, conformer): # cwd: pathlib path of current working directory. ligand: 8-digit ligand ID. conformer: full name of the conformer (including the ligand ID at the beginning)
confdata = {}
errors = []
checklogs = [cwd/conformer/f"{conformer}_{l}.log" for l in proplogs.keys() if not (cwd/conformer/f"{conformer}_{l}.log").exists()]
if len(checklogs) != 0:
#! log this as a conformer-level error
err = f"Missing Gaussian log files, flagged in read_conformer: {",".join([chkl.name for chkl in checklogs])}"
errors.append(err)
print(f"{ligand};{conformer};{err}")
with open(cwd/f"{ligand}_errors.txt","a") as f:
f.write(f"{ligand};{conformer};{err}\n")
confdata["error"] = True
return(confdata,errors)
if "elements_pd" not in confdata.keys():
# mol = next(pybel.readfile("g09",str(cwd/conformer/f"{conformer}_nbo.log")))
#mol = next(pybel.readfile("g09",str(cwd/conformer/f"{conformer}_opt.log")))
#elements = [periodictable[a.atomicnum] for a in mol.atoms]
#coordinates = [list(a.coords) for a in mol.atoms]
#coordinates_a = np.array([a.coords for a in mol.atoms])
def read_gaussian_logfile(fn):
time0=time.time()
read=False
for line in open(fn,"r"):
if read:
if "---" in line and len(elements)>0:
read=False
if read:
if "X" not in line and "---" not in line:
atomnum = int(line.split()[1])
#print(line.replace("\n",""))
#print(atomnum)
el = periodictable[atomnum]
elements.append(el)
coordinates.append([float(line.split()[3]),float(line.split()[4]), float(line.split()[5])])
if "Coordinates (Angstroms)" in line:
coordinates, elements = [], []
read=True
time1=time.time()
print("gaussian log parser done in %.2f seconds"%(time1-time0))
return(coordinates, elements)
coordinates, elements = read_gaussian_logfile(str(cwd/conformer/f"{conformer}_opt.log"))
coordinates_a = np.array(coordinates)
conmat = get_conmat(elements,coordinates_a)
p_idx = [i for i in range(len(elements)) if elements[i] == "P" and sum(conmat[i]) <= 3][0] # this removes quaternary P (phosphonium, phosphate etc) but allows for P with 2 substituents (phosphabenzene, phosphaimine etc). Can we be sure that we never have more than one non-quaternary P(III)?
elements_pd, coordinates_pd = add_valence(elements,coordinates,conmat,p_idx,add_element="Pd") # Add "Pd" at the reference position in the P-lone pair region
if not (cwd/conformer/f"{conformer}_opt_Pd.xyz").exists():
#out = pybel.Outputfile("xyz",str(cwd/conformer/f"{conformer}_opt.xyz"))
#out.write(mol)
#out.close()
write_xyz(elements, coordinates, cwd/conformer/f"{conformer}_opt.xyz")
#out = pybel.Outputfile("sdf",str(cwd/conformer/f"{conformer}_opt.sdf"))
#out.write(mol)
#out.close()
os.system("obabel -ixyz %s -osdf >> %s"%(str(cwd/conformer/f"{conformer}_opt.xyz"), str(cwd/conformer/f"{conformer}_opt.sdf")))
write_xyz(elements_pd,coordinates_pd,cwd/conformer/f"{conformer}_opt_Pd.xyz")
confdata["coords"] = coordinates
confdata["coords_pd"] = coordinates_pd.tolist()
confdata["elements"] = elements
confdata["elements_pd"] = elements_pd
confdata["conmat"] = conmat.tolist()
confdata["p_idx"] = p_idx
confdata["p_val"] = int(sum(conmat[p_idx])) # how many substituents at P
confdata["properties"] = {}
## get properties
# gp_properties: everything that can be read from the Gaussian log files (most electronic properties)
confdata.update(gp_properties(ligand,conformer,confdata["p_idx"]))
if confdata["error"]:
#! log this as a conformer-level error
err = "Error in the Gaussian computations, flagged in read_conformer, please check log files."
errors.append(err)
print(f"{ligand};{conformer};{err}")
with open(cwd/f"{ligand}_errors.txt","a") as f:
f.write(f"{ligand};{conformer};{err}\n")
with open(cwd/conformer/f"{conformer}_data.yml","w") as f:
yaml.dump(confdata,f,Dumper=Dumper)
return(confdata,errors)
if confdata["nimag"] != 0:
#! log this as a conformer-level error
err = f"Number of imaginary frequencies: {confdata["nimag"]}."
errors.append(err)
print(f"{ligand};{conformer};{err}")
with open(cwd/f"{ligand}_errors.txt","a") as f:
f.write(f"{ligand};{conformer};{err}\n")
with open(cwd/conformer/f"{conformer}_data.yml","w") as f:
yaml.dump(confdata,f,Dumper=Dumper)
confdata["error"] = True
return(confdata,errors)
# morfeus: properties that use the geometry/steric properties
confdata["properties"].update(morfeus_properties(confdata["elements_pd"],confdata["coords_pd"],confdata))
# # P_int
# if "Pint_P_int" not in confdata.keys():
# confdata.update(P_int.P_int_main(name=conformer,directory=cwd/conformer))
# read results
disp = "d3"
pint_read = P_int.read_dedout(cwd/conformer,conformer,disp)+P_int.read_multiwfnout(cwd/conformer,conformer)+P_int.read_disp(cwd/conformer,conformer,disp)
confdata["properties"].update({Pintresults[i]:float(pint_read[i]) for i in range(7)})
# V_min
try:
if "vmin_vmin" not in confdata.keys():
vminob = vmin.get_vmin(f"{conformer}.fchk",str(cwd/conformer)+"/",True)
confdata["properties"]["vmin_vmin"] = float(vminob.v_min)
confdata["properties"]["vmin_r"] = float(vminob.r_min)
    except: # bare except: any failure in the Vmin step (not only a FileNotFoundError) ends up here
        err = "Vmin FileNotFoundError."
errors.append(err)
print(f"{ligand};{conformer};{err}")
with open(cwd/f"{ligand}_errors.txt","a") as f:
f.write(f"{ligand};{conformer};{err}\n")
confdata["error"] = True
# visvol
# if "vv_total_visible_volume" not in confdata.keys():
# confdata.update(visvol.get_vis_vol(cwd/conformer/f"{conformer}_opt_Pd.xyz",radii_type = 'rcov',prox_cutoff = 3.5,ignore_H = 0,write_results = 1, plot = 0))
with open(cwd/conformer/f"{conformer}_data.yml","w") as f:
yaml.dump(confdata,f,Dumper=Dumper)
return(confdata,errors)
def read_ligand(cwd, ligand, conformers, liganddata = {}): # cwd is the ligand-level directory
status = {"ligandlevel": [],}
if len(liganddata.keys()) == 0:
if (cwd/f"{ligand}_data.yml").exists():
with open(cwd/f"{ligand}_data.yml","r") as f:
liganddata = yaml.load(f,Loader=Loader)
if (cwd/f"{ligand}_confdata.yml").exists():
with open(cwd/f"{ligand}_confdata.yml","r") as f:
liganddata["confdata"] = yaml.load(f,Loader=Loader)
else:
liganddata = {
"conformers_all": conformers,
"conformers": conformers.copy(), # Duplicates and computations with errors (including nimag=1) will be removed from this list
"number_of_conformers": len(conformers),
"removed_duplicates": [],
"confdata": {},#{c:{} for c in conformers},
"boltzmann_averaged_data": {},
"min_data": {},
"max_data": {},
"delta_data": {},
"vburminconf_data": {},
}
newconfs = 0
for conformer in conformers:
if conformer in liganddata["removed_duplicates"]:
continue
print(conformer)
if conformer in liganddata["confdata"].keys():
pass
elif (cwd/conformer/f"{conformer}_data.yml").exists():
with open(cwd/conformer/f"{conformer}_data.yml","r") as f:
liganddata["confdata"][conformer] = yaml.load(f,Loader=Loader)
newconfs += 1
else:
print("read conformer data")
liganddata["confdata"][conformer],status[conformer] = read_conformer(cwd, ligand, conformer) # returns the dictionary with the conformer data and a list with errors
newconfs += 1
if newconfs > 0:
# error, NIMAG removal
liganddata["conformers_w_error"] = [conformer for conformer in liganddata["conformers"] if liganddata["confdata"][conformer]["error"]]
liganddata["conformers"] = [c for c in liganddata["conformers"] if c not in liganddata["conformers_w_error"]]
liganddata["number_of_conformers"] = len(liganddata["conformers"])
energies = ["e_dz","g","e_tz_gas","g_tz_gas","e_tz_solv","g_tz_solv"]
liganddata["energies"] = {}
liganddata["relative_energies"] = {}
for e in energies:
liganddata["energies"][e] = {conformer: liganddata["confdata"][conformer][e] for conformer in liganddata["conformers"]}
liganddata[e+"_min"] = min(liganddata["energies"][e].values())
liganddata[e+"_minconf"] = list(liganddata["energies"][e].keys())[np.argmin(list(liganddata["energies"][e].values()))]
liganddata["relative_energies"][e+"_rel"] = {conformer: (liganddata["energies"][e][conformer]-liganddata[e+"_min"])*hartree_kcalmol for conformer in liganddata["conformers"]}
# erel_df = pd.DataFrame(np.array([list(liganddata[e+"_rel"].values()) for e in energies]).T ,columns=energies,index=liganddata["conformers"] )
erel_df = pd.DataFrame([liganddata["relative_energies"][e+"_rel"] for e in energies],index=energies).T
#liganddata["relative_energies_df"] = erel_df
liganddata["relative_energies_dict"] = erel_df.to_dict()
# Find duplicates:
# 1) find pairs of conformers that are within E_rel < 0.1 kcal/mol (relative energies seem to be much more reliable than relative free energies)
# 2) check these pairs to also have RMSD < 0.2 A
# 3) Remove the conformer with higher relative free energy
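        # Illustrative sketch (made-up numbers): if conformers A and B have e_dz_rel of 0.03 and
        # 0.08 kcal/mol (|dE| = 0.05 < 0.1) and RMSD(A, B) = 0.12 A < 0.2, the pair counts as a
        # duplicate and the member with the higher g_tz_gas_rel is dropped below.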
duplicates_candidates = [(i,j) for i,j in itertools.combinations(liganddata["conformers"],2) if abs(erel_df["e_dz"].loc[i] - erel_df["e_dz"].loc[j]) < 0.1]
try:
        # Raise a NameError here if you only want to run the except branch (the fallback duplicate check) for testing
cores = max(os.cpu_count() - 2, 1)
with Pool(cores) as p:
values = p.map(dict_key_rmsd, duplicates_candidates)
liganddata["rmsd_candidates"] = {key: value for key, value in zip(duplicates_candidates, values)}
# The less cool, non-parallel way
#liganddata["rmsd_candidates"] = {candidate_pair: float(rmsd_matrix(candidate_pair)[0,1]) for candidate_pair in duplicates_candidates} # keep all RMSD for potential debugging
liganddata["duplicates"] = [candidate_pair for candidate_pair in liganddata["rmsd_candidates"] if liganddata["rmsd_candidates"][candidate_pair] < 0.2]
except: # RDkit failed to generate Mol objects and thus could not compute RMSD, or some of the internal structures in those mol files are different despite actually being the same. Default to duplicate detection based on dipole moment and chemical shift similarity
#! log this on ligand level for double-checking
err = "Warning: RDKit error at duplicate RMSD testing. Please double check."
status["ligandlevel"].append(err)
print(f"{ligand};ligandlevel;{err}")
with open(cwd/f"{ligand}_errors.txt","a") as f:
f.write(f"{ligand};ligandlevel;{err}\n")
dipole_candidates = set([(i,j) for i,j in duplicates_candidates if abs(liganddata["confdata"][i]["properties"]["dipolemoment"] - liganddata["confdata"][j]["properties"]["dipolemoment"]) < 0.025])
nmr_candidates = set([(i,j) for i,j in duplicates_candidates if abs(liganddata["confdata"][i]["properties"]["nmr_P"] - liganddata["confdata"][j]["properties"]["nmr_P"]) < 0.1])
liganddata["duplicates"] = sorted(dipole_candidates & nmr_candidates)
liganddata["removed_duplicates"] = [erel_df.loc[list(pair)]["g_tz_gas"].idxmax() for pair in liganddata["duplicates"]]
liganddata["conformers"] = [c for c in liganddata["conformers"] if c not in liganddata["removed_duplicates"]]
liganddata["number_of_conformers"] = len(liganddata["conformers"])
# Boltzmann averaging
#boltzfacs = {conformer: np.exp(-liganddata["relative_energies_df"]["g_tz_gas"].loc[conformer]/(R*T)) for conformer in liganddata["conformers"]}
boltzfacs = {conformer: np.exp(-erel_df["g_tz_gas"].loc[conformer]/(R*T)) for conformer in liganddata["conformers"]}
Q = sum(boltzfacs.values())
liganddata["boltzmann_weights"] = {conformer: float(boltzfacs[conformer]/Q) for conformer in liganddata["conformers"] } # probability
for prop in boltzproperties:
confsmissingprop = [conf for conf in liganddata["conformers"] if prop not in liganddata["confdata"][conf]["properties"].keys()]
if len(confsmissingprop) == 0:
liganddata["boltzmann_averaged_data"][prop] = sum([liganddata["boltzmann_weights"][conf] * liganddata["confdata"][conf]["properties"][prop] for conf in liganddata["conformers"]])
else: # if a single conformer is missing a property value, set Boltzmann-average to None
#! log this as a ligand-level error with prop and confsmissingprop
err = f"Warning: {len(confsmissingprop)}/{len(liganddata["conformers"])} conformers missing values for property {prop}: {",".join(confsmissingprop)}."
status["ligandlevel"].append(err)
print(f"{ligand};ligandlevel;{err}")
with open(cwd/f"{ligand}_errors.txt","a") as f:
f.write(f"{ligand};ligandlevel;{err}\n")
liganddata["boltzmann_averaged_data"][prop] = None
continue
# "Condensed" properties
liganddata["vburminconf"] = liganddata["conformers"][np.argmin([liganddata["confdata"][conf]["properties"]["vbur_vbur"] for conf in liganddata["conformers"]])]
for prop in mmproperties:
proplist = [liganddata["confdata"][conf]["properties"][prop] for conf in liganddata["conformers"] if prop in liganddata["confdata"][conf]["properties"].keys()]
# if a single conformer is missing a property value, still perform min/max analysis (Boltzmann-average will be None to indicate missing value(s))
# if all confs are missing this prop, set min/max/delta to None
if len(proplist) == 0:
liganddata["min_data"][prop] = None
liganddata["max_data"][prop] = None
liganddata["delta_data"][prop] = None
liganddata["vburminconf_data"][prop] = None
else:
liganddata["min_data"][prop] = min(proplist)
liganddata["max_data"][prop] = max(proplist)
liganddata["delta_data"][prop] = liganddata["max_data"][prop] - liganddata["min_data"][prop]
liganddata["vburminconf_data"][prop] = liganddata["confdata"][liganddata["vburminconf"]]["properties"][prop]
liganddata["time_all"] = sum([liganddata["confdata"][conf]["t_total"] for conf in liganddata["conformers_all"] if "t_total" in liganddata["confdata"][conf].keys()])
with open(cwd/f"{ligand}_data.yml","w") as f:
yaml.dump({k:v for k,v in liganddata.items() if k != "confdata"},f,Dumper=Dumper)
with open(cwd/f"{ligand}_confdata.yml","w") as f:
yaml.dump(liganddata["confdata"],f,Dumper=Dumper)
erel_df.to_csv(cwd/f"{ligand}_relative_energies.csv",sep=";")
return(liganddata,status)
def main_split_logs(cwd, ligand):
if not (cwd/"ERR").exists():
(cwd/"ERR").mkdir()
# if not (cwd/"done").exists():
# (cwd/"done").mkdir()
conformers = [i.name for i in (cwd/ligand).iterdir() if i.is_dir()]
conformers_good = []
for conformer in conformers:
logs = [i.name for i in (cwd/ligand/conformer).rglob("*.log")]
if f"{conformer}.log" in logs and f"{conformer}_opt.log" not in logs:
status = split_log(ligand, conformer)
if status != "Error":
#(cwd/ligand/conformer/f"{conformer}.log").rename(cwd/f"done/{conformer}.log")
conformers_good.append(conformer)
return(conformers_good)
if __name__ == '__main__':
starttime_all = time.time()
ligname = re.compile("[0-9]{8}")
ligands = sorted([i.name for i in cwd.iterdir() if (ligname.match(i.name) and i.is_dir())])
conformers = {ligand: [i.name for i in (cwd/ligand).iterdir() if i.is_dir()] for ligand in ligands}
if not (cwd/"ERR").exists():
(cwd/"ERR").mkdir()
if not (cwd/"done").exists():
(cwd/"done").mkdir()
for ligand in ligands:
for conformer in conformers[ligand]:
logs = [i.name for i in (cwd/ligand/conformer).rglob("*.log")]
if f"{conformer}.log" in logs and f"{conformer}_opt.log" not in logs:
status = split_log(ligand,conformer)
if status != "Error":
(cwd/ligand/conformer/f"{conformer}.log").rename(cwd/f"done/{conformer}.log")
if (cwd/"allligands_data.yml").exists():
with open(cwd/"allligands_data.yml","r") as f:
allliganddata = yaml.load(f,Loader=Loader)
else:
allliganddata = {}
for ligand in ligands:
print(ligand)
print(conformers[ligand])
if ligand in allliganddata.keys():
allliganddata[ligand],status = read_ligand(cwd,ligand,conformers[ligand],allliganddata[ligand])
else:
allliganddata[ligand],status = read_ligand(cwd,ligand,conformers[ligand])
with open(cwd/"allligands_data.yml","w") as f:
yaml.dump(allliganddata,f,Dumper=Dumper)
variants = ["boltz","min","max","delta","vburminconf"]
columns = [i+"_boltz" for i in boltzproperties if i not in mmproperties] + [f"{i}_{j}" for i,j in itertools.product(mmproperties,variants)]# + ["t_total","number_of_conformers"]
df = pd.DataFrame(columns = columns,index = ligands)
for l in ligands:
for c in columns:
            # debug leftovers: this print/exit aborts the run before the DataFrame is filled,
            # so the assignment below never executes
            print(allliganddata[l]["properties"])
            exit()
            df.loc[l, c] = allliganddata[l]["properties"][c]
df["t_total"] = [allliganddata[l]["t_total"] for l in ligands]
df["number_of_conformers"] = [allliganddata[l]["number_of_conformers"] for l in ligands]
df.to_csv("allligands_data.csv",sep=";")
print(f"All done. Total time: {round((time.time()-starttime_all),2)} sec")
|
# 201005: rename/restructure .yml files for consistency with xtb-level data
# 201006: in read_conformer() fix error message when log files are missing
import os,re,itertools,time
#import pybel
#from openbabel import pybel
import numpy as np
import pandas as pd
import pathlib as pl
cwd = pl.Path.cwd()
import yaml
from yaml import CLoader as Loader
from yaml import CDumper as Dumper
from rdkit import Chem,Geometry
from rdkit.Chem import rdmolfiles, AllChem, rdMolAlign,rdmolops
from multiprocessing import Pool
import morfeus # Kjell Jorner
from PL_split_logs_201006 import split_log # TG
from PL_conformer_selection_200411 import mirror_mol, delete_element_from_rdkitmol, delete_haloalkane_halides # TG #changed from PL_conformer_selection_201019 5/17/21 by EP
import PL_gaussian_properties_201021 as gp # TG
import vmin4 as vmin # TG/Iris Guo
import P_int_200916 as P_int # Robert Pollice (,TG(,ML))
# import PL_visvol as visvol # Ellyn Peters
# covalent radii, from Pyykko and Atsumi, Chem. Eur. J. 15, 2009, 188-197
# values for metals decreased by 10% according to Robert Paton's Sterimol implementation
rcov = {
"H": 0.32,"He": 0.46,"Li": 1.2,"Be": 0.94,"B": 0.77,"C": 0.75,"N": 0.71,"O": 0.63,"F": 0.64,"Ne": 0.67,"Na": 1.4,"Mg": 1.25,"Al": 1.13,"Si": 1.04,"P": 1.1,"S": 1.02,"Cl": 0.99,"Ar": 0.96,"K": 1.76,"Ca": 1.54,"Sc": 1.33,"Ti": 1.22,"V": 1.21,"Cr": 1.1,"Mn": 1.07,"Fe": 1.04,"Co": 1.0,"Ni": 0.99,"Cu": 1.01,"Zn": 1.09,"Ga": 1.12,"Ge": 1.09,"As": 1.15,"Se": 1.1,"Br": 1.14,"Kr": 1.17,"Rb": 1.89,"Sr": 1.67,"Y": 1.47,"Zr": 1.39,"Nb": 1.32,"Mo": 1.24,"Tc": 1.15,"Ru": 1.13,"Rh": 1.13,"Pd": 1.08,"Ag": 1.15,"Cd": 1.23,"In": 1.28,"Sn": 1.26,"Sb": 1.26,"Te": 1.23,"I": 1.32,"Xe": 1.31,"Cs": 2.09,"Ba": 1.76,"La": 1.62,"Ce": 1.47,"Pr": 1.58,"Nd": 1.57,"Pm": 1.56,"Sm": 1.55,"Eu": 1.51,"Gd": 1.52,"Tb": 1.51,"Dy": 1.5,"Ho": 1.49,"Er": 1.49,"Tm": 1.48,"Yb": 1.53,"Lu": 1.46,"Hf": 1.37,"Ta": 1.31,"W": 1.23,"Re": 1.18,"Os": 1.16,"Ir": 1.11,"Pt": 1.12,"Au": 1.13,"Hg": 1.32,"Tl": 1.3,"Pb": 1.3,"Bi": 1.36,"Po": 1.31,"At": 1.38,"Rn": 1.42,"Fr": 2.01,"Ra": 1.81,"Ac": 1.67,"Th": 1.58,"Pa": 1.52,"U": 1.53,"Np": 1.54,"Pu": 1.55
}
# some constants
R = 0.0019872036 #kcal mol^-1 K^-1
T = 298.15 #K
hartree_kcalmol = 627.50947
periodictable = ["Bq","H","He","Li","Be","B","C","N","O","F","Ne","Na","Mg","Al","Si","P","S","Cl","Ar","K","Ca","Sc","Ti","V","Cr","Mn","Fe","Co","Ni","Cu","Zn","Ga","Ge","As","Se","Br","Kr","Rb","Sr","Y","Zr","Nb","Mo","Tc","Ru","Rh","Pd","Ag","Cd","In","Sn","Sb","Te","I","Xe","Cs","Ba","La","Ce","Pr","Nd","Pm","Sm","Eu","Gd","Tb","Dy","Ho","Er","Tm","Yb","Lu","Hf","Ta","W","Re","Os","Ir","Pt","Au","Hg","Tl","Pb","Bi","Po","At","Rn","Fr","Ra","Ac","Th","Pa","U","Np","Pu","Am","Cm","Bk","Cf","Es","Fm","Md","No","Lr","Rf","Db","Sg","Bh","Hs","Mt","Ds","Rg","Uub","Uut","Uuq","Uup","Uuh","Uus","Uuo","X"]
def get_conmat(elements, coords):
# partially based on code from Robert Paton's Sterimol script, which based this part on Grimme's D3 code
# elements is a list of strings, coords is a numpy array or nested list of shape N_atoms x 3
if type(coords) == list:
coords = np.asarray(coords)
natom = len(elements)
#max_elem = 94
k1 = 16.0
k2 = 4.0/3.0
conmat = np.zeros((natom,natom))
for i in range(0,natom):
if elements[i] not in rcov.keys():
continue
for iat in range(0,natom):
if elements[iat] not in rcov.keys():
continue
if iat != i:
dxyz = coords[iat]-coords[i]
r = np.linalg.norm(dxyz)
rco = rcov[elements[i]]+rcov[elements[iat]]
rco = rco*k2
rr=rco/r
                damp=1.0/(1.0+np.exp(-k1*(rr-1.0)))  # np.exp works on scalars; np.math was removed in NumPy 2.0
if damp > 0.85: #check if threshold is good enough for general purpose
conmat[i,iat],conmat[iat,i] = 1,1
return(conmat)
def add_valence(elements,coords,conmat,base_idx,add_element="Pd"):
# Adds a valence to base so that the angle to the previous substituents is maximized and reorders the coordinate output for convenience
# add_element: add any of the following elements:
distpx = {"O":1.5,"Se":2.12,"Pd":2.28,"X":1.8} # typical bond distances to P
if type(coords) == list:
coords = np.asarray(coords)
num_atoms = len(elements)
coord_base = coords[base_idx]
base_element = elements[base_idx]
vec = np.array([0.0,0.0,0.0])
bonded = []
for atom in range(num_atoms):
if conmat[base_idx][atom]:
bonded.append(atom)
vec += coord_base - coords[atom]
coordox = distpx[add_element]*vec/np.linalg.norm(vec) + coord_base
atoms = [x for x in range(num_atoms+1)]
coords_temp = np.vstack((coords,coordox))
if sum(get_conmat(elements+[add_element],coords_temp)[-1]) != 1.0:
print(" Warning: possible collision!")
# sort coordinates so that base is first, add_element is second, and the other atoms bonded to base are next
elements_new = [base_element,add_element]+[elements[a] for a in bonded] + [a for i,a in enumerate(elements) if i not in [base_idx]+bonded]
coords_new = np.vstack((coord_base, coordox, coords[bonded], coords[[i for i,a in enumerate(elements) if i not in [base_idx]+bonded]]))
return(elements_new, coords_new)
def write_xyz(elements,coords,filename):
with open(filename,"w") as f:
f.write(f"{len(elements)}\n\n")
for i,a in enumerate(elements):
f.write(f"{a.title():>3} " + " ".join([f"{coords[i][j]:15f}" for j in range(3)]) + "\n")
def rmsd_matrix(conformers):
molobjects = [rdmolfiles.MolFromMolFile(str(cwd/conformer/f"{conformer}_opt.sdf"),removeHs=False,strictParsing=False) for conformer in conformers]
molobjects = [Chem.RemoveHs(mol) for mol in molobjects] # Remove all H: optional but speeds up RMSD calculation
molobjects = [delete_haloalkane_halides(mol) for mol in molobjects] # Remove halides in perhaloalkyl moieties. Improves RMSD matching and timing
molobjects_inv = [mirror_mol(mol) for mol in molobjects] # create mirror images of each conformer
rmsd_mat = np.zeros((len(conformers),len(conformers)))
for i,j in itertools.product(range(len(conformers)),range(len(conformers))):
if i<j: continue
if i==j:
rmsd_mat[i,j] = 1
else:
rmsd_mat[i,j] = min((rdMolAlign.GetBestRMS(molobjects[i],molobjects[j]),rdMolAlign.GetBestRMS(molobjects[i],molobjects_inv[j])))
rmsd_mat[j,i] = rmsd_mat[i,j]
return(rmsd_mat)
def dict_key_rmsd(candidate_pair):
return float(rmsd_matrix(candidate_pair)[0,1])
# which energies to read from which log-file
energylogs = {
"e_dz":"freq",
"e_tz_gas":"nbo",
"e_tz_gas":"sp",
"e_tz_solv":"solv",
"e_tz_ra":"ra",
"e_tz_rc":"rc",
}
# which properties to read from which log-file
proplogs = {
"freq":["nimag","g","t"],
"sp" :["dipole","homo","qpole","t"],
"ra" :["homo","nbo","t"],
"rc" :["homo","nbo","t"],
"nbo" :["nbo","nborbsP","t"],
"nmr" :["nmr","t"],
"efg" :["efg","nuesp","t"],
"solv":["ecds","t"],
}
# assign names to each descriptor
propoutput = {
"freq_g": ["","g"],
"freq_nimag": ["nimag"],
"sp_dipole": ["dipolemoment",],
"sp_homo": ["fmo_e_homo","fmo_e_lumo","fmo_mu","fmo_eta","fmo_omega"],
"ra_homo":["somo_ra","","","",""],
"rc_homo":["somo_rc","","","",""],
"sp_qpole": ["qpole_amp","qpoletens_xx","qpoletens_yy","qpoletens_zz"],
"nbo_nbo": ["nbo_P"],
"ra_nbo": ["nbo_P_ra","spindens_P_ra"],
"rc_nbo": ["nbo_P_rc","spindens_P_rc"],
"nmr_nmr": ["nmr_P","nmrtens_sxx_P","nmrtens_syy_P","nmrtens_szz_P",],
"efg_efg": ["efg_amp_P","efgtens_xx_P","efgtens_yy_P","efgtens_zz_P"],
"efg_nuesp": ["nuesp_P",],
"solv_ecds": ["E_solv_cds"],
"nbo_dipole": ["dipolemoment",],
"nbo_homo": ["fmo_e_homo","fmo_e_lumo","fmo_mu","fmo_eta","fmo_omega"],
"nbo_qpole": ["qpole_amp","qpoletens_xx","qpoletens_yy","qpoletens_zz"],
}
boltzproperties = ['vmin_vmin','vmin_r','dipolemoment', 'fmo_e_homo', 'fmo_e_lumo', 'fmo_mu', 'fmo_eta', 'fmo_omega', 'somo_ra', 'somo_rc', 'qpole_amp', 'qpoletens_xx', 'qpoletens_yy', 'qpoletens_zz', 'nbo_P', 'nbo_P_ra', 'spindens_P_ra', 'nbo_P_rc', 'spindens_P_rc', 'nmr_P', 'nmrtens_sxx_P', 'nmrtens_syy_P', 'nmrtens_szz_P', 'efg_amp_P', 'efgtens_xx_P', 'efgtens_yy_P', 'efgtens_zz_P', 'nuesp_P', 'E_solv_cds', 'nbo_lp_P_percent_s', 'nbo_lp_P_occ', 'nbo_lp_P_e', 'nbo_bd_e_max', 'nbo_bd_e_avg', 'nbo_bds_e_min', 'nbo_bds_e_avg', 'nbo_bd_occ_min', 'nbo_bd_occ_avg', 'nbo_bds_occ_max', 'nbo_bds_occ_avg', 'E_solv_total', 'E_solv_elstat', 'E_oxidation', 'E_reduction', 'fukui_p', 'fukui_m', 'pyr_P', 'pyr_alpha', 'vbur_vbur', 'vbur_vtot', 'vbur_ratio_vbur_vtot', 'vbur_qvbur_min', 'vbur_qvbur_max', 'vbur_qvtot_min', 'vbur_qvtot_max', 'vbur_max_delta_qvbur', 'vbur_max_delta_qvtot', 'vbur_ovbur_min', 'vbur_ovbur_max', 'vbur_ovtot_min', 'vbur_ovtot_max', 'vbur_near_vbur', 'vbur_far_vbur', 'vbur_near_vtot', 'vbur_far_vtot', 'sterimol_B1', 'sterimol_B5', 'sterimol_L', 'sterimol_burB1', 'sterimol_burB5', 'sterimol_burL',"Pint_P_int","Pint_dP","Pint_P_min","Pint_P_max","volume","surface_area","sphericity"] # "vv_total_visible_volume","vv_proximal_visible_volume","vv_distal_visible_volume","vv_ratio_visible_total","vv_ratio_proxvis_total",
mmproperties = ['dipolemoment', 'qpole_amp', 'qpoletens_xx', 'qpoletens_yy', 'qpoletens_zz', 'pyr_P', 'pyr_alpha', 'vbur_vbur', 'vbur_vtot', 'vbur_qvbur_min', 'vbur_qvbur_max', 'vbur_qvtot_min', 'vbur_qvtot_max', 'vbur_max_delta_qvbur', 'vbur_max_delta_qvtot', 'vbur_ovbur_min', 'vbur_ovbur_max', 'vbur_ovtot_min', 'vbur_ovtot_max', 'vbur_near_vbur', 'vbur_far_vbur', 'vbur_near_vtot', 'vbur_far_vtot', 'sterimol_B1', 'sterimol_B5', 'sterimol_L', 'sterimol_burB1', 'sterimol_burB5', 'sterimol_burL'] # ,"vv_total_visible_volume","vv_proximal_visible_volume","vv_distal_visible_volume","vv_ratio_visible_total","vv_ratio_proxvis_total",
Pintresults = ["Pint_P_int","Pint_dP","Pint_P_min","Pint_P_max","volume","surface_area","sphericity"]
def morfeus_properties(elements,coordinates,confdata):
# Morfeus: Sterimol, Vbur, pyr
morfdict = {}
if "pyr_P" not in confdata.keys() and confdata["p_val"] == 3:
        # Pyramidalization - two equivalent measurements, P and alpha
pyr = morfeus.Pyramidalization(elements=elements,coordinates=coordinates,atom_index=1,excluded_atoms=[2]) # remove Pd
morfdict["pyr_P"] = float(pyr.P)
morfdict["pyr_alpha"] = float(pyr.alpha)
if "vbur_vbur" not in confdata.keys():
#Buried volume - get quadrant volumes and distal volume
        # iterate through the P-substituents, aligning the quadrants parallel to each substituent once (= xz_plane definition)
# Metal/point of reference should be 2.28 A away from P
# z_axis_atoms: P
# xz_plane_atoms: each of the substituents once
# keep lowest and highest quadrant and octant volume across all three orientations of the coordinate system
# keep highest difference of any neighboring quadrant volume
# keep volume in each of the two hemispheres
qvbur_all = np.array([])
qvdist_all = np.array([])
qvtot_all = np.array([])
max_delta_qvbur_all = []
max_delta_qvtot_all = []
ovbur_all = np.array([])
ovtot_all = np.array([])
for i in range(3):#confdata["p_val"]):
bv = morfeus.BuriedVolume(elements,coordinates,2,excluded_atoms=[2],z_axis_atoms=[1],xz_plane_atoms=[3+i])
bv.octant_analysis()
bv.compute_distal_volume(method="buried_volume",octants=True)
vbur = bv.buried_volume # these are identical for each iteration
vdist = bv.distal_volume #
vtot = vbur + vdist #
qvbur = np.asarray(list(bv.quadrants["buried_volume"].values()))
qvdist = np.asarray(list(bv.quadrants["distal_volume"].values()))
qvtot = qvbur + qvdist
qvbur_all = np.append(qvbur_all,qvbur)
qvtot_all = np.append(qvtot_all,qvtot)
max_delta_qvbur_all.append(max([abs(qvbur[j]-qvbur[j-1]) for j in range(4)]))
max_delta_qvtot_all.append(max([abs(qvtot[j]-qvtot[j-1]) for j in range(4)]))
ovbur = np.asarray(list(bv.octants["buried_volume"].values()))
ovdist = np.asarray(list(bv.octants["distal_volume"].values()))
ovtot = ovbur + ovdist
ovbur_all = np.append(ovbur_all,ovbur)
ovtot_all = np.append(ovtot_all,ovtot)
near_vbur = ovbur[4:].sum() # these are identical for each iteration
far_vbur = ovbur[:4].sum() #
near_vtot = ovtot[4:].sum() #
far_vtot = ovtot[:4].sum() #
morfdict["vbur_vbur"] = vbur
morfdict["vbur_vtot"] = float(vtot)
morfdict["vbur_ratio_vbur_vtot"] = float(vbur/vtot)
morfdict["vbur_qvbur_min"] = float(min(qvbur_all))
morfdict["vbur_qvbur_max"] = float(max(qvbur_all))
morfdict["vbur_qvtot_min"] = float(min(qvtot_all))
morfdict["vbur_qvtot_max"] = float(max(qvtot_all))
morfdict["vbur_max_delta_qvbur"] = float(max(max_delta_qvbur_all))
morfdict["vbur_max_delta_qvtot"] = float(max(max_delta_qvtot_all))
morfdict["vbur_ovbur_min"] = float(min(ovbur_all))
morfdict["vbur_ovbur_max"] = float(max(ovbur_all))
morfdict["vbur_ovtot_min"] = float(min(ovtot_all))
morfdict["vbur_ovtot_max"] = float(max(ovtot_all))
morfdict["vbur_near_vbur"] = float(near_vbur)
morfdict["vbur_far_vbur"] = float(far_vbur)
morfdict["vbur_near_vtot"] = float(near_vtot)
morfdict["vbur_far_vtot"] = float(far_vtot)
if "sterimol_B1" not in confdata.keys():
# Sterimol
# for Sterimol values matching Rob Paton's implementation:
patonradii = morfeus.helpers.get_radii(elements, radii_type="bondi")
patonradii = np.array(patonradii)
patonradii[patonradii == 1.2] = 1.09
sterimol = morfeus.Sterimol(elements, coordinates, 2, 1, radii=patonradii, n_rot_vectors=3600)
morfdict["sterimol_B1"] = float(sterimol.B_1_value)
morfdict["sterimol_B5"] = float(sterimol.B_5_value)
morfdict["sterimol_L"] = float(sterimol.L_value)
# buried Sterimol
sterimol_bur = morfeus.Sterimol(elements, coordinates, 2, 1,calculate=False,radii=patonradii, n_rot_vectors=3600)
sterimol_bur.bury(sphere_radius=5.5,method="delete",radii_scale=0.5)
# sterimol.bury(sphere_radius=4.5,method="delete",radii_scale=1)
morfdict["sterimol_burB1"] = float(sterimol_bur.B_1_value)
morfdict["sterimol_burB5"] = float(sterimol_bur.B_5_value)
morfdict["sterimol_burL"] = float(sterimol_bur.L_value)
return(morfdict)
def gp_properties(ligand,conformer,p_idx):
# reads gaussian log files
gpdict = {}
gpdict["properties"] = {}
contents = {
"streams":{},
"filecont":{},
}
# read energies
for e,log in energylogs.items():
contents["streams"][log] = gp.get_outstreams(cwd/conformer/f"{conformer}_{log}.log")
if contents["streams"][log] == "failed or incomplete job":
return({"error":True})
else:
gpdict[e] = gp.get_e_hf(contents["streams"][log])
gpdict["error"] = False
# going through each log file, get the relevant properties
for log in proplogs.keys():
contents["filecont"][log] = gp.get_filecont(cwd/conformer/f"{conformer}_{log}.log")
for prop in proplogs[log]:
gpresults = gp.jobtypes[prop][0](contents[gp.jobtypes[prop][1]][log],p_idx)
if prop == "nborbsP": # NBO orbital analysis returns a dictionary with the proper labels
gpdict["properties"].update(gpresults)
elif prop == "t": # subjob time
gpdict[f"{log}_t"] = gpresults
elif prop in ["e_dz","g","e_tz_gas","e_tz_solv","e_tz_ra","e_tz_rc","nimag"]:
gpdict.update({propoutput[f"{log}_{prop}"][i]: float(gpresults[i]) for i in range(len(gpresults))})
else: # all other functions return a list. This is assigned into a dict with proper names here
gpdict["properties"].update({propoutput[f"{log}_{prop}"][i]: float(gpresults[i]) for i in range(len(gpresults))})
gpdict["g_tz_gas"] = gpdict["g"] - gpdict["e_dz"] + gpdict["e_tz_gas"] # in Hartree
gpdict["g_tz_solv"] = gpdict["g"] - gpdict["e_dz"] + gpdict["e_tz_solv"] # in Hartree
gpdict["properties"]["E_solv_total"] = (gpdict["e_tz_solv"] - gpdict["e_tz_gas"]) * hartree_kcalmol # in kcal/mol
gpdict["properties"]["E_solv_elstat"] = gpdict["properties"]["E_solv_total"] - gpdict["properties"]["E_solv_cds"] # in kcal/mol
gpdict["properties"]["E_oxidation"] = gpdict["e_tz_rc"] - gpdict["e_tz_gas"] # in Hartree
gpdict["properties"]["E_reduction"] = gpdict["e_tz_ra"] - gpdict["e_tz_gas"] # in Hartree
gpdict["properties"]["fukui_p"] = gpdict["properties"]["nbo_P"]-gpdict["properties"]["nbo_P_ra"] # fukui electrophilicity
gpdict["properties"]["fukui_m"] = gpdict["properties"]["nbo_P_rc"]-gpdict["properties"]["nbo_P"] # fukui nucleophilicity
gpdict["t_total"] = sum([gpdict[f"{log}_t"] for log in proplogs.keys()])
if "" in gpdict.keys():
del gpdict[""]
if "" in gpdict["properties"].keys():
del gpdict["properties"][""]
return(gpdict)
def read_conformer(cwd, ligand, conformer): # cwd: pathlib path of current working directory. ligand: 8-digit ligand ID. conformer: full name of the conformer (including the ligand ID at the beginning)
confdata = {}
errors = []
checklogs = [cwd/conformer/f"{conformer}_{l}.log" for l in proplogs.keys() if not (cwd/conformer/f"{conformer}_{l}.log").exists()]
if len(checklogs) != 0:
#! log this as a conformer-level error
err = f"Missing Gaussian log files, flagged in read_conformer: {','.join([chkl.name for chkl in checklogs])}"
errors.append(err)
print(f"{ligand};{conformer};{err}")
with open(cwd/f"{ligand}_errors.txt","a") as f:
f.write(f"{ligand};{conformer};{err}\n")
confdata["error"] = True
return(confdata,errors)
if "elements_pd" not in confdata.keys():
# mol = next(pybel.readfile("g09",str(cwd/conformer/f"{conformer}_nbo.log")))
#mol = next(pybel.readfile("g09",str(cwd/conformer/f"{conformer}_opt.log")))
#elements = [periodictable[a.atomicnum] for a in mol.atoms]
#coordinates = [list(a.coords) for a in mol.atoms]
#coordinates_a = np.array([a.coords for a in mol.atoms])
def read_gaussian_logfile(fn):
time0=time.time()
read=False
for line in open(fn,"r"):
if read:
if "---" in line and len(elements)>0:
read=False
if read:
if "X" not in line and "---" not in line:
atomnum = int(line.split()[1])
#print(line.replace("\n",""))
#print(atomnum)
el = periodictable[atomnum]
elements.append(el)
coordinates.append([float(line.split()[3]),float(line.split()[4]), float(line.split()[5])])
if "Coordinates (Angstroms)" in line:
coordinates, elements = [], []
read=True
time1=time.time()
print("gaussian log parser done in %.2f seconds"%(time1-time0))
return(coordinates, elements)
coordinates, elements = read_gaussian_logfile(str(cwd/conformer/f"{conformer}_opt.log"))
coordinates_a = np.array(coordinates)
conmat = get_conmat(elements,coordinates_a)
p_idx = [i for i in range(len(elements)) if elements[i] == "P" and sum(conmat[i]) <= 3][0] # this removes quaternary P (phosphonium, phosphate etc) but allows for P with 2 substituents (phosphabenzene, phosphaimine etc). Can we be sure that we never have more than one non-quaternary P(III)?
elements_pd, coordinates_pd = add_valence(elements,coordinates,conmat,p_idx,add_element="Pd") # Add "Pd" at the reference position in the P-lone pair region
if not (cwd/conformer/f"{conformer}_opt_Pd.xyz").exists():
#out = pybel.Outputfile("xyz",str(cwd/conformer/f"{conformer}_opt.xyz"))
#out.write(mol)
#out.close()
write_xyz(elements, coordinates, cwd/conformer/f"{conformer}_opt.xyz")
#out = pybel.Outputfile("sdf",str(cwd/conformer/f"{conformer}_opt.sdf"))
#out.write(mol)
#out.close()
os.system("obabel -ixyz %s -osdf >> %s"%(str(cwd/conformer/f"{conformer}_opt.xyz"), str(cwd/conformer/f"{conformer}_opt.sdf")))
write_xyz(elements_pd,coordinates_pd,cwd/conformer/f"{conformer}_opt_Pd.xyz")
confdata["coords"] = coordinates
confdata["coords_pd"] = coordinates_pd.tolist()
confdata["elements"] = elements
confdata["elements_pd"] = elements_pd
confdata["conmat"] = conmat.tolist()
confdata["p_idx"] = p_idx
confdata["p_val"] = int(sum(conmat[p_idx])) # how many substituents at P
confdata["properties"] = {}
## get properties
# gp_properties: everything that can be read from the Gaussian log files (most electronic properties)
confdata.update(gp_properties(ligand,conformer,confdata["p_idx"]))
if confdata["error"]:
#! log this as a conformer-level error
err = "Error in the Gaussian computations, flagged in read_conformer, please check log files."
errors.append(err)
print(f"{ligand};{conformer};{err}")
with open(cwd/f"{ligand}_errors.txt","a") as f:
f.write(f"{ligand};{conformer};{err}\n")
with open(cwd/conformer/f"{conformer}_data.yml","w") as f:
yaml.dump(confdata,f,Dumper=Dumper)
return(confdata,errors)
if confdata["nimag"] != 0:
#! log this as a conformer-level error
err = f"Number of imaginary frequencies: {confdata['nimag']}."
errors.append(err)
print(f"{ligand};{conformer};{err}")
with open(cwd/f"{ligand}_errors.txt","a") as f:
f.write(f"{ligand};{conformer};{err}\n")
with open(cwd/conformer/f"{conformer}_data.yml","w") as f:
yaml.dump(confdata,f,Dumper=Dumper)
confdata["error"] = True
return(confdata,errors)
# morfeus: properties that use the geometry/steric properties
confdata["properties"].update(morfeus_properties(confdata["elements_pd"],confdata["coords_pd"],confdata))
# # P_int
# if "Pint_P_int" not in confdata.keys():
# confdata.update(P_int.P_int_main(name=conformer,directory=cwd/conformer))
# read results
disp = "d3"
pint_read = P_int.read_dedout(cwd/conformer,conformer,disp)+P_int.read_multiwfnout(cwd/conformer,conformer)+P_int.read_disp(cwd/conformer,conformer,disp)
confdata["properties"].update({Pintresults[i]:float(pint_read[i]) for i in range(7)})
# V_min
try:
if "vmin_vmin" not in confdata.keys():
vminob = vmin.get_vmin(f"{conformer}.fchk",str(cwd/conformer)+"/",True)
confdata["properties"]["vmin_vmin"] = float(vminob.v_min)
confdata["properties"]["vmin_r"] = float(vminob.r_min)
    except: # bare except: any failure in the Vmin step (not only a FileNotFoundError) ends up here
        err = "Vmin FileNotFoundError."
errors.append(err)
print(f"{ligand};{conformer};{err}")
with open(cwd/f"{ligand}_errors.txt","a") as f:
f.write(f"{ligand};{conformer};{err}\n")
confdata["error"] = True
# visvol
# if "vv_total_visible_volume" not in confdata.keys():
# confdata.update(visvol.get_vis_vol(cwd/conformer/f"{conformer}_opt_Pd.xyz",radii_type = 'rcov',prox_cutoff = 3.5,ignore_H = 0,write_results = 1, plot = 0))
with open(cwd/conformer/f"{conformer}_data.yml","w") as f:
yaml.dump(confdata,f,Dumper=Dumper)
return(confdata,errors)
def read_ligand(cwd, ligand, conformers, liganddata = {}): # cwd is the ligand-level directory
status = {"ligandlevel": [],}
if len(liganddata.keys()) == 0:
if (cwd/f"{ligand}_data.yml").exists():
with open(cwd/f"{ligand}_data.yml","r") as f:
liganddata = yaml.load(f,Loader=Loader)
if (cwd/f"{ligand}_confdata.yml").exists():
with open(cwd/f"{ligand}_confdata.yml","r") as f:
liganddata["confdata"] = yaml.load(f,Loader=Loader)
else:
liganddata = {
"conformers_all": conformers,
"conformers": conformers.copy(), # Duplicates and computations with errors (including nimag=1) will be removed from this list
"number_of_conformers": len(conformers),
"removed_duplicates": [],
"confdata": {},#{c:{} for c in conformers},
"boltzmann_averaged_data": {},
"min_data": {},
"max_data": {},
"delta_data": {},
"vburminconf_data": {},
}
newconfs = 0
for conformer in conformers:
if conformer in liganddata["removed_duplicates"]:
continue
print(conformer)
if conformer in liganddata["confdata"].keys():
pass
elif (cwd/conformer/f"{conformer}_data.yml").exists():
with open(cwd/conformer/f"{conformer}_data.yml","r") as f:
liganddata["confdata"][conformer] = yaml.load(f,Loader=Loader)
newconfs += 1
else:
print("read conformer data")
liganddata["confdata"][conformer],status[conformer] = read_conformer(cwd, ligand, conformer) # returns the dictionary with the conformer data and a list with errors
newconfs += 1
if newconfs > 0:
# error, NIMAG removal
liganddata["conformers_w_error"] = [conformer for conformer in liganddata["conformers"] if liganddata["confdata"][conformer]["error"]]
liganddata["conformers"] = [c for c in liganddata["conformers"] if c not in liganddata["conformers_w_error"]]
liganddata["number_of_conformers"] = len(liganddata["conformers"])
energies = ["e_dz","g","e_tz_gas","g_tz_gas","e_tz_solv","g_tz_solv"]
liganddata["energies"] = {}
liganddata["relative_energies"] = {}
for e in energies:
liganddata["energies"][e] = {conformer: liganddata["confdata"][conformer][e] for conformer in liganddata["conformers"]}
liganddata[e+"_min"] = min(liganddata["energies"][e].values())
liganddata[e+"_minconf"] = list(liganddata["energies"][e].keys())[np.argmin(list(liganddata["energies"][e].values()))]
liganddata["relative_energies"][e+"_rel"] = {conformer: (liganddata["energies"][e][conformer]-liganddata[e+"_min"])*hartree_kcalmol for conformer in liganddata["conformers"]}
# erel_df = pd.DataFrame(np.array([list(liganddata[e+"_rel"].values()) for e in energies]).T ,columns=energies,index=liganddata["conformers"] )
erel_df = pd.DataFrame([liganddata["relative_energies"][e+"_rel"] for e in energies],index=energies).T
#liganddata["relative_energies_df"] = erel_df
liganddata["relative_energies_dict"] = erel_df.to_dict()
# Find duplicates:
# 1) find pairs of conformers that are within E_rel < 0.1 kcal/mol (relative energies seem to be much more reliable than relative free energies)
# 2) check these pairs to also have RMSD < 0.2 A
# 3) Remove the conformer with higher relative free energy
duplicates_candidates = [(i,j) for i,j in itertools.combinations(liganddata["conformers"],2) if abs(erel_df["e_dz"].loc[i] - erel_df["e_dz"].loc[j]) < 0.1]
try:
        # Raise a NameError here if you only want to run the except branch (the fallback duplicate check) for testing
cores = max(os.cpu_count() - 2, 1)
with Pool(cores) as p:
values = p.map(dict_key_rmsd, duplicates_candidates)
liganddata["rmsd_candidates"] = {key: value for key, value in zip(duplicates_candidates, values)}
# The less cool, non-parallel way
#liganddata["rmsd_candidates"] = {candidate_pair: float(rmsd_matrix(candidate_pair)[0,1]) for candidate_pair in duplicates_candidates} # keep all RMSD for potential debugging
liganddata["duplicates"] = [candidate_pair for candidate_pair in liganddata["rmsd_candidates"] if liganddata["rmsd_candidates"][candidate_pair] < 0.2]
except: # RDkit failed to generate Mol objects and thus could not compute RMSD, or some of the internal structures in those mol files are different despite actually being the same. Default to duplicate detection based on dipole moment and chemical shift similarity
#! log this on ligand level for double-checking
err = "Warning: RDKit error at duplicate RMSD testing. Please double check."
status["ligandlevel"].append(err)
print(f"{ligand};ligandlevel;{err}")
with open(cwd/f"{ligand}_errors.txt","a") as f:
f.write(f"{ligand};ligandlevel;{err}\n")
dipole_candidates = set([(i,j) for i,j in duplicates_candidates if abs(liganddata["confdata"][i]["properties"]["dipolemoment"] - liganddata["confdata"][j]["properties"]["dipolemoment"]) < 0.025])
nmr_candidates = set([(i,j) for i,j in duplicates_candidates if abs(liganddata["confdata"][i]["properties"]["nmr_P"] - liganddata["confdata"][j]["properties"]["nmr_P"]) < 0.1])
liganddata["duplicates"] = sorted(dipole_candidates & nmr_candidates)
liganddata["removed_duplicates"] = [erel_df.loc[list(pair)]["g_tz_gas"].idxmax() for pair in liganddata["duplicates"]]
liganddata["conformers"] = [c for c in liganddata["conformers"] if c not in liganddata["removed_duplicates"]]
liganddata["number_of_conformers"] = len(liganddata["conformers"])
# Boltzmann averaging
#boltzfacs = {conformer: np.exp(-liganddata["relative_energies_df"]["g_tz_gas"].loc[conformer]/(R*T)) for conformer in liganddata["conformers"]}
boltzfacs = {conformer: np.exp(-erel_df["g_tz_gas"].loc[conformer]/(R*T)) for conformer in liganddata["conformers"]}
Q = sum(boltzfacs.values())
liganddata["boltzmann_weights"] = {conformer: float(boltzfacs[conformer]/Q) for conformer in liganddata["conformers"] } # probability
for prop in boltzproperties:
confsmissingprop = [conf for conf in liganddata["conformers"] if prop not in liganddata["confdata"][conf]["properties"].keys()]
if len(confsmissingprop) == 0:
liganddata["boltzmann_averaged_data"][prop] = sum([liganddata["boltzmann_weights"][conf] * liganddata["confdata"][conf]["properties"][prop] for conf in liganddata["conformers"]])
else: # if a single conformer is missing a property value, set Boltzmann-average to None
#! log this as a ligand-level error with prop and confsmissingprop
err = f"Warning: {len(confsmissingprop)}/{len(liganddata['conformers'])} conformers missing values for property {prop}: {','.join(confsmissingprop)}."
status["ligandlevel"].append(err)
print(f"{ligand};ligandlevel;{err}")
with open(cwd/f"{ligand}_errors.txt","a") as f:
f.write(f"{ligand};ligandlevel;{err}\n")
liganddata["boltzmann_averaged_data"][prop] = None
continue
# "Condensed" properties
liganddata["vburminconf"] = liganddata["conformers"][np.argmin([liganddata["confdata"][conf]["properties"]["vbur_vbur"] for conf in liganddata["conformers"]])]
for prop in mmproperties:
proplist = [liganddata["confdata"][conf]["properties"][prop] for conf in liganddata["conformers"] if prop in liganddata["confdata"][conf]["properties"].keys()]
# if a single conformer is missing a property value, still perform min/max analysis (Boltzmann-average will be None to indicate missing value(s))
# if all confs are missing this prop, set min/max/delta to None
if len(proplist) == 0:
liganddata["min_data"][prop] = None
liganddata["max_data"][prop] = None
liganddata["delta_data"][prop] = None
liganddata["vburminconf_data"][prop] = None
else:
liganddata["min_data"][prop] = min(proplist)
liganddata["max_data"][prop] = max(proplist)
liganddata["delta_data"][prop] = liganddata["max_data"][prop] - liganddata["min_data"][prop]
liganddata["vburminconf_data"][prop] = liganddata["confdata"][liganddata["vburminconf"]]["properties"][prop]
liganddata["time_all"] = sum([liganddata["confdata"][conf]["t_total"] for conf in liganddata["conformers_all"] if "t_total" in liganddata["confdata"][conf].keys()])
with open(cwd/f"{ligand}_data.yml","w") as f:
yaml.dump({k:v for k,v in liganddata.items() if k != "confdata"},f,Dumper=Dumper)
with open(cwd/f"{ligand}_confdata.yml","w") as f:
yaml.dump(liganddata["confdata"],f,Dumper=Dumper)
erel_df.to_csv(cwd/f"{ligand}_relative_energies.csv",sep=";")
    return liganddata, status
def main_split_logs(cwd, ligand):
if not (cwd/"ERR").exists():
(cwd/"ERR").mkdir()
# if not (cwd/"done").exists():
# (cwd/"done").mkdir()
conformers = [i.name for i in (cwd/ligand).iterdir() if i.is_dir()]
conformers_good = []
for conformer in conformers:
logs = [i.name for i in (cwd/ligand/conformer).rglob("*.log")]
if f"{conformer}.log" in logs and f"{conformer}_opt.log" not in logs:
status = split_log(ligand, conformer)
if status != "Error":
#(cwd/ligand/conformer/f"{conformer}.log").rename(cwd/f"done/{conformer}.log")
conformers_good.append(conformer)
    return conformers_good
if __name__ == '__main__':
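    # Workflow: split raw per-conformer .log files, aggregate per-ligand data with read_ligand
    # (reusing cached results from allligands_data.yml when present), then export a ligand-level
    # descriptor table to allligands_data.csv.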
starttime_all = time.time()
ligname = re.compile("[0-9]{8}")
ligands = sorted([i.name for i in cwd.iterdir() if (ligname.match(i.name) and i.is_dir())])
conformers = {ligand: [i.name for i in (cwd/ligand).iterdir() if i.is_dir()] for ligand in ligands}
if not (cwd/"ERR").exists():
(cwd/"ERR").mkdir()
if not (cwd/"done").exists():
(cwd/"done").mkdir()
for ligand in ligands:
for conformer in conformers[ligand]:
logs = [i.name for i in (cwd/ligand/conformer).rglob("*.log")]
if f"{conformer}.log" in logs and f"{conformer}_opt.log" not in logs:
status = split_log(ligand,conformer)
if status != "Error":
(cwd/ligand/conformer/f"{conformer}.log").rename(cwd/f"done/{conformer}.log")
if (cwd/"allligands_data.yml").exists():
with open(cwd/"allligands_data.yml","r") as f:
allliganddata = yaml.load(f,Loader=Loader)
else:
allliganddata = {}
for ligand in ligands:
print(ligand)
print(conformers[ligand])
if ligand in allliganddata.keys():
allliganddata[ligand],status = read_ligand(cwd,ligand,conformers[ligand],allliganddata[ligand])
else:
allliganddata[ligand],status = read_ligand(cwd,ligand,conformers[ligand])
with open(cwd/"allligands_data.yml","w") as f:
yaml.dump(allliganddata,f,Dumper=Dumper)
variants = ["boltz","min","max","delta","vburminconf"]
columns = [i+"_boltz" for i in boltzproperties if i not in mmproperties] + [f"{i}_{j}" for i,j in itertools.product(mmproperties,variants)]# + ["t_total","number_of_conformers"]
df = pd.DataFrame(columns = columns,index = ligands)
    for l in ligands:
        for c in columns:
            # Column "<prop>_<variant>" is filled from the matching per-ligand dict built in
            # read_ligand ("boltzmann_averaged_data" for "boltz", otherwise "<variant>_data").
            prop, variant = c.rsplit("_", 1)
            source = "boltzmann_averaged_data" if variant == "boltz" else f"{variant}_data"
            df.loc[l, c] = allliganddata[l][source][prop]
df["t_total"] = [allliganddata[l]["t_total"] for l in ligands]
df["number_of_conformers"] = [allliganddata[l]["number_of_conformers"] for l in ligands]
df.to_csv("allligands_data.csv",sep=";")
print(f"All done. Total time: {round((time.time()-starttime_all),2)} sec")
|
# Owner(s): ["oncall: jit"]
from typing import Any, Dict, List, Optional, Tuple
from torch.testing._internal.jit_utils import JitTestCase, make_global
from torch.testing import FileCheck
from torch import jit
from jit.test_module_interface import TestModuleInterface # noqa: F401
import os
import sys
import torch
import torch.testing._internal.jit_utils
import torch.nn as nn
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestMisc(JitTestCase):
def test_joined_str(self):
def func(x):
hello, test = "Hello", "test"
print(f"{hello + " " + test}, I'm a {test}")
print("format blank")
hi = 'hi'
print(f"stuff before {hi}")
print(f"{hi} stuff after")
return x + 1
x = torch.arange(4., requires_grad=True)
# TODO: Add support for f-strings in string parser frontend
# self.checkScript(func, [x], optimize=True, capture_output=True)
with self.capture_stdout() as captured:
out = func(x)
scripted = torch.jit.script(func)
with self.capture_stdout() as captured_script:
out_script = func(x)
self.assertEqual(out, out_script)
self.assertEqual(captured, captured_script)
def test_kwarg_support(self):
with self.assertRaisesRegex(torch.jit.frontend.NotSupportedError, "variable number of arguments"):
class M(torch.nn.Module):
def forward(self, *, n_tokens: int, device_name: str = 2):
pass
torch.jit.script(M())
class M(torch.nn.Module):
def forward(self, *, n_tokens: int, device_name: str):
return n_tokens, device_name
sm = torch.jit.script(M())
with self.assertRaisesRegex(RuntimeError, "missing value for argument 'n_tokens'"):
sm()
with self.assertRaisesRegex(RuntimeError, "positional arg"):
sm(3, 'hello')
self.assertEqual(sm(n_tokens=3, device_name='hello'), (3, 'hello'))
def test_tuple_subscripted_assign(self):
with self.assertRaisesRegex(RuntimeError, "subscripted assignment"):
@torch.jit.script
def foo(a: Tuple[int, int]) -> None:
a[0] = a[1]
with self.assertRaisesRegex(RuntimeError, "augmented assignment"):
@torch.jit.script
def bar(a: Tuple[int, int]) -> None:
a[0] += a[1]
def test_subexpression_List_Future(self):
@torch.jit.script
def fn(x: List[torch.jit.Future[int]]) -> torch.jit.Future[int]:
return x[0]
FileCheck().check('Future[int]').check('Future[int]').run(fn.graph)
def test_subexpression_Future_annotate(self):
@torch.jit.script
def fn() -> torch.jit.Future[int]:
x: List[torch.jit.Future[int]] = []
return x[0]
FileCheck().check("Future[int][]").run(fn.graph)
def test_future_isinstance(self):
@torch.jit.script
def fn(x: Any) -> torch.jit.Future[int]:
assert isinstance(x, jit.Future[int])
return x
FileCheck().check("Future[int]").run(fn.graph)
def test_str_refine_any(self):
def forward(x: Any) -> str:
if isinstance(x, str):
return x
return "foo"
forward = torch.jit.script(forward)
self.assertEqual(forward(1), "foo")
self.assertEqual(forward("bar"), "bar")
def test_subexpression_Tuple_int_int_Future(self):
@torch.jit.script
def fn(x: Tuple[int, int, torch.jit.Future[int]]) -> Tuple[int, torch.jit.Future[int]]:
return x[0], x[2]
FileCheck().check('(int, int, Future[int])').check('(int, Future[int])').run(fn.graph)
def test_subexpression_Dict_int_Future(self):
@torch.jit.script
def fn(x: Dict[int, torch.jit.Future[int]], y: int) -> torch.jit.Future[int]:
return x[y]
FileCheck().check('Dict(int, Future(int))').check('Future[int]').run(fn.graph)
def test_subexpression_Optional(self):
@torch.jit.script
def fn(x: Optional[Dict[int, torch.jit.Future[int]]]) -> Optional[torch.jit.Future[int]]:
if x is not None:
return x[0]
else:
return None
FileCheck().check('Dict(int, Future(int))?').run(fn.graph)
def test_if_returning_any(self):
"""
Check that an if statement can return different
types early from each branch when the return
type of the function is Any.
"""
def if_function(inp: torch.Tensor) -> Any:
if inp.shape[0] == 1:
return inp * inp
else:
return "str"
self.checkScript(if_function, (torch.randn(5),))
def test_export_opnames_interface(self):
@torch.jit.interface
class OneTwoModule(nn.Module):
def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
pass
def two(self, x: torch.Tensor) -> torch.Tensor:
pass
def forward(self, x: torch.Tensor) -> torch.Tensor:
pass
class FooMod(nn.Module):
def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return x + y
def two(self, x: torch.Tensor) -> torch.Tensor:
return 2 * x
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.one(self.two(x), x)
class BarMod(nn.Module):
def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return x * y
def two(self, x: torch.Tensor) -> torch.Tensor:
return 2 / x
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.two(self.one(x, x))
make_global(OneTwoModule)
class M(nn.Module):
sub : OneTwoModule
def __init__(self):
super(M, self).__init__()
self.sub = BarMod()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.sub.forward(x)
def use_module_interface(mod_list: List[OneTwoModule], x: torch.Tensor):
return mod_list[0].forward(x) + mod_list[1].forward(x)
torch._C._enable_mobile_interface_call_export()
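        # With BarMod as the sub-module the exported op names should include its ops
        # (mul from x * y, mul/reciprocal from 2 / x); after swapping in FooMod below,
        # add/mul ops are expected instead.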
scripted_M_mod = torch.jit.script(M())
self.assertTrue(set(['aten::mul.Scalar', 'aten::mul.Tensor', 'aten::reciprocal']).issubset(
set(torch.jit.export_opnames(scripted_M_mod))))
scripted_M_mod.sub = torch.jit.script(FooMod())
self.assertTrue(set(['aten::add.Tensor', 'aten::mul.Scalar']).issubset(
set(torch.jit.export_opnames(scripted_M_mod))))
def test_broadcasting_list(self):
"""
Test BroadcastingList and torch.nn._size_N_t alias
"""
from torch._jit_internal import BroadcastingList2
from torch.nn.common_types import _size_2_t
def sum_i(x: _size_2_t) -> int:
return x[0] + x[1]
def sum_f(x: BroadcastingList2[float]) -> float:
return x[0] + x[1]
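        # Under TorchScript a scalar argument is broadcast to both elements, so sum_i(4) == 4 + 4.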
self.assertTrue(torch.jit.script(sum_i)(4) == 8)
self.assertTrue(torch.jit.script(sum_f)(4.5) == 9.)
|
# Owner(s): ["oncall: jit"]
from typing import Any, Dict, List, Optional, Tuple
from torch.testing._internal.jit_utils import JitTestCase, make_global
from torch.testing import FileCheck
from torch import jit
from jit.test_module_interface import TestModuleInterface # noqa: F401
import os
import sys
import torch
import torch.testing._internal.jit_utils
import torch.nn as nn
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestMisc(JitTestCase):
def test_joined_str(self):
def func(x):
hello, test = "Hello", "test"
print(f"{hello + ' ' + test}, I'm a {test}")
print("format blank")
hi = 'hi'
print(f"stuff before {hi}")
print(f"{hi} stuff after")
return x + 1
x = torch.arange(4., requires_grad=True)
# TODO: Add support for f-strings in string parser frontend
# self.checkScript(func, [x], optimize=True, capture_output=True)
with self.capture_stdout() as captured:
out = func(x)
scripted = torch.jit.script(func)
with self.capture_stdout() as captured_script:
out_script = func(x)
self.assertEqual(out, out_script)
self.assertEqual(captured, captured_script)
def test_kwarg_support(self):
with self.assertRaisesRegex(torch.jit.frontend.NotSupportedError, "variable number of arguments"):
class M(torch.nn.Module):
def forward(self, *, n_tokens: int, device_name: str = 2):
pass
torch.jit.script(M())
class M(torch.nn.Module):
def forward(self, *, n_tokens: int, device_name: str):
return n_tokens, device_name
sm = torch.jit.script(M())
with self.assertRaisesRegex(RuntimeError, "missing value for argument 'n_tokens'"):
sm()
with self.assertRaisesRegex(RuntimeError, "positional arg"):
sm(3, 'hello')
self.assertEqual(sm(n_tokens=3, device_name='hello'), (3, 'hello'))
def test_tuple_subscripted_assign(self):
with self.assertRaisesRegex(RuntimeError, "subscripted assignment"):
@torch.jit.script
def foo(a: Tuple[int, int]) -> None:
a[0] = a[1]
with self.assertRaisesRegex(RuntimeError, "augmented assignment"):
@torch.jit.script
def bar(a: Tuple[int, int]) -> None:
a[0] += a[1]
def test_subexpression_List_Future(self):
@torch.jit.script
def fn(x: List[torch.jit.Future[int]]) -> torch.jit.Future[int]:
return x[0]
FileCheck().check('Future[int]').check('Future[int]').run(fn.graph)
def test_subexpression_Future_annotate(self):
@torch.jit.script
def fn() -> torch.jit.Future[int]:
x: List[torch.jit.Future[int]] = []
return x[0]
FileCheck().check("Future[int][]").run(fn.graph)
def test_future_isinstance(self):
@torch.jit.script
def fn(x: Any) -> torch.jit.Future[int]:
assert isinstance(x, jit.Future[int])
return x
FileCheck().check("Future[int]").run(fn.graph)
def test_str_refine_any(self):
def forward(x: Any) -> str:
if isinstance(x, str):
return x
return "foo"
forward = torch.jit.script(forward)
self.assertEqual(forward(1), "foo")
self.assertEqual(forward("bar"), "bar")
def test_subexpression_Tuple_int_int_Future(self):
@torch.jit.script
def fn(x: Tuple[int, int, torch.jit.Future[int]]) -> Tuple[int, torch.jit.Future[int]]:
return x[0], x[2]
FileCheck().check('(int, int, Future[int])').check('(int, Future[int])').run(fn.graph)
def test_subexpression_Dict_int_Future(self):
@torch.jit.script
def fn(x: Dict[int, torch.jit.Future[int]], y: int) -> torch.jit.Future[int]:
return x[y]
FileCheck().check('Dict(int, Future(int))').check('Future[int]').run(fn.graph)
def test_subexpression_Optional(self):
@torch.jit.script
def fn(x: Optional[Dict[int, torch.jit.Future[int]]]) -> Optional[torch.jit.Future[int]]:
if x is not None:
return x[0]
else:
return None
FileCheck().check('Dict(int, Future(int))?').run(fn.graph)
def test_if_returning_any(self):
"""
Check that an if statement can return different
types early from each branch when the return
type of the function is Any.
"""
def if_function(inp: torch.Tensor) -> Any:
if inp.shape[0] == 1:
return inp * inp
else:
return "str"
self.checkScript(if_function, (torch.randn(5),))
def test_export_opnames_interface(self):
@torch.jit.interface
class OneTwoModule(nn.Module):
def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
pass
def two(self, x: torch.Tensor) -> torch.Tensor:
pass
def forward(self, x: torch.Tensor) -> torch.Tensor:
pass
class FooMod(nn.Module):
def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return x + y
def two(self, x: torch.Tensor) -> torch.Tensor:
return 2 * x
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.one(self.two(x), x)
class BarMod(nn.Module):
def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return x * y
def two(self, x: torch.Tensor) -> torch.Tensor:
return 2 / x
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.two(self.one(x, x))
make_global(OneTwoModule)
class M(nn.Module):
sub : OneTwoModule
def __init__(self):
super(M, self).__init__()
self.sub = BarMod()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.sub.forward(x)
def use_module_interface(mod_list: List[OneTwoModule], x: torch.Tensor):
return mod_list[0].forward(x) + mod_list[1].forward(x)
torch._C._enable_mobile_interface_call_export()
scripted_M_mod = torch.jit.script(M())
self.assertTrue(set(['aten::mul.Scalar', 'aten::mul.Tensor', 'aten::reciprocal']).issubset(
set(torch.jit.export_opnames(scripted_M_mod))))
scripted_M_mod.sub = torch.jit.script(FooMod())
self.assertTrue(set(['aten::add.Tensor', 'aten::mul.Scalar']).issubset(
set(torch.jit.export_opnames(scripted_M_mod))))
def test_broadcasting_list(self):
"""
Test BroadcastingList and torch.nn._size_N_t alias
"""
from torch._jit_internal import BroadcastingList2
from torch.nn.common_types import _size_2_t
def sum_i(x: _size_2_t) -> int:
return x[0] + x[1]
def sum_f(x: BroadcastingList2[float]) -> float:
return x[0] + x[1]
self.assertTrue(torch.jit.script(sum_i)(4) == 8)
self.assertTrue(torch.jit.script(sum_f)(4.5) == 9.)
|
import os
import logging
import sentry_sdk
from aiogram import Bot, Dispatcher, executor, types
from datetime import datetime, timedelta
from pypi_tools.logic import remove_track_for_package
import pypi_tools.data as d
from pypi_tools.helpers import validate_input
import pypi_tools.vizualizer as v
import pypi_tools.readme as r
import asyncio
import aioredis
logging.basicConfig(level=logging.INFO)
redis_host = f"redis://{os.environ.get("REDIS_HOST")}"
#sentry_sdk.init(os.environ["SENTRY_PATH"])
bot = Bot(token=os.environ["BOT_API_KEY"], parse_mode="html")
dp = Dispatcher(bot)
@dp.message_handler(commands=['start'])
async def send_welcome(message):
text = f"Hello, {message.chat.first_name} {message.chat.last_name}! \n" \
f"Welcome to <b>PyPi Tools Bot.</b>\n\n" \
"This Bot created special to obtain information from Official Python PyPi Server\n" \
+ r.help_text + r.current_version
await message.answer(text)
@dp.message_handler(commands=['help'])
async def send_help(message):
await message.answer(r.help_text)
@dp.message_handler(lambda message: message.text and (
'/stats' in message.text.lower() or 'stats:' in message.text.lower()))
@validate_input(command='stats',
known_sub_commands={'@any_number': lambda num: num})
async def send_package_stats(message):
output = message.output
sub_command = message.sub_command or 5
if len(output.split()) == 1:
days = sub_command
package_name = output
current_date = datetime.now().date()
data_ = await d.cached_package_downloads_stats(package_name, days, current_date)
output = d.stats_text(data_, package_name, days)
await message.answer(output)
@dp.message_handler(lambda message: message.text and (
'/plot' in message.text.lower() or 'plot:' in message.text.lower()))
@validate_input(command='plot',
known_sub_commands={'@any_number': lambda num: num})
async def send_package_stats_with_graph(message):
output = message.output
sub_command = message.sub_command or 5
if len(output.split()) == 1:
days = sub_command
package_name = output
current_date = datetime.now().date()
data_ = await d.cached_package_downloads_stats(package_name, days, current_date)
output = d.stats_text(data_, package_name, days)
temp = 'temp/'
os.makedirs(temp, exist_ok=True)
# for pandas range
start_date = current_date - timedelta(days=2)
file_name = f'{temp}/{package_name}:{current_date - timedelta(days=1)}:{days}.png'
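        # Re-use a previously rendered graph if one for the same package, date and range exists.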
if not os.path.isfile(file_name):
file_name = v.generate_graph(start_date, [item for _, item in data_.items()][::-1], file_name)
file_ = types.InputFile(file_name)
await message.answer(output)
await message.answer_photo(file_)
@dp.message_handler(commands=['random'])
async def command(message):
output = await d.get_random_package()
await message.answer(output)
@dp.message_handler(commands=['search', 'search:detailed'])
@validate_input(command='search',
known_sub_commands={'detailed': lambda _package_name: d.request_package_info_from_pypi(
_package_name, detailed=True)},
additional_error="Or use with sub-command to get detailed information:"
"/search:detailed aiohttp")
async def search_command(message):
output = message.output
sub_command = message.sub_command
if len(output.split()) == 1:
package_name = output
if sub_command:
output = await sub_command(package_name)
else:
output = await d.request_package_info_from_pypi(package_name)
await message.answer(output)
@dp.message_handler(commands=['releases', 'releases:full'])
@validate_input(command='releases',
known_sub_commands={'full': 'full'},
additional_error="Or use with sub-command to get full list of releases:"
"/releases:full aiohttp")
async def releases_command(message):
output = message.output
sub_command = message.sub_command
if len(output.split()) == 1:
package_name = output
releases = await d.get_release_list(package_name=package_name)
if sub_command and sub_command == 'full':
output = f"Full Releases list for Package {package_name}\n\n"
for version, v_date in releases.items():
output += f"<b>{version}</b>: {v_date}\n"
else:
output = f"Last 7 Releases for Package {package_name}\n\n"
for num, items in enumerate(list(releases.items())):
                if num >= 7:
break
version, v_date = items
output += f"<b>{version}</b>: {v_date}\n"
await message.answer(output)
track_sub_commands = {'stop': lambda key: remove_track_for_package(key),
'nodev': 'nodev'}
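# /track stores a Redis key "chat_id:package_name" -> last seen release version (suffixed with
# ":nodev" when dev/pre-releases should be ignored); that stored value is later compared against
# PyPI to announce new releases.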
@dp.message_handler(commands=['track', 'track:stop', 'track:nodev'])
@validate_input(command='track',
known_sub_commands=track_sub_commands,
additional_error="Or use with sub-command to stop track a package releases"
"/track:stop aiohttp")
async def track_command(message):
""" handler to react on /track command and it sub-commands"""
pool = await aioredis.create_redis_pool(redis_host)
with await pool as redis:
output = message.output
sub_command = message.sub_command
if len(output.split()) == 1:
package_name = output
chat_id = str(message.chat.id)
key = chat_id + ":" + package_name
if sub_command and sub_command != 'nodev':
output = await sub_command(key)
else:
nodev = False
if sub_command:
nodev = True
versions = await d.get_release_list(package_name, nodev)
if versions is None:
                    output = f'Package {package_name} does not exist'
else:
current_version = d.get_last_release_version(versions)
output = f"Current {package_name} version is {current_version} \n" \
"You will be announced with new version release"
version = current_version[0]
if nodev:
version = version + ':nodev'
await redis.set(key, version)
await message.answer(output)
@dp.message_handler()
async def echo_all(message: types.Message):
await message.answer(message.text)
if __name__ == '__main__':
try:
executor.start_polling(dp, skip_updates=True)
except Exception as e:
sentry_sdk.capture_exception(e)
|
import os
import logging
import sentry_sdk
from aiogram import Bot, Dispatcher, executor, types
from datetime import datetime, timedelta
from pypi_tools.logic import remove_track_for_package
import pypi_tools.data as d
from pypi_tools.helpers import validate_input
import pypi_tools.vizualizer as v
import pypi_tools.readme as r
import asyncio
import aioredis
logging.basicConfig(level=logging.INFO)
redis_host = f"redis://{os.environ.get('REDIS_HOST')}"
#sentry_sdk.init(os.environ["SENTRY_PATH"])
bot = Bot(token=os.environ["BOT_API_KEY"], parse_mode="html")
dp = Dispatcher(bot)
@dp.message_handler(commands=['start'])
async def send_welcome(message):
text = f"Hello, {message.chat.first_name} {message.chat.last_name}! \n" \
f"Welcome to <b>PyPi Tools Bot.</b>\n\n" \
"This Bot created special to obtain information from Official Python PyPi Server\n" \
+ r.help_text + r.current_version
await message.answer(text)
@dp.message_handler(commands=['help'])
async def send_help(message):
await message.answer(r.help_text)
@dp.message_handler(lambda message: message.text and (
'/stats' in message.text.lower() or 'stats:' in message.text.lower()))
@validate_input(command='stats',
known_sub_commands={'@any_number': lambda num: num})
async def send_package_stats(message):
output = message.output
sub_command = message.sub_command or 5
if len(output.split()) == 1:
days = sub_command
package_name = output
current_date = datetime.now().date()
data_ = await d.cached_package_downloads_stats(package_name, days, current_date)
output = d.stats_text(data_, package_name, days)
await message.answer(output)
@dp.message_handler(lambda message: message.text and (
'/plot' in message.text.lower() or 'plot:' in message.text.lower()))
@validate_input(command='plot',
known_sub_commands={'@any_number': lambda num: num})
async def send_package_stats_with_graph(message):
output = message.output
sub_command = message.sub_command or 5
if len(output.split()) == 1:
days = sub_command
package_name = output
current_date = datetime.now().date()
data_ = await d.cached_package_downloads_stats(package_name, days, current_date)
output = d.stats_text(data_, package_name, days)
temp = 'temp/'
os.makedirs(temp, exist_ok=True)
# for pandas range
start_date = current_date - timedelta(days=2)
file_name = f'{temp}/{package_name}:{current_date - timedelta(days=1)}:{days}.png'
if not os.path.isfile(file_name):
file_name = v.generate_graph(start_date, [item for _, item in data_.items()][::-1], file_name)
file_ = types.InputFile(file_name)
await message.answer(output)
await message.answer_photo(file_)
@dp.message_handler(commands=['random'])
async def command(message):
output = await d.get_random_package()
await message.answer(output)
@dp.message_handler(commands=['search', 'search:detailed'])
@validate_input(command='search',
known_sub_commands={'detailed': lambda _package_name: d.request_package_info_from_pypi(
_package_name, detailed=True)},
additional_error="Or use with sub-command to get detailed information:"
"/search:detailed aiohttp")
async def search_command(message):
output = message.output
sub_command = message.sub_command
if len(output.split()) == 1:
package_name = output
if sub_command:
output = await sub_command(package_name)
else:
output = await d.request_package_info_from_pypi(package_name)
await message.answer(output)
@dp.message_handler(commands=['releases', 'releases:full'])
@validate_input(command='releases',
known_sub_commands={'full': 'full'},
additional_error="Or use with sub-command to get full list of releases:"
"/releases:full aiohttp")
async def releases_command(message):
output = message.output
sub_command = message.sub_command
if len(output.split()) == 1:
package_name = output
releases = await d.get_release_list(package_name=package_name)
if sub_command and sub_command == 'full':
output = f"Full Releases list for Package {package_name}\n\n"
for version, v_date in releases.items():
output += f"<b>{version}</b>: {v_date}\n"
else:
output = f"Last 7 Releases for Package {package_name}\n\n"
for num, items in enumerate(list(releases.items())):
                if num >= 7:
break
version, v_date = items
output += f"<b>{version}</b>: {v_date}\n"
await message.answer(output)
track_sub_commands = {'stop': lambda key: remove_track_for_package(key),
'nodev': 'nodev'}
@dp.message_handler(commands=['track', 'track:stop', 'track:nodev'])
@validate_input(command='track',
known_sub_commands=track_sub_commands,
additional_error="Or use with sub-command to stop track a package releases"
"/track:stop aiohttp")
async def track_command(message):
""" handler to react on /track command and it sub-commands"""
pool = await aioredis.create_redis_pool(redis_host)
with await pool as redis:
output = message.output
sub_command = message.sub_command
if len(output.split()) == 1:
package_name = output
chat_id = str(message.chat.id)
key = chat_id + ":" + package_name
if sub_command and sub_command != 'nodev':
output = await sub_command(key)
else:
nodev = False
if sub_command:
nodev = True
versions = await d.get_release_list(package_name, nodev)
if versions is None:
                    output = f'Package {package_name} does not exist'
else:
current_version = d.get_last_release_version(versions)
output = f"Current {package_name} version is {current_version} \n" \
"You will be announced with new version release"
version = current_version[0]
if nodev:
version = version + ':nodev'
await redis.set(key, version)
await message.answer(output)
@dp.message_handler()
async def echo_all(message: types.Message):
await message.answer(message.text)
if __name__ == '__main__':
try:
executor.start_polling(dp, skip_updates=True)
except Exception as e:
sentry_sdk.capture_exception(e)
|
import time
import os
import datetime
import json
import logging
import requests
from utils.server_chan import server_push
from utils.qq_email import qq_email_push
from utils.qmsg import qmsg_push
from login import CampusLogin
def initLogging():
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(format="[%(levelname)s]; %(message)s")
def get_token(username, password, device_id):
"""
    Obtain a user token by simulating the app login, see: https://github.com/zhongbr/wanmei_campus
    :param device_id: device ID
    :param username: account (phone number)
    :param password: password
    :return: token string on success, otherwise None
"""
for _ in range(3):
try:
campus_login = CampusLogin(phone_num=username, device_id=device_id)
except Exception as e:
logging.warning(e)
continue
login_dict = campus_login.pwd_login(password)
if login_dict["status"]:
logging.info(f"{username[:4]},{login_dict["msg"]}")
return login_dict["token"]
elif login_dict['errmsg'] == "该手机号未注册完美校园":
logging.warning(f"{username[:4]},{login_dict["errmsg"]}")
return None
elif login_dict['errmsg'].startswith("密码错误"):
logging.warning(f"{username[:4]},{login_dict["errmsg"]}")
logging.warning("代码是死的,密码错误了就是错误了,赶紧去查看一下是不是输错了!")
return None
else:
logging.info(f"{username[:4]},{login_dict["errmsg"]}")
logging.warning('正在尝试重新登录......')
time.sleep(5)
return None
def get_school_name(token):
post_data = {"token": token, "method": "WX_BASE_INFO", "param": "%7B%7D"}
headers = {"Content-Type": "application/x-www-form-urlencoded"}
try:
res = requests.post(
"https://server.59wanmei.com/YKT_Interface/xyk",
data=post_data,
headers=headers,
)
return res.json()["data"]["customerName"]
except:
return "泪目,没获取到学校名字"
def get_user_info(token):
"""
    Fetch the user info, mainly to obtain custom_id (roughly the check-in template id).
    :param token: user token
    :return: userInfo dict on success, otherwise None
"""
data = {"appClassify": "DK", "token": token}
for _ in range(3):
try:
res = requests.post(
"https://reportedh5.17wanxiao.com/api/clock/school/getUserInfo", data=data
)
user_info = res.json()["userInfo"]
logging.info('获取个人信息成功')
return user_info
except:
logging.warning('获取个人信息失败,正在重试......')
time.sleep(1)
return None
def get_post_json(post_json):
"""
    Fetch the check-in form data.
    :param post_json: JSON payload used to request the check-in data
    :return: dict with the fields needed to submit a check-in, or None on failure
"""
for _ in range(3):
try:
res = requests.post(
url="https://reportedh5.17wanxiao.com/sass/api/epmpics",
json=post_json,
timeout=10,
).json()
except:
logging.warning("获取完美校园打卡post参数失败,正在重试...")
time.sleep(1)
continue
if res["code"] != "10000":
logging.warning(res)
data = json.loads(res["data"])
# print(data)
post_dict = {
"areaStr": data['areaStr'],
"deptStr": data['deptStr'],
"deptid": data['deptStr']['deptid'] if data['deptStr'] else None,
"customerid": data['customerid'],
"userid": data['userid'],
"username": data['username'],
"stuNo": data['stuNo'],
"phonenum": data["phonenum"],
"templateid": data["templateid"],
"updatainfo": [
{"propertyname": i["propertyname"], "value": i["value"]}
for i in data["cusTemplateRelations"]
],
"updatainfo_detail": [
{
"propertyname": i["propertyname"],
"checkValues": i["checkValues"],
"description": i["decription"],
"value": i["value"],
}
for i in data["cusTemplateRelations"]
],
"checkbox": [
{"description": i["decription"], "value": i["value"], "propertyname": i["propertyname"]}
for i in data["cusTemplateRelations"]
],
}
# print(json.dumps(post_dict, sort_keys=True, indent=4, ensure_ascii=False))
logging.info("获取完美校园打卡post参数成功")
return post_dict
return None
def healthy_check_in(token, username, post_dict):
"""
    Health check-in, type 1.
    :param username: phone number
    :param token: user token
    :param post_dict: check-in form data
    :return: result dict (status 1 on success, status 0 with errmsg on failure)
"""
check_json = {
"businessType": "epmpics",
"method": "submitUpInfo",
"jsonData": {
"deptStr": post_dict["deptStr"],
"areaStr": post_dict["areaStr"],
"reportdate": round(time.time() * 1000),
"customerid": post_dict["customerid"],
"deptid": post_dict["deptid"],
"source": "app",
"templateid": post_dict["templateid"],
"stuNo": post_dict["stuNo"],
"username": post_dict["username"],
"phonenum": username,
"userid": post_dict["userid"],
"updatainfo": post_dict["updatainfo"],
"gpsType": 1,
"token": token,
},
}
for _ in range(3):
try:
res = requests.post(
"https://reportedh5.17wanxiao.com/sass/api/epmpics", json=check_json
).json()
if res['code'] == '10000':
logging.info(res)
return {
"status": 1,
"res": res,
"post_dict": post_dict,
"check_json": check_json,
"type": "healthy",
}
elif "频繁" in res['data']:
logging.info(res)
return {
"status": 1,
"res": res,
"post_dict": post_dict,
"check_json": check_json,
"type": "healthy",
}
else:
logging.warning(res)
return {"status": 0, "errmsg": f"{post_dict["username"]}: {res}"}
except:
errmsg = f"```打卡请求出错```"
logging.warning("健康打卡请求出错")
return {"status": 0, "errmsg": errmsg}
return {"status": 0, "errmsg": "健康打卡请求出错"}
def get_recall_data(token):
"""
    Fetch the form data for the type-2 health check-in.
    :param token: user token
    :return: data dict on success, otherwise None
"""
for _ in range(3):
try:
res = requests.post(
url="https://reportedh5.17wanxiao.com/api/reported/recall",
data={"token": token},
timeout=10,
).json()
except:
logging.warning("获取完美校园打卡post参数失败,正在重试...")
time.sleep(1)
continue
if res["code"] == 0:
logging.info("获取完美校园打卡post参数成功")
return res["data"]
else:
logging.warning(res)
return None
def receive_check_in(token, custom_id, post_dict):
"""
    Health check-in, type 2.
    :param token: user token
    :param custom_id: health check-in template id
    :param post_dict: health check-in form data
    :return: result dict (status 1 on success, status 0 with errmsg on failure)
"""
check_json = {
"userId": post_dict["userId"],
"name": post_dict["name"],
"stuNo": post_dict["stuNo"],
"whereabouts": post_dict["whereabouts"],
"familyWhereabouts": "",
"beenToWuhan": post_dict["beenToWuhan"],
"contactWithPatients": post_dict["contactWithPatients"],
"symptom": post_dict["symptom"],
"fever": post_dict["fever"],
"cough": post_dict["cough"],
"soreThroat": post_dict["soreThroat"],
"debilitation": post_dict["debilitation"],
"diarrhea": post_dict["diarrhea"],
"cold": post_dict["cold"],
"staySchool": post_dict["staySchool"],
"contacts": post_dict["contacts"],
"emergencyPhone": post_dict["emergencyPhone"],
"address": post_dict["address"],
"familyForAddress": "",
"collegeId": post_dict["collegeId"],
"majorId": post_dict["majorId"],
"classId": post_dict["classId"],
"classDescribe": post_dict["classDescribe"],
"temperature": post_dict["temperature"],
"confirmed": post_dict["confirmed"],
"isolated": post_dict["isolated"],
"passingWuhan": post_dict["passingWuhan"],
"passingHubei": post_dict["passingHubei"],
"patientSide": post_dict["patientSide"],
"patientContact": post_dict["patientContact"],
"mentalHealth": post_dict["mentalHealth"],
"wayToSchool": post_dict["wayToSchool"],
"backToSchool": post_dict["backToSchool"],
"haveBroadband": post_dict["haveBroadband"],
"emergencyContactName": post_dict["emergencyContactName"],
"helpInfo": "",
"passingCity": "",
"longitude": "", # 请在此处填写需要打卡位置的longitude
"latitude": "", # 请在此处填写需要打卡位置的latitude
"token": token,
}
headers = {
"referer": f"https://reportedh5.17wanxiao.com/nCovReport/index.html?token={token}&customerId={custom_id}",
"content-type": "application/x-www-form-urlencoded;charset=UTF-8",
}
try:
res = requests.post(
"https://reportedh5.17wanxiao.com/api/reported/receive",
headers=headers,
data=check_json,
).json()
        # print the JSON string as formatted JSON (debugging)
# print(res)
if res["code"] == 0:
logging.info(res)
return dict(
status=1,
res=res,
post_dict=post_dict,
check_json=check_json,
type="healthy",
)
else:
logging.warning(res)
return dict(
status=1,
res=res,
post_dict=post_dict,
check_json=check_json,
type="healthy",
)
except:
errmsg = f"```打卡请求出错```"
logging.warning("打卡请求出错,网络不稳定")
return dict(status=0, errmsg=errmsg)
def get_ap():
"""
    Get the current time of day, used for on-campus check-in.
    :return: list of booleans [am, pm, ev]
"""
now_time = datetime.datetime.utcnow() + datetime.timedelta(hours=8)
am = 0 <= now_time.hour < 12
pm = 12 <= now_time.hour < 17
ev = 17 <= now_time.hour <= 23
return [am, pm, ev]
def get_id_list(token, custom_id):
"""
    Resolve the on-campus check-in template id into the concrete id of each time slot.
    :param token: user token
    :param custom_id: on-campus check-in template id
    :return: list of on-campus check-in rule ids, or None on failure
"""
post_data = {
"customerAppTypeId": custom_id,
"longitude": "",
"latitude": "",
"token": token,
}
try:
res = requests.post(
"https://reportedh5.17wanxiao.com/api/clock/school/rules", data=post_data
)
# print(res.text)
return res.json()["customerAppTypeDto"]["ruleList"]
except:
return None
def get_id_list_v1(token):
"""
    Resolve the on-campus check-in template into per-time-slot ids (first version, kept for reference).
    :param token: user token
    :return: list of on-campus check-in ids, or None on failure
"""
post_data = {"appClassify": "DK", "token": token}
try:
res = requests.post(
"https://reportedh5.17wanxiao.com/api/clock/school/childApps",
data=post_data,
)
if res.json()["appList"]:
id_list = sorted(
res.json()["appList"][-1]["customerAppTypeRuleList"],
key=lambda x: x["id"],
)
res_dict = [
{"id": j["id"], "templateid": f"clockSign{i + 1}"}
for i, j in enumerate(id_list)
]
return res_dict
return None
except:
return None
def campus_check_in(username, token, post_dict, id):
"""
    On-campus check-in.
    :param username: phone number
    :param token: user token
    :param post_dict: on-campus check-in form data
    :param id: on-campus check-in rule id
    :return: result dict (status 1 on success, status 0 with errmsg on failure)
"""
check_json = {
"businessType": "epmpics",
"method": "submitUpInfoSchool",
"jsonData": {
"deptStr": post_dict["deptStr"],
"areaStr": post_dict["areaStr"],
"reportdate": round(time.time() * 1000),
"customerid": post_dict["customerid"],
"deptid": post_dict["deptid"],
"source": "app",
"templateid": post_dict["templateid"],
"stuNo": post_dict["stuNo"],
"username": post_dict["username"],
"phonenum": username,
"userid": post_dict["userid"],
"updatainfo": post_dict["updatainfo"],
"customerAppTypeRuleId": id,
"clockState": 0,
"token": token,
},
"token": token,
}
# print(check_json)
try:
res = requests.post(
"https://reportedh5.17wanxiao.com/sass/api/epmpics", json=check_json
).json()
        # print the JSON string as formatted JSON (debugging)
if res["code"] != "10000":
logging.warning(res)
return dict(
status=1,
res=res,
post_dict=post_dict,
check_json=check_json,
type=post_dict["templateid"],
)
else:
logging.info(res)
return dict(
status=1,
res=res,
post_dict=post_dict,
check_json=check_json,
type=post_dict["templateid"],
)
except BaseException:
errmsg = f"```校内打卡请求出错```"
logging.warning("校内打卡请求出错")
return dict(status=0, errmsg=errmsg)
def check_in(username, password, device_id):
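    """
    Run the full check-in flow for one account: log in, fetch the user info, then try the
    type-1 health check-in and fall back to the type-2 (recall) check-in if the first form
    is unavailable. Returns a list of result dicts, one per attempted check-in.
    """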
check_dict_list = []
    # log in and obtain the token used for check-in
token = get_token(username, password, device_id)
if not token:
errmsg = f"{username[:4]},获取token失败,打卡失败"
logging.warning(errmsg)
check_dict_list.append({"status": 0, "errmsg": errmsg})
return check_dict_list
# print(token)
    # figure out whether it is currently morning, afternoon or evening
    # ape_list = get_ap()
    # fetch the check-in template id used by the school
user_info = get_user_info(token)
if not user_info:
errmsg = f"{username[:4]},获取user_info失败,打卡失败"
logging.warning(errmsg)
check_dict_list.append({"status": 0, "errmsg": errmsg})
return check_dict_list
    # fetch the parameters for the type-1 health check-in
json1 = {
"businessType": "epmpics",
"jsonData": {"templateid": "pneumonia", "token": token},
"method": "userComeApp",
}
post_dict = get_post_json(json1)
if post_dict:
        # type-1 health check-in
        # print(post_dict)
        # tweak the temperature and other fields here if needed
        # for j in post_dict['updatainfo']:  # these are the check-in fields that get pushed via WeChat
        #     if j['propertyname'] == 'temperature':  # locate the field whose propertyname is "temperature"
        #         j['value'] = '36.2'  # originally null; hard-code 36.2 (adjust to your school's options)
# if j['propertyname'] == 'xinqing':
# j['value'] = '健康'
# if j['propertyname'] == 'outdoor':
# j['value'] = '否'
# if j['propertyname'] == 'symptom':
# j['value'] = '无症状'
# if j['propertyname'] == 'ownbodyzk':
# j['value'] = '身体健康,无异常'
        # adjust the address to your own location as shown in the Wanmei Campus (完美校园) app
# post_dict['areaStr'] = '{"streetNumber":"89号","street":"建设东路","district":"","city":"新乡市","province":"河南省",' \
# '"town":"","pois":"河南师范大学(东区)","lng":113.91572178314209,' \
# '"lat":35.327695868943984,"address":"牧野区建设东路89号河南师范大学(东区)","text":"河南省-新乡市",' \
# '"code":""} '
healthy_check_dict = healthy_check_in(token, username, post_dict)
check_dict_list.append(healthy_check_dict)
else:
        # fetch the parameters for the type-2 health check-in
        post_dict = get_recall_data(token)
        # type-2 health check-in
healthy_check_dict = receive_check_in(token, user_info["customerId"], post_dict)
check_dict_list.append(healthy_check_dict)
    # # fetch the on-campus check-in ids
# id_list = get_id_list(token, user_info.get('customerAppTypeId'))
# # print(id_list)
# if not id_list:
# return check_dict_list
#
    # # on-campus check-in
# for index, i in enumerate(id_list):
# if ape_list[index]:
# # print(i)
# logging.info(f"-------------------------------{i["templateid"]}-------------------------------")
# json2 = {"businessType": "epmpics",
# "jsonData": {"templateid": i['templateid'], "customerAppTypeRuleId": i['id'],
# "stuNo": post_dict['stuNo'],
# "token": token}, "method": "userComeAppSchool",
# "token": token}
# campus_dict = get_post_json(json2)
# campus_dict['areaStr'] = post_dict['areaStr']
# for j in campus_dict['updatainfo']:
# if j['propertyname'] == 'temperature':
# j['value'] = '36.4'
# if j['propertyname'] == 'symptom':
# j['value'] = '无症状'
# campus_check_dict = campus_check_in(username, token, campus_dict, i['id'])
# check_dict_list.append(campus_check_dict)
# logging.info("--------------------------------------------------------------")
return check_dict_list
def wanxiao_server_push(sckey, check_info_list):
utc8_time = datetime.datetime.utcnow() + datetime.timedelta(hours=8)
push_list = [f"""
------
#### 现在时间:
```
{utc8_time.strftime("%Y-%m-%d %H:%M:%S %p")}
```"""]
for check_info in check_info_list:
if check_info["status"]:
if check_info["post_dict"].get("checkbox"):
post_msg = "\n".join(
[
f"| {i["description"]} | {j["value"]} |"
for i in check_info["post_dict"].get("checkbox")
for j in check_info["post_dict"].get("updatainfo")
if i["propertyname"] == j["propertyname"]
]
)
else:
post_msg = "暂无详情"
name = check_info["post_dict"].get("username")
if not name:
name = check_info["post_dict"]["name"]
push_list.append(
f"""#### {name}{check_info['type']}打卡信息:
```
{json.dumps(check_info['check_json'], sort_keys=True, indent=4, ensure_ascii=False)}
```
------
| Text | Message |
| :----------------------------------- | :--- |
{post_msg}
------
```
{check_info['res']}
```"""
)
else:
push_list.append(
f"""------
#### {check_info['errmsg']}
------
"""
)
push_list.append(
f"""
>
> [17wanxiaoCheckin-Actions](https://github.com/ReaJason/17wanxiaoCheckin-Actions)
>
>微信消息测试!
"""
)
return server_push(sckey, "健康打卡", "\n".join(push_list))
def wanxiao_qq_mail_push(send_email, send_pwd, receive_email, check_info_list):
bj_time = datetime.datetime.utcnow() + datetime.timedelta(hours=8)
bj_time.strftime("%Y-%m-%d %H:%M:%S %p")
mail_msg_list = [f"""
<h2><center> >>>> <a href="https://github.com/ReaJason/17wanxiaoCheckin-Actions">17wanxiaoCheckin-Actions</a>
<<<<</center></h2>
<h2><center>微信消息提醒!</center></h2>
<h3><center>打卡时间:{bj_time}</center></h3>
"""
]
for check in check_info_list:
if check["status"]:
name = check['post_dict'].get('username')
if not name:
name = check['post_dict']['name']
mail_msg_list.append(f"""<hr>
<details>
<summary style="font-family: 'Microsoft YaHei UI',serif; color: deepskyblue;">{name}:{check["type"]} 打卡结果:{check['res']}</summary>
<pre><code>
{json.dumps(check['check_json'], sort_keys=True, indent=4, ensure_ascii=False)}
</code></pre>
</details>
<details>
<summary style="font-family: 'Microsoft YaHei UI',serif; color: black;" >>>>填写数据抓包详情(便于代码的编写)<<<</summary>
<pre><code>
{json.dumps(check['post_dict']['updatainfo_detail'], sort_keys=True, indent=4, ensure_ascii=False)}
</code></pre>
</details>
<details>
<summary style="font-family: 'Microsoft YaHei UI',serif; color: lightskyblue;" >>>>打卡信息数据表格<<<</summary>
<table id="customers">
<tr>
<th>Text</th>
<th>Value</th>
</tr>
"""
)
for index, box in enumerate(check["post_dict"]["checkbox"]):
if index % 2:
mail_msg_list.append(
f"""<tr>
<td>{box['description']}</td>
<td>{box['value']}</td>
</tr>"""
)
else:
mail_msg_list.append(f"""<tr class="alt">
<td>{box['description']}</td>
<td>{box['value']}</td>
</tr>"""
)
mail_msg_list.append(
f"""
</table></details>"""
)
else:
mail_msg_list.append(
f"""<hr>
<b style="color: red">{check['errmsg']}</b>"""
)
css = """<style type="text/css">
#customers
{
font-family:"Trebuchet MS", Arial, Helvetica, sans-serif;
width:100%;
border-collapse:collapse;
}
#customers td, #customers th
{
font-size:1em;
border:1px solid #98bf21;
padding:3px 7px 2px 7px;
}
#customers th
{
font-size:1.1em;
text-align:left;
padding-top:5px;
padding-bottom:4px;
background-color:#A7C942;
color:#ffffff;
}
#customers tr.alt td
{
color:#000000;
background-color:#EAF2D3;
}
</style>"""
mail_msg_list.append(css)
return qq_email_push(send_email, send_pwd, receive_email,
title="完美校园健康打卡", text="".join(mail_msg_list))
def wanxiao_qmsg_push(key, qq_num, check_info_list, send_type):
utc8_time = datetime.datetime.utcnow() + datetime.timedelta(hours=8)
push_list = [f'@face=74@ {utc8_time.strftime('%Y-%m-%d %H:%M:%S')} @face=74@ ']
for check_info in check_info_list:
if check_info["status"]:
name = check_info["post_dict"].get("username")
if not name:
name = check_info["post_dict"]["name"]
push_list.append(f"""\
@face=54@ {name}{check_info['type']} @face=54@
@face=211@
{check_info['res']}
@face=211@""")
else:
push_list.append(check_info['errmsg'])
return qmsg_push(key, qq_num, "\n".join(push_list), send_type)
def main_handler(*args, **kwargs):
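    """
    Entry point (the *args/**kwargs signature also fits a cloud-function handler): read the
    accounts and push credentials from environment variables, run the check-in for every
    account, then push the results via ServerChan, QQ mail and/or Qmsg when configured.
    """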
initLogging()
raw_info = []
username_list = os.environ['USERNAME'].split(',')
password_list = os.environ['PASSWORD'].split(',')
device_id_list = os.environ['DEVICEID'].split(',')
sckey = os.environ.get('SCKEY')
key = os.environ.get('KEY')
qq_num = os.environ.get('QQ_NUM')
send_email = os.environ.get('SEND_EMAIL')
send_pwd = os.environ.get('SEND_PWD')
receive_email = os.environ.get('RECEIVE_EMAIL')
for username, password, device_id in zip(
[i.strip() for i in username_list if i != ''],
[i.strip() for i in password_list if i != ''],
[i.strip() for i in device_id_list if i != '']):
check_dict = check_in(username, password, device_id)
raw_info.extend(check_dict)
if sckey:
logging.info(wanxiao_server_push(sckey, raw_info))
if send_email and send_pwd and receive_email:
logging.info(wanxiao_qq_mail_push(send_email, send_pwd, receive_email, raw_info))
if key:
logging.info(wanxiao_qmsg_push(key, qq_num, raw_info, send_type="send"))
if __name__ == "__main__":
main_handler()
|
import time
import os
import datetime
import json
import logging
import requests
from utils.server_chan import server_push
from utils.qq_email import qq_email_push
from utils.qmsg import qmsg_push
from login import CampusLogin
def initLogging():
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(format="[%(levelname)s]; %(message)s")
def get_token(username, password, device_id):
"""
    Obtain a user token by simulating the app login, see: https://github.com/zhongbr/wanmei_campus
    :param device_id: device ID
    :param username: account (phone number)
    :param password: password
    :return: token string on success, otherwise None
"""
for _ in range(3):
try:
campus_login = CampusLogin(phone_num=username, device_id=device_id)
except Exception as e:
logging.warning(e)
continue
login_dict = campus_login.pwd_login(password)
if login_dict["status"]:
logging.info(f"{username[:4]},{login_dict['msg']}")
return login_dict["token"]
elif login_dict['errmsg'] == "该手机号未注册完美校园":
logging.warning(f"{username[:4]},{login_dict['errmsg']}")
return None
elif login_dict['errmsg'].startswith("密码错误"):
logging.warning(f"{username[:4]},{login_dict['errmsg']}")
logging.warning("代码是死的,密码错误了就是错误了,赶紧去查看一下是不是输错了!")
return None
else:
logging.info(f"{username[:4]},{login_dict['errmsg']}")
logging.warning('正在尝试重新登录......')
time.sleep(5)
return None
def get_school_name(token):
post_data = {"token": token, "method": "WX_BASE_INFO", "param": "%7B%7D"}
headers = {"Content-Type": "application/x-www-form-urlencoded"}
try:
res = requests.post(
"https://server.59wanmei.com/YKT_Interface/xyk",
data=post_data,
headers=headers,
)
return res.json()["data"]["customerName"]
except:
return "泪目,没获取到学校名字"
def get_user_info(token):
"""
    Fetch the user info, mainly to obtain custom_id (roughly the check-in template id).
    :param token: user token
    :return: userInfo dict on success, otherwise None
"""
data = {"appClassify": "DK", "token": token}
for _ in range(3):
try:
res = requests.post(
"https://reportedh5.17wanxiao.com/api/clock/school/getUserInfo", data=data
)
user_info = res.json()["userInfo"]
logging.info('获取个人信息成功')
return user_info
except:
logging.warning('获取个人信息失败,正在重试......')
time.sleep(1)
return None
def get_post_json(post_json):
"""
    Fetch the check-in form data.
    :param post_json: JSON payload used to request the check-in data
    :return: dict with the fields needed to submit a check-in, or None on failure
"""
for _ in range(3):
try:
res = requests.post(
url="https://reportedh5.17wanxiao.com/sass/api/epmpics",
json=post_json,
timeout=10,
).json()
except:
logging.warning("获取完美校园打卡post参数失败,正在重试...")
time.sleep(1)
continue
if res["code"] != "10000":
logging.warning(res)
data = json.loads(res["data"])
# print(data)
post_dict = {
"areaStr": data['areaStr'],
"deptStr": data['deptStr'],
"deptid": data['deptStr']['deptid'] if data['deptStr'] else None,
"customerid": data['customerid'],
"userid": data['userid'],
"username": data['username'],
"stuNo": data['stuNo'],
"phonenum": data["phonenum"],
"templateid": data["templateid"],
"updatainfo": [
{"propertyname": i["propertyname"], "value": i["value"]}
for i in data["cusTemplateRelations"]
],
"updatainfo_detail": [
{
"propertyname": i["propertyname"],
"checkValues": i["checkValues"],
"description": i["decription"],
"value": i["value"],
}
for i in data["cusTemplateRelations"]
],
"checkbox": [
{"description": i["decription"], "value": i["value"], "propertyname": i["propertyname"]}
for i in data["cusTemplateRelations"]
],
}
# print(json.dumps(post_dict, sort_keys=True, indent=4, ensure_ascii=False))
logging.info("获取完美校园打卡post参数成功")
return post_dict
return None
def healthy_check_in(token, username, post_dict):
"""
    Health check-in, type 1.
    :param username: phone number
    :param token: user token
    :param post_dict: check-in form data
    :return: result dict (status 1 on success, status 0 with errmsg on failure)
"""
check_json = {
"businessType": "epmpics",
"method": "submitUpInfo",
"jsonData": {
"deptStr": post_dict["deptStr"],
"areaStr": post_dict["areaStr"],
"reportdate": round(time.time() * 1000),
"customerid": post_dict["customerid"],
"deptid": post_dict["deptid"],
"source": "app",
"templateid": post_dict["templateid"],
"stuNo": post_dict["stuNo"],
"username": post_dict["username"],
"phonenum": username,
"userid": post_dict["userid"],
"updatainfo": post_dict["updatainfo"],
"gpsType": 1,
"token": token,
},
}
for _ in range(3):
try:
res = requests.post(
"https://reportedh5.17wanxiao.com/sass/api/epmpics", json=check_json
).json()
if res['code'] == '10000':
logging.info(res)
return {
"status": 1,
"res": res,
"post_dict": post_dict,
"check_json": check_json,
"type": "healthy",
}
elif "频繁" in res['data']:
logging.info(res)
return {
"status": 1,
"res": res,
"post_dict": post_dict,
"check_json": check_json,
"type": "healthy",
}
else:
logging.warning(res)
return {"status": 0, "errmsg": f"{post_dict['username']}: {res}"}
except:
errmsg = f"```打卡请求出错```"
logging.warning("健康打卡请求出错")
return {"status": 0, "errmsg": errmsg}
return {"status": 0, "errmsg": "健康打卡请求出错"}
def get_recall_data(token):
"""
    Fetch the form data for the type-2 health check-in.
    :param token: user token
    :return: data dict on success, otherwise None
"""
for _ in range(3):
try:
res = requests.post(
url="https://reportedh5.17wanxiao.com/api/reported/recall",
data={"token": token},
timeout=10,
).json()
except:
logging.warning("获取完美校园打卡post参数失败,正在重试...")
time.sleep(1)
continue
if res["code"] == 0:
logging.info("获取完美校园打卡post参数成功")
return res["data"]
else:
logging.warning(res)
return None
def receive_check_in(token, custom_id, post_dict):
"""
    Health check-in, type 2.
    :param token: user token
    :param custom_id: health check-in template id
    :param post_dict: health check-in form data
    :return: result dict (status 1 on success, status 0 with errmsg on failure)
"""
check_json = {
"userId": post_dict["userId"],
"name": post_dict["name"],
"stuNo": post_dict["stuNo"],
"whereabouts": post_dict["whereabouts"],
"familyWhereabouts": "",
"beenToWuhan": post_dict["beenToWuhan"],
"contactWithPatients": post_dict["contactWithPatients"],
"symptom": post_dict["symptom"],
"fever": post_dict["fever"],
"cough": post_dict["cough"],
"soreThroat": post_dict["soreThroat"],
"debilitation": post_dict["debilitation"],
"diarrhea": post_dict["diarrhea"],
"cold": post_dict["cold"],
"staySchool": post_dict["staySchool"],
"contacts": post_dict["contacts"],
"emergencyPhone": post_dict["emergencyPhone"],
"address": post_dict["address"],
"familyForAddress": "",
"collegeId": post_dict["collegeId"],
"majorId": post_dict["majorId"],
"classId": post_dict["classId"],
"classDescribe": post_dict["classDescribe"],
"temperature": post_dict["temperature"],
"confirmed": post_dict["confirmed"],
"isolated": post_dict["isolated"],
"passingWuhan": post_dict["passingWuhan"],
"passingHubei": post_dict["passingHubei"],
"patientSide": post_dict["patientSide"],
"patientContact": post_dict["patientContact"],
"mentalHealth": post_dict["mentalHealth"],
"wayToSchool": post_dict["wayToSchool"],
"backToSchool": post_dict["backToSchool"],
"haveBroadband": post_dict["haveBroadband"],
"emergencyContactName": post_dict["emergencyContactName"],
"helpInfo": "",
"passingCity": "",
"longitude": "", # 请在此处填写需要打卡位置的longitude
"latitude": "", # 请在此处填写需要打卡位置的latitude
"token": token,
}
headers = {
"referer": f"https://reportedh5.17wanxiao.com/nCovReport/index.html?token={token}&customerId={custom_id}",
"content-type": "application/x-www-form-urlencoded;charset=UTF-8",
}
try:
res = requests.post(
"https://reportedh5.17wanxiao.com/api/reported/receive",
headers=headers,
data=check_json,
).json()
        # print the JSON string as formatted JSON (debugging)
# print(res)
if res["code"] == 0:
logging.info(res)
return dict(
status=1,
res=res,
post_dict=post_dict,
check_json=check_json,
type="healthy",
)
else:
logging.warning(res)
return dict(
status=1,
res=res,
post_dict=post_dict,
check_json=check_json,
type="healthy",
)
except:
errmsg = f"```打卡请求出错```"
logging.warning("打卡请求出错,网络不稳定")
return dict(status=0, errmsg=errmsg)
def get_ap():
"""
    Get the current time of day, used for on-campus check-in.
    :return: list of booleans [am, pm, ev]
"""
now_time = datetime.datetime.utcnow() + datetime.timedelta(hours=8)
am = 0 <= now_time.hour < 12
pm = 12 <= now_time.hour < 17
ev = 17 <= now_time.hour <= 23
return [am, pm, ev]
def get_id_list(token, custom_id):
"""
    Resolve the on-campus check-in template id into the concrete id of each time slot.
    :param token: user token
    :param custom_id: on-campus check-in template id
    :return: list of on-campus check-in rule ids, or None on failure
"""
post_data = {
"customerAppTypeId": custom_id,
"longitude": "",
"latitude": "",
"token": token,
}
try:
res = requests.post(
"https://reportedh5.17wanxiao.com/api/clock/school/rules", data=post_data
)
# print(res.text)
return res.json()["customerAppTypeDto"]["ruleList"]
except:
return None
def get_id_list_v1(token):
"""
    Resolve the on-campus check-in template into per-time-slot ids (first version, kept for reference).
    :param token: user token
    :return: list of on-campus check-in ids, or None on failure
"""
post_data = {"appClassify": "DK", "token": token}
try:
res = requests.post(
"https://reportedh5.17wanxiao.com/api/clock/school/childApps",
data=post_data,
)
if res.json()["appList"]:
id_list = sorted(
res.json()["appList"][-1]["customerAppTypeRuleList"],
key=lambda x: x["id"],
)
res_dict = [
{"id": j["id"], "templateid": f"clockSign{i + 1}"}
for i, j in enumerate(id_list)
]
return res_dict
return None
except:
return None
def campus_check_in(username, token, post_dict, id):
"""
    On-campus check-in.
    :param username: phone number
    :param token: user token
    :param post_dict: on-campus check-in form data
    :param id: on-campus check-in rule id
    :return: result dict (status 1 on success, status 0 with errmsg on failure)
"""
check_json = {
"businessType": "epmpics",
"method": "submitUpInfoSchool",
"jsonData": {
"deptStr": post_dict["deptStr"],
"areaStr": post_dict["areaStr"],
"reportdate": round(time.time() * 1000),
"customerid": post_dict["customerid"],
"deptid": post_dict["deptid"],
"source": "app",
"templateid": post_dict["templateid"],
"stuNo": post_dict["stuNo"],
"username": post_dict["username"],
"phonenum": username,
"userid": post_dict["userid"],
"updatainfo": post_dict["updatainfo"],
"customerAppTypeRuleId": id,
"clockState": 0,
"token": token,
},
"token": token,
}
# print(check_json)
try:
res = requests.post(
"https://reportedh5.17wanxiao.com/sass/api/epmpics", json=check_json
).json()
        # print the JSON string as formatted JSON (debugging)
if res["code"] != "10000":
logging.warning(res)
return dict(
status=1,
res=res,
post_dict=post_dict,
check_json=check_json,
type=post_dict["templateid"],
)
else:
logging.info(res)
return dict(
status=1,
res=res,
post_dict=post_dict,
check_json=check_json,
type=post_dict["templateid"],
)
except BaseException:
errmsg = f"```校内打卡请求出错```"
logging.warning("校内打卡请求出错")
return dict(status=0, errmsg=errmsg)
def check_in(username, password, device_id):
check_dict_list = []
    # log in and obtain the token used for check-in
token = get_token(username, password, device_id)
if not token:
errmsg = f"{username[:4]},获取token失败,打卡失败"
logging.warning(errmsg)
check_dict_list.append({"status": 0, "errmsg": errmsg})
return check_dict_list
# print(token)
    # figure out whether it is currently morning, afternoon or evening
    # ape_list = get_ap()
    # fetch the check-in template id used by the school
user_info = get_user_info(token)
if not user_info:
errmsg = f"{username[:4]},获取user_info失败,打卡失败"
logging.warning(errmsg)
check_dict_list.append({"status": 0, "errmsg": errmsg})
return check_dict_list
    # fetch the parameters for the type-1 health check-in
json1 = {
"businessType": "epmpics",
"jsonData": {"templateid": "pneumonia", "token": token},
"method": "userComeApp",
}
post_dict = get_post_json(json1)
if post_dict:
        # type-1 health check-in
        # print(post_dict)
        # tweak the temperature and other fields here if needed
        # for j in post_dict['updatainfo']:  # these are the check-in fields that get pushed via WeChat
        #     if j['propertyname'] == 'temperature':  # locate the field whose propertyname is "temperature"
        #         j['value'] = '36.2'  # originally null; hard-code 36.2 (adjust to your school's options)
# if j['propertyname'] == 'xinqing':
# j['value'] = '健康'
# if j['propertyname'] == 'outdoor':
# j['value'] = '否'
# if j['propertyname'] == 'symptom':
# j['value'] = '无症状'
# if j['propertyname'] == 'ownbodyzk':
# j['value'] = '身体健康,无异常'
        # adjust the address to your own location as shown in the Wanmei Campus (完美校园) app
# post_dict['areaStr'] = '{"streetNumber":"89号","street":"建设东路","district":"","city":"新乡市","province":"河南省",' \
# '"town":"","pois":"河南师范大学(东区)","lng":113.91572178314209,' \
# '"lat":35.327695868943984,"address":"牧野区建设东路89号河南师范大学(东区)","text":"河南省-新乡市",' \
# '"code":""} '
healthy_check_dict = healthy_check_in(token, username, post_dict)
check_dict_list.append(healthy_check_dict)
else:
        # fetch the parameters for the type-2 health check-in
        post_dict = get_recall_data(token)
        # type-2 health check-in
healthy_check_dict = receive_check_in(token, user_info["customerId"], post_dict)
check_dict_list.append(healthy_check_dict)
    # # fetch the on-campus check-in ids
# id_list = get_id_list(token, user_info.get('customerAppTypeId'))
# # print(id_list)
# if not id_list:
# return check_dict_list
#
    # # on-campus check-in
# for index, i in enumerate(id_list):
# if ape_list[index]:
# # print(i)
# logging.info(f"-------------------------------{i['templateid']}-------------------------------")
# json2 = {"businessType": "epmpics",
# "jsonData": {"templateid": i['templateid'], "customerAppTypeRuleId": i['id'],
# "stuNo": post_dict['stuNo'],
# "token": token}, "method": "userComeAppSchool",
# "token": token}
# campus_dict = get_post_json(json2)
# campus_dict['areaStr'] = post_dict['areaStr']
# for j in campus_dict['updatainfo']:
# if j['propertyname'] == 'temperature':
# j['value'] = '36.4'
# if j['propertyname'] == 'symptom':
# j['value'] = '无症状'
# campus_check_dict = campus_check_in(username, token, campus_dict, i['id'])
# check_dict_list.append(campus_check_dict)
# logging.info("--------------------------------------------------------------")
return check_dict_list
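# Note on the data shape (as consumed below): check_in() returns a list of result dicts;
# the push helpers (wanxiao_server_push, wanxiao_qq_mail_push, wanxiao_qmsg_push) expect
# each entry to carry "status" plus, on success, "res", "post_dict", "check_json" and
# "type", or, on failure, "errmsg".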
def wanxiao_server_push(sckey, check_info_list):
utc8_time = datetime.datetime.utcnow() + datetime.timedelta(hours=8)
push_list = [f"""
------
#### 现在时间:
```
{utc8_time.strftime("%Y-%m-%d %H:%M:%S %p")}
```"""]
for check_info in check_info_list:
if check_info["status"]:
if check_info["post_dict"].get("checkbox"):
post_msg = "\n".join(
[
f"| {i['description']} | {j['value']} |"
for i in check_info["post_dict"].get("checkbox")
for j in check_info["post_dict"].get("updatainfo")
if i["propertyname"] == j["propertyname"]
]
)
else:
post_msg = "暂无详情"
name = check_info["post_dict"].get("username")
if not name:
name = check_info["post_dict"]["name"]
push_list.append(
f"""#### {name}{check_info['type']}打卡信息:
```
{json.dumps(check_info['check_json'], sort_keys=True, indent=4, ensure_ascii=False)}
```
------
| Text | Message |
| :----------------------------------- | :--- |
{post_msg}
------
```
{check_info['res']}
```"""
)
else:
push_list.append(
f"""------
#### {check_info['errmsg']}
------
"""
)
push_list.append(
f"""
>
> [17wanxiaoCheckin-Actions](https://github.com/ReaJason/17wanxiaoCheckin-Actions)
>
>微信消息测试!
"""
)
return server_push(sckey, "健康打卡", "\n".join(push_list))
def wanxiao_qq_mail_push(send_email, send_pwd, receive_email, check_info_list):
bj_time = datetime.datetime.utcnow() + datetime.timedelta(hours=8)
    bj_time = bj_time.strftime("%Y-%m-%d %H:%M:%S %p")
mail_msg_list = [f"""
<h2><center> >>>> <a href="https://github.com/ReaJason/17wanxiaoCheckin-Actions">17wanxiaoCheckin-Actions</a>
<<<<</center></h2>
<h2><center>微信消息提醒!</center></h2>
<h3><center>打卡时间:{bj_time}</center></h3>
"""
]
for check in check_info_list:
if check["status"]:
name = check['post_dict'].get('username')
if not name:
name = check['post_dict']['name']
mail_msg_list.append(f"""<hr>
<details>
<summary style="font-family: 'Microsoft YaHei UI',serif; color: deepskyblue;">{name}:{check["type"]} 打卡结果:{check['res']}</summary>
<pre><code>
{json.dumps(check['check_json'], sort_keys=True, indent=4, ensure_ascii=False)}
</code></pre>
</details>
<details>
<summary style="font-family: 'Microsoft YaHei UI',serif; color: black;" >>>>填写数据抓包详情(便于代码的编写)<<<</summary>
<pre><code>
{json.dumps(check['post_dict']['updatainfo_detail'], sort_keys=True, indent=4, ensure_ascii=False)}
</code></pre>
</details>
<details>
<summary style="font-family: 'Microsoft YaHei UI',serif; color: lightskyblue;" >>>>打卡信息数据表格<<<</summary>
<table id="customers">
<tr>
<th>Text</th>
<th>Value</th>
</tr>
"""
)
for index, box in enumerate(check["post_dict"]["checkbox"]):
if index % 2:
mail_msg_list.append(
f"""<tr>
<td>{box['description']}</td>
<td>{box['value']}</td>
</tr>"""
)
else:
mail_msg_list.append(f"""<tr class="alt">
<td>{box['description']}</td>
<td>{box['value']}</td>
</tr>"""
)
mail_msg_list.append(
f"""
</table></details>"""
)
else:
mail_msg_list.append(
f"""<hr>
<b style="color: red">{check['errmsg']}</b>"""
)
css = """<style type="text/css">
#customers
{
font-family:"Trebuchet MS", Arial, Helvetica, sans-serif;
width:100%;
border-collapse:collapse;
}
#customers td, #customers th
{
font-size:1em;
border:1px solid #98bf21;
padding:3px 7px 2px 7px;
}
#customers th
{
font-size:1.1em;
text-align:left;
padding-top:5px;
padding-bottom:4px;
background-color:#A7C942;
color:#ffffff;
}
#customers tr.alt td
{
color:#000000;
background-color:#EAF2D3;
}
</style>"""
mail_msg_list.append(css)
return qq_email_push(send_email, send_pwd, receive_email,
title="完美校园健康打卡", text="".join(mail_msg_list))
def wanxiao_qmsg_push(key, qq_num, check_info_list, send_type):
utc8_time = datetime.datetime.utcnow() + datetime.timedelta(hours=8)
push_list = [f'@face=74@ {utc8_time.strftime("%Y-%m-%d %H:%M:%S")} @face=74@ ']
for check_info in check_info_list:
if check_info["status"]:
name = check_info["post_dict"].get("username")
if not name:
name = check_info["post_dict"]["name"]
push_list.append(f"""\
@face=54@ {name}{check_info['type']} @face=54@
@face=211@
{check_info['res']}
@face=211@""")
else:
push_list.append(check_info['errmsg'])
return qmsg_push(key, qq_num, "\n".join(push_list), send_type)
def main_handler(*args, **kwargs):
initLogging()
raw_info = []
username_list = os.environ['USERNAME'].split(',')
password_list = os.environ['PASSWORD'].split(',')
device_id_list = os.environ['DEVICEID'].split(',')
sckey = os.environ.get('SCKEY')
key = os.environ.get('KEY')
qq_num = os.environ.get('QQ_NUM')
send_email = os.environ.get('SEND_EMAIL')
send_pwd = os.environ.get('SEND_PWD')
receive_email = os.environ.get('RECEIVE_EMAIL')
for username, password, device_id in zip(
[i.strip() for i in username_list if i != ''],
[i.strip() for i in password_list if i != ''],
[i.strip() for i in device_id_list if i != '']):
check_dict = check_in(username, password, device_id)
raw_info.extend(check_dict)
if sckey:
logging.info(wanxiao_server_push(sckey, raw_info))
if send_email and send_pwd and receive_email:
logging.info(wanxiao_qq_mail_push(send_email, send_pwd, receive_email, raw_info))
if key:
logging.info(wanxiao_qmsg_push(key, qq_num, raw_info, send_type="send"))
if __name__ == "__main__":
main_handler()
|
#!/usr/bin/env python3
import sys
import subprocess
import functools
from enum import Enum
import gi
gi.require_version('Notify', '0.7')
from gi.repository import Notify
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QSystemTrayIcon, QMenu, QAction, qApp, QMessageBox
from PyQt5.QtCore import QSize, QThread, pyqtSignal
from PyQt5.QtGui import QIcon
from protonvpn_cli import utils, country_codes
from protonvpn_cli.utils import is_connected
PROTONVPN_APPLET_VERSION = "0.1.7"
class VPNStatusException(Exception):
"""General exception to throw when anything goes wrong
"""
class VPNCommand(Enum):
"""Commands to run the CLI
"""
status = 'protonvpn s'
connect_fastest = 'protonvpn c -f'
disconnect = 'protonvpn d'
version = 'protonvpn -v'
connect_random = 'protonvpn c -r'
connect_fastest_cc = 'protonvpn c --cc'
connect_fastest_p2p = 'protonvpn c --p2p'
connect_fastest_sc = 'protonvpn c --sc'
connect_fastest_tor = 'protonvpn c --tor'
reconnect = 'protonvpn r'
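# Example of how these values are used further down: ConnectVPN prefixes the configured
# auth helper and splits the string, so connect_fastest_cc('DE') ends up running roughly
# ['sudo', 'protonvpn', 'c', '--cc', 'DE'] (assuming PVPNApplet.auth == 'sudo').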
def check_single_instance():
"""Use pgrep to check if protonvpn-applet is already running
"""
pid = None
try:
pid = subprocess.run('pgrep protonvpn-applet'.split(), check=True, capture_output=True)
except subprocess.CalledProcessError:
try:
pid = subprocess.run('pgrep protonvpn-applet.py'.split(), check=True, capture_output=True)
except subprocess.CalledProcessError:
pass
if pid is not None:
print('There is an instance already running.')
sys.exit(1)
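# Note: the check above shells out to `pgrep protonvpn-applet`; pgrep exits non-zero when
# nothing matches, which is what raises the CalledProcessError handled above.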
class Status(Enum):
"""Enum to keep track of the previous connection state
"""
connected = 'Connected'
disconnected = 'Disconnected'
class Polling(QThread):
"""Thread to check the VPN state every second and notifies on disconnection
"""
def __init__(self, applet):
QThread.__init__(self)
self.applet = applet
def __del__(self):
self.wait()
def run(self):
while self.applet.is_polling():
if is_connected():
self.applet.tray_icon.setIcon(QIcon('icons/16x16/protonvpn-connected.png'))
self.applet.previous_status = Status.connected
else:
# notify on disconnection
if self.applet.show_notifications() and self.applet.previous_status == Status.connected:
CheckStatus(self).start()
self.applet.tray_icon.setIcon(QIcon('icons/16x16/protonvpn-disconnected.png'))
self.applet.previous_status = Status.disconnected
self.sleep(1)
class ConnectVPN(QThread):
"""Thread to connect using the specified profile
"""
def __init__(self, applet, command):
QThread.__init__(self)
self.applet = applet
self.command = command
print(self.command)
def __del__(self):
self.wait()
def run(self):
subprocess.run([self.applet.auth] + self.command.split(), check=False)
self.applet.status_vpn()
class DisconnectVPN(QThread):
"""Thread to disconnect the VPN
"""
def __init__(self, applet):
QThread.__init__(self)
self.applet = applet
def __del__(self):
self.wait()
def run(self):
subprocess.run([self.applet.auth] + VPNCommand.disconnect.value.split(), check=False)
self.applet.status_vpn()
class ReconnectVPN(QThread):
"""Thread to connect using previously used profile
"""
def __init__(self, applet):
QThread.__init__(self)
self.applet = applet
def __del__(self):
self.wait()
def run(self):
subprocess.run([self.applet.auth] + VPNCommand.reconnect.value.split(), check=False)
self.applet.status_vpn()
class CheckStatus(QThread):
"""Thread to report ProtonVPN status
"""
def __init__(self, applet):
QThread.__init__(self)
self.applet = applet
def __del__(self):
self.wait()
def run(self):
result = subprocess.run(VPNCommand.status.value.split(), check=False, capture_output=True)
Notify.Notification.new(result.stdout.decode()).show()
class CheckProtonVPNVersion(QThread):
"""Thread to check version
"""
protonvpn_version_ready = pyqtSignal(str)
def __init__(self, parent=None):
super().__init__(parent=parent)
self.parent = parent
self.version = 'None'
def __del__(self):
self.wait()
def run(self):
self.version = subprocess.check_output(VPNCommand.version.value.split()).decode(sys.stdout.encoding)
self.protonvpn_version_ready.emit(self.version)
class PVPNApplet(QMainWindow):
"""Main applet body
"""
tray_icon = None
polling = True
previous_status = None
#auth = 'pkexec'
auth = 'sudo'
# Override the class constructor
def __init__(self):
super(PVPNApplet, self).__init__()
self.country_codes = country_codes # Keep a list of country codes
# Init QSystemTrayIcon
self.tray_icon = QSystemTrayIcon(self)
self.tray_icon.setIcon(QIcon('icons/16x16/protonvpn-disconnected.png'))
# Init libnotify
Notify.init('ProtonVPN')
# Refresh server list, store the resulting servers so we can populate the menu
self.servers = self.update_available_servers()
# Menu actions
connect_fastest_action = QAction('Connect fastest', self)
reconnect_action = QAction('Reconnect', self)
disconnect_action = QAction('Disconnect', self)
status_action = QAction('Status', self)
connect_fastest_sc_action = QAction('Secure Core', self)
connect_fastest_p2p_action = QAction('P2P', self)
connect_fastest_tor_action = QAction('Tor', self)
connect_random_action = QAction('Random', self)
show_protonvpn_applet_version_action = QAction('About ProtonVPN-Applet', self)
show_protonvpn_version_action = QAction('About ProtonVPN', self)
quit_action = QAction('Exit', self)
self.show_notifications_action = QAction('Show Notifications')
self.show_notifications_action.setCheckable(True)
self.show_notifications_action.setChecked(False)
# Triggers
quit_action.triggered.connect(qApp.quit)
connect_fastest_action.triggered.connect(self.connect_fastest)
disconnect_action.triggered.connect(self.disconnect_vpn)
status_action.triggered.connect(self.status_vpn)
show_protonvpn_applet_version_action.triggered.connect(self.show_protonvpn_applet_version)
show_protonvpn_version_action.triggered.connect(self.get_protonvpn_version)
connect_fastest_sc_action.triggered.connect(self.connect_fastest_sc)
connect_fastest_p2p_action.triggered.connect(self.connect_fastest_p2p)
connect_fastest_tor_action.triggered.connect(self.connect_fastest_tor)
connect_random_action.triggered.connect(self.connect_random)
reconnect_action.triggered.connect(self.reconnect_vpn)
# Generate connection menu for specific countries
connect_country_actions = []
for country_name in self.get_available_countries(self.servers):
# Get the ISO-3166 Alpha-2 country code
country_name_to_code = {v: k for k, v in country_codes.country_codes.items()}
country_code = country_name_to_code[country_name]
# Dynamically create functions for connecting to each country; each function just passes its respective
# country code to `self.connect_fastest_cc()`
setattr(self, f'connect_fastest_{country_code}', functools.partial(self.connect_fastest_cc, country_code))
# Generate an action for each country; set up the trigger; append to actions list
country_action = QAction(f'{country_name}', self)
country_action.triggered.connect(getattr(self, f'connect_fastest_{country_code}'))
connect_country_actions.append(country_action)
# Create a scrollable country connection menu
connect_country_menu = QMenu("Country...", self)
connect_country_menu.setStyleSheet('QMenu { menu-scrollable: 1; }')
connect_country_menu.addActions(connect_country_actions)
# Generate connection menu
connection_menu = QMenu("Other connections...", self)
connection_menu.addMenu(connect_country_menu)
connection_menu.addAction(connect_fastest_sc_action)
connection_menu.addAction(connect_fastest_p2p_action)
connection_menu.addAction(connect_fastest_tor_action)
connection_menu.addAction(connect_random_action)
# Draw menu
tray_menu = QMenu()
tray_menu.addAction(connect_fastest_action)
tray_menu.addAction(reconnect_action)
tray_menu.addMenu(connection_menu)
tray_menu.addAction(disconnect_action)
tray_menu.addAction(status_action)
tray_menu.addSeparator()
tray_menu.addAction(self.show_notifications_action)
tray_menu.addAction(show_protonvpn_applet_version_action)
tray_menu.addAction(show_protonvpn_version_action)
tray_menu.addAction(quit_action)
self.tray_icon.setContextMenu(tray_menu)
self.tray_icon.show()
# Polling thread
self.start_polling()
def is_polling(self):
return self.polling
def kill_polling(self):
self.polling = False
def start_polling(self):
self.polling = True
self.polling_thread = Polling(self)
self.polling_thread.start()
def _connect_vpn(self, command):
self.kill_polling()
connect_thread = ConnectVPN(self, command)
connect_thread.finished.connect(self.start_polling)
connect_thread.start()
def connect_fastest(self):
self._connect_vpn(VPNCommand.connect_fastest.value)
def connect_fastest_p2p(self):
self._connect_vpn(VPNCommand.connect_fastest_p2p.value)
def connect_fastest_sc(self):
self._connect_vpn(VPNCommand.connect_fastest_sc.value)
def connect_fastest_cc(self, cc):
command = VPNCommand.connect_fastest_cc.value + f' {cc}'
self._connect_vpn(command)
def connect_fastest_tor(self):
self._connect_vpn(VPNCommand.connect_fastest_tor.value)
def connect_random(self):
self._connect_vpn(VPNCommand.connect_random.value)
def disconnect_vpn(self):
disconnect_thread = DisconnectVPN(self)
disconnect_thread.start()
def status_vpn(self):
status_thread = CheckStatus(self)
status_thread.start()
def reconnect_vpn(self):
reconnect_thread = ReconnectVPN(self)
reconnect_thread.start()
# Override closeEvent to intercept the window closing event
def closeEvent(self, event):
event.ignore()
self.hide()
def show_notifications(self):
return self.show_notifications_action.isChecked()
def show_protonvpn_applet_version(self):
"""Show the protonvpn-applet version.
"""
name = '© 2020 Dónal Murray'
email = '[email protected]'
github = 'https://github.com/seadanda/protonvpn-applet'
info = [f'<center>Version: {PROTONVPN_APPLET_VERSION}',
f'{name}',
f"<a href='{email}'>{email}</a>",
f"<a href='{github}'>{github}</a></center>"]
        centered_text = f'<center>{"<br>".join(info)}</center>'
QMessageBox.information(self, 'protonvpn-applet', centered_text)
def get_protonvpn_version(self):
"""Start the CheckProtonVPNVersion thread; when it gets the version, it will call `self.show_protonvpn_version`
"""
print('called get_protonvpn_version')
check_protonvpn_version_thread = CheckProtonVPNVersion(self)
check_protonvpn_version_thread.protonvpn_version_ready.connect(self.show_protonvpn_version)
check_protonvpn_version_thread.start()
def show_protonvpn_version(self, version):
"""
Show the ProtonVPN version in a QMessageBox.
Parameters
----------
version : str
Version number to be shown.
"""
print('called show_protonvpn_version')
QMessageBox.information(self, 'ProtonVPN Version', f'Version: {version}')
def update_available_servers(self):
utils.pull_server_data()
return utils.get_servers()
@staticmethod
def get_available_countries(servers):
return sorted(list({utils.get_country_name(server['ExitCountry']) for server in servers}))
if __name__ == '__main__':
check_single_instance()
app = QApplication(sys.argv)
mw = PVPNApplet()
sys.exit(app.exec())
|
#!/usr/bin/env python3
import sys
import subprocess
import functools
from enum import Enum
import gi
gi.require_version('Notify', '0.7')
from gi.repository import Notify
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QSystemTrayIcon, QMenu, QAction, qApp, QMessageBox
from PyQt5.QtCore import QSize, QThread, pyqtSignal
from PyQt5.QtGui import QIcon
from protonvpn_cli import utils, country_codes
from protonvpn_cli.utils import is_connected
PROTONVPN_APPLET_VERSION = "0.1.7"
class VPNStatusException(Exception):
"""General exception to throw when anything goes wrong
"""
class VPNCommand(Enum):
"""Commands to run the CLI
"""
status = 'protonvpn s'
connect_fastest = 'protonvpn c -f'
disconnect = 'protonvpn d'
version = 'protonvpn -v'
connect_random = 'protonvpn c -r'
connect_fastest_cc = 'protonvpn c --cc'
connect_fastest_p2p = 'protonvpn c --p2p'
connect_fastest_sc = 'protonvpn c --sc'
connect_fastest_tor = 'protonvpn c --tor'
reconnect = 'protonvpn r'
def check_single_instance():
"""Use pgrep to check if protonvpn-applet is already running
"""
pid = None
try:
pid = subprocess.run('pgrep protonvpn-applet'.split(), check=True, capture_output=True)
except subprocess.CalledProcessError:
try:
pid = subprocess.run('pgrep protonvpn-applet.py'.split(), check=True, capture_output=True)
except subprocess.CalledProcessError:
pass
if pid is not None:
print('There is an instance already running.')
sys.exit(1)
class Status(Enum):
"""Enum to keep track of the previous connection state
"""
connected = 'Connected'
disconnected = 'Disconnected'
class Polling(QThread):
"""Thread to check the VPN state every second and notifies on disconnection
"""
def __init__(self, applet):
QThread.__init__(self)
self.applet = applet
def __del__(self):
self.wait()
def run(self):
while self.applet.is_polling():
if is_connected():
self.applet.tray_icon.setIcon(QIcon('icons/16x16/protonvpn-connected.png'))
self.applet.previous_status = Status.connected
else:
# notify on disconnection
if self.applet.show_notifications() and self.applet.previous_status == Status.connected:
CheckStatus(self).start()
self.applet.tray_icon.setIcon(QIcon('icons/16x16/protonvpn-disconnected.png'))
self.applet.previous_status = Status.disconnected
self.sleep(1)
class ConnectVPN(QThread):
"""Thread to connect using the specified profile
"""
def __init__(self, applet, command):
QThread.__init__(self)
self.applet = applet
self.command = command
print(self.command)
def __del__(self):
self.wait()
def run(self):
subprocess.run([self.applet.auth] + self.command.split(), check=False)
self.applet.status_vpn()
class DisconnectVPN(QThread):
"""Thread to disconnect the VPN
"""
def __init__(self, applet):
QThread.__init__(self)
self.applet = applet
def __del__(self):
self.wait()
def run(self):
subprocess.run([self.applet.auth] + VPNCommand.disconnect.value.split(), check=False)
self.applet.status_vpn()
class ReconnectVPN(QThread):
"""Thread to connect using previously used profile
"""
def __init__(self, applet):
QThread.__init__(self)
self.applet = applet
def __del__(self):
self.wait()
def run(self):
subprocess.run([self.applet.auth] + VPNCommand.reconnect.value.split(), check=False)
self.applet.status_vpn()
class CheckStatus(QThread):
"""Thread to report ProtonVPN status
"""
def __init__(self, applet):
QThread.__init__(self)
self.applet = applet
def __del__(self):
self.wait()
def run(self):
result = subprocess.run(VPNCommand.status.value.split(), check=False, capture_output=True)
Notify.Notification.new(result.stdout.decode()).show()
class CheckProtonVPNVersion(QThread):
"""Thread to check version
"""
protonvpn_version_ready = pyqtSignal(str)
def __init__(self, parent=None):
super().__init__(parent=parent)
self.parent = parent
self.version = 'None'
def __del__(self):
self.wait()
def run(self):
self.version = subprocess.check_output(VPNCommand.version.value.split()).decode(sys.stdout.encoding)
self.protonvpn_version_ready.emit(self.version)
class PVPNApplet(QMainWindow):
"""Main applet body
"""
tray_icon = None
polling = True
previous_status = None
#auth = 'pkexec'
auth = 'sudo'
# Override the class constructor
def __init__(self):
super(PVPNApplet, self).__init__()
self.country_codes = country_codes # Keep a list of country codes
# Init QSystemTrayIcon
self.tray_icon = QSystemTrayIcon(self)
self.tray_icon.setIcon(QIcon('icons/16x16/protonvpn-disconnected.png'))
# Init libnotify
Notify.init('ProtonVPN')
# Refresh server list, store the resulting servers so we can populate the menu
self.servers = self.update_available_servers()
# Menu actions
connect_fastest_action = QAction('Connect fastest', self)
reconnect_action = QAction('Reconnect', self)
disconnect_action = QAction('Disconnect', self)
status_action = QAction('Status', self)
connect_fastest_sc_action = QAction('Secure Core', self)
connect_fastest_p2p_action = QAction('P2P', self)
connect_fastest_tor_action = QAction('Tor', self)
connect_random_action = QAction('Random', self)
show_protonvpn_applet_version_action = QAction('About ProtonVPN-Applet', self)
show_protonvpn_version_action = QAction('About ProtonVPN', self)
quit_action = QAction('Exit', self)
self.show_notifications_action = QAction('Show Notifications')
self.show_notifications_action.setCheckable(True)
self.show_notifications_action.setChecked(False)
# Triggers
quit_action.triggered.connect(qApp.quit)
connect_fastest_action.triggered.connect(self.connect_fastest)
disconnect_action.triggered.connect(self.disconnect_vpn)
status_action.triggered.connect(self.status_vpn)
show_protonvpn_applet_version_action.triggered.connect(self.show_protonvpn_applet_version)
show_protonvpn_version_action.triggered.connect(self.get_protonvpn_version)
connect_fastest_sc_action.triggered.connect(self.connect_fastest_sc)
connect_fastest_p2p_action.triggered.connect(self.connect_fastest_p2p)
connect_fastest_tor_action.triggered.connect(self.connect_fastest_tor)
connect_random_action.triggered.connect(self.connect_random)
reconnect_action.triggered.connect(self.reconnect_vpn)
# Generate connection menu for specific countries
connect_country_actions = []
for country_name in self.get_available_countries(self.servers):
# Get the ISO-3166 Alpha-2 country code
country_name_to_code = {v: k for k, v in country_codes.country_codes.items()}
country_code = country_name_to_code[country_name]
# Dynamically create functions for connecting to each country; each function just passes its respective
# country code to `self.connect_fastest_cc()`
setattr(self, f'connect_fastest_{country_code}', functools.partial(self.connect_fastest_cc, country_code))
# Generate an action for each country; set up the trigger; append to actions list
country_action = QAction(f'{country_name}', self)
country_action.triggered.connect(getattr(self, f'connect_fastest_{country_code}'))
connect_country_actions.append(country_action)
# Create a scrollable country connection menu
connect_country_menu = QMenu("Country...", self)
connect_country_menu.setStyleSheet('QMenu { menu-scrollable: 1; }')
connect_country_menu.addActions(connect_country_actions)
# Generate connection menu
connection_menu = QMenu("Other connections...", self)
connection_menu.addMenu(connect_country_menu)
connection_menu.addAction(connect_fastest_sc_action)
connection_menu.addAction(connect_fastest_p2p_action)
connection_menu.addAction(connect_fastest_tor_action)
connection_menu.addAction(connect_random_action)
# Draw menu
tray_menu = QMenu()
tray_menu.addAction(connect_fastest_action)
tray_menu.addAction(reconnect_action)
tray_menu.addMenu(connection_menu)
tray_menu.addAction(disconnect_action)
tray_menu.addAction(status_action)
tray_menu.addSeparator()
tray_menu.addAction(self.show_notifications_action)
tray_menu.addAction(show_protonvpn_applet_version_action)
tray_menu.addAction(show_protonvpn_version_action)
tray_menu.addAction(quit_action)
self.tray_icon.setContextMenu(tray_menu)
self.tray_icon.show()
# Polling thread
self.start_polling()
def is_polling(self):
return self.polling
def kill_polling(self):
self.polling = False
def start_polling(self):
self.polling = True
self.polling_thread = Polling(self)
self.polling_thread.start()
def _connect_vpn(self, command):
self.kill_polling()
connect_thread = ConnectVPN(self, command)
connect_thread.finished.connect(self.start_polling)
connect_thread.start()
def connect_fastest(self):
self._connect_vpn(VPNCommand.connect_fastest.value)
def connect_fastest_p2p(self):
self._connect_vpn(VPNCommand.connect_fastest_p2p.value)
def connect_fastest_sc(self):
self._connect_vpn(VPNCommand.connect_fastest_sc.value)
def connect_fastest_cc(self, cc):
command = VPNCommand.connect_fastest_cc.value + f' {cc}'
self._connect_vpn(command)
def connect_fastest_tor(self):
self._connect_vpn(VPNCommand.connect_fastest_tor.value)
def connect_random(self):
self._connect_vpn(VPNCommand.connect_random.value)
def disconnect_vpn(self):
disconnect_thread = DisconnectVPN(self)
disconnect_thread.start()
def status_vpn(self):
status_thread = CheckStatus(self)
status_thread.start()
def reconnect_vpn(self):
reconnect_thread = ReconnectVPN(self)
reconnect_thread.start()
# Override closeEvent to intercept the window closing event
def closeEvent(self, event):
event.ignore()
self.hide()
def show_notifications(self):
return self.show_notifications_action.isChecked()
def show_protonvpn_applet_version(self):
"""Show the protonvpn-applet version.
"""
name = '© 2020 Dónal Murray'
email = '[email protected]'
github = 'https://github.com/seadanda/protonvpn-applet'
info = [f'<center>Version: {PROTONVPN_APPLET_VERSION}',
f'{name}',
f"<a href='{email}'>{email}</a>",
f"<a href='{github}'>{github}</a></center>"]
centered_text = f'<center>{"<br>".join(info)}</center>'
QMessageBox.information(self, 'protonvpn-applet', centered_text)
def get_protonvpn_version(self):
"""Start the CheckProtonVPNVersion thread; when it gets the version, it will call `self.show_protonvpn_version`
"""
print('called get_protonvpn_version')
check_protonvpn_version_thread = CheckProtonVPNVersion(self)
check_protonvpn_version_thread.protonvpn_version_ready.connect(self.show_protonvpn_version)
check_protonvpn_version_thread.start()
def show_protonvpn_version(self, version):
"""
Show the ProtonVPN version in a QMessageBox.
Parameters
----------
version : str
Version number to be shown.
"""
print('called show_protonvpn_version')
QMessageBox.information(self, 'ProtonVPN Version', f'Version: {version}')
def update_available_servers(self):
utils.pull_server_data()
return utils.get_servers()
@staticmethod
def get_available_countries(servers):
return sorted(list({utils.get_country_name(server['ExitCountry']) for server in servers}))
if __name__ == '__main__':
check_single_instance()
app = QApplication(sys.argv)
mw = PVPNApplet()
sys.exit(app.exec())
|
import os
import pandas as pd
from pathlib import Path
import numpy as np
from mt import RAW_PATH
from mt import utils
SHUFFLE = True
CONSTRAINED = True
TR_DATA_PATH = "/home/salva/Documents/Programming/Datasets/scielo/originals/scielo-gma/scielo-gma"
TR_RAW_FILES = ["es-en-gma-biological.csv", "es-en-gma-health.csv", "fr-en-gma-health.csv",
"pt-en-gma-biological.csv", "pt-en-gma-health.csv"]
TS_DATA_PATH = "/home/salva/Documents/Programming/Datasets/scielo/originals/testset-gma/testset_gma"
TS_RAW_FILES = ["test-gma-en2es-biological.csv", "test-gma-en2es-health.csv", "test-gma-en2fr-health.csv",
"test-gma-en2pt-biological.csv", "test-gma-en2pt-health.csv", "test-gma-es2en-biological.csv",
"test-gma-es2en-health.csv", "test-gma-fr2en-health.csv", "test-gma-pt2en-biological.csv",
"test-gma-pt2en-health.csv"]
# Create the path if it doesn't exist
path = Path(RAW_PATH)
path.mkdir(parents=True, exist_ok=True)
# Process splits train/test files
for split in ["train", "test"]:
# Select split to process
if split == "train":
print("Processing training files...")
DATA_PATH = TR_DATA_PATH
RAW_FILES = TR_RAW_FILES
istrain = True
elif split == "test":
print("Processing test files...")
DATA_PATH = TS_DATA_PATH
RAW_FILES = TS_RAW_FILES
istrain = False
else:
raise ValueError("Invalid split name")
# Process raw files
for fname in RAW_FILES:
# Read file
print(f"Reading file... ({fname})")
filename = os.path.join(DATA_PATH, fname)
df = pd.read_csv(filename)
# Limit dataset
domain = utils.get_domain(fname)
SRC_LANG, TRG_LANG = utils.get_langs(fname, istrain=istrain)
# Clean dataset (basic)
total_old = len(df)
df = utils.preprocess_dataset(df, src_col=SRC_LANG, trg_col=TRG_LANG)
# Shuffle dataset
        if SHUFFLE:
np.random.seed(123)
np.random.shuffle(df.values)
if CONSTRAINED and istrain:
if domain == "health" and "es" in {SRC_LANG, TRG_LANG}:
max_size = 123597 # Biological rows
print(f"Limiting size to {max_size}")
df = df[:max_size]
elif domain == "health" and "pt" in {SRC_LANG, TRG_LANG}:
max_size = 120301 # Biological rows
print(f"Limiting size to {max_size}")
df = df[:max_size]
# Stats
total_doctypes = df['doctype'].value_counts()
removed = total_old - len(df)
print(f"Stats for: {fname} **************************")
        print(f"\t- Documents: {len(set(df['docid']))}")
print(f"\t- Sentences: {len(df)}")
print("\t\t- Removed: {} ({:.2f}%)".format(removed, removed / total_old * 100))
print("\t- Titles/Abstracts: {}/{} ({:.2f}%)".format(total_doctypes['title'], total_doctypes['text'],
total_doctypes['title'] / total_doctypes['text'] * 100))
# Save data
df.to_csv(os.path.join(RAW_PATH, fname), index=False)
print("File saved!")
print("")
print("Done!")
|
import os
import pandas as pd
from pathlib import Path
import numpy as np
from mt import RAW_PATH
from mt import utils
SHUFFLE = True
CONSTRAINED = True
TR_DATA_PATH = "/home/salva/Documents/Programming/Datasets/scielo/originals/scielo-gma/scielo-gma"
TR_RAW_FILES = ["es-en-gma-biological.csv", "es-en-gma-health.csv", "fr-en-gma-health.csv",
"pt-en-gma-biological.csv", "pt-en-gma-health.csv"]
TS_DATA_PATH = "/home/salva/Documents/Programming/Datasets/scielo/originals/testset-gma/testset_gma"
TS_RAW_FILES = ["test-gma-en2es-biological.csv", "test-gma-en2es-health.csv", "test-gma-en2fr-health.csv",
"test-gma-en2pt-biological.csv", "test-gma-en2pt-health.csv", "test-gma-es2en-biological.csv",
"test-gma-es2en-health.csv", "test-gma-fr2en-health.csv", "test-gma-pt2en-biological.csv",
"test-gma-pt2en-health.csv"]
# Create the path if it doesn't exist
path = Path(RAW_PATH)
path.mkdir(parents=True, exist_ok=True)
# Process splits train/test files
for split in ["train", "test"]:
# Select split to process
if split == "train":
print("Processing training files...")
DATA_PATH = TR_DATA_PATH
RAW_FILES = TR_RAW_FILES
istrain = True
elif split == "test":
print("Processing test files...")
DATA_PATH = TS_DATA_PATH
RAW_FILES = TS_RAW_FILES
istrain = False
else:
raise ValueError("Invalid split name")
# Process raw files
for fname in RAW_FILES:
# Read file
print(f"Reading file... ({fname})")
filename = os.path.join(DATA_PATH, fname)
df = pd.read_csv(filename)
# Limit dataset
domain = utils.get_domain(fname)
SRC_LANG, TRG_LANG = utils.get_langs(fname, istrain=istrain)
# Clean dataset (basic)
total_old = len(df)
df = utils.preprocess_dataset(df, src_col=SRC_LANG, trg_col=TRG_LANG)
# Shuffle dataset
        if SHUFFLE:
np.random.seed(123)
np.random.shuffle(df.values)
if CONSTRAINED and istrain:
if domain == "health" and "es" in {SRC_LANG, TRG_LANG}:
max_size = 123597 # Biological rows
print(f"Limiting size to {max_size}")
df = df[:max_size]
elif domain == "health" and "pt" in {SRC_LANG, TRG_LANG}:
max_size = 120301 # Biological rows
print(f"Limiting size to {max_size}")
df = df[:max_size]
# Stats
total_doctypes = df['doctype'].value_counts()
removed = total_old - len(df)
print(f"Stats for: {fname} **************************")
print(f"\t- Documents: {len(set(df['docid']))}")
print(f"\t- Sentences: {len(df)}")
print("\t\t- Removed: {} ({:.2f}%)".format(removed, removed / total_old * 100))
print("\t- Titles/Abstracts: {}/{} ({:.2f}%)".format(total_doctypes['title'], total_doctypes['text'],
total_doctypes['title'] / total_doctypes['text'] * 100))
# Save data
df.to_csv(os.path.join(RAW_PATH, fname), index=False)
print("File saved!")
print("")
print("Done!")
|
"""
Pulls data from:
https://www.divvybikes.com/system-data
https://s3.amazonaws.com/divvy-data/tripdata
"""
from io import BytesIO
import os
import re
import requests
from zipfile import ZipFile
from typing import List
from lxml import html
import pandas as pd
from .stations_feed import StationsFeed
STN_DT_FORM = {
'2013': "%m/%d/%Y", # Not labeled for quarters
'2014_Q1Q2': None, # xlsx file
'2014_Q3Q4': "%m/%d/%Y %H:%M",
'2015': None, # no date column and not labeled for quarters
'2016_Q1Q2': "%m/%d/%Y",
'2016_Q3': "%m/%d/%Y",
'2016_Q4': "%m/%d/%Y",
'2017_Q1Q2': "%m/%d/%Y %H:%M:%S",
'2017_Q3Q4': "%m/%d/%Y %H:%M",
}
STN_COL_MAP = {
'latitude': 'lat',
'longitude': 'lon',
'dateCreated': 'online_date',
'online date': 'online_date',
}
RD_DT_FORM = {
'2013': "%Y-%m-%d %H:%M", # Not labeled for quarters
'2014_Q1Q2': "%m/%d/%Y %H:%M",
'2014_Q3': "%m/%d/%Y %H:%M",
'2014_Q4': "%m/%d/%Y %H:%M",
'2015_Q1': "%m/%d/%Y %H:%M",
'2015_Q2': "%m/%d/%Y %H:%M",
'2015': "%m/%d/%Y %H:%M", # Q3 labeled as month integer
'2015_Q4': "%m/%d/%Y %H:%M",
'2016_Q1': "%m/%d/%Y %H:%M",
'2016': "%m/%d/%Y %H:%M", # Q2 labeled as month integer
'2016_Q3': "%m/%d/%Y %H:%M:%S",
'2016_Q4': "%m/%d/%Y %H:%M:%S",
'2017_Q1': "%m/%d/%Y %H:%M:%S",
'2017_Q2': "%m/%d/%Y %H:%M:%S",
'2017_Q3': "%m/%d/%Y %H:%M:%S",
'2017_Q4': "%m/%d/%Y %H:%M",
'2018_Q1': "%Y-%m-%d %H:%M:%S",
'2018_Q2': "%Y-%m-%d %H:%M:%S",
'2018_Q3': "%Y-%m-%d %H:%M:%S",
'2018_Q4': "%Y-%m-%d %H:%M:%S",
}
RD_COL_MAP = {
'01 - Rental Details Rental ID': 'trip_id',
'01 - Rental Details Local Start Time': 'start_time',
'01 - Rental Details Local End Time': 'end_time',
'01 - Rental Details Bike ID': 'bikeid',
'01 - Rental Details Duration In Seconds Uncapped': 'tripduration',
'03 - Rental Start Station ID': 'from_station_id',
'03 - Rental Start Station Name': 'from_station_name',
'02 - Rental End Station ID': 'to_station_id',
'02 - Rental End Station Name': 'to_station_name',
'User Type': 'usertype',
'Member Gender': 'gender',
'05 - Member Details Member Birthday Year': 'birthyear',
'stoptime': 'end_time',
'starttime': 'start_time',
'birthday': 'birthyear',
}
def parse_zip_urls_from_url(url):
r = requests.get(url)
webpage = html.fromstring(r.content)
base_source = 'https://s3.amazonaws.com/divvy-data/tripdata/'
urls = [url for url in set(webpage.xpath('//a/@href'))
if (base_source in url and url.endswith('.zip'))]
return urls
def year_lookup_to_date(yr_lookup: str) -> str:
q_map = {
'Q1': '03-31',
'Q2': '06-30',
'Q3': '09-30',
'Q4': '12-31',
}
yr_l_splt = yr_lookup.split('_')
q = yr_l_splt[-1][-2:]
date = q_map.get(q, '12-31')
date = f'{yr_l_splt[0]}-{date}'
return date
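# Worked examples (following q_map above):
#   year_lookup_to_date('2016_Q3') -> '2016-09-30'
#   year_lookup_to_date('2013')    -> '2013-12-31'  (no quarter suffix, so the 12-31 default applies)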
def get_current_stations():
"""Pulls most recent data from Divvy JSON feed.
    Necessary because Divvy did not provide 2018 station data.
"""
df = StationsFeed().get_current_data()
cols = ['id', 'stationName', 'latitude', 'longitude',
'totalDocks', 'lastCommunicationTime']
df = df[cols].rename(columns={
'stationName': 'name',
'lastCommunicationTime': 'as_of_date',
'totalDocks': 'dpcapacity'
})
df = df.rename(columns=STN_COL_MAP)
return df
def process_ride_df(z, fpath, year_lookup):
df = (pd.read_csv(z.open(fpath))
.rename(columns=RD_COL_MAP))
df['start_time'] = pd.to_datetime(
df['start_time'],
format=RD_DT_FORM.get(year_lookup, None),
errors='coerce'
)
df['end_time'] = pd.to_datetime(
df['end_time'],
format=RD_DT_FORM.get(year_lookup, None),
errors='coerce'
)
return df
def process_station_df(z, fpath, year_lookup):
if fpath.endswith('.csv'):
df = pd.read_csv(z.open(fpath))
else: # must be '.xlsx'
df = pd.read_excel(z.open(fpath))
df = df.rename(columns=STN_COL_MAP)
df['as_of_date'] = year_lookup_to_date(year_lookup)
df['as_of_date'] = pd.to_datetime(df['as_of_date'])
if 'online_date' in df:
df['online_date'] = pd.to_datetime(
df['online_date'],
format=STN_DT_FORM.get(year_lookup, None),
errors='coerce'
)
return df
def combine_ride_dfs(dfs: List[pd.DataFrame]) -> pd.DataFrame:
dfs = (pd.concat(dfs, ignore_index=True, sort=True)
.sort_values('start_time')
.reset_index(drop=True))
dfs['tripduration'] = (
dfs.tripduration.astype(str).str.replace(',', '').astype(float)
)
cols = ['trip_id', 'bikeid', 'start_time', 'end_time', 'tripduration',
'from_station_id', 'from_station_name', 'to_station_id',
'to_station_name', 'usertype', 'gender', 'birthyear']
dfs = dfs[[col for col in cols if col in dfs]]
return dfs
def combine_station_dfs(dfs: List[pd.DataFrame]) -> pd.DataFrame:
dfs = (pd.concat(dfs, ignore_index=True, sort=True)
.sort_values(['id', 'as_of_date'])
.reset_index(drop=True))
# excludes ['city', 'Unnamed: 7']
cols = ['id', 'name', 'as_of_date', 'lat', 'lon', 'dpcapacity',
'online_date', 'landmark']
dfs = dfs[[col for col in cols if col in dfs]]
return dfs
def get_historical_data(years: List[str], write_to: str = '', rides=True,
stations=True):
"""Gathers and cleans historical Divvy data
write_to: optional local folder path to extract zip files to
returns: (pandas.DataFrame of rides, pandas.DataFrame of stations)
"""
if isinstance(years, str):
years = [years]
ride_dfs = []
station_dfs = []
if not (rides or stations):
return ride_dfs, station_dfs
urls = parse_zip_urls_from_url('https://www.divvybikes.com/system-data')
for url in sorted(urls):
z_fn = url.split('/')[-1]
z_year = re.findall(r'20\d{2}', z_fn)[0]
if z_year not in years:
continue
print(url)
r = requests.get(url)
with ZipFile(BytesIO(r.content)) as z:
if write_to:
write_path = os.path.join(write_to, z_fn.replace('.zip', ''))
z.extractall(write_path)
for fpath in z.namelist():
fn = fpath.split('/')[-1]
if fn.endswith(('.csv', '.xlsx')) and not fn.startswith('.'):
quarter = re.findall('Q[1-4]', fn)
if quarter:
                        year_lookup = f"{z_year}_{''.join(quarter)}"
else:
year_lookup = z_year
else:
continue
if rides and '_trips_' in fn.lower():
print(fn, year_lookup)
df = process_ride_df(z, fpath, year_lookup)
ride_dfs.append(df)
elif stations and '_stations_' in fn.lower():
print(fn, year_lookup)
df = process_station_df(z, fpath, year_lookup)
station_dfs.append(df)
if rides:
ride_dfs = combine_ride_dfs(ride_dfs)
if stations:
if '2018' in years:
df = get_current_stations()
station_dfs.append(df)
station_dfs = combine_station_dfs(station_dfs)
return ride_dfs, station_dfs
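# Usage sketch (requires network access; passing '2018' also pulls the live station feed
# via get_current_stations()):
#   rides, stations = get_historical_data(['2017', '2018'], write_to='divvy_zips')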
|
"""
Pulls data from:
https://www.divvybikes.com/system-data
https://s3.amazonaws.com/divvy-data/tripdata
"""
from io import BytesIO
import os
import re
import requests
from zipfile import ZipFile
from typing import List
from lxml import html
import pandas as pd
from .stations_feed import StationsFeed
STN_DT_FORM = {
'2013': "%m/%d/%Y", # Not labeled for quarters
'2014_Q1Q2': None, # xlsx file
'2014_Q3Q4': "%m/%d/%Y %H:%M",
'2015': None, # no date column and not labeled for quarters
'2016_Q1Q2': "%m/%d/%Y",
'2016_Q3': "%m/%d/%Y",
'2016_Q4': "%m/%d/%Y",
'2017_Q1Q2': "%m/%d/%Y %H:%M:%S",
'2017_Q3Q4': "%m/%d/%Y %H:%M",
}
STN_COL_MAP = {
'latitude': 'lat',
'longitude': 'lon',
'dateCreated': 'online_date',
'online date': 'online_date',
}
RD_DT_FORM = {
'2013': "%Y-%m-%d %H:%M", # Not labeled for quarters
'2014_Q1Q2': "%m/%d/%Y %H:%M",
'2014_Q3': "%m/%d/%Y %H:%M",
'2014_Q4': "%m/%d/%Y %H:%M",
'2015_Q1': "%m/%d/%Y %H:%M",
'2015_Q2': "%m/%d/%Y %H:%M",
'2015': "%m/%d/%Y %H:%M", # Q3 labeled as month integer
'2015_Q4': "%m/%d/%Y %H:%M",
'2016_Q1': "%m/%d/%Y %H:%M",
'2016': "%m/%d/%Y %H:%M", # Q2 labeled as month integer
'2016_Q3': "%m/%d/%Y %H:%M:%S",
'2016_Q4': "%m/%d/%Y %H:%M:%S",
'2017_Q1': "%m/%d/%Y %H:%M:%S",
'2017_Q2': "%m/%d/%Y %H:%M:%S",
'2017_Q3': "%m/%d/%Y %H:%M:%S",
'2017_Q4': "%m/%d/%Y %H:%M",
'2018_Q1': "%Y-%m-%d %H:%M:%S",
'2018_Q2': "%Y-%m-%d %H:%M:%S",
'2018_Q3': "%Y-%m-%d %H:%M:%S",
'2018_Q4': "%Y-%m-%d %H:%M:%S",
}
RD_COL_MAP = {
'01 - Rental Details Rental ID': 'trip_id',
'01 - Rental Details Local Start Time': 'start_time',
'01 - Rental Details Local End Time': 'end_time',
'01 - Rental Details Bike ID': 'bikeid',
'01 - Rental Details Duration In Seconds Uncapped': 'tripduration',
'03 - Rental Start Station ID': 'from_station_id',
'03 - Rental Start Station Name': 'from_station_name',
'02 - Rental End Station ID': 'to_station_id',
'02 - Rental End Station Name': 'to_station_name',
'User Type': 'usertype',
'Member Gender': 'gender',
'05 - Member Details Member Birthday Year': 'birthyear',
'stoptime': 'end_time',
'starttime': 'start_time',
'birthday': 'birthyear',
}
def parse_zip_urls_from_url(url):
r = requests.get(url)
webpage = html.fromstring(r.content)
base_source = 'https://s3.amazonaws.com/divvy-data/tripdata/'
urls = [url for url in set(webpage.xpath('//a/@href'))
if (base_source in url and url.endswith('.zip'))]
return urls
def year_lookup_to_date(yr_lookup: str) -> str:
q_map = {
'Q1': '03-31',
'Q2': '06-30',
'Q3': '09-30',
'Q4': '12-31',
}
yr_l_splt = yr_lookup.split('_')
q = yr_l_splt[-1][-2:]
date = q_map.get(q, '12-31')
date = f'{yr_l_splt[0]}-{date}'
return date
def get_current_stations():
"""Pulls most recent data from Divvy JSON feed.
    Necessary because Divvy did not provide 2018 station data.
"""
df = StationsFeed().get_current_data()
cols = ['id', 'stationName', 'latitude', 'longitude',
'totalDocks', 'lastCommunicationTime']
df = df[cols].rename(columns={
'stationName': 'name',
'lastCommunicationTime': 'as_of_date',
'totalDocks': 'dpcapacity'
})
df = df.rename(columns=STN_COL_MAP)
return df
def process_ride_df(z, fpath, year_lookup):
df = (pd.read_csv(z.open(fpath))
.rename(columns=RD_COL_MAP))
df['start_time'] = pd.to_datetime(
df['start_time'],
format=RD_DT_FORM.get(year_lookup, None),
errors='coerce'
)
df['end_time'] = pd.to_datetime(
df['end_time'],
format=RD_DT_FORM.get(year_lookup, None),
errors='coerce'
)
return df
def process_station_df(z, fpath, year_lookup):
if fpath.endswith('.csv'):
df = pd.read_csv(z.open(fpath))
else: # must be '.xlsx'
df = pd.read_excel(z.open(fpath))
df = df.rename(columns=STN_COL_MAP)
df['as_of_date'] = year_lookup_to_date(year_lookup)
df['as_of_date'] = pd.to_datetime(df['as_of_date'])
if 'online_date' in df:
df['online_date'] = pd.to_datetime(
df['online_date'],
format=STN_DT_FORM.get(year_lookup, None),
errors='coerce'
)
return df
def combine_ride_dfs(dfs: List[pd.DataFrame]) -> pd.DataFrame:
dfs = (pd.concat(dfs, ignore_index=True, sort=True)
.sort_values('start_time')
.reset_index(drop=True))
dfs['tripduration'] = (
dfs.tripduration.astype(str).str.replace(',', '').astype(float)
)
cols = ['trip_id', 'bikeid', 'start_time', 'end_time', 'tripduration',
'from_station_id', 'from_station_name', 'to_station_id',
'to_station_name', 'usertype', 'gender', 'birthyear']
dfs = dfs[[col for col in cols if col in dfs]]
return dfs
def combine_station_dfs(dfs: List[pd.DataFrame]) -> pd.DataFrame:
dfs = (pd.concat(dfs, ignore_index=True, sort=True)
.sort_values(['id', 'as_of_date'])
.reset_index(drop=True))
# excludes ['city', 'Unnamed: 7']
cols = ['id', 'name', 'as_of_date', 'lat', 'lon', 'dpcapacity',
'online_date', 'landmark']
dfs = dfs[[col for col in cols if col in dfs]]
return dfs
def get_historical_data(years: List[str], write_to: str = '', rides=True,
stations=True):
"""Gathers and cleans historical Divvy data
write_to: optional local folder path to extract zip files to
returns: (pandas.DataFrame of rides, pandas.DataFrame of stations)
"""
if isinstance(years, str):
years = [years]
ride_dfs = []
station_dfs = []
if not (rides or stations):
return ride_dfs, station_dfs
urls = parse_zip_urls_from_url('https://www.divvybikes.com/system-data')
for url in sorted(urls):
z_fn = url.split('/')[-1]
z_year = re.findall(r'20\d{2}', z_fn)[0]
if z_year not in years:
continue
print(url)
r = requests.get(url)
with ZipFile(BytesIO(r.content)) as z:
if write_to:
write_path = os.path.join(write_to, z_fn.replace('.zip', ''))
z.extractall(write_path)
for fpath in z.namelist():
fn = fpath.split('/')[-1]
if fn.endswith(('.csv', '.xlsx')) and not fn.startswith('.'):
quarter = re.findall('Q[1-4]', fn)
if quarter:
year_lookup = f"{z_year}_{''.join(quarter)}"
else:
year_lookup = z_year
else:
continue
if rides and '_trips_' in fn.lower():
print(fn, year_lookup)
df = process_ride_df(z, fpath, year_lookup)
ride_dfs.append(df)
elif stations and '_stations_' in fn.lower():
print(fn, year_lookup)
df = process_station_df(z, fpath, year_lookup)
station_dfs.append(df)
if rides:
ride_dfs = combine_ride_dfs(ride_dfs)
if stations:
if '2018' in years:
df = get_current_stations()
station_dfs.append(df)
station_dfs = combine_station_dfs(station_dfs)
return ride_dfs, station_dfs
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# Credits to Hitalo-Sama and FTG Modules
from datetime import datetime
from emoji import emojize
from math import sqrt
from telethon.tl.functions.channels import GetFullChannelRequest, GetParticipantsRequest
from telethon.tl.functions.messages import GetFullChatRequest, GetHistoryRequest
from telethon.tl.types import MessageActionChannelMigrateFrom, ChannelParticipantsAdmins
from telethon.errors import (
ChannelInvalidError,
ChannelPrivateError,
ChannelPublicGroupNaError)
from telethon.utils import get_input_location
from userbot import CMD_HELP
from userbot.events import register
@register(pattern=".chatinfo(?: |$)(.*)", outgoing=True)
async def info(event):
await event.edit("`Analysing the chat...`")
chat = await get_chatinfo(event)
caption = await fetch_info(chat, event)
try:
await event.edit(caption, parse_mode="html")
except Exception as e:
print("Exception:", e)
await event.edit("`An unexpected error has occurred.`")
return
async def get_chatinfo(event):
chat = event.pattern_match.group(1)
chat_info = None
if chat:
try:
chat = int(chat)
except ValueError:
pass
if not chat:
if event.reply_to_msg_id:
replied_msg = await event.get_reply_message()
if replied_msg.fwd_from and replied_msg.fwd_from.channel_id is not None:
chat = replied_msg.fwd_from.channel_id
else:
chat = event.chat_id
try:
chat_info = await event.client(GetFullChatRequest(chat))
except BaseException:
try:
chat_info = await event.client(GetFullChannelRequest(chat))
except ChannelInvalidError:
await event.edit("`Invalid channel/group`")
return None
except ChannelPrivateError:
await event.edit("`This is a private channel/group or I am banned from there`")
return None
except ChannelPublicGroupNaError:
await event.edit("`Channel or supergroup doesn't exist`")
return None
except (TypeError, ValueError) as err:
await event.edit(str(err))
return None
return chat_info
async def fetch_info(chat, event):
# chat.chats is a list so we use get_entity() to avoid IndexError
chat_obj_info = await event.client.get_entity(chat.full_chat.id)
broadcast = chat_obj_info.broadcast if hasattr(
chat_obj_info, "broadcast") else False
chat_type = "Channel" if broadcast else "Group"
chat_title = chat_obj_info.title
warn_emoji = emojize(":warning:")
try:
msg_info = await event.client(GetHistoryRequest(peer=chat_obj_info.id, offset_id=0, offset_date=datetime(2010, 1, 1),
add_offset=-1, limit=1, max_id=0, min_id=0, hash=0))
except Exception as e:
msg_info = None
print("Exception:", e)
# No chance for IndexError as it checks for msg_info.messages first
first_msg_valid = True if msg_info and msg_info.messages and msg_info.messages[
0].id == 1 else False
# Same for msg_info.users
creator_valid = True if first_msg_valid and msg_info.users else False
creator_id = msg_info.users[0].id if creator_valid else None
creator_firstname = msg_info.users[0].first_name if creator_valid and msg_info.users[
0].first_name is not None else "Deleted Account"
creator_username = msg_info.users[0].username if creator_valid and msg_info.users[0].username is not None else None
created = msg_info.messages[0].date if first_msg_valid else None
former_title = msg_info.messages[0].action.title if first_msg_valid and isinstance(
msg_info.messages[0].action,
MessageActionChannelMigrateFrom) and msg_info.messages[0].action.title != chat_title else None
try:
dc_id, location = get_input_location(chat.full_chat.chat_photo)
except Exception as e:
dc_id = "Unknown"
str(e)
# this is some spaghetti I need to change
description = chat.full_chat.about
members = chat.full_chat.participants_count if hasattr(
chat.full_chat, "participants_count") else chat_obj_info.participants_count
admins = chat.full_chat.admins_count if hasattr(
chat.full_chat, "admins_count") else None
banned_users = chat.full_chat.kicked_count if hasattr(
chat.full_chat, "kicked_count") else None
restrcited_users = chat.full_chat.banned_count if hasattr(
chat.full_chat, "banned_count") else None
members_online = chat.full_chat.online_count if hasattr(
chat.full_chat, "online_count") else 0
group_stickers = chat.full_chat.stickerset.title if hasattr(
chat.full_chat, "stickerset") and chat.full_chat.stickerset else None
messages_viewable = msg_info.count if msg_info else None
messages_sent = chat.full_chat.read_inbox_max_id if hasattr(
chat.full_chat, "read_inbox_max_id") else None
messages_sent_alt = chat.full_chat.read_outbox_max_id if hasattr(
chat.full_chat, "read_outbox_max_id") else None
exp_count = chat.full_chat.pts if hasattr(chat.full_chat, "pts") else None
username = chat_obj_info.username if hasattr(
chat_obj_info, "username") else None
bots_list = chat.full_chat.bot_info # this is a list
bots = 0
supergroup = "<b>Yes</b>" if hasattr(chat_obj_info,
"megagroup") and chat_obj_info.megagroup else "No"
slowmode = "<b>Yes</b>" if hasattr(chat_obj_info,
"slowmode_enabled") and chat_obj_info.slowmode_enabled else "No"
slowmode_time = chat.full_chat.slowmode_seconds if hasattr(
chat_obj_info, "slowmode_enabled") and chat_obj_info.slowmode_enabled else None
restricted = "<b>Yes</b>" if hasattr(chat_obj_info,
"restricted") and chat_obj_info.restricted else "No"
verified = "<b>Yes</b>" if hasattr(chat_obj_info,
"verified") and chat_obj_info.verified else "No"
username = "@{}".format(username) if username else None
creator_username = "@{}".format(
creator_username) if creator_username else None
# end of spaghetti block
if admins is None:
# use this alternative way if chat.full_chat.admins_count is None,
# works even without being an admin
try:
participants_admins = await event.client(GetParticipantsRequest(channel=chat.full_chat.id, filter=ChannelParticipantsAdmins(),
offset=0, limit=0, hash=0))
admins = participants_admins.count if participants_admins else None
except Exception as e:
print("Exception:", e)
if bots_list:
for bot in bots_list:
bots += 1
caption = "<b>CHAT INFO:</b>\n"
caption += f"ID: <code>{chat_obj_info.id}</code>\n"
if chat_title is not None:
caption += f"{chat_type} name: {chat_title}\n"
    if former_title is not None:  # i.e. the very first title
caption += f"Former name: {former_title}\n"
if username is not None:
caption += f"{chat_type} type: Public\n"
caption += f"Link: {username}\n"
else:
caption += f"{chat_type} type: Private\n"
if creator_username is not None:
caption += f"Creator: {creator_username}\n"
elif creator_valid:
caption += f"Creator: <a href=\"tg://user?id={creator_id}\">{creator_firstname}</a>\n"
if created is not None:
        caption += f"Created: <code>{created.date().strftime('%b %d, %Y')} - {created.time()}</code>\n"
else:
        caption += f"Created: <code>{chat_obj_info.date.date().strftime('%b %d, %Y')} - {chat_obj_info.date.time()}</code> {warn_emoji}\n"
caption += f"Data Centre ID: {dc_id}\n"
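    # the "chat level" below is a heuristic derived from the chat's pts counter: level = floor((1 + sqrt(1 + 7*pts/14)) / 2)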
if exp_count is not None:
chat_level = int((1 + sqrt(1 + 7 * exp_count / 14)) / 2)
caption += f"{chat_type} level: <code>{chat_level}</code>\n"
if messages_viewable is not None:
caption += f"Viewable messages: <code>{messages_viewable}</code>\n"
if messages_sent:
caption += f"Messages sent: <code>{messages_sent}</code>\n"
elif messages_sent_alt:
caption += f"Messages sent: <code>{messages_sent_alt}</code> {warn_emoji}\n"
if members is not None:
caption += f"Members: <code>{members}</code>\n"
if admins is not None:
caption += f"Administrators: <code>{admins}</code>\n"
if bots_list:
caption += f"Bots: <code>{bots}</code>\n"
if members_online:
caption += f"Currently online: <code>{members_online}</code>\n"
if restrcited_users is not None:
caption += f"Restricted users: <code>{restrcited_users}</code>\n"
if banned_users is not None:
caption += f"Banned users: <code>{banned_users}</code>\n"
if group_stickers is not None:
caption += f"{chat_type} stickers: <a href=\"t.me/addstickers/{chat.full_chat.stickerset.short_name}\">{group_stickers}</a>\n"
caption += "\n"
if not broadcast:
caption += f"Slow mode: {slowmode}"
if hasattr(
chat_obj_info,
"slowmode_enabled") and chat_obj_info.slowmode_enabled:
caption += f", <code>{slowmode_time}s</code>\n\n"
else:
caption += "\n\n"
if not broadcast:
caption += f"Supergroup: {supergroup}\n\n"
if hasattr(chat_obj_info, "restricted"):
caption += f"Restricted: {restricted}\n"
if chat_obj_info.restricted:
caption += f"> Platform: {chat_obj_info.restriction_reason[0].platform}\n"
caption += f"> Reason: {chat_obj_info.restriction_reason[0].reason}\n"
caption += f"> Text: {chat_obj_info.restriction_reason[0].text}\n\n"
else:
caption += "\n"
if hasattr(chat_obj_info, "scam") and chat_obj_info.scam:
caption += "Scam: <b>Yes</b>\n\n"
if hasattr(chat_obj_info, "verified"):
caption += f"Verified by Telegram: {verified}\n\n"
if description:
caption += f"Description: \n<code>{description}</code>\n"
return caption
CMD_HELP.update({
"chatinfo":
".chatinfo [optional: <reply/tag/chat id/invite link>]\
\nUsage: Gets info of a chat. Some info might be limited due to missing permissions."
})
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# Credits to Hitalo-Sama and FTG Modules
from datetime import datetime
from emoji import emojize
from math import sqrt
from telethon.tl.functions.channels import GetFullChannelRequest, GetParticipantsRequest
from telethon.tl.functions.messages import GetFullChatRequest, GetHistoryRequest
from telethon.tl.types import MessageActionChannelMigrateFrom, ChannelParticipantsAdmins
from telethon.errors import (
ChannelInvalidError,
ChannelPrivateError,
ChannelPublicGroupNaError)
from telethon.utils import get_input_location
from userbot import CMD_HELP
from userbot.events import register
@register(pattern=".chatinfo(?: |$)(.*)", outgoing=True)
async def info(event):
await event.edit("`Analysing the chat...`")
chat = await get_chatinfo(event)
caption = await fetch_info(chat, event)
try:
await event.edit(caption, parse_mode="html")
except Exception as e:
print("Exception:", e)
await event.edit("`An unexpected error has occurred.`")
return
async def get_chatinfo(event):
chat = event.pattern_match.group(1)
chat_info = None
if chat:
try:
chat = int(chat)
except ValueError:
pass
if not chat:
if event.reply_to_msg_id:
replied_msg = await event.get_reply_message()
if replied_msg.fwd_from and replied_msg.fwd_from.channel_id is not None:
chat = replied_msg.fwd_from.channel_id
else:
chat = event.chat_id
try:
chat_info = await event.client(GetFullChatRequest(chat))
except BaseException:
try:
chat_info = await event.client(GetFullChannelRequest(chat))
except ChannelInvalidError:
await event.edit("`Invalid channel/group`")
return None
except ChannelPrivateError:
await event.edit("`This is a private channel/group or I am banned from there`")
return None
except ChannelPublicGroupNaError:
await event.edit("`Channel or supergroup doesn't exist`")
return None
except (TypeError, ValueError) as err:
await event.edit(str(err))
return None
return chat_info
async def fetch_info(chat, event):
# chat.chats is a list so we use get_entity() to avoid IndexError
chat_obj_info = await event.client.get_entity(chat.full_chat.id)
broadcast = chat_obj_info.broadcast if hasattr(
chat_obj_info, "broadcast") else False
chat_type = "Channel" if broadcast else "Group"
chat_title = chat_obj_info.title
warn_emoji = emojize(":warning:")
try:
msg_info = await event.client(GetHistoryRequest(peer=chat_obj_info.id, offset_id=0, offset_date=datetime(2010, 1, 1),
add_offset=-1, limit=1, max_id=0, min_id=0, hash=0))
except Exception as e:
msg_info = None
print("Exception:", e)
# No chance for IndexError as it checks for msg_info.messages first
first_msg_valid = True if msg_info and msg_info.messages and msg_info.messages[
0].id == 1 else False
# Same for msg_info.users
creator_valid = True if first_msg_valid and msg_info.users else False
creator_id = msg_info.users[0].id if creator_valid else None
creator_firstname = msg_info.users[0].first_name if creator_valid and msg_info.users[
0].first_name is not None else "Deleted Account"
creator_username = msg_info.users[0].username if creator_valid and msg_info.users[0].username is not None else None
created = msg_info.messages[0].date if first_msg_valid else None
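    # if the supergroup was migrated from a basic group, message #1 carries the pre-migration title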
former_title = msg_info.messages[0].action.title if first_msg_valid and isinstance(
msg_info.messages[0].action,
MessageActionChannelMigrateFrom) and msg_info.messages[0].action.title != chat_title else None
try:
dc_id, location = get_input_location(chat.full_chat.chat_photo)
except Exception as e:
dc_id = "Unknown"
str(e)
# this is some spaghetti I need to change
description = chat.full_chat.about
members = chat.full_chat.participants_count if hasattr(
chat.full_chat, "participants_count") else chat_obj_info.participants_count
admins = chat.full_chat.admins_count if hasattr(
chat.full_chat, "admins_count") else None
banned_users = chat.full_chat.kicked_count if hasattr(
chat.full_chat, "kicked_count") else None
    restricted_users = chat.full_chat.banned_count if hasattr(
chat.full_chat, "banned_count") else None
members_online = chat.full_chat.online_count if hasattr(
chat.full_chat, "online_count") else 0
group_stickers = chat.full_chat.stickerset.title if hasattr(
chat.full_chat, "stickerset") and chat.full_chat.stickerset else None
messages_viewable = msg_info.count if msg_info else None
messages_sent = chat.full_chat.read_inbox_max_id if hasattr(
chat.full_chat, "read_inbox_max_id") else None
messages_sent_alt = chat.full_chat.read_outbox_max_id if hasattr(
chat.full_chat, "read_outbox_max_id") else None
exp_count = chat.full_chat.pts if hasattr(chat.full_chat, "pts") else None
username = chat_obj_info.username if hasattr(
chat_obj_info, "username") else None
bots_list = chat.full_chat.bot_info # this is a list
bots = 0
supergroup = "<b>Yes</b>" if hasattr(chat_obj_info,
"megagroup") and chat_obj_info.megagroup else "No"
slowmode = "<b>Yes</b>" if hasattr(chat_obj_info,
"slowmode_enabled") and chat_obj_info.slowmode_enabled else "No"
slowmode_time = chat.full_chat.slowmode_seconds if hasattr(
chat_obj_info, "slowmode_enabled") and chat_obj_info.slowmode_enabled else None
restricted = "<b>Yes</b>" if hasattr(chat_obj_info,
"restricted") and chat_obj_info.restricted else "No"
verified = "<b>Yes</b>" if hasattr(chat_obj_info,
"verified") and chat_obj_info.verified else "No"
username = "@{}".format(username) if username else None
creator_username = "@{}".format(
creator_username) if creator_username else None
# end of spaghetti block
if admins is None:
# use this alternative way if chat.full_chat.admins_count is None,
# works even without being an admin
try:
participants_admins = await event.client(GetParticipantsRequest(channel=chat.full_chat.id, filter=ChannelParticipantsAdmins(),
offset=0, limit=0, hash=0))
admins = participants_admins.count if participants_admins else None
except Exception as e:
print("Exception:", e)
if bots_list:
for bot in bots_list:
bots += 1
caption = "<b>CHAT INFO:</b>\n"
caption += f"ID: <code>{chat_obj_info.id}</code>\n"
if chat_title is not None:
caption += f"{chat_type} name: {chat_title}\n"
    if former_title is not None:  # i.e. the very first title the chat had
caption += f"Former name: {former_title}\n"
if username is not None:
caption += f"{chat_type} type: Public\n"
caption += f"Link: {username}\n"
else:
caption += f"{chat_type} type: Private\n"
if creator_username is not None:
caption += f"Creator: {creator_username}\n"
elif creator_valid:
caption += f"Creator: <a href=\"tg://user?id={creator_id}\">{creator_firstname}</a>\n"
if created is not None:
caption += f"Created: <code>{created.date().strftime('%b %d, %Y')} - {created.time()}</code>\n"
else:
caption += f"Created: <code>{chat_obj_info.date.date().strftime('%b %d, %Y')} - {chat_obj_info.date.time()}</code> {warn_emoji}\n"
caption += f"Data Centre ID: {dc_id}\n"
if exp_count is not None:
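        # rough gamification heuristic: derive a "level" from the chat's update counter (pts)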
chat_level = int((1 + sqrt(1 + 7 * exp_count / 14)) / 2)
caption += f"{chat_type} level: <code>{chat_level}</code>\n"
if messages_viewable is not None:
caption += f"Viewable messages: <code>{messages_viewable}</code>\n"
if messages_sent:
caption += f"Messages sent: <code>{messages_sent}</code>\n"
elif messages_sent_alt:
caption += f"Messages sent: <code>{messages_sent_alt}</code> {warn_emoji}\n"
if members is not None:
caption += f"Members: <code>{members}</code>\n"
if admins is not None:
caption += f"Administrators: <code>{admins}</code>\n"
if bots_list:
caption += f"Bots: <code>{bots}</code>\n"
if members_online:
caption += f"Currently online: <code>{members_online}</code>\n"
    if restricted_users is not None:
        caption += f"Restricted users: <code>{restricted_users}</code>\n"
if banned_users is not None:
caption += f"Banned users: <code>{banned_users}</code>\n"
if group_stickers is not None:
caption += f"{chat_type} stickers: <a href=\"t.me/addstickers/{chat.full_chat.stickerset.short_name}\">{group_stickers}</a>\n"
caption += "\n"
if not broadcast:
caption += f"Slow mode: {slowmode}"
if hasattr(
chat_obj_info,
"slowmode_enabled") and chat_obj_info.slowmode_enabled:
caption += f", <code>{slowmode_time}s</code>\n\n"
else:
caption += "\n\n"
if not broadcast:
caption += f"Supergroup: {supergroup}\n\n"
if hasattr(chat_obj_info, "restricted"):
caption += f"Restricted: {restricted}\n"
if chat_obj_info.restricted:
caption += f"> Platform: {chat_obj_info.restriction_reason[0].platform}\n"
caption += f"> Reason: {chat_obj_info.restriction_reason[0].reason}\n"
caption += f"> Text: {chat_obj_info.restriction_reason[0].text}\n\n"
else:
caption += "\n"
if hasattr(chat_obj_info, "scam") and chat_obj_info.scam:
caption += "Scam: <b>Yes</b>\n\n"
if hasattr(chat_obj_info, "verified"):
caption += f"Verified by Telegram: {verified}\n\n"
if description:
caption += f"Description: \n<code>{description}</code>\n"
return caption
CMD_HELP.update({
"chatinfo":
".chatinfo [optional: <reply/tag/chat id/invite link>]\
\nUsage: Gets info of a chat. Some info might be limited due to missing permissions."
})
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import json
import logging
import subprocess
import sys
from pathlib import Path
from typing import final, Sequence, Optional
LOG: logging.Logger = logging.getLogger(__name__)
@final
class PyreErrorException(Exception):
"""
Custom Exception to raise when Pyre errors out
"""
pass
def normalized_json_dump(
results: str, salient_keys_only: bool, filter_issues: bool
) -> str:
"""
Returns a normalised JSON string from results keeping only essential items.
Removes all keys that are not salient to determining if results have changed
when 'salient_keys_only' is true. Filters issues down to issues that have
the code we intend to test for if 'filter_issues' is true.
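    Illustrative call (hypothetical issue shape, not real Pysa output):
        normalized_json_dump(
            '[{"code": 5001, "define": "test_5001_flow", "description": "d",'
            ' "path": "a.py", "line": 1, "column": 2}]',
            salient_keys_only=True, filter_issues=True,
        )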
"""
normalized = json.loads(results)
if "errors" in normalized:
pretty_error = json.dumps(normalized, sort_keys=True, indent=2)
raise PyreErrorException(
f"Errors were found when processing results:\n{pretty_error}"
)
if filter_issues:
# Filter down to only issues that have the code that we intended to
# test for. This prevents the introduction of new rules or false
# positives from breaking existing tests.
normalized = [
issue for issue in normalized if f"test_{issue['code']}_" in issue["define"]
]
normalized = sorted(
normalized,
key=lambda issue: (
issue["code"],
issue["path"],
issue["line"],
issue["column"],
),
)
if salient_keys_only:
salient_keys = {"code", "define", "description", "path"}
stripped_issues = []
for issue in normalized:
stripped_issue = {
key: value for key, value in issue.items() if key in salient_keys
}
if set(stripped_issue.keys()) != salient_keys:
raise KeyError(
f"Expected issue to contain {salient_keys} keys, "
+ f"but instead found: {issue}"
)
stripped_issues.append(stripped_issue)
normalized = stripped_issues
return json.dumps(normalized, sort_keys=True, indent=2) + "\n"
def compare_results(
actual_results: str,
expected_results: str,
current_directory: Path,
filter_issues: bool,
) -> None:
normalized_pysa_results = normalized_json_dump(
actual_results, salient_keys_only=True, filter_issues=filter_issues
)
normalized_expected_results = normalized_json_dump(
expected_results, salient_keys_only=True, filter_issues=filter_issues
)
if normalized_pysa_results != normalized_expected_results:
actual_full_results_path = current_directory / "full_result.actual"
actual_full_results_path.write_text(
normalized_json_dump(
actual_results, salient_keys_only=False, filter_issues=filter_issues
)
)
actual_invariant_results_path = (
current_directory / "position_invariant_result.actual"
)
actual_invariant_results_path.write_text(normalized_pysa_results)
expected_invariant_results_path = (
current_directory / "position_invariant_result.json"
)
expected_invariant_results_path.write_text(normalized_expected_results)
result = subprocess.run(
[
"diff",
"-u",
expected_invariant_results_path,
actual_invariant_results_path,
],
text=True,
stdout=subprocess.PIPE,
)
friendly_exit(
"Output differs from expected:",
result.stdout,
"output-differs-from-expected",
)
else:
LOG.info("Run produced expected results")
def friendly_exit(error_message: str, logs: str, suggested_hash: str) -> None:
"""
    Log the error message and captured output via LOG, then exit with status 1.
"""
LOG.error("----BEGIN PYSA INTEGRATION TEST ERROR----")
LOG.error(error_message)
LOG.error(logs)
LOG.error("----END PYSA INTEGRATION TEST ERROR----")
sys.exit(1)
def run_pysa_integration_test(
current_directory: Path,
passthrough_args: Sequence[str],
skip_model_verification: bool,
filter_issues: bool,
save_results_to: Optional[Path],
run_from_source: bool = False,
) -> None:
"""
    Runs Pysa and compares its output to the expected results in
    full_result.json. Writes the raw output to raw_results.json. On a mismatch,
    position-stripped copies of the expected and actual results are written to
    position_invariant_result.json and position_invariant_result.actual, their
    diff is printed, and the run exits with an error.
"""
LOG.info("Running `pyre analyze`")
if run_from_source:
command = [
"python",
"-m" "pyre-check.client.pyre",
]
else:
command = ["pyre"]
command.extend(["--noninteractive", "analyze"])
if save_results_to is not None:
command.extend(["--save-results-to", str(save_results_to)])
if skip_model_verification:
command.append("--no-verify")
command.extend(passthrough_args)
LOG.debug(f"Using command: {command}")
pysa_results: str
try:
pysa_results = subprocess.check_output(
command, text=True, cwd=current_directory
)
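        # when --save-results-to is passed, pyre writes errors.json there; prefer that file over captured stdout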
if save_results_to is not None:
pysa_results = (save_results_to / "errors.json").read_text()
except subprocess.CalledProcessError as exception:
friendly_exit(
"Command failed with output:",
exception.stdout,
"found-x-model-verification-error",
)
(current_directory / "raw_results.json").write_text(pysa_results)
expected_results = (current_directory / "full_result.json").read_text()
compare_results(pysa_results, expected_results, current_directory, filter_issues)
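# Illustrative usage sketch (not part of the upstream script). The real entry
# point and argument parsing live elsewhere in pyre-check; the directory layout
# and flag values below are assumptions for demonstration only:
#
#   logging.basicConfig(level=logging.INFO)
#   run_pysa_integration_test(
#       current_directory=Path("."),
#       passthrough_args=[],
#       skip_model_verification=False,
#       filter_issues=True,
#       save_results_to=None,
#   )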
|
import os
import unittest
from urllib.parse import urlparse
import pytest
from w3lib.url import (
add_or_replace_parameter,
add_or_replace_parameters,
any_to_uri,
canonicalize_url,
file_uri_to_path,
is_url,
parse_data_uri,
parse_url,
path_to_file_uri,
safe_download_url,
safe_url_string,
url_query_parameter,
url_query_cleaner,
)
class UrlTests(unittest.TestCase):
def test_safe_url_string(self):
# Motoko Kusanagi (Cyborg from Ghost in the Shell)
motoko = "\u8349\u8599 \u7d20\u5b50"
self.assertEqual(
safe_url_string(motoko), # note the %20 for space
"%E8%8D%89%E8%96%99%20%E7%B4%A0%E5%AD%90",
)
self.assertEqual(
safe_url_string(motoko), safe_url_string(safe_url_string(motoko))
)
self.assertEqual(safe_url_string("©"), "%C2%A9") # copyright symbol
# page-encoding does not affect URL path
self.assertEqual(safe_url_string("©", "iso-8859-1"), "%C2%A9")
# path_encoding does
self.assertEqual(safe_url_string("©", path_encoding="iso-8859-1"), "%A9")
self.assertEqual(
safe_url_string("http://www.example.org/"), "http://www.example.org/"
)
alessi = "/ecommerce/oggetto/Te \xf2/tea-strainer/1273"
self.assertEqual(
safe_url_string(alessi), "/ecommerce/oggetto/Te%20%C3%B2/tea-strainer/1273"
)
self.assertEqual(
safe_url_string(
"http://www.example.com/test?p(29)url(http://www.another.net/page)"
),
"http://www.example.com/test?p(29)url(http://www.another.net/page)",
)
self.assertEqual(
safe_url_string(
"http://www.example.com/Brochures_&_Paint_Cards&PageSize=200"
),
"http://www.example.com/Brochures_&_Paint_Cards&PageSize=200",
)
# page-encoding does not affect URL path
# we still end up UTF-8 encoding characters before percent-escaping
safeurl = safe_url_string("http://www.example.com/£")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3")
safeurl = safe_url_string("http://www.example.com/£", encoding="utf-8")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3")
safeurl = safe_url_string("http://www.example.com/£", encoding="latin-1")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3")
safeurl = safe_url_string("http://www.example.com/£", path_encoding="latin-1")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%A3")
self.assertTrue(isinstance(safe_url_string(b"http://example.com/"), str))
def test_safe_url_string_remove_ascii_tab_and_newlines(self):
self.assertEqual(
safe_url_string("http://example.com/test\n.html"),
"http://example.com/test.html",
)
self.assertEqual(
safe_url_string("http://example.com/test\t.html"),
"http://example.com/test.html",
)
self.assertEqual(
safe_url_string("http://example.com/test\r.html"),
"http://example.com/test.html",
)
self.assertEqual(
safe_url_string("http://example.com/test\r.html\n"),
"http://example.com/test.html",
)
self.assertEqual(
safe_url_string("http://example.com/test\r\n.html\t"),
"http://example.com/test.html",
)
self.assertEqual(
safe_url_string("http://example.com/test\a\n.html"),
"http://example.com/test%07.html",
)
def test_safe_url_string_unsafe_chars(self):
safeurl = safe_url_string(
r"http://localhost:8001/unwise{,},|,\,^,[,],`?|=[]&[]=|"
)
self.assertEqual(
safeurl, r"http://localhost:8001/unwise%7B,%7D,|,%5C,%5E,[,],%60?|=[]&[]=|"
)
def test_safe_url_string_quote_path(self):
safeurl = safe_url_string('http://google.com/"hello"', quote_path=True)
self.assertEqual(safeurl, "http://google.com/%22hello%22")
safeurl = safe_url_string('http://google.com/"hello"', quote_path=False)
self.assertEqual(safeurl, 'http://google.com/"hello"')
safeurl = safe_url_string('http://google.com/"hello"')
self.assertEqual(safeurl, "http://google.com/%22hello%22")
def test_safe_url_string_with_query(self):
safeurl = safe_url_string("http://www.example.com/£?unit=µ")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%C2%B5")
safeurl = safe_url_string("http://www.example.com/£?unit=µ", encoding="utf-8")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%C2%B5")
safeurl = safe_url_string("http://www.example.com/£?unit=µ", encoding="latin-1")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%B5")
safeurl = safe_url_string(
"http://www.example.com/£?unit=µ", path_encoding="latin-1"
)
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%A3?unit=%C2%B5")
safeurl = safe_url_string(
"http://www.example.com/£?unit=µ",
encoding="latin-1",
path_encoding="latin-1",
)
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%A3?unit=%B5")
def test_safe_url_string_misc(self):
# mixing Unicode and percent-escaped sequences
safeurl = safe_url_string("http://www.example.com/£?unit=%C2%B5")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%C2%B5")
safeurl = safe_url_string("http://www.example.com/%C2%A3?unit=µ")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%C2%B5")
def test_safe_url_string_bytes_input(self):
safeurl = safe_url_string(b"http://www.example.com/")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/")
# bytes input is assumed to be UTF-8
safeurl = safe_url_string(b"http://www.example.com/\xc2\xb5")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%B5")
# page-encoding encoded bytes still end up as UTF-8 sequences in path
safeurl = safe_url_string(b"http://www.example.com/\xb5", encoding="latin1")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%B5")
safeurl = safe_url_string(
b"http://www.example.com/\xa3?unit=\xb5", encoding="latin1"
)
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%B5")
def test_safe_url_string_bytes_input_nonutf8(self):
# latin1
safeurl = safe_url_string(b"http://www.example.com/\xa3?unit=\xb5")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%A3?unit=%B5")
# cp1251
# >>> 'Россия'.encode('cp1251')
# '\xd0\xee\xf1\xf1\xe8\xff'
safeurl = safe_url_string(
b"http://www.example.com/country/\xd0\xee\xf1\xf1\xe8\xff"
)
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/country/%D0%EE%F1%F1%E8%FF")
def test_safe_url_idna(self):
# adapted from:
# https://ssl.icu-project.org/icu-bin/idnbrowser
# http://unicode.org/faq/idn.html
# + various others
websites = (
(
"http://www.färgbolaget.nu/färgbolaget",
"http://www.xn--frgbolaget-q5a.nu/f%C3%A4rgbolaget",
),
(
"http://www.räksmörgås.se/?räksmörgås=yes",
"http://www.xn--rksmrgs-5wao1o.se/?r%C3%A4ksm%C3%B6rg%C3%A5s=yes",
),
(
"http://www.brændendekærlighed.com/brændende/kærlighed",
"http://www.xn--brndendekrlighed-vobh.com/br%C3%A6ndende/k%C3%A6rlighed",
),
("http://www.예비교사.com", "http://www.xn--9d0bm53a3xbzui.com"),
("http://理容ナカムラ.com", "http://xn--lck1c3crb1723bpq4a.com"),
("http://あーるいん.com", "http://xn--l8je6s7a45b.com"),
# --- real websites ---
# in practice, this redirect (301) to http://www.buecher.de/?q=b%C3%BCcher
(
"http://www.bücher.de/?q=bücher",
"http://www.xn--bcher-kva.de/?q=b%C3%BCcher",
),
# Japanese
(
"http://はじめよう.みんな/?query=サ&maxResults=5",
"http://xn--p8j9a0d9c9a.xn--q9jyb4c/?query=%E3%82%B5&maxResults=5",
),
# Russian
("http://кто.рф/", "http://xn--j1ail.xn--p1ai/"),
(
"http://кто.рф/index.php?domain=Что",
"http://xn--j1ail.xn--p1ai/index.php?domain=%D0%A7%D1%82%D0%BE",
),
# Korean
("http://내도메인.한국/", "http://xn--220b31d95hq8o.xn--3e0b707e/"),
(
"http://맨체스터시티축구단.한국/",
"http://xn--2e0b17htvgtvj9haj53ccob62ni8d.xn--3e0b707e/",
),
# Arabic
("http://nic.شبكة", "http://nic.xn--ngbc5azd"),
# Chinese
("https://www.贷款.在线", "https://www.xn--0kwr83e.xn--3ds443g"),
("https://www2.xn--0kwr83e.在线", "https://www2.xn--0kwr83e.xn--3ds443g"),
("https://www3.贷款.xn--3ds443g", "https://www3.xn--0kwr83e.xn--3ds443g"),
)
for idn_input, safe_result in websites:
safeurl = safe_url_string(idn_input)
self.assertEqual(safeurl, safe_result)
# make sure the safe URL is unchanged when made safe a 2nd time
for _, safe_result in websites:
safeurl = safe_url_string(safe_result)
self.assertEqual(safeurl, safe_result)
def test_safe_url_idna_encoding_failure(self):
# missing DNS label
self.assertEqual(
safe_url_string("http://.example.com/résumé?q=résumé"),
"http://.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
# DNS label too long
self.assertEqual(
safe_url_string(f"http://www.{"example" * 11}.com/résumé?q=résumé"),
f"http://www.{"example" * 11}.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
def test_safe_url_port_number(self):
self.assertEqual(
safe_url_string("http://www.example.com:80/résumé?q=résumé"),
"http://www.example.com:80/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
self.assertEqual(
safe_url_string("http://www.example.com:/résumé?q=résumé"),
"http://www.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
def test_safe_url_string_preserve_nonfragment_hash(self):
# don't decode `%23` to `#`
self.assertEqual(
safe_url_string("http://www.example.com/path/to/%23/foo/bar"),
"http://www.example.com/path/to/%23/foo/bar",
)
self.assertEqual(
safe_url_string("http://www.example.com/path/to/%23/foo/bar#frag"),
"http://www.example.com/path/to/%23/foo/bar#frag",
)
self.assertEqual(
safe_url_string(
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2Fpath%2Fto%2F%23%2Fbar%2Ffoo"
),
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2Fpath%2Fto%2F%23%2Fbar%2Ffoo",
)
self.assertEqual(
safe_url_string(
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo#frag"
),
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo#frag",
)
def test_safe_download_url(self):
self.assertEqual(
safe_download_url("http://www.example.org"), "http://www.example.org/"
)
self.assertEqual(
safe_download_url("http://www.example.org/../"), "http://www.example.org/"
)
self.assertEqual(
safe_download_url("http://www.example.org/../../images/../image"),
"http://www.example.org/image",
)
self.assertEqual(
safe_download_url("http://www.example.org/dir/"),
"http://www.example.org/dir/",
)
self.assertEqual(
safe_download_url(b"http://www.example.org/dir/"),
"http://www.example.org/dir/",
)
# Encoding related tests
self.assertEqual(
safe_download_url(
b"http://www.example.org?\xa3",
encoding="latin-1",
path_encoding="latin-1",
),
"http://www.example.org/?%A3",
)
self.assertEqual(
safe_download_url(
b"http://www.example.org?\xc2\xa3",
encoding="utf-8",
path_encoding="utf-8",
),
"http://www.example.org/?%C2%A3",
)
self.assertEqual(
safe_download_url(
b"http://www.example.org/\xc2\xa3?\xc2\xa3",
encoding="utf-8",
path_encoding="latin-1",
),
"http://www.example.org/%A3?%C2%A3",
)
def test_is_url(self):
self.assertTrue(is_url("http://www.example.org"))
self.assertTrue(is_url("https://www.example.org"))
self.assertTrue(is_url("file:///some/path"))
self.assertFalse(is_url("foo://bar"))
self.assertFalse(is_url("foo--bar"))
def test_url_query_parameter(self):
self.assertEqual(
url_query_parameter("product.html?id=200&foo=bar", "id"), "200"
)
self.assertEqual(
url_query_parameter("product.html?id=200&foo=bar", "notthere", "mydefault"),
"mydefault",
)
self.assertEqual(url_query_parameter("product.html?id=", "id"), None)
self.assertEqual(
url_query_parameter("product.html?id=", "id", keep_blank_values=1), ""
)
def test_url_query_parameter_2(self):
"""
This problem was seen several times in the feeds. Sometime affiliate URLs contains
nested encoded affiliate URL with direct URL as parameters. For example:
aff_url1 = 'http://www.tkqlhce.com/click-2590032-10294381?url=http%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FArgosCreateReferral%3FstoreId%3D10001%26langId%3D-1%26referrer%3DCOJUN%26params%3Dadref%253DGarden+and+DIY-%3EGarden+furniture-%3EChildren%26%2339%3Bs+garden+furniture%26referredURL%3Dhttp%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FProductDisplay%253FstoreId%253D10001%2526catalogId%253D1500001501%2526productId%253D1500357023%2526langId%253D-1'
the typical code to extract needed URL from it is:
aff_url2 = url_query_parameter(aff_url1, 'url')
after this aff2_url is:
'http://www.argos.co.uk/webapp/wcs/stores/servlet/ArgosCreateReferral?storeId=10001&langId=-1&referrer=COJUN¶ms=adref%3DGarden and DIY->Garden furniture->Children's gardenfurniture&referredURL=http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay%3FstoreId%3D10001%26catalogId%3D1500001501%26productId%3D1500357023%26langId%3D-1'
the direct URL extraction is
url = url_query_parameter(aff_url2, 'referredURL')
but this will not work, because aff_url2 contains ' (comma sign encoded in the feed)
and the URL extraction will fail, current workaround was made in the spider,
just a replace for ' to %27
"""
return # FIXME: this test should pass but currently doesnt
# correct case
aff_url1 = "http://www.anrdoezrs.net/click-2590032-10294381?url=http%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FArgosCreateReferral%3FstoreId%3D10001%26langId%3D-1%26referrer%3DCOJUN%26params%3Dadref%253DGarden+and+DIY-%3EGarden+furniture-%3EGarden+table+and+chair+sets%26referredURL%3Dhttp%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FProductDisplay%253FstoreId%253D10001%2526catalogId%253D1500001501%2526productId%253D1500357199%2526langId%253D-1"
aff_url2 = url_query_parameter(aff_url1, "url")
self.assertEqual(
aff_url2,
"http://www.argos.co.uk/webapp/wcs/stores/servlet/ArgosCreateReferral?storeId=10001&langId=-1&referrer=COJUN¶ms=adref%3DGarden and DIY->Garden furniture->Garden table and chair sets&referredURL=http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay%3FstoreId%3D10001%26catalogId%3D1500001501%26productId%3D1500357199%26langId%3D-1",
)
prod_url = url_query_parameter(aff_url2, "referredURL")
self.assertEqual(
prod_url,
"http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay?storeId=10001&catalogId=1500001501&productId=1500357199&langId=-1",
)
# weird case
aff_url1 = "http://www.tkqlhce.com/click-2590032-10294381?url=http%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FArgosCreateReferral%3FstoreId%3D10001%26langId%3D-1%26referrer%3DCOJUN%26params%3Dadref%253DGarden+and+DIY-%3EGarden+furniture-%3EChildren%26%2339%3Bs+garden+furniture%26referredURL%3Dhttp%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FProductDisplay%253FstoreId%253D10001%2526catalogId%253D1500001501%2526productId%253D1500357023%2526langId%253D-1"
aff_url2 = url_query_parameter(aff_url1, "url")
self.assertEqual(
aff_url2,
"http://www.argos.co.uk/webapp/wcs/stores/servlet/ArgosCreateReferral?storeId=10001&langId=-1&referrer=COJUN¶ms=adref%3DGarden and DIY->Garden furniture->Children's garden furniture&referredURL=http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay%3FstoreId%3D10001%26catalogId%3D1500001501%26productId%3D1500357023%26langId%3D-1",
)
prod_url = url_query_parameter(aff_url2, "referredURL")
# fails, prod_url is None now
self.assertEqual(
prod_url,
"http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay?storeId=10001&catalogId=1500001501&productId=1500357023&langId=-1",
)
def test_add_or_replace_parameter(self):
url = "http://domain/test"
self.assertEqual(
add_or_replace_parameter(url, "arg", "v"), "http://domain/test?arg=v"
)
url = "http://domain/test?arg1=v1&arg2=v2&arg3=v3"
self.assertEqual(
add_or_replace_parameter(url, "arg4", "v4"),
"http://domain/test?arg1=v1&arg2=v2&arg3=v3&arg4=v4",
)
self.assertEqual(
add_or_replace_parameter(url, "arg3", "nv3"),
"http://domain/test?arg1=v1&arg2=v2&arg3=nv3",
)
self.assertEqual(
add_or_replace_parameter(
"http://domain/moreInfo.asp?prodID=", "prodID", "20"
),
"http://domain/moreInfo.asp?prodID=20",
)
url = "http://rmc-offers.co.uk/productlist.asp?BCat=2%2C60&CatID=60"
self.assertEqual(
add_or_replace_parameter(url, "BCat", "newvalue"),
"http://rmc-offers.co.uk/productlist.asp?BCat=newvalue&CatID=60",
)
url = "http://rmc-offers.co.uk/productlist.asp?BCat=2,60&CatID=60"
self.assertEqual(
add_or_replace_parameter(url, "BCat", "newvalue"),
"http://rmc-offers.co.uk/productlist.asp?BCat=newvalue&CatID=60",
)
url = "http://rmc-offers.co.uk/productlist.asp?"
self.assertEqual(
add_or_replace_parameter(url, "BCat", "newvalue"),
"http://rmc-offers.co.uk/productlist.asp?BCat=newvalue",
)
url = "http://example.com/?version=1&pageurl=http%3A%2F%2Fwww.example.com%2Ftest%2F%23fragment%3Dy¶m2=value2"
self.assertEqual(
add_or_replace_parameter(url, "version", "2"),
"http://example.com/?version=2&pageurl=http%3A%2F%2Fwww.example.com%2Ftest%2F%23fragment%3Dy¶m2=value2",
)
self.assertEqual(
add_or_replace_parameter(url, "pageurl", "test"),
"http://example.com/?version=1&pageurl=test¶m2=value2",
)
url = "http://domain/test?arg1=v1&arg2=v2&arg1=v3"
self.assertEqual(
add_or_replace_parameter(url, "arg4", "v4"),
"http://domain/test?arg1=v1&arg2=v2&arg1=v3&arg4=v4",
)
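        # replacing a parameter that appears more than once collapses it to a single occurrence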
self.assertEqual(
add_or_replace_parameter(url, "arg1", "v3"),
"http://domain/test?arg1=v3&arg2=v2",
)
@pytest.mark.xfail(reason="https://github.com/scrapy/w3lib/issues/164")
def test_add_or_replace_parameter_fail(self):
self.assertEqual(
add_or_replace_parameter(
"http://domain/test?arg1=v1;arg2=v2", "arg1", "v3"
),
"http://domain/test?arg1=v3&arg2=v2",
)
def test_add_or_replace_parameters(self):
url = "http://domain/test"
self.assertEqual(
add_or_replace_parameters(url, {"arg": "v"}), "http://domain/test?arg=v"
)
url = "http://domain/test?arg1=v1&arg2=v2&arg3=v3"
self.assertEqual(
add_or_replace_parameters(url, {"arg4": "v4"}),
"http://domain/test?arg1=v1&arg2=v2&arg3=v3&arg4=v4",
)
self.assertEqual(
add_or_replace_parameters(url, {"arg4": "v4", "arg3": "v3new"}),
"http://domain/test?arg1=v1&arg2=v2&arg3=v3new&arg4=v4",
)
url = "http://domain/test?arg1=v1&arg2=v2&arg1=v3"
self.assertEqual(
add_or_replace_parameters(url, {"arg4": "v4"}),
"http://domain/test?arg1=v1&arg2=v2&arg1=v3&arg4=v4",
)
self.assertEqual(
add_or_replace_parameters(url, {"arg1": "v3"}),
"http://domain/test?arg1=v3&arg2=v2",
)
def test_add_or_replace_parameters_does_not_change_input_param(self):
url = "http://domain/test?arg=original"
input_param = {"arg": "value"}
add_or_replace_parameters(url, input_param) # noqa
self.assertEqual(input_param, {"arg": "value"})
def test_url_query_cleaner(self):
self.assertEqual("product.html", url_query_cleaner("product.html?"))
self.assertEqual("product.html", url_query_cleaner("product.html?&"))
self.assertEqual(
"product.html?id=200",
url_query_cleaner("product.html?id=200&foo=bar&name=wired", ["id"]),
)
self.assertEqual(
"product.html?id=200",
url_query_cleaner("product.html?&id=200&&foo=bar&name=wired", ["id"]),
)
self.assertEqual(
"product.html", url_query_cleaner("product.html?foo=bar&name=wired", ["id"])
)
self.assertEqual(
"product.html?id=200&name=wired",
url_query_cleaner("product.html?id=200&foo=bar&name=wired", ["id", "name"]),
)
self.assertEqual(
"product.html?id",
url_query_cleaner("product.html?id&other=3&novalue=", ["id"]),
)
# default is to remove duplicate keys
self.assertEqual(
"product.html?d=1",
url_query_cleaner("product.html?d=1&e=b&d=2&d=3&other=other", ["d"]),
)
# unique=False disables duplicate keys filtering
self.assertEqual(
"product.html?d=1&d=2&d=3",
url_query_cleaner(
"product.html?d=1&e=b&d=2&d=3&other=other", ["d"], unique=False
),
)
self.assertEqual(
"product.html?id=200&foo=bar",
url_query_cleaner(
"product.html?id=200&foo=bar&name=wired#id20", ["id", "foo"]
),
)
self.assertEqual(
"product.html?foo=bar&name=wired",
url_query_cleaner(
"product.html?id=200&foo=bar&name=wired", ["id"], remove=True
),
)
self.assertEqual(
"product.html?name=wired",
url_query_cleaner(
"product.html?id=2&foo=bar&name=wired", ["id", "foo"], remove=True
),
)
self.assertEqual(
"product.html?foo=bar&name=wired",
url_query_cleaner(
"product.html?id=2&foo=bar&name=wired", ["id", "footo"], remove=True
),
)
self.assertEqual(
"product.html", url_query_cleaner("product.html", ["id"], remove=True)
)
self.assertEqual(
"product.html", url_query_cleaner("product.html?&", ["id"], remove=True)
)
self.assertEqual(
"product.html?foo=bar",
url_query_cleaner("product.html?foo=bar&name=wired", "foo"),
)
self.assertEqual(
"product.html?foobar=wired",
url_query_cleaner("product.html?foo=bar&foobar=wired", "foobar"),
)
def test_url_query_cleaner_keep_fragments(self):
self.assertEqual(
"product.html?id=200#foo",
url_query_cleaner(
"product.html?id=200&foo=bar&name=wired#foo",
["id"],
keep_fragments=True,
),
)
def test_path_to_file_uri(self):
if os.name == "nt":
self.assertEqual(
path_to_file_uri(r"C:\\windows\clock.avi"),
"file:///C:/windows/clock.avi",
)
else:
self.assertEqual(
path_to_file_uri("/some/path.txt"), "file:///some/path.txt"
)
fn = "test.txt"
x = path_to_file_uri(fn)
self.assertTrue(x.startswith("file:///"))
self.assertEqual(file_uri_to_path(x).lower(), os.path.abspath(fn).lower())
def test_file_uri_to_path(self):
if os.name == "nt":
self.assertEqual(
file_uri_to_path("file:///C:/windows/clock.avi"),
r"C:\\windows\clock.avi",
)
uri = "file:///C:/windows/clock.avi"
uri2 = path_to_file_uri(file_uri_to_path(uri))
self.assertEqual(uri, uri2)
else:
self.assertEqual(
file_uri_to_path("file:///path/to/test.txt"), "/path/to/test.txt"
)
self.assertEqual(file_uri_to_path("/path/to/test.txt"), "/path/to/test.txt")
uri = "file:///path/to/test.txt"
uri2 = path_to_file_uri(file_uri_to_path(uri))
self.assertEqual(uri, uri2)
self.assertEqual(file_uri_to_path("test.txt"), "test.txt")
def test_any_to_uri(self):
if os.name == "nt":
self.assertEqual(
any_to_uri(r"C:\\windows\clock.avi"), "file:///C:/windows/clock.avi"
)
else:
self.assertEqual(any_to_uri("/some/path.txt"), "file:///some/path.txt")
self.assertEqual(any_to_uri("file:///some/path.txt"), "file:///some/path.txt")
self.assertEqual(
any_to_uri("http://www.example.com/some/path.txt"),
"http://www.example.com/some/path.txt",
)
class CanonicalizeUrlTest(unittest.TestCase):
def test_canonicalize_url(self):
# simplest case
self.assertEqual(
canonicalize_url("http://www.example.com/"), "http://www.example.com/"
)
def test_return_str(self):
assert isinstance(canonicalize_url("http://www.example.com"), str)
assert isinstance(canonicalize_url(b"http://www.example.com"), str)
def test_append_missing_path(self):
self.assertEqual(
canonicalize_url("http://www.example.com"), "http://www.example.com/"
)
def test_typical_usage(self):
self.assertEqual(
canonicalize_url("http://www.example.com/do?a=1&b=2&c=3"),
"http://www.example.com/do?a=1&b=2&c=3",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?c=1&b=2&a=3"),
"http://www.example.com/do?a=3&b=2&c=1",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?&a=1"),
"http://www.example.com/do?a=1",
)
def test_port_number(self):
self.assertEqual(
canonicalize_url("http://www.example.com:8888/do?a=1&b=2&c=3"),
"http://www.example.com:8888/do?a=1&b=2&c=3",
)
# trailing empty ports are removed
self.assertEqual(
canonicalize_url("http://www.example.com:/do?a=1&b=2&c=3"),
"http://www.example.com/do?a=1&b=2&c=3",
)
def test_sorting(self):
self.assertEqual(
canonicalize_url("http://www.example.com/do?c=3&b=5&b=2&a=50"),
"http://www.example.com/do?a=50&b=2&b=5&c=3",
)
def test_keep_blank_values(self):
self.assertEqual(
canonicalize_url(
"http://www.example.com/do?b=&a=2", keep_blank_values=False
),
"http://www.example.com/do?a=2",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?b=&a=2"),
"http://www.example.com/do?a=2&b=",
)
self.assertEqual(
canonicalize_url(
"http://www.example.com/do?b=&c&a=2", keep_blank_values=False
),
"http://www.example.com/do?a=2",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?b=&c&a=2"),
"http://www.example.com/do?a=2&b=&c=",
)
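        # a bare query token is treated as a key with an empty value and gets percent-encoded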
self.assertEqual(
canonicalize_url("http://www.example.com/do?1750,4"),
"http://www.example.com/do?1750%2C4=",
)
def test_spaces(self):
self.assertEqual(
canonicalize_url("http://www.example.com/do?q=a space&a=1"),
"http://www.example.com/do?a=1&q=a+space",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?q=a+space&a=1"),
"http://www.example.com/do?a=1&q=a+space",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?q=a%20space&a=1"),
"http://www.example.com/do?a=1&q=a+space",
)
def test_canonicalize_url_unicode_path(self):
self.assertEqual(
canonicalize_url("http://www.example.com/résumé"),
"http://www.example.com/r%C3%A9sum%C3%A9",
)
def test_canonicalize_url_unicode_query_string(self):
# default encoding for path and query is UTF-8
self.assertEqual(
canonicalize_url("http://www.example.com/résumé?q=résumé"),
"http://www.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
# passed encoding will affect query string
self.assertEqual(
canonicalize_url(
"http://www.example.com/résumé?q=résumé", encoding="latin1"
),
"http://www.example.com/r%C3%A9sum%C3%A9?q=r%E9sum%E9",
)
self.assertEqual(
canonicalize_url(
"http://www.example.com/résumé?country=Россия", encoding="cp1251"
),
"http://www.example.com/r%C3%A9sum%C3%A9?country=%D0%EE%F1%F1%E8%FF",
)
def test_canonicalize_url_unicode_query_string_wrong_encoding(self):
# trying to encode with wrong encoding
# fallback to UTF-8
self.assertEqual(
canonicalize_url(
"http://www.example.com/résumé?currency=€", encoding="latin1"
),
"http://www.example.com/r%C3%A9sum%C3%A9?currency=%E2%82%AC",
)
self.assertEqual(
canonicalize_url(
"http://www.example.com/résumé?country=Россия", encoding="latin1"
),
"http://www.example.com/r%C3%A9sum%C3%A9?country=%D0%A0%D0%BE%D1%81%D1%81%D0%B8%D1%8F",
)
def test_normalize_percent_encoding_in_paths(self):
self.assertEqual(
canonicalize_url("http://www.example.com/r%c3%a9sum%c3%a9"),
"http://www.example.com/r%C3%A9sum%C3%A9",
)
# non-UTF8 encoded sequences: they should be kept untouched, only upper-cased
# 'latin1'-encoded sequence in path
self.assertEqual(
canonicalize_url("http://www.example.com/a%a3do"),
"http://www.example.com/a%A3do",
)
# 'latin1'-encoded path, UTF-8 encoded query string
self.assertEqual(
canonicalize_url("http://www.example.com/a%a3do?q=r%c3%a9sum%c3%a9"),
"http://www.example.com/a%A3do?q=r%C3%A9sum%C3%A9",
)
# 'latin1'-encoded path and query string
self.assertEqual(
canonicalize_url("http://www.example.com/a%a3do?q=r%e9sum%e9"),
"http://www.example.com/a%A3do?q=r%E9sum%E9",
)
def test_normalize_percent_encoding_in_query_arguments(self):
self.assertEqual(
canonicalize_url("http://www.example.com/do?k=b%a3"),
"http://www.example.com/do?k=b%A3",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?k=r%c3%a9sum%c3%a9"),
"http://www.example.com/do?k=r%C3%A9sum%C3%A9",
)
def test_non_ascii_percent_encoding_in_paths(self):
self.assertEqual(
canonicalize_url("http://www.example.com/a do?a=1"),
"http://www.example.com/a%20do?a=1",
)
self.assertEqual(
canonicalize_url("http://www.example.com/a %20do?a=1"),
"http://www.example.com/a%20%20do?a=1",
)
self.assertEqual(
canonicalize_url("http://www.example.com/a do£.html?a=1"),
"http://www.example.com/a%20do%C2%A3.html?a=1",
)
self.assertEqual(
canonicalize_url(b"http://www.example.com/a do\xc2\xa3.html?a=1"),
"http://www.example.com/a%20do%C2%A3.html?a=1",
)
def test_non_ascii_percent_encoding_in_query_arguments(self):
self.assertEqual(
canonicalize_url("http://www.example.com/do?price=£500&a=5&z=3"),
"http://www.example.com/do?a=5&price=%C2%A3500&z=3",
)
self.assertEqual(
canonicalize_url(b"http://www.example.com/do?price=\xc2\xa3500&a=5&z=3"),
"http://www.example.com/do?a=5&price=%C2%A3500&z=3",
)
self.assertEqual(
canonicalize_url(b"http://www.example.com/do?price(\xc2\xa3)=500&a=1"),
"http://www.example.com/do?a=1&price%28%C2%A3%29=500",
)
def test_urls_with_auth_and_ports(self):
self.assertEqual(
canonicalize_url("http://user:[email protected]:81/do?now=1"),
"http://user:[email protected]:81/do?now=1",
)
def test_remove_fragments(self):
self.assertEqual(
canonicalize_url("http://user:[email protected]/do?a=1#frag"),
"http://user:[email protected]/do?a=1",
)
self.assertEqual(
canonicalize_url(
"http://user:[email protected]/do?a=1#frag", keep_fragments=True
),
"http://user:[email protected]/do?a=1#frag",
)
def test_dont_convert_safe_characters(self):
        # don't convert safe characters to their percent-encoded representation
self.assertEqual(
canonicalize_url(
"http://www.simplybedrooms.com/White-Bedroom-Furniture/Bedroom-Mirror:-Josephine-Cheval-Mirror.html"
),
"http://www.simplybedrooms.com/White-Bedroom-Furniture/Bedroom-Mirror:-Josephine-Cheval-Mirror.html",
)
def test_safe_characters_unicode(self):
# urllib.quote uses a mapping cache of encoded characters. when parsing
# an already percent-encoded url, it will fail if that url was not
# percent-encoded as utf-8, that's why canonicalize_url must always
# convert the urls to string. the following test asserts that
# functionality.
self.assertEqual(
canonicalize_url("http://www.example.com/caf%E9-con-leche.htm"),
"http://www.example.com/caf%E9-con-leche.htm",
)
def test_domains_are_case_insensitive(self):
self.assertEqual(
canonicalize_url("http://www.EXAMPLE.com/"), "http://www.example.com/"
)
def test_canonicalize_idns(self):
self.assertEqual(
canonicalize_url("http://www.bücher.de?q=bücher"),
"http://www.xn--bcher-kva.de/?q=b%C3%BCcher",
)
# Japanese (+ reordering query parameters)
self.assertEqual(
canonicalize_url("http://はじめよう.みんな/?query=サ&maxResults=5"),
"http://xn--p8j9a0d9c9a.xn--q9jyb4c/?maxResults=5&query=%E3%82%B5",
)
def test_quoted_slash_and_question_sign(self):
self.assertEqual(
canonicalize_url("http://foo.com/AC%2FDC+rocks%3f/?yeah=1"),
"http://foo.com/AC%2FDC+rocks%3F/?yeah=1",
)
self.assertEqual(
canonicalize_url("http://foo.com/AC%2FDC/"), "http://foo.com/AC%2FDC/"
)
def test_canonicalize_urlparsed(self):
# canonicalize_url() can be passed an already urlparse'd URL
self.assertEqual(
canonicalize_url(urlparse("http://www.example.com/résumé?q=résumé")),
"http://www.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
self.assertEqual(
canonicalize_url(urlparse("http://www.example.com/caf%e9-con-leche.htm")),
"http://www.example.com/caf%E9-con-leche.htm",
)
self.assertEqual(
canonicalize_url(
urlparse("http://www.example.com/a%a3do?q=r%c3%a9sum%c3%a9")
),
"http://www.example.com/a%A3do?q=r%C3%A9sum%C3%A9",
)
def test_canonicalize_parse_url(self):
# parse_url() wraps urlparse and is used in link extractors
self.assertEqual(
canonicalize_url(parse_url("http://www.example.com/résumé?q=résumé")),
"http://www.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
self.assertEqual(
canonicalize_url(parse_url("http://www.example.com/caf%e9-con-leche.htm")),
"http://www.example.com/caf%E9-con-leche.htm",
)
self.assertEqual(
canonicalize_url(
parse_url("http://www.example.com/a%a3do?q=r%c3%a9sum%c3%a9")
),
"http://www.example.com/a%A3do?q=r%C3%A9sum%C3%A9",
)
def test_canonicalize_url_idempotence(self):
for url, enc in [
("http://www.bücher.de/résumé?q=résumé", "utf8"),
("http://www.example.com/résumé?q=résumé", "latin1"),
("http://www.example.com/résumé?country=Россия", "cp1251"),
("http://はじめよう.みんな/?query=サ&maxResults=5", "iso2022jp"),
]:
canonicalized = canonicalize_url(url, encoding=enc)
            # if we canonicalize again, we get the same result
self.assertEqual(
canonicalize_url(canonicalized, encoding=enc), canonicalized
)
# without encoding, already canonicalized URL is canonicalized identically
self.assertEqual(canonicalize_url(canonicalized), canonicalized)
def test_canonicalize_url_idna_exceptions(self):
# missing DNS label
self.assertEqual(
canonicalize_url("http://.example.com/résumé?q=résumé"),
"http://.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
# DNS label too long
self.assertEqual(
canonicalize_url(f"http://www.{"example" * 11}.com/résumé?q=résumé"),
f"http://www.{"example" * 11}.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
def test_preserve_nonfragment_hash(self):
# don't decode `%23` to `#`
self.assertEqual(
canonicalize_url("http://www.example.com/path/to/%23/foo/bar"),
"http://www.example.com/path/to/%23/foo/bar",
)
self.assertEqual(
canonicalize_url("http://www.example.com/path/to/%23/foo/bar#frag"),
"http://www.example.com/path/to/%23/foo/bar",
)
self.assertEqual(
canonicalize_url(
"http://www.example.com/path/to/%23/foo/bar#frag", keep_fragments=True
),
"http://www.example.com/path/to/%23/foo/bar#frag",
)
self.assertEqual(
canonicalize_url(
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2Fpath%2Fto%2F%23%2Fbar%2Ffoo"
),
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2Fpath%2Fto%2F%23%2Fbar%2Ffoo",
)
self.assertEqual(
canonicalize_url(
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo#frag"
),
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo",
)
self.assertEqual(
canonicalize_url(
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo#frag",
keep_fragments=True,
),
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo#frag",
)
class DataURITests(unittest.TestCase):
def test_default_mediatype_charset(self):
result = parse_data_uri("data:,A%20brief%20note")
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(result.media_type_parameters, {"charset": "US-ASCII"})
self.assertEqual(result.data, b"A brief note")
def test_text_uri(self):
result = parse_data_uri("data:,A%20brief%20note")
self.assertEqual(result.data, b"A brief note")
def test_bytes_uri(self):
result = parse_data_uri(b"data:,A%20brief%20note")
self.assertEqual(result.data, b"A brief note")
def test_unicode_uri(self):
result = parse_data_uri("data:,é")
self.assertEqual(result.data, "é".encode())
def test_default_mediatype(self):
result = parse_data_uri("data:;charset=iso-8859-7,%be%d3%be")
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(result.media_type_parameters, {"charset": "iso-8859-7"})
self.assertEqual(result.data, b"\xbe\xd3\xbe")
def test_text_charset(self):
result = parse_data_uri("data:text/plain;charset=iso-8859-7,%be%d3%be")
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(result.media_type_parameters, {"charset": "iso-8859-7"})
self.assertEqual(result.data, b"\xbe\xd3\xbe")
def test_mediatype_parameters(self):
result = parse_data_uri(
"data:text/plain;"
"foo=%22foo;bar%5C%22%22;"
"charset=utf-8;"
"bar=%22foo;%5C%22foo%20;/%20,%22,"
"%CE%8E%CE%A3%CE%8E"
)
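        # the percent-decoded parameter values are quoted-strings; the parser strips the quotes and backslash escapes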
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(
result.media_type_parameters,
{"charset": "utf-8", "foo": 'foo;bar"', "bar": 'foo;"foo ;/ ,'},
)
self.assertEqual(result.data, b"\xce\x8e\xce\xa3\xce\x8e")
def test_base64(self):
result = parse_data_uri("data:text/plain;base64," "SGVsbG8sIHdvcmxkLg%3D%3D")
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(result.data, b"Hello, world.")
def test_base64_spaces(self):
result = parse_data_uri(
"data:text/plain;base64,SGVsb%20G8sIH%0A%20%20"
"dvcm%20%20%20xk%20Lg%3D%0A%3D"
)
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(result.data, b"Hello, world.")
result = parse_data_uri(
"data:text/plain;base64,SGVsb G8sIH\n " "dvcm xk Lg%3D\n%3D"
)
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(result.data, b"Hello, world.")
def test_wrong_base64_param(self):
with self.assertRaises(ValueError):
parse_data_uri("data:text/plain;baes64,SGVsbG8sIHdvcmxkLg%3D%3D")
def test_missing_comma(self):
with self.assertRaises(ValueError):
parse_data_uri("data:A%20brief%20note")
def test_missing_scheme(self):
with self.assertRaises(ValueError):
parse_data_uri("text/plain,A%20brief%20note")
def test_wrong_scheme(self):
with self.assertRaises(ValueError):
parse_data_uri("http://example.com/")
def test_scheme_case_insensitive(self):
result = parse_data_uri("DATA:,A%20brief%20note")
self.assertEqual(result.data, b"A brief note")
result = parse_data_uri("DaTa:,A%20brief%20note")
self.assertEqual(result.data, b"A brief note")
if __name__ == "__main__":
unittest.main()
|
import os
import unittest
from urllib.parse import urlparse
import pytest
from w3lib.url import (
add_or_replace_parameter,
add_or_replace_parameters,
any_to_uri,
canonicalize_url,
file_uri_to_path,
is_url,
parse_data_uri,
parse_url,
path_to_file_uri,
safe_download_url,
safe_url_string,
url_query_parameter,
url_query_cleaner,
)
class UrlTests(unittest.TestCase):
def test_safe_url_string(self):
# Motoko Kusanagi (Cyborg from Ghost in the Shell)
motoko = "\u8349\u8599 \u7d20\u5b50"
self.assertEqual(
safe_url_string(motoko), # note the %20 for space
"%E8%8D%89%E8%96%99%20%E7%B4%A0%E5%AD%90",
)
self.assertEqual(
safe_url_string(motoko), safe_url_string(safe_url_string(motoko))
)
self.assertEqual(safe_url_string("©"), "%C2%A9") # copyright symbol
# page-encoding does not affect URL path
self.assertEqual(safe_url_string("©", "iso-8859-1"), "%C2%A9")
# path_encoding does
self.assertEqual(safe_url_string("©", path_encoding="iso-8859-1"), "%A9")
self.assertEqual(
safe_url_string("http://www.example.org/"), "http://www.example.org/"
)
alessi = "/ecommerce/oggetto/Te \xf2/tea-strainer/1273"
self.assertEqual(
safe_url_string(alessi), "/ecommerce/oggetto/Te%20%C3%B2/tea-strainer/1273"
)
self.assertEqual(
safe_url_string(
"http://www.example.com/test?p(29)url(http://www.another.net/page)"
),
"http://www.example.com/test?p(29)url(http://www.another.net/page)",
)
self.assertEqual(
safe_url_string(
"http://www.example.com/Brochures_&_Paint_Cards&PageSize=200"
),
"http://www.example.com/Brochures_&_Paint_Cards&PageSize=200",
)
# page-encoding does not affect URL path
# we still end up UTF-8 encoding characters before percent-escaping
safeurl = safe_url_string("http://www.example.com/£")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3")
safeurl = safe_url_string("http://www.example.com/£", encoding="utf-8")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3")
safeurl = safe_url_string("http://www.example.com/£", encoding="latin-1")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3")
safeurl = safe_url_string("http://www.example.com/£", path_encoding="latin-1")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%A3")
self.assertTrue(isinstance(safe_url_string(b"http://example.com/"), str))
def test_safe_url_string_remove_ascii_tab_and_newlines(self):
self.assertEqual(
safe_url_string("http://example.com/test\n.html"),
"http://example.com/test.html",
)
self.assertEqual(
safe_url_string("http://example.com/test\t.html"),
"http://example.com/test.html",
)
self.assertEqual(
safe_url_string("http://example.com/test\r.html"),
"http://example.com/test.html",
)
self.assertEqual(
safe_url_string("http://example.com/test\r.html\n"),
"http://example.com/test.html",
)
self.assertEqual(
safe_url_string("http://example.com/test\r\n.html\t"),
"http://example.com/test.html",
)
self.assertEqual(
safe_url_string("http://example.com/test\a\n.html"),
"http://example.com/test%07.html",
)
def test_safe_url_string_unsafe_chars(self):
safeurl = safe_url_string(
r"http://localhost:8001/unwise{,},|,\,^,[,],`?|=[]&[]=|"
)
self.assertEqual(
safeurl, r"http://localhost:8001/unwise%7B,%7D,|,%5C,%5E,[,],%60?|=[]&[]=|"
)
def test_safe_url_string_quote_path(self):
safeurl = safe_url_string('http://google.com/"hello"', quote_path=True)
self.assertEqual(safeurl, "http://google.com/%22hello%22")
safeurl = safe_url_string('http://google.com/"hello"', quote_path=False)
self.assertEqual(safeurl, 'http://google.com/"hello"')
safeurl = safe_url_string('http://google.com/"hello"')
self.assertEqual(safeurl, "http://google.com/%22hello%22")
def test_safe_url_string_with_query(self):
safeurl = safe_url_string("http://www.example.com/£?unit=µ")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%C2%B5")
safeurl = safe_url_string("http://www.example.com/£?unit=µ", encoding="utf-8")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%C2%B5")
safeurl = safe_url_string("http://www.example.com/£?unit=µ", encoding="latin-1")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%B5")
safeurl = safe_url_string(
"http://www.example.com/£?unit=µ", path_encoding="latin-1"
)
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%A3?unit=%C2%B5")
safeurl = safe_url_string(
"http://www.example.com/£?unit=µ",
encoding="latin-1",
path_encoding="latin-1",
)
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%A3?unit=%B5")
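    # The assertions above exercise the split between the two parameters:
    # `encoding` only affects how the query string is percent-encoded, while
    # `path_encoding` only affects the path; both default to UTF-8 before
    # percent-escaping.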
def test_safe_url_string_misc(self):
# mixing Unicode and percent-escaped sequences
safeurl = safe_url_string("http://www.example.com/£?unit=%C2%B5")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%C2%B5")
safeurl = safe_url_string("http://www.example.com/%C2%A3?unit=µ")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%C2%B5")
def test_safe_url_string_bytes_input(self):
safeurl = safe_url_string(b"http://www.example.com/")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/")
# bytes input is assumed to be UTF-8
safeurl = safe_url_string(b"http://www.example.com/\xc2\xb5")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%B5")
# page-encoding encoded bytes still end up as UTF-8 sequences in path
safeurl = safe_url_string(b"http://www.example.com/\xb5", encoding="latin1")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%B5")
safeurl = safe_url_string(
b"http://www.example.com/\xa3?unit=\xb5", encoding="latin1"
)
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%B5")
def test_safe_url_string_bytes_input_nonutf8(self):
# latin1
safeurl = safe_url_string(b"http://www.example.com/\xa3?unit=\xb5")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%A3?unit=%B5")
# cp1251
# >>> 'Россия'.encode('cp1251')
# '\xd0\xee\xf1\xf1\xe8\xff'
safeurl = safe_url_string(
b"http://www.example.com/country/\xd0\xee\xf1\xf1\xe8\xff"
)
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/country/%D0%EE%F1%F1%E8%FF")
def test_safe_url_idna(self):
# adapted from:
# https://ssl.icu-project.org/icu-bin/idnbrowser
# http://unicode.org/faq/idn.html
# + various others
websites = (
(
"http://www.färgbolaget.nu/färgbolaget",
"http://www.xn--frgbolaget-q5a.nu/f%C3%A4rgbolaget",
),
(
"http://www.räksmörgås.se/?räksmörgås=yes",
"http://www.xn--rksmrgs-5wao1o.se/?r%C3%A4ksm%C3%B6rg%C3%A5s=yes",
),
(
"http://www.brændendekærlighed.com/brændende/kærlighed",
"http://www.xn--brndendekrlighed-vobh.com/br%C3%A6ndende/k%C3%A6rlighed",
),
("http://www.예비교사.com", "http://www.xn--9d0bm53a3xbzui.com"),
("http://理容ナカムラ.com", "http://xn--lck1c3crb1723bpq4a.com"),
("http://あーるいん.com", "http://xn--l8je6s7a45b.com"),
# --- real websites ---
            # in practice, this redirects (301) to http://www.buecher.de/?q=b%C3%BCcher
(
"http://www.bücher.de/?q=bücher",
"http://www.xn--bcher-kva.de/?q=b%C3%BCcher",
),
# Japanese
(
"http://はじめよう.みんな/?query=サ&maxResults=5",
"http://xn--p8j9a0d9c9a.xn--q9jyb4c/?query=%E3%82%B5&maxResults=5",
),
# Russian
("http://кто.рф/", "http://xn--j1ail.xn--p1ai/"),
(
"http://кто.рф/index.php?domain=Что",
"http://xn--j1ail.xn--p1ai/index.php?domain=%D0%A7%D1%82%D0%BE",
),
# Korean
("http://내도메인.한국/", "http://xn--220b31d95hq8o.xn--3e0b707e/"),
(
"http://맨체스터시티축구단.한국/",
"http://xn--2e0b17htvgtvj9haj53ccob62ni8d.xn--3e0b707e/",
),
# Arabic
("http://nic.شبكة", "http://nic.xn--ngbc5azd"),
# Chinese
("https://www.贷款.在线", "https://www.xn--0kwr83e.xn--3ds443g"),
("https://www2.xn--0kwr83e.在线", "https://www2.xn--0kwr83e.xn--3ds443g"),
("https://www3.贷款.xn--3ds443g", "https://www3.xn--0kwr83e.xn--3ds443g"),
)
for idn_input, safe_result in websites:
safeurl = safe_url_string(idn_input)
self.assertEqual(safeurl, safe_result)
# make sure the safe URL is unchanged when made safe a 2nd time
for _, safe_result in websites:
safeurl = safe_url_string(safe_result)
self.assertEqual(safeurl, safe_result)
def test_safe_url_idna_encoding_failure(self):
# missing DNS label
self.assertEqual(
safe_url_string("http://.example.com/résumé?q=résumé"),
"http://.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
# DNS label too long
self.assertEqual(
safe_url_string(f"http://www.{'example' * 11}.com/résumé?q=résumé"),
f"http://www.{'example' * 11}.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
def test_safe_url_port_number(self):
self.assertEqual(
safe_url_string("http://www.example.com:80/résumé?q=résumé"),
"http://www.example.com:80/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
self.assertEqual(
safe_url_string("http://www.example.com:/résumé?q=résumé"),
"http://www.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
def test_safe_url_string_preserve_nonfragment_hash(self):
# don't decode `%23` to `#`
self.assertEqual(
safe_url_string("http://www.example.com/path/to/%23/foo/bar"),
"http://www.example.com/path/to/%23/foo/bar",
)
self.assertEqual(
safe_url_string("http://www.example.com/path/to/%23/foo/bar#frag"),
"http://www.example.com/path/to/%23/foo/bar#frag",
)
self.assertEqual(
safe_url_string(
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2Fpath%2Fto%2F%23%2Fbar%2Ffoo"
),
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2Fpath%2Fto%2F%23%2Fbar%2Ffoo",
)
self.assertEqual(
safe_url_string(
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo#frag"
),
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo#frag",
)
def test_safe_download_url(self):
self.assertEqual(
safe_download_url("http://www.example.org"), "http://www.example.org/"
)
self.assertEqual(
safe_download_url("http://www.example.org/../"), "http://www.example.org/"
)
self.assertEqual(
safe_download_url("http://www.example.org/../../images/../image"),
"http://www.example.org/image",
)
self.assertEqual(
safe_download_url("http://www.example.org/dir/"),
"http://www.example.org/dir/",
)
self.assertEqual(
safe_download_url(b"http://www.example.org/dir/"),
"http://www.example.org/dir/",
)
# Encoding related tests
self.assertEqual(
safe_download_url(
b"http://www.example.org?\xa3",
encoding="latin-1",
path_encoding="latin-1",
),
"http://www.example.org/?%A3",
)
self.assertEqual(
safe_download_url(
b"http://www.example.org?\xc2\xa3",
encoding="utf-8",
path_encoding="utf-8",
),
"http://www.example.org/?%C2%A3",
)
self.assertEqual(
safe_download_url(
b"http://www.example.org/\xc2\xa3?\xc2\xa3",
encoding="utf-8",
path_encoding="latin-1",
),
"http://www.example.org/%A3?%C2%A3",
)
def test_is_url(self):
self.assertTrue(is_url("http://www.example.org"))
self.assertTrue(is_url("https://www.example.org"))
self.assertTrue(is_url("file:///some/path"))
self.assertFalse(is_url("foo://bar"))
self.assertFalse(is_url("foo--bar"))
def test_url_query_parameter(self):
self.assertEqual(
url_query_parameter("product.html?id=200&foo=bar", "id"), "200"
)
self.assertEqual(
url_query_parameter("product.html?id=200&foo=bar", "notthere", "mydefault"),
"mydefault",
)
self.assertEqual(url_query_parameter("product.html?id=", "id"), None)
self.assertEqual(
url_query_parameter("product.html?id=", "id", keep_blank_values=1), ""
)
def test_url_query_parameter_2(self):
"""
        This problem was seen several times in feeds: sometimes an affiliate URL
        contains a nested, encoded affiliate URL with the direct URL as a parameter.
        For example:
aff_url1 = 'http://www.tkqlhce.com/click-2590032-10294381?url=http%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FArgosCreateReferral%3FstoreId%3D10001%26langId%3D-1%26referrer%3DCOJUN%26params%3Dadref%253DGarden+and+DIY-%3EGarden+furniture-%3EChildren%26%2339%3Bs+garden+furniture%26referredURL%3Dhttp%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FProductDisplay%253FstoreId%253D10001%2526catalogId%253D1500001501%2526productId%253D1500357023%2526langId%253D-1'
the typical code to extract needed URL from it is:
aff_url2 = url_query_parameter(aff_url1, 'url')
after this aff2_url is:
        'http://www.argos.co.uk/webapp/wcs/stores/servlet/ArgosCreateReferral?storeId=10001&langId=-1&referrer=COJUN&params=adref%3DGarden and DIY->Garden furniture->Children's garden furniture&referredURL=http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay%3FstoreId%3D10001%26catalogId%3D1500001501%26productId%3D1500357023%26langId%3D-1'
the direct URL extraction is
url = url_query_parameter(aff_url2, 'referredURL')
        but this will not work, because aff_url2 contains ' (an apostrophe that was
        HTML-entity-encoded in the feed) and the URL extraction will fail; the current
        workaround, made in the spider, is just to replace ' with %27
"""
        return  # FIXME: this test should pass but currently doesn't
# correct case
aff_url1 = "http://www.anrdoezrs.net/click-2590032-10294381?url=http%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FArgosCreateReferral%3FstoreId%3D10001%26langId%3D-1%26referrer%3DCOJUN%26params%3Dadref%253DGarden+and+DIY-%3EGarden+furniture-%3EGarden+table+and+chair+sets%26referredURL%3Dhttp%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FProductDisplay%253FstoreId%253D10001%2526catalogId%253D1500001501%2526productId%253D1500357199%2526langId%253D-1"
aff_url2 = url_query_parameter(aff_url1, "url")
self.assertEqual(
aff_url2,
"http://www.argos.co.uk/webapp/wcs/stores/servlet/ArgosCreateReferral?storeId=10001&langId=-1&referrer=COJUN¶ms=adref%3DGarden and DIY->Garden furniture->Garden table and chair sets&referredURL=http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay%3FstoreId%3D10001%26catalogId%3D1500001501%26productId%3D1500357199%26langId%3D-1",
)
prod_url = url_query_parameter(aff_url2, "referredURL")
self.assertEqual(
prod_url,
"http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay?storeId=10001&catalogId=1500001501&productId=1500357199&langId=-1",
)
# weird case
aff_url1 = "http://www.tkqlhce.com/click-2590032-10294381?url=http%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FArgosCreateReferral%3FstoreId%3D10001%26langId%3D-1%26referrer%3DCOJUN%26params%3Dadref%253DGarden+and+DIY-%3EGarden+furniture-%3EChildren%26%2339%3Bs+garden+furniture%26referredURL%3Dhttp%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FProductDisplay%253FstoreId%253D10001%2526catalogId%253D1500001501%2526productId%253D1500357023%2526langId%253D-1"
aff_url2 = url_query_parameter(aff_url1, "url")
self.assertEqual(
aff_url2,
"http://www.argos.co.uk/webapp/wcs/stores/servlet/ArgosCreateReferral?storeId=10001&langId=-1&referrer=COJUN¶ms=adref%3DGarden and DIY->Garden furniture->Children's garden furniture&referredURL=http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay%3FstoreId%3D10001%26catalogId%3D1500001501%26productId%3D1500357023%26langId%3D-1",
)
prod_url = url_query_parameter(aff_url2, "referredURL")
# fails, prod_url is None now
self.assertEqual(
prod_url,
"http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay?storeId=10001&catalogId=1500001501&productId=1500357023&langId=-1",
)
def test_add_or_replace_parameter(self):
url = "http://domain/test"
self.assertEqual(
add_or_replace_parameter(url, "arg", "v"), "http://domain/test?arg=v"
)
url = "http://domain/test?arg1=v1&arg2=v2&arg3=v3"
self.assertEqual(
add_or_replace_parameter(url, "arg4", "v4"),
"http://domain/test?arg1=v1&arg2=v2&arg3=v3&arg4=v4",
)
self.assertEqual(
add_or_replace_parameter(url, "arg3", "nv3"),
"http://domain/test?arg1=v1&arg2=v2&arg3=nv3",
)
self.assertEqual(
add_or_replace_parameter(
"http://domain/moreInfo.asp?prodID=", "prodID", "20"
),
"http://domain/moreInfo.asp?prodID=20",
)
url = "http://rmc-offers.co.uk/productlist.asp?BCat=2%2C60&CatID=60"
self.assertEqual(
add_or_replace_parameter(url, "BCat", "newvalue"),
"http://rmc-offers.co.uk/productlist.asp?BCat=newvalue&CatID=60",
)
url = "http://rmc-offers.co.uk/productlist.asp?BCat=2,60&CatID=60"
self.assertEqual(
add_or_replace_parameter(url, "BCat", "newvalue"),
"http://rmc-offers.co.uk/productlist.asp?BCat=newvalue&CatID=60",
)
url = "http://rmc-offers.co.uk/productlist.asp?"
self.assertEqual(
add_or_replace_parameter(url, "BCat", "newvalue"),
"http://rmc-offers.co.uk/productlist.asp?BCat=newvalue",
)
url = "http://example.com/?version=1&pageurl=http%3A%2F%2Fwww.example.com%2Ftest%2F%23fragment%3Dy¶m2=value2"
self.assertEqual(
add_or_replace_parameter(url, "version", "2"),
"http://example.com/?version=2&pageurl=http%3A%2F%2Fwww.example.com%2Ftest%2F%23fragment%3Dy¶m2=value2",
)
self.assertEqual(
add_or_replace_parameter(url, "pageurl", "test"),
"http://example.com/?version=1&pageurl=test¶m2=value2",
)
url = "http://domain/test?arg1=v1&arg2=v2&arg1=v3"
self.assertEqual(
add_or_replace_parameter(url, "arg4", "v4"),
"http://domain/test?arg1=v1&arg2=v2&arg1=v3&arg4=v4",
)
self.assertEqual(
add_or_replace_parameter(url, "arg1", "v3"),
"http://domain/test?arg1=v3&arg2=v2",
)
@pytest.mark.xfail(reason="https://github.com/scrapy/w3lib/issues/164")
def test_add_or_replace_parameter_fail(self):
self.assertEqual(
add_or_replace_parameter(
"http://domain/test?arg1=v1;arg2=v2", "arg1", "v3"
),
"http://domain/test?arg1=v3&arg2=v2",
)
def test_add_or_replace_parameters(self):
url = "http://domain/test"
self.assertEqual(
add_or_replace_parameters(url, {"arg": "v"}), "http://domain/test?arg=v"
)
url = "http://domain/test?arg1=v1&arg2=v2&arg3=v3"
self.assertEqual(
add_or_replace_parameters(url, {"arg4": "v4"}),
"http://domain/test?arg1=v1&arg2=v2&arg3=v3&arg4=v4",
)
self.assertEqual(
add_or_replace_parameters(url, {"arg4": "v4", "arg3": "v3new"}),
"http://domain/test?arg1=v1&arg2=v2&arg3=v3new&arg4=v4",
)
url = "http://domain/test?arg1=v1&arg2=v2&arg1=v3"
self.assertEqual(
add_or_replace_parameters(url, {"arg4": "v4"}),
"http://domain/test?arg1=v1&arg2=v2&arg1=v3&arg4=v4",
)
self.assertEqual(
add_or_replace_parameters(url, {"arg1": "v3"}),
"http://domain/test?arg1=v3&arg2=v2",
)
def test_add_or_replace_parameters_does_not_change_input_param(self):
url = "http://domain/test?arg=original"
input_param = {"arg": "value"}
add_or_replace_parameters(url, input_param) # noqa
self.assertEqual(input_param, {"arg": "value"})
def test_url_query_cleaner(self):
self.assertEqual("product.html", url_query_cleaner("product.html?"))
self.assertEqual("product.html", url_query_cleaner("product.html?&"))
self.assertEqual(
"product.html?id=200",
url_query_cleaner("product.html?id=200&foo=bar&name=wired", ["id"]),
)
self.assertEqual(
"product.html?id=200",
url_query_cleaner("product.html?&id=200&&foo=bar&name=wired", ["id"]),
)
self.assertEqual(
"product.html", url_query_cleaner("product.html?foo=bar&name=wired", ["id"])
)
self.assertEqual(
"product.html?id=200&name=wired",
url_query_cleaner("product.html?id=200&foo=bar&name=wired", ["id", "name"]),
)
self.assertEqual(
"product.html?id",
url_query_cleaner("product.html?id&other=3&novalue=", ["id"]),
)
# default is to remove duplicate keys
self.assertEqual(
"product.html?d=1",
url_query_cleaner("product.html?d=1&e=b&d=2&d=3&other=other", ["d"]),
)
# unique=False disables duplicate keys filtering
self.assertEqual(
"product.html?d=1&d=2&d=3",
url_query_cleaner(
"product.html?d=1&e=b&d=2&d=3&other=other", ["d"], unique=False
),
)
self.assertEqual(
"product.html?id=200&foo=bar",
url_query_cleaner(
"product.html?id=200&foo=bar&name=wired#id20", ["id", "foo"]
),
)
self.assertEqual(
"product.html?foo=bar&name=wired",
url_query_cleaner(
"product.html?id=200&foo=bar&name=wired", ["id"], remove=True
),
)
self.assertEqual(
"product.html?name=wired",
url_query_cleaner(
"product.html?id=2&foo=bar&name=wired", ["id", "foo"], remove=True
),
)
self.assertEqual(
"product.html?foo=bar&name=wired",
url_query_cleaner(
"product.html?id=2&foo=bar&name=wired", ["id", "footo"], remove=True
),
)
self.assertEqual(
"product.html", url_query_cleaner("product.html", ["id"], remove=True)
)
self.assertEqual(
"product.html", url_query_cleaner("product.html?&", ["id"], remove=True)
)
self.assertEqual(
"product.html?foo=bar",
url_query_cleaner("product.html?foo=bar&name=wired", "foo"),
)
self.assertEqual(
"product.html?foobar=wired",
url_query_cleaner("product.html?foo=bar&foobar=wired", "foobar"),
)
def test_url_query_cleaner_keep_fragments(self):
self.assertEqual(
"product.html?id=200#foo",
url_query_cleaner(
"product.html?id=200&foo=bar&name=wired#foo",
["id"],
keep_fragments=True,
),
)
def test_path_to_file_uri(self):
if os.name == "nt":
self.assertEqual(
path_to_file_uri(r"C:\\windows\clock.avi"),
"file:///C:/windows/clock.avi",
)
else:
self.assertEqual(
path_to_file_uri("/some/path.txt"), "file:///some/path.txt"
)
fn = "test.txt"
x = path_to_file_uri(fn)
self.assertTrue(x.startswith("file:///"))
self.assertEqual(file_uri_to_path(x).lower(), os.path.abspath(fn).lower())
def test_file_uri_to_path(self):
if os.name == "nt":
self.assertEqual(
file_uri_to_path("file:///C:/windows/clock.avi"),
r"C:\\windows\clock.avi",
)
uri = "file:///C:/windows/clock.avi"
uri2 = path_to_file_uri(file_uri_to_path(uri))
self.assertEqual(uri, uri2)
else:
self.assertEqual(
file_uri_to_path("file:///path/to/test.txt"), "/path/to/test.txt"
)
self.assertEqual(file_uri_to_path("/path/to/test.txt"), "/path/to/test.txt")
uri = "file:///path/to/test.txt"
uri2 = path_to_file_uri(file_uri_to_path(uri))
self.assertEqual(uri, uri2)
self.assertEqual(file_uri_to_path("test.txt"), "test.txt")
def test_any_to_uri(self):
if os.name == "nt":
self.assertEqual(
any_to_uri(r"C:\\windows\clock.avi"), "file:///C:/windows/clock.avi"
)
else:
self.assertEqual(any_to_uri("/some/path.txt"), "file:///some/path.txt")
self.assertEqual(any_to_uri("file:///some/path.txt"), "file:///some/path.txt")
self.assertEqual(
any_to_uri("http://www.example.com/some/path.txt"),
"http://www.example.com/some/path.txt",
)
class CanonicalizeUrlTest(unittest.TestCase):
def test_canonicalize_url(self):
# simplest case
self.assertEqual(
canonicalize_url("http://www.example.com/"), "http://www.example.com/"
)
def test_return_str(self):
assert isinstance(canonicalize_url("http://www.example.com"), str)
assert isinstance(canonicalize_url(b"http://www.example.com"), str)
def test_append_missing_path(self):
self.assertEqual(
canonicalize_url("http://www.example.com"), "http://www.example.com/"
)
def test_typical_usage(self):
self.assertEqual(
canonicalize_url("http://www.example.com/do?a=1&b=2&c=3"),
"http://www.example.com/do?a=1&b=2&c=3",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?c=1&b=2&a=3"),
"http://www.example.com/do?a=3&b=2&c=1",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?&a=1"),
"http://www.example.com/do?a=1",
)
def test_port_number(self):
self.assertEqual(
canonicalize_url("http://www.example.com:8888/do?a=1&b=2&c=3"),
"http://www.example.com:8888/do?a=1&b=2&c=3",
)
# trailing empty ports are removed
self.assertEqual(
canonicalize_url("http://www.example.com:/do?a=1&b=2&c=3"),
"http://www.example.com/do?a=1&b=2&c=3",
)
def test_sorting(self):
self.assertEqual(
canonicalize_url("http://www.example.com/do?c=3&b=5&b=2&a=50"),
"http://www.example.com/do?a=50&b=2&b=5&c=3",
)
def test_keep_blank_values(self):
self.assertEqual(
canonicalize_url(
"http://www.example.com/do?b=&a=2", keep_blank_values=False
),
"http://www.example.com/do?a=2",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?b=&a=2"),
"http://www.example.com/do?a=2&b=",
)
self.assertEqual(
canonicalize_url(
"http://www.example.com/do?b=&c&a=2", keep_blank_values=False
),
"http://www.example.com/do?a=2",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?b=&c&a=2"),
"http://www.example.com/do?a=2&b=&c=",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?1750,4"),
"http://www.example.com/do?1750%2C4=",
)
def test_spaces(self):
self.assertEqual(
canonicalize_url("http://www.example.com/do?q=a space&a=1"),
"http://www.example.com/do?a=1&q=a+space",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?q=a+space&a=1"),
"http://www.example.com/do?a=1&q=a+space",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?q=a%20space&a=1"),
"http://www.example.com/do?a=1&q=a+space",
)
def test_canonicalize_url_unicode_path(self):
self.assertEqual(
canonicalize_url("http://www.example.com/résumé"),
"http://www.example.com/r%C3%A9sum%C3%A9",
)
def test_canonicalize_url_unicode_query_string(self):
# default encoding for path and query is UTF-8
self.assertEqual(
canonicalize_url("http://www.example.com/résumé?q=résumé"),
"http://www.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
# passed encoding will affect query string
self.assertEqual(
canonicalize_url(
"http://www.example.com/résumé?q=résumé", encoding="latin1"
),
"http://www.example.com/r%C3%A9sum%C3%A9?q=r%E9sum%E9",
)
self.assertEqual(
canonicalize_url(
"http://www.example.com/résumé?country=Россия", encoding="cp1251"
),
"http://www.example.com/r%C3%A9sum%C3%A9?country=%D0%EE%F1%F1%E8%FF",
)
def test_canonicalize_url_unicode_query_string_wrong_encoding(self):
# trying to encode with wrong encoding
# fallback to UTF-8
self.assertEqual(
canonicalize_url(
"http://www.example.com/résumé?currency=€", encoding="latin1"
),
"http://www.example.com/r%C3%A9sum%C3%A9?currency=%E2%82%AC",
)
self.assertEqual(
canonicalize_url(
"http://www.example.com/résumé?country=Россия", encoding="latin1"
),
"http://www.example.com/r%C3%A9sum%C3%A9?country=%D0%A0%D0%BE%D1%81%D1%81%D0%B8%D1%8F",
)
def test_normalize_percent_encoding_in_paths(self):
self.assertEqual(
canonicalize_url("http://www.example.com/r%c3%a9sum%c3%a9"),
"http://www.example.com/r%C3%A9sum%C3%A9",
)
# non-UTF8 encoded sequences: they should be kept untouched, only upper-cased
# 'latin1'-encoded sequence in path
self.assertEqual(
canonicalize_url("http://www.example.com/a%a3do"),
"http://www.example.com/a%A3do",
)
# 'latin1'-encoded path, UTF-8 encoded query string
self.assertEqual(
canonicalize_url("http://www.example.com/a%a3do?q=r%c3%a9sum%c3%a9"),
"http://www.example.com/a%A3do?q=r%C3%A9sum%C3%A9",
)
# 'latin1'-encoded path and query string
self.assertEqual(
canonicalize_url("http://www.example.com/a%a3do?q=r%e9sum%e9"),
"http://www.example.com/a%A3do?q=r%E9sum%E9",
)
def test_normalize_percent_encoding_in_query_arguments(self):
self.assertEqual(
canonicalize_url("http://www.example.com/do?k=b%a3"),
"http://www.example.com/do?k=b%A3",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?k=r%c3%a9sum%c3%a9"),
"http://www.example.com/do?k=r%C3%A9sum%C3%A9",
)
def test_non_ascii_percent_encoding_in_paths(self):
self.assertEqual(
canonicalize_url("http://www.example.com/a do?a=1"),
"http://www.example.com/a%20do?a=1",
)
self.assertEqual(
canonicalize_url("http://www.example.com/a %20do?a=1"),
"http://www.example.com/a%20%20do?a=1",
)
self.assertEqual(
canonicalize_url("http://www.example.com/a do£.html?a=1"),
"http://www.example.com/a%20do%C2%A3.html?a=1",
)
self.assertEqual(
canonicalize_url(b"http://www.example.com/a do\xc2\xa3.html?a=1"),
"http://www.example.com/a%20do%C2%A3.html?a=1",
)
def test_non_ascii_percent_encoding_in_query_arguments(self):
self.assertEqual(
canonicalize_url("http://www.example.com/do?price=£500&a=5&z=3"),
"http://www.example.com/do?a=5&price=%C2%A3500&z=3",
)
self.assertEqual(
canonicalize_url(b"http://www.example.com/do?price=\xc2\xa3500&a=5&z=3"),
"http://www.example.com/do?a=5&price=%C2%A3500&z=3",
)
self.assertEqual(
canonicalize_url(b"http://www.example.com/do?price(\xc2\xa3)=500&a=1"),
"http://www.example.com/do?a=1&price%28%C2%A3%29=500",
)
def test_urls_with_auth_and_ports(self):
self.assertEqual(
canonicalize_url("http://user:[email protected]:81/do?now=1"),
"http://user:[email protected]:81/do?now=1",
)
def test_remove_fragments(self):
self.assertEqual(
canonicalize_url("http://user:[email protected]/do?a=1#frag"),
"http://user:[email protected]/do?a=1",
)
self.assertEqual(
canonicalize_url(
"http://user:[email protected]/do?a=1#frag", keep_fragments=True
),
"http://user:[email protected]/do?a=1#frag",
)
def test_dont_convert_safe_characters(self):
        # don't convert safe characters to their percent-encoded representation
self.assertEqual(
canonicalize_url(
"http://www.simplybedrooms.com/White-Bedroom-Furniture/Bedroom-Mirror:-Josephine-Cheval-Mirror.html"
),
"http://www.simplybedrooms.com/White-Bedroom-Furniture/Bedroom-Mirror:-Josephine-Cheval-Mirror.html",
)
def test_safe_characters_unicode(self):
        # urllib.quote uses a mapping cache of encoded characters. When parsing
        # an already percent-encoded URL, it will fail if that URL was not
        # percent-encoded as UTF-8; that is why canonicalize_url must always
        # convert the URLs to str. The following test asserts that behaviour.
self.assertEqual(
canonicalize_url("http://www.example.com/caf%E9-con-leche.htm"),
"http://www.example.com/caf%E9-con-leche.htm",
)
def test_domains_are_case_insensitive(self):
self.assertEqual(
canonicalize_url("http://www.EXAMPLE.com/"), "http://www.example.com/"
)
def test_canonicalize_idns(self):
self.assertEqual(
canonicalize_url("http://www.bücher.de?q=bücher"),
"http://www.xn--bcher-kva.de/?q=b%C3%BCcher",
)
# Japanese (+ reordering query parameters)
self.assertEqual(
canonicalize_url("http://はじめよう.みんな/?query=サ&maxResults=5"),
"http://xn--p8j9a0d9c9a.xn--q9jyb4c/?maxResults=5&query=%E3%82%B5",
)
def test_quoted_slash_and_question_sign(self):
self.assertEqual(
canonicalize_url("http://foo.com/AC%2FDC+rocks%3f/?yeah=1"),
"http://foo.com/AC%2FDC+rocks%3F/?yeah=1",
)
self.assertEqual(
canonicalize_url("http://foo.com/AC%2FDC/"), "http://foo.com/AC%2FDC/"
)
def test_canonicalize_urlparsed(self):
# canonicalize_url() can be passed an already urlparse'd URL
self.assertEqual(
canonicalize_url(urlparse("http://www.example.com/résumé?q=résumé")),
"http://www.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
self.assertEqual(
canonicalize_url(urlparse("http://www.example.com/caf%e9-con-leche.htm")),
"http://www.example.com/caf%E9-con-leche.htm",
)
self.assertEqual(
canonicalize_url(
urlparse("http://www.example.com/a%a3do?q=r%c3%a9sum%c3%a9")
),
"http://www.example.com/a%A3do?q=r%C3%A9sum%C3%A9",
)
def test_canonicalize_parse_url(self):
# parse_url() wraps urlparse and is used in link extractors
self.assertEqual(
canonicalize_url(parse_url("http://www.example.com/résumé?q=résumé")),
"http://www.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
self.assertEqual(
canonicalize_url(parse_url("http://www.example.com/caf%e9-con-leche.htm")),
"http://www.example.com/caf%E9-con-leche.htm",
)
self.assertEqual(
canonicalize_url(
parse_url("http://www.example.com/a%a3do?q=r%c3%a9sum%c3%a9")
),
"http://www.example.com/a%A3do?q=r%C3%A9sum%C3%A9",
)
def test_canonicalize_url_idempotence(self):
for url, enc in [
("http://www.bücher.de/résumé?q=résumé", "utf8"),
("http://www.example.com/résumé?q=résumé", "latin1"),
("http://www.example.com/résumé?country=Россия", "cp1251"),
("http://はじめよう.みんな/?query=サ&maxResults=5", "iso2022jp"),
]:
canonicalized = canonicalize_url(url, encoding=enc)
            # if we canonicalize again, we get the same result
self.assertEqual(
canonicalize_url(canonicalized, encoding=enc), canonicalized
)
# without encoding, already canonicalized URL is canonicalized identically
self.assertEqual(canonicalize_url(canonicalized), canonicalized)
def test_canonicalize_url_idna_exceptions(self):
# missing DNS label
self.assertEqual(
canonicalize_url("http://.example.com/résumé?q=résumé"),
"http://.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
# DNS label too long
self.assertEqual(
canonicalize_url(f"http://www.{'example' * 11}.com/résumé?q=résumé"),
f"http://www.{'example' * 11}.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
def test_preserve_nonfragment_hash(self):
# don't decode `%23` to `#`
self.assertEqual(
canonicalize_url("http://www.example.com/path/to/%23/foo/bar"),
"http://www.example.com/path/to/%23/foo/bar",
)
self.assertEqual(
canonicalize_url("http://www.example.com/path/to/%23/foo/bar#frag"),
"http://www.example.com/path/to/%23/foo/bar",
)
self.assertEqual(
canonicalize_url(
"http://www.example.com/path/to/%23/foo/bar#frag", keep_fragments=True
),
"http://www.example.com/path/to/%23/foo/bar#frag",
)
self.assertEqual(
canonicalize_url(
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2Fpath%2Fto%2F%23%2Fbar%2Ffoo"
),
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2Fpath%2Fto%2F%23%2Fbar%2Ffoo",
)
self.assertEqual(
canonicalize_url(
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo#frag"
),
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo",
)
self.assertEqual(
canonicalize_url(
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo#frag",
keep_fragments=True,
),
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo#frag",
)
class DataURITests(unittest.TestCase):
def test_default_mediatype_charset(self):
result = parse_data_uri("data:,A%20brief%20note")
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(result.media_type_parameters, {"charset": "US-ASCII"})
self.assertEqual(result.data, b"A brief note")
def test_text_uri(self):
result = parse_data_uri("data:,A%20brief%20note")
self.assertEqual(result.data, b"A brief note")
def test_bytes_uri(self):
result = parse_data_uri(b"data:,A%20brief%20note")
self.assertEqual(result.data, b"A brief note")
def test_unicode_uri(self):
result = parse_data_uri("data:,é")
self.assertEqual(result.data, "é".encode())
def test_default_mediatype(self):
result = parse_data_uri("data:;charset=iso-8859-7,%be%d3%be")
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(result.media_type_parameters, {"charset": "iso-8859-7"})
self.assertEqual(result.data, b"\xbe\xd3\xbe")
def test_text_charset(self):
result = parse_data_uri("data:text/plain;charset=iso-8859-7,%be%d3%be")
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(result.media_type_parameters, {"charset": "iso-8859-7"})
self.assertEqual(result.data, b"\xbe\xd3\xbe")
def test_mediatype_parameters(self):
result = parse_data_uri(
"data:text/plain;"
"foo=%22foo;bar%5C%22%22;"
"charset=utf-8;"
"bar=%22foo;%5C%22foo%20;/%20,%22,"
"%CE%8E%CE%A3%CE%8E"
)
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(
result.media_type_parameters,
{"charset": "utf-8", "foo": 'foo;bar"', "bar": 'foo;"foo ;/ ,'},
)
self.assertEqual(result.data, b"\xce\x8e\xce\xa3\xce\x8e")
def test_base64(self):
result = parse_data_uri("data:text/plain;base64," "SGVsbG8sIHdvcmxkLg%3D%3D")
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(result.data, b"Hello, world.")
def test_base64_spaces(self):
result = parse_data_uri(
"data:text/plain;base64,SGVsb%20G8sIH%0A%20%20"
"dvcm%20%20%20xk%20Lg%3D%0A%3D"
)
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(result.data, b"Hello, world.")
result = parse_data_uri(
"data:text/plain;base64,SGVsb G8sIH\n " "dvcm xk Lg%3D\n%3D"
)
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(result.data, b"Hello, world.")
def test_wrong_base64_param(self):
with self.assertRaises(ValueError):
parse_data_uri("data:text/plain;baes64,SGVsbG8sIHdvcmxkLg%3D%3D")
def test_missing_comma(self):
with self.assertRaises(ValueError):
parse_data_uri("data:A%20brief%20note")
def test_missing_scheme(self):
with self.assertRaises(ValueError):
parse_data_uri("text/plain,A%20brief%20note")
def test_wrong_scheme(self):
with self.assertRaises(ValueError):
parse_data_uri("http://example.com/")
def test_scheme_case_insensitive(self):
result = parse_data_uri("DATA:,A%20brief%20note")
self.assertEqual(result.data, b"A brief note")
result = parse_data_uri("DaTa:,A%20brief%20note")
self.assertEqual(result.data, b"A brief note")
if __name__ == "__main__":
unittest.main()
|
import os
import logging
import time
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.keys import Keys
from ocs_ci.ocs import constants
from ocs_ci.ocs.exceptions import ACMClusterDeployException
from ocs_ci.ocs.ui.base_ui import BaseUI
from ocs_ci.ocs.ui.helpers_ui import format_locator
from ocs_ci.ocs.ui.views import locators
from ocs_ci.utility.utils import (
get_ocp_version,
expose_ocp_version,
run_cmd,
)
from ocs_ci.ocs.constants import (
PLATFORM_XPATH_MAP,
ACM_PLATOFRM_VSPHERE_CRED_PREFIX,
VSPHERE_CA_FILE_PATH,
DATA_DIR,
ACM_OCP_RELEASE_IMG_URL_PREFIX,
ACM_VSPHERE_NETWORK,
ACM_CLUSTER_DEPLOY_TIMEOUT,
ACM_CLUSTER_DEPLOYMENT_LABEL_KEY,
ACM_CLUSTER_DEPLOYMENT_SECRET_TYPE_LABEL_KEY,
)
from ocs_ci.framework import config
from ocs_ci.utility.retry import retry
log = logging.getLogger(__name__)
class AcmPageNavigator(BaseUI):
"""
ACM Page Navigator Class
"""
def __init__(self, driver):
super().__init__(driver)
self.ocp_version = get_ocp_version()
self.acm_page_nav = locators[self.ocp_version]["acm_page"]
def navigate_welcome_page(self):
"""
Navigate to ACM Welcome Page
"""
log.info("Navigate into Home Page")
self.choose_expanded_mode(mode=True, locator=self.acm_page_nav["Home"])
self.do_click(locator=self.acm_page_nav["Welcome_page"])
def navigate_overview_page(self):
"""
Navigate to ACM Overview Page
"""
log.info("Navigate into Overview Page")
self.choose_expanded_mode(mode=True, locator=self.acm_page_nav["Home"])
self.do_click(locator=self.acm_page_nav["Overview_page"])
def navigate_clusters_page(self):
"""
Navigate to ACM Clusters Page
"""
log.info("Navigate into Clusters Page")
self.choose_expanded_mode(
mode=True, locator=self.acm_page_nav["Infrastructure"]
)
self.do_click(locator=self.acm_page_nav["Clusters_page"])
def navigate_bare_metal_assets_page(self):
"""
Navigate to ACM Bare Metal Assets Page
"""
log.info("Navigate into Bare Metal Assets Page")
self.choose_expanded_mode(
mode=True, locator=self.acm_page_nav["Infrastructure"]
)
self.do_click(locator=self.acm_page_nav["Bare_metal_assets_page"])
def navigate_automation_page(self):
"""
Navigate to ACM Automation Page
"""
log.info("Navigate into Automation Page")
self.choose_expanded_mode(
mode=True, locator=self.acm_page_nav["Infrastructure"]
)
self.do_click(locator=self.acm_page_nav["Automation_page"])
def navigate_infrastructure_env_page(self):
"""
Navigate to ACM Infrastructure Environments Page
"""
log.info("Navigate into Infrastructure Environments Page")
self.choose_expanded_mode(
mode=True, locator=self.acm_page_nav["Infrastructure"]
)
self.do_click(locator=self.acm_page_nav["Infrastructure_environments_page"])
def navigate_applications_page(self):
"""
Navigate to ACM Applications Page
"""
log.info("Navigate into Applications Page")
self.do_click(locator=self.acm_page_nav["Applications"])
def navigate_governance_page(self):
"""
Navigate to ACM Governance Page
"""
log.info("Navigate into Governance Page")
self.do_click(locator=self.acm_page_nav["Governance"])
def navigate_credentials_page(self):
"""
Navigate to ACM Credentials Page
"""
log.info("Navigate into Governance Page")
self.do_click(locator=self.acm_page_nav["Credentials"])
class ACMOCPClusterDeployment(AcmPageNavigator):
"""
Everything related to cluster creation through ACM goes here
"""
def __init__(self, driver, platform, cluster_conf):
super().__init__(driver)
self.platform = platform
self.cluster_conf = cluster_conf
self.cluster_name = self.cluster_conf.ENV_DATA["cluster_name"]
self.cluster_path = self.cluster_conf.ENV_DATA["cluster_path"]
self.deploy_sync_mode = config.MULTICLUSTER.get("deploy_sync_mode", "async")
self.deployment_status = None
self.cluster_deploy_timeout = self.cluster_conf.ENV_DATA.get(
"cluster_deploy_timeout", ACM_CLUSTER_DEPLOY_TIMEOUT
)
self.deployment_failed_reason = None
self.deployment_start_time = 0
def create_cluster_prereq(self):
raise NotImplementedError("Child class has to implement this method")
def navigate_create_clusters_page(self):
# Navigate to Clusters page which has 'Create Cluster'/
# 'Import Cluster' buttons
# Here we click on "Create Cluster" and we will be in create cluster page
while True:
self.navigate_clusters_page()
log.info("Clicking on 'CreateCluster'")
# Because of weird selenium behaviour we are checking
# for CreateCluster button in 3 different ways
# 1. CreateCluster button
# 2. CreateCluster button with index xpath
# 3. Checking url, which should end with 'create-cluster'
if not self.check_element_presence(
(By.XPATH, self.acm_page_nav["cc_create_cluster"][0]), timeout=60
):
log.error("Create cluster button not found")
raise ACMClusterDeployException("Can't continue with deployment")
log.info("check 1:Found create cluster button")
if not self.check_element_presence(
(By.XPATH, self.acm_page_nav["cc_create_cluster_index_xpath"][0]),
timeout=300,
):
log.error("Create cluster button not found")
raise ACMClusterDeployException("Can't continue with deployment")
log.info("check 2:Found create cluster by index path")
self.do_click(locator=self.acm_page_nav["cc_create_cluster"], timeout=100)
time.sleep(20)
if self.driver.current_url.endswith("create-cluster"):
break
def click_next_button(self):
self.do_click(self.acm_page_nav["cc_next_page_button"])
def fill_multiple_textbox(self, key_val):
"""
In a page if we want to fill multiple text boxes we can use
this function which iteratively fills in values from the dictionary parameter
key_val (dict): keys corresponds to the xpath of text box, value corresponds
to the value to be filled in
"""
for xpath, value in key_val.items():
self.do_send_keys(locator=xpath, text=value)
def click_platform_and_credentials(self):
self.navigate_create_clusters_page()
self.do_click(
locator=self.acm_page_nav[PLATFORM_XPATH_MAP[self.platform]], timeout=100
)
self.do_click(
locator=self.acm_page_nav["cc_infrastructure_provider_creds_dropdown"]
)
credential = format_locator(
self.acm_page_nav["cc_infrastructure_provider_creds_select_creds"],
self.platform_credential_name,
)
self.do_click(locator=credential)
@retry(ACMClusterDeployException, tries=3, delay=10, backoff=1)
def goto_cluster_details_page(self):
self.navigate_clusters_page()
locator = format_locator(self.acm_page_nav["cc_table_entry"], self.cluster_name)
self.do_click(locator=locator)
self.do_click(locator=self.acm_page_nav["cc_cluster_details_page"], timeout=100)
self.choose_expanded_mode(True, self.acm_page_nav["cc_details_toggle_icon"])
def get_deployment_status(self):
self.goto_cluster_details_page()
if self.acm_cluster_status_failed(timeout=2):
self.deployment_status = "failed"
elif self.acm_cluster_status_ready(timeout=2):
self.deployment_status = "ready"
elif self.acm_cluster_status_creating(timeout=2):
self.deployment_status = "creating"
else:
self.deployment_status = "unknown"
elapsed_time = int(time.time() - self.deployment_start_time)
if elapsed_time > self.cluster_deploy_timeout:
if self.deployment_status == "creating":
self.deployment_status = "failed"
self.deployment_failed_reason = "deploy_timeout"
def wait_for_cluster_create(self):
        # Poll until the cluster reaches 'Ready', giving up once the
        # cluster_deploy_timeout budget is exhausted
        status_check_timeout = 300
        while (
            not self.acm_cluster_status_ready(status_check_timeout)
            and self.cluster_deploy_timeout >= 1
        ):
            self.cluster_deploy_timeout -= status_check_timeout
if self.acm_cluster_status_creating():
log.info(f"Cluster {self.cluster_name} is in 'Creating' phase")
else:
self.acm_bailout_if_failed()
if self.acm_cluster_status_ready():
log.info(
f"Cluster create successful, Cluster {self.cluster_name} is in 'Ready' state"
)
def acm_bailout_if_failed(self):
if self.acm_cluster_status_failed():
raise ACMClusterDeployException("Deployment is in 'FAILED' state")
def acm_cluster_status_failed(self, timeout=5):
return self.check_element_presence(
(
self.acm_page_nav["cc_cluster_status_page_status_failed"][1],
self.acm_page_nav["cc_cluster_status_page_status_failed"][0],
),
timeout=timeout,
)
def acm_cluster_status_ready(self, timeout=120):
return self.check_element_presence(
(
self.acm_page_nav["cc_cluster_status_page_status_ready"][1],
self.acm_page_nav["cc_cluster_status_page_status_ready"][0],
),
timeout=timeout,
)
def acm_cluster_status_creating(self, timeout=120):
return self.check_element_presence(
(
self.acm_page_nav["cc_cluster_status_page_status_creating"][1],
self.acm_page_nav["cc_cluster_status_page_status_creating"][0],
),
timeout=timeout,
)
def download_cluster_conf_files(self):
"""
Download install-config and kubeconfig to cluster dir
"""
if not os.path.exists(os.path.expanduser(f"{self.cluster_path}")):
os.mkdir(os.path.expanduser(f"{self.cluster_path}"))
# create auth dir inside cluster dir
auth_dir = os.path.join(os.path.expanduser(f"{self.cluster_path}"), "auth")
if not os.path.exists(auth_dir):
os.mkdir(auth_dir)
self.download_kubeconfig(auth_dir)
def download_kubeconfig(self, authdir):
get_kubeconf_secret_cmd = (
f"$(oc get secret -o name -n {self.cluster_name} "
f"-l {ACM_CLUSTER_DEPLOYMENT_LABEL_KEY}={self.cluster_name} "
f"-l {ACM_CLUSTER_DEPLOYMENT_SECRET_TYPE_LABEL_KEY}=kubeconfig)"
)
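        # The two -l selectors above pick the kubeconfig secret that ACM creates
        # for this ClusterDeployment; the $(oc get secret ...) subshell is left
        # unexpanded here and is assumed to be resolved when extract_cmd runs.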
extract_cmd = (
f"oc extract -n {self.cluster_name} "
f"{get_kubeconf_secret_cmd} "
f"--to={authdir} --confirm"
)
run_cmd(extract_cmd)
if not os.path.exists(os.path.join(authdir, "kubeconfig")):
raise ACMClusterDeployException("Could not find the kubeconfig")
def create_cluster(self, cluster_config=None):
"""
Create cluster using ACM UI
Args:
cluster_config (Config): framework.Config object of complete configuration required
for deployment
"""
raise NotImplementedError("Child class should implement this function")
class ACMOCPPlatformVsphereIPI(ACMOCPClusterDeployment):
"""
This class handles all behind the scene activities
for cluster creation through ACM for vsphere platform
"""
def __init__(self, driver, cluster_conf=None):
super().__init__(driver=driver, platform="vsphere", cluster_conf=cluster_conf)
self.platform_credential_name = cluster_conf.ENV_DATA.get(
"platform_credential_name",
f"{ACM_PLATOFRM_VSPHERE_CRED_PREFIX}{self.cluster_name}",
)
# API VIP & Ingress IP
self.ips = None
self.vsphere_network = None
def create_cluster_prereq(self, timeout=600):
"""
Perform all prereqs before vsphere cluster creation from ACM
Args:
timeout (int): Timeout for any UI operations
"""
        # Create vsphere credentials
# Click on 'Add credential' in 'Infrastructure provider' page
self.navigate_create_clusters_page()
self.refresh_page()
hard_timeout = config.ENV_DATA.get("acm_ui_hard_deadline", 1200)
remaining = hard_timeout
while True:
ret = self.check_element_presence(
(By.XPATH, self.acm_page_nav[PLATFORM_XPATH_MAP[self.platform]][0]),
timeout=300,
)
if ret:
log.info("Found platform icon")
break
else:
if remaining < 0:
raise TimeoutException("Timedout while waiting for platform icon")
else:
remaining -= timeout
self.navigate_create_clusters_page()
self.refresh_page()
self.do_click(
locator=self.acm_page_nav[PLATFORM_XPATH_MAP[self.platform]], timeout=100
)
# "Basic vsphere credential info"
# 1. credential name
# 2. Namespace
# 3. Base DNS domain
self.do_click(locator=self.acm_page_nav["cc_provider_credentials"], timeout=100)
parent_tab = self.driver.current_window_handle
tabs = self.driver.window_handles
self.driver.switch_to.window(tabs[1])
self.do_click(locator=self.acm_page_nav["cc_provider_creds_vsphere"])
basic_cred_dict = {
self.acm_page_nav[
"cc_provider_creds_vsphere_cred_name"
]: self.platform_credential_name,
self.acm_page_nav[
"cc_provider_creds_vsphere_base_dns"
]: f"{self.cluster_conf.ENV_DATA["base_domain"]}",
}
self.fill_multiple_textbox(basic_cred_dict)
# Credential Namespace is not a text box but a dropdown
self.do_click(self.acm_page_nav["cc_provider_creds_vsphere_cred_namespace"])
self.do_click(self.acm_page_nav["cc_provider_creds_default_namespace"])
# click on 'Next' button at the bottom
self.click_next_button()
# Detailed VMWare credentials section
# 1. vCenter server
# 2. vCenter username
# 3. vCenter password
        # 4. vCenter root CA certificate
# 5. vSphere cluster name
# 6. vSphere datacenter
# 7. vSphere default Datastore
with open(VSPHERE_CA_FILE_PATH, "r") as fp:
vsphere_ca = fp.read()
vsphere_creds_dict = {
self.acm_page_nav[
"cc_provider_creds_vsphere_vcenter_server"
]: f"{self.cluster_conf.ENV_DATA["vsphere_server"]}",
self.acm_page_nav[
"cc_provider_creds_vsphere_username"
]: f"{self.cluster_conf.ENV_DATA["vsphere_user"]}",
self.acm_page_nav[
"cc_provider_creds_vsphere_password"
]: f"{self.cluster_conf.ENV_DATA["vsphere_password"]}",
self.acm_page_nav["cc_provider_creds_vsphere_rootca"]: f"{vsphere_ca}",
self.acm_page_nav[
"cc_provider_creds_vsphere_clustername"
]: f"{self.cluster_conf.ENV_DATA["vsphere_cluster"]}",
self.acm_page_nav[
"cc_provider_creds_vsphere_dc"
]: f"{self.cluster_conf.ENV_DATA["vsphere_datacenter"]}",
self.acm_page_nav[
"cc_provider_creds_vsphere_datastore"
]: f"{self.cluster_conf.ENV_DATA["vsphere_datastore"]}",
}
self.fill_multiple_textbox(vsphere_creds_dict)
self.click_next_button()
# Pull Secret and SSH
# 1. Pull secret
# 2. SSH Private key
# 3. SSH Public key
with open(os.path.join(DATA_DIR, "pull-secret"), "r") as fp:
pull_secret = fp.read()
ssh_pub_key_path = os.path.expanduser(self.cluster_conf.DEPLOYMENT["ssh_key"])
ssh_priv_key_path = os.path.expanduser(
self.cluster_conf.DEPLOYMENT["ssh_key_private"]
)
with open(ssh_pub_key_path, "r") as fp:
ssh_pub_key = fp.read()
with open(ssh_priv_key_path, "r") as fp:
ssh_priv_key = fp.read()
pull_secret_and_ssh = {
self.acm_page_nav["cc_provider_creds_vsphere_pullsecret"]: f"{pull_secret}",
self.acm_page_nav[
"cc_provider_creds_vsphere_ssh_privkey"
]: f"{ssh_priv_key}",
self.acm_page_nav["cc_provider_creds_vsphere_ssh_pubkey"]: f"{ssh_pub_key}",
}
self.fill_multiple_textbox(pull_secret_and_ssh)
self.click_next_button()
self.do_click(locator=self.acm_page_nav["cc_provider_creds_vsphere_add_button"])
# Go to credentials tab
self.do_click(locator=self.acm_page_nav["Credentials"])
credential_table_entry = format_locator(
self.acm_page_nav["cc_table_entry"], self.platform_credential_name
)
if not self.check_element_presence(
(By.XPATH, credential_table_entry[0]), timeout=20
):
raise ACMClusterDeployException("Could not create credentials for vsphere")
else:
log.info(
f"vsphere credential successfully created {self.platform_credential_name}"
)
# Get the ips in prereq itself
from ocs_ci.deployment import vmware
# Switch context to cluster which we are about to create
prev_ctx = config.cur_index
config.switch_ctx(self.cluster_conf.MULTICLUSTER["multicluster_index"])
self.ips = vmware.assign_ips(2)
vmware.create_dns_records(self.ips)
config.switch_ctx(prev_ctx)
self.driver.close()
self.driver.switch_to.window(parent_tab)
self.driver.switch_to.default_content()
def create_cluster(self):
"""
This function navigates through following pages in the UI
1. Cluster details
        2. Node pools
3. Networks
4. Proxy
5. Automation
6. Review
Raises:
ACMClusterDeployException: If deployment failed for the cluster
"""
self.navigate_create_clusters_page()
self.click_platform_and_credentials()
self.click_next_button()
self.fill_cluster_details_page()
self.click_next_button()
# For now we don't do anything in 'Node Pools' page
self.click_next_button()
self.fill_network_info()
self.click_next_button()
# Skip proxy for now
self.click_next_button()
# Skip Automation for now
self.click_next_button()
# We are at Review page
# Click on create
self.do_click(locator=self.acm_page_nav["cc_create_button"])
self.deployment_start_time = time.time()
        # We will be redirected to the 'Details' page, which shows cluster deployment progress
if self.deploy_sync_mode == "sync":
try:
self.wait_for_cluster_create()
except ACMClusterDeployException:
log.error(
f"Failed to create OCP cluster {self.cluster_conf.ENV_DATA["cluster_name"]}"
)
raise
# Download kubeconfig and install-config file
self.download_cluster_conf_files()
else:
# Async mode of deployment, so just return to caller
# we will just wait for status 'Creating' and then return
if not self.acm_cluster_status_creating(timeout=600):
raise ACMClusterDeployException(
f"Cluster {self.cluster_name} didn't reach 'Creating' phase"
)
self.deployment_status = "Creating"
return
def fill_network_info(self):
"""
We need to fill following network info
1. vSphere network name
2. API VIP
3. Ingress VIP
"""
self.vsphere_network = self.cluster_conf.ENV_DATA.get(
"vm_network", ACM_VSPHERE_NETWORK
)
self.do_click(self.acm_page_nav["cc_vsphere_network_name"])
self.do_send_keys(
self.acm_page_nav["cc_vsphere_network_name"], self.vsphere_network
)
        # Chrome has a quirk of trimming whitespace: if the network name is
        # 'VM Network', typing it into the text box can turn it into
        # 'VMNetwork', so we have to detect that and re-insert the space
ele = self.driver.find_element(
By.XPATH, self.acm_page_nav["cc_vsphere_network_name"][0]
)
remote_text = ele.get_property("value")
if remote_text != self.vsphere_network:
# Check if we have white space char
# in network name
try:
index = self.vsphere_network.index(constants.SPACE)
left_shift_offset = len(remote_text) - index
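                # Move the cursor left from the end of the trimmed text to the
                # position where the space was dropped, then type the space back
                # in (left_shift_offset repetitions of ARROW_LEFT, then SPACE).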
self.do_send_keys(
self.acm_page_nav["cc_vsphere_network_name"],
f"{left_shift_offset*Keys.ARROW_LEFT}{constants.SPACE}",
)
except ValueError:
raise ACMClusterDeployException(
"Weird browser behaviour, Not able to provide vsphere network info"
)
vsphere_network = {
self.acm_page_nav["cc_api_vip"]: f"{self.ips[0]}",
self.acm_page_nav["cc_ingress_vip"]: f"{self.ips[1]}",
}
self.fill_multiple_textbox(vsphere_network)
def fill_cluster_details_page(self):
"""
Fill in following details in "Cluster details" page
1. Cluster name
2. Base DNS domain
3. Release image
"""
release_img = self.get_ocp_release_img()
cluster_details = {
self.acm_page_nav[
"cc_cluster_name"
]: f"{self.cluster_conf.ENV_DATA["cluster_name"]}",
self.acm_page_nav["cc_openshift_release_image"]: f"{release_img}",
}
self.fill_multiple_textbox(cluster_details)
def get_ocp_release_img(self):
vers = expose_ocp_version(self.cluster_conf.DEPLOYMENT["installer_version"])
return f"{ACM_OCP_RELEASE_IMG_URL_PREFIX}:{vers}"
class ACMOCPDeploymentFactory(object):
def __init__(self):
# All platform specific classes should have map here
self.platform_map = {"vsphereipi": ACMOCPPlatformVsphereIPI}
def get_platform_instance(self, driver, cluster_config):
"""
Args:
driver: selenium UI driver object
cluster_config (dict): Cluster Config object
"""
platform_deployment = (
f"{cluster_config.ENV_DATA["platform"]}"
f"{cluster_config.ENV_DATA["deployment_type"]}"
)
return self.platform_map[platform_deployment](driver, cluster_config)
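# Usage sketch (illustrative, not executed as part of this module): with a
# selenium `driver` and a framework `cluster_config` whose ENV_DATA sets
# platform="vsphere" and deployment_type="ipi", the factory resolves the
# matching deployer class and drives the ACM UI flow:
#     factory = ACMOCPDeploymentFactory()
#     deployer = factory.get_platform_instance(driver, cluster_config)
#     deployer.create_cluster_prereq()  # credentials, API/ingress IPs, DNS records
#     deployer.create_cluster()         # walks the create-cluster wizard pages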
|
import os
import logging
import time
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.keys import Keys
from ocs_ci.ocs import constants
from ocs_ci.ocs.exceptions import ACMClusterDeployException
from ocs_ci.ocs.ui.base_ui import BaseUI
from ocs_ci.ocs.ui.helpers_ui import format_locator
from ocs_ci.ocs.ui.views import locators
from ocs_ci.utility.utils import (
get_ocp_version,
expose_ocp_version,
run_cmd,
)
from ocs_ci.ocs.constants import (
PLATFORM_XPATH_MAP,
ACM_PLATOFRM_VSPHERE_CRED_PREFIX,
VSPHERE_CA_FILE_PATH,
DATA_DIR,
ACM_OCP_RELEASE_IMG_URL_PREFIX,
ACM_VSPHERE_NETWORK,
ACM_CLUSTER_DEPLOY_TIMEOUT,
ACM_CLUSTER_DEPLOYMENT_LABEL_KEY,
ACM_CLUSTER_DEPLOYMENT_SECRET_TYPE_LABEL_KEY,
)
from ocs_ci.framework import config
from ocs_ci.utility.retry import retry
log = logging.getLogger(__name__)
class AcmPageNavigator(BaseUI):
"""
ACM Page Navigator Class
"""
def __init__(self, driver):
super().__init__(driver)
self.ocp_version = get_ocp_version()
self.acm_page_nav = locators[self.ocp_version]["acm_page"]
def navigate_welcome_page(self):
"""
Navigate to ACM Welcome Page
"""
log.info("Navigate into Home Page")
self.choose_expanded_mode(mode=True, locator=self.acm_page_nav["Home"])
self.do_click(locator=self.acm_page_nav["Welcome_page"])
def navigate_overview_page(self):
"""
Navigate to ACM Overview Page
"""
log.info("Navigate into Overview Page")
self.choose_expanded_mode(mode=True, locator=self.acm_page_nav["Home"])
self.do_click(locator=self.acm_page_nav["Overview_page"])
def navigate_clusters_page(self):
"""
Navigate to ACM Clusters Page
"""
log.info("Navigate into Clusters Page")
self.choose_expanded_mode(
mode=True, locator=self.acm_page_nav["Infrastructure"]
)
self.do_click(locator=self.acm_page_nav["Clusters_page"])
def navigate_bare_metal_assets_page(self):
"""
Navigate to ACM Bare Metal Assets Page
"""
log.info("Navigate into Bare Metal Assets Page")
self.choose_expanded_mode(
mode=True, locator=self.acm_page_nav["Infrastructure"]
)
self.do_click(locator=self.acm_page_nav["Bare_metal_assets_page"])
def navigate_automation_page(self):
"""
Navigate to ACM Automation Page
"""
log.info("Navigate into Automation Page")
self.choose_expanded_mode(
mode=True, locator=self.acm_page_nav["Infrastructure"]
)
self.do_click(locator=self.acm_page_nav["Automation_page"])
def navigate_infrastructure_env_page(self):
"""
Navigate to ACM Infrastructure Environments Page
"""
log.info("Navigate into Infrastructure Environments Page")
self.choose_expanded_mode(
mode=True, locator=self.acm_page_nav["Infrastructure"]
)
self.do_click(locator=self.acm_page_nav["Infrastructure_environments_page"])
def navigate_applications_page(self):
"""
Navigate to ACM Applications Page
"""
log.info("Navigate into Applications Page")
self.do_click(locator=self.acm_page_nav["Applications"])
def navigate_governance_page(self):
"""
Navigate to ACM Governance Page
"""
log.info("Navigate into Governance Page")
self.do_click(locator=self.acm_page_nav["Governance"])
def navigate_credentials_page(self):
"""
Navigate to ACM Credentials Page
"""
log.info("Navigate into Governance Page")
self.do_click(locator=self.acm_page_nav["Credentials"])
class ACMOCPClusterDeployment(AcmPageNavigator):
"""
Everything related to cluster creation through ACM goes here
"""
def __init__(self, driver, platform, cluster_conf):
super().__init__(driver)
self.platform = platform
self.cluster_conf = cluster_conf
self.cluster_name = self.cluster_conf.ENV_DATA["cluster_name"]
self.cluster_path = self.cluster_conf.ENV_DATA["cluster_path"]
self.deploy_sync_mode = config.MULTICLUSTER.get("deploy_sync_mode", "async")
self.deployment_status = None
self.cluster_deploy_timeout = self.cluster_conf.ENV_DATA.get(
"cluster_deploy_timeout", ACM_CLUSTER_DEPLOY_TIMEOUT
)
self.deployment_failed_reason = None
self.deployment_start_time = 0
def create_cluster_prereq(self):
raise NotImplementedError("Child class has to implement this method")
def navigate_create_clusters_page(self):
# Navigate to Clusters page which has 'Create Cluster'/
# 'Import Cluster' buttons
# Here we click on "Create Cluster" and we will be in create cluster page
while True:
self.navigate_clusters_page()
log.info("Clicking on 'CreateCluster'")
# Because of weird selenium behaviour we are checking
# for CreateCluster button in 3 different ways
# 1. CreateCluster button
# 2. CreateCluster button with index xpath
# 3. Checking url, which should end with 'create-cluster'
if not self.check_element_presence(
(By.XPATH, self.acm_page_nav["cc_create_cluster"][0]), timeout=60
):
log.error("Create cluster button not found")
raise ACMClusterDeployException("Can't continue with deployment")
log.info("check 1:Found create cluster button")
if not self.check_element_presence(
(By.XPATH, self.acm_page_nav["cc_create_cluster_index_xpath"][0]),
timeout=300,
):
log.error("Create cluster button not found")
raise ACMClusterDeployException("Can't continue with deployment")
log.info("check 2:Found create cluster by index path")
self.do_click(locator=self.acm_page_nav["cc_create_cluster"], timeout=100)
time.sleep(20)
if self.driver.current_url.endswith("create-cluster"):
break
def click_next_button(self):
self.do_click(self.acm_page_nav["cc_next_page_button"])
def fill_multiple_textbox(self, key_val):
"""
In a page if we want to fill multiple text boxes we can use
this function which iteratively fills in values from the dictionary parameter
key_val (dict): keys corresponds to the xpath of text box, value corresponds
to the value to be filled in
"""
for xpath, value in key_val.items():
self.do_send_keys(locator=xpath, text=value)
def click_platform_and_credentials(self):
self.navigate_create_clusters_page()
self.do_click(
locator=self.acm_page_nav[PLATFORM_XPATH_MAP[self.platform]], timeout=100
)
self.do_click(
locator=self.acm_page_nav["cc_infrastructure_provider_creds_dropdown"]
)
credential = format_locator(
self.acm_page_nav["cc_infrastructure_provider_creds_select_creds"],
self.platform_credential_name,
)
self.do_click(locator=credential)
@retry(ACMClusterDeployException, tries=3, delay=10, backoff=1)
def goto_cluster_details_page(self):
self.navigate_clusters_page()
locator = format_locator(self.acm_page_nav["cc_table_entry"], self.cluster_name)
self.do_click(locator=locator)
self.do_click(locator=self.acm_page_nav["cc_cluster_details_page"], timeout=100)
self.choose_expanded_mode(True, self.acm_page_nav["cc_details_toggle_icon"])
def get_deployment_status(self):
self.goto_cluster_details_page()
if self.acm_cluster_status_failed(timeout=2):
self.deployment_status = "failed"
elif self.acm_cluster_status_ready(timeout=2):
self.deployment_status = "ready"
elif self.acm_cluster_status_creating(timeout=2):
self.deployment_status = "creating"
else:
self.deployment_status = "unknown"
elapsed_time = int(time.time() - self.deployment_start_time)
if elapsed_time > self.cluster_deploy_timeout:
if self.deployment_status == "creating":
self.deployment_status = "failed"
self.deployment_failed_reason = "deploy_timeout"
def wait_for_cluster_create(self):
# Wait for status creating
        status_check_timeout = 300
        while (
            not self.acm_cluster_status_ready(status_check_timeout)
            and self.cluster_deploy_timeout >= 1
        ):
            self.cluster_deploy_timeout -= status_check_timeout
if self.acm_cluster_status_creating():
log.info(f"Cluster {self.cluster_name} is in 'Creating' phase")
else:
self.acm_bailout_if_failed()
if self.acm_cluster_status_ready():
log.info(
f"Cluster create successful, Cluster {self.cluster_name} is in 'Ready' state"
)
def acm_bailout_if_failed(self):
if self.acm_cluster_status_failed():
raise ACMClusterDeployException("Deployment is in 'FAILED' state")
def acm_cluster_status_failed(self, timeout=5):
return self.check_element_presence(
(
self.acm_page_nav["cc_cluster_status_page_status_failed"][1],
self.acm_page_nav["cc_cluster_status_page_status_failed"][0],
),
timeout=timeout,
)
def acm_cluster_status_ready(self, timeout=120):
return self.check_element_presence(
(
self.acm_page_nav["cc_cluster_status_page_status_ready"][1],
self.acm_page_nav["cc_cluster_status_page_status_ready"][0],
),
timeout=timeout,
)
def acm_cluster_status_creating(self, timeout=120):
return self.check_element_presence(
(
self.acm_page_nav["cc_cluster_status_page_status_creating"][1],
self.acm_page_nav["cc_cluster_status_page_status_creating"][0],
),
timeout=timeout,
)
def download_cluster_conf_files(self):
"""
Download install-config and kubeconfig to cluster dir
"""
if not os.path.exists(os.path.expanduser(f"{self.cluster_path}")):
os.mkdir(os.path.expanduser(f"{self.cluster_path}"))
# create auth dir inside cluster dir
auth_dir = os.path.join(os.path.expanduser(f"{self.cluster_path}"), "auth")
if not os.path.exists(auth_dir):
os.mkdir(auth_dir)
self.download_kubeconfig(auth_dir)
def download_kubeconfig(self, authdir):
get_kubeconf_secret_cmd = (
f"$(oc get secret -o name -n {self.cluster_name} "
f"-l {ACM_CLUSTER_DEPLOYMENT_LABEL_KEY}={self.cluster_name} "
f"-l {ACM_CLUSTER_DEPLOYMENT_SECRET_TYPE_LABEL_KEY}=kubeconfig)"
)
extract_cmd = (
f"oc extract -n {self.cluster_name} "
f"{get_kubeconf_secret_cmd} "
f"--to={authdir} --confirm"
)
run_cmd(extract_cmd)
if not os.path.exists(os.path.join(authdir, "kubeconfig")):
raise ACMClusterDeployException("Could not find the kubeconfig")
def create_cluster(self, cluster_config=None):
"""
Create cluster using ACM UI
Args:
cluster_config (Config): framework.Config object of complete configuration required
for deployment
"""
raise NotImplementedError("Child class should implement this function")
class ACMOCPPlatformVsphereIPI(ACMOCPClusterDeployment):
"""
This class handles all behind the scene activities
for cluster creation through ACM for vsphere platform
"""
def __init__(self, driver, cluster_conf=None):
super().__init__(driver=driver, platform="vsphere", cluster_conf=cluster_conf)
self.platform_credential_name = cluster_conf.ENV_DATA.get(
"platform_credential_name",
f"{ACM_PLATOFRM_VSPHERE_CRED_PREFIX}{self.cluster_name}",
)
# API VIP & Ingress IP
self.ips = None
self.vsphere_network = None
def create_cluster_prereq(self, timeout=600):
"""
Perform all prereqs before vsphere cluster creation from ACM
Args:
timeout (int): Timeout for any UI operations
"""
        # Create vsphere credentials
# Click on 'Add credential' in 'Infrastructure provider' page
self.navigate_create_clusters_page()
self.refresh_page()
hard_timeout = config.ENV_DATA.get("acm_ui_hard_deadline", 1200)
remaining = hard_timeout
while True:
ret = self.check_element_presence(
(By.XPATH, self.acm_page_nav[PLATFORM_XPATH_MAP[self.platform]][0]),
timeout=300,
)
if ret:
log.info("Found platform icon")
break
else:
if remaining < 0:
raise TimeoutException("Timedout while waiting for platform icon")
else:
remaining -= timeout
self.navigate_create_clusters_page()
self.refresh_page()
self.do_click(
locator=self.acm_page_nav[PLATFORM_XPATH_MAP[self.platform]], timeout=100
)
# "Basic vsphere credential info"
# 1. credential name
# 2. Namespace
# 3. Base DNS domain
self.do_click(locator=self.acm_page_nav["cc_provider_credentials"], timeout=100)
parent_tab = self.driver.current_window_handle
tabs = self.driver.window_handles
self.driver.switch_to.window(tabs[1])
self.do_click(locator=self.acm_page_nav["cc_provider_creds_vsphere"])
basic_cred_dict = {
self.acm_page_nav[
"cc_provider_creds_vsphere_cred_name"
]: self.platform_credential_name,
self.acm_page_nav[
"cc_provider_creds_vsphere_base_dns"
]: f"{self.cluster_conf.ENV_DATA['base_domain']}",
}
self.fill_multiple_textbox(basic_cred_dict)
# Credential Namespace is not a text box but a dropdown
self.do_click(self.acm_page_nav["cc_provider_creds_vsphere_cred_namespace"])
self.do_click(self.acm_page_nav["cc_provider_creds_default_namespace"])
# click on 'Next' button at the bottom
self.click_next_button()
# Detailed VMWare credentials section
# 1. vCenter server
# 2. vCenter username
# 3. vCenter password
        # 4. vCenter root CA certificate
# 5. vSphere cluster name
# 6. vSphere datacenter
# 7. vSphere default Datastore
with open(VSPHERE_CA_FILE_PATH, "r") as fp:
vsphere_ca = fp.read()
vsphere_creds_dict = {
self.acm_page_nav[
"cc_provider_creds_vsphere_vcenter_server"
]: f"{self.cluster_conf.ENV_DATA['vsphere_server']}",
self.acm_page_nav[
"cc_provider_creds_vsphere_username"
]: f"{self.cluster_conf.ENV_DATA['vsphere_user']}",
self.acm_page_nav[
"cc_provider_creds_vsphere_password"
]: f"{self.cluster_conf.ENV_DATA['vsphere_password']}",
self.acm_page_nav["cc_provider_creds_vsphere_rootca"]: f"{vsphere_ca}",
self.acm_page_nav[
"cc_provider_creds_vsphere_clustername"
]: f"{self.cluster_conf.ENV_DATA['vsphere_cluster']}",
self.acm_page_nav[
"cc_provider_creds_vsphere_dc"
]: f"{self.cluster_conf.ENV_DATA['vsphere_datacenter']}",
self.acm_page_nav[
"cc_provider_creds_vsphere_datastore"
]: f"{self.cluster_conf.ENV_DATA['vsphere_datastore']}",
}
self.fill_multiple_textbox(vsphere_creds_dict)
self.click_next_button()
# Pull Secret and SSH
# 1. Pull secret
# 2. SSH Private key
# 3. SSH Public key
with open(os.path.join(DATA_DIR, "pull-secret"), "r") as fp:
pull_secret = fp.read()
ssh_pub_key_path = os.path.expanduser(self.cluster_conf.DEPLOYMENT["ssh_key"])
ssh_priv_key_path = os.path.expanduser(
self.cluster_conf.DEPLOYMENT["ssh_key_private"]
)
with open(ssh_pub_key_path, "r") as fp:
ssh_pub_key = fp.read()
with open(ssh_priv_key_path, "r") as fp:
ssh_priv_key = fp.read()
pull_secret_and_ssh = {
self.acm_page_nav["cc_provider_creds_vsphere_pullsecret"]: f"{pull_secret}",
self.acm_page_nav[
"cc_provider_creds_vsphere_ssh_privkey"
]: f"{ssh_priv_key}",
self.acm_page_nav["cc_provider_creds_vsphere_ssh_pubkey"]: f"{ssh_pub_key}",
}
self.fill_multiple_textbox(pull_secret_and_ssh)
self.click_next_button()
self.do_click(locator=self.acm_page_nav["cc_provider_creds_vsphere_add_button"])
# Go to credentials tab
self.do_click(locator=self.acm_page_nav["Credentials"])
credential_table_entry = format_locator(
self.acm_page_nav["cc_table_entry"], self.platform_credential_name
)
if not self.check_element_presence(
(By.XPATH, credential_table_entry[0]), timeout=20
):
raise ACMClusterDeployException("Could not create credentials for vsphere")
else:
log.info(
f"vsphere credential successfully created {self.platform_credential_name}"
)
# Get the ips in prereq itself
from ocs_ci.deployment import vmware
# Switch context to cluster which we are about to create
prev_ctx = config.cur_index
config.switch_ctx(self.cluster_conf.MULTICLUSTER["multicluster_index"])
self.ips = vmware.assign_ips(2)
vmware.create_dns_records(self.ips)
config.switch_ctx(prev_ctx)
self.driver.close()
self.driver.switch_to.window(parent_tab)
self.driver.switch_to.default_content()
def create_cluster(self):
"""
This function navigates through following pages in the UI
1. Cluster details
        2. Node pools
3. Networks
4. Proxy
5. Automation
6. Review
Raises:
ACMClusterDeployException: If deployment failed for the cluster
"""
self.navigate_create_clusters_page()
self.click_platform_and_credentials()
self.click_next_button()
self.fill_cluster_details_page()
self.click_next_button()
# For now we don't do anything in 'Node Pools' page
self.click_next_button()
self.fill_network_info()
self.click_next_button()
# Skip proxy for now
self.click_next_button()
# Skip Automation for now
self.click_next_button()
# We are at Review page
# Click on create
self.do_click(locator=self.acm_page_nav["cc_create_button"])
self.deployment_start_time = time.time()
        # We will be redirected to the 'Details' page, which shows cluster deployment progress
if self.deploy_sync_mode == "sync":
try:
self.wait_for_cluster_create()
except ACMClusterDeployException:
log.error(
f"Failed to create OCP cluster {self.cluster_conf.ENV_DATA['cluster_name']}"
)
raise
# Download kubeconfig and install-config file
self.download_cluster_conf_files()
else:
            # Async deployment mode: wait only until the cluster reaches the
            # 'Creating' phase, then return to the caller
if not self.acm_cluster_status_creating(timeout=600):
raise ACMClusterDeployException(
f"Cluster {self.cluster_name} didn't reach 'Creating' phase"
)
self.deployment_status = "Creating"
return
def fill_network_info(self):
"""
We need to fill following network info
1. vSphere network name
2. API VIP
3. Ingress VIP
"""
self.vsphere_network = self.cluster_conf.ENV_DATA.get(
"vm_network", ACM_VSPHERE_NETWORK
)
self.do_click(self.acm_page_nav["cc_vsphere_network_name"])
self.do_send_keys(
self.acm_page_nav["cc_vsphere_network_name"], self.vsphere_network
)
        # Chrome sometimes strips whitespace from the text we type: a network
        # name like 'VM Network' can end up as 'VMNetwork' in the text box,
        # so below we re-insert the missing space if that happened.
ele = self.driver.find_element(
By.XPATH, self.acm_page_nav["cc_vsphere_network_name"][0]
)
remote_text = ele.get_property("value")
if remote_text != self.vsphere_network:
# Check if we have white space char
# in network name
try:
index = self.vsphere_network.index(constants.SPACE)
left_shift_offset = len(remote_text) - index
self.do_send_keys(
self.acm_page_nav["cc_vsphere_network_name"],
f"{left_shift_offset*Keys.ARROW_LEFT}{constants.SPACE}",
)
except ValueError:
raise ACMClusterDeployException(
"Weird browser behaviour, Not able to provide vsphere network info"
)
vsphere_network = {
self.acm_page_nav["cc_api_vip"]: f"{self.ips[0]}",
self.acm_page_nav["cc_ingress_vip"]: f"{self.ips[1]}",
}
self.fill_multiple_textbox(vsphere_network)
def fill_cluster_details_page(self):
"""
Fill in following details in "Cluster details" page
1. Cluster name
2. Base DNS domain
3. Release image
"""
release_img = self.get_ocp_release_img()
cluster_details = {
self.acm_page_nav[
"cc_cluster_name"
]: f"{self.cluster_conf.ENV_DATA['cluster_name']}",
self.acm_page_nav["cc_openshift_release_image"]: f"{release_img}",
}
self.fill_multiple_textbox(cluster_details)
def get_ocp_release_img(self):
vers = expose_ocp_version(self.cluster_conf.DEPLOYMENT["installer_version"])
return f"{ACM_OCP_RELEASE_IMG_URL_PREFIX}:{vers}"
class ACMOCPDeploymentFactory(object):
def __init__(self):
# All platform specific classes should have map here
self.platform_map = {"vsphereipi": ACMOCPPlatformVsphereIPI}
def get_platform_instance(self, driver, cluster_config):
"""
Args:
driver: selenium UI driver object
cluster_config (dict): Cluster Config object
"""
platform_deployment = (
f"{cluster_config.ENV_DATA['platform']}"
f"{cluster_config.ENV_DATA['deployment_type']}"
)
return self.platform_map[platform_deployment](driver, cluster_config)
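# Hypothetical usage sketch (the `driver` and `cluster_config` objects are
# illustrative and not constructed in this module). The factory key is the
# concatenation of platform and deployment type, e.g. "vsphere" + "ipi":
#   factory = ACMOCPDeploymentFactory()
#   deployer = factory.get_platform_instance(driver, cluster_config)
#   deployer.create_cluster_prereq()
#   deployer.create_cluster()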
|
'''Extension for Nemo's context menu to easily convert images to PNG and
optimize their filesize with pngcrush.'''
from __future__ import annotations
import os
import subprocess
from urllib.parse import unquote_plus, urlparse
from PIL import Image, UnidentifiedImageError
import PySimpleGUI as sg
import gi
gi.require_version('Nemo', '3.0')
from gi.repository import GObject, Nemo # type: ignore pylint: disable=wrong-import-position
EXTENSIONS = ('jpg', 'jpeg', 'gif', 'tiff', 'bmp', 'png')
uqp = unquote_plus
def get_files(files_in: list[GObject]) -> list[str]|None:
"""
Retrieve filenames as cross-platform safe strings from file objects.
:param files_in: List of file objects.
"""
files = []
for file_in in files_in:
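        # get_uri() returns a "file://..." URI; drop the 7-character "file://"
        # scheme prefix and URL-decode the rest to get a local filesystem path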
file_in = unquote_plus(file_in.get_uri()[7:])
if os.path.isfile(file_in):
files.append(file_in)
if files:
return files
return None
def convert_one(file: str) -> None:
'''
Converts an image to a PNG.
:param file: Filename of the image to convert.
'''
filename = f'{file.split('.')[-2]}.png'
try:
img = Image.open(file).convert('RGB')
except UnidentifiedImageError:
img = False
if img:
os.remove(file)
img.save(filename, 'PNG')
def convert_images(_, files: list[str]) -> list[str]:
'''
Called by the context menu item "Convert selected image(s) to PNG".
:param files: The currently selected files.
'''
filenames = [f'{file.split('.')[-2]}.png' for file in files]
count = sum(not file.endswith('png') for file in files)
for i, file in enumerate(files):
if not file.endswith('png'):
sg.OneLineProgressMeter('Please wait...', i+1, count, 'pb', 'Converting images', orientation='h')
convert_one(file)
sg.OneLineProgressMeter('', count, count, key='pb')
return filenames
def crush_one(file: str) -> None:
'''
Runs pngcrush on a png file.
:param file: The file to execute this action on.
'''
subprocess.run(['pngcrush', '-rem', 'alla', '-nofilecheck', '-fix', '-ow',
'-reduce', '-m', '0', file], check=False)
def crush_images(_, files: list[str]) -> None:
'''
Called by the context menu item "Optimize image(s) with pngcrush.
:param files: The currently selected files.
'''
for i, file in enumerate(files):
sg.OneLineProgressMeter('Please wait...', i+1, len(files), 'pb',
'Optimize images with pngcrush', orientation='h')
crush_one(file)
sg.OneLineProgressMeter('', len(files), len(files), key='pb')
def convert_and_crush(_, files: list[str]) -> None:
'''
Called by the context menu item "Convert to PNG and optimize.
:param files: The currently selected files.
'''
converted = convert_images(None, files)
crush_images(None, converted)
class PNGConverter(GObject.GObject, Nemo.MenuProvider):
'''Class for extension context menu items.'''
def __init__(self):
'''File manager crashes if init is not called.'''
...
def get_background_items( # pylint: disable=arguments-differ
self, _, folder: GObject) -> list[Nemo.MenuItem]|None:
'''
Called when context menu is called with no file objects selected.
:param folder: Nemo's current working directory.
'''
folder = urlparse(folder.get_uri()).path
files = [uqp(os.path.join(folder, f))
for f in os.listdir(uqp(folder))
if os.path.isfile(uqp(os.path.join(folder, f))) and
f.lower().endswith(EXTENSIONS)]
if all(file.endswith('png') for file in files):
crush = Nemo.MenuItem(
name='CrushImages',
label='Optimize image(s) with pngcrush',
tip='Optimize image filesizes with pngcrush'
)
crush.connect('activate', crush_images, files)
return [crush]
if any(file.endswith(EXTENSIONS) for file in files):
convert = Nemo.MenuItem(
name="ConvertAllImagestoPNG",
label="Convert all images to PNG",
tip="Convert all images to PNG"
)
convert.connect('activate', convert_images, files)
crush = Nemo.MenuItem(
name='ConvertandCrush',
label="Convert images to PNG and optimize",
tip="Convert images to PNG and optimize filesizes with pngcrush"
)
crush.connect('activate', convert_and_crush, files)
return [convert, crush]
def get_file_items( # pylint: disable=arguments-differ
self, _, files: list[GObject]) -> list[Nemo.MenuItem]|None:
'''
Called when context menu is called with files selected.
:param files: The currently selected file objects.
'''
files = get_files(files) # type: ignore
try:
is_iter = iter(files)
check = all(file.lower().endswith('png') for file in files)
except TypeError:
is_iter = False
check = False
if check:
convert = Nemo.MenuItem(
name="CrushImages",
label="Optimize image(s) with pngcrush",
tip="Optimize filesize(s) with pngcrush"
)
convert.connect('activate', crush_images, files)
return [convert]
if is_iter:
check = all(file.lower().endswith(EXTENSIONS) for file in files)
if check:
convert = Nemo.MenuItem(
name="ConvertImagetoPNG",
label="Convert selected image(s) to .png",
tip="Convert image(s) to .png"
)
convert.connect('activate', convert_images, files)
crush = Nemo.MenuItem(
name="ConvertandCrush",
label="Convert to PNG and optimize with pngcrush",
tip="Convert image(s) to PNG and optimize filesize(s) with\
pngcrush"
)
crush.connect('activate', convert_and_crush, files)
return [convert, crush]
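# Note: Nemo discovers this provider because PNGConverter subclasses
# GObject.GObject and Nemo.MenuProvider; the file is assumed to live in
# Nemo's python extensions directory (exact path varies by distribution).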
|
'''Extension for Nemo's context menu to easily convert images to PNG and
optimize their filesize with pngcrush.'''
from __future__ import annotations
import os
import subprocess
from urllib.parse import unquote_plus, urlparse
from PIL import Image, UnidentifiedImageError
import PySimpleGUI as sg
import gi
gi.require_version('Nemo', '3.0')
from gi.repository import GObject, Nemo # type: ignore pylint: disable=wrong-import-position
EXTENSIONS = ('jpg', 'jpeg', 'gif', 'tiff', 'bmp', 'png')
uqp = unquote_plus
def get_files(files_in: list[GObject]) -> list[str]|None:
"""
Retrieve filenames as cross-platform safe strings from file objects.
:param files_in: List of file objects.
"""
files = []
for file_in in files_in:
file_in = unquote_plus(file_in.get_uri()[7:])
if os.path.isfile(file_in):
files.append(file_in)
if files:
return files
return None
def convert_one(file: str) -> None:
'''
Converts an image to a PNG.
:param file: Filename of the image to convert.
'''
filename = f'{file.split(".")[-2]}.png'
try:
img = Image.open(file).convert('RGB')
except UnidentifiedImageError:
img = False
if img:
os.remove(file)
img.save(filename, 'PNG')
def convert_images(_, files: list[str]) -> list[str]:
'''
Called by the context menu item "Convert selected image(s) to PNG".
:param files: The currently selected files.
'''
filenames = [f'{file.split(".")[-2]}.png' for file in files]
count = sum(not file.endswith('png') for file in files)
for i, file in enumerate(files):
if not file.endswith('png'):
sg.OneLineProgressMeter('Please wait...', i+1, count, 'pb', 'Converting images', orientation='h')
convert_one(file)
sg.OneLineProgressMeter('', count, count, key='pb')
return filenames
def crush_one(file: str) -> None:
'''
Runs pngcrush on a png file.
:param file: The file to execute this action on.
'''
subprocess.run(['pngcrush', '-rem', 'alla', '-nofilecheck', '-fix', '-ow',
'-reduce', '-m', '0', file], check=False)
def crush_images(_, files: list[str]) -> None:
'''
Called by the context menu item "Optimize image(s) with pngcrush.
:param files: The currently selected files.
'''
for i, file in enumerate(files):
sg.OneLineProgressMeter('Please wait...', i+1, len(files), 'pb',
'Optimize images with pngcrush', orientation='h')
crush_one(file)
sg.OneLineProgressMeter('', len(files), len(files), key='pb')
def convert_and_crush(_, files: list[str]) -> None:
'''
Called by the context menu item "Convert to PNG and optimize.
:param files: The currently selected files.
'''
converted = convert_images(None, files)
crush_images(None, converted)
class PNGConverter(GObject.GObject, Nemo.MenuProvider):
'''Class for extension context menu items.'''
def __init__(self):
'''File manager crashes if init is not called.'''
...
def get_background_items( # pylint: disable=arguments-differ
self, _, folder: GObject) -> list[Nemo.MenuItem]|None:
'''
Called when context menu is called with no file objects selected.
:param folder: Nemo's current working directory.
'''
folder = urlparse(folder.get_uri()).path
files = [uqp(os.path.join(folder, f))
for f in os.listdir(uqp(folder))
if os.path.isfile(uqp(os.path.join(folder, f))) and
f.lower().endswith(EXTENSIONS)]
if all(file.endswith('png') for file in files):
crush = Nemo.MenuItem(
name='CrushImages',
label='Optimize image(s) with pngcrush',
tip='Optimize image filesizes with pngcrush'
)
crush.connect('activate', crush_images, files)
return [crush]
if any(file.endswith(EXTENSIONS) for file in files):
convert = Nemo.MenuItem(
name="ConvertAllImagestoPNG",
label="Convert all images to PNG",
tip="Convert all images to PNG"
)
convert.connect('activate', convert_images, files)
crush = Nemo.MenuItem(
name='ConvertandCrush',
label="Convert images to PNG and optimize",
tip="Convert images to PNG and optimize filesizes with pngcrush"
)
crush.connect('activate', convert_and_crush, files)
return [convert, crush]
def get_file_items( # pylint: disable=arguments-differ
self, _, files: list[GObject]) -> list[Nemo.MenuItem]|None:
'''
Called when context menu is called with files selected.
:param files: The currently selected file objects.
'''
files = get_files(files) # type: ignore
try:
is_iter = iter(files)
check = all(file.lower().endswith('png') for file in files)
except TypeError:
is_iter = False
check = False
if check:
convert = Nemo.MenuItem(
name="CrushImages",
label="Optimize image(s) with pngcrush",
tip="Optimize filesize(s) with pngcrush"
)
convert.connect('activate', crush_images, files)
return [convert]
if is_iter:
check = all(file.lower().endswith(EXTENSIONS) for file in files)
if check:
convert = Nemo.MenuItem(
name="ConvertImagetoPNG",
label="Convert selected image(s) to .png",
tip="Convert image(s) to .png"
)
convert.connect('activate', convert_images, files)
crush = Nemo.MenuItem(
name="ConvertandCrush",
label="Convert to PNG and optimize with pngcrush",
tip="Convert image(s) to PNG and optimize filesize(s) with\
pngcrush"
)
crush.connect('activate', convert_and_crush, files)
return [convert, crush]
|
from onelang_core import *
import OneLang.One.Ast.Expressions as exprs
import OneLang.One.Ast.Statements as stats
import OneLang.One.Ast.Types as types
import OneLang.One.Ast.AstTypes as astTypes
import OneLang.One.Ast.References as refs
import OneLang.One.Ast.Interfaces as ints
import onelang_core as one
import json
import re
@one.static_init
class TSOverviewGenerator:
@classmethod
def static_init(cls):
cls.preview = cls(True)
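        # Assumption: the @one.static_init decorator runs static_init() once at
        # class-creation time, so TSOverviewGenerator.preview is a shared
        # preview-only instance (constructed with preview_only=True).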
def __init__(self, preview_only = False, show_types = False):
self.preview_only = preview_only
self.show_types = show_types
def leading(self, item):
result = ""
if item.leading_trivia != None and len(item.leading_trivia) > 0:
result += item.leading_trivia
if item.attributes != None:
result += "".join(list(map(lambda x: f'''/// {{ATTR}} name='{x}', value={json.dumps(item.attributes.get(x), separators=(',', ':'))}\n''', item.attributes.keys())))
return result
def pre_arr(self, prefix, value):
return f'''{prefix}{', '.join(value)}''' if len(value) > 0 else ""
def pre_if(self, prefix, condition):
return prefix if condition else ""
def pre(self, prefix, value):
return f'''{prefix}{value}''' if value != None else ""
def type_args(self, args):
return f'''<{', '.join(args)}>''' if args != None and len(args) > 0 else ""
def type(self, t, raw = False):
repr = "???" if t == None else t.repr()
if repr == "U:UNKNOWN":
pass
return ("" if raw else "{T}") + repr
def var(self, v):
result = ""
is_prop = isinstance(v, types.Property)
if isinstance(v, types.Field) or isinstance(v, types.Property):
m = v
result += self.pre_if("", m.is_static)
result += "private " if m.visibility == types.VISIBILITY.PRIVATE else "protected " if m.visibility == types.VISIBILITY.PROTECTED else "public " if m.visibility == types.VISIBILITY.PUBLIC else "VISIBILITY-NOT-SET"
result += f'''{('@prop ' if is_prop else '')}'''
if v.mutability != None:
result += f'''{('@unused ' if v.mutability.unused else '')}'''
result += f'''{('@mutated ' if v.mutability.mutated else '')}'''
result += f'''{('@reass ' if v.mutability.reassigned else '')}'''
result += f'''{v.name}{('()' if is_prop else '')}: {self.type(v.type)}'''
if isinstance(v, stats.VariableDeclaration) or isinstance(v, stats.ForVariable) or isinstance(v, types.Field) or isinstance(v, types.MethodParameter):
init = (v).initializer
if init != None:
result += self.pre(" = ", self.expr(init))
return result
def expr(self, expr):
res = "UNKNOWN-EXPR"
if isinstance(expr, exprs.NewExpression):
res = f'''new {self.type(expr.cls_)}({('...' if self.preview_only else ', '.join(list(map(lambda x: self.expr(x), expr.args))))})'''
elif isinstance(expr, exprs.UnresolvedNewExpression):
res = f'''new {self.type(expr.cls_)}({('...' if self.preview_only else ', '.join(list(map(lambda x: self.expr(x), expr.args))))})'''
elif isinstance(expr, exprs.Identifier):
res = f'''{{ID}}{expr.text}'''
elif isinstance(expr, exprs.PropertyAccessExpression):
res = f'''{self.expr(expr.object)}.{{PA}}{expr.property_name}'''
elif isinstance(expr, exprs.UnresolvedCallExpression):
type_args = f'''<{', '.join(list(map(lambda x: self.type(x), expr.type_args)))}>''' if len(expr.type_args) > 0 else ""
res = f'''{self.expr(expr.func)}{type_args}({('...' if self.preview_only else ', '.join(list(map(lambda x: self.expr(x), expr.args))))})'''
elif isinstance(expr, exprs.UnresolvedMethodCallExpression):
type_args = f'''<{', '.join(list(map(lambda x: self.type(x), expr.type_args)))}>''' if len(expr.type_args) > 0 else ""
res = f'''{self.expr(expr.object)}.{{UM}}{expr.method_name}{type_args}({('...' if self.preview_only else ', '.join(list(map(lambda x: self.expr(x), expr.args))))})'''
elif isinstance(expr, exprs.InstanceMethodCallExpression):
type_args = f'''<{', '.join(list(map(lambda x: self.type(x), expr.type_args)))}>''' if len(expr.type_args) > 0 else ""
res = f'''{self.expr(expr.object)}.{{M}}{expr.method.name}{type_args}({('...' if self.preview_only else ', '.join(list(map(lambda x: self.expr(x), expr.args))))})'''
elif isinstance(expr, exprs.StaticMethodCallExpression):
type_args = f'''<{', '.join(list(map(lambda x: self.type(x), expr.type_args)))}>''' if len(expr.type_args) > 0 else ""
res = f'''{expr.method.parent_interface.name}.{{M}}{expr.method.name}{type_args}({('...' if self.preview_only else ', '.join(list(map(lambda x: self.expr(x), expr.args))))})'''
elif isinstance(expr, exprs.GlobalFunctionCallExpression):
res = f'''{expr.func.name}({('...' if self.preview_only else ', '.join(list(map(lambda x: self.expr(x), expr.args))))})'''
elif isinstance(expr, exprs.LambdaCallExpression):
res = f'''{self.expr(expr.method)}({('...' if self.preview_only else ', '.join(list(map(lambda x: self.expr(x), expr.args))))})'''
elif isinstance(expr, exprs.BooleanLiteral):
res = f'''{('true' if expr.bool_value else 'false')}'''
elif isinstance(expr, exprs.StringLiteral):
res = f'''{json.dumps(expr.string_value, separators=(',', ':'))}'''
elif isinstance(expr, exprs.NumericLiteral):
res = f'''{expr.value_as_text}'''
elif isinstance(expr, exprs.CharacterLiteral):
res = f'''\'{expr.char_value}\''''
elif isinstance(expr, exprs.ElementAccessExpression):
res = f'''({self.expr(expr.object)})[{self.expr(expr.element_expr)}]'''
elif isinstance(expr, exprs.TemplateString):
res = "`" + "".join(list(map(lambda x: x.literal_text if x.is_literal else "${" + self.expr(x.expression) + "}", expr.parts))) + "`"
elif isinstance(expr, exprs.BinaryExpression):
res = f'''{self.expr(expr.left)} {expr.operator} {self.expr(expr.right)}'''
elif isinstance(expr, exprs.ArrayLiteral):
res = f'''[{', '.join(list(map(lambda x: self.expr(x), expr.items)))}]'''
elif isinstance(expr, exprs.CastExpression):
res = f'''<{self.type(expr.new_type)}>({self.expr(expr.expression)})'''
elif isinstance(expr, exprs.ConditionalExpression):
res = f'''{self.expr(expr.condition)} ? {self.expr(expr.when_true)} : {self.expr(expr.when_false)}'''
elif isinstance(expr, exprs.InstanceOfExpression):
res = f'''{self.expr(expr.expr)} instanceof {self.type(expr.check_type)}'''
elif isinstance(expr, exprs.ParenthesizedExpression):
res = f'''({self.expr(expr.expression)})'''
elif isinstance(expr, exprs.RegexLiteral):
            res = f'''/{expr.pattern}/{('g' if expr.global_ else '')}{('i' if expr.case_insensitive else '')}'''
elif isinstance(expr, types.Lambda):
res = f'''({', '.join(list(map(lambda x: x.name + (': ' + self.type(x.type) if x.type != None else ''), expr.parameters)))})''' + (f''' @captures({', '.join(list(map(lambda x: x.name, expr.captures)))})''' if expr.captures != None and len(expr.captures) > 0 else "") + f''' => {{ {self.raw_block(expr.body)} }}'''
elif isinstance(expr, exprs.UnaryExpression) and expr.unary_type == exprs.UNARY_TYPE.PREFIX:
res = f'''{expr.operator}{self.expr(expr.operand)}'''
elif isinstance(expr, exprs.UnaryExpression) and expr.unary_type == exprs.UNARY_TYPE.POSTFIX:
res = f'''{self.expr(expr.operand)}{expr.operator}'''
elif isinstance(expr, exprs.MapLiteral):
repr = ",\n".join(list(map(lambda item: f'''{item.key}: {self.expr(item.value)}''', expr.items)))
res = "{L:M}" + ("{}" if repr == "" else f'''{{\n{self.pad(repr)}\n}}''' if "\n" in repr else f'''{{ {repr} }}''')
elif isinstance(expr, exprs.NullLiteral):
res = f'''null'''
elif isinstance(expr, exprs.AwaitExpression):
res = f'''await {self.expr(expr.expr)}'''
elif isinstance(expr, refs.ThisReference):
res = f'''{{R}}this'''
elif isinstance(expr, refs.StaticThisReference):
res = f'''{{R:Static}}this'''
elif isinstance(expr, refs.EnumReference):
res = f'''{{R:Enum}}{expr.decl.name}'''
elif isinstance(expr, refs.ClassReference):
res = f'''{{R:Cls}}{expr.decl.name}'''
elif isinstance(expr, refs.MethodParameterReference):
res = f'''{{R:MetP}}{expr.decl.name}'''
elif isinstance(expr, refs.VariableDeclarationReference):
res = f'''{{V}}{expr.decl.name}'''
elif isinstance(expr, refs.ForVariableReference):
res = f'''{{R:ForV}}{expr.decl.name}'''
elif isinstance(expr, refs.ForeachVariableReference):
res = f'''{{R:ForEV}}{expr.decl.name}'''
elif isinstance(expr, refs.CatchVariableReference):
res = f'''{{R:CatchV}}{expr.decl.name}'''
elif isinstance(expr, refs.GlobalFunctionReference):
res = f'''{{R:GFunc}}{expr.decl.name}'''
elif isinstance(expr, refs.SuperReference):
res = f'''{{R}}super'''
elif isinstance(expr, refs.StaticFieldReference):
res = f'''{{R:StFi}}{expr.decl.parent_interface.name}::{expr.decl.name}'''
elif isinstance(expr, refs.StaticPropertyReference):
res = f'''{{R:StPr}}{expr.decl.parent_class.name}::{expr.decl.name}'''
elif isinstance(expr, refs.InstanceFieldReference):
res = f'''{self.expr(expr.object)}.{{F}}{expr.field.name}'''
elif isinstance(expr, refs.InstancePropertyReference):
res = f'''{self.expr(expr.object)}.{{P}}{expr.property.name}'''
elif isinstance(expr, refs.EnumMemberReference):
res = f'''{{E}}{expr.decl.parent_enum.name}::{expr.decl.name}'''
elif isinstance(expr, exprs.NullCoalesceExpression):
res = f'''{self.expr(expr.default_expr)} ?? {self.expr(expr.expr_if_null)}'''
else:
pass
if self.show_types:
res = f'''<{self.type(expr.get_type(), True)}>({res})'''
return res
def block(self, block, allow_one_liner = True):
if self.preview_only:
return " { ... }"
stmt_len = len(block.statements)
return " { }" if stmt_len == 0 else f'''\n{self.pad(self.raw_block(block))}''' if allow_one_liner and stmt_len == 1 else f''' {{\n{self.pad(self.raw_block(block))}\n}}'''
def stmt(self, stmt):
res = "UNKNOWN-STATEMENT"
if isinstance(stmt, stats.BreakStatement):
res = "break;"
elif isinstance(stmt, stats.ReturnStatement):
res = "return;" if stmt.expression == None else f'''return {self.expr(stmt.expression)};'''
elif isinstance(stmt, stats.UnsetStatement):
res = f'''unset {self.expr(stmt.expression)};'''
elif isinstance(stmt, stats.ThrowStatement):
res = f'''throw {self.expr(stmt.expression)};'''
elif isinstance(stmt, stats.ExpressionStatement):
res = f'''{self.expr(stmt.expression)};'''
elif isinstance(stmt, stats.VariableDeclaration):
res = f'''var {self.var(stmt)};'''
elif isinstance(stmt, stats.ForeachStatement):
res = f'''for (const {stmt.item_var.name} of {self.expr(stmt.items)})''' + self.block(stmt.body)
elif isinstance(stmt, stats.IfStatement):
else_if = stmt.else_ != None and len(stmt.else_.statements) == 1 and isinstance(stmt.else_.statements[0], stats.IfStatement)
res = f'''if ({self.expr(stmt.condition)}){self.block(stmt.then)}'''
if not self.preview_only:
res += (f'''\nelse {self.stmt(stmt.else_.statements[0])}''' if else_if else "") + (f'''\nelse''' + self.block(stmt.else_) if not else_if and stmt.else_ != None else "")
elif isinstance(stmt, stats.WhileStatement):
res = f'''while ({self.expr(stmt.condition)})''' + self.block(stmt.body)
elif isinstance(stmt, stats.ForStatement):
res = f'''for ({(self.var(stmt.item_var) if stmt.item_var != None else '')}; {self.expr(stmt.condition)}; {self.expr(stmt.incrementor)})''' + self.block(stmt.body)
elif isinstance(stmt, stats.DoStatement):
res = f'''do{self.block(stmt.body)} while ({self.expr(stmt.condition)})'''
elif isinstance(stmt, stats.TryStatement):
res = "try" + self.block(stmt.try_body, False) + (f''' catch ({stmt.catch_var.name}){self.block(stmt.catch_body)}''' if stmt.catch_body != None else "") + ("finally" + self.block(stmt.finally_body) if stmt.finally_body != None else "")
elif isinstance(stmt, stats.ContinueStatement):
res = f'''continue;'''
else:
pass
return res if self.preview_only else self.leading(stmt) + res
def raw_block(self, block):
return "\n".join(list(map(lambda stmt: self.stmt(stmt), block.statements)))
def method_base(self, method, returns):
if method == None:
return ""
name = method.name if isinstance(method, types.Method) else "constructor" if isinstance(method, types.Constructor) else method.name if isinstance(method, types.GlobalFunction) else "???"
type_args = method.type_arguments if isinstance(method, types.Method) else None
return self.pre_if("/* throws */ ", method.throws) + f'''{name}{self.type_args(type_args)}({', '.join(list(map(lambda p: self.leading(p) + self.var(p), method.parameters)))})''' + ("" if isinstance(returns, astTypes.VoidType) else f''': {self.type(returns)}''') + (f''' {{\n{self.pad(self.raw_block(method.body))}\n}}''' if method.body != None else ";")
def method(self, method):
return "" if method == None else ("static " if method.is_static else "") + ("@mutates " if method.attributes != None and "mutates" in method.attributes else "") + self.method_base(method, method.returns)
def class_like(self, cls_):
res_list = []
res_list.append("\n".join(list(map(lambda field: self.var(field) + ";", cls_.fields))))
if isinstance(cls_, types.Class):
res_list.append("\n".join(list(map(lambda prop: self.var(prop) + ";", cls_.properties))))
res_list.append(self.method_base(cls_.constructor_, astTypes.VoidType.instance))
res_list.append("\n\n".join(list(map(lambda method: self.method(method), cls_.methods))))
return self.pad("\n\n".join(list(filter(lambda x: x != "", res_list))))
def pad(self, str):
return "\n".join(list(map(lambda x: f''' {x}''', re.split("\\n", str))))
def imp(self, imp):
return "" + ("X" if isinstance(imp, types.UnresolvedImport) else "C" if isinstance(imp, types.Class) else "I" if isinstance(imp, types.Interface) else "E" if isinstance(imp, types.Enum) else "???") + f''':{imp.name}'''
def node_repr(self, node):
if isinstance(node, stats.Statement):
return self.stmt(node)
elif isinstance(node, exprs.Expression):
return self.expr(node)
else:
return "/* TODO: missing */"
def generate(self, source_file):
imps = list(map(lambda imp: (f'''import * as {imp.import_as}''' if imp.import_all else f'''import {{ {', '.join(list(map(lambda x: self.imp(x), imp.imports)))} }}''') + f''' from "{imp.export_scope.package_name}{self.pre('/', imp.export_scope.scope_name)}";''', source_file.imports))
enums = list(map(lambda enum_: f'''{self.leading(enum_)}enum {enum_.name} {{ {', '.join(list(map(lambda x: x.name, enum_.values)))} }}''', source_file.enums))
intfs = list(map(lambda intf: f'''{self.leading(intf)}interface {intf.name}{self.type_args(intf.type_arguments)}''' + f'''{self.pre_arr(' extends ', list(map(lambda x: self.type(x), intf.base_interfaces)))} {{\n{self.class_like(intf)}\n}}''', source_file.interfaces))
classes = list(map(lambda cls_: f'''{self.leading(cls_)}class {cls_.name}{self.type_args(cls_.type_arguments)}''' + self.pre(" extends ", self.type(cls_.base_class) if cls_.base_class != None else None) + self.pre_arr(" implements ", list(map(lambda x: self.type(x), cls_.base_interfaces))) + f''' {{\n{self.class_like(cls_)}\n}}''', source_file.classes))
funcs = list(map(lambda func: f'''{self.leading(func)}function {func.name}{self.method_base(func, func.returns)}''', source_file.funcs))
main = self.raw_block(source_file.main_block)
result = f'''// export scope: {source_file.export_scope.package_name}/{source_file.export_scope.scope_name}\n''' + "\n\n".join(list(filter(lambda x: x != "", ["\n".join(imps), "\n".join(enums), "\n\n".join(intfs), "\n\n".join(classes), "\n\n".join(funcs), main])))
return result
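# Hypothetical usage sketch (`source_file` stands for an already-parsed OneLang
# source-file AST object; it is not constructed in this module):
#   full_overview = TSOverviewGenerator(show_types=True).generate(source_file)
#   short_form = TSOverviewGenerator.preview.expr(some_expression)  # preview-only rendering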
|
from onelang_core import *
import OneLang.One.Ast.Expressions as exprs
import OneLang.One.Ast.Statements as stats
import OneLang.One.Ast.Types as types
import OneLang.One.Ast.AstTypes as astTypes
import OneLang.One.Ast.References as refs
import OneLang.One.Ast.Interfaces as ints
import onelang_core as one
import json
import re
@one.static_init
class TSOverviewGenerator:
@classmethod
def static_init(cls):
cls.preview = cls(True)
def __init__(self, preview_only = False, show_types = False):
self.preview_only = preview_only
self.show_types = show_types
def leading(self, item):
result = ""
if item.leading_trivia != None and len(item.leading_trivia) > 0:
result += item.leading_trivia
if item.attributes != None:
result += "".join(list(map(lambda x: f'''/// {{ATTR}} name="{x}", value={json.dumps(item.attributes.get(x), separators=(',', ':'))}\n''', item.attributes.keys())))
return result
def pre_arr(self, prefix, value):
return f'''{prefix}{", ".join(value)}''' if len(value) > 0 else ""
def pre_if(self, prefix, condition):
return prefix if condition else ""
def pre(self, prefix, value):
return f'''{prefix}{value}''' if value != None else ""
def type_args(self, args):
return f'''<{", ".join(args)}>''' if args != None and len(args) > 0 else ""
def type(self, t, raw = False):
repr = "???" if t == None else t.repr()
if repr == "U:UNKNOWN":
pass
return ("" if raw else "{T}") + repr
def var(self, v):
result = ""
is_prop = isinstance(v, types.Property)
if isinstance(v, types.Field) or isinstance(v, types.Property):
m = v
result += self.pre_if("", m.is_static)
result += "private " if m.visibility == types.VISIBILITY.PRIVATE else "protected " if m.visibility == types.VISIBILITY.PROTECTED else "public " if m.visibility == types.VISIBILITY.PUBLIC else "VISIBILITY-NOT-SET"
result += f'''{("@prop " if is_prop else "")}'''
if v.mutability != None:
result += f'''{("@unused " if v.mutability.unused else "")}'''
result += f'''{("@mutated " if v.mutability.mutated else "")}'''
result += f'''{("@reass " if v.mutability.reassigned else "")}'''
result += f'''{v.name}{("()" if is_prop else "")}: {self.type(v.type)}'''
if isinstance(v, stats.VariableDeclaration) or isinstance(v, stats.ForVariable) or isinstance(v, types.Field) or isinstance(v, types.MethodParameter):
init = (v).initializer
if init != None:
result += self.pre(" = ", self.expr(init))
return result
def expr(self, expr):
res = "UNKNOWN-EXPR"
if isinstance(expr, exprs.NewExpression):
res = f'''new {self.type(expr.cls_)}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
elif isinstance(expr, exprs.UnresolvedNewExpression):
res = f'''new {self.type(expr.cls_)}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
elif isinstance(expr, exprs.Identifier):
res = f'''{{ID}}{expr.text}'''
elif isinstance(expr, exprs.PropertyAccessExpression):
res = f'''{self.expr(expr.object)}.{{PA}}{expr.property_name}'''
elif isinstance(expr, exprs.UnresolvedCallExpression):
type_args = f'''<{", ".join(list(map(lambda x: self.type(x), expr.type_args)))}>''' if len(expr.type_args) > 0 else ""
res = f'''{self.expr(expr.func)}{type_args}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
elif isinstance(expr, exprs.UnresolvedMethodCallExpression):
type_args = f'''<{", ".join(list(map(lambda x: self.type(x), expr.type_args)))}>''' if len(expr.type_args) > 0 else ""
res = f'''{self.expr(expr.object)}.{{UM}}{expr.method_name}{type_args}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
elif isinstance(expr, exprs.InstanceMethodCallExpression):
type_args = f'''<{", ".join(list(map(lambda x: self.type(x), expr.type_args)))}>''' if len(expr.type_args) > 0 else ""
res = f'''{self.expr(expr.object)}.{{M}}{expr.method.name}{type_args}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
elif isinstance(expr, exprs.StaticMethodCallExpression):
type_args = f'''<{", ".join(list(map(lambda x: self.type(x), expr.type_args)))}>''' if len(expr.type_args) > 0 else ""
res = f'''{expr.method.parent_interface.name}.{{M}}{expr.method.name}{type_args}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
elif isinstance(expr, exprs.GlobalFunctionCallExpression):
res = f'''{expr.func.name}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
elif isinstance(expr, exprs.LambdaCallExpression):
res = f'''{self.expr(expr.method)}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
elif isinstance(expr, exprs.BooleanLiteral):
res = f'''{("true" if expr.bool_value else "false")}'''
elif isinstance(expr, exprs.StringLiteral):
res = f'''{json.dumps(expr.string_value, separators=(',', ':'))}'''
elif isinstance(expr, exprs.NumericLiteral):
res = f'''{expr.value_as_text}'''
elif isinstance(expr, exprs.CharacterLiteral):
res = f'''\'{expr.char_value}\''''
elif isinstance(expr, exprs.ElementAccessExpression):
res = f'''({self.expr(expr.object)})[{self.expr(expr.element_expr)}]'''
elif isinstance(expr, exprs.TemplateString):
res = "`" + "".join(list(map(lambda x: x.literal_text if x.is_literal else "${" + self.expr(x.expression) + "}", expr.parts))) + "`"
elif isinstance(expr, exprs.BinaryExpression):
res = f'''{self.expr(expr.left)} {expr.operator} {self.expr(expr.right)}'''
elif isinstance(expr, exprs.ArrayLiteral):
res = f'''[{", ".join(list(map(lambda x: self.expr(x), expr.items)))}]'''
elif isinstance(expr, exprs.CastExpression):
res = f'''<{self.type(expr.new_type)}>({self.expr(expr.expression)})'''
elif isinstance(expr, exprs.ConditionalExpression):
res = f'''{self.expr(expr.condition)} ? {self.expr(expr.when_true)} : {self.expr(expr.when_false)}'''
elif isinstance(expr, exprs.InstanceOfExpression):
res = f'''{self.expr(expr.expr)} instanceof {self.type(expr.check_type)}'''
elif isinstance(expr, exprs.ParenthesizedExpression):
res = f'''({self.expr(expr.expression)})'''
elif isinstance(expr, exprs.RegexLiteral):
            res = f'''/{expr.pattern}/{("g" if expr.global_ else "")}{("i" if expr.case_insensitive else "")}'''
elif isinstance(expr, types.Lambda):
res = f'''({", ".join(list(map(lambda x: x.name + (": " + self.type(x.type) if x.type != None else ""), expr.parameters)))})''' + (f''' @captures({", ".join(list(map(lambda x: x.name, expr.captures)))})''' if expr.captures != None and len(expr.captures) > 0 else "") + f''' => {{ {self.raw_block(expr.body)} }}'''
elif isinstance(expr, exprs.UnaryExpression) and expr.unary_type == exprs.UNARY_TYPE.PREFIX:
res = f'''{expr.operator}{self.expr(expr.operand)}'''
elif isinstance(expr, exprs.UnaryExpression) and expr.unary_type == exprs.UNARY_TYPE.POSTFIX:
res = f'''{self.expr(expr.operand)}{expr.operator}'''
elif isinstance(expr, exprs.MapLiteral):
repr = ",\n".join(list(map(lambda item: f'''{item.key}: {self.expr(item.value)}''', expr.items)))
res = "{L:M}" + ("{}" if repr == "" else f'''{{\n{self.pad(repr)}\n}}''' if "\n" in repr else f'''{{ {repr} }}''')
elif isinstance(expr, exprs.NullLiteral):
res = f'''null'''
elif isinstance(expr, exprs.AwaitExpression):
res = f'''await {self.expr(expr.expr)}'''
elif isinstance(expr, refs.ThisReference):
res = f'''{{R}}this'''
elif isinstance(expr, refs.StaticThisReference):
res = f'''{{R:Static}}this'''
elif isinstance(expr, refs.EnumReference):
res = f'''{{R:Enum}}{expr.decl.name}'''
elif isinstance(expr, refs.ClassReference):
res = f'''{{R:Cls}}{expr.decl.name}'''
elif isinstance(expr, refs.MethodParameterReference):
res = f'''{{R:MetP}}{expr.decl.name}'''
elif isinstance(expr, refs.VariableDeclarationReference):
res = f'''{{V}}{expr.decl.name}'''
elif isinstance(expr, refs.ForVariableReference):
res = f'''{{R:ForV}}{expr.decl.name}'''
elif isinstance(expr, refs.ForeachVariableReference):
res = f'''{{R:ForEV}}{expr.decl.name}'''
elif isinstance(expr, refs.CatchVariableReference):
res = f'''{{R:CatchV}}{expr.decl.name}'''
elif isinstance(expr, refs.GlobalFunctionReference):
res = f'''{{R:GFunc}}{expr.decl.name}'''
elif isinstance(expr, refs.SuperReference):
res = f'''{{R}}super'''
elif isinstance(expr, refs.StaticFieldReference):
res = f'''{{R:StFi}}{expr.decl.parent_interface.name}::{expr.decl.name}'''
elif isinstance(expr, refs.StaticPropertyReference):
res = f'''{{R:StPr}}{expr.decl.parent_class.name}::{expr.decl.name}'''
elif isinstance(expr, refs.InstanceFieldReference):
res = f'''{self.expr(expr.object)}.{{F}}{expr.field.name}'''
elif isinstance(expr, refs.InstancePropertyReference):
res = f'''{self.expr(expr.object)}.{{P}}{expr.property.name}'''
elif isinstance(expr, refs.EnumMemberReference):
res = f'''{{E}}{expr.decl.parent_enum.name}::{expr.decl.name}'''
elif isinstance(expr, exprs.NullCoalesceExpression):
res = f'''{self.expr(expr.default_expr)} ?? {self.expr(expr.expr_if_null)}'''
else:
pass
if self.show_types:
res = f'''<{self.type(expr.get_type(), True)}>({res})'''
return res
def block(self, block, allow_one_liner = True):
if self.preview_only:
return " { ... }"
stmt_len = len(block.statements)
return " { }" if stmt_len == 0 else f'''\n{self.pad(self.raw_block(block))}''' if allow_one_liner and stmt_len == 1 else f''' {{\n{self.pad(self.raw_block(block))}\n}}'''
def stmt(self, stmt):
res = "UNKNOWN-STATEMENT"
if isinstance(stmt, stats.BreakStatement):
res = "break;"
elif isinstance(stmt, stats.ReturnStatement):
res = "return;" if stmt.expression == None else f'''return {self.expr(stmt.expression)};'''
elif isinstance(stmt, stats.UnsetStatement):
res = f'''unset {self.expr(stmt.expression)};'''
elif isinstance(stmt, stats.ThrowStatement):
res = f'''throw {self.expr(stmt.expression)};'''
elif isinstance(stmt, stats.ExpressionStatement):
res = f'''{self.expr(stmt.expression)};'''
elif isinstance(stmt, stats.VariableDeclaration):
res = f'''var {self.var(stmt)};'''
elif isinstance(stmt, stats.ForeachStatement):
res = f'''for (const {stmt.item_var.name} of {self.expr(stmt.items)})''' + self.block(stmt.body)
elif isinstance(stmt, stats.IfStatement):
else_if = stmt.else_ != None and len(stmt.else_.statements) == 1 and isinstance(stmt.else_.statements[0], stats.IfStatement)
res = f'''if ({self.expr(stmt.condition)}){self.block(stmt.then)}'''
if not self.preview_only:
res += (f'''\nelse {self.stmt(stmt.else_.statements[0])}''' if else_if else "") + (f'''\nelse''' + self.block(stmt.else_) if not else_if and stmt.else_ != None else "")
elif isinstance(stmt, stats.WhileStatement):
res = f'''while ({self.expr(stmt.condition)})''' + self.block(stmt.body)
elif isinstance(stmt, stats.ForStatement):
res = f'''for ({(self.var(stmt.item_var) if stmt.item_var != None else "")}; {self.expr(stmt.condition)}; {self.expr(stmt.incrementor)})''' + self.block(stmt.body)
elif isinstance(stmt, stats.DoStatement):
res = f'''do{self.block(stmt.body)} while ({self.expr(stmt.condition)})'''
elif isinstance(stmt, stats.TryStatement):
res = "try" + self.block(stmt.try_body, False) + (f''' catch ({stmt.catch_var.name}){self.block(stmt.catch_body)}''' if stmt.catch_body != None else "") + ("finally" + self.block(stmt.finally_body) if stmt.finally_body != None else "")
elif isinstance(stmt, stats.ContinueStatement):
res = f'''continue;'''
else:
pass
return res if self.preview_only else self.leading(stmt) + res
def raw_block(self, block):
return "\n".join(list(map(lambda stmt: self.stmt(stmt), block.statements)))
def method_base(self, method, returns):
if method == None:
return ""
name = method.name if isinstance(method, types.Method) else "constructor" if isinstance(method, types.Constructor) else method.name if isinstance(method, types.GlobalFunction) else "???"
type_args = method.type_arguments if isinstance(method, types.Method) else None
return self.pre_if("/* throws */ ", method.throws) + f'''{name}{self.type_args(type_args)}({", ".join(list(map(lambda p: self.leading(p) + self.var(p), method.parameters)))})''' + ("" if isinstance(returns, astTypes.VoidType) else f''': {self.type(returns)}''') + (f''' {{\n{self.pad(self.raw_block(method.body))}\n}}''' if method.body != None else ";")
def method(self, method):
return "" if method == None else ("static " if method.is_static else "") + ("@mutates " if method.attributes != None and "mutates" in method.attributes else "") + self.method_base(method, method.returns)
def class_like(self, cls_):
res_list = []
res_list.append("\n".join(list(map(lambda field: self.var(field) + ";", cls_.fields))))
if isinstance(cls_, types.Class):
res_list.append("\n".join(list(map(lambda prop: self.var(prop) + ";", cls_.properties))))
res_list.append(self.method_base(cls_.constructor_, astTypes.VoidType.instance))
res_list.append("\n\n".join(list(map(lambda method: self.method(method), cls_.methods))))
return self.pad("\n\n".join(list(filter(lambda x: x != "", res_list))))
def pad(self, str):
return "\n".join(list(map(lambda x: f''' {x}''', re.split("\\n", str))))
def imp(self, imp):
return "" + ("X" if isinstance(imp, types.UnresolvedImport) else "C" if isinstance(imp, types.Class) else "I" if isinstance(imp, types.Interface) else "E" if isinstance(imp, types.Enum) else "???") + f''':{imp.name}'''
def node_repr(self, node):
if isinstance(node, stats.Statement):
return self.stmt(node)
elif isinstance(node, exprs.Expression):
return self.expr(node)
else:
return "/* TODO: missing */"
def generate(self, source_file):
imps = list(map(lambda imp: (f'''import * as {imp.import_as}''' if imp.import_all else f'''import {{ {", ".join(list(map(lambda x: self.imp(x), imp.imports)))} }}''') + f''' from "{imp.export_scope.package_name}{self.pre("/", imp.export_scope.scope_name)}";''', source_file.imports))
enums = list(map(lambda enum_: f'''{self.leading(enum_)}enum {enum_.name} {{ {", ".join(list(map(lambda x: x.name, enum_.values)))} }}''', source_file.enums))
intfs = list(map(lambda intf: f'''{self.leading(intf)}interface {intf.name}{self.type_args(intf.type_arguments)}''' + f'''{self.pre_arr(" extends ", list(map(lambda x: self.type(x), intf.base_interfaces)))} {{\n{self.class_like(intf)}\n}}''', source_file.interfaces))
classes = list(map(lambda cls_: f'''{self.leading(cls_)}class {cls_.name}{self.type_args(cls_.type_arguments)}''' + self.pre(" extends ", self.type(cls_.base_class) if cls_.base_class != None else None) + self.pre_arr(" implements ", list(map(lambda x: self.type(x), cls_.base_interfaces))) + f''' {{\n{self.class_like(cls_)}\n}}''', source_file.classes))
funcs = list(map(lambda func: f'''{self.leading(func)}function {func.name}{self.method_base(func, func.returns)}''', source_file.funcs))
main = self.raw_block(source_file.main_block)
result = f'''// export scope: {source_file.export_scope.package_name}/{source_file.export_scope.scope_name}\n''' + "\n\n".join(list(filter(lambda x: x != "", ["\n".join(imps), "\n".join(enums), "\n\n".join(intfs), "\n\n".join(classes), "\n\n".join(funcs), main])))
return result
|
from math import ceil
def chunk(lst, size):
return list(
map(lambda x: lst[x * size:x * size + size],
list(range(0, ceil(len(lst) / size)))))
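# e.g. chunk([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]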
def find_str_index(str1, str2):
if not str2:
return "str2 not none"
for x in str2:
if x in str1:
return str1.index(x)
def find_sub_string(s, words):
if not words:
return []
tmp = []
str1 = ''
str2 = ''
for x in words:
str1 += x
if str1 in s:
tmp.append(s.index(str1))
words.reverse()
for x in words:
str2 += x
if str2 in s:
tmp.append(s.index(str2))
return tmp
def longest_valid_parentheses(s: str) -> int:
left = '('
right = ')'
n = 0
stack = [-1]
for x in range(len(s)):
        if s[x] == left:
stack.append(x)
else:
stack.pop()
if not stack:
stack.append(x)
if stack:
n = max(n, x - stack[-1])
return n
def search(nums, target) -> int:
if target in nums:
return nums.index(target)
else:
return -1
def search_range(nums, target):
indices = [i for i, x in enumerate(nums) if x == target]
if not indices:
return [-1, -1]
return indices
def binary_search(l, item):
low = 0
high = len(l) - 1
while low <= high:
mid = (low + high) // 2
if l[mid] == item:
return mid
if l[mid] > item:
high = mid - 1
else:
low = mid + 1
return None
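# e.g. binary_search([1, 3, 5, 7, 8, 9], 7) -> 3; returns None when the item is absent
# (assumes the input list is already sorted in ascending order)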
def bin_search(l, item):
low = 0
high = len(l) - 1
while low <= high:
mid = (low + high) // 2
if item == l[mid]:
return mid
if item > l[mid]:
low = mid + 1
else:
high = mid - 1
return None
def b_search(l, item):
low = 0
high = len(l) - 1
while low <= high:
mid = (low + high) // 2
if item == l[mid]:
return mid
if item > l[mid]:
low = mid + 1
else:
high = mid - 1
return None
# Bubble sort (sorts the list in place and also returns it)
def b_sort(l):
    for _ in range(len(l) - 1):
        for k in range(len(l) - 1):
            if l[k] > l[k + 1]:
                l[k], l[k + 1] = l[k + 1], l[k]
    return l
# Insertion sort
def i_sort():
pass
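# A minimal insertion sort sketch for the i_sort stub above; this helper is an
# illustrative addition, not part of the original module.
def insertion_sort_example(l):
    for i in range(1, len(l)):
        current = l[i]
        j = i - 1
        # shift larger elements one slot to the right, then drop `current` into place
        while j >= 0 and l[j] > current:
            l[j + 1] = l[j]
            j -= 1
        l[j + 1] = current
    return l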
# Selection sort
def s_sort(l):
    low = 0
    high = len(l) - 1
    # repeatedly swap the smallest remaining element into position `low`
    while low <= high:
        min_index = l.index(min(l[low:]), low)
        l[low], l[min_index] = l[min_index], l[low]
        low += 1
    return l
# Quick sort
def q_sort(l):
pass
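# A minimal quicksort sketch for the q_sort stub above; this helper is an
# illustrative addition, not part of the original module.
def quick_sort_example(l):
    if len(l) <= 1:
        return l
    pivot, rest = l[0], l[1:]
    smaller = [x for x in rest if x < pivot]
    larger = [x for x in rest if x >= pivot]
    return quick_sort_example(smaller) + [pivot] + quick_sort_example(larger)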
# Recursive algorithms
# Match parentheses
def find_k(strings):
stack = []
count = 0
for s in strings:
if s == '(':
stack.append(s)
elif len(stack) > 0 and s == ')':
stack.pop()
count += 1
else:
return 0
return count * 2
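# e.g. find_k("(()()())()") -> 10 (every '(' was matched); an unmatched ')' returns 0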
def insert_index(l, target):
l.append(target)
l.sort()
return l.index(target)
def multiply(n1, n2):
return f"{eval(f"{n1}*{n2}")}"
if __name__ == '__main__':
a = find_str_index('hello', 'l')
b = find_sub_string("barfoothefoobarman", ["foo", "bar", 'aaa'])
l = [1, 2, 3, 4, 5, 6]
k = "(()()())()"
# c = longest_valid_parentheses("(()()()))")
# print(c)
nums = [4, 5, 6, 7, 0, 1, 2]
# target = 6
# s = search(nums,target)
# ss = search_range([5, 7, 7, 8, 8, 10], 18)
# print(ss)
# x = [1, 3, 5, 7, 8, 9]
# # bs = binary_search(x, 9)
# bs = bin_search(l, 4)
# b = b_search(x, 9)
# print(bs, b)
s = b_sort(nums)
print(s)
f = find_k(k)
print(f)
select = s_sort(nums)
print(select)
print(multiply("12", "12"))
# t = [1, 3, 5, 6]
# st = insert_index(t, 7)
# print(st)
|
from math import ceil
def chunk(lst, size):
return list(
map(lambda x: lst[x * size:x * size + size],
list(range(0, ceil(len(lst) / size)))))
def find_str_index(str1, str2):
if not str2:
return "str2 not none"
for x in str2:
if x in str1:
return str1.index(x)
def find_sub_string(s, words):
if not words:
return []
tmp = []
str1 = ''
str2 = ''
for x in words:
str1 += x
if str1 in s:
tmp.append(s.index(str1))
words.reverse()
for x in words:
str2 += x
if str2 in s:
tmp.append(s.index(str2))
return tmp
def longest_valid_parentheses(s: str) -> int:
left = '('
right = ')'
n = 0
stack = [-1]
for x in range(len(s)):
        if s[x] == left:
stack.append(x)
else:
stack.pop()
if not stack:
stack.append(x)
if stack:
n = max(n, x - stack[-1])
return n
def search(nums, target) -> int:
if target in nums:
return nums.index(target)
else:
return -1
def search_range(nums, target):
indices = [i for i, x in enumerate(nums) if x == target]
if not indices:
return [-1, -1]
return indices
def binary_search(l, item):
low = 0
high = len(l) - 1
while low <= high:
mid = (low + high) // 2
if l[mid] == item:
return mid
if l[mid] > item:
high = mid - 1
else:
low = mid + 1
return None
def bin_search(l, item):
low = 0
high = len(l) - 1
while low <= high:
mid = (low + high) // 2
if item == l[mid]:
return mid
if item > l[mid]:
low = mid + 1
else:
high = mid - 1
return None
def b_search(l, item):
low = 0
high = len(l) - 1
while low <= high:
mid = (low + high) // 2
if item == l[mid]:
return mid
if item > l[mid]:
low = mid + 1
else:
high = mid - 1
return None
# Bubble sort (sorts the list in place and also returns it)
def b_sort(l):
    for _ in range(len(l) - 1):
        for k in range(len(l) - 1):
            if l[k] > l[k + 1]:
                l[k], l[k + 1] = l[k + 1], l[k]
    return l
# Insertion sort
def i_sort():
pass
# Selection sort
def s_sort(l):
    low = 0
    high = len(l) - 1
    # repeatedly swap the smallest remaining element into position `low`
    while low <= high:
        min_index = l.index(min(l[low:]), low)
        l[low], l[min_index] = l[min_index], l[low]
        low += 1
    return l
# Quick sort
def q_sort(l):
pass
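# q_sort above is left as a stub; the sketch below is one minimal recursive quicksort
# (illustrative only: the helper name quick_sort_sketch is new, not from the original file).
def quick_sort_sketch(l):
    # Out-of-place quicksort: pick the first element as pivot, partition, recurse.
    if len(l) <= 1:
        return l
    pivot, rest = l[0], l[1:]
    smaller = [x for x in rest if x < pivot]
    larger = [x for x in rest if x >= pivot]
    return quick_sort_sketch(smaller) + [pivot] + quick_sort_sketch(larger)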
# Recursive algorithms
# Match parentheses
def find_k(strings):
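    # Example: find_k("(()()())()") -> 10, i.e. the number of characters that belong to matched pairs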
stack = []
count = 0
for s in strings:
if s == '(':
stack.append(s)
elif len(stack) > 0 and s == ')':
stack.pop()
count += 1
else:
return 0
return count * 2
def insert_index(l, target):
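    # Example: insert_index([1, 3, 5, 6], 4) -> 2 (note: l is modified in place)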
l.append(target)
l.sort()
return l.index(target)
def multiply(n1, n2):
return f"{eval(f'{n1}*{n2}')}"
if __name__ == '__main__':
a = find_str_index('hello', 'l')
b = find_sub_string("barfoothefoobarman", ["foo", "bar", 'aaa'])
l = [1, 2, 3, 4, 5, 6]
k = "(()()())()"
# c = longest_valid_parentheses("(()()()))")
# print(c)
nums = [4, 5, 6, 7, 0, 1, 2]
# target = 6
# s = search(nums,target)
# ss = search_range([5, 7, 7, 8, 8, 10], 18)
# print(ss)
# x = [1, 3, 5, 7, 8, 9]
# # bs = binary_search(x, 9)
# bs = bin_search(l, 4)
# b = b_search(x, 9)
# print(bs, b)
s = b_sort(nums)
print(s)
f = find_k(k)
print(f)
select = s_sort(nums)
print(select)
print(multiply("12", "12"))
# t = [1, 3, 5, 6]
# st = insert_index(t, 7)
# print(st)
|
# # Introduction
# In this notebook, we will load an example time series, fit a growth model
# and plot the signals.
#
# ## Load example time series
#
# Let's start by loading example time series data.
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
from typing import Iterable, List, Optional, cast
import matplotlib.pyplot as plt
import pytest
import seaborn as sns
import staticchar as ch
from psbutils.filecheck import Plottable, figure_found
from psbutils.misc import find_subrepo_directory
from staticchar.plotting.core import AnnotationSpec
SUBREPO_DIR = find_subrepo_directory()
S_SHAPE_FOLDER = SUBREPO_DIR / "tests/test_data/S-shape"
def plot_figure(name: str, ax: Optional[Plottable] = None) -> List[str]:
sns.despine()
found = figure_found(ax, f"test_introduction/{name}")
plt.clf()
return [] if found else [name]
@pytest.mark.timeout(10)
def test_introduction():
dataset = ch.datasets.Dataset(S_SHAPE_FOLDER) # type: ignore # auto
raw_timeseries = dataset.get_a_frame()
rth = raw_timeseries.head()
# As we can see, there is some non-zero signal at the beginning, which we attribute to
# the media absorbance and media fluorescence (as initially we have very low cell density).
assert sorted(rth.keys().to_list()) == sorted([ch.TIME, "EYFP", "OD", "ECFP", "OD700", "mRFP1"])
colors = {"EYFP": "yellow", "ECFP": "cyan", "mRFP1": "red", "OD": "black"}
plt.figure(figsize=(6.4, 4.8))
ax = cast(plt.Axes, plt.subplot())
ch.plot_signals_against_time(raw_timeseries, signals=colors.keys(), time_column="time", ax=ax, colors=colors)
ax.legend()
figures_not_found = []
figures_not_found += plot_figure("plot1_raw_timeseries", ax)
# ## Pre-processing
# Let's assume this is the background and subtract it.
# (A more precise, but also costly alternative is to estimate this using several blanks).
# In[ ]:
subtracted = ch.subtract_background(
raw_timeseries, columns=["OD", "ECFP", "EYFP", "mRFP1"], strategy=ch.BackgroundChoices.Minimum
)
ax = cast(plt.Axes, plt.subplot())
ch.plot_signals_against_time(subtracted, signals=colors.keys(), time_column="time", ax=ax, colors=colors)
ax.legend()
figures_not_found += plot_figure("plot2_subtracted_timeseries", ax)
# ## Run characterization on an example
# In[ ]:
yaml_path = find_subrepo_directory() / "tests/configs/integral_basic.yml"
config = ch.config.load(yaml_path, ch.config.CharacterizationConfig)
# config
# ### Fitting a growth model
#
# Let's fit a growth model to the OD signal.
model_params = ch.LogisticModel.fit(subtracted["time"], subtracted[config.growth_signal]) # type: ignore # auto
model = ch.LogisticModel(model_params)
# model_params = ch.GompertzModel.fit(subtracted["time"], subtracted[config.growth_signal])
# model = ch.GompertzModel(model_params)
print(f"Inferred parameters: {model_params}")
print(f"Growth phase: {model.growth_period}")
print(f"Time of maximal activity: {model.time_maximal_activity}")
print(f"Inferred (log of) initial density: {model.initial_density(log=True)}")
ch.plot_growth_model(subtracted["time"], subtracted[config.growth_signal], model=model) # type: ignore # auto
figures_not_found += plot_figure("plot3_growth_model_fit")
# ### Plotting the data
#
# Some time after the growth phase, we should observe a similar exponential production
# of the proteins. Suppose that this maturation time is about 50 minutes,
# that is about 0.85 hours.
#
# Then, fluorescence signals should be linear when drawn with respect to each other.
# Add offset to the growth phase
production_phase = model.growth_period + config.maturation_offset
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) # type: ignore
ch.plot_signals_against_time(subtracted, signals=colors.keys(), time_column="time", ax=ax1, colors=colors)
# Visualise the production phase
ch.mark_phase(ax1, interval=production_phase, color="green", alpha=0.1)
ch.plot_signals_against_reference(subtracted, signals=("EYFP", "ECFP"), reference="mRFP1", colors=colors, ax=ax2)
figures_not_found += plot_figure("plot4_fluorescence_signals", f)
# ### Truncate the time-series
#
# We see that this very well captures the growth phase of mRFP1 (the reference signal),
# but is a bit too late for EYFP and ECFP -- we won't have a linear dependence between
# the signals...
#
    # Let's choose a narrower interval.
another_production_phase = ch.TimePeriod(reference=12, left=2, right=2)
truncated_timeseries = ch.select_time_interval(subtracted, interval=another_production_phase)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) # type: ignore
ch.plot_signals_against_time(subtracted, signals=colors.keys(), time_column="time", ax=ax1, colors=colors)
# Visualise the production phase
ch.mark_phase(ax1, interval=another_production_phase, color="green", alpha=0.1)
ch.plot_signals_against_reference(
truncated_timeseries, signals=("EYFP", "ECFP"), reference="mRFP1", colors=colors, ax=ax2 # type: ignore # auto
)
figures_not_found += plot_figure("plot5_truncated")
# Run method
gradient, gradient_error = ch.transcriptional_activity_ratio(
truncated_timeseries, # type: ignore # auto
config.signals,
config.reference,
config.signal_properties,
model_params.growth_rate,
model.growth_period,
maturation_offset=config.maturation_offset,
)
# gradient
# ### Integration-based characterization
# Now assume that we want to integrate the signals over the production period.
signals = ["EYFP", "ECFP"]
ch.integrate(data=subtracted, signals=signals, interval=config.time_window)
# Now plot the output
f, axs = plt.subplots(1, len(config.signals), figsize=(12, 4))
for signal, ax in zip(config.signals, cast(Iterable, axs)):
ch.plot_integration(
subtracted,
signal,
config.time_window,
ax,
fillcolor=colors[signal],
annotation_spec=AnnotationSpec(title=True),
)
figures_not_found += plot_figure("plot6_integration", f)
assert figures_not_found == [], f"Figures not found: {", ".join(figures_not_found)}"
|
# # Introduction
# In this notebook, we will load an example time series, fit a growth model
# and plot the signals.
#
# ## Load example time series
#
# Let's start by loading example time series data.
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
from typing import Iterable, List, Optional, cast
import matplotlib.pyplot as plt
import pytest
import seaborn as sns
import staticchar as ch
from psbutils.filecheck import Plottable, figure_found
from psbutils.misc import find_subrepo_directory
from staticchar.plotting.core import AnnotationSpec
SUBREPO_DIR = find_subrepo_directory()
S_SHAPE_FOLDER = SUBREPO_DIR / "tests/test_data/S-shape"
def plot_figure(name: str, ax: Optional[Plottable] = None) -> List[str]:
sns.despine()
found = figure_found(ax, f"test_introduction/{name}")
plt.clf()
return [] if found else [name]
@pytest.mark.timeout(10)
def test_introduction():
dataset = ch.datasets.Dataset(S_SHAPE_FOLDER) # type: ignore # auto
raw_timeseries = dataset.get_a_frame()
rth = raw_timeseries.head()
# As we can see, there is some non-zero signal at the beginning, which we attribute to
# the media absorbance and media fluorescence (as initially we have very low cell density).
assert sorted(rth.keys().to_list()) == sorted([ch.TIME, "EYFP", "OD", "ECFP", "OD700", "mRFP1"])
colors = {"EYFP": "yellow", "ECFP": "cyan", "mRFP1": "red", "OD": "black"}
plt.figure(figsize=(6.4, 4.8))
ax = cast(plt.Axes, plt.subplot())
ch.plot_signals_against_time(raw_timeseries, signals=colors.keys(), time_column="time", ax=ax, colors=colors)
ax.legend()
figures_not_found = []
figures_not_found += plot_figure("plot1_raw_timeseries", ax)
# ## Pre-processing
# Let's assume this is the background and subtract it.
# (A more precise, but also costly alternative is to estimate this using several blanks).
# In[ ]:
subtracted = ch.subtract_background(
raw_timeseries, columns=["OD", "ECFP", "EYFP", "mRFP1"], strategy=ch.BackgroundChoices.Minimum
)
ax = cast(plt.Axes, plt.subplot())
ch.plot_signals_against_time(subtracted, signals=colors.keys(), time_column="time", ax=ax, colors=colors)
ax.legend()
figures_not_found += plot_figure("plot2_subtracted_timeseries", ax)
# ## Run characterization on an example
# In[ ]:
yaml_path = find_subrepo_directory() / "tests/configs/integral_basic.yml"
config = ch.config.load(yaml_path, ch.config.CharacterizationConfig)
# config
# ### Fitting a growth model
#
# Let's fit a growth model to the OD signal.
model_params = ch.LogisticModel.fit(subtracted["time"], subtracted[config.growth_signal]) # type: ignore # auto
model = ch.LogisticModel(model_params)
# model_params = ch.GompertzModel.fit(subtracted["time"], subtracted[config.growth_signal])
# model = ch.GompertzModel(model_params)
print(f"Inferred parameters: {model_params}")
print(f"Growth phase: {model.growth_period}")
print(f"Time of maximal activity: {model.time_maximal_activity}")
print(f"Inferred (log of) initial density: {model.initial_density(log=True)}")
ch.plot_growth_model(subtracted["time"], subtracted[config.growth_signal], model=model) # type: ignore # auto
figures_not_found += plot_figure("plot3_growth_model_fit")
# ### Plotting the data
#
# Some time after the growth phase, we should observe a similar exponential production
# of the proteins. Suppose that this maturation time is about 50 minutes,
# that is about 0.85 hours.
#
# Then, fluorescence signals should be linear when drawn with respect to each other.
# Add offset to the growth phase
production_phase = model.growth_period + config.maturation_offset
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) # type: ignore
ch.plot_signals_against_time(subtracted, signals=colors.keys(), time_column="time", ax=ax1, colors=colors)
# Visualise the production phase
ch.mark_phase(ax1, interval=production_phase, color="green", alpha=0.1)
ch.plot_signals_against_reference(subtracted, signals=("EYFP", "ECFP"), reference="mRFP1", colors=colors, ax=ax2)
figures_not_found += plot_figure("plot4_fluorescence_signals", f)
# ### Truncate the time-series
#
# We see that this very well captures the growth phase of mRFP1 (the reference signal),
# but is a bit too late for EYFP and ECFP -- we won't have a linear dependence between
# the signals...
#
    # Let's choose a narrower interval.
another_production_phase = ch.TimePeriod(reference=12, left=2, right=2)
truncated_timeseries = ch.select_time_interval(subtracted, interval=another_production_phase)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) # type: ignore
ch.plot_signals_against_time(subtracted, signals=colors.keys(), time_column="time", ax=ax1, colors=colors)
# Visualise the production phase
ch.mark_phase(ax1, interval=another_production_phase, color="green", alpha=0.1)
ch.plot_signals_against_reference(
truncated_timeseries, signals=("EYFP", "ECFP"), reference="mRFP1", colors=colors, ax=ax2 # type: ignore # auto
)
figures_not_found += plot_figure("plot5_truncated")
# Run method
gradient, gradient_error = ch.transcriptional_activity_ratio(
truncated_timeseries, # type: ignore # auto
config.signals,
config.reference,
config.signal_properties,
model_params.growth_rate,
model.growth_period,
maturation_offset=config.maturation_offset,
)
# gradient
# ### Integration-based characterization
# Now assume that we want to integrate the signals over the production period.
signals = ["EYFP", "ECFP"]
ch.integrate(data=subtracted, signals=signals, interval=config.time_window)
# Now plot the output
f, axs = plt.subplots(1, len(config.signals), figsize=(12, 4))
for signal, ax in zip(config.signals, cast(Iterable, axs)):
ch.plot_integration(
subtracted,
signal,
config.time_window,
ax,
fillcolor=colors[signal],
annotation_spec=AnnotationSpec(title=True),
)
figures_not_found += plot_figure("plot6_integration", f)
assert figures_not_found == [], f"Figures not found: {', '.join(figures_not_found)}"
|
# Copyright (c) 2022, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import datetime
import inspect
import unittest
from random import choice
from unittest.mock import patch
import frappe
from frappe.custom.doctype.custom_field.custom_field import create_custom_field
from frappe.database import savepoint
from frappe.database.database import Database
from frappe.query_builder import Field
from frappe.query_builder.functions import Concat_ws
from frappe.tests.test_query_builder import db_type_is, run_only_if
from frappe.utils import add_days, cint, now, random_string
from frappe.utils.testutils import clear_custom_fields
class TestDB(unittest.TestCase):
def test_get_value(self):
self.assertEqual(frappe.db.get_value("User", {"name": ["=", "Administrator"]}), "Administrator")
self.assertEqual(frappe.db.get_value("User", {"name": ["like", "Admin%"]}), "Administrator")
self.assertNotEqual(frappe.db.get_value("User", {"name": ["!=", "Guest"]}), "Guest")
self.assertEqual(frappe.db.get_value("User", {"name": ["<", "Adn"]}), "Administrator")
self.assertEqual(frappe.db.get_value("User", {"name": ["<=", "Administrator"]}), "Administrator")
self.assertEqual(
frappe.db.get_value("User", {}, ["Max(name)"], order_by=None),
frappe.db.sql("SELECT Max(name) FROM tabUser")[0][0],
)
self.assertEqual(
frappe.db.get_value("User", {}, "Min(name)", order_by=None),
frappe.db.sql("SELECT Min(name) FROM tabUser")[0][0],
)
self.assertIn(
"for update",
frappe.db.get_value(
"User", Field("name") == "Administrator", for_update=True, run=False
).lower(),
)
user_doctype = frappe.qb.DocType("User")
self.assertEqual(
frappe.qb.from_(user_doctype).select(user_doctype.name, user_doctype.email).run(),
frappe.db.get_values(
user_doctype,
filters={},
fieldname=[user_doctype.name, user_doctype.email],
order_by=None,
),
)
self.assertEqual(
frappe.db.sql("""SELECT name FROM `tabUser` WHERE name > 's' ORDER BY MODIFIED DESC""")[0][0],
frappe.db.get_value("User", {"name": [">", "s"]}),
)
self.assertEqual(
frappe.db.sql("""SELECT name FROM `tabUser` WHERE name >= 't' ORDER BY MODIFIED DESC""")[0][0],
frappe.db.get_value("User", {"name": [">=", "t"]}),
)
self.assertEqual(
frappe.db.get_values(
"User",
filters={"name": "Administrator"},
distinct=True,
fieldname="email",
),
frappe.qb.from_(user_doctype)
.where(user_doctype.name == "Administrator")
.select("email")
.distinct()
.run(),
)
self.assertIn(
"concat_ws",
frappe.db.get_value(
"User",
filters={"name": "Administrator"},
fieldname=Concat_ws(" ", "LastName"),
run=False,
).lower(),
)
self.assertEqual(
frappe.db.sql("select email from tabUser where name='Administrator' order by modified DESC"),
frappe.db.get_values("User", filters=[["name", "=", "Administrator"]], fieldname="email"),
)
def test_get_value_limits(self):
# check both dict and list style filters
filters = [{"enabled": 1}, [["enabled", "=", 1]]]
for filter in filters:
self.assertEqual(1, len(frappe.db.get_values("User", filters=filter, limit=1)))
# count of last touched rows as per DB-API 2.0 https://peps.python.org/pep-0249/#rowcount
self.assertGreaterEqual(1, cint(frappe.db._cursor.rowcount))
self.assertEqual(2, len(frappe.db.get_values("User", filters=filter, limit=2)))
self.assertGreaterEqual(2, cint(frappe.db._cursor.rowcount))
# without limits length == count
self.assertEqual(
len(frappe.db.get_values("User", filters=filter)), frappe.db.count("User", filter)
)
frappe.db.get_value("User", filters=filter)
self.assertGreaterEqual(1, cint(frappe.db._cursor.rowcount))
frappe.db.exists("User", filter)
self.assertGreaterEqual(1, cint(frappe.db._cursor.rowcount))
def test_escape(self):
frappe.db.escape("香港濟生堂製藥有限公司 - IT".encode("utf-8"))
def test_get_single_value(self):
# setup
values_dict = {
"Float": 1.5,
"Int": 1,
"Percent": 55.5,
"Currency": 12.5,
"Data": "Test",
"Date": datetime.datetime.now().date(),
"Datetime": datetime.datetime.now(),
"Time": datetime.timedelta(hours=9, minutes=45, seconds=10),
}
test_inputs = [
{"fieldtype": fieldtype, "value": value} for fieldtype, value in values_dict.items()
]
for fieldtype in values_dict.keys():
create_custom_field(
"Print Settings",
{
"fieldname": f"test_{fieldtype.lower()}",
"label": f"Test {fieldtype}",
"fieldtype": fieldtype,
},
)
# test
for inp in test_inputs:
fieldname = f"test_{inp["fieldtype"].lower()}"
frappe.db.set_value("Print Settings", "Print Settings", fieldname, inp["value"])
self.assertEqual(frappe.db.get_single_value("Print Settings", fieldname), inp["value"])
# teardown
clear_custom_fields("Print Settings")
def test_log_touched_tables(self):
frappe.flags.in_migrate = True
frappe.flags.touched_tables = set()
frappe.db.set_value("System Settings", "System Settings", "backup_limit", 5)
self.assertIn("tabSingles", frappe.flags.touched_tables)
frappe.flags.touched_tables = set()
todo = frappe.get_doc({"doctype": "ToDo", "description": "Random Description"})
todo.save()
self.assertIn("tabToDo", frappe.flags.touched_tables)
frappe.flags.touched_tables = set()
todo.description = "Another Description"
todo.save()
self.assertIn("tabToDo", frappe.flags.touched_tables)
if frappe.db.db_type != "postgres":
frappe.flags.touched_tables = set()
frappe.db.sql("UPDATE tabToDo SET description = 'Updated Description'")
self.assertNotIn("tabToDo SET", frappe.flags.touched_tables)
self.assertIn("tabToDo", frappe.flags.touched_tables)
frappe.flags.touched_tables = set()
todo.delete()
self.assertIn("tabToDo", frappe.flags.touched_tables)
frappe.flags.touched_tables = set()
create_custom_field("ToDo", {"label": "ToDo Custom Field"})
self.assertIn("tabToDo", frappe.flags.touched_tables)
self.assertIn("tabCustom Field", frappe.flags.touched_tables)
frappe.flags.in_migrate = False
frappe.flags.touched_tables.clear()
def test_db_keywords_as_fields(self):
"""Tests if DB keywords work as docfield names. If they're wrapped with grave accents."""
# Using random.choices, picked out a list of 40 keywords for testing
all_keywords = {
"mariadb": [
"CHARACTER",
"DELAYED",
"LINES",
"EXISTS",
"YEAR_MONTH",
"LOCALTIME",
"BOTH",
"MEDIUMINT",
"LEFT",
"BINARY",
"DEFAULT",
"KILL",
"WRITE",
"SQL_SMALL_RESULT",
"CURRENT_TIME",
"CROSS",
"INHERITS",
"SELECT",
"TABLE",
"ALTER",
"CURRENT_TIMESTAMP",
"XOR",
"CASE",
"ALL",
"WHERE",
"INT",
"TO",
"SOME",
"DAY_MINUTE",
"ERRORS",
"OPTIMIZE",
"REPLACE",
"HIGH_PRIORITY",
"VARBINARY",
"HELP",
"IS",
"CHAR",
"DESCRIBE",
"KEY",
],
"postgres": [
"WORK",
"LANCOMPILER",
"REAL",
"HAVING",
"REPEATABLE",
"DATA",
"USING",
"BIT",
"DEALLOCATE",
"SERIALIZABLE",
"CURSOR",
"INHERITS",
"ARRAY",
"TRUE",
"IGNORE",
"PARAMETER_MODE",
"ROW",
"CHECKPOINT",
"SHOW",
"BY",
"SIZE",
"SCALE",
"UNENCRYPTED",
"WITH",
"AND",
"CONVERT",
"FIRST",
"SCOPE",
"WRITE",
"INTERVAL",
"CHARACTER_SET_SCHEMA",
"ADD",
"SCROLL",
"NULL",
"WHEN",
"TRANSACTION_ACTIVE",
"INT",
"FORTRAN",
"STABLE",
],
}
created_docs = []
# edit by rushabh: added [:1]
# don't run every keyword! - if one works, they all do
fields = all_keywords[frappe.conf.db_type][:1]
test_doctype = "ToDo"
def add_custom_field(field):
create_custom_field(
test_doctype,
{
"fieldname": field.lower(),
"label": field.title(),
"fieldtype": "Data",
},
)
# Create custom fields for test_doctype
for field in fields:
add_custom_field(field)
# Create documents under that doctype and query them via ORM
for _ in range(10):
docfields = {key.lower(): random_string(10) for key in fields}
doc = frappe.get_doc({"doctype": test_doctype, "description": random_string(20), **docfields})
doc.insert()
created_docs.append(doc.name)
random_field = choice(fields).lower()
random_doc = choice(created_docs)
random_value = random_string(20)
# Testing read
self.assertEqual(
list(frappe.get_all("ToDo", fields=[random_field], limit=1)[0])[0], random_field
)
self.assertEqual(
list(frappe.get_all("ToDo", fields=[f"`{random_field}` as total"], limit=1)[0])[0], "total"
)
# Testing read for distinct and sql functions
self.assertEqual(
list(
frappe.get_all(
"ToDo",
fields=[f"`{random_field}` as total"],
distinct=True,
limit=1,
)[0]
)[0],
"total",
)
self.assertEqual(
list(
frappe.get_all(
"ToDo",
fields=[f"`{random_field}`"],
distinct=True,
limit=1,
)[0]
)[0],
random_field,
)
self.assertEqual(
list(frappe.get_all("ToDo", fields=[f"count(`{random_field}`)"], limit=1)[0])[0],
"count" if frappe.conf.db_type == "postgres" else f"count(`{random_field}`)",
)
# Testing update
frappe.db.set_value(test_doctype, random_doc, random_field, random_value)
self.assertEqual(frappe.db.get_value(test_doctype, random_doc, random_field), random_value)
# Cleanup - delete records and remove custom fields
for doc in created_docs:
frappe.delete_doc(test_doctype, doc)
clear_custom_fields(test_doctype)
def test_savepoints(self):
frappe.db.rollback()
save_point = "todonope"
created_docs = []
failed_docs = []
for _ in range(5):
frappe.db.savepoint(save_point)
doc_gone = frappe.get_doc(doctype="ToDo", description="nope").save()
failed_docs.append(doc_gone.name)
frappe.db.rollback(save_point=save_point)
doc_kept = frappe.get_doc(doctype="ToDo", description="nope").save()
created_docs.append(doc_kept.name)
frappe.db.commit()
for d in failed_docs:
self.assertFalse(frappe.db.exists("ToDo", d))
for d in created_docs:
self.assertTrue(frappe.db.exists("ToDo", d))
def test_savepoints_wrapper(self):
frappe.db.rollback()
class SpecificExc(Exception):
pass
created_docs = []
failed_docs = []
for _ in range(5):
with savepoint(catch=SpecificExc):
doc_kept = frappe.get_doc(doctype="ToDo", description="nope").save()
created_docs.append(doc_kept.name)
with savepoint(catch=SpecificExc):
doc_gone = frappe.get_doc(doctype="ToDo", description="nope").save()
failed_docs.append(doc_gone.name)
raise SpecificExc
frappe.db.commit()
for d in failed_docs:
self.assertFalse(frappe.db.exists("ToDo", d))
for d in created_docs:
self.assertTrue(frappe.db.exists("ToDo", d))
def test_transaction_writes_error(self):
from frappe.database.database import Database
frappe.db.rollback()
frappe.db.MAX_WRITES_PER_TRANSACTION = 1
note = frappe.get_last_doc("ToDo")
note.description = "changed"
with self.assertRaises(frappe.TooManyWritesError) as tmw:
note.save()
frappe.db.MAX_WRITES_PER_TRANSACTION = Database.MAX_WRITES_PER_TRANSACTION
def test_transaction_write_counting(self):
note = frappe.get_doc(doctype="Note", title="transaction counting").insert()
writes = frappe.db.transaction_writes
frappe.db.set_value("Note", note.name, "content", "abc")
self.assertEqual(1, frappe.db.transaction_writes - writes)
writes = frappe.db.transaction_writes
frappe.db.sql(
"""
update `tabNote`
set content = 'abc'
where name = %s
""",
note.name,
)
self.assertEqual(1, frappe.db.transaction_writes - writes)
def test_pk_collision_ignoring(self):
# note has `name` generated from title
for _ in range(3):
frappe.get_doc(doctype="Note", title="duplicate name").insert(ignore_if_duplicate=True)
with savepoint():
self.assertRaises(
frappe.DuplicateEntryError, frappe.get_doc(doctype="Note", title="duplicate name").insert
)
# recover transaction to continue other tests
raise Exception
def test_exists(self):
dt, dn = "User", "Administrator"
self.assertEqual(frappe.db.exists(dt, dn, cache=True), dn)
self.assertEqual(frappe.db.exists(dt, dn), dn)
self.assertEqual(frappe.db.exists(dt, {"name": ("=", dn)}), dn)
filters = {"doctype": dt, "name": ("like", "Admin%")}
self.assertEqual(frappe.db.exists(filters), dn)
self.assertEqual(filters["doctype"], dt) # make sure that doctype was not removed from filters
self.assertEqual(frappe.db.exists(dt, [["name", "=", dn]]), dn)
@run_only_if(db_type_is.MARIADB)
class TestDDLCommandsMaria(unittest.TestCase):
test_table_name = "TestNotes"
def setUp(self) -> None:
frappe.db.commit()
frappe.db.sql(
f"""
CREATE TABLE `tab{self.test_table_name}` (`id` INT NULL, content TEXT, PRIMARY KEY (`id`));
"""
)
def tearDown(self) -> None:
frappe.db.sql(f"DROP TABLE tab{self.test_table_name};")
self.test_table_name = "TestNotes"
def test_rename(self) -> None:
new_table_name = f"{self.test_table_name}_new"
frappe.db.rename_table(self.test_table_name, new_table_name)
check_exists = frappe.db.sql(
f"""
SELECT * FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_NAME = N'tab{new_table_name}';
"""
)
self.assertGreater(len(check_exists), 0)
self.assertIn(f"tab{new_table_name}", check_exists[0])
# * so this table is deleted after the rename
self.test_table_name = new_table_name
def test_describe(self) -> None:
self.assertEqual(
(
("id", "int(11)", "NO", "PRI", None, ""),
("content", "text", "YES", "", None, ""),
),
frappe.db.describe(self.test_table_name),
)
def test_change_type(self) -> None:
frappe.db.change_column_type("TestNotes", "id", "varchar(255)")
test_table_description = frappe.db.sql(f"DESC tab{self.test_table_name};")
self.assertGreater(len(test_table_description), 0)
self.assertIn("varchar(255)", test_table_description[0])
def test_add_index(self) -> None:
index_name = "test_index"
frappe.db.add_index(self.test_table_name, ["id", "content(50)"], index_name)
indexs_in_table = frappe.db.sql(
f"""
SHOW INDEX FROM tab{self.test_table_name}
WHERE Key_name = '{index_name}';
"""
)
self.assertEqual(len(indexs_in_table), 2)
class TestDBSetValue(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.todo1 = frappe.get_doc(doctype="ToDo", description="test_set_value 1").insert()
cls.todo2 = frappe.get_doc(doctype="ToDo", description="test_set_value 2").insert()
def test_update_single_doctype_field(self):
value = frappe.db.get_single_value("System Settings", "deny_multiple_sessions")
changed_value = not value
frappe.db.set_value(
"System Settings", "System Settings", "deny_multiple_sessions", changed_value
)
current_value = frappe.db.get_single_value("System Settings", "deny_multiple_sessions")
self.assertEqual(current_value, changed_value)
changed_value = not current_value
frappe.db.set_value("System Settings", None, "deny_multiple_sessions", changed_value)
current_value = frappe.db.get_single_value("System Settings", "deny_multiple_sessions")
self.assertEqual(current_value, changed_value)
changed_value = not current_value
frappe.db.set_single_value("System Settings", "deny_multiple_sessions", changed_value)
current_value = frappe.db.get_single_value("System Settings", "deny_multiple_sessions")
self.assertEqual(current_value, changed_value)
def test_update_single_row_single_column(self):
frappe.db.set_value("ToDo", self.todo1.name, "description", "test_set_value change 1")
updated_value = frappe.db.get_value("ToDo", self.todo1.name, "description")
self.assertEqual(updated_value, "test_set_value change 1")
def test_update_single_row_multiple_columns(self):
description, status = "Upated by test_update_single_row_multiple_columns", "Closed"
frappe.db.set_value(
"ToDo",
self.todo1.name,
{
"description": description,
"status": status,
},
update_modified=False,
)
updated_desciption, updated_status = frappe.db.get_value(
"ToDo", filters={"name": self.todo1.name}, fieldname=["description", "status"]
)
self.assertEqual(description, updated_desciption)
self.assertEqual(status, updated_status)
def test_update_multiple_rows_single_column(self):
frappe.db.set_value(
"ToDo", {"description": ("like", "%test_set_value%")}, "description", "change 2"
)
self.assertEqual(frappe.db.get_value("ToDo", self.todo1.name, "description"), "change 2")
self.assertEqual(frappe.db.get_value("ToDo", self.todo2.name, "description"), "change 2")
def test_update_multiple_rows_multiple_columns(self):
todos_to_update = frappe.get_all(
"ToDo",
filters={"description": ("like", "%test_set_value%"), "status": ("!=", "Closed")},
pluck="name",
)
frappe.db.set_value(
"ToDo",
{"description": ("like", "%test_set_value%"), "status": ("!=", "Closed")},
{"status": "Closed", "priority": "High"},
)
test_result = frappe.get_all(
"ToDo", filters={"name": ("in", todos_to_update)}, fields=["status", "priority"]
)
self.assertTrue(all(x for x in test_result if x["status"] == "Closed"))
self.assertTrue(all(x for x in test_result if x["priority"] == "High"))
def test_update_modified_options(self):
self.todo2.reload()
todo = self.todo2
updated_description = f"{todo.description} - by `test_update_modified_options`"
custom_modified = datetime.datetime.fromisoformat(add_days(now(), 10))
custom_modified_by = "[email protected]"
frappe.db.set_value("ToDo", todo.name, "description", updated_description, update_modified=False)
self.assertEqual(updated_description, frappe.db.get_value("ToDo", todo.name, "description"))
self.assertEqual(todo.modified, frappe.db.get_value("ToDo", todo.name, "modified"))
frappe.db.set_value(
"ToDo",
todo.name,
"description",
"test_set_value change 1",
modified=custom_modified,
modified_by=custom_modified_by,
)
self.assertTupleEqual(
(custom_modified, custom_modified_by),
frappe.db.get_value("ToDo", todo.name, ["modified", "modified_by"]),
)
def test_for_update(self):
self.todo1.reload()
with patch.object(Database, "sql") as sql_called:
frappe.db.set_value(
self.todo1.doctype,
self.todo1.name,
"description",
f"{self.todo1.description}-edit by `test_for_update`",
)
first_query = sql_called.call_args_list[0].args[0]
second_query = sql_called.call_args_list[1].args[0]
self.assertTrue(sql_called.call_count == 2)
self.assertTrue("FOR UPDATE" in first_query)
if frappe.conf.db_type == "postgres":
from frappe.database.postgres.database import modify_query
self.assertTrue(modify_query("UPDATE `tabToDo` SET") in second_query)
if frappe.conf.db_type == "mariadb":
self.assertTrue("UPDATE `tabToDo` SET" in second_query)
def test_cleared_cache(self):
self.todo2.reload()
with patch.object(frappe, "clear_document_cache") as clear_cache:
frappe.db.set_value(
self.todo2.doctype,
self.todo2.name,
"description",
f"{self.todo2.description}-edit by `test_cleared_cache`",
)
clear_cache.assert_called()
def test_update_alias(self):
args = (self.todo1.doctype, self.todo1.name, "description", "Updated by `test_update_alias`")
kwargs = {
"for_update": False,
"modified": None,
"modified_by": None,
"update_modified": True,
"debug": False,
}
self.assertTrue("return self.set_value(" in inspect.getsource(frappe.db.update))
with patch.object(Database, "set_value") as set_value:
frappe.db.update(*args, **kwargs)
set_value.assert_called_once()
set_value.assert_called_with(*args, **kwargs)
@classmethod
def tearDownClass(cls):
frappe.db.rollback()
@run_only_if(db_type_is.POSTGRES)
class TestDDLCommandsPost(unittest.TestCase):
test_table_name = "TestNotes"
def setUp(self) -> None:
frappe.db.sql(
f"""
CREATE TABLE "tab{self.test_table_name}" ("id" INT NULL, content text, PRIMARY KEY ("id"))
"""
)
def tearDown(self) -> None:
frappe.db.sql(f'DROP TABLE "tab{self.test_table_name}"')
self.test_table_name = "TestNotes"
def test_rename(self) -> None:
new_table_name = f"{self.test_table_name}_new"
frappe.db.rename_table(self.test_table_name, new_table_name)
check_exists = frappe.db.sql(
f"""
SELECT EXISTS (
SELECT FROM information_schema.tables
WHERE table_name = 'tab{new_table_name}'
);
"""
)
self.assertTrue(check_exists[0][0])
# * so this table is deleted after the rename
self.test_table_name = new_table_name
def test_describe(self) -> None:
self.assertEqual([("id",), ("content",)], frappe.db.describe(self.test_table_name))
def test_change_type(self) -> None:
frappe.db.change_column_type(self.test_table_name, "id", "varchar(255)")
check_change = frappe.db.sql(
f"""
SELECT
table_name,
column_name,
data_type
FROM
information_schema.columns
WHERE
table_name = 'tab{self.test_table_name}'
"""
)
self.assertGreater(len(check_change), 0)
self.assertIn("character varying", check_change[0])
def test_add_index(self) -> None:
index_name = "test_index"
frappe.db.add_index(self.test_table_name, ["id", "content(50)"], index_name)
indexs_in_table = frappe.db.sql(
f"""
SELECT indexname
FROM pg_indexes
WHERE tablename = 'tab{self.test_table_name}'
AND indexname = '{index_name}' ;
""",
)
self.assertEqual(len(indexs_in_table), 1)
@run_only_if(db_type_is.POSTGRES)
def test_modify_query(self):
from frappe.database.postgres.database import modify_query
query = "select * from `tabtree b` where lft > 13 and rgt <= 16 and name =1.0 and parent = 4134qrsdc and isgroup = 1.00045"
self.assertEqual(
"select * from \"tabtree b\" where lft > '13' and rgt <= '16' and name = '1' and parent = 4134qrsdc and isgroup = 1.00045",
modify_query(query),
)
query = (
'select locate(".io", "frappe.io"), locate("3", cast(3 as varchar)), locate("3", 3::varchar)'
)
self.assertEqual(
'select strpos( "frappe.io", ".io"), strpos( cast(3 as varchar), "3"), strpos( 3::varchar, "3")',
modify_query(query),
)
@run_only_if(db_type_is.POSTGRES)
def test_modify_values(self):
from frappe.database.postgres.database import modify_values
self.assertEqual(
{"abcd": "23", "efgh": "23", "ijkl": 23.0345, "mnop": "wow"},
modify_values({"abcd": 23, "efgh": 23.0, "ijkl": 23.0345, "mnop": "wow"}),
)
self.assertEqual(["23", "23", 23.00004345, "wow"], modify_values((23, 23.0, 23.00004345, "wow")))
def test_sequence_table_creation(self):
from frappe.core.doctype.doctype.test_doctype import new_doctype
dt = new_doctype("autoinc_dt_seq_test", autoname="autoincrement").insert(ignore_permissions=True)
if frappe.db.db_type == "postgres":
self.assertTrue(
frappe.db.sql(
"""select sequence_name FROM information_schema.sequences
where sequence_name ilike 'autoinc_dt_seq_test%'"""
)[0][0]
)
else:
self.assertTrue(
frappe.db.sql(
"""select data_type FROM information_schema.tables
where table_type = 'SEQUENCE' and table_name like 'autoinc_dt_seq_test%'"""
)[0][0]
)
dt.delete(ignore_permissions=True)
|
# Copyright (c) 2022, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import datetime
import inspect
import unittest
from random import choice
from unittest.mock import patch
import frappe
from frappe.custom.doctype.custom_field.custom_field import create_custom_field
from frappe.database import savepoint
from frappe.database.database import Database
from frappe.query_builder import Field
from frappe.query_builder.functions import Concat_ws
from frappe.tests.test_query_builder import db_type_is, run_only_if
from frappe.utils import add_days, cint, now, random_string
from frappe.utils.testutils import clear_custom_fields
class TestDB(unittest.TestCase):
def test_get_value(self):
self.assertEqual(frappe.db.get_value("User", {"name": ["=", "Administrator"]}), "Administrator")
self.assertEqual(frappe.db.get_value("User", {"name": ["like", "Admin%"]}), "Administrator")
self.assertNotEqual(frappe.db.get_value("User", {"name": ["!=", "Guest"]}), "Guest")
self.assertEqual(frappe.db.get_value("User", {"name": ["<", "Adn"]}), "Administrator")
self.assertEqual(frappe.db.get_value("User", {"name": ["<=", "Administrator"]}), "Administrator")
self.assertEqual(
frappe.db.get_value("User", {}, ["Max(name)"], order_by=None),
frappe.db.sql("SELECT Max(name) FROM tabUser")[0][0],
)
self.assertEqual(
frappe.db.get_value("User", {}, "Min(name)", order_by=None),
frappe.db.sql("SELECT Min(name) FROM tabUser")[0][0],
)
self.assertIn(
"for update",
frappe.db.get_value(
"User", Field("name") == "Administrator", for_update=True, run=False
).lower(),
)
user_doctype = frappe.qb.DocType("User")
self.assertEqual(
frappe.qb.from_(user_doctype).select(user_doctype.name, user_doctype.email).run(),
frappe.db.get_values(
user_doctype,
filters={},
fieldname=[user_doctype.name, user_doctype.email],
order_by=None,
),
)
self.assertEqual(
frappe.db.sql("""SELECT name FROM `tabUser` WHERE name > 's' ORDER BY MODIFIED DESC""")[0][0],
frappe.db.get_value("User", {"name": [">", "s"]}),
)
self.assertEqual(
frappe.db.sql("""SELECT name FROM `tabUser` WHERE name >= 't' ORDER BY MODIFIED DESC""")[0][0],
frappe.db.get_value("User", {"name": [">=", "t"]}),
)
self.assertEqual(
frappe.db.get_values(
"User",
filters={"name": "Administrator"},
distinct=True,
fieldname="email",
),
frappe.qb.from_(user_doctype)
.where(user_doctype.name == "Administrator")
.select("email")
.distinct()
.run(),
)
self.assertIn(
"concat_ws",
frappe.db.get_value(
"User",
filters={"name": "Administrator"},
fieldname=Concat_ws(" ", "LastName"),
run=False,
).lower(),
)
self.assertEqual(
frappe.db.sql("select email from tabUser where name='Administrator' order by modified DESC"),
frappe.db.get_values("User", filters=[["name", "=", "Administrator"]], fieldname="email"),
)
def test_get_value_limits(self):
# check both dict and list style filters
filters = [{"enabled": 1}, [["enabled", "=", 1]]]
for filter in filters:
self.assertEqual(1, len(frappe.db.get_values("User", filters=filter, limit=1)))
# count of last touched rows as per DB-API 2.0 https://peps.python.org/pep-0249/#rowcount
self.assertGreaterEqual(1, cint(frappe.db._cursor.rowcount))
self.assertEqual(2, len(frappe.db.get_values("User", filters=filter, limit=2)))
self.assertGreaterEqual(2, cint(frappe.db._cursor.rowcount))
# without limits length == count
self.assertEqual(
len(frappe.db.get_values("User", filters=filter)), frappe.db.count("User", filter)
)
frappe.db.get_value("User", filters=filter)
self.assertGreaterEqual(1, cint(frappe.db._cursor.rowcount))
frappe.db.exists("User", filter)
self.assertGreaterEqual(1, cint(frappe.db._cursor.rowcount))
def test_escape(self):
frappe.db.escape("香港濟生堂製藥有限公司 - IT".encode("utf-8"))
def test_get_single_value(self):
# setup
values_dict = {
"Float": 1.5,
"Int": 1,
"Percent": 55.5,
"Currency": 12.5,
"Data": "Test",
"Date": datetime.datetime.now().date(),
"Datetime": datetime.datetime.now(),
"Time": datetime.timedelta(hours=9, minutes=45, seconds=10),
}
test_inputs = [
{"fieldtype": fieldtype, "value": value} for fieldtype, value in values_dict.items()
]
for fieldtype in values_dict.keys():
create_custom_field(
"Print Settings",
{
"fieldname": f"test_{fieldtype.lower()}",
"label": f"Test {fieldtype}",
"fieldtype": fieldtype,
},
)
# test
for inp in test_inputs:
fieldname = f"test_{inp['fieldtype'].lower()}"
frappe.db.set_value("Print Settings", "Print Settings", fieldname, inp["value"])
self.assertEqual(frappe.db.get_single_value("Print Settings", fieldname), inp["value"])
# teardown
clear_custom_fields("Print Settings")
def test_log_touched_tables(self):
frappe.flags.in_migrate = True
frappe.flags.touched_tables = set()
frappe.db.set_value("System Settings", "System Settings", "backup_limit", 5)
self.assertIn("tabSingles", frappe.flags.touched_tables)
frappe.flags.touched_tables = set()
todo = frappe.get_doc({"doctype": "ToDo", "description": "Random Description"})
todo.save()
self.assertIn("tabToDo", frappe.flags.touched_tables)
frappe.flags.touched_tables = set()
todo.description = "Another Description"
todo.save()
self.assertIn("tabToDo", frappe.flags.touched_tables)
if frappe.db.db_type != "postgres":
frappe.flags.touched_tables = set()
frappe.db.sql("UPDATE tabToDo SET description = 'Updated Description'")
self.assertNotIn("tabToDo SET", frappe.flags.touched_tables)
self.assertIn("tabToDo", frappe.flags.touched_tables)
frappe.flags.touched_tables = set()
todo.delete()
self.assertIn("tabToDo", frappe.flags.touched_tables)
frappe.flags.touched_tables = set()
create_custom_field("ToDo", {"label": "ToDo Custom Field"})
self.assertIn("tabToDo", frappe.flags.touched_tables)
self.assertIn("tabCustom Field", frappe.flags.touched_tables)
frappe.flags.in_migrate = False
frappe.flags.touched_tables.clear()
def test_db_keywords_as_fields(self):
"""Tests if DB keywords work as docfield names. If they're wrapped with grave accents."""
# Using random.choices, picked out a list of 40 keywords for testing
all_keywords = {
"mariadb": [
"CHARACTER",
"DELAYED",
"LINES",
"EXISTS",
"YEAR_MONTH",
"LOCALTIME",
"BOTH",
"MEDIUMINT",
"LEFT",
"BINARY",
"DEFAULT",
"KILL",
"WRITE",
"SQL_SMALL_RESULT",
"CURRENT_TIME",
"CROSS",
"INHERITS",
"SELECT",
"TABLE",
"ALTER",
"CURRENT_TIMESTAMP",
"XOR",
"CASE",
"ALL",
"WHERE",
"INT",
"TO",
"SOME",
"DAY_MINUTE",
"ERRORS",
"OPTIMIZE",
"REPLACE",
"HIGH_PRIORITY",
"VARBINARY",
"HELP",
"IS",
"CHAR",
"DESCRIBE",
"KEY",
],
"postgres": [
"WORK",
"LANCOMPILER",
"REAL",
"HAVING",
"REPEATABLE",
"DATA",
"USING",
"BIT",
"DEALLOCATE",
"SERIALIZABLE",
"CURSOR",
"INHERITS",
"ARRAY",
"TRUE",
"IGNORE",
"PARAMETER_MODE",
"ROW",
"CHECKPOINT",
"SHOW",
"BY",
"SIZE",
"SCALE",
"UNENCRYPTED",
"WITH",
"AND",
"CONVERT",
"FIRST",
"SCOPE",
"WRITE",
"INTERVAL",
"CHARACTER_SET_SCHEMA",
"ADD",
"SCROLL",
"NULL",
"WHEN",
"TRANSACTION_ACTIVE",
"INT",
"FORTRAN",
"STABLE",
],
}
created_docs = []
# edit by rushabh: added [:1]
# don't run every keyword! - if one works, they all do
fields = all_keywords[frappe.conf.db_type][:1]
test_doctype = "ToDo"
def add_custom_field(field):
create_custom_field(
test_doctype,
{
"fieldname": field.lower(),
"label": field.title(),
"fieldtype": "Data",
},
)
# Create custom fields for test_doctype
for field in fields:
add_custom_field(field)
# Create documents under that doctype and query them via ORM
for _ in range(10):
docfields = {key.lower(): random_string(10) for key in fields}
doc = frappe.get_doc({"doctype": test_doctype, "description": random_string(20), **docfields})
doc.insert()
created_docs.append(doc.name)
random_field = choice(fields).lower()
random_doc = choice(created_docs)
random_value = random_string(20)
# Testing read
self.assertEqual(
list(frappe.get_all("ToDo", fields=[random_field], limit=1)[0])[0], random_field
)
self.assertEqual(
list(frappe.get_all("ToDo", fields=[f"`{random_field}` as total"], limit=1)[0])[0], "total"
)
# Testing read for distinct and sql functions
self.assertEqual(
list(
frappe.get_all(
"ToDo",
fields=[f"`{random_field}` as total"],
distinct=True,
limit=1,
)[0]
)[0],
"total",
)
self.assertEqual(
list(
frappe.get_all(
"ToDo",
fields=[f"`{random_field}`"],
distinct=True,
limit=1,
)[0]
)[0],
random_field,
)
self.assertEqual(
list(frappe.get_all("ToDo", fields=[f"count(`{random_field}`)"], limit=1)[0])[0],
"count" if frappe.conf.db_type == "postgres" else f"count(`{random_field}`)",
)
# Testing update
frappe.db.set_value(test_doctype, random_doc, random_field, random_value)
self.assertEqual(frappe.db.get_value(test_doctype, random_doc, random_field), random_value)
# Cleanup - delete records and remove custom fields
for doc in created_docs:
frappe.delete_doc(test_doctype, doc)
clear_custom_fields(test_doctype)
def test_savepoints(self):
frappe.db.rollback()
save_point = "todonope"
created_docs = []
failed_docs = []
for _ in range(5):
frappe.db.savepoint(save_point)
doc_gone = frappe.get_doc(doctype="ToDo", description="nope").save()
failed_docs.append(doc_gone.name)
frappe.db.rollback(save_point=save_point)
doc_kept = frappe.get_doc(doctype="ToDo", description="nope").save()
created_docs.append(doc_kept.name)
frappe.db.commit()
for d in failed_docs:
self.assertFalse(frappe.db.exists("ToDo", d))
for d in created_docs:
self.assertTrue(frappe.db.exists("ToDo", d))
def test_savepoints_wrapper(self):
frappe.db.rollback()
class SpecificExc(Exception):
pass
created_docs = []
failed_docs = []
for _ in range(5):
with savepoint(catch=SpecificExc):
doc_kept = frappe.get_doc(doctype="ToDo", description="nope").save()
created_docs.append(doc_kept.name)
with savepoint(catch=SpecificExc):
doc_gone = frappe.get_doc(doctype="ToDo", description="nope").save()
failed_docs.append(doc_gone.name)
raise SpecificExc
frappe.db.commit()
for d in failed_docs:
self.assertFalse(frappe.db.exists("ToDo", d))
for d in created_docs:
self.assertTrue(frappe.db.exists("ToDo", d))
def test_transaction_writes_error(self):
from frappe.database.database import Database
frappe.db.rollback()
frappe.db.MAX_WRITES_PER_TRANSACTION = 1
note = frappe.get_last_doc("ToDo")
note.description = "changed"
with self.assertRaises(frappe.TooManyWritesError) as tmw:
note.save()
frappe.db.MAX_WRITES_PER_TRANSACTION = Database.MAX_WRITES_PER_TRANSACTION
def test_transaction_write_counting(self):
note = frappe.get_doc(doctype="Note", title="transaction counting").insert()
writes = frappe.db.transaction_writes
frappe.db.set_value("Note", note.name, "content", "abc")
self.assertEqual(1, frappe.db.transaction_writes - writes)
writes = frappe.db.transaction_writes
frappe.db.sql(
"""
update `tabNote`
set content = 'abc'
where name = %s
""",
note.name,
)
self.assertEqual(1, frappe.db.transaction_writes - writes)
def test_pk_collision_ignoring(self):
# note has `name` generated from title
for _ in range(3):
frappe.get_doc(doctype="Note", title="duplicate name").insert(ignore_if_duplicate=True)
with savepoint():
self.assertRaises(
frappe.DuplicateEntryError, frappe.get_doc(doctype="Note", title="duplicate name").insert
)
# recover transaction to continue other tests
raise Exception
def test_exists(self):
dt, dn = "User", "Administrator"
self.assertEqual(frappe.db.exists(dt, dn, cache=True), dn)
self.assertEqual(frappe.db.exists(dt, dn), dn)
self.assertEqual(frappe.db.exists(dt, {"name": ("=", dn)}), dn)
filters = {"doctype": dt, "name": ("like", "Admin%")}
self.assertEqual(frappe.db.exists(filters), dn)
self.assertEqual(filters["doctype"], dt) # make sure that doctype was not removed from filters
self.assertEqual(frappe.db.exists(dt, [["name", "=", dn]]), dn)
@run_only_if(db_type_is.MARIADB)
class TestDDLCommandsMaria(unittest.TestCase):
test_table_name = "TestNotes"
def setUp(self) -> None:
frappe.db.commit()
frappe.db.sql(
f"""
CREATE TABLE `tab{self.test_table_name}` (`id` INT NULL, content TEXT, PRIMARY KEY (`id`));
"""
)
def tearDown(self) -> None:
frappe.db.sql(f"DROP TABLE tab{self.test_table_name};")
self.test_table_name = "TestNotes"
def test_rename(self) -> None:
new_table_name = f"{self.test_table_name}_new"
frappe.db.rename_table(self.test_table_name, new_table_name)
check_exists = frappe.db.sql(
f"""
SELECT * FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_NAME = N'tab{new_table_name}';
"""
)
self.assertGreater(len(check_exists), 0)
self.assertIn(f"tab{new_table_name}", check_exists[0])
# * so this table is deleted after the rename
self.test_table_name = new_table_name
def test_describe(self) -> None:
self.assertEqual(
(
("id", "int(11)", "NO", "PRI", None, ""),
("content", "text", "YES", "", None, ""),
),
frappe.db.describe(self.test_table_name),
)
def test_change_type(self) -> None:
frappe.db.change_column_type("TestNotes", "id", "varchar(255)")
test_table_description = frappe.db.sql(f"DESC tab{self.test_table_name};")
self.assertGreater(len(test_table_description), 0)
self.assertIn("varchar(255)", test_table_description[0])
def test_add_index(self) -> None:
index_name = "test_index"
frappe.db.add_index(self.test_table_name, ["id", "content(50)"], index_name)
indexs_in_table = frappe.db.sql(
f"""
SHOW INDEX FROM tab{self.test_table_name}
WHERE Key_name = '{index_name}';
"""
)
self.assertEqual(len(indexs_in_table), 2)
class TestDBSetValue(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.todo1 = frappe.get_doc(doctype="ToDo", description="test_set_value 1").insert()
cls.todo2 = frappe.get_doc(doctype="ToDo", description="test_set_value 2").insert()
def test_update_single_doctype_field(self):
value = frappe.db.get_single_value("System Settings", "deny_multiple_sessions")
changed_value = not value
frappe.db.set_value(
"System Settings", "System Settings", "deny_multiple_sessions", changed_value
)
current_value = frappe.db.get_single_value("System Settings", "deny_multiple_sessions")
self.assertEqual(current_value, changed_value)
changed_value = not current_value
frappe.db.set_value("System Settings", None, "deny_multiple_sessions", changed_value)
current_value = frappe.db.get_single_value("System Settings", "deny_multiple_sessions")
self.assertEqual(current_value, changed_value)
changed_value = not current_value
frappe.db.set_single_value("System Settings", "deny_multiple_sessions", changed_value)
current_value = frappe.db.get_single_value("System Settings", "deny_multiple_sessions")
self.assertEqual(current_value, changed_value)
def test_update_single_row_single_column(self):
frappe.db.set_value("ToDo", self.todo1.name, "description", "test_set_value change 1")
updated_value = frappe.db.get_value("ToDo", self.todo1.name, "description")
self.assertEqual(updated_value, "test_set_value change 1")
def test_update_single_row_multiple_columns(self):
description, status = "Upated by test_update_single_row_multiple_columns", "Closed"
frappe.db.set_value(
"ToDo",
self.todo1.name,
{
"description": description,
"status": status,
},
update_modified=False,
)
updated_desciption, updated_status = frappe.db.get_value(
"ToDo", filters={"name": self.todo1.name}, fieldname=["description", "status"]
)
self.assertEqual(description, updated_desciption)
self.assertEqual(status, updated_status)
def test_update_multiple_rows_single_column(self):
frappe.db.set_value(
"ToDo", {"description": ("like", "%test_set_value%")}, "description", "change 2"
)
self.assertEqual(frappe.db.get_value("ToDo", self.todo1.name, "description"), "change 2")
self.assertEqual(frappe.db.get_value("ToDo", self.todo2.name, "description"), "change 2")
def test_update_multiple_rows_multiple_columns(self):
todos_to_update = frappe.get_all(
"ToDo",
filters={"description": ("like", "%test_set_value%"), "status": ("!=", "Closed")},
pluck="name",
)
frappe.db.set_value(
"ToDo",
{"description": ("like", "%test_set_value%"), "status": ("!=", "Closed")},
{"status": "Closed", "priority": "High"},
)
test_result = frappe.get_all(
"ToDo", filters={"name": ("in", todos_to_update)}, fields=["status", "priority"]
)
self.assertTrue(all(x for x in test_result if x["status"] == "Closed"))
self.assertTrue(all(x for x in test_result if x["priority"] == "High"))
def test_update_modified_options(self):
self.todo2.reload()
todo = self.todo2
updated_description = f"{todo.description} - by `test_update_modified_options`"
custom_modified = datetime.datetime.fromisoformat(add_days(now(), 10))
custom_modified_by = "[email protected]"
frappe.db.set_value("ToDo", todo.name, "description", updated_description, update_modified=False)
self.assertEqual(updated_description, frappe.db.get_value("ToDo", todo.name, "description"))
self.assertEqual(todo.modified, frappe.db.get_value("ToDo", todo.name, "modified"))
frappe.db.set_value(
"ToDo",
todo.name,
"description",
"test_set_value change 1",
modified=custom_modified,
modified_by=custom_modified_by,
)
self.assertTupleEqual(
(custom_modified, custom_modified_by),
frappe.db.get_value("ToDo", todo.name, ["modified", "modified_by"]),
)
def test_for_update(self):
self.todo1.reload()
with patch.object(Database, "sql") as sql_called:
frappe.db.set_value(
self.todo1.doctype,
self.todo1.name,
"description",
f"{self.todo1.description}-edit by `test_for_update`",
)
first_query = sql_called.call_args_list[0].args[0]
second_query = sql_called.call_args_list[1].args[0]
self.assertTrue(sql_called.call_count == 2)
self.assertTrue("FOR UPDATE" in first_query)
if frappe.conf.db_type == "postgres":
from frappe.database.postgres.database import modify_query
self.assertTrue(modify_query("UPDATE `tabToDo` SET") in second_query)
if frappe.conf.db_type == "mariadb":
self.assertTrue("UPDATE `tabToDo` SET" in second_query)
def test_cleared_cache(self):
self.todo2.reload()
with patch.object(frappe, "clear_document_cache") as clear_cache:
frappe.db.set_value(
self.todo2.doctype,
self.todo2.name,
"description",
f"{self.todo2.description}-edit by `test_cleared_cache`",
)
clear_cache.assert_called()
def test_update_alias(self):
args = (self.todo1.doctype, self.todo1.name, "description", "Updated by `test_update_alias`")
kwargs = {
"for_update": False,
"modified": None,
"modified_by": None,
"update_modified": True,
"debug": False,
}
self.assertTrue("return self.set_value(" in inspect.getsource(frappe.db.update))
with patch.object(Database, "set_value") as set_value:
frappe.db.update(*args, **kwargs)
set_value.assert_called_once()
set_value.assert_called_with(*args, **kwargs)
@classmethod
def tearDownClass(cls):
frappe.db.rollback()
@run_only_if(db_type_is.POSTGRES)
class TestDDLCommandsPost(unittest.TestCase):
test_table_name = "TestNotes"
def setUp(self) -> None:
frappe.db.sql(
f"""
CREATE TABLE "tab{self.test_table_name}" ("id" INT NULL, content text, PRIMARY KEY ("id"))
"""
)
def tearDown(self) -> None:
frappe.db.sql(f'DROP TABLE "tab{self.test_table_name}"')
self.test_table_name = "TestNotes"
def test_rename(self) -> None:
new_table_name = f"{self.test_table_name}_new"
frappe.db.rename_table(self.test_table_name, new_table_name)
check_exists = frappe.db.sql(
f"""
SELECT EXISTS (
SELECT FROM information_schema.tables
WHERE table_name = 'tab{new_table_name}'
);
"""
)
self.assertTrue(check_exists[0][0])
# * so this table is deleted after the rename
self.test_table_name = new_table_name
def test_describe(self) -> None:
self.assertEqual([("id",), ("content",)], frappe.db.describe(self.test_table_name))
def test_change_type(self) -> None:
frappe.db.change_column_type(self.test_table_name, "id", "varchar(255)")
check_change = frappe.db.sql(
f"""
SELECT
table_name,
column_name,
data_type
FROM
information_schema.columns
WHERE
table_name = 'tab{self.test_table_name}'
"""
)
self.assertGreater(len(check_change), 0)
self.assertIn("character varying", check_change[0])
def test_add_index(self) -> None:
index_name = "test_index"
frappe.db.add_index(self.test_table_name, ["id", "content(50)"], index_name)
indexs_in_table = frappe.db.sql(
f"""
SELECT indexname
FROM pg_indexes
WHERE tablename = 'tab{self.test_table_name}'
AND indexname = '{index_name}' ;
""",
)
self.assertEqual(len(indexs_in_table), 1)
@run_only_if(db_type_is.POSTGRES)
def test_modify_query(self):
from frappe.database.postgres.database import modify_query
query = "select * from `tabtree b` where lft > 13 and rgt <= 16 and name =1.0 and parent = 4134qrsdc and isgroup = 1.00045"
self.assertEqual(
"select * from \"tabtree b\" where lft > '13' and rgt <= '16' and name = '1' and parent = 4134qrsdc and isgroup = 1.00045",
modify_query(query),
)
query = (
'select locate(".io", "frappe.io"), locate("3", cast(3 as varchar)), locate("3", 3::varchar)'
)
self.assertEqual(
'select strpos( "frappe.io", ".io"), strpos( cast(3 as varchar), "3"), strpos( 3::varchar, "3")',
modify_query(query),
)
@run_only_if(db_type_is.POSTGRES)
def test_modify_values(self):
from frappe.database.postgres.database import modify_values
self.assertEqual(
{"abcd": "23", "efgh": "23", "ijkl": 23.0345, "mnop": "wow"},
modify_values({"abcd": 23, "efgh": 23.0, "ijkl": 23.0345, "mnop": "wow"}),
)
self.assertEqual(["23", "23", 23.00004345, "wow"], modify_values((23, 23.0, 23.00004345, "wow")))
def test_sequence_table_creation(self):
from frappe.core.doctype.doctype.test_doctype import new_doctype
dt = new_doctype("autoinc_dt_seq_test", autoname="autoincrement").insert(ignore_permissions=True)
if frappe.db.db_type == "postgres":
self.assertTrue(
frappe.db.sql(
"""select sequence_name FROM information_schema.sequences
where sequence_name ilike 'autoinc_dt_seq_test%'"""
)[0][0]
)
else:
self.assertTrue(
frappe.db.sql(
"""select data_type FROM information_schema.tables
where table_type = 'SEQUENCE' and table_name like 'autoinc_dt_seq_test%'"""
)[0][0]
)
dt.delete(ignore_permissions=True)
|
"""
.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
"""
import datetime
from decimal import Decimal
import pytest
from typepy import DateTime, RealNumber, String, Typecode
from dataproperty import (
Align,
DataPropertyExtractor,
Format,
LineBreakHandling,
MatrixFormatting,
Preprocessor,
)
from .common import get_strict_level_map
DATATIME_DATA = datetime.datetime(2017, 1, 2, 3, 4, 5)
nan = float("nan")
inf = float("inf")
@pytest.fixture
def dp_extractor():
return DataPropertyExtractor()
def datetime_formatter_test(value):
return value.strftime("%Y%m%d %H%M%S")
def datetime_formatter_tostr_0(value):
return value.strftime("%Y-%m-%d %H:%M:%S%z")
def datetime_formatter_tostr_1(value):
return value.strftime("%Y/%m/%d %H:%M:%S")
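# Value-transformation hooks used by the type-hint/trans_func tests below; the
# trans_funcs tests check how the order of registered functions affects the final value.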
def trans_func_1(v):
if v is None:
return ""
if v is False:
return "false"
if v == 0:
return 123
return v
def trans_func_2(v):
if v == 123:
return 321
return v
def nop(v):
return v
class Test_DataPropertyExtractor_to_dp:
@pytest.mark.parametrize(
["value", "type_value_map", "is_strict", "expected_value", "expected_typecode"],
[
[None, {Typecode.NONE: None}, True, None, Typecode.NONE],
[None, {Typecode.NONE: "null"}, False, "null", Typecode.STRING],
[None, {Typecode.NONE: ""}, True, "", Typecode.NULL_STRING],
[None, {Typecode.NONE: 0}, False, 0, Typecode.INTEGER],
[inf, {Typecode.INFINITY: "INF_1"}, False, "INF_1", Typecode.STRING],
[inf, {Typecode.INFINITY: "INF_2"}, True, "INF_2", Typecode.STRING],
[inf, {Typecode.INFINITY: None}, True, None, Typecode.NONE],
["inf", {Typecode.INFINITY: "INF_3"}, False, "INF_3", Typecode.STRING],
["inf", {Typecode.INFINITY: "INF_4"}, True, "inf", Typecode.STRING],
["inf", {Typecode.INFINITY: inf}, False, Decimal("Infinity"), Typecode.INFINITY],
[nan, {Typecode.NAN: "NAN_1"}, False, "NAN_1", Typecode.STRING],
[nan, {Typecode.NAN: "NAN_2"}, True, "NAN_2", Typecode.STRING],
[nan, {Typecode.NAN: None}, True, None, Typecode.NONE],
["nan", {Typecode.NAN: "NAN_4"}, False, "NAN_4", Typecode.STRING],
["nan", {Typecode.NAN: "NAN_5"}, True, "nan", Typecode.STRING],
],
)
def test_normal_type_value_map(
self, dp_extractor, value, type_value_map, is_strict, expected_value, expected_typecode
):
dp_extractor.type_value_map = type_value_map
dp_extractor.strict_level_map = get_strict_level_map(is_strict)
dp = dp_extractor.to_dp(value)
assert dp.data == expected_value
assert dp.typecode == expected_typecode
assert isinstance(dp.to_str(), str)
@pytest.mark.parametrize(
["value", "datetime_formatter", "datetime_format_str", "is_strict", "expected"],
[
[DATATIME_DATA, datetime_formatter_tostr_0, "s", False, "2017-01-02 03:04:05"],
["2017-01-01 00:00:00", datetime_formatter_tostr_1, "s", False, "2017/01/01 00:00:00"],
[
"2017-01-01 00:00:00",
None,
"%Y-%m-%dT%H:%M:%S",
False,
datetime.datetime(2017, 1, 1, 0, 0, 0),
],
["2017-01-01 00:00:00", None, "s", True, "2017-01-01 00:00:00"],
],
)
def test_normal_datetime(
self, dp_extractor, value, datetime_formatter, datetime_format_str, is_strict, expected
):
dp_extractor.datetime_formatter = datetime_formatter
dp_extractor.datetime_format_str = datetime_format_str
dp_extractor.strict_level_map = get_strict_level_map(is_strict)
dp = dp_extractor.to_dp(value)
assert dp.data == expected
@pytest.mark.parametrize(
["value", "type_hint", "trans_func", "expected"],
[
[1, String, nop, "1"],
[0, String, nop, "0"],
[None, String, nop, "None"],
[0, String, trans_func_1, "123"],
[False, String, trans_func_1, "false"],
[None, String, trans_func_1, ""],
],
)
def test_normal_type_hint(self, dp_extractor, value, type_hint, trans_func, expected):
dp_extractor.register_trans_func(trans_func)
dp = dp_extractor._DataPropertyExtractor__to_dp(value, type_hint=type_hint)
assert dp.data == expected
@pytest.mark.parametrize(
["value", "type_hint", "trans_funcs", "expected"],
[
[0, String, [trans_func_2, trans_func_1], "321"],
[0, String, [trans_func_1, trans_func_2], "123"],
],
)
def test_normal_trans_funcs(self, dp_extractor, value, type_hint, trans_funcs, expected):
for trans_func in trans_funcs:
dp_extractor.register_trans_func(trans_func)
dp = dp_extractor._DataPropertyExtractor__to_dp(value, type_hint=type_hint)
assert dp.data == expected
class Test_DataPropertyExtractor_to_dp_quoting_flags:
ALWAYS_QUOTE_FLAG_MAP = {
Typecode.NONE: True,
Typecode.INTEGER: True,
Typecode.REAL_NUMBER: True,
Typecode.STRING: True,
Typecode.NULL_STRING: True,
Typecode.DATETIME: True,
Typecode.NAN: True,
Typecode.BOOL: True,
}
@pytest.mark.parametrize(
["value", "quoting_flags", "expected"],
[
["string", ALWAYS_QUOTE_FLAG_MAP, '"string"'],
['"string"', ALWAYS_QUOTE_FLAG_MAP, '"string"'],
[' "123"', ALWAYS_QUOTE_FLAG_MAP, ' "123"'],
['"string" ', ALWAYS_QUOTE_FLAG_MAP, '"string" '],
[' "12 345" ', ALWAYS_QUOTE_FLAG_MAP, ' "12 345" '],
],
)
def test_normal_always_quote(self, dp_extractor, value, quoting_flags, expected):
dp_extractor.quoting_flags = quoting_flags
dp = dp_extractor.to_dp(value)
assert dp.data == expected
class Test_DataPropertyExtractor_to_dp_matrix:
@pytest.mark.parametrize(
["value"],
[
[
[
["山田", "太郎", "2001/1/1", "100-0002", "東京都千代田区皇居外苑", "03-1234-5678"],
["山田", "次郎", "2001/1/2", "251-0036", "神奈川県藤沢市江の島1丁目", "03-9999-9999"],
]
]
],
)
def test_smoke(self, dp_extractor, value):
assert len(list(dp_extractor.to_dp_matrix(value))) > 0
@pytest.mark.parametrize(
["value", "type_value_map", "datetime_formatter"],
[
[
[[None, "1"], [1.1, "a"], [nan, inf], ["false", DATATIME_DATA]],
{Typecode.NONE: "null", Typecode.INFINITY: "INFINITY", Typecode.NAN: "NAN"},
datetime_formatter_test,
]
],
)
def test_normal(self, dp_extractor, value, type_value_map, datetime_formatter):
dp_extractor.type_value_map = type_value_map
dp_extractor.datetime_formatter = datetime_formatter
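# Feed the already-converted DataProperty matrix back through to_dp_matrix so the
# asserts below also confirm that previously extracted values survive a second conversion.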
dp_matrix = list(dp_extractor.to_dp_matrix(dp_extractor.to_dp_matrix(value)))
assert len(dp_matrix) == 4
dp = dp_matrix[0][0]
assert dp.data == "null"
assert dp.typecode == Typecode.STRING
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.decimal_places is None
assert dp.format_str == "{:s}"
dp = dp_matrix[0][1]
assert dp.data == 1
assert dp.typecode == Typecode.INTEGER
assert dp.align.align_code == Align.RIGHT.align_code
assert dp.align.align_string == Align.RIGHT.align_string
assert dp.decimal_places == 0
assert dp.format_str == "{:d}"
dp = dp_matrix[1][0]
assert dp.data == Decimal("1.1")
assert dp.typecode == Typecode.REAL_NUMBER
assert dp.align.align_code == Align.RIGHT.align_code
assert dp.align.align_string == Align.RIGHT.align_string
assert dp.decimal_places == 1
assert dp.format_str == "{:.1f}"
dp = dp_matrix[1][1]
assert dp.data == "a"
assert dp.typecode == Typecode.STRING
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.decimal_places is None
assert dp.format_str == "{:s}"
dp = dp_matrix[2][0]
assert dp.data == "NAN"
assert dp.typecode == Typecode.STRING
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.decimal_places is None
assert dp.format_str == "{:s}"
dp = dp_matrix[2][1]
assert dp.data == "INFINITY"
assert dp.typecode == Typecode.STRING
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.decimal_places is None
assert dp.format_str == "{:s}"
dp = dp_matrix[3][0]
assert dp.data == "false"
assert dp.typecode == Typecode.STRING
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.decimal_places is None
assert dp.format_str == "{:s}"
dp = dp_matrix[3][1]
assert dp.data == "20170102 030405"
assert dp.typecode == Typecode.STRING
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.decimal_places is None
assert dp.format_str == "{:s}"
@pytest.mark.parametrize(["value", "expected"], [[None, []], [[], []], [(), []]])
def test_empty(self, dp_extractor, value, expected):
assert dp_extractor.to_dp_matrix(value) == expected
class Test_DataPropertyExtractor_to_dp_list:
@pytest.mark.parametrize(
["value", "float_type"], [[[0.1, Decimal("1.1")], float], [[0.1, Decimal("1.1")], Decimal]]
)
def test_normal_float(self, dp_extractor, value, float_type):
dp_extractor.float_type = float_type
dp_list = dp_extractor.to_dp_list(value)
for dp in dp_list:
assert isinstance(dp.data, float_type)
@pytest.mark.parametrize(
["value", "type_hint", "expected_list"],
[
[
["2017-01-02 03:04:05", datetime.datetime(2017, 1, 2, 3, 4, 5)],
None,
[Typecode.STRING, Typecode.DATETIME],
],
[
["2017-01-02 03:04:05", datetime.datetime(2017, 1, 2, 3, 4, 5)],
DateTime,
[Typecode.DATETIME, Typecode.DATETIME],
],
],
)
def test_normal_type_hint(self, dp_extractor, value, type_hint, expected_list):
dp_extractor.default_type_hint = type_hint
dp_list = dp_extractor.to_dp_list(value)
for dp, expected in zip(dp_list, expected_list):
assert dp.typecode == expected
@pytest.mark.parametrize(
["value", "strip_str_header", "strip_str_value", "expected"],
[
[['"1"', '"-1.1"', '"abc"'], "", '"', [1, Decimal("-1.1"), "abc"]],
[['"1"', '"-1.1"', '"abc"'], '"', "", ['"1"', '"-1.1"', '"abc"']],
[['"1"', '"-1.1"', '"abc"'], None, None, ['"1"', '"-1.1"', '"abc"']],
],
)
def test_normal_strip_str(
self, dp_extractor, value, strip_str_header, strip_str_value, expected
):
dp_extractor.strip_str_header = strip_str_header
dp_extractor.preprocessor = Preprocessor(strip_str=strip_str_value)
dp_list = dp_extractor.to_dp_list(value)
for dp, expected_value in zip(dp_list, expected):
assert dp.data == expected_value
dp_matrix = dp_extractor.to_dp_matrix([value])
for dp, expected_value in zip(dp_matrix[0], expected):
assert dp.data == expected_value
@pytest.mark.parametrize(
["value", "line_break_handling", "expected"],
[
[["a\nb", "a\r\nb"], LineBreakHandling.NOP, ["a\nb", "a\r\nb"]],
[["a\nb", "a\r\nb"], LineBreakHandling.REPLACE, ["a b", "a b"]],
[["a\nb", "a\r\nb"], LineBreakHandling.ESCAPE, ["a\\nb", "a\\r\\nb"]],
],
)
def test_normal_line_break_handling(self, dp_extractor, value, line_break_handling, expected):
dp_extractor.preprocessor = Preprocessor(line_break_handling=line_break_handling)
dp_list = dp_extractor.to_dp_list(value)
for dp, value in zip(dp_list, expected):
assert dp.data == value
@pytest.mark.parametrize(
["value", "line_break_handling", "line_break_repl", "expected"],
[
[["a\nb", "a\r\nb"], LineBreakHandling.NOP, "<br>", ["a\nb", "a\r\nb"]],
[
["a\nb", "a\r\nb", "a\r\n\nb"],
LineBreakHandling.REPLACE,
"<br>",
["a<br>b", "a<br>b", "a<br><br>b"],
],
],
)
def test_normal_line_break_repl(
self, dp_extractor, value, line_break_handling, line_break_repl, expected
):
dp_extractor.preprocessor = Preprocessor(
line_break_handling=line_break_handling, line_break_repl=line_break_repl
)
dp_list = dp_extractor.to_dp_list(value)
for dp, value in zip(dp_list, expected):
assert dp.data == value, value
@pytest.mark.parametrize(
["value", "escape_formula_injection", "expected"],
[
[
["a+b", "=a+b", "-a+b", "+a+b", "@a+b"],
True,
["a+b", "'=a+b", "'-a+b", "'+a+b", "'@a+b"],
],
[
["a+b", "=a+b", "-a+b", "+a+b", "@a+b"],
False,
["a+b", "=a+b", "-a+b", "+a+b", "@a+b"],
],
],
)
def test_normal_escape_formula_injection(
self, dp_extractor, value, escape_formula_injection, expected
):
dp_extractor.preprocessor = Preprocessor(
is_escape_formula_injection=escape_formula_injection
)
dp_list = dp_extractor.to_dp_list(value)
for dp, value in zip(dp_list, expected):
assert dp.data == value, value
@pytest.mark.parametrize(
["value", "expected"],
[[[0, None], [0, None]]],
)
def test_exception_escape_formula_injection(self, dp_extractor, value, expected):
dp_extractor.preprocessor = Preprocessor(is_escape_formula_injection=True)
dp_list = dp_extractor.to_dp_list(value)
for dp, value in zip(dp_list, expected):
assert dp.data == value, value
class Test_DataPropertyExtractor_to_column_dp_list:
TEST_DATA_MATRIX = [
[1, 1.1, "aa", 1, 1, True, inf, nan, datetime.datetime(2017, 1, 1, 0, 0, 0)],
[2, 2.2, "bbb", 2.2, 2.2, False, "inf", "nan", "2017-01-01T01:23:45+0900"],
[3, 3.33, "cccc", -3, "ccc", True, "infinity", "NAN", "2017-11-01 01:23:45+0900"],
]
TEST_DATA_MATRIX_TUPLE = (
(1, 1.1, "aa", 1, 1, True, inf, nan, datetime.datetime(2017, 1, 1, 0, 0, 0)),
(2, 2.2, "bbb", 2.2, 2.2, False, "inf", "nan", "2017-01-01T01:23:45+0900"),
(3, 3.33, "cccc", -3, "ccc", True, "infinity", "NAN", "2017-11-01 01:23:45+0900"),
)
@pytest.mark.parametrize(
["max_workers", "headers", "value"],
[
[1, ["i", "f", "s", "if", "mix", "bool", "inf", "nan", "time"], TEST_DATA_MATRIX],
[4, ["i", "f", "s", "if", "mix", "bool", "inf", "nan", "time"], TEST_DATA_MATRIX],
[None, None, TEST_DATA_MATRIX],
[None, [], TEST_DATA_MATRIX],
[
None,
("i", "f", "s", "if", "mix", "bool", "inf", "nan", "time"),
TEST_DATA_MATRIX_TUPLE,
],
],
)
def test_normal_default(self, dp_extractor, max_workers, headers, value):
dp_extractor.max_workers = max_workers
dp_extractor.headers = headers
col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(value))
assert len(col_dp_list) == 9
col_idx = 0
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.INTEGER
assert dp.align.align_code == Align.RIGHT.align_code
assert dp.align.align_string == Align.RIGHT.align_string
assert dp.ascii_char_width == 1
assert dp.decimal_places == 0
assert dp.format_str == "{:d}"
assert str(dp) == (
"column=0, type=INTEGER, align=right, "
"ascii_width=1, bit_len=2, int_digits=1, decimal_places=0"
)
col_idx += 1
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.REAL_NUMBER
assert dp.align.align_code == Align.RIGHT.align_code
assert dp.align.align_string == Align.RIGHT.align_string
assert dp.ascii_char_width == 4
assert dp.decimal_places == 2
assert dp.format_str == "{:.2f}"
col_idx += 1
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.STRING
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 4
assert dp.decimal_places is None
assert dp.format_str == "{:s}"
col_idx += 1
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.REAL_NUMBER
assert dp.align.align_code == Align.RIGHT.align_code
assert dp.align.align_string == Align.RIGHT.align_string
assert dp.ascii_char_width == 4
assert dp.decimal_places == 1
assert dp.format_str == "{:.1f}"
col_idx += 1
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.STRING
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 3
assert dp.decimal_places == 1
assert dp.format_str == "{:s}"
col_idx += 1
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.BOOL
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 5
assert dp.decimal_places is None
assert dp.format_str == "{}"
col_idx += 1
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.INFINITY
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 8
assert dp.decimal_places is None
assert dp.format_str == "{:f}"
col_idx += 1
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.NAN
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 3
assert dp.decimal_places is None
assert dp.format_str == "{:f}"
col_idx += 1
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.STRING
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 24
assert dp.decimal_places is None
assert dp.format_str == "{:s}"
@pytest.mark.parametrize(
["headers", "value"],
[
[
["i", "f"],
[
[1234, 1234.5],
[1234567, 34.5],
],
],
[
[],
[
[1234, 1234.5],
[1234567, 34.5],
],
],
],
)
def test_normal_format_str(self, dp_extractor, headers, value):
dp_extractor.format_flags_list = [Format.THOUSAND_SEPARATOR, Format.THOUSAND_SEPARATOR]
dp_extractor.max_workers = 1
dp_extractor.headers = headers
col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(value))
assert len(col_dp_list) == 2
col_idx = 0
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.INTEGER
assert dp.format_str == "{:,d}"
assert dp.ascii_char_width == 9
col_idx += 1
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.REAL_NUMBER
assert dp.format_str == "{:,.1f}"
assert dp.ascii_char_width == 7
@pytest.mark.parametrize(
["headers", "value"],
[
[["i", "f", "s", "if", "mix", "bool", "inf", "nan", "time"], TEST_DATA_MATRIX],
[None, TEST_DATA_MATRIX],
[[], TEST_DATA_MATRIX],
],
)
def test_normal_not_strict(self, dp_extractor, headers, value):
dp_extractor.headers = headers
col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(value))
assert len(col_dp_list) == 9
dp = col_dp_list[0]
assert dp.typecode == Typecode.INTEGER
assert dp.align.align_code == Align.RIGHT.align_code
assert dp.align.align_string == Align.RIGHT.align_string
assert dp.ascii_char_width == 1
assert dp.decimal_places == 0
assert dp.format_str == "{:d}"
dp = col_dp_list[1]
assert dp.typecode == Typecode.REAL_NUMBER
assert dp.align.align_code == Align.RIGHT.align_code
assert dp.align.align_string == Align.RIGHT.align_string
assert dp.ascii_char_width == 4
assert dp.decimal_places == 2
assert dp.format_str == "{:.2f}"
def test_normal_column_type_hints(self, dp_extractor):
data_matrix = [
[1, "1.1", 1, "2017-01-02 03:04:05"],
[2, "2.2", 0.1, "2017-01-02 03:04:05"],
]
dp_extractor.headers = ["none", "to_float", "to_str", "to_datetime"]
dp_extractor.column_type_hints = [None, RealNumber, String, DateTime]
assert dp_extractor.column_type_hints == [None, RealNumber, String, DateTime]
col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(data_matrix))
assert len(col_dp_list) == 4
assert col_dp_list[0].typecode == Typecode.INTEGER
assert col_dp_list[1].typecode == Typecode.REAL_NUMBER
assert col_dp_list[2].typecode == Typecode.STRING
assert col_dp_list[3].typecode == Typecode.DATETIME
dp_extractor.column_type_hints = ["", "float", "str", "datetime"]
assert dp_extractor.column_type_hints == [None, RealNumber, String, DateTime]
col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(data_matrix))
assert len(col_dp_list) == 4
assert col_dp_list[0].typecode == Typecode.INTEGER
assert col_dp_list[1].typecode == Typecode.REAL_NUMBER
assert col_dp_list[2].typecode == Typecode.STRING
assert col_dp_list[3].typecode == Typecode.DATETIME
def test_normal_max_precision(self):
extractor = DataPropertyExtractor(max_precision=3)
extractor.headers = ["i", "f"]
value = [
[1234, 0.0000000001],
[1234567, 34.5],
]
col_dp_list = extractor.to_column_dp_list(extractor.to_dp_matrix(value))
assert len(col_dp_list) == 2
col_idx = 0
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.INTEGER
assert dp.decimal_places == 0
col_idx += 1
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.REAL_NUMBER
assert dp.decimal_places == 3
# test setter
extractor.max_precision = 1
col_dp_list = extractor.to_column_dp_list(extractor.to_dp_matrix(value))
assert len(col_dp_list) == 2
col_idx = 0
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.INTEGER
assert dp.decimal_places == 0
col_idx += 1
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.REAL_NUMBER
assert dp.decimal_places == 1
def test_normal_nan_inf(self, dp_extractor):
dp_extractor.headers = ["n", "i"]
col_dp_list = dp_extractor.to_column_dp_list(
dp_extractor.to_dp_matrix([[nan, inf], ["nan", "inf"]])
)
assert len(col_dp_list) == 2
dp = col_dp_list[0]
assert dp.typecode == Typecode.NAN
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 3
assert dp.decimal_places is None
dp = col_dp_list[1]
assert dp.typecode == Typecode.INFINITY
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 8
assert dp.decimal_places is None
@pytest.mark.parametrize(["ambiguous_width"], [[2], [1]])
def test_normal_east_asian_ambiguous_width(self, dp_extractor, ambiguous_width):
dp_extractor.headers = ["ascii", "eaa"]
dp_extractor.east_asian_ambiguous_width = ambiguous_width
col_dp_list = dp_extractor.to_column_dp_list(
dp_extractor.to_dp_matrix([["abcdefg", "Øαββ"], ["abcdefghij", "ØØ"]])
)
assert len(col_dp_list) == 2
dp = col_dp_list[0]
assert dp.typecode == Typecode.STRING
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 10
assert dp.decimal_places is None
dp = col_dp_list[1]
assert dp.typecode == Typecode.STRING
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 4 * ambiguous_width
assert dp.decimal_places is None
def test_normal_empty_value(self, dp_extractor):
dp_extractor.headers = ["a", "22", "cccc"]
col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(None))
dp = col_dp_list[0]
assert dp.typecode == Typecode.NONE
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 1
assert dp.decimal_places is None
assert dp.format_str == "{}"
dp = col_dp_list[1]
assert dp.typecode == Typecode.NONE
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 2
assert dp.decimal_places is None
assert dp.format_str == "{}"
dp = col_dp_list[2]
assert dp.typecode == Typecode.NONE
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 4
assert dp.decimal_places is None
assert dp.format_str == "{}"
class Test_DataPropertyExtractor_matrix_formatting:
TEST_DATA_MATRIX_NORMAL_COL3 = [["a", 0, "aa"], ["b", 1, "bb"], ["c", 2, "ccc"]]
TEST_DATA_MATRIX_NOUNIFORM_COL1 = [["a", 0], ["b", 1, "bb"], ["c", 2, "ccc", 0.1], ["d"]]
@pytest.mark.parametrize(
["headers", "value", "matrix_formatting", "expected"],
[
[None, TEST_DATA_MATRIX_NOUNIFORM_COL1, MatrixFormatting.TRIM, 1],
[["a", "b"], TEST_DATA_MATRIX_NORMAL_COL3, MatrixFormatting.TRIM, 2],
[None, TEST_DATA_MATRIX_NOUNIFORM_COL1, MatrixFormatting.FILL_NONE, 4],
[["a", "b", "c"], TEST_DATA_MATRIX_NORMAL_COL3, MatrixFormatting.FILL_NONE, 3],
[["a", "b", "c"], TEST_DATA_MATRIX_NOUNIFORM_COL1, MatrixFormatting.HEADER_ALIGNED, 3],
[
["a", "b", "c", "d", "e"],
TEST_DATA_MATRIX_NOUNIFORM_COL1,
MatrixFormatting.HEADER_ALIGNED,
5,
],
],
)
def test_normal_matrix_formatting(
self, dp_extractor, headers, value, matrix_formatting, expected
):
dp_extractor.headers = headers
dp_extractor.matrix_formatting = matrix_formatting
col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(value))
assert len(col_dp_list) == expected
@pytest.mark.parametrize(
["headers", "value", "matrix_formatting", "expected"],
[
[
["i", "f", "s", "if", "mix"],
TEST_DATA_MATRIX_NOUNIFORM_COL1,
MatrixFormatting.EXCEPTION,
ValueError,
]
],
)
def test_exception_matrix_formatting(
self, dp_extractor, headers, value, matrix_formatting, expected
):
dp_extractor.headers = headers
dp_extractor.matrix_formatting = matrix_formatting
with pytest.raises(expected):
dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(value))
class Test_DataPropertyExtractor_update_preprocessor:
def test_normal(self, dp_extractor):
assert dp_extractor.preprocessor.strip_str is None
assert dp_extractor.preprocessor.replace_tabs_with_spaces is True
assert dp_extractor.preprocessor.tab_length == 2
assert dp_extractor.preprocessor.line_break_handling is LineBreakHandling.NOP
assert dp_extractor.preprocessor.line_break_repl == " "
assert dp_extractor.preprocessor.is_escape_html_tag is False
assert dp_extractor.preprocessor.is_escape_formula_injection is False
dp_extractor.update_preprocessor(
strip_str='"',
replace_tabs_with_spaces=False,
tab_length=4,
line_break_handling=LineBreakHandling.REPLACE,
line_break_repl="<br>",
is_escape_html_tag=True,
is_escape_formula_injection=True,
)
assert dp_extractor.preprocessor.strip_str == '"'
assert dp_extractor.preprocessor.replace_tabs_with_spaces is False
assert dp_extractor.preprocessor.tab_length == 4
assert dp_extractor.preprocessor.line_break_handling is LineBreakHandling.REPLACE
assert dp_extractor.preprocessor.line_break_repl == "<br>"
assert dp_extractor.preprocessor.is_escape_html_tag is True
assert dp_extractor.preprocessor.is_escape_formula_injection is True
|
"""
.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
"""
import datetime
from decimal import Decimal
import pytest
from typepy import DateTime, RealNumber, String, Typecode
from dataproperty import (
Align,
DataPropertyExtractor,
Format,
LineBreakHandling,
MatrixFormatting,
Preprocessor,
)
from .common import get_strict_level_map
DATATIME_DATA = datetime.datetime(2017, 1, 2, 3, 4, 5)
nan = float("nan")
inf = float("inf")
@pytest.fixture
def dp_extractor():
return DataPropertyExtractor()
def datetime_formatter_test(value):
return value.strftime("%Y%m%d %H%M%S")
def datetime_formatter_tostr_0(value):
return value.strftime("%Y-%m-%d %H:%M:%S%z")
def datetime_formatter_tostr_1(value):
return value.strftime("%Y/%m/%d %H:%M:%S")
def trans_func_1(v):
if v is None:
return ""
if v is False:
return "false"
if v == 0:
return 123
return v
def trans_func_2(v):
if v == 123:
return 321
return v
def nop(v):
return v
class Test_DataPropertyExtractor_to_dp:
@pytest.mark.parametrize(
["value", "type_value_map", "is_strict", "expected_value", "expected_typecode"],
[
[None, {Typecode.NONE: None}, True, None, Typecode.NONE],
[None, {Typecode.NONE: "null"}, False, "null", Typecode.STRING],
[None, {Typecode.NONE: ""}, True, "", Typecode.NULL_STRING],
[None, {Typecode.NONE: 0}, False, 0, Typecode.INTEGER],
[inf, {Typecode.INFINITY: "INF_1"}, False, "INF_1", Typecode.STRING],
[inf, {Typecode.INFINITY: "INF_2"}, True, "INF_2", Typecode.STRING],
[inf, {Typecode.INFINITY: None}, True, None, Typecode.NONE],
["inf", {Typecode.INFINITY: "INF_3"}, False, "INF_3", Typecode.STRING],
["inf", {Typecode.INFINITY: "INF_4"}, True, "inf", Typecode.STRING],
["inf", {Typecode.INFINITY: inf}, False, Decimal("Infinity"), Typecode.INFINITY],
[nan, {Typecode.NAN: "NAN_1"}, False, "NAN_1", Typecode.STRING],
[nan, {Typecode.NAN: "NAN_2"}, True, "NAN_2", Typecode.STRING],
[nan, {Typecode.NAN: None}, True, None, Typecode.NONE],
["nan", {Typecode.NAN: "NAN_4"}, False, "NAN_4", Typecode.STRING],
["nan", {Typecode.NAN: "NAN_5"}, True, "nan", Typecode.STRING],
],
)
def test_normal_type_value_map(
self, dp_extractor, value, type_value_map, is_strict, expected_value, expected_typecode
):
dp_extractor.type_value_map = type_value_map
dp_extractor.strict_level_map = get_strict_level_map(is_strict)
dp = dp_extractor.to_dp(value)
assert dp.data == expected_value
assert dp.typecode == expected_typecode
assert isinstance(dp.to_str(), str)
@pytest.mark.parametrize(
["value", "datetime_formatter", "datetime_format_str", "is_strict", "expected"],
[
[DATATIME_DATA, datetime_formatter_tostr_0, "s", False, "2017-01-02 03:04:05"],
["2017-01-01 00:00:00", datetime_formatter_tostr_1, "s", False, "2017/01/01 00:00:00"],
[
"2017-01-01 00:00:00",
None,
"%Y-%m-%dT%H:%M:%S",
False,
datetime.datetime(2017, 1, 1, 0, 0, 0),
],
["2017-01-01 00:00:00", None, "s", True, "2017-01-01 00:00:00"],
],
)
def test_normal_datetime(
self, dp_extractor, value, datetime_formatter, datetime_format_str, is_strict, expected
):
dp_extractor.datetime_formatter = datetime_formatter
dp_extractor.datetime_format_str = datetime_format_str
dp_extractor.strict_level_map = get_strict_level_map(is_strict)
dp = dp_extractor.to_dp(value)
assert dp.data == expected
@pytest.mark.parametrize(
["value", "type_hint", "trans_func", "expected"],
[
[1, String, nop, "1"],
[0, String, nop, "0"],
[None, String, nop, "None"],
[0, String, trans_func_1, "123"],
[False, String, trans_func_1, "false"],
[None, String, trans_func_1, ""],
],
)
def test_normal_type_hint(self, dp_extractor, value, type_hint, trans_func, expected):
dp_extractor.register_trans_func(trans_func)
dp = dp_extractor._DataPropertyExtractor__to_dp(value, type_hint=type_hint)
assert dp.data == expected
@pytest.mark.parametrize(
["value", "type_hint", "trans_funcs", "expected"],
[
[0, String, [trans_func_2, trans_func_1], "321"],
[0, String, [trans_func_1, trans_func_2], "123"],
],
)
def test_normal_trans_funcs(self, dp_extractor, value, type_hint, trans_funcs, expected):
for trans_func in trans_funcs:
dp_extractor.register_trans_func(trans_func)
dp = dp_extractor._DataPropertyExtractor__to_dp(value, type_hint=type_hint)
assert dp.data == expected
class Test_DataPropertyExtractor_to_dp_quoting_flags:
ALWAYS_QUOTE_FLAG_MAP = {
Typecode.NONE: True,
Typecode.INTEGER: True,
Typecode.REAL_NUMBER: True,
Typecode.STRING: True,
Typecode.NULL_STRING: True,
Typecode.DATETIME: True,
Typecode.REAL_NUMBER: True,
Typecode.NAN: True,
Typecode.BOOL: True,
}
@pytest.mark.parametrize(
["value", "quoting_flags", "expected"],
[
["string", ALWAYS_QUOTE_FLAG_MAP, '"string"'],
['"string"', ALWAYS_QUOTE_FLAG_MAP, '"string"'],
[' "123"', ALWAYS_QUOTE_FLAG_MAP, ' "123"'],
['"string" ', ALWAYS_QUOTE_FLAG_MAP, '"string" '],
[' "12 345" ', ALWAYS_QUOTE_FLAG_MAP, ' "12 345" '],
],
)
def test_normal_always_quote(self, dp_extractor, value, quoting_flags, expected):
dp_extractor.quoting_flags = quoting_flags
dp = dp_extractor.to_dp(value)
assert dp.data == expected
class Test_DataPropertyExtractor_to_dp_matrix:
@pytest.mark.parametrize(
["value"],
[
[
[
["山田", "太郎", "2001/1/1", "100-0002", "東京都千代田区皇居外苑", "03-1234-5678"],
["山田", "次郎", "2001/1/2", "251-0036", "神奈川県藤沢市江の島1丁目", "03-9999-9999"],
]
]
],
)
def test_smoke(self, dp_extractor, value):
assert len(list(dp_extractor.to_dp_matrix(value))) > 0
@pytest.mark.parametrize(
["value", "type_value_map", "datetime_formatter"],
[
[
[[None, "1"], [1.1, "a"], [nan, inf], ["false", DATATIME_DATA]],
{Typecode.NONE: "null", Typecode.INFINITY: "INFINITY", Typecode.NAN: "NAN"},
datetime_formatter_test,
]
],
)
def test_normal(self, dp_extractor, value, type_value_map, datetime_formatter):
dp_extractor.type_value_map = type_value_map
dp_extractor.datetime_formatter = datetime_formatter
dp_matrix = list(dp_extractor.to_dp_matrix(dp_extractor.to_dp_matrix(value)))
assert len(dp_matrix) == 4
dp = dp_matrix[0][0]
assert dp.data == "null"
assert dp.typecode == Typecode.STRING
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.decimal_places is None
assert dp.format_str == "{:s}"
dp = dp_matrix[0][1]
assert dp.data == 1
assert dp.typecode == Typecode.INTEGER
assert dp.align.align_code == Align.RIGHT.align_code
assert dp.align.align_string == Align.RIGHT.align_string
assert dp.decimal_places == 0
assert dp.format_str == "{:d}"
dp = dp_matrix[1][0]
assert dp.data == Decimal("1.1")
assert dp.typecode == Typecode.REAL_NUMBER
assert dp.align.align_code == Align.RIGHT.align_code
assert dp.align.align_string == Align.RIGHT.align_string
assert dp.decimal_places == 1
assert dp.format_str == "{:.1f}"
dp = dp_matrix[1][1]
assert dp.data == "a"
assert dp.typecode == Typecode.STRING
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.decimal_places is None
assert dp.format_str == "{:s}"
dp = dp_matrix[2][0]
assert dp.data == "NAN"
assert dp.typecode == Typecode.STRING
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.decimal_places is None
assert dp.format_str == "{:s}"
dp = dp_matrix[2][1]
assert dp.data == "INFINITY"
assert dp.typecode == Typecode.STRING
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.decimal_places is None
assert dp.format_str == "{:s}"
dp = dp_matrix[3][0]
assert dp.data == "false"
assert dp.typecode == Typecode.STRING
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.decimal_places is None
assert dp.format_str == "{:s}"
dp = dp_matrix[3][1]
assert dp.data == "20170102 030405"
assert dp.typecode == Typecode.STRING
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.decimal_places is None
assert dp.format_str == "{:s}"
@pytest.mark.parametrize(["value", "expected"], [[None, []], [[], []], [(), []]])
def test_empty(self, dp_extractor, value, expected):
assert dp_extractor.to_dp_matrix(value) == expected
class Test_DataPropertyExtractor_to_dp_list:
@pytest.mark.parametrize(
["value", "float_type"], [[[0.1, Decimal("1.1")], float], [[0.1, Decimal("1.1")], Decimal]]
)
def test_normal_float(self, dp_extractor, value, float_type):
dp_extractor.float_type = float_type
dp_list = dp_extractor.to_dp_list(value)
for dp in dp_list:
assert isinstance(dp.data, float_type)
@pytest.mark.parametrize(
["value", "type_hint", "expected_list"],
[
[
["2017-01-02 03:04:05", datetime.datetime(2017, 1, 2, 3, 4, 5)],
None,
[Typecode.STRING, Typecode.DATETIME],
],
[
["2017-01-02 03:04:05", datetime.datetime(2017, 1, 2, 3, 4, 5)],
DateTime,
[Typecode.DATETIME, Typecode.DATETIME],
],
],
)
def test_normal_type_hint(self, dp_extractor, value, type_hint, expected_list):
dp_extractor.default_type_hint = type_hint
dp_list = dp_extractor.to_dp_list(value)
for dp, expected in zip(dp_list, expected_list):
assert dp.typecode == expected
@pytest.mark.parametrize(
["value", "strip_str_header", "strip_str_value", "expected"],
[
[['"1"', '"-1.1"', '"abc"'], "", '"', [1, Decimal("-1.1"), "abc"]],
[['"1"', '"-1.1"', '"abc"'], '"', "", ['"1"', '"-1.1"', '"abc"']],
[['"1"', '"-1.1"', '"abc"'], None, None, ['"1"', '"-1.1"', '"abc"']],
],
)
def test_normal_strip_str(
self, dp_extractor, value, strip_str_header, strip_str_value, expected
):
dp_extractor.strip_str_header = strip_str_header
dp_extractor.preprocessor = Preprocessor(strip_str=strip_str_value)
dp_list = dp_extractor.to_dp_list(value)
for dp, expected_value in zip(dp_list, expected):
assert dp.data == expected_value
dp_matrix = dp_extractor.to_dp_matrix([value])
for dp, expected_value in zip(dp_matrix[0], expected):
assert dp.data == expected_value
@pytest.mark.parametrize(
["value", "line_break_handling", "expected"],
[
[["a\nb", "a\r\nb"], LineBreakHandling.NOP, ["a\nb", "a\r\nb"]],
[["a\nb", "a\r\nb"], LineBreakHandling.REPLACE, ["a b", "a b"]],
[["a\nb", "a\r\nb"], LineBreakHandling.ESCAPE, ["a\\nb", "a\\r\\nb"]],
],
)
def test_normal_line_break_handling(self, dp_extractor, value, line_break_handling, expected):
dp_extractor.preprocessor = Preprocessor(line_break_handling=line_break_handling)
dp_list = dp_extractor.to_dp_list(value)
for dp, value in zip(dp_list, expected):
assert dp.data == value
@pytest.mark.parametrize(
["value", "line_break_handling", "line_break_repl", "expected"],
[
[["a\nb", "a\r\nb"], LineBreakHandling.NOP, "<br>", ["a\nb", "a\r\nb"]],
[
["a\nb", "a\r\nb", "a\r\n\nb"],
LineBreakHandling.REPLACE,
"<br>",
["a<br>b", "a<br>b", "a<br><br>b"],
],
],
)
def test_normal_line_break_repl(
self, dp_extractor, value, line_break_handling, line_break_repl, expected
):
dp_extractor.preprocessor = Preprocessor(
line_break_handling=line_break_handling, line_break_repl=line_break_repl
)
dp_list = dp_extractor.to_dp_list(value)
for dp, value in zip(dp_list, expected):
assert dp.data == value, value
@pytest.mark.parametrize(
["value", "escape_formula_injection", "expected"],
[
[
["a+b", "=a+b", "-a+b", "+a+b", "@a+b"],
True,
["a+b", "'=a+b", "'-a+b", "'+a+b", "'@a+b"],
],
[
["a+b", "=a+b", "-a+b", "+a+b", "@a+b"],
False,
["a+b", "=a+b", "-a+b", "+a+b", "@a+b"],
],
],
)
def test_normal_escape_formula_injection(
self, dp_extractor, value, escape_formula_injection, expected
):
dp_extractor.preprocessor = Preprocessor(
is_escape_formula_injection=escape_formula_injection
)
dp_list = dp_extractor.to_dp_list(value)
for dp, value in zip(dp_list, expected):
assert dp.data == value, value
@pytest.mark.parametrize(
["value", "expected"],
[[[0, None], [0, None]]],
)
def test_exception_escape_formula_injection(self, dp_extractor, value, expected):
dp_extractor.preprocessor = Preprocessor(is_escape_formula_injection=True)
dp_list = dp_extractor.to_dp_list(value)
for dp, value in zip(dp_list, expected):
assert dp.data == value, value
class Test_DataPropertyExtractor_to_column_dp_list:
TEST_DATA_MATRIX = [
[1, 1.1, "aa", 1, 1, True, inf, nan, datetime.datetime(2017, 1, 1, 0, 0, 0)],
[2, 2.2, "bbb", 2.2, 2.2, False, "inf", "nan", "2017-01-01T01:23:45+0900"],
[3, 3.33, "cccc", -3, "ccc", True, "infinity", "NAN", "2017-11-01 01:23:45+0900"],
]
TEST_DATA_MATRIX_TUPLE = (
(1, 1.1, "aa", 1, 1, True, inf, nan, datetime.datetime(2017, 1, 1, 0, 0, 0)),
(2, 2.2, "bbb", 2.2, 2.2, False, "inf", "nan", "2017-01-01T01:23:45+0900"),
(3, 3.33, "cccc", -3, "ccc", True, "infinity", "NAN", "2017-11-01 01:23:45+0900"),
)
@pytest.mark.parametrize(
["max_workers", "headers", "value"],
[
[1, ["i", "f", "s", "if", "mix", "bool", "inf", "nan", "time"], TEST_DATA_MATRIX],
[4, ["i", "f", "s", "if", "mix", "bool", "inf", "nan", "time"], TEST_DATA_MATRIX],
[None, None, TEST_DATA_MATRIX],
[None, [], TEST_DATA_MATRIX],
[
None,
("i", "f", "s", "if", "mix", "bool", "inf", "nan", "time"),
TEST_DATA_MATRIX_TUPLE,
],
],
)
def test_normal_default(self, dp_extractor, max_workers, headers, value):
dp_extractor.max_workers = max_workers
dp_extractor.headers = headers
col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(value))
assert len(col_dp_list) == 9
col_idx = 0
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.INTEGER
assert dp.align.align_code == Align.RIGHT.align_code
assert dp.align.align_string == Align.RIGHT.align_string
assert dp.ascii_char_width == 1
assert dp.decimal_places == 0
assert dp.format_str == "{:d}"
assert str(dp) == (
"column=0, type=INTEGER, align=right, "
"ascii_width=1, bit_len=2, int_digits=1, decimal_places=0"
)
col_idx += 1
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.REAL_NUMBER
assert dp.align.align_code == Align.RIGHT.align_code
assert dp.align.align_string == Align.RIGHT.align_string
assert dp.ascii_char_width == 4
assert dp.decimal_places == 2
assert dp.format_str == "{:.2f}"
col_idx += 1
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.STRING
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 4
assert dp.decimal_places is None
assert dp.format_str == "{:s}"
col_idx += 1
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.REAL_NUMBER
assert dp.align.align_code == Align.RIGHT.align_code
assert dp.align.align_string == Align.RIGHT.align_string
assert dp.ascii_char_width == 4
assert dp.decimal_places == 1
assert dp.format_str == "{:.1f}"
col_idx += 1
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.STRING
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 3
assert dp.decimal_places == 1
assert dp.format_str == "{:s}"
col_idx += 1
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.BOOL
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 5
assert dp.decimal_places is None
assert dp.format_str == "{}"
col_idx += 1
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.INFINITY
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 8
assert dp.decimal_places is None
assert dp.format_str == "{:f}"
col_idx += 1
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.NAN
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 3
assert dp.decimal_places is None
assert dp.format_str == "{:f}"
col_idx += 1
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.STRING
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 24
assert dp.decimal_places is None
assert dp.format_str == "{:s}"
@pytest.mark.parametrize(
["headers", "value"],
[
[
["i", "f"],
[
[1234, 1234.5],
[1234567, 34.5],
],
],
[
[],
[
[1234, 1234.5],
[1234567, 34.5],
],
],
],
)
def test_normal_format_str(self, dp_extractor, headers, value):
dp_extractor.format_flags_list = [Format.THOUSAND_SEPARATOR, Format.THOUSAND_SEPARATOR]
dp_extractor.max_workers = 1
dp_extractor.headers = headers
col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(value))
assert len(col_dp_list) == 2
col_idx = 0
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.INTEGER
assert dp.format_str == "{:,d}"
assert dp.ascii_char_width == 9
col_idx += 1
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.REAL_NUMBER
assert dp.format_str == "{:,.1f}"
assert dp.ascii_char_width == 7
@pytest.mark.parametrize(
["headers", "value"],
[
[["i", "f", "s", "if", "mix", "bool", "inf", "nan", "time"], TEST_DATA_MATRIX],
[None, TEST_DATA_MATRIX],
[[], TEST_DATA_MATRIX],
],
)
def test_normal_not_strict(self, dp_extractor, headers, value):
dp_extractor.headers = headers
col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(value))
assert len(col_dp_list) == 9
dp = col_dp_list[0]
assert dp.typecode == Typecode.INTEGER
assert dp.align.align_code == Align.RIGHT.align_code
assert dp.align.align_string == Align.RIGHT.align_string
assert dp.ascii_char_width == 1
assert dp.decimal_places == 0
assert dp.format_str == "{:d}"
dp = col_dp_list[1]
assert dp.typecode == Typecode.REAL_NUMBER
assert dp.align.align_code == Align.RIGHT.align_code
assert dp.align.align_string == Align.RIGHT.align_string
assert dp.ascii_char_width == 4
assert dp.decimal_places == 2
assert dp.format_str == "{:.2f}"
def test_normal_column_type_hints(self, dp_extractor):
data_matrix = [
[1, "1.1", 1, "2017-01-02 03:04:05"],
[2, "2.2", 0.1, "2017-01-02 03:04:05"],
]
dp_extractor.headers = ["none", "to_float", "to_str", "to_datetime"]
dp_extractor.column_type_hints = [None, RealNumber, String, DateTime]
assert dp_extractor.column_type_hints == [None, RealNumber, String, DateTime]
col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(data_matrix))
assert len(col_dp_list) == 4
assert col_dp_list[0].typecode == Typecode.INTEGER
assert col_dp_list[1].typecode == Typecode.REAL_NUMBER
assert col_dp_list[2].typecode == Typecode.STRING
assert col_dp_list[3].typecode == Typecode.DATETIME
dp_extractor.column_type_hints = ["", "float", "str", "datetime"]
assert dp_extractor.column_type_hints == [None, RealNumber, String, DateTime]
col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(data_matrix))
assert len(col_dp_list) == 4
assert col_dp_list[0].typecode == Typecode.INTEGER
assert col_dp_list[1].typecode == Typecode.REAL_NUMBER
assert col_dp_list[2].typecode == Typecode.STRING
assert col_dp_list[3].typecode == Typecode.DATETIME
def test_normal_max_precision(self):
extractor = DataPropertyExtractor(max_precision=3)
extractor.headers = ["i", "f"]
value = [
[1234, 0.0000000001],
[1234567, 34.5],
]
col_dp_list = extractor.to_column_dp_list(extractor.to_dp_matrix(value))
assert len(col_dp_list) == 2
col_idx = 0
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.INTEGER
assert dp.decimal_places == 0
col_idx += 1
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.REAL_NUMBER
assert dp.decimal_places == 3
# test setter
extractor.max_precision = 1
col_dp_list = extractor.to_column_dp_list(extractor.to_dp_matrix(value))
assert len(col_dp_list) == 2
col_idx = 0
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.INTEGER
assert dp.decimal_places == 0
col_idx += 1
dp = col_dp_list[col_idx]
assert dp.column_index == col_idx
assert dp.typecode == Typecode.REAL_NUMBER
assert dp.decimal_places == 1
def test_normal_nan_inf(self, dp_extractor):
dp_extractor.headers = ["n", "i"]
col_dp_list = dp_extractor.to_column_dp_list(
dp_extractor.to_dp_matrix([[nan, inf], ["nan", "inf"]])
)
assert len(col_dp_list) == 2
dp = col_dp_list[0]
assert dp.typecode == Typecode.NAN
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 3
assert dp.decimal_places is None
dp = col_dp_list[1]
assert dp.typecode == Typecode.INFINITY
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 8
assert dp.decimal_places is None
@pytest.mark.parametrize(["ambiguous_width"], [[2], [1]])
def test_normal_east_asian_ambiguous_width(self, dp_extractor, ambiguous_width):
dp_extractor.headers = ["ascii", "eaa"]
dp_extractor.east_asian_ambiguous_width = ambiguous_width
col_dp_list = dp_extractor.to_column_dp_list(
dp_extractor.to_dp_matrix([["abcdefg", "Øαββ"], ["abcdefghij", "ØØ"]])
)
assert len(col_dp_list) == 2
dp = col_dp_list[0]
assert dp.typecode == Typecode.STRING
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 10
assert dp.decimal_places is None
dp = col_dp_list[1]
assert dp.typecode == Typecode.STRING
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 4 * ambiguous_width
assert dp.decimal_places is None
def test_normal_empty_value(self, dp_extractor):
dp_extractor.headers = ["a", "22", "cccc"]
col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(None))
dp = col_dp_list[0]
assert dp.typecode == Typecode.NONE
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 1
assert dp.decimal_places is None
assert dp.format_str == "{}"
dp = col_dp_list[1]
assert dp.typecode == Typecode.NONE
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 2
assert dp.decimal_places is None
assert dp.format_str == "{}"
dp = col_dp_list[2]
assert dp.typecode == Typecode.NONE
assert dp.align.align_code == Align.LEFT.align_code
assert dp.align.align_string == Align.LEFT.align_string
assert dp.ascii_char_width == 4
assert dp.decimal_places is None
assert dp.format_str == "{}"
class Test_DataPropertyExtractor_matrix_formatting:
TEST_DATA_MATRIX_NORMAL_COL3 = [["a", 0, "aa"], ["b", 1, "bb"], ["c", 2, "ccc"]]
TEST_DATA_MATRIX_NOUNIFORM_COL1 = [["a", 0], ["b", 1, "bb"], ["c", 2, "ccc", 0.1], ["d"]]
@pytest.mark.parametrize(
["headers", "value", "matrix_formatting", "expected"],
[
[None, TEST_DATA_MATRIX_NOUNIFORM_COL1, MatrixFormatting.TRIM, 1],
[["a", "b"], TEST_DATA_MATRIX_NORMAL_COL3, MatrixFormatting.TRIM, 2],
[None, TEST_DATA_MATRIX_NOUNIFORM_COL1, MatrixFormatting.FILL_NONE, 4],
[["a", "b", "c"], TEST_DATA_MATRIX_NORMAL_COL3, MatrixFormatting.FILL_NONE, 3],
[["a", "b", "c"], TEST_DATA_MATRIX_NOUNIFORM_COL1, MatrixFormatting.HEADER_ALIGNED, 3],
[
["a", "b", "c", "d", "e"],
TEST_DATA_MATRIX_NOUNIFORM_COL1,
MatrixFormatting.HEADER_ALIGNED,
5,
],
],
)
def test_normal_matrix_formatting(
self, dp_extractor, headers, value, matrix_formatting, expected
):
dp_extractor.headers = headers
dp_extractor.matrix_formatting = matrix_formatting
col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(value))
assert len(col_dp_list) == expected
@pytest.mark.parametrize(
["headers", "value", "matrix_formatting", "expected"],
[
[
["i", "f", "s", "if", "mix"],
TEST_DATA_MATRIX_NOUNIFORM_COL1,
MatrixFormatting.EXCEPTION,
ValueError,
]
],
)
def test_exception_matrix_formatting(
self, dp_extractor, headers, value, matrix_formatting, expected
):
dp_extractor.headers = headers
dp_extractor.matrix_formatting = matrix_formatting
with pytest.raises(expected):
dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(value))
class Test_DataPropertyExtractor_update_preprocessor:
def test_normal(self, dp_extractor):
assert dp_extractor.preprocessor.strip_str is None
assert dp_extractor.preprocessor.replace_tabs_with_spaces is True
assert dp_extractor.preprocessor.tab_length == 2
assert dp_extractor.preprocessor.line_break_handling is LineBreakHandling.NOP
assert dp_extractor.preprocessor.line_break_repl == " "
assert dp_extractor.preprocessor.is_escape_html_tag is False
assert dp_extractor.preprocessor.is_escape_formula_injection is False
dp_extractor.update_preprocessor(
strip_str='"',
replace_tabs_with_spaces=False,
tab_length=4,
line_break_handling=LineBreakHandling.REPLACE,
line_break_repl="<br>",
is_escape_html_tag=True,
is_escape_formula_injection=True,
)
assert dp_extractor.preprocessor.strip_str == '"'
assert dp_extractor.preprocessor.replace_tabs_with_spaces is False
assert dp_extractor.preprocessor.tab_length == 4
assert dp_extractor.preprocessor.line_break_handling is LineBreakHandling.REPLACE
assert dp_extractor.preprocessor.line_break_repl == "<br>"
assert dp_extractor.preprocessor.is_escape_html_tag is True
assert dp_extractor.preprocessor.is_escape_formula_injection is True
|
import asyncio, datetime, discord, json, pycountry, random, re, requests, time, traceback
from aioconsole import ainput
from word2number import w2n
from client import *
from datamanager import config, del_data, get_data, has_data, mod_data, set_data, batch_set_data
from discordutils import *
from league import *
async def dm(user, *a, **k):
channel = user.dm_channel
if channel is None:
channel = await user.create_dm()
await channel.send(*a, **k)
@client.command("", ["help"], "", "")
@client.command("General Commands", ["help", "rpg"], "help [rpg]", "post a list of commands")
async def command_help(command, message):
sections = {}
for section, _, syntax, description, _ in client.commands:
if section == "" or ((section == "RPG Commands") ^ (len(command) == 3)): continue
if section not in sections:
sections[section] = []
sections[section].append(f"`{syntax}` - {description}")
embed = discord.Embed(
title = "Help - Commands",
color = client.color
)
for section in sections:
embed.add_field(name = section, value = "\n".join(sections[section]), inline = False)
await dm(message.author, embed = embed)
await send(message, "Sent the command list to your DMs!")
@client.command("General Commands", ["ping"], "ping", "check your ping")
async def command_ping(command, message):
ping = int((time.time() - (message.created_at - datetime.datetime(1970, 1, 1)) / datetime.timedelta(seconds = 1)) * 1000)
await send(message, f"Pong! ({ping} ms)", reaction = "🏓")
@client.command("Channel Type Commands", ["subscribe"], "subscribe", "announce updates to this channel")
async def command_subscribe(command, message):
await mod_data("announcement_channels", lambda x: x | {message.channel.id}, default = set())
await send(message, "Subscribed to status updates here!")
@client.command("Channel Type Commands", ["unsubscribe"], "unsubscribe", "stop announcing updates to this channel")
async def command_unsubscribe(command, message):
await mod_data("announcement_channels", lambda x: x - {message.channel.id}, default = set())
await send(message, "Unsubscribed from status updates here!")
@client.command("Channel Type Commands", ["watch", ("osu", "genshin")], "watch osu/genshin", "watch osu!/Genshin Impact updates here")
async def command_watch(command, message):
await mod_data("watch_channels", command[2], lambda x: x | {message.channel.id}, default = set())
await send(message, "Now watching " + {"osu": "osu!", "genshin": "Genshin Impact"}[command[2]] + " updates in this channel!")
@client.command("Channel Type Commands", ["unwatch", ("osu", "genshin")], "unwatch osu/genshin", "stop watching osu!/Genshin Impact updates here")
async def command_unwatch(command, message):
await mod_data("watch_channels", command[2], lambda x: x - {message.channel.id}, default = set())
await send(message, "No longer watching " + {"osu": "osu!", "genshin": "Genshin Impact"}[command[2]] + " updates in this channel!")
words = None
wordmap = {}
with open("data/words.txt") as f:
words = [x for x in f.read().strip().splitlines() if 5 <= len(x)]
for word in words:
key = "".join(sorted(word))
if key not in wordmap:
wordmap[key] = set()
wordmap[key].add(word)
anagram_lock = asyncio.Lock()
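# Render the scrambled puzzle: at hint level n the first and last n letters of the actual
# answer are shown in bold and removed from the scrambled middle section.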
def display(actual, scrambled, hint):
if hint == 0: return scrambled
cl = list(scrambled)
start = actual[:hint if hint * 2 <= len(actual) else -hint]
end = actual[-hint:]
for c in start + end:
cl.remove(c)
return f"**{start}**{"".join(cl)}**{end}**"
async def anagram_function(message, answer = None, start = False, stop = False, hint = False, reorder = False):
global words, wordmap
async with anagram_lock:
active = await has_data("anagram", message.channel.id, "puzzle")
puzzle = await get_data("anagram", message.channel.id, "puzzle", default = "", set_if_missing = False)
answers = wordmap.get("".join(sorted(puzzle)), set())
current_hint = await get_data("anagram", message.channel.id, "hint", default = 0, set_if_missing = False)
if reorder:
if active:
charlist = list(puzzle)
random.shuffle(charlist)
puzzle = "".join(charlist)
await set_data("anagram", message.channel.id, "puzzle", puzzle)
await send(message, f"Reordered: solve for '{display(sorted(answers)[0], puzzle, current_hint)}' ({len(puzzle)}).")
else:
await send(message, "There is no ongoing anagram puzzle in this channel!", reaction = "x")
if hint:
if active:
if len(puzzle) - current_hint * 2 - 2 <= 1:
stop = True
else:
await set_data("anagram", message.channel.id, "hint", current_hint + 1)
await send(message, f"Hint: 2 more letters shown: solve for '{display(sorted(answers)[0], puzzle, current_hint + 1)}' ({len(puzzle)}).")
else:
await send(message, "There is no ongoing anagram puzzle in this channel!", reaction = "x")
if stop:
if active:
if len(answers) == 1:
await send(message, f"Anagram puzzle ended! The correct answer was '{list(answers)[0]}'.")
else:
await send(message, f"Anagram puzzle ended! The correct answers were {english_list(quote(answers))}.")
await del_data("anagram", message.channel.id)
active = False
else:
await send(message, "There is no ongoing anagram puzzle in this channel!", reaction = "x")
if active and answer in answers:
try:
points = len(answer) - 2 * await get_data("anagram", message.channel.id, "hint")
bonus = int(points / 2) * (time.time() - await get_data("anagram", message.channel.id, "timestamp", default = 0) <= 5)
await mod_data("leaderboard", "anagram", message.author.id, lambda x: x + points + bonus, default = 0)
await batch_set_data("anagram", message.channel.id, active = False, last = answers, lasttime = time.time())
active = False
bonus_display = f" **+{bonus}**" if bonus else ""
alt_display = f" (Alternative answers: {english_list(quote(answers - {answer}))})" if len(answers) > 1 else ""
await send(message, f"Congratulations to {message.author.mention} for winning the anagram puzzle! (+{points}{bonus_display}){alt_display}", allowed_mentions = discord.AllowedMentions.none())
start = True
except:
print(traceback.format_exc())
elif answer in await get_data("anagram", message.channel.id, "last", default = set()) and time.time() - await get_data("anagram", message.channel.id, "lasttime", default = 0) <= 1:
await send(message, f"{message.author.mention} L", reaction = "x", allowed_mentions = discord.AllowedMentions.none())
if start:
if active:
hint = await get_data("anagram", message.channel.id, "hint", default = 0)
actual = sorted(answers)[0]
await send(message, f"An anagram puzzle is already running! Solve for '{display(actual, puzzle, hint)}' ({len(puzzle)}).", reaction = "x")
else:
word = random.choice(words)
charlist = list(word)
random.shuffle(charlist)
scrambled = "".join(charlist)
await batch_set_data("anagram", message.channel.id, active = True, puzzle = scrambled, hint = 0, timestamp = time.time())
await send(message, f"Anagram puzzle! Solve for '{scrambled}' ({len(word)}).")
@client.command("Anagram Commands", ["anagram"], "anagram start", "start an anagram puzzle")
async def command_anagram_start(command, message):
await anagram_function(message, start = True)
@client.command("Anagram Commands", ["anagram", "restart"], "anagram restart", "restart the anagram puzzle")
async def command_anagram_restart(command, message):
await anagram_function(message, stop = True, start = True)
@client.command("Anagram Commands", ["anagram", "stop"], "anagram stop", "stop the anagram puzzle")
async def command_anagram_stop(command, message):
await anagram_function(message, stop = True)
@client.command("Anagram Commands", ["anagram", "shuffle"], "anagram shuffle", "alias for `anagram reorder`")
@client.command("Anagram Commands", ["anagram", "scramble"], "anagram scramble", "alias for `anagram reorder`")
@client.command("Anagram Commands", ["anagram", "reorder"], "anagram reorder", "reorder the anagram puzzle")
async def command_anagram_reorder(command, message):
await anagram_function(message, reorder = True)
@client.command("Anagram Commands", ["anagram", "hint"], "anagram hint", "show another character in the anagram puzzle")
async def command_anagram_hint(command, message):
await anagram_function(message, hint = True)
@client.command("Anagram Commands", ["anagram", "add", "?"], "anagram add <word>", "add a word to the anagram dictionary")
async def command_anagram_add(command, message):
global words, wordmap
word = command[3].strip().lower()
if all(char in "abcdefghijklmnopqrstuvwxyz" for char in word):
if word in words:
await send(message, "This word is already in the dictionary!", reaction = "x")
else:
words.append(word)
words.sort()
with open("data/words.txt", "w") as f:
f.write("\n".join(words))
key = "".join(sorted(word))
if key not in wordmap:
wordmap[key] = set()
wordmap[key].add(word)
await send(message, f"Added '{word}' to the dictionary!")
else:
await send(message, "Words must only contain letters!", reaction = "x")
@client.command("Anagram Commands", ["anagram", "rm", "?"], "anagram rm <word>", "alias for `anagram remove`")
@client.command("Anagram Commands", ["anagram", "remove", "?"], "anagram remove <word>", "remove a word from the anagram dictionary")
async def command_anagram_remove(command, message):
global words, wordmap
word = command[3].strip().lower()
if word in words:
words.remove(word)
with open("data/words.txt", "w") as f:
f.write("\n".join(words))
key = "".join(sorted(word))
wordmap[key].discard(word)
await send(message, f"Removed '{word}' from the dictionary!")
else:
await send(message, "This word is not in the dictionary!", reaction = "x")
@client.command("Anagram Commands", ["anagram", "lb"], "anagram lb", "alias for `anagram leaderboard`")
@client.command("Anagram Commands", ["anagram", "leaderboard"], "anagram leaderboard", "show the leaderboard for the anagram puzzle")
async def command_anagram_leaderboard(command, message):
scores = []
scoremap = await get_data("leaderboard", "anagram")
for member in message.guild.members:
score = scoremap.get(member.id, 0)
if score:
scores.append((score, member))
scores.sort(reverse = True)
await send(message, embed = discord.Embed(
title = "Leaderboard - Anagram",
description = "\n".join(f"{member.mention} - {score}" for score, member in scores)
))
@client.command("", lambda m: True, "", "")
async def command_anagram_answer(command, message):
try:
await anagram_function(message, answer = message.content.strip().strip("!@#$%^&*()[]{}/|\.,<>\"'").lower())
except:
pass
@client.command("User Commands", ["alias", "?", "?"], "alias <name> <user>", "alias a name to a user")
async def command_alias(command, message):
member = await get_member(message.guild, command[3], message.author)
await set_data("aliases", message.guild.id, command[2].lower(), member.id)
await send(message, f"Aliased '{command[2].lower()}' to {member.mention}!", allowed_mentions = discord.AllowedMentions.none())
@client.command("User Commands", ["unalias", "?"], "unalias <name>", "remove a name's alias")
async def command_unalias(command, message):
await set_data("aliases", message.guild.id, command[2].lower(), None)
await send(message, f"Removed the alias for '{command[2].lower()}'!")
@client.command("User Commands", ["unbonk", "?", "..."], "unbonk <user>", "alias for `unignore`")
@client.command("User Commands", ["unignore", "?", "..."], "unignore <user>", "make the bot no longer ignore messages from a particular user (on a server)")
@client.command("User Commands", ["bonk", "?", "..."], "bonk <user>", "alias for `ignore`")
@client.command("User Commands", ["ignore", "?", "..."], "ignore <user>", "make the bot ignore all messages from a particular user (on a server)")
async def command_ignore(command, message):
for uinfo in command[2:]:
member = await get_member(message.guild, uinfo, message.author)
if not command[1].startswith("un") and member == message.author:
await send(message, f"You cannot {command[1]} yourself!", reaction = "x")
else:
await set_data("ignore", message.guild.id, member.id, not command[1].startswith("un"))
await send(message, f"No longer ignoring {member.mention}!" if command[1].startswith("un") else f"{"Bonk! " * (command[1] == "bonk")}Now ignoring {member.mention}!", allowed_mentions = discord.AllowedMentions.none())
@client.command("User Commands", ["unshut", "?", "..."], "unbonk <user>", "alias for `unsilence`")
@client.command("User Commands", ["unsilence", "?", "..."], "unignore <user>", "make the bot delete messages from a particular user (on a server)")
@client.command("User Commands", ["shut", "?", "..."], "bonk <user>", "alias for `silence`")
@client.command("User Commands", ["silence", "?", "..."], "ignore <user>", "make the bot delete messages from a particular user (on a server)")
async def command_silence(command, message):
for uinfo in command[2:]:
member = await get_member(message.guild, uinfo, message.author)
if not command[1].startswith("un") and member == message.author:
await send(message, f"You cannot {command[1]} yourself!", reaction = "x")
else:
await set_data("silence", message.guild.id, member.id, not command[1].startswith("un"))
await send(message, f"No longer silencing {member.mention}!" if command[1].startswith("un") else f"{"https://i.redd.it/l5jmlb1ltqj51.jpg" * (command[1] == "shut")}Now silencing {member.mention}!", allowed_mentions = discord.AllowedMentions.none())
# @client.command("Role Commands", ["gib", "?", "..."], "gib <name> [roles...]", "alias for `role give`")
# @client.command("Role Commands", ["role", "give", "?", "..."], "role give <name> [roles...]", "give a list of roles to a user")
# async def command_role_give(command, message):
# user, *names = command[2 if command[1] == "gib" else 3:]
# member = await get_member(message.guild, user, message.author)
# roles = [get_role(message.guild, string) for string in names]
# if any(role.id == 741731868692709416 for role in roles) and member.id != 251082987360223233:
# await send(message, f"<@&741731868692709416> is exclusive to <@!251082987360223233>!", allowed_mentions = discord.AllowedMentions.none())
# else:
# await member.add_roles(*roles)
# await send(message, f"Granted {english_list(quote(role.mention for role in roles))} to {member.mention}!", allowed_mentions = discord.AllowedMentions(roles = False))
# @client.command("Role Commands", ["gibnt", "?", "..."], "gibnt <name> [roles...]", "alias for `role remove`")
# @client.command("Role Commands", ["role", "remove", "?", "..."], "role remove <name> [roles...]", "remove a list of roles from a user")
# async def command_role_remove(command, message):
# user, *names = command[2 if command[1] == "gibnt" else 3:]
# member = await get_member(message.guild, user, message.author)
# roles = [get_role(message.guild, string) for string in names]
# await member.remove_roles(*roles)
# await send(message, f"Removed {english_list(quote(role.mention for role in roles))} from {member.mention}!", allowed_mentions = discord.AllowedMentions(roles = False))
@client.command("", ["role", "colour", "?"], "", "")
@client.command("", ["role", "color", "?"], "", "")
@client.command("Role Commands", ["role", "colour", "?", "?"], "role colour <role> [colour = 0]", "alias for `role color`")
@client.command("Role Commands", ["role", "color", "?", "?"], "role color <role> [color = 0]", "recolor a role, or remove its color")
async def command_role_color(command, message):
role = get_role(message.guild, command[3])
await role.edit(color = get_color(command[4] if len(command) > 4 else "0"))
await send(message, f"Recolored '{role.mention}'!", allowed_mentions = discord.AllowedMentions.none())
@client.command("Role Commands", ["role", "rename", "?", "?"], "role rename <role> <name>", "rename a role")
async def command_role_rename(command, message):
role = get_role(message.guild, command[3])
name = role.name
await role.edit(name = command[4])
await send(message, f"Renamed '{name}' to '{command[4]}'!")
services = {
"lol": "lol",
"league": "lol",
"dmoj": "dmoj",
"cf": "cf",
"codeforces": "cf",
"osu": "osu",
"ow": "ow",
"overwatch": "ow"
}
service_list = tuple(services)
@client.command("", [service_list, "link", "?"], "", "")
@client.command("External User Commands", [service_list, "link", "?", "?"], "<lol/league | cf/codeforces | dmoj | osu | ow/overwatch> link [user = me] <account>", "link a user to an external account")
async def command_link(command, message):
service = services[command[1]]
member = await get_member(message.guild, command[3] if len(command) == 5 else "me", message.author)
await set_data("external", service, member.id, command[-1])
await send(message, f"Linked {member.mention} to {command[-1]}!", allowed_mentions = discord.AllowedMentions.none())
@client.command("", [service_list, "unlink"], "", "")
@client.command("External User Commands", [service_list, "unlink", "?"], "<lol/league | cf/codeforces | dmoj | osu | ow/overwatch> unlink [user = me]", "unlink a user from a service")
async def command_link(command, message):
service = services[command[1]]
member = await get_member(message.guild, command[3] if len(command) == 4 else "me", message.author)
await del_data("external", service, member.id)
await send(message, f"Unlinked {member.mention}!", allowed_mentions = discord.AllowedMentions.none())
async def get_ext_user(key, error, command, message):
if len(command) == 3:
if await has_data("external", key, message.author.id):
return await get_data("external", key, message.author.id)
else:
raise BotError(f"You are not linked; please specify {error} or link yourself first!")
else:
try:
member = await get_member(message.guild, command[3], message.author)
if await has_data("external", key, member.id):
return await get_data("external", key, member.id)
except:
pass
return command[3]
@client.command("", [("cf", "codeforces"), ("details", "rank", "rating")], "", "")
@client.command("External User Commands", [("cf", "codeforces"), ("details", "rank", "rating"), "?"], "cf/codeforces <details | rank/rating> [user = me]", "report a codeforces user's public details or just rank+rating")
async def command_cf_details(command, message):
cf = await get_ext_user("cf", "a codeforces user", command, message)
rv = requests.get("https://codeforces.com/api/user.info?handles=" + cf).json()
if rv["status"] == "OK":
cfdata = rv["result"][0]
if command[2] == "rank" or command[2] == "rating":
await send(message, f"{cf} is rank {cfdata["rank"]} [{cfdata["rating"]}] (max {cfdata["maxRank"]} [{cfdata["maxRating"]}])!")
else:
embed = discord.Embed(title = cf, color = client.color, url = "https://codeforces.com/profile/" + cf).set_thumbnail(url = "http:" + cfdata["avatar"])
for key, name in [
("email", "Email Address"),
("firstName", "First Name"),
("lastName", "Last Name"),
("organization", "Organization"),
("contribution", "Contribution"),
("friendOfCount", "Friend Of #")
]:
if cfdata.get(key):
embed.add_field(name = name, value = str(cfdata[key]))
if cfdata.get("country") or cfdata.get("city"):
city = f"{cfdata["city"]}, " if cfdata.get("city") else ""
embed.add_field(name = "Location", value = f"{city}{cfdata["country"]}")
embed.add_field(name = "Current Rank", value = f"{cfdata["rank"]} [{cfdata["rating"]}]")
embed.add_field(name = "Maximum Rank", value = f"{cfdata["maxRank"]} [{cfdata["maxRating"]}]")
embed.add_field(name = "Registered Since", value = datetime.datetime.fromtimestamp(cfdata["registrationTimeSeconds"]).strftime("%B %d, %Y at %H:%M:%S"))
embed.add_field(name = "Last Seen Online", value = datetime.datetime.fromtimestamp(cfdata["lastOnlineTimeSeconds"]).strftime("%B %d, %Y at %H:%M:%S"))
await send(message, embed = embed)
else:
await send(message, f"'{cf}' is not a codeforces user!", reaction = "x")
def dmoj_api(URL):
rv = requests.get(URL)
if rv.status_code != 200:
raise BotError(f"'{URL}' returned status {rv.status_code} (not 200)!")
data = rv.json()
if "error" in data:
raise BotError("Error fetching from DMOJ API; likely item does not exist!")
if "data" not in data:
raise BotError("Data not found; check the URL!")
return data["data"]
@client.command("", ["dmoj", ("details", "rank", "rating")], "", "")
@client.command("External User Commands", ["dmoj", ("details", "rank", "rating"), "?"], "dmoj <details | rank/rating> [user = me]", "report a DMOJ user's public details or just rank+rating")
async def command_dmoj_details(command, message):
dm = await get_ext_user("dmoj", "a DMOJ user", command, message)
dmdata = dmoj_api("https://dmoj.ca/api/v2/user/" + dm)["object"]
rating = dmdata["rating"]
if rating < 1000:
rank = "Newbie"
elif rating < 1200:
rank = "Amateur"
elif rating < 1500:
rank = "Expert"
elif rating < 1800:
rank = "Candidate Master"
elif rating < 2200:
rank = "Master"
elif rating < 3000:
rank = "Grandmaster"
else:
rank = "Target"
if dmdata["rank"] == "admin":
rank += " (Admin)"
if command[2] == "rank" or command[2] == "rating":
await send(message, f"{dmdata["username"]} is rank {rank} [{rating}]!")
elif command[2] == "details":
await send(message, embed = discord.Embed(
title = dmdata["username"],
color = 0x3333AA,
url = "https://dmoj.ca/user/" + dmdata["username"]
).add_field(
name = "Points",
value = "%.2f" % dmdata["points"]
).add_field(
name = "Solved Problems",
value = str(dmdata["problem_count"])
).add_field(
name = "Contests",
value = str(len(dmdata["contests"]))
).add_field(
name = "Organizations",
value = ", ".join(org["short_name"] for org in dmoj_api("https://dmoj.ca/api/v2/organizations")["objects"] if org["id"] in dmdata["organizations"])
).add_field(
name = "Rank",
value = rank
).add_field(
name = "Rating",
value = str(rating)
))
@client.command("", ["osu", ("details", "summary")], "", "")
@client.command("External User Commands", ["osu", ("details", "summary"), "?"], "osu <details | summary> [player = me]", "report an osu player's public details or summary")
async def command_osu_details(command, message):
osu = await get_ext_user("osu", "an osu! player", command, message)
rv = requests.get(f"https://osu.ppy.sh/api/get_user?k={config["api-keys"]["osu"]}&u={osu}")
if rv.status_code == 200:
data = rv.json()
if data == []:
await send(message, "Could not find an osu! player by that username/ID!", reaction = "x")
else:
user = data[0]
if command[2] == "summary":
await send(message, embed = discord.Embed(title = f"osu! player details: {user["username"]}", description = f"Level {user["level"]}\nPP: {user["pp_raw"]}\nRank: #{user["pp_rank"]} (#{user["pp_country_rank"]})\nAccuracy: {user["accuracy"]}", color = client.color).set_thumbnail(url = f"http://s.ppy.sh/a/{user["user_id"]}"))
else:
seconds = int(user["total_seconds_played"])
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
await send(message, embed = discord.Embed(
title = f"osu! player summary: {user["username"]} #{user["user_id"]}",
description = f"User since {user["join_date"]}",
url = f"https://osu.ppy.sh/users/{user["user_id"]}",
color = client.color
).add_field(
name = "Level",
value = user["level"]
).add_field(
name = "Accuracy",
value = user["accuracy"]
).add_field(
name = "Performance Points",
value = user["pp_raw"]
).add_field(
name = "Rank",
value = f"#{user["pp_rank"]} (#{user["pp_country_rank"]} in {pycountry.countries.get(alpha_2 = user["country"]).name})"
).add_field(
name = "Score Counts",
value = " ".join(f"{user["count" + x]} {emoji("osu_" + x)}" for x in ["300", "100", "50"]),
inline = False
).add_field(
name = "Rating Counts",
value = " ".join(f"{user["count_rank_" + x.lower()]} {emoji("osu_" + x)}" for x in ["SSH", "SS", "SH", "S", "A"]),
inline = False
).add_field(
name = "Best Score",
value = user['ranked_score']
).add_field(
name = "Total Score",
value = user['total_score']
).add_field(
name = "Time Played",
value = f"{hours}:{str(minutes).zfill(2)}:{str(seconds).zfill(2)}"
).set_thumbnail(
url = f"http://s.ppy.sh/a/{user["user_id"]}"
))
else:
await send(message, f"Failed to fetch from osu! API: status code {rv.status_code}!", reaction = "x")
def display_ow_rank(rating):
try:
rank = int(rating)
if rank < 1500:
e = "ow_bronze"
elif rank < 2000:
e = "ow_silver"
elif rank < 2500:
e = "ow_gold"
elif rank < 3000:
e = "ow_platinum"
elif rank < 3500:
e = "ow_diamond"
elif rank < 4000:
e = "ow_master"
else:
e = "ow_grandmaster"
return f"{rating} {emoji(e)}"
except:
return rating
@client.command("", [("ow", "overwatch"), "summary"], "", "")
@client.command("External User Commands", [("ow", "overwatch"), "summary", "?"], "ow/overwatch summary <player = me>", "report an overwatch player's summary")
async def command_ow_summary(command, message):
ow = await get_ext_user("ow", "a Blizzard battletag", command, message)
try:
r = requests.get(f"https://ow-api.com/v1/stats/pc/us/{ow}/profile")
if r.status_code != 200:
raise RuntimeError("Status Code not 200")
data = r.json()
try:
await send(message, embed = discord.Embed(
title = f"Overwatch player summary: {data["name"]}",
description = "",
color = client.color
).add_field(
name = "Level",
value = str(data["level"] + 100 * data["prestige"])
).add_field(
name = "Rating",
value = display_ow_rank(data["rating"])
).add_field(
name = "Games Won",
value = str(data["gamesWon"])
).add_field(
name = "Competitive Winrate",
value = "%.2f%%" % (data["competitiveStats"]["games"]["won"] / data["competitiveStats"]["games"]["played"] * 100) if "games" in data["competitiveStats"] else "N/A"
).set_thumbnail(
url = data["icon"]
))
except:
print(traceback.format_exc())
await send(message, "Failed to generate embed!", reaction = "x")
except:
await send(message, f"Failed to fetch user data for `{ow}` from Overwatch API; check the spelling of this battletag (please format as `name-number`)!", reaction = "x")
@client.command("", [("lol", "league"), ("report", "current", "report-player", "current-player")], "", "")
@client.command("League of Legends Commands", [("lol", "league"), ("report", "current", "report-player", "current-player"), "?"], "lol/league <report | current>[-player] [player = me]", "create a game report for the player")
async def command_lol_report(command, message):
sm = await get_ext_user("lol", "a League of Legends summoner", command, message)
try:
summoner = watcher.summoner.by_name(lol_region, sm)
if command[2] == "report" or command[2] == "report-player":
try:
game = watcher.match.matchlist_by_account(lol_region, summoner["accountId"], end_index = 1)["matches"][0]
try:
if command[2] == "report":
await send(message, embed = await lol_game_embed(message.guild, game["gameId"], sm, False), reaction = "check")
elif command[2] == "report-player":
await send(message, embed = await lol_player_embed(message.guild, game["gameId"], sm, False), reaction = "check")
except:
print(traceback.format_exc())
await send(message, "Failed to create embed!", reaction = "x")
except Exception as e:
await send(message, f"Could not find a game for {lol_region.upper()}/{sm}! The summoner may not have played a proper game recently enough.", reaction = "x")
else:
try:
game = watcher.spectator.by_summoner(lol_region, summoner["id"])
try:
if command[2] == "current":
await send(message, embed = await lol_current_embed(message.guild, game, sm))
elif command[2] == "current-player":
await send(message, embed = await lol_current_player_embed(message.guild, game, [sm]))
except:
print(traceback.format_exc())
await send(message, "Failed to create embed!", reaction = "x")
except Exception as e:
await send(message, f"Could not find current game for {lol_region.upper()}/{sm}! The summoner may not be in game.", reaction = "x")
except:
await send(message, f"Could not find summoner {lol_region.upper()}/{sm}! Please check your spelling.", reaction = "x")
@client.command("League of Legends Commands", [("lol", "league"), "rotation"], "lol/league rotation", "check the current free champion rotation")
async def command_lol_rotation(command, message):
champions = [champs[cid] for cid in watcher.champion.rotations(lol_region)["freeChampionIds"]]
champions.sort()
await send(message, f"This week's free rotation is: {english_list(champions)}.")
@client.command("League of Legends Commands", [("lol", "league"), "ranges", "..."], "lol/league ranges <champion> [champion...]", "compare ability ranges for champions")
async def command_lol_ranges(command, message):
champs = set()
for champ in command[3:]:
champ = champ.lower()
if champ not in cmap:
await send(message, f"{champ} is not a recognized champion name or ID!", reaction = "x")
break
champs.add(cmap[champ])
else:
items = []
for champ in champs:
data = requests.get(f"http://ddragon.leagueoflegends.com/cdn/{lol_version}/data/en_US/champion/{champ}.json").json()
items.append((data["data"][champ]["stats"]["attackrange"], data["data"][champ]["name"], "Basic Attack"))
for i, spell in enumerate(data["data"][champ]["spells"]):
ident = data["data"][champ]["name"] + " " + ("QWER"[i] if 0 <= i < 4 else "?")
if len(set(spell["range"])) == 1:
items.append((spell["range"][0], ident, spell["name"]))
else:
clusters = {}
for i, r in enumerate(spell["range"]):
if r not in clusters:
clusters[r] = []
clusters[r].append(i + 1)
for key in clusters:
items.append((key, ident, spell["name"] + " Rank " + "/".join(map(str, clusters[key]))))
items.sort()
stacked = []
for item in items:
if stacked == [] or item[0] != stacked[-1][0]:
stacked.append([item[0], []])
stacked[-1][1].append((item[1], item[2]))
info = "**Range Analysis**\n"
for rng, stack in stacked:
stack = ", ".join(f"{ident} ({name})" for ident, name in stack)
info += f"\n__{rng}__: {stack}"
await send(message, info, reaction = "check")
@client.command("League of Legends Commands", [("lol", "league"), "item", "?", "..."], "lol item <name>", "get details about an item")
async def command_lol_item(command, message):
item = find_item("".join(command[3:]).lower())
await send(message, embed = discord.Embed(
title = f"League of Legends Item: {item["name"]} (#{item["id"]})",
description = re.sub("(\\() (.)|(.) (\\))", "\\1\\2\\3\\4", re.sub(" +", " ", re.sub("<[^>]+?>", "", re.sub("<br>|<li>", "\n", item["description"])))),
color = client.color,
url = f"https://leagueoflegends.fandom.com/wiki/{item["name"].replace(" ", "_")}"
).add_field(
name = "Build Path",
value = build_path(item["id"]) + ("\n\nBuilds into: " + english_list(lolitems[key]["name"] for key in item.get("into")) if item.get("into") else "")
).add_field(
name = "Tags",
value = "\n".join("- " + {
"CriticalStrike": "Critical Strike",
"NonbootsMovement": "Movement Speed",
"SpellDamage": "Ability Power",
"MagicPenetration": "Magic Penetration",
"ArmorPenetration": "Armor Penetration",
"SpellBlock": "Magic Resistance",
"Slow": "Movement Reduction",
"Jungle": "Jungling",
"Health": "Health",
"Lane": "Laning",
"Aura": "Aura",
"HealthRegen": "Health Regeneration",
"SpellVamp": "Spell Vamp",
"GoldPer": "Gold Income",
"Mana": "Mana",
"Vision": "Vision",
"LifeSteal": "Physical Vamp",
"Consumable": "Consumable",
"Armor": "Armor",
"Stealth": "Stealth",
"ManaRegen": "Mana Regeneration",
"OnHit": "On-Hit",
"Active": "Active",
"CooldownReduction": "Cooldown Reduction",
"Trinket": "Trinket",
"AttackSpeed": "Attack Speed",
"Boots": "Boots",
"AbilityHaste": "Ability Haste",
"Tenacity": "Tenacity",
"Damage": "Attack Damage"
}[tag] for tag in item["tags"])
).set_thumbnail(
url = f"http://ddragon.leagueoflegends.com/cdn/{lol_version}/img/item/{item["id"]}.png"
))
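# Channel/server statistics: `stats` counts messages per author in a channel (optionally limited to a set of member IDs) and returns (name, count) pairs sorted by count; names are truncated to stats_length characters for aligned output.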
stats_length = 24
async def stats(channel, vis = None):
counts = {}
async for message in channel.history(limit = None):
if not vis or message.author.id in vis:
uinfo = f"{truncate(message.author.name, stats_length - 5)}#{message.author.discriminator}"
counts[uinfo] = counts.get(uinfo, 0) + 1
return sorted(counts.items(), key = lambda a: (-a[1], a[0]))
def truncate(string, length):
if len(string) > length:
return string[:length - 1] + "…"
return string
@client.command("Server Statistics Commands", [("channel", "server"), "stats"], "<channel | server> stats", "output the number of messages sent in each channel by each user")
async def command_channel_stats(command, message):
v = set(m.id for m in message.channel.members)
async with message.channel.typing():
if command[1] == "channel":
s = await stats(message.channel, v)
total = sum(b for _, b in s)
mc = len(str(max(b for _, b in s)))
l = max(len(a) for a, _ in s)
await send(message, embed = discord.Embed(
title = f"Channel Stats for #{message.channel.name}",
description = "```\n" + "\n".join(f"{uinfo.ljust(l)} {str(count).ljust(mc)} ({count / total * 100:.2f}%)" for uinfo, count in s) + "\n```",
color = client.color
))
else:
vis = set(message.channel.members)
counts = {}
ccount = {}
cname = {}
total = 0
failed = 0
for channel in message.guild.channels:
try:
if isinstance(channel, discord.TextChannel):
if set(channel.members) >= vis:
cname[channel.id] = channel.name
for uinfo, count in await stats(channel, v):
counts[uinfo] = counts.get(uinfo, 0) + count
ccount[channel.id] = ccount.get(channel.id, 0) + count
total += count
except:
failed += 1
mc = len(str(max(max(counts.values()), max(ccount.values()))))
ul = max(map(len, counts))
cl = max(map(len, cname.values()))
l = min(max(ul, cl), stats_length)
counts = sorted(counts.items(), key = lambda a: (-a[1], a[0]))
ccount = sorted(ccount.items(), key = lambda a: (-a[1], a[0]))
await send(message, embed = discord.Embed(
title = f"Server Stats for {message.guild.name}",
description = "```\n" + "\n".join(f"{uinfo.ljust(l)} {str(count).ljust(mc)} ({count / total * 100:.2f}%)" for uinfo, count in counts) +
"\n\n" + "\n".join(f"#{truncate(cname[cid].ljust(l - 1), stats_length - 1)} {str(count).ljust(mc)} ({count / total:.2f}%)" for cid, count in ccount) + "\n```",
color = client.color
))
if failed:
await send(message, f"Failed to index the results from {failed} channel{"s" * (failed != 1)}; likely this bot does not have permission to access them.")
@client.command("Miscellaneous Commands", ["blame"], "blame", "blame a random person in this channel (cannot blame any bots)")
async def command_blame(command, message):
members = []
for member in message.channel.members:
if not member.bot:
members.append(member)
await send(message, f"It was {random.choice(members).mention}'s fault!", allowed_mentions = discord.AllowedMentions.none())
@client.command("Miscellaneous Commands", ["spoiler", "image"], "spoiler image", "accept an image in a DM to spoiler (for mobile users)")
async def command_spoiler_image(command, message):
try:
await dm(message.author, f"The next image(s) you DM to me will be sent to {message.guild.name}#{message.channel.name} as a spoiler.")
await message.delete()
await set_data("dm_spoiler", message.author.id, message.channel.id)
except:
await send(message, "You need to allow me to DM you to use this feature!", reaction = "x")
@client.command("Miscellaneous Commands", ["color", "image"], "color image", "auto-color the next image you send in this channel with DeepAI")
async def command_spoiler_image(command, message):
await send(message, f"The next image you send in this channel will be automatically colored with the power of Artificial Intelligence.")
await set_data("img_color", message.author.id, message.channel.id, 0)
async def nhentai(nhid, force = False):
if force or not await has_data("nhentai", nhid):
response = requests.get(f"https://nhentai.net/g/{nhid}")
if response.status_code == 404:
raise BotError("404 Not Found!")
elif response.status_code == 200:
t = response.text
urls = {x.replace("t.", "i.", 1).replace("t.", ".") for x in re.findall("https://t\\.nhentai\\.net/galleries/\\d+/\\d+t\\.\\w+", t)}
urls = sorted(urls, key = lambda s: [int(x) for x in re.findall("\\d+", s)])
title = re.findall("<span class=\"pretty\">\\s*(.+?)\\s*</span>", t)[0]
subtitle = re.findall("<span class=\"after\">\\s*(.+?)\\s*</span>", t)[0]
sauce = int(re.findall("\\d+", urls[0])[0])
await set_data("nhentai", nhid, (title, subtitle, sauce, urls))
return (title, subtitle, sauce, urls)
else:
raise BotError(f"Unknown error: {response.status_code}")
else:
return await get_data("nhentai", nhid)
@client.command("Genshin Commands", ["genshin", "info", "..."], "genshin info <item>", "get info on an item (must enter the internal ID; ask a developer if unsure but it's not too counterintuitive)")
async def command_genshin_info(command, message):
item = " ".join(command[3:]).lower()
await client.genshin_info(item, message.channel)
await message.add_reaction("✅")
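# Genshin resin tracking: the amount is stored as a start timestamp so it regenerates implicitly (1 resin per 8 minutes, capped at 160); resin_amount returns -1 for users who never set a value, and resin_rmd returns the resin count a reminder is set for (-1 if none).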
async def resin_set(user, amt):
await set_data("genshin", "resin_info", user.id, time.time() - 8 * 60 * amt)
async def resin_rmd(user):
return await get_data("genshin", "resin_reminder", user.id, default = -1)
async def resin_amount(uid):
if await has_data("genshin", "resin_info", uid):
return min(160, (time.time() - await get_data("genshin", "resin_info", uid)) / 8 / 60)
else:
return -1
def hm(s):
h, m = divmod(int(s // 60), 60)
return str(h) + "h" + str(m).zfill(2) if h else str(m) + "m"
@client.command("Genshin Commands", ["genshin", "resin", "set", "?"], "genshin resin set <amount>", "tell me how much resin you currently have")
async def command_genshin_resin_set(command, message):
amt = int(command[4])
await resin_set(message.author, amt)
cur = await resin_rmd(message.author)
msg = await send(message, "Set your resin!" + ("" if cur == -1 else f" Your existing reminder, set for {cur} resin, will occur in {hm(8 * 60 * (cur - amt))}."))
if message.guild:
await message.delete(delay = 5)
await msg.delete(delay = 5)
@client.command("Genshin Commands", ["genshin", "resin", "now"], "genshin resin now", "check how much resin you currently have")
async def command_genshin_resin_now(command, message):
amt = await resin_amount(message.author.id)
cur = await resin_rmd(message.author)
if amt == -1:
await send(message, "You haven't told me how much resin you have yet!", reaction = "x")
else:
await send(message, f"You currently have {int(amt)} resin!" + ("" if cur == -1 else f" Your reminder, set for {cur} resin, will occur in {hm(8 * 60 * (cur - amt))}."))
@client.command("Genshin Commands", ["genshin", "resin", "reminder"], "genshin resin reminder [[amount] <desired = 160>] / stop", "set / stop a reminder for when you reach a specific amount of resin; your current amount is optional if you've already set your resin amount")
@client.command("", ["genshin", "resin", "reminder", "?"], "", "")
@client.command("", ["genshin", "resin", "reminder", "?", "?"], "", "")
async def command_genshin_resin_reminder(command, message):
if len(command) == 5 and command[4] == "stop":
msg = await send(message, "I will no longer remind you about your resin!")
await del_data("genshin", "resin_reminder", message.author.id)
else:
if len(command) <= 5:
if not await has_data("genshin", "resin_info", message.author.id):
raise BotError("You need to tell me how much resin you have with `genshin resin set` or specify the amount you currently have!")
des = int(command[4]) if len(command) == 5 else 160
amt = await resin_amount(message.author.id)
else:
amt = int(command[4])
await resin_set(message.author, amt)
des = int(command[5])
if des > 160:
raise BotError("You cannot have more than 160 resin without using Fragile Resin to exceed that cap manually!")
if des <= amt:
raise BotError("You already have that much resin!")
cur = await resin_rmd(message.author)
if cur == -1:
msg = await send(message, f"I will remind you when you reach {des} resin (in {hm(8 * 60 * (des - amt))})!")
else:
msg = await send(message, f"You previously had a reminder for when you reached {cur} resin; I will instead remind you when you reach {des} (in {hm(8 * 60 * (des - amt))})!")
await set_data("genshin", "resin_reminder", message.author.id, des)
if message.guild:
await message.delete(delay = 5)
await msg.delete(delay = 5)
@client.command("", [("nhentai", "fnhentai"), "?"], "", "")
async def command_nhentai(command, message):
nhid = int(command[2])
title, subtitle, sauce, urls = await nhentai(nhid, command[1] == "fnhentai")
reply = await send(message, embed = discord.Embed(title = title + " " + subtitle, url = f"https://nhentai.net/g/{nhid}", description = f"Page 1 / {len(urls)}").set_image(url = urls[0]))
await reply.add_reaction("⬅️")
await reply.add_reaction("➡️")
await set_data("nhentai_embed", reply.id, (nhid, 0))
import httpx
import img2pdf, os
from PIL import Image
from PyPDF3 import PdfFileMerger
from io import BytesIO
async def get_async(url):
async with httpx.AsyncClient() as client:
return await client.get(url)
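# Full gallery download: fetch every page concurrently with httpx, convert each image to a one-page PDF via img2pdf, merge them with PyPDF3, and upload the result (falling back to an external link when the file is too large for Discord).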
@client.command("", ["nhdownload", "?"], "", "")
async def command_nhdownload(command, message):
async with message.channel.typing():
nhid = int(command[2])
title, subtitle, sauce, urls = await nhentai(nhid, True)
try:
os.mkdir(f"/tmp/{nhid}")
except:
pass
merger = PdfFileMerger()
responses = await asyncio.gather(*map(get_async, urls))
for page, r in enumerate(responses):
pdf_path = f"/tmp/{nhid}/{page}.pdf"
pdf_bytes = img2pdf.convert(r.content)
with open(pdf_path, "wb") as f:
f.write(pdf_bytes)
merger.append(pdf_path)
final_path = f"/tmp/{nhid}/final.pdf"
merger.write(final_path)
merger.close()
try:
with open(final_path, "rb") as f:
await send(message, file = discord.File(fp = f, filename = f"[{nhid}] {title}.pdf"))
except:
await send(message, f"The file is too large to upload; you can access it here: https://dev.hyper-neutrino.xyz/nh/{nhid}")
@client.command("", lambda m: True, "", "")
async def command_image_spoiler_reply(command, message):
if type(message.channel) == discord.DMChannel:
if len(message.attachments) > 0:
if await has_data("dm_spoiler", message.author.id):
await client.get_channel(await get_data("dm_spoiler", message.author.id)).send(files = [(await attachment.to_file(spoiler = True)) for attachment in message.attachments])
await del_data("dm_spoiler", message.author.id)
@client.command("", lambda m: True, "", "")
async def command_image_spoiler_reply(command, message):
if len(message.attachments) > 0:
if await has_data("img_color", message.author.id, message.channel.id):
r = requests.post("https://api.deepai.org/api/colorizer", data = {"image": message.attachments[0].url}, headers = {"api-key": "551549c3-8d2c-426b-ae9f-9211b13e6f14"})
await send(message, r.json()["output_url"])
await del_data("img_color", message.author.id, message.channel.id)
@client.command("", ["echo", "..."], "echo <message>", "echo the message")
async def command_echo(command, message):
await send(message, message.content[message.content.find("echo") + 4:])
@client.command("", ["say", "..."], "say <message>", "echo, then immediately delete the command")
async def command_say(command, message):
await send(message, message.content[message.content.find("say") + 3:])
await message.delete()
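# eval/exec (and the adjust/data commands below) are restricted to user IDs in config["sudo"]; they run arbitrary Python inside the handler's scope, with exec capturing print output to echo back.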
@client.command("", ["eval", "?", "..."], "eval <expr>", "evaluate a Python expression in a command function's scope")
async def command_eval(command, message):
if message.author.id not in config["sudo"]:
await send(message, "You must be a sudo user to do that!", reaction = "x")
else:
try:
code = message.content[message.content.find("eval") + 4:].strip()
if code.startswith("```python"):
code = code[9:]
elif code.startswith("```py"):
code = code[5:]
code = code.strip("`")
await send(message, str(eval(code))[:2000])
except:
await send(message, "Error evaluating expression!", reaction = "x")
@client.command("", ["exec", "?", "..."], "exec <code>", "execute Python code in a command function's scope (print is replaced with message output)")
async def command_exec(command, message):
if message.author.id not in config["sudo"]:
await send(message, "You must be a sudo user to do that!", reaction = "x")
else:
try:
code = message.content[message.content.find("exec") + 4:].strip()
if code.startswith("```python"):
code = code[9:]
elif code.startswith("```py"):
code = code[5:]
code = code.strip("`")
output = []
def print(*items, end = "\n", sep = " "):
output.extend(list(sep.join(map(str, items)) + end))
exec(code)
await send(message, "```python\n" + "".join(output[:1980]) + "\n```")
except:
await send(message, "Error executing expression!", reaction = "x")
@client.command("", ["adjust", "ehecd", "?"], "adjust ehecd <x>", "adjust the cooldown of ehe te nandayo")
async def command_exec(command, message):
if message.author.id not in config["sudo"]:
await send(message, "You must be a sudo user to do that!", reaction = "x")
else:
try:
await set_data("ehecd", int(command[3]))
await send(message, f"Cooldown of 'ehe te nandayo' is now {command[3]} second{"s" * (command[3] != "1")}!")
except:
await send(message, "Error; make sure you entered an integer!", reaction = "x")
@client.command("", ["data", "..."], "data", "fetch data from the bot")
async def command_data(command, message):
if message.author.id not in config["sudo"]:
await send(message, "You must be a sudo user to do that!")
else:
await send(message, "```python\n" + str(await get_data(*map(eval, command[2:]), default = None, set_if_missing = False))[:1980] + "\n```")
@client.command("", ["identify", "?"], "identify <user>", "identify a user")
async def command_identify(command, message):
member = await get_member(message.guild, command[2], message.author)
await send(message, f"Identified {member.name}#{member.discriminator}, a.k.a {member.display_name}, I.D. {member.id} ({member.mention})", allowed_mentions = discord.AllowedMentions.none())
@client.command("", ["emoji", "?", "-"], "", "")
@client.command("", ["emoji", "?"], "emoji <lookup> [-]", "post an emoji by lookup ID")
async def command_emoji(command, message):
try:
await send(message, str(emoji(command[2])))
if len(command) == 4:
await message.delete()
except:
await send(message, "That resulted in an error.", reaction = "x")
raise
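# Summarize a linked article through the SMMRY API, with optional length and keyword-count arguments; SMMRY error codes 0-3 are mapped to user-facing messages.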
@client.command("", [("summary", "summarize"), "?"], "", "")
@client.command("", [("summary", "summarize"), "?", "?"], "", "")
@client.command("", [("summary", "summarize"), "?", "?", "?"], "", "")
async def command_summarize(command, message):
url = command[2]
if url[0] == "<" and url[-1] == ">":
url = url[1:-1]
await message.edit(suppress = True)
rurl = f"https://api.smmry.com/?SM_API_KEY={config["api-keys"]["sm"]}"
if len(command) >= 4:
rurl += "&SM_LENGTH=" + command[3]
if len(command) >= 5:
rurl += "&SM_KEYWORD_COUNT=" + command[4]
rurl += "&SM_URL=" + url
r = requests.get(rurl)
data = r.json()
if "sm_api_error" in data:
error = data["sm_api_error"]
if error == 0:
await send(message, "Internal server problem with the SMMRY API; this is not your fault. Try again later.", reaction = "x")
elif error == 1:
await send(message, "Parameters are invalid. Check that you entered a real URL; otherwise, contact a developer.", reaction = "x")
elif error == 2:
await send(message, "This request has intentionally been restricted. Perhaps you have expended the API key's limit (100 per day).", reaction = "x")
elif error == 3:
await send(message, "Summarization error. This website might not be summarizable.")
else:
await send(message, (f"**{data["sm_api_title"].strip() or "(no title)"}**\n\n{data["sm_api_content"].strip() or "(no content)"}")[:2000])
if "sm_api_keyword_array" in data:
await message.channel.send(f"**Keywords**: {", ".join(data["sm_api_keyword_array"])}")
@client.command("", ["tsr", "?"], "", "")
async def command_toggle_suppress_reacts(command, message):
member = await get_member(message.guild, command[2], message.author)
await mod_data("tsr", lambda x: x ^ {member.id}, default = set())
await message.add_reaction("✅")
@client.command("", ["react", "..."], "", "")
async def command_react(command, message):
if not message.reference or not message.reference.resolved:
raise BotError("You need to refer to a message via reply!")
fails = []
for x in command[2:]:
try:
await message.reference.resolved.add_reaction(emoji(x))
except:
fails.append(x)
if fails:
await send(message, "The following emojis do not exist / could not have been added: " + ", ".join(fails))
else:
await message.delete()
# @client.command("", re.compile(r"\b[hH]?[eE][hH][eE]\b").search, "", "")
async def command_ehe_te_nandayo(command, message):
if message.author != client.user and time.time() - await get_data("ehe", message.author.id, default = 0) > (await get_data("ehecd", default = 30)):
await send(message, "**ehe te nandayo!?**", reaction = "?")
await set_data("ehe", message.author.id, time.time())
# @client.command("", re.compile(r"\[\w+\]").search, "", "")
async def command_emoji_react(command, message):
for c in re.findall(r"\[(\w+)\]", message.content):
try:
await message.add_reaction(emoji(c))
except:
pass
# @client.command("", re.compile(r"\b[Aa][Oo][Cc]\b").search, "", "")
async def command_aoc(command, message):
await message.channel.send("Alexandria Ocasio-Cortez")
# @client.command("", ["toggle69"], "", "")
async def command_toggle69(command, message):
await set_data("disable_69", not await get_data("disable_69", default = False))
await message.add_reaction("✅")
# @client.command("", re.compile(r"\b69\b").search, "", "")
async def command_69(command, message):
if await get_data("disable_69", default = False):
return
await message.reply("nice", mention_author = False)
import asyncio, datetime, discord, json, pycountry, random, re, requests, time, traceback
from aioconsole import ainput
from word2number import w2n
from client import *
from datamanager import config, del_data, get_data, has_data, mod_data, set_data, batch_set_data
from discordutils import *
from league import *
async def dm(user, *a, **k):
channel = user.dm_channel
if channel is None:
channel = await user.create_dm()
await channel.send(*a, **k)
@client.command("", ["help"], "", "")
@client.command("General Commands", ["help", "rpg"], "help [rpg]", "post a list of commands")
async def command_help(command, message):
sections = {}
for section, _, syntax, description, _ in client.commands:
if section == "" or ((section == "RPG Commands") ^ (len(command) == 3)): continue
if section not in sections:
sections[section] = []
sections[section].append(f"`{syntax}` - {description}")
embed = discord.Embed(
title = "Help - Commands",
color = client.color
)
for section in sections:
embed.add_field(name = section, value = "\n".join(sections[section]), inline = False)
await dm(message.author, embed = embed)
await send(message, "Sent the command list to your DMs!")
@client.command("General Commands", ["ping"], "ping", "check your ping")
async def command_ping(command, message):
ping = int((time.time() - (message.created_at - datetime.datetime(1970, 1, 1)) / datetime.timedelta(seconds = 1)) * 1000)
await send(message, f"Pong! ({ping} ms)", reaction = "🏓")
@client.command("Channel Type Commands", ["subscribe"], "subscribe", "announce updates to this channel")
async def command_subscribe(command, message):
await mod_data("announcement_channels", lambda x: x | {message.channel.id}, default = set())
await send(message, "Subscribed to status updates here!")
@client.command("Channel Type Commands", ["unsubscribe"], "unsubscribe", "stop announcing updates to this channel")
async def command_unsubscribe(command, message):
await mod_data("announcement_channels", lambda x: x - {message.channel.id}, default = set())
await send(message, "Unsubscribed from status updates here!")
@client.command("Channel Type Commands", ["watch", ("osu", "genshin")], "watch osu/genshin", "watch osu!/Genshin Impact updates here")
async def command_watch(command, message):
await mod_data("watch_channels", command[2], lambda x: x | {message.channel.id}, default = set())
await send(message, "Now watching " + {"osu": "osu!", "genshin": "Genshin Impact"}[command[2]] + " updates in this channel!")
@client.command("Channel Type Commands", ["unwatch", ("osu", "genshin")], "unwatch osu/genshin", "stop watching osu!/Genshin Impact updates here")
async def command_watch(command, message):
await mod_data("watch_channels", command[2], lambda x: x - {message.channel.id}, default = set())
await send(message, "No longer watching " + {"osu": "osu!", "genshin": "Genshin Impact"}[command[2]] + " updates in this channel!")
words = None
wordmap = {}
with open("data/words.txt") as f:
words = [x for x in f.read().strip().splitlines() if 5 <= len(x)]
for word in words:
key = "".join(sorted(word))
if key not in wordmap:
wordmap[key] = set()
wordmap[key].add(word)
anagram_lock = asyncio.Lock()
def display(actual, scrambled, hint):
if hint == 0: return scrambled
cl = list(scrambled)
start = actual[:hint if hint * 2 <= len(actual) else -hint]
end = actual[-hint:]
for c in start + end:
cl.remove(c)
return f"**{start}**{''.join(cl)}**{end}**"
async def anagram_function(message, answer = None, start = False, stop = False, hint = False, reorder = False):
global words, wordmap
async with anagram_lock:
active = await has_data("anagram", message.channel.id, "puzzle")
puzzle = await get_data("anagram", message.channel.id, "puzzle", default = "", set_if_missing = False)
answers = wordmap.get("".join(sorted(puzzle)), set())
current_hint = await get_data("anagram", message.channel.id, "hint", default = 0, set_if_missing = False)
if reorder:
if active:
charlist = list(puzzle)
random.shuffle(charlist)
puzzle = "".join(charlist)
await set_data("anagram", message.channel.id, "puzzle", puzzle)
await send(message, f"Reordered: solve for '{display(sorted(answers)[0], puzzle, current_hint)}' ({len(puzzle)}).")
else:
await send(message, "There is no ongoing anagram puzzle in this channel!", reaction = "x")
if hint:
if active:
if len(puzzle) - current_hint * 2 - 2 <= 1:
stop = True
else:
await set_data("anagram", message.channel.id, "hint", current_hint + 1)
await send(message, f"Hint: 2 more letters shown: solve for '{display(sorted(answers)[0], puzzle, current_hint + 1)}' ({len(puzzle)}).")
else:
await send(message, "There is no ongoing anagram puzzle in this channel!", reaction = "x")
if stop:
if active:
if len(answers) == 1:
await send(message, f"Anagram puzzle ended! The correct answer was '{list(answers)[0]}'.")
else:
await send(message, f"Anagram puzzle ended! The correct answers were {english_list(quote(answers))}.")
await del_data("anagram", message.channel.id)
active = False
else:
await send(message, "There is no ongoing anagram puzzle in this channel!", reaction = "x")
if active and answer in answers:
try:
points = len(answer) - 2 * await get_data("anagram", message.channel.id, "hint")
bonus = int(points / 2) * (time.time() - await get_data("anagram", message.channel.id, "timestamp", default = 0) <= 5)
await mod_data("leaderboard", "anagram", message.author.id, lambda x: x + points + bonus, default = 0)
await batch_set_data("anagram", message.channel.id, active = False, last = answers, lasttime = time.time())
active = False
bonus_display = f" **+{bonus}**" if bonus else ""
alt_display = f" (Alternative answers: {english_list(quote(answers - {answer}))})" if len(answers) > 1 else ""
await send(message, f"Congratulations to {message.author.mention} for winning the anagram puzzle! (+{points}{bonus_display}){alt_display}", allowed_mentions = discord.AllowedMentions.none())
start = True
except:
print(traceback.format_exc())
elif answer in await get_data("anagram", message.channel.id, "last", default = set()) and time.time() - await get_data("anagram", message.channel.id, "lasttime", default = 0) <= 1:
await send(message, f"{message.author.mention} L", reaction = "x", allowed_mentions = discord.AllowedMentions.none())
if start:
if active:
hint = await get_data("anagram", message.channel.id, "hint", default = 0)
actual = sorted(answers)[0]
await send(message, f"An anagram puzzle is already running! Solve for '{display(actual, puzzle, hint)}' ({len(puzzle)}).", reaction = "x")
else:
word = random.choice(words)
charlist = list(word)
random.shuffle(charlist)
scrambled = "".join(charlist)
await batch_set_data("anagram", message.channel.id, active = True, puzzle = scrambled, hint = 0, timestamp = time.time())
await send(message, f"Anagram puzzle! Solve for '{scrambled}' ({len(word)}).")
@client.command("Anagram Commands", ["anagram"], "anagram start", "start an anagram puzzle")
async def command_anagram_start(command, message):
await anagram_function(message, start = True)
@client.command("Anagram Commands", ["anagram", "restart"], "anagram restart", "restart the anagram puzzle")
async def command_anagram_restart(command, message):
await anagram_function(message, stop = True, start = True)
@client.command("Anagram Commands", ["anagram", "stop"], "anagram stop", "stop the anagram puzzle")
async def command_anagram_stop(command, message):
await anagram_function(message, stop = True)
@client.command("Anagram Commands", ["anagram", "shuffle"], "anagram shuffle", "alias for `anagram reorder`")
@client.command("Anagram Commands", ["anagram", "scramble"], "anagram scramble", "alias for `anagram reorder`")
@client.command("Anagram Commands", ["anagram", "reorder"], "anagram reorder", "reorder the anagram puzzle")
async def command_anagram_reorder(command, message):
await anagram_function(message, reorder = True)
@client.command("Anagram Commands", ["anagram", "hint"], "anagram hint", "show another character in the anagram puzzle")
async def command_anagram_hint(command, message):
await anagram_function(message, hint = True)
@client.command("Anagram Commands", ["anagram", "add", "?"], "anagram add <word>", "add a word to the anagram dictionary")
async def command_anagram_add(command, message):
global words, wordmap
word = command[3].strip().lower()
if all(char in "abcdefghijklmnopqrstuvwxyz" for char in word):
if word in words:
await send(message, "This word is already in the dictionary!", reaction = "x")
else:
words.append(word)
words.sort()
with open("data/words.txt", "w") as f:
f.write("\n".join(words))
key = "".join(sorted(word))
if key not in wordmap:
wordmap[key] = set()
wordmap[key].add(word)
await send(message, f"Added '{word}' to the dictionary!")
else:
await send(message, "Words must only contain letters!", reaction = "x")
@client.command("Anagram Commands", ["anagram", "rm", "?"], "anagram rm <word>", "alias for `anagram remove`")
@client.command("Anagram Commands", ["anagram", "remove", "?"], "anagram remove <word>", "remove a word from the anagram dictionary")
async def command_anagram_remove(command, message):
global words, wordmap
word = command[3].strip().lower()
if word in words:
words.remove(word)
with open("data/words.txt", "w") as f:
f.write("\n".join(words))
key = "".join(sorted(word))
wordmap[key].discard(word)
await send(message, f"Removed '{word}' from the dictionary!")
else:
await send(message, "This word is not in the dictionary!", reaction = "x")
@client.command("Anagram Commands", ["anagram", "lb"], "anagram lb", "alias for `anagram leaderboard`")
@client.command("Anagram Commands", ["anagram", "leaderboard"], "anagram leaderboard", "show the leaderboard for the anagram puzzle")
async def command_anagram_leaderboard(command, message):
scores = []
scoremap = await get_data("leaderboard", "anagram")
for member in message.guild.members:
score = scoremap.get(member.id, 0)
if score:
scores.append((score, member))
scores.sort(reverse = True)
await send(message, embed = discord.Embed(
title = "Leaderboard - Anagram",
description = "\n".join(f"{member.mention} - {score}" for score, member in scores)
))
@client.command("", lambda m: True, "", "")
async def command_anagram_answer(command, message):
try:
await anagram_function(message, answer = message.content.strip().strip("!@#$%^&*()[]{}/|\.,<>\"'").lower())
except:
pass
@client.command("User Commands", ["alias", "?", "?"], "alias <name> <user>", "alias a name to a user")
async def command_alias(command, message):
member = await get_member(message.guild, command[3], message.author)
await set_data("aliases", message.guild.id, command[2].lower(), member.id)
await send(message, f"Aliased '{command[2].lower()}' to {member.mention}!", allowed_mentions = discord.AllowedMentions.none())
@client.command("User Commands", ["unalias", "?"], "unalias <name>", "remove a name's alias")
async def command_unalias(command, message):
await set_data("aliases", message.guild.id, command[2].lower(), None)
await send(message, f"Removed the alias for '{command[2].lower()}'!")
@client.command("User Commands", ["unbonk", "?", "..."], "unbonk <user>", "alias for `unignore`")
@client.command("User Commands", ["unignore", "?", "..."], "unignore <user>", "make the bot no longer ignore messages from a particular user (on a server)")
@client.command("User Commands", ["bonk", "?", "..."], "bonk <user>", "alias for `ignore`")
@client.command("User Commands", ["ignore", "?", "..."], "ignore <user>", "make the bot ignore all messages from a particular user (on a server)")
async def command_ignore(command, message):
for uinfo in command[2:]:
member = await get_member(message.guild, uinfo, message.author)
if not command[1].startswith("un") and member == message.author:
await send(message, f"You cannot {command[1]} yourself!", reaction = "x")
else:
await set_data("ignore", message.guild.id, member.id, not command[1].startswith("un"))
await send(message, f"No longer ignoring {member.mention}!" if command[1].startswith("un") else f"{'Bonk! ' * (command[1] == 'bonk')}Now ignoring {member.mention}!", allowed_mentions = discord.AllowedMentions.none())
@client.command("User Commands", ["unshut", "?", "..."], "unbonk <user>", "alias for `unsilence`")
@client.command("User Commands", ["unsilence", "?", "..."], "unignore <user>", "make the bot delete messages from a particular user (on a server)")
@client.command("User Commands", ["shut", "?", "..."], "bonk <user>", "alias for `silence`")
@client.command("User Commands", ["silence", "?", "..."], "ignore <user>", "make the bot delete messages from a particular user (on a server)")
async def command_silence(command, message):
for uinfo in command[2:]:
member = await get_member(message.guild, uinfo, message.author)
if not command[1].startswith("un") and member == message.author:
await send(message, f"You cannot {command[1]} yourself!", reaction = "x")
else:
await set_data("silence", message.guild.id, member.id, not command[1].startswith("un"))
await send(message, f"No longer silencing {member.mention}!" if command[1].startswith("un") else f"{'https://i.redd.it/l5jmlb1ltqj51.jpg' * (command[1] == 'shut')}Now silencing {member.mention}!", allowed_mentions = discord.AllowedMentions.none())
# @client.command("Role Commands", ["gib", "?", "..."], "gib <name> [roles...]", "alias for `role give`")
# @client.command("Role Commands", ["role", "give", "?", "..."], "role give <name> [roles...]", "give a list of roles to a user")
# async def command_role_give(command, message):
# user, *names = command[2 if command[1] == "gib" else 3:]
# member = await get_member(message.guild, user, message.author)
# roles = [get_role(message.guild, string) for string in names]
# if any(role.id == 741731868692709416 for role in roles) and member.id != 251082987360223233:
# await send(message, f"<@&741731868692709416> is exclusive to <@!251082987360223233>!", allowed_mentions = discord.AllowedMentions.none())
# else:
# await member.add_roles(*roles)
# await send(message, f"Granted {english_list(quote(role.mention for role in roles))} to {member.mention}!", allowed_mentions = discord.AllowedMentions(roles = False))
# @client.command("Role Commands", ["gibnt", "?", "..."], "gibnt <name> [roles...]", "alias for `role remove`")
# @client.command("Role Commands", ["role", "remove", "?", "..."], "role remove <name> [roles...]", "remove a list of roles from a user")
# async def command_role_remove(command, message):
# user, *names = command[2 if command[1] == "gibnt" else 3:]
# member = await get_member(message.guild, user, message.author)
# roles = [get_role(message.guild, string) for string in names]
# await member.remove_roles(*roles)
# await send(message, f"Removed {english_list(quote(role.mention for role in roles))} from {member.mention}!", allowed_mentions = discord.AllowedMentions(roles = False))
@client.command("", ["role", "colour", "?"], "", "")
@client.command("", ["role", "color", "?"], "", "")
@client.command("Role Commands", ["role", "colour", "?", "?"], "role colour <role> [colour = 0]", "alias for `role color`")
@client.command("Role Commands", ["role", "color", "?", "?"], "role color <role> [color = 0]", "recolor a role, or remove its color")
async def command_role_color(command, message):
role = get_role(message.guild, command[3])
await role.edit(color = get_color(command[4] if len(command) > 4 else "0"))
await send(message, f"Recolored '{role.mention}'!", allowed_mentions = discord.AllowedMentions.none())
@client.command("Role Commands", ["role", "rename", "?", "?"], "role rename <role> <name>", "rename a role")
async def command_role_rename(command, message):
role = get_role(message.guild, command[3])
name = role.name
await role.edit(name = command[4])
await send(message, f"Renamed '{name}' to '{command[4]}'!")
services = {
"lol": "lol",
"league": "lol",
"dmoj": "dmoj",
"cf": "cf",
"codeforces": "cf",
"osu": "osu",
"ow": "ow",
"overwatch": "ow"
}
service_list = tuple(services)
@client.command("", [service_list, "link", "?"], "", "")
@client.command("External User Commands", [service_list, "link", "?", "?"], "<lol/league | cf/codeforces | dmoj | osu | ow/overwatch> link [user = me] <account>", "link a user to an external account")
async def command_link(command, message):
service = services[command[1]]
member = await get_member(message.guild, command[3] if len(command) == 5 else "me", message.author)
await set_data("external", service, member.id, command[-1])
await send(message, f"Linked {member.mention} to {command[-1]}!", allowed_mentions = discord.AllowedMentions.none())
@client.command("", [service_list, "unlink"], "", "")
@client.command("External User Commands", [service_list, "unlink", "?"], "<lol/league | cf/codeforces | dmoj | osu | ow/overwatch> unlink [user = me]", "unlink a user from a service")
async def command_unlink(command, message):
service = services[command[1]]
member = await get_member(message.guild, command[3] if len(command) == 4 else "me", message.author)
await del_data("external", service, member.id)
await send(message, f"Unlinked {member.mention}!", allowed_mentions = discord.AllowedMentions.none())
async def get_ext_user(key, error, command, message):
if len(command) == 3:
if await has_data("external", key, message.author.id):
return await get_data("external", key, message.author.id)
else:
raise BotError(f"You are not linked; please specify {error} or link yourself first!")
else:
try:
member = await get_member(message.guild, command[3], message.author)
if await has_data("external", key, member.id):
return await get_data("external", key, member.id)
except:
pass
return command[3]
@client.command("", [("cf", "codeforces"), ("details", "rank", "rating")], "", "")
@client.command("External User Commands", [("cf", "codeforces"), ("details", "rank", "rating"), "?"], "cf/codeforces <details | rank/rating> [user = me]", "report a codeforces user's public details or just rank+rating")
async def command_cf_details(command, message):
cf = await get_ext_user("cf", "a codeforces user", command, message)
rv = requests.get("https://codeforces.com/api/user.info?handles=" + cf).json()
if rv["status"] == "OK":
cfdata = rv["result"][0]
if command[2] == "rank" or command[2] == "rating":
await send(message, f"{cf} is rank {cfdata['rank']} [{cfdata['rating']}] (max {cfdata['maxRank']} [{cfdata['maxRating']}])!")
else:
embed = discord.Embed(title = cf, color = client.color, url = "https://codeforces.com/profile/" + cf).set_thumbnail(url = "http:" + cfdata["avatar"])
for key, name in [
("email", "Email Address"),
("firstName", "First Name"),
("lastName", "Last Name"),
("organization", "Organization"),
("contribution", "Contribution"),
("friendOfCount", "Friend Of #")
]:
if cfdata.get(key):
embed.add_field(name = name, value = str(cfdata[key]))
if cfdata.get("country") or cfdata.get("city"):
city = f"{cfdata['city']}, " if cfdata.get("city") else ""
embed.add_field(name = "Location", value = f"{city}{cfdata['country']}")
embed.add_field(name = "Current Rank", value = f"{cfdata['rank']} [{cfdata['rating']}]")
embed.add_field(name = "Maximum Rank", value = f"{cfdata['maxRank']} [{cfdata['maxRating']}]")
embed.add_field(name = "Registered Since", value = datetime.datetime.fromtimestamp(cfdata["registrationTimeSeconds"]).strftime("%B %d, %Y at %H:%M:%S"))
embed.add_field(name = "Last Seen Online", value = datetime.datetime.fromtimestamp(cfdata["lastOnlineTimeSeconds"]).strftime("%B %d, %Y at %H:%M:%S"))
await send(message, embed = embed)
else:
await send(message, f"'{cf}' is not a codeforces user!", reaction = "x")
def dmoj_api(URL):
rv = requests.get(URL)
if rv.status_code != 200:
raise BotError(f"'{URL}' returned status {rv.status_code} (not 200)!")
data = rv.json()
if "error" in data:
raise BotError("Error fetching from DMOJ API; likely item does not exist!")
if "data" not in data:
raise BotError("Data not found; check the URL!")
return data["data"]
@client.command("", ["dmoj", ("details", "rank", "rating")], "", "")
@client.command("External User Commands", ["dmoj", ("details", "rank", "rating"), "?"], "dmoj <details | rank/rating> [user = me]", "report a DMOJ user's public details or just rank+rating")
async def command_dmoj_details(command, message):
dm = await get_ext_user("dmoj", "a DMOJ user", command, message)
dmdata = dmoj_api("https://dmoj.ca/api/v2/user/" + dm)["object"]
rating = dmdata["rating"]
if rating < 1000:
rank = "Newbie"
elif rating < 1200:
rank = "Amateur"
elif rating < 1500:
rank = "Expert"
elif rating < 1800:
rank = "Candidate Master"
elif rating < 2200:
rank = "Master"
elif rating < 3000:
rank = "Grandmaster"
else:
rank = "Target"
if dmdata["rank"] == "admin":
rank += " (Admin)"
if command[2] == "rank" or command[2] == "rating":
await send(message, f"{dmdata['username']} is rank {rank} [{rating}]!")
elif command[2] == "details":
await send(message, embed = discord.Embed(
title = dmdata["username"],
color = 0x3333AA,
url = "https://dmoj.ca/user/" + dmdata["username"]
).add_field(
name = "Points",
value = "%.2f" % dmdata["points"]
).add_field(
name = "Solved Problems",
value = str(dmdata["problem_count"])
).add_field(
name = "Contests",
value = str(len(dmdata["contests"]))
).add_field(
name = "Organizations",
value = ", ".join(org["short_name"] for org in dmoj_api("https://dmoj.ca/api/v2/organizations")["objects"] if org["id"] in dmdata["organizations"])
).add_field(
name = "Rank",
value = rank
).add_field(
name = "Rating",
value = str(rating)
))
@client.command("", ["osu", ("details", "summary")], "", "")
@client.command("External User Commands", ["osu", ("details", "summary"), "?"], "osu <details | summary> [player = me]", "report an osu player's public details or summary")
async def command_osu_details(command, message):
osu = await get_ext_user("osu", "an osu! player", command, message)
rv = requests.get(f"https://osu.ppy.sh/api/get_user?k={config['api-keys']['osu']}&u={osu}")
if rv.status_code == 200:
data = rv.json()
if data == []:
await send(message, "Could not find an osu! player by that username/ID!", reaction = "x")
else:
user = data[0]
if command[2] == "summary":
await send(message, embed = discord.Embed(title = f"osu! player details: {user['username']}", description = f"Level {user['level']}\nPP: {user['pp_raw']}\nRank: #{user['pp_rank']} (#{user['pp_country_rank']})\nAccuracy: {user['accuracy']}", color = client.color).set_thumbnail(url = f"http://s.ppy.sh/a/{user['user_id']}"))
else:
seconds = int(user["total_seconds_played"])
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
await send(message, embed = discord.Embed(
title = f"osu! player summary: {user['username']} #{user['user_id']}",
description = f"User since {user['join_date']}",
url = f"https://osu.ppy.sh/users/{user['user_id']}",
color = client.color
).add_field(
name = "Level",
value = user["level"]
).add_field(
name = "Accuracy",
value = user["accuracy"]
).add_field(
name = "Performance Points",
value = user["pp_raw"]
).add_field(
name = "Rank",
value = f"#{user['pp_rank']} (#{user['pp_country_rank']} in {pycountry.countries.get(alpha_2 = user['country']).name})"
).add_field(
name = "Score Counts",
value = " ".join(f"{user['count' + x]} {emoji('osu_' + x)}" for x in ["300", "100", "50"]),
inline = False
).add_field(
name = "Rating Counts",
value = " ".join(f"{user['count_rank_' + x.lower()]} {emoji('osu_' + x)}" for x in ["SSH", "SS", "SH", "S", "A"]),
inline = False
).add_field(
name = "Best Score",
value = user['ranked_score']
).add_field(
name = "Total Score",
value = user['total_score']
).add_field(
name = "Time Played",
value = f"{hours}:{str(minutes).zfill(2)}:{str(seconds).zfill(2)}"
).set_thumbnail(
url = f"http://s.ppy.sh/a/{user['user_id']}"
))
else:
await send(message, f"Failed to fetch from osu! API: status code {rv.status_code}!", reaction = "x")
def display_ow_rank(rating):
try:
rank = int(rating)
if rank < 1500:
e = "ow_bronze"
elif rank < 2000:
e = "ow_silver"
elif rank < 2500:
e = "ow_gold"
elif rank < 3000:
e = "ow_platinum"
elif rank < 3500:
e = "ow_diamond"
elif rank < 4000:
e = "ow_master"
else:
e = "ow_grandmaster"
return f"{rating} {emoji(e)}"
except:
return rating
@client.command("", [("ow", "overwatch"), "summary"], "", "")
@client.command("External User Commands", [("ow", "overwatch"), "summary", "?"], "ow/overwatch summary <player = me>", "report an overwatch player's summary")
async def command_ow_summary(command, message):
ow = await get_ext_user("ow", "a Blizzard battletag", command, message)
try:
r = requests.get(f"https://ow-api.com/v1/stats/pc/us/{ow}/profile")
if r.status_code != 200:
raise RuntimeError("Status Code not 200")
data = r.json()
try:
await send(message, embed = discord.Embed(
title = f"Overwatch player summary: {data['name']}",
description = "",
color = client.color
).add_field(
name = "Level",
value = str(data["level"] + 100 * data["prestige"])
).add_field(
name = "Rating",
value = display_ow_rank(data["rating"])
).add_field(
name = "Games Won",
value = str(data["gamesWon"])
).add_field(
name = "Competitive Winrate",
value = "%.2f%%" % (data["competitiveStats"]["games"]["won"] / data["competitiveStats"]["games"]["played"] * 100) if "games" in data["competitiveStats"] else "N/A"
).set_thumbnail(
url = data["icon"]
))
except:
print(traceback.format_exc())
await send(message, "Failed to generate embed!", reaction = "x")
except:
await send(message, f"Failed to fetch user data for `{ow}` from Overwatch API; check the spelling of this battletag (please format as `name-number`)!", reaction = "x")
@client.command("", [("lol", "league"), ("report", "current", "report-player", "current-player")], "", "")
@client.command("League of Legends Commands", [("lol", "league"), ("report", "current", "report-player", "current-player"), "?"], "lol/league <report | current>[-player] [player = me]", "create a game report for the player")
async def command_lol_report(command, message):
sm = await get_ext_user("lol", "a League of Legends summoner", command, message)
try:
summoner = watcher.summoner.by_name(lol_region, sm)
if command[2] == "report" or command[2] == "report-player":
try:
game = watcher.match.matchlist_by_account(lol_region, summoner["accountId"], end_index = 1)["matches"][0]
try:
if command[2] == "report":
await send(message, embed = await lol_game_embed(message.guild, game["gameId"], sm, False), reaction = "check")
elif command[2] == "report-player":
await send(message, embed = await lol_player_embed(message.guild, game["gameId"], sm, False), reaction = "check")
except:
print(traceback.format_exc())
await send(message, "Failed to create embed!", reaction = "x")
except Exception as e:
await send(message, f"Could not find a game for {lol_region.upper()}/{sm}! The summoner may not have played a proper game recently enough.", reaction = "x")
else:
try:
game = watcher.spectator.by_summoner(lol_region, summoner["id"])
try:
if command[2] == "current":
await send(message, embed = await lol_current_embed(message.guild, game, sm))
elif command[2] == "current-player":
await send(message, embed = await lol_current_player_embed(message.guild, game, [sm]))
except:
print(traceback.format_exc())
await send(message, "Failed to create embed!", reaction = "x")
except Exception as e:
await send(message, f"Could not find current game for {lol_region.upper()}/{sm}! The summoner may not be in game.", reaction = "x")
except:
await send(message, f"Could not find summoner {lol_region.upper()}/{sm}! Please check your spelling.", reaction = "x")
@client.command("League of Legends Commands", [("lol", "league"), "rotation"], "lol/league rotation", "check the current free champion rotation")
async def command_lol_rotation(command, message):
champions = [champs[cid] for cid in watcher.champion.rotations(lol_region)["freeChampionIds"]]
champions.sort()
await send(message, f"This week's free rotation is: {english_list(champions)}.")
@client.command("League of Legends Commands", [("lol", "league"), "ranges", "..."], "lol/league ranges <champion> [champion...]", "compare ability ranges for champions")
async def command_lol_ranges(command, message):
champs = set()
for champ in command[3:]:
champ = champ.lower()
if champ not in cmap:
await send(message, f"{champ} is not a recognized champion name or ID!", reaction = "x")
break
champs.add(cmap[champ])
else:
items = []
for champ in champs:
data = requests.get(f"http://ddragon.leagueoflegends.com/cdn/{lol_version}/data/en_US/champion/{champ}.json").json()
items.append((data["data"][champ]["stats"]["attackrange"], data["data"][champ]["name"], "Basic Attack"))
for i, spell in enumerate(data["data"][champ]["spells"]):
ident = data["data"][champ]["name"] + " " + ("QWER"[i] if 0 <= i < 4 else "?")
if len(set(spell["range"])) == 1:
items.append((spell["range"][0], ident, spell["name"]))
else:
clusters = {}
                    for j, r in enumerate(spell["range"]):
                        if r not in clusters:
                            clusters[r] = []
                        clusters[r].append(j + 1)
for key in clusters:
items.append((key, ident, spell["name"] + " Rank " + "/".join(map(str, clusters[key]))))
items.sort()
stacked = []
for item in items:
if stacked == [] or item[0] != stacked[-1][0]:
stacked.append([item[0], []])
stacked[-1][1].append((item[1], item[2]))
info = "**Range Analysis**\n"
for rng, stack in stacked:
stack = ", ".join(f"{ident} ({name})" for ident, name in stack)
info += f"\n__{rng}__: {stack}"
await send(message, info, reaction = "check")
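# Item lookup: find_item (defined earlier in this file) is assumed to resolve a name to a
# Data Dragon item entry; the tag dictionary below just translates Data Dragon's internal
# tag names into human-readable labels for the embed.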
@client.command("League of Legends Commands", [("lol", "league"), "item", "?", "..."], "lol item <name>", "get details about an item")
async def command_lol_item(command, message):
item = find_item("".join(command[3:]).lower())
await send(message, embed = discord.Embed(
title = f"League of Legends Item: {item['name']} (#{item['id']})",
description = re.sub("(\\() (.)|(.) (\\))", "\\1\\2\\3\\4", re.sub(" +", " ", re.sub("<[^>]+?>", "", re.sub("<br>|<li>", "\n", item["description"])))),
color = client.color,
url = f"https://leagueoflegends.fandom.com/wiki/{item['name'].replace(' ', '_')}"
).add_field(
name = "Build Path",
value = build_path(item["id"]) + ("\n\nBuilds into: " + english_list(lolitems[key]["name"] for key in item.get("into")) if item.get("into") else "")
).add_field(
name = "Tags",
value = "\n".join("- " + {
"CriticalStrike": "Critical Strike",
"NonbootsMovement": "Movement Speed",
"SpellDamage": "Ability Power",
"MagicPenetration": "Magic Penetration",
"ArmorPenetration": "Armor Penetration",
"SpellBlock": "Magic Resistance",
"Slow": "Movement Reduction",
"Jungle": "Jungling",
"Health": "Health",
"Lane": "Laning",
"Aura": "Aura",
"HealthRegen": "Health Regeneration",
"SpellVamp": "Spell Vamp",
"GoldPer": "Gold Income",
"Mana": "Mana",
"Vision": "Vision",
"LifeSteal": "Physical Vamp",
"Consumable": "Consumable",
"Armor": "Armor",
"Stealth": "Stealth",
"ManaRegen": "Mana Regeneration",
"OnHit": "On-Hit",
"Active": "Active",
"CooldownReduction": "Cooldown Reduction",
"Trinket": "Trinket",
"AttackSpeed": "Attack Speed",
"Boots": "Boots",
"AbilityHaste": "Ability Haste",
"Tenacity": "Tenacity",
"Damage": "Attack Damage"
}[tag] for tag in item["tags"])
).set_thumbnail(
url = f"http://ddragon.leagueoflegends.com/cdn/{lol_version}/img/item/{item['id']}.png"
))
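# Message statistics helpers: stats() walks a channel's full history and counts messages
# per "name#discriminator", optionally restricted to a set of user IDs (vis), returning
# (user, count) pairs sorted by count descending; names are truncated to stats_length.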
stats_length = 24
async def stats(channel, vis = None):
counts = {}
async for message in channel.history(limit = None):
if not vis or message.author.id in vis:
uinfo = f"{truncate(message.author.name, stats_length - 5)}#{message.author.discriminator}"
counts[uinfo] = counts.get(uinfo, 0) + 1
return sorted(counts.items(), key = lambda a: (-a[1], a[0]))
def truncate(string, length):
if len(string) > length:
return string[:length - 1] + "…"
return string
@client.command("Server Statistics Commands", [("channel", "server"), "stats"], "<channel | server> stats", "output the number of messages sent in each channel by each user")
async def command_channel_stats(command, message):
v = set(m.id for m in message.channel.members)
async with message.channel.typing():
if command[1] == "channel":
s = await stats(message.channel, v)
total = sum(b for _, b in s)
mc = len(str(max(b for _, b in s)))
l = max(len(a) for a, _ in s)
await send(message, embed = discord.Embed(
title = f"Channel Stats for #{message.channel.name}",
description = "```\n" + "\n".join(f"{uinfo.ljust(l)} {str(count).ljust(mc)} ({count / total * 100:.2f}%)" for uinfo, count in s) + "\n```",
color = client.color
))
else:
vis = set(message.channel.members)
counts = {}
ccount = {}
cname = {}
total = 0
failed = 0
for channel in message.guild.channels:
try:
if isinstance(channel, discord.TextChannel):
if set(channel.members) >= vis:
cname[channel.id] = channel.name
for uinfo, count in await stats(channel, v):
counts[uinfo] = counts.get(uinfo, 0) + count
ccount[channel.id] = ccount.get(channel.id, 0) + count
total += count
except:
failed += 1
mc = len(str(max(max(counts.values()), max(ccount.values()))))
ul = max(map(len, counts))
cl = max(map(len, cname.values()))
l = min(max(ul, cl), stats_length)
counts = sorted(counts.items(), key = lambda a: (-a[1], a[0]))
ccount = sorted(ccount.items(), key = lambda a: (-a[1], a[0]))
await send(message, embed = discord.Embed(
title = f"Server Stats for {message.guild.name}",
description = "```\n" + "\n".join(f"{uinfo.ljust(l)} {str(count).ljust(mc)} ({count / total * 100:.2f}%)" for uinfo, count in counts) +
"\n\n" + "\n".join(f"#{truncate(cname[cid].ljust(l - 1), stats_length - 1)} {str(count).ljust(mc)} ({count / total:.2f}%)" for cid, count in ccount) + "\n```",
color = client.color
))
if failed:
await send(message, f"Failed to index the results from {failed} channel{'s' * (failed != 1)}; likely this bot does not have permission to access them.")
@client.command("Miscellaneous Commands", ["blame"], "blame", "blame a random person in this channel (cannot blame any bots)")
async def command_blame(command, message):
members = []
for member in message.channel.members:
if not member.bot:
members.append(member)
await send(message, f"It was {random.choice(members).mention}'s fault!", allowed_mentions = discord.AllowedMentions.none())
@client.command("Miscellaneous Commands", ["spoiler", "image"], "spoiler image", "accept an image in a DM to spoiler (for mobile users)")
async def command_spoiler_image(command, message):
try:
await dm(message.author, f"The next image(s) you DM to me will be sent to {message.guild.name}#{message.channel.name} as a spoiler.")
await message.delete()
await set_data("dm_spoiler", message.author.id, message.channel.id)
except:
await send(message, "You need to allow me to DM you to use this feature!", reaction = "x")
@client.command("Miscellaneous Commands", ["color", "image"], "color image", "auto-color the next image you send in this channel with DeepAI")
async def command_color_image(command, message):
    await send(message, "The next image you send in this channel will be automatically colored with the power of Artificial Intelligence.")
await set_data("img_color", message.author.id, message.channel.id, 0)
async def nhentai(nhid, force = False):
if force or not await has_data("nhentai", nhid):
response = requests.get(f"https://nhentai.net/g/{nhid}")
if response.status_code == 404:
raise BotError("404 Not Found!")
elif response.status_code == 200:
t = response.text
urls = {x.replace("t.", "i.", 1).replace("t.", ".") for x in re.findall("https://t\\.nhentai\\.net/galleries/\\d+/\\d+t\\.\\w+", t)}
urls = sorted(urls, key = lambda s: [int(x) for x in re.findall("\\d+", s)])
title = re.findall("<span class=\"pretty\">\\s*(.+?)\\s*</span>", t)[0]
subtitle = re.findall("<span class=\"after\">\\s*(.+?)\\s*</span>", t)[0]
sauce = int(re.findall("\\d+", urls[0])[0])
await set_data("nhentai", nhid, (title, subtitle, sauce, urls))
return (title, subtitle, sauce, urls)
else:
raise BotError(f"Unknown error: {response.status_code}")
else:
return await get_data("nhentai", nhid)
@client.command("Genshin Commands", ["genshin", "info", "..."], "genshin info <item>", "get info on an item (must enter the internal ID; ask a developer if unsure but it's not too counterintuitive)")
async def command_genshin_info(command, message):
item = " ".join(command[3:]).lower()
await client.genshin_info(item, message.channel)
await message.add_reaction("✅")
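# Genshin resin tracking: resin regenerates at 1 per 8 minutes, so instead of storing an
# amount we store a synthetic "zero-resin" timestamp (now - 8 * 60 * amount). The current
# amount is then derived from the elapsed time and capped at the normal maximum of 160.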
async def resin_set(user, amt):
await set_data("genshin", "resin_info", user.id, time.time() - 8 * 60 * amt)
async def resin_rmd(user):
return await get_data("genshin", "resin_reminder", user.id, default = -1)
async def resin_amount(uid):
if await has_data("genshin", "resin_info", uid):
return min(160, (time.time() - await get_data("genshin", "resin_info", uid)) / 8 / 60)
else:
return -1
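# Format a duration in seconds as hours/minutes, e.g. hm(5400) -> "1h30", hm(480) -> "8m".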
def hm(s):
h, m = divmod(int(s // 60), 60)
return str(h) + "h" + str(m).zfill(2) if h else str(m) + "m"
@client.command("Genshin Commands", ["genshin", "resin", "set", "?"], "genshin resin set <amount>", "tell me how much resin you currently have")
async def command_genshin_resin_set(command, message):
amt = int(command[4])
await resin_set(message.author, amt)
cur = await resin_rmd(message.author)
msg = await send(message, "Set your resin!" + ("" if cur == -1 else f" Your existing reminder, set for {cur} resin, will occur in {hm(8 * 60 * (cur - amt))}."))
if message.guild:
await message.delete(delay = 5)
await msg.delete(delay = 5)
@client.command("Genshin Commands", ["genshin", "resin", "now"], "genshin resin now", "check how much resin you currently have")
async def command_genshin_resin_now(command, message):
amt = await resin_amount(message.author.id)
cur = await resin_rmd(message.author)
if amt == -1:
await send(message, "You haven't told me how much resin you have yet!", reaction = "x")
else:
await send(message, f"You currently have {int(amt)} resin!" + ("" if cur == -1 else f" Your reminder, set for {cur} resin, will occur in {hm(8 * 60 * (cur - amt))}."))
@client.command("Genshin Commands", ["genshin", "resin", "reminder"], "genshin resin reminder [[amount] <desired = 160>] / stop", "set / stop a reminder for when you reach a specific amount of resin; your current amount is optional if you've already set your resin amount")
@client.command("", ["genshin", "resin", "reminder", "?"], "", "")
@client.command("", ["genshin", "resin", "reminder", "?", "?"], "", "")
async def command_genshin_resin_reminder(command, message):
if len(command) == 5 and command[4] == "stop":
msg = await send(message, "I will no longer remind you about your resin!")
await del_data("genshin", "resin_reminder", message.author.id)
else:
if len(command) <= 5:
if not await has_data("genshin", "resin_info", message.author.id):
raise BotError("You need to tell me how much resin you have with `genshin resin set` or specify the amount you currently have!")
des = int(command[4]) if len(command) == 5 else 160
amt = await resin_amount(message.author.id)
else:
amt = int(command[4])
await resin_set(message.author, amt)
des = int(command[5])
if des > 160:
raise BotError("You cannot have more than 160 resin without using Fragile Resin to exceed that cap manually!")
if des <= amt:
raise BotError("You already have that much resin!")
cur = await resin_rmd(message.author)
if cur == -1:
msg = await send(message, f"I will remind you when you reach {des} resin (in {hm(8 * 60 * (des - amt))})!")
else:
msg = await send(message, f"You previously had a reminder for when you reached {cur} resin; I will instead remind you when you reach {des} (in {hm(8 * 60 * (des - amt))})!")
await set_data("genshin", "resin_reminder", message.author.id, des)
if message.guild:
await message.delete(delay = 5)
await msg.delete(delay = 5)
@client.command("", [("nhentai", "fnhentai"), "?"], "", "")
async def command_nhentai(command, message):
nhid = int(command[2])
title, subtitle, sauce, urls = await nhentai(nhid, command[1] == "fnhentai")
reply = await send(message, embed = discord.Embed(title = title + " " + subtitle, url = f"https://nhentai.net/g/{nhid}", description = f"Page 1 / {len(urls)}").set_image(url = urls[0]))
await reply.add_reaction("⬅️")
await reply.add_reaction("➡️")
await set_data("nhentai_embed", reply.id, (nhid, 0))
import httpx
import img2pdf, os
from PIL import Image
from PyPDF3 import PdfFileMerger
from io import BytesIO
async def get_async(url):
async with httpx.AsyncClient() as client:
return await client.get(url)
@client.command("", ["nhdownload", "?"], "", "")
async def command_nhdownload(command, message):
async with message.channel.typing():
nhid = int(command[2])
title, subtitle, sauce, urls = await nhentai(nhid, True)
try:
os.mkdir(f"/tmp/{nhid}")
except:
pass
merger = PdfFileMerger()
responses = await asyncio.gather(*map(get_async, urls))
for page, r in enumerate(responses):
pdf_path = f"/tmp/{nhid}/{page}.pdf"
pdf_bytes = img2pdf.convert(r.content)
with open(pdf_path, "wb") as f:
f.write(pdf_bytes)
merger.append(pdf_path)
final_path = f"/tmp/{nhid}/final.pdf"
merger.write(final_path)
merger.close()
try:
with open(final_path, "rb") as f:
await send(message, file = discord.File(fp = f, filename = f"[{nhid}] {title}.pdf"))
except:
await send(message, f"The file is too large to upload; you can access it here: https://dev.hyper-neutrino.xyz/nh/{nhid}")
@client.command("", lambda m: True, "", "")
async def command_image_spoiler_reply(command, message):
if type(message.channel) == discord.DMChannel:
if len(message.attachments) > 0:
if await has_data("dm_spoiler", message.author.id):
await client.get_channel(await get_data("dm_spoiler", message.author.id)).send(files = [(await attachment.to_file(spoiler = True)) for attachment in message.attachments])
await del_data("dm_spoiler", message.author.id)
@client.command("", lambda m: True, "", "")
async def command_image_color_reply(command, message):
if len(message.attachments) > 0:
if await has_data("img_color", message.author.id, message.channel.id):
r = requests.post("https://api.deepai.org/api/colorizer", data = {"image": message.attachments[0].url}, headers = {"api-key": "551549c3-8d2c-426b-ae9f-9211b13e6f14"})
await send(message, r.json()["output_url"])
await del_data("img_color", message.author.id, message.channel.id)
@client.command("", ["echo", "..."], "echo <message>", "echo the message")
async def command_echo(command, message):
await send(message, message.content[message.content.find("echo") + 4:])
@client.command("", ["say", "..."], "say <message>", "echo, then immediately delete the command")
async def command_say(command, message):
await send(message, message.content[message.content.find("say") + 3:])
await message.delete()
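# Developer/debugging commands: eval, exec, adjust ehecd and data are restricted to the
# user IDs listed under config["sudo"]; code blocks may be wrapped in ``` fences.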
@client.command("", ["eval", "?", "..."], "eval <expr>", "evaluate a Python expression in a command function's scope")
async def command_eval(command, message):
if message.author.id not in config["sudo"]:
await send(message, "You must be a sudo user to do that!", reaction = "x")
else:
try:
code = message.content[message.content.find("eval") + 4:].strip()
if code.startswith("```python"):
code = code[9:]
elif code.startswith("```py"):
code = code[5:]
code = code.strip("`")
await send(message, str(eval(code))[:2000])
except:
await send(message, "Error evaluating expression!", reaction = "x")
@client.command("", ["exec", "?", "..."], "exec <code>", "execute Python code in a command function's scope (print is replaced with message output)")
async def command_exec(command, message):
if message.author.id not in config["sudo"]:
await send(message, "You must be a sudo user to do that!", reaction = "x")
else:
try:
code = message.content[message.content.find("exec") + 4:].strip()
if code.startswith("```python"):
code = code[9:]
elif code.startswith("```py"):
code = code[5:]
code = code.strip("`")
output = []
def print(*items, end = "\n", sep = " "):
output.extend(list(sep.join(map(str, items)) + end))
exec(code)
await send(message, "```python\n" + "".join(output[:1980]) + "\n```")
except:
await send(message, "Error executing expression!", reaction = "x")
@client.command("", ["adjust", "ehecd", "?"], "adjust ehecd <x>", "adjust the cooldown of ehe te nandayo")
async def command_adjust_ehecd(command, message):
if message.author.id not in config["sudo"]:
await send(message, "You must be a sudo user to do that!", reaction = "x")
else:
try:
await set_data("ehecd", int(command[3]))
await send(message, f"Cooldown of 'ehe te nandayo' is now {command[3]} second{'s' * (command[3] != '1')}!")
except:
await send(message, "Error; make sure you entered an integer!", reaction = "x")
@client.command("", ["data", "..."], "data", "fetch data from the bot")
async def command_data(command, message):
if message.author.id not in config["sudo"]:
await send(message, "You must be a sudo user to do that!")
else:
await send(message, "```python\n" + str(await get_data(*map(eval, command[2:]), default = None, set_if_missing = False))[:1980] + "\n```")
@client.command("", ["identify", "?"], "identify <user>", "identify a user")
async def command_identify(command, message):
member = await get_member(message.guild, command[2], message.author)
await send(message, f"Identified {member.name}#{member.discriminator}, a.k.a {member.display_name}, I.D. {member.id} ({member.mention})", allowed_mentions = discord.AllowedMentions.none())
@client.command("", ["emoji", "?", "-"], "", "")
@client.command("", ["emoji", "?"], "emoji <lookup> [-]", "post an emoji by lookup ID")
async def command_emoji(command, message):
try:
await send(message, str(emoji(command[2])))
if len(command) == 4:
await message.delete()
except:
await send(message, "That resulted in an error.", reaction = "x")
raise
@client.command("", [("summary", "summarize"), "?"], "", "")
@client.command("", [("summary", "summarize"), "?", "?"], "", "")
@client.command("", [("summary", "summarize"), "?", "?", "?"], "", "")
async def command_summarize(command, message):
url = command[2]
if url[0] == "<" and url[-1] == ">":
url = url[1:-1]
await message.edit(suppress = True)
rurl = f"https://api.smmry.com/?SM_API_KEY={config['api-keys']['sm']}"
if len(command) >= 4:
rurl += "&SM_LENGTH=" + command[3]
if len(command) >= 5:
rurl += "&SM_KEYWORD_COUNT=" + command[4]
rurl += "&SM_URL=" + url
r = requests.get(rurl)
data = r.json()
if "sm_api_error" in data:
error = data["sm_api_error"]
if error == 0:
await send(message, "Internal server problem with the SMMRY API; this is not your fault. Try again later.", reaction = "x")
elif error == 1:
await send(message, "Parameters are invalid. Check that you entered a real URL; otherwise, contact a developer.", reaction = "x")
elif error == 2:
await send(message, "This request has intentionally been restricted. Perhaps you have expended the API key's limit (100 per day).", reaction = "x")
elif error == 3:
await send(message, "Summarization error. This website might not be summarizable.")
else:
await send(message, (f"**{data['sm_api_title'].strip() or '(no title)'}**\n\n{data['sm_api_content'].strip() or '(no content)'}")[:2000])
if "sm_api_keyword_array" in data:
await message.channel.send(f"**Keywords**: {', '.join(data['sm_api_keyword_array'])}")
@client.command("", ["tsr", "?"], "", "")
async def command_toggle_suppress_reacts(command, message):
member = await get_member(message.guild, command[2], message.author)
await mod_data("tsr", lambda x: x ^ {member.id}, default = set())
await message.add_reaction("✅")
@client.command("", ["react", "..."], "", "")
async def command_react(command, message):
if not message.reference or not message.reference.resolved:
raise BotError("You need to refer to a message via reply!")
fails = []
for x in command[2:]:
try:
await message.reference.resolved.add_reaction(emoji(x))
except:
fails.append(x)
if fails:
await send(message, "The following emojis do not exist / could not have been added: " + ", ".join(fails))
else:
await message.delete()
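# The handlers below are currently disabled (their @client.command decorators are
# commented out): the "ehe te nandayo" easter egg with its cooldown, [emoji] auto-reacts,
# the AOC expansion, and the "69 -> nice" reply.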
# @client.command("", re.compile(r"\b[hH]?[eE][hH][eE]\b").search, "", "")
async def command_ehe_te_nandayo(command, message):
if message.author != client.user and time.time() - await get_data("ehe", message.author.id, default = 0) > (await get_data("ehecd", default = 30)):
await send(message, "**ehe te nandayo!?**", reaction = "?")
await set_data("ehe", message.author.id, time.time())
# @client.command("", re.compile(r"\[\w+\]").search, "", "")
async def command_emoji_react(command, message):
for c in re.findall(r"\[(\w+)\]", message.content):
try:
await message.add_reaction(emoji(c))
except:
pass
# @client.command("", re.compile(r"\b[Aa][Oo][Cc]\b").search, "", "")
async def command_aoc(command, message):
await message.channel.send("Alexandria Ocasio-Cortez")
# @client.command("", ["toggle69"], "", "")
async def command_toggle69(command, message):
await set_data("disable_69", not await get_data("disable_69", default = False))
await message.add_reaction("✅")
# @client.command("", re.compile(r"\b69\b").search, "", "")
async def command_69(command, message):
if await get_data("disable_69", default = False):
return
await message.reply("nice", mention_author = False)