Dataset columns (name: type, with min/max string lengths where given):

hexsha: stringlengths 40 to 40
repo: stringlengths 5 to 121
path: stringlengths 4 to 227
license: sequence
language: stringclasses (1 value)
identifier: stringlengths 1 to 160
return_type: stringlengths 2 to 354
original_string: stringlengths 57 to 438k
original_docstring: stringlengths 13 to 88.1k
docstring: stringlengths 13 to 2.86k
docstring_tokens: sequence
code: stringlengths 16 to 437k
code_tokens: sequence
short_docstring: stringlengths 1 to 1.58k
short_docstring_tokens: sequence
comment: sequence
parameters: list
docstring_params: dict
code_with_imports: stringlengths 16 to 437k
31bd537a3d1572c9fa6aeb3baecb55a4e485344d
fasaxc/clowder
calicoctl/tests/st/utils/utils.py
[ "Apache-2.0" ]
Python
clean_calico_data
<not_specific>
def clean_calico_data(data, extra_keys_to_remove=None):
    """
    Clean the data returned from a calicoctl get command to remove empty
    structs, null values and non-configurable fields. This makes comparison
    with the input data much simpler.

    Args:
        data: The data to clean.
        extra_keys_to_remove: more keys to remove if needed.

    Returns: The cleaned data.

    """
    new = copy.deepcopy(data)

    # Recursively delete empty structs / nil values and non-configurable
    # fields.
    def clean_elem(elem, extra_keys):
        if isinstance(elem, list):
            # Loop through each element in the list
            for i in elem:
                clean_elem(i, extra_keys)
        if isinstance(elem, dict):
            # Remove non-settable fields, and recursively clean each value of
            # the dictionary, removing nil values or values that are empty
            # dicts after cleaning.
            del_keys = ['creationTimestamp', 'resourceVersion', 'uid']
            if extra_keys is not None:
                for extra_key in extra_keys:
                    del_keys.append(extra_key)
            for k, v in elem.items():
                clean_elem(v, extra_keys)
                if v is None or v == {}:
                    del_keys.append(k)
            for k in del_keys:
                if k in elem:
                    del(elem[k])

    clean_elem(new, extra_keys_to_remove)
    return new
Clean the data returned from a calicoctl get command to remove empty structs, null values and non-configurable fields. This makes comparison with the input data much simpler. Args: data: The data to clean. extra_keys_to_remove: more keys to remove if needed. Returns: The cleaned data.
Clean the data returned from a calicoctl get command to remove empty structs, null values and non-configurable fields. This makes comparison with the input data much simpler.
[ "Clean", "the", "data", "returned", "from", "a", "calicoctl", "get", "command", "to", "remove", "empty", "structs", "null", "values", "and", "non", "-", "configurable", "fields", ".", "This", "makes", "comparison", "with", "the", "input", "data", "much", "simpler", "." ]
def clean_calico_data(data, extra_keys_to_remove=None):
    new = copy.deepcopy(data)
    def clean_elem(elem, extra_keys):
        if isinstance(elem, list):
            for i in elem:
                clean_elem(i, extra_keys)
        if isinstance(elem, dict):
            del_keys = ['creationTimestamp', 'resourceVersion', 'uid']
            if extra_keys is not None:
                for extra_key in extra_keys:
                    del_keys.append(extra_key)
            for k, v in elem.items():
                clean_elem(v, extra_keys)
                if v is None or v == {}:
                    del_keys.append(k)
            for k in del_keys:
                if k in elem:
                    del(elem[k])
    clean_elem(new, extra_keys_to_remove)
    return new
[ "def", "clean_calico_data", "(", "data", ",", "extra_keys_to_remove", "=", "None", ")", ":", "new", "=", "copy", ".", "deepcopy", "(", "data", ")", "def", "clean_elem", "(", "elem", ",", "extra_keys", ")", ":", "if", "isinstance", "(", "elem", ",", "list", ")", ":", "for", "i", "in", "elem", ":", "clean_elem", "(", "i", ",", "extra_keys", ")", "if", "isinstance", "(", "elem", ",", "dict", ")", ":", "del_keys", "=", "[", "'creationTimestamp'", ",", "'resourceVersion'", ",", "'uid'", "]", "if", "extra_keys", "is", "not", "None", ":", "for", "extra_key", "in", "extra_keys", ":", "del_keys", ".", "append", "(", "extra_key", ")", "for", "k", ",", "v", "in", "elem", ".", "iteritems", "(", ")", ":", "clean_elem", "(", "v", ",", "extra_keys", ")", "if", "v", "is", "None", "or", "v", "==", "{", "}", ":", "del_keys", ".", "append", "(", "k", ")", "for", "k", "in", "del_keys", ":", "if", "k", "in", "elem", ":", "del", "(", "elem", "[", "k", "]", ")", "clean_elem", "(", "new", ",", "extra_keys_to_remove", ")", "return", "new" ]
Clean the data returned from a calicoctl get command to remove empty structs, null values and non-configurable fields.
[ "Clean", "the", "data", "returned", "from", "a", "calicoctl", "get", "command", "to", "remove", "empty", "structs", "null", "values", "and", "non", "-", "configurable", "fields", "." ]
[ "\"\"\"\n Clean the data returned from a calicoctl get command to remove empty\n structs, null values and non-configurable fields. This makes comparison\n with the input data much simpler.\n\n Args:\n data: The data to clean.\n extra_keys_to_remove: more keys to remove if needed.\n\n Returns: The cleaned data.\n\n \"\"\"", "# Recursively delete empty structs / nil values and non-configurable", "# fields.", "# Loop through each element in the list", "# Remove non-settable fields, and recursively clean each value of", "# the dictionary, removing nil values or values that are empty", "# dicts after cleaning." ]
[ { "param": "data", "type": null }, { "param": "extra_keys_to_remove", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "data", "type": null, "docstring": "The data to clean.", "docstring_tokens": [ "The", "data", "to", "clean", "." ], "default": null, "is_optional": null }, { "identifier": "extra_keys_to_remove", "type": null, "docstring": "more keys to remove if needed.", "docstring_tokens": [ "more", "keys", "to", "remove", "if", "needed", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import copy

def clean_calico_data(data, extra_keys_to_remove=None):
    new = copy.deepcopy(data)
    def clean_elem(elem, extra_keys):
        if isinstance(elem, list):
            for i in elem:
                clean_elem(i, extra_keys)
        if isinstance(elem, dict):
            del_keys = ['creationTimestamp', 'resourceVersion', 'uid']
            if extra_keys is not None:
                for extra_key in extra_keys:
                    del_keys.append(extra_key)
            for k, v in elem.items():
                clean_elem(v, extra_keys)
                if v is None or v == {}:
                    del_keys.append(k)
            for k in del_keys:
                if k in elem:
                    del(elem[k])
    clean_elem(new, extra_keys_to_remove)
    return new
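A minimal usage sketch for clean_calico_data above; the resource dict is illustrative, not output from a real calicoctl run:

resource = {
    "metadata": {"name": "policy-1", "creationTimestamp": "2020-01-01T00:00:00Z",
                 "uid": "abc-123", "labels": {}},
    "spec": {"order": None, "selector": "all()"},
}
cleaned = clean_calico_data(resource, extra_keys_to_remove=["labels"])
# Non-configurable fields, the null value and the empty dict are gone:
assert cleaned == {"metadata": {"name": "policy-1"}, "spec": {"selector": "all()"}}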
31bd537a3d1572c9fa6aeb3baecb55a4e485344d
fasaxc/clowder
calicoctl/tests/st/utils/utils.py
[ "Apache-2.0" ]
Python
name
<not_specific>
def name(data):
    """
    Returns the name of the resource in the supplied data
    Args:
        data: A dictionary containing the resource.

    Returns: The resource name.
    """
    return data['metadata']['name']
Returns the name of the resource in the supplied data Args: data: A dictionary containing the resource. Returns: The resource name.
Returns the name of the resource in the supplied data
[ "Returns", "the", "name", "of", "the", "resource", "in", "the", "supplied", "data" ]
def name(data): return data['metadata']['name']
[ "def", "name", "(", "data", ")", ":", "return", "data", "[", "'metadata'", "]", "[", "'name'", "]" ]
Returns the name of the resource in the supplied data
[ "Returns", "the", "name", "of", "the", "resource", "in", "the", "supplied", "data" ]
[ "\"\"\"\n Returns the name of the resource in the supplied data\n Args:\n data: A dictionary containing the resource.\n\n Returns: The resource name.\n \"\"\"" ]
[ { "param": "data", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "data", "type": null, "docstring": "A dictionary containing the resource.", "docstring_tokens": [ "A", "dictionary", "containing", "the", "resource", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def name(data): return data['metadata']['name']
31bd537a3d1572c9fa6aeb3baecb55a4e485344d
fasaxc/clowder
calicoctl/tests/st/utils/utils.py
[ "Apache-2.0" ]
Python
namespace
<not_specific>
def namespace(data):
    """
    Returns the namespace of the resource in the supplied data
    Args:
        data: A dictionary containing the resource.

    Returns: The resource namespace.
    """
    return data['metadata']['namespace']
Returns the namespace of the resource in the supplied data Args: data: A dictionary containing the resource. Returns: The resource namespace.
Returns the namespace of the resource in the supplied data
[ "Returns", "the", "namespace", "of", "the", "resource", "in", "the", "supplied", "data" ]
def namespace(data): return data['metadata']['namespace']
[ "def", "namespace", "(", "data", ")", ":", "return", "data", "[", "'metadata'", "]", "[", "'namespace'", "]" ]
Returns the namespace of the resource in the supplied data
[ "Returns", "the", "namespace", "of", "the", "resource", "in", "the", "supplied", "data" ]
[ "\"\"\"\n Returns the namespace of the resource in the supplied data\n Args:\n data: A dictionary containing the resource.\n\n Returns: The resource name.\n \"\"\"" ]
[ { "param": "data", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "data", "type": null, "docstring": "A dictionary containing the resource.", "docstring_tokens": [ "A", "dictionary", "containing", "the", "resource", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def namespace(data): return data['metadata']['namespace']
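A usage sketch covering the name and namespace accessors above, on a made-up resource dict:

resource = {"metadata": {"name": "web-1", "namespace": "default"}}
assert name(resource) == "web-1"
assert namespace(resource) == "default"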
9ebc8da0ad2a9f6b5b1079c09e6e80593a1a6bac
OdiaNLP/spelling-correction
utils.py
[ "MIT" ]
Python
edit_distance
int
def edit_distance(s1: str, s2: str) -> int:
    """Compute edit distance between two strings using dynamic programming.
    Lifted from: https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python"""
    if len(s1) < len(s2):
        return edit_distance(s2, s1)

    # len(s1) >= len(s2)
    if len(s2) == 0:
        return len(s1)

    previous_row = range(len(s2) + 1)
    for i, c1 in enumerate(s1):
        current_row = [i + 1]
        for j, c2 in enumerate(s2):
            insertions = previous_row[j + 1] + 1  # j+1 instead of j since previous_row and
            # current_row are one character longer than s2
            deletions = current_row[j] + 1
            substitutions = previous_row[j] + (c1 != c2)
            current_row.append(min(insertions, deletions, substitutions))
        previous_row = current_row

    return previous_row[-1]
Compute edit distance between two strings using dynamic programming. Lifted from: https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python
Compute edit distance between two strings using dynamic programming.
[ "Compute", "edit", "distance", "between", "two", "strings", "using", "dynamic", "programmic", "." ]
def edit_distance(s1: str, s2: str) -> int:
    if len(s1) < len(s2):
        return edit_distance(s2, s1)
    if len(s2) == 0:
        return len(s1)
    previous_row = range(len(s2) + 1)
    for i, c1 in enumerate(s1):
        current_row = [i + 1]
        for j, c2 in enumerate(s2):
            insertions = previous_row[j + 1] + 1
            deletions = current_row[j] + 1
            substitutions = previous_row[j] + (c1 != c2)
            current_row.append(min(insertions, deletions, substitutions))
        previous_row = current_row
    return previous_row[-1]
[ "def", "edit_distance", "(", "s1", ":", "str", ",", "s2", ":", "str", ")", "->", "int", ":", "if", "len", "(", "s1", ")", "<", "len", "(", "s2", ")", ":", "return", "edit_distance", "(", "s2", ",", "s1", ")", "if", "len", "(", "s2", ")", "==", "0", ":", "return", "len", "(", "s1", ")", "previous_row", "=", "range", "(", "len", "(", "s2", ")", "+", "1", ")", "for", "i", ",", "c1", "in", "enumerate", "(", "s1", ")", ":", "current_row", "=", "[", "i", "+", "1", "]", "for", "j", ",", "c2", "in", "enumerate", "(", "s2", ")", ":", "insertions", "=", "previous_row", "[", "j", "+", "1", "]", "+", "1", "deletions", "=", "current_row", "[", "j", "]", "+", "1", "substitutions", "=", "previous_row", "[", "j", "]", "+", "(", "c1", "!=", "c2", ")", "current_row", ".", "append", "(", "min", "(", "insertions", ",", "deletions", ",", "substitutions", ")", ")", "previous_row", "=", "current_row", "return", "previous_row", "[", "-", "1", "]" ]
Compute edit distance between two strings using dynamic programming.
[ "Compute", "edit", "distance", "between", "two", "strings", "using", "dynamic", "programmic", "." ]
[ "\"\"\"Compute edit distance between two strings using dynamic programmic.\n Lifted from: https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python\"\"\"", "# len(s1) >= len(s2)", "# j+1 instead of j since previous_row and", "# current_row are one character longer than s2" ]
[ { "param": "s1", "type": "str" }, { "param": "s2", "type": "str" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "s1", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "s2", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def edit_distance(s1: str, s2: str) -> int:
    if len(s1) < len(s2):
        return edit_distance(s2, s1)
    if len(s2) == 0:
        return len(s1)
    previous_row = range(len(s2) + 1)
    for i, c1 in enumerate(s1):
        current_row = [i + 1]
        for j, c2 in enumerate(s2):
            insertions = previous_row[j + 1] + 1
            deletions = current_row[j] + 1
            substitutions = previous_row[j] + (c1 != c2)
            current_row.append(min(insertions, deletions, substitutions))
        previous_row = current_row
    return previous_row[-1]
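A few worked examples of the edit_distance function above, using classic Levenshtein test values:

assert edit_distance("kitten", "sitting") == 3   # substitute k->s, e->i, insert g
assert edit_distance("", "abc") == 3             # against the empty string, distance is the other length
assert edit_distance("flaw", "lawn") == 2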
f8914201b858c40768ea60a99d03e878d6b81db8
nataliyah123/phageParser
util/acc.py
[ "MIT" ]
Python
read_accession_file
null
def read_accession_file(f):
    """
    Read an open accession file, returning the list of accession numbers it
    contains.

    This automatically skips blank lines and comments.
    """
    for line in f:
        line = line.strip()
        if not line or line.startswith('#'):
            continue
        yield line
Read an open accession file, returning the list of accession numbers it contains. This automatically skips blank lines and comments.
Read an open accession file, returning the list of accession numbers it contains. This automatically skips blank lines and comments.
[ "Read", "an", "open", "accession", "file", "returning", "the", "list", "of", "accession", "numbers", "it", "contains", ".", "This", "automatically", "skips", "blank", "lines", "and", "comments", "." ]
def read_accession_file(f):
    for line in f:
        line = line.strip()
        if not line or line.startswith('#'):
            continue
        yield line
[ "def", "read_accession_file", "(", "f", ")", ":", "for", "line", "in", "f", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "not", "line", "or", "line", ".", "startswith", "(", "'#'", ")", ":", "continue", "yield", "line" ]
Read an open accession file, returning the list of accession numbers it contains.
[ "Read", "an", "open", "accession", "file", "returning", "the", "list", "of", "accession", "numbers", "it", "contains", "." ]
[ "\"\"\"\n Read an open accession file, returning the list of accession numbers it\n contains.\n\n This automatically skips blank lines and comments.\n \"\"\"" ]
[ { "param": "f", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "f", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def read_accession_file(f):
    for line in f:
        line = line.strip()
        if not line or line.startswith('#'):
            continue
        yield line
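A usage sketch for read_accession_file above, fed an in-memory file; the accession numbers are only examples:

import io
handle = io.StringIO("NC_000913.3\n# a comment\n\nNC_002695.2\n")
assert list(read_accession_file(handle)) == ["NC_000913.3", "NC_002695.2"]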
963db2c08d1590debdaf46085464e8392c243870
xolox/python-rsync-system-backup
rsync_system_backup/__init__.py
[ "MIT" ]
Python
ensure_trailing_slash
<not_specific>
def ensure_trailing_slash(expression):
    """
    Add a trailing slash to rsync source/destination locations.

    :param expression: The rsync source/destination expression (a string).
    :returns: The same expression with exactly one trailing slash.
    """
    if expression:
        # Strip any existing trailing slashes.
        expression = expression.rstrip('/')
        # Add exactly one trailing slash.
        expression += '/'
    return expression
Add a trailing slash to rsync source/destination locations. :param expression: The rsync source/destination expression (a string). :returns: The same expression with exactly one trailing slash.
Add a trailing slash to rsync source/destination locations.
[ "Add", "a", "trailing", "slash", "to", "rsync", "source", "/", "destination", "locations", "." ]
def ensure_trailing_slash(expression):
    if expression:
        expression = expression.rstrip('/')
        expression += '/'
    return expression
[ "def", "ensure_trailing_slash", "(", "expression", ")", ":", "if", "expression", ":", "expression", "=", "expression", ".", "rstrip", "(", "'/'", ")", "expression", "+=", "'/'", "return", "expression" ]
Add a trailing slash to rsync source/destination locations.
[ "Add", "a", "trailing", "slash", "to", "rsync", "source", "/", "destination", "locations", "." ]
[ "\"\"\"\n Add a trailing slash to rsync source/destination locations.\n\n :param expression: The rsync source/destination expression (a string).\n :returns: The same expression with exactly one trailing slash.\n \"\"\"", "# Strip any existing trailing slashes.", "# Add exactly one trailing slash." ]
[ { "param": "expression", "type": null } ]
{ "returns": [ { "docstring": "The same expression with exactly one trailing slash.", "docstring_tokens": [ "The", "same", "expression", "with", "exactly", "one", "trailing", "slash", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "expression", "type": null, "docstring": "The rsync source/destination expression (a string).", "docstring_tokens": [ "The", "rsync", "source", "/", "destination", "expression", "(", "a", "string", ")", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def ensure_trailing_slash(expression):
    if expression:
        expression = expression.rstrip('/')
        expression += '/'
    return expression
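A behaviour sketch for ensure_trailing_slash above, including the falsy-input edge case:

assert ensure_trailing_slash("/backups/home") == "/backups/home/"
assert ensure_trailing_slash("/backups/home///") == "/backups/home/"
assert ensure_trailing_slash("") == ""  # falsy input is returned unchanged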
6bcf19cc2ef1c9616b663c229fa983de85a420fa
petrpavlu/storepass
storepass/utils.py
[ "MIT" ]
Python
escape_bytes
<not_specific>
def escape_bytes(bytes_):
    """
    Convert a bytes object to an escaped string.

    Convert bytes to an ASCII string. Non-printable characters and a single
    quote (') are escaped. This allows formatting bytes in messages as
    f"b'{utils.escape_bytes(bytes)}'".
    """
    res = ""
    for byte in bytes_:
        char = chr(byte)
        if char == '\\':
            res += "\\\\"
        elif char == '\'':
            res += "\\'"
        elif char in (string.digits + string.ascii_letters +
                      string.punctuation + ' '):
            res += char
        else:
            res += "\\x%0.2x" % byte
    return res
Convert a bytes object to an escaped string. Convert bytes to an ASCII string. Non-printable characters and a single quote (') are escaped. This allows formatting bytes in messages as f"b'{utils.escape_bytes(bytes)}'".
Convert a bytes object to an escaped string. Convert bytes to an ASCII string. Non-printable characters and a single quote (') are escaped.
[ "Convert", "a", "bytes", "object", "to", "an", "escaped", "string", ".", "Convert", "bytes", "to", "an", "ASCII", "string", ".", "Non", "-", "printable", "characters", "and", "a", "single", "quote", "(", "'", ")", "are", "escaped", "." ]
def escape_bytes(bytes_):
    res = ""
    for byte in bytes_:
        char = chr(byte)
        if char == '\\':
            res += "\\\\"
        elif char == '\'':
            res += "\\'"
        elif char in (string.digits + string.ascii_letters +
                      string.punctuation + ' '):
            res += char
        else:
            res += "\\x%0.2x" % byte
    return res
[ "def", "escape_bytes", "(", "bytes_", ")", ":", "res", "=", "\"\"", "for", "byte", "in", "bytes_", ":", "char", "=", "chr", "(", "byte", ")", "if", "char", "==", "'\\\\'", ":", "res", "+=", "\"\\\\\\\\\"", "elif", "char", "==", "'\\''", ":", "res", "+=", "\"\\\\'\"", "elif", "char", "in", "(", "string", ".", "digits", "+", "string", ".", "ascii_letters", "+", "string", ".", "punctuation", "+", "' '", ")", ":", "res", "+=", "char", "else", ":", "res", "+=", "\"\\\\x%0.2x\"", "%", "byte", "return", "res" ]
Convert a bytes object to an escaped string.
[ "Convert", "a", "bytes", "object", "to", "an", "escaped", "string", "." ]
[ "\"\"\"\n Convert a bytes object to an escaped string.\n\n Convert bytes to an ASCII string. Non-printable characters and a single\n quote (') are escaped. This allows to format bytes in messages as\n f\"b'{utils.escape_bytes(bytes)}'\".\n \"\"\"" ]
[ { "param": "bytes_", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "bytes_", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import string

def escape_bytes(bytes_):
    res = ""
    for byte in bytes_:
        char = chr(byte)
        if char == '\\':
            res += "\\\\"
        elif char == '\'':
            res += "\\'"
        elif char in (string.digits + string.ascii_letters +
                      string.punctuation + ' '):
            res += char
        else:
            res += "\\x%0.2x" % byte
    return res
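Examples for escape_bytes above; the result is a plain str with quotes, backslashes and non-printables escaped:

assert escape_bytes(b"abc 123") == "abc 123"
assert escape_bytes(b"a'b\\c") == "a\\'b\\\\c"      # quote and backslash escaped
assert escape_bytes(b"\x00\xff") == "\\x00\\xff"    # non-printables hex-escaped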
46c1ced6778e7bf0021180efba652ba8cf0721e3
petrpavlu/storepass
storepass/cli/__main__.py
[ "MIT" ]
Python
_check_entry_name
<not_specific>
def _check_entry_name(args):
    """Validate an entry name specified on the command line."""
    # Reject an empty entry name.
    if args.entry == '':
        print("Specified entry name is empty", file=sys.stderr)
        return 1
    return 0
Validate an entry name specified on the command line.
Validate an entry name specified on the command line.
[ "Validate", "an", "entry", "name", "specified", "on", "the", "command", "line", "." ]
def _check_entry_name(args):
    if args.entry == '':
        print("Specified entry name is empty", file=sys.stderr)
        return 1
    return 0
[ "def", "_check_entry_name", "(", "args", ")", ":", "if", "args", ".", "entry", "==", "''", ":", "print", "(", "\"Specified entry name is empty\"", ",", "file", "=", "sys", ".", "stderr", ")", "return", "1", "return", "0" ]
Validate an entry name specified on the command line.
[ "Validate", "an", "entry", "name", "specified", "on", "the", "command", "line", "." ]
[ "\"\"\"Validate an entry name specified on the command line.\"\"\"", "# Reject an empty entry name." ]
[ { "param": "args", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "args", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import sys

def _check_entry_name(args):
    if args.entry == '':
        print("Specified entry name is empty", file=sys.stderr)
        return 1
    return 0
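A small sketch driving _check_entry_name above, with an argparse.Namespace standing in for parsed CLI arguments:

import argparse
assert _check_entry_name(argparse.Namespace(entry="")) == 1   # error printed to stderr
assert _check_entry_name(argparse.Namespace(entry="bank/checking")) == 0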
46c1ced6778e7bf0021180efba652ba8cf0721e3
petrpavlu/storepass
storepass/cli/__main__.py
[ "MIT" ]
Python
_process_init_command
<not_specific>
def _process_init_command(args, _model):
    """Handle the init command: create an empty password database."""
    assert args.command == 'init'

    # Keep the model empty and let the main() function write out the database.
    return 0
Handle the init command: create an empty password database.
Handle the init command: create an empty password database.
[ "Handle", "the", "init", "command", ":", "create", "an", "empty", "password", "database", "." ]
def _process_init_command(args, _model):
    assert args.command == 'init'
    return 0
[ "def", "_process_init_command", "(", "args", ",", "_model", ")", ":", "assert", "args", ".", "command", "==", "'init'", "return", "0" ]
Handle the init command: create an empty password database.
[ "Handle", "the", "init", "command", ":", "create", "an", "empty", "password", "database", "." ]
[ "\"\"\"Handle the init command: create an empty password database.\"\"\"", "# Keep the model empty and let the main() function write out the database." ]
[ { "param": "args", "type": null }, { "param": "_model", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "args", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "_model", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _process_init_command(args, _model):
    assert args.command == 'init'
    return 0
54ac2b165f2db32a16fb2e82e078d1d199bae23c
petrpavlu/storepass
tests/utils.py
[ "MIT" ]
Python
dedent2
<not_specific>
def dedent2(text):
    """
    Remove any common leading whitespace + '|' from every line in a given text.

    Remove any common leading whitespace + character '|' from every line in a
    given text.
    """
    output = ''
    lines = textwrap.dedent(text).splitlines(True)
    for line in lines:
        assert line[:1] == '|'
        output += line[1:]
    return output
Remove any common leading whitespace + '|' from every line in a given text. Remove any common leading whitespace + character '|' from every line in a given text.
Remove any common leading whitespace + '|' from every line in a given text. Remove any common leading whitespace + character '|' from every line in a given text.
[ "Remove", "any", "common", "leading", "whitespace", "+", "'", "|", "'", "from", "every", "line", "in", "a", "given", "text", ".", "Remove", "any", "common", "leading", "whitespace", "+", "character", "'", "|", "'", "from", "every", "line", "in", "a", "given", "text", "." ]
def dedent2(text):
    output = ''
    lines = textwrap.dedent(text).splitlines(True)
    for line in lines:
        assert line[:1] == '|'
        output += line[1:]
    return output
[ "def", "dedent2", "(", "text", ")", ":", "output", "=", "''", "lines", "=", "textwrap", ".", "dedent", "(", "text", ")", ".", "splitlines", "(", "True", ")", "for", "line", "in", "lines", ":", "assert", "line", "[", ":", "1", "]", "==", "'|'", "output", "+=", "line", "[", "1", ":", "]", "return", "output" ]
Remove any common leading whitespace + '|' from every line in a given text.
[ "Remove", "any", "common", "leading", "whitespace", "+", "'", "|", "'", "from", "every", "line", "in", "a", "given", "text", "." ]
[ "\"\"\"\n Remove any common leading whitespace + '|' from every line in a given text.\n\n Remove any common leading whitespace + character '|' from every line in a\n given text.\n \"\"\"" ]
[ { "param": "text", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "text", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import textwrap

def dedent2(text):
    output = ''
    lines = textwrap.dedent(text).splitlines(True)
    for line in lines:
        assert line[:1] == '|'
        output += line[1:]
    return output
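A usage sketch for dedent2 above; after dedenting, every line must start with '|' or the assert fires:

source = "    |first line\n    |  second, keeps its extra indent\n"
assert dedent2(source) == "first line\n  second, keeps its extra indent\n"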
4fd414247668b7d588591bb43cc1842d26b71ad0
petrpavlu/storepass
storepass/model.py
[ "MIT" ]
Python
path_element_to_string
<not_specific>
def path_element_to_string(path_element):
    """Convert a single path element to its escaped string representation."""
    res = ""
    for char in path_element:
        if char == '\\':
            res += "\\\\"
        elif char == '/':
            res += "\\/"
        else:
            res += char
    return res
Convert a single path element to its escaped string representation.
Convert a single path element to its escaped string representation.
[ "Convert", "a", "single", "path", "element", "to", "its", "escaped", "string", "representation", "." ]
def path_element_to_string(path_element):
    res = ""
    for char in path_element:
        if char == '\\':
            res += "\\\\"
        elif char == '/':
            res += "\\/"
        else:
            res += char
    return res
[ "def", "path_element_to_string", "(", "path_element", ")", ":", "res", "=", "\"\"", "for", "char", "in", "path_element", ":", "if", "char", "==", "'\\\\'", ":", "res", "+=", "\"\\\\\\\\\"", "elif", "char", "==", "'/'", ":", "res", "+=", "\"\\\\/\"", "else", ":", "res", "+=", "char", "return", "res" ]
Convert a single path element to its escaped string representation.
[ "Convert", "a", "single", "path", "element", "to", "its", "escaped", "string", "representation", "." ]
[ "\"\"\"Convert a single path element to its escaped string representation.\"\"\"" ]
[ { "param": "path_element", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "path_element", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def path_element_to_string(path_element):
    res = ""
    for char in path_element:
        if char == '\\':
            res += "\\\\"
        elif char == '/':
            res += "\\/"
        else:
            res += char
    return res
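Examples for path_element_to_string above, written so the escapes are visible in the literals:

assert path_element_to_string("plain") == "plain"
assert path_element_to_string("a/b") == "a\\/b"                  # slash escaped
assert path_element_to_string("back\\slash") == "back\\\\slash"  # backslash doubled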
15ae12f0046127583343ca0ead7a202117484ca8
eyangs/transferNILM
model_structure.py
[ "MIT" ]
Python
save_model
null
def save_model(model, network_type, algorithm, appliance, save_model_dir):
    """ Saves a model to a specified location. Models are named using a combination of their
    target appliance, architecture, and pruning algorithm.

    Parameters:
    model (tensorflow.keras.Model): The Keras model to save.
    network_type (string): The architecture of the model ('', 'reduced', 'dropout', or 'reduced_dropout').
    algorithm (string): The pruning algorithm applied to the model.
    appliance (string): The appliance the model was trained with.

    """

    #model_path = "saved_models/" + appliance + "_" + algorithm + "_" + network_type + "_model.h5"
    model_path = save_model_dir
    if not os.path.exists(model_path):
        open((model_path), 'a').close()
    model.save(model_path)
Saves a model to a specified location. Models are named using a combination of their target appliance, architecture, and pruning algorithm. Parameters: model (tensorflow.keras.Model): The Keras model to save. network_type (string): The architecture of the model ('', 'reduced', 'dropout', or 'reduced_dropout'). algorithm (string): The pruning algorithm applied to the model. appliance (string): The appliance the model was trained with.
Saves a model to a specified location. Models are named using a combination of their target appliance, architecture, and pruning algorithm. model (tensorflow.keras.Model): The Keras model to save.
[ "Saves", "a", "model", "to", "a", "specified", "location", ".", "Models", "are", "named", "using", "a", "combination", "of", "their", "target", "appliance", "architecture", "and", "pruning", "algorithm", ".", "model", "(", "tensorflow", ".", "keras", ".", "Model", ")", ":", "The", "Keras", "model", "to", "save", "." ]
def save_model(model, network_type, algorithm, appliance, save_model_dir):
    model_path = save_model_dir
    if not os.path.exists(model_path):
        open((model_path), 'a').close()
    model.save(model_path)
[ "def", "save_model", "(", "model", ",", "network_type", ",", "algorithm", ",", "appliance", ",", "save_model_dir", ")", ":", "model_path", "=", "save_model_dir", "if", "not", "os", ".", "path", ".", "exists", "(", "model_path", ")", ":", "open", "(", "(", "model_path", ")", ",", "'a'", ")", ".", "close", "(", ")", "model", ".", "save", "(", "model_path", ")" ]
Saves a model to a specified location.
[ "Saves", "a", "model", "to", "a", "specified", "location", "." ]
[ "\"\"\" Saves a model to a specified location. Models are named using a combination of their \n target appliance, architecture, and pruning algorithm.\n\n Parameters:\n model (tensorflow.keras.Model): The Keras model to save.\n network_type (string): The architecture of the model ('', 'reduced', 'dropout', or 'reduced_dropout').\n algorithm (string): The pruning algorithm applied to the model.\n appliance (string): The appliance the model was trained with.\n\n \"\"\"", "#model_path = \"saved_models/\" + appliance + \"_\" + algorithm + \"_\" + network_type + \"_model.h5\"" ]
[ { "param": "model", "type": null }, { "param": "network_type", "type": null }, { "param": "algorithm", "type": null }, { "param": "appliance", "type": null }, { "param": "save_model_dir", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "model", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "network_type", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "algorithm", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "appliance", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "save_model_dir", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os

def save_model(model, network_type, algorithm, appliance, save_model_dir):
    model_path = save_model_dir
    if not os.path.exists(model_path):
        open((model_path), 'a').close()
    model.save(model_path)
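A minimal sketch of calling save_model above without TensorFlow installed; the stub class only mimics the save() method of tensorflow.keras.Model, and the argument values and path are illustrative:

class _StubModel:
    def save(self, path):
        # Stand-in for tensorflow.keras.Model.save
        with open(path, "w") as fh:
            fh.write("weights")

save_model(_StubModel(), "reduced", "prune", "kettle", "/tmp/kettle_model.h5")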
d20ef7f7ae603259ed23e254994e98c70370287c
WojciechMula/canvas2svg
canvasvg.py
[ "BSD-3-Clause" ]
Python
parse_dash
<not_specific>
def parse_dash(string, width):
    "parse dash pattern specified with string"

    # DashConvert from {tk-sources}/generic/tkCanvUtil.c
    w = max(1, int(width + 0.5))
    n = len(string)
    result = []
    for i, c in enumerate(string):
        if c == " " and len(result):
            result[-1] += w + 1
        elif c == "_":
            result.append(8*w)
            result.append(4*w)
        elif c == "-":
            result.append(6*w)
            result.append(4*w)
        elif c == ",":
            result.append(4*w)
            result.append(4*w)
        elif c == ".":
            result.append(2*w)
            result.append(4*w)
    return result
parse dash pattern specified with string
parse dash pattern specified with string
[ "parse", "dash", "pattern", "specified", "with", "string" ]
def parse_dash(string, width):
    w = max(1, int(width + 0.5))
    n = len(string)
    result = []
    for i, c in enumerate(string):
        if c == " " and len(result):
            result[-1] += w + 1
        elif c == "_":
            result.append(8*w)
            result.append(4*w)
        elif c == "-":
            result.append(6*w)
            result.append(4*w)
        elif c == ",":
            result.append(4*w)
            result.append(4*w)
        elif c == ".":
            result.append(2*w)
            result.append(4*w)
    return result
[ "def", "parse_dash", "(", "string", ",", "width", ")", ":", "w", "=", "max", "(", "1", ",", "int", "(", "width", "+", "0.5", ")", ")", "n", "=", "len", "(", "string", ")", "result", "=", "[", "]", "for", "i", ",", "c", "in", "enumerate", "(", "string", ")", ":", "if", "c", "==", "\" \"", "and", "len", "(", "result", ")", ":", "result", "[", "-", "1", "]", "+=", "w", "+", "1", "elif", "c", "==", "\"_\"", ":", "result", ".", "append", "(", "8", "*", "w", ")", "result", ".", "append", "(", "4", "*", "w", ")", "elif", "c", "==", "\"-\"", ":", "result", ".", "append", "(", "6", "*", "w", ")", "result", ".", "append", "(", "4", "*", "w", ")", "elif", "c", "==", "\",\"", ":", "result", ".", "append", "(", "4", "*", "w", ")", "result", ".", "append", "(", "4", "*", "w", ")", "elif", "c", "==", "\".\"", ":", "result", ".", "append", "(", "2", "*", "w", ")", "result", ".", "append", "(", "4", "*", "w", ")", "return", "result" ]
parse dash pattern specified with string
[ "parse", "dash", "pattern", "specified", "with", "string" ]
[ "\"parse dash pattern specified with string\"", "# DashConvert from {tk-sources}/generic/tkCanvUtil.c" ]
[ { "param": "string", "type": null }, { "param": "width", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "string", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "width", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def parse_dash(string, width):
    w = max(1, int(width + 0.5))
    n = len(string)
    result = []
    for i, c in enumerate(string):
        if c == " " and len(result):
            result[-1] += w + 1
        elif c == "_":
            result.append(8*w)
            result.append(4*w)
        elif c == "-":
            result.append(6*w)
            result.append(4*w)
        elif c == ",":
            result.append(4*w)
            result.append(4*w)
        elif c == ".":
            result.append(2*w)
            result.append(4*w)
    return result
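Worked examples for parse_dash above; with width 2 the effective unit w is 2, and a space extends the preceding gap by w + 1:

assert parse_dash("-.", 1) == [6, 4, 2, 4]
assert parse_dash("- .", 2) == [12, 11, 4, 8]  # the space added 3 to the gap of 8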
241c36d99c353c53d5ed55f9a59808bea1330510
chrisk27/fastdifgrow
fastdifgrow/fastdifgrow_main.py
[ "MIT" ]
Python
sim_parameters
null
def sim_parameters():
    """This function defines the initial parameters used in simulations"""
    global rows, cols, h, per_cycle, num_cycles
    rows = 100
    cols = 100
    h = 15
    per_cycle = 10**7
    num_cycles = 10**2
This function defines the initial parameters used in simulations
This function defines the initial parameters used in simulations
[ "This", "function", "defines", "the", "initial", "parameters", "used", "in", "simulations" ]
def sim_parameters():
    global rows, cols, h, per_cycle, num_cycles
    rows = 100
    cols = 100
    h = 15
    per_cycle = 10**7
    num_cycles = 10**2
[ "def", "sim_parameters", "(", ")", ":", "global", "rows", ",", "cols", ",", "h", ",", "per_cycle", ",", "num_cycles", "rows", "=", "100", "cols", "=", "100", "h", "=", "15", "per_cycle", "=", "10", "**", "7", "num_cycles", "=", "10", "**", "2" ]
This function defines the initial parameters used in simulations
[ "This", "function", "defines", "the", "initial", "parameters", "used", "in", "simulations" ]
[ "\"\"\"This function defines the initial parameters used in simulations\"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
def sim_parameters():
    global rows, cols, h, per_cycle, num_cycles
    rows = 100
    cols = 100
    h = 15
    per_cycle = 10**7
    num_cycles = 10**2
241c36d99c353c53d5ed55f9a59808bea1330510
chrisk27/fastdifgrow
fastdifgrow/fastdifgrow_main.py
[ "MIT" ]
Python
reaction_rates
<not_specific>
def reaction_rates():
    """This function defines the reaction rates for each process"""
    global bx, bm, dx, dm, sm, sx, lx
    bx = 1    # birth of xantophores
    bm = 0    # birth of melanophores
    dx = 0    # death of xantophores
    dm = 0    # death of melanophores
    sm = 1    # short-range killing of xantophore by melanophore
    sx = 1    # short-range killing of melanophore by xantophore
    lx = 2.5  # long-range activation/birth strength
    return
This function defines the reaction rates for each process
This function defines the reaction rates for each process
[ "This", "function", "defines", "the", "reaction", "rates", "for", "each", "process" ]
def reaction_rates():
    global bx, bm, dx, dm, sm, sx, lx
    bx = 1
    bm = 0
    dx = 0
    dm = 0
    sm = 1
    sx = 1
    lx = 2.5
    return
[ "def", "reaction_rates", "(", ")", ":", "global", "bx", ",", "bm", ",", "dx", ",", "dm", ",", "sm", ",", "sx", ",", "lx", "bx", "=", "1", "bm", "=", "0", "dx", "=", "0", "dm", "=", "0", "sm", "=", "1", "sx", "=", "1", "lx", "=", "2.5", "return" ]
This function defines the reaction rates for each process
[ "This", "function", "defines", "the", "reaction", "rates", "for", "each", "process" ]
[ "\"\"\"This function defines the reaction rates for each process\"\"\"", "# birth of xantophores", "# birth of melanophores", "# death of xantophores", "# death of melanophores", "# short-range killing of xantophore by melanophore", "# short-range killing of melanophore by xantophore", "# long-range activation/birth strength" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
def reaction_rates():
    global bx, bm, dx, dm, sm, sx, lx
    bx = 1
    bm = 0
    dx = 0
    dm = 0
    sm = 1
    sx = 1
    lx = 2.5
    return
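Since sim_parameters and reaction_rates above only assign module-level globals, a usage sketch is just calling them and reading the names they define:

sim_parameters()
reaction_rates()
assert (rows, cols, h) == (100, 100, 15)
assert lx == 2.5  # long-range activation dominates the short-range rates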
e308b5520485f58c0a528ff53d5240b4450cc42c
macph/nextbus
nextbus/models/tables.py
[ "MIT" ]
Python
_insert_service_modes
null
def _insert_service_modes(target, connection, **kw):
    """ Inserts service mode IDs and names after creating lookup table. """
    statement = target.insert().values([
        {"id": 1, "name": "bus"},
        {"id": 2, "name": "coach"},
        {"id": 3, "name": "tram"},
        {"id": 4, "name": "metro"},
        {"id": 5, "name": "underground"}
    ])
    connection.execute(statement)
Inserts service mode IDs and names after creating lookup table.
Inserts service mode IDs and names after creating lookup table.
[ "Inserts", "service", "mode", "IDs", "and", "names", "after", "creating", "lookup", "table", "." ]
def _insert_service_modes(target, connection, **kw):
    statement = target.insert().values([
        {"id": 1, "name": "bus"},
        {"id": 2, "name": "coach"},
        {"id": 3, "name": "tram"},
        {"id": 4, "name": "metro"},
        {"id": 5, "name": "underground"}
    ])
    connection.execute(statement)
[ "def", "_insert_service_modes", "(", "target", ",", "connection", ",", "**", "kw", ")", ":", "statement", "=", "target", ".", "insert", "(", ")", ".", "values", "(", "[", "{", "\"id\"", ":", "1", ",", "\"name\"", ":", "\"bus\"", "}", ",", "{", "\"id\"", ":", "2", ",", "\"name\"", ":", "\"coach\"", "}", ",", "{", "\"id\"", ":", "3", ",", "\"name\"", ":", "\"tram\"", "}", ",", "{", "\"id\"", ":", "4", ",", "\"name\"", ":", "\"metro\"", "}", ",", "{", "\"id\"", ":", "5", ",", "\"name\"", ":", "\"underground\"", "}", "]", ")", "connection", ".", "execute", "(", "statement", ")" ]
Inserts service mode IDs and names after creating lookup table.
[ "Inserts", "service", "mode", "IDs", "and", "names", "after", "creating", "lookup", "table", "." ]
[ "\"\"\" Inserts service mode IDs and names after creating lookup table. \"\"\"" ]
[ { "param": "target", "type": null }, { "param": "connection", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "target", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "connection", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _insert_service_modes(target, connection, **kw):
    statement = target.insert().values([
        {"id": 1, "name": "bus"},
        {"id": 2, "name": "coach"},
        {"id": 3, "name": "tram"},
        {"id": 4, "name": "metro"},
        {"id": 5, "name": "underground"}
    ])
    connection.execute(statement)
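A sketch of how a hook like _insert_service_modes above is typically wired up with SQLAlchemy's after_create event; the table definition here is an assumption for illustration, not the real nextbus model:

from sqlalchemy import MetaData, Table, Column, Integer, Text, create_engine, event

metadata = MetaData()
service_mode = Table(
    "service_mode", metadata,
    Column("id", Integer, primary_key=True),
    Column("name", Text),
)
# Populate the lookup table right after it is created.
event.listen(service_mode, "after_create", _insert_service_modes)

engine = create_engine("sqlite://")
metadata.create_all(engine)  # fires after_create and inserts the five modes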
e308b5520485f58c0a528ff53d5240b4450cc42c
macph/nextbus
nextbus/models/tables.py
[ "MIT" ]
Python
_insert_bank_holidays
null
def _insert_bank_holidays(target, connection, **kw):
    """ Inserts bank holiday IDs and names after creating lookup table. """
    statement = target.insert().values([
        {"id": 1, "name": "NewYearsDay"},
        {"id": 2, "name": "Jan2ndScotland"},
        {"id": 3, "name": "GoodFriday"},
        {"id": 4, "name": "EasterMonday"},
        {"id": 5, "name": "MayDay"},
        {"id": 6, "name": "SpringBank"},
        {"id": 7, "name": "LateSummerBankHolidayNotScotland"},
        {"id": 8, "name": "AugustBankHolidayScotland"},
        {"id": 9, "name": "ChristmasDay"},
        {"id": 10, "name": "BoxingDay"},
        {"id": 11, "name": "ChristmasDayHoliday"},
        {"id": 12, "name": "BoxingDayHoliday"},
        {"id": 13, "name": "NewYearsDayHoliday"},
        {"id": 14, "name": "ChristmasEve"},
        {"id": 15, "name": "NewYearsEve"},
    ])
    connection.execute(statement)
Inserts bank holiday IDs and names after creating lookup table.
Inserts bank holiday IDs and names after creating lookup table.
[ "Inserts", "bank", "holiday", "IDs", "and", "names", "after", "creating", "lookup", "table", "." ]
def _insert_bank_holidays(target, connection, **kw):
    statement = target.insert().values([
        {"id": 1, "name": "NewYearsDay"},
        {"id": 2, "name": "Jan2ndScotland"},
        {"id": 3, "name": "GoodFriday"},
        {"id": 4, "name": "EasterMonday"},
        {"id": 5, "name": "MayDay"},
        {"id": 6, "name": "SpringBank"},
        {"id": 7, "name": "LateSummerBankHolidayNotScotland"},
        {"id": 8, "name": "AugustBankHolidayScotland"},
        {"id": 9, "name": "ChristmasDay"},
        {"id": 10, "name": "BoxingDay"},
        {"id": 11, "name": "ChristmasDayHoliday"},
        {"id": 12, "name": "BoxingDayHoliday"},
        {"id": 13, "name": "NewYearsDayHoliday"},
        {"id": 14, "name": "ChristmasEve"},
        {"id": 15, "name": "NewYearsEve"},
    ])
    connection.execute(statement)
[ "def", "_insert_bank_holidays", "(", "target", ",", "connection", ",", "**", "kw", ")", ":", "statement", "=", "target", ".", "insert", "(", ")", ".", "values", "(", "[", "{", "\"id\"", ":", "1", ",", "\"name\"", ":", "\"NewYearsDay\"", "}", ",", "{", "\"id\"", ":", "2", ",", "\"name\"", ":", "\"Jan2ndScotland\"", "}", ",", "{", "\"id\"", ":", "3", ",", "\"name\"", ":", "\"GoodFriday\"", "}", ",", "{", "\"id\"", ":", "4", ",", "\"name\"", ":", "\"EasterMonday\"", "}", ",", "{", "\"id\"", ":", "5", ",", "\"name\"", ":", "\"MayDay\"", "}", ",", "{", "\"id\"", ":", "6", ",", "\"name\"", ":", "\"SpringBank\"", "}", ",", "{", "\"id\"", ":", "7", ",", "\"name\"", ":", "\"LateSummerBankHolidayNotScotland\"", "}", ",", "{", "\"id\"", ":", "8", ",", "\"name\"", ":", "\"AugustBankHolidayScotland\"", "}", ",", "{", "\"id\"", ":", "9", ",", "\"name\"", ":", "\"ChristmasDay\"", "}", ",", "{", "\"id\"", ":", "10", ",", "\"name\"", ":", "\"BoxingDay\"", "}", ",", "{", "\"id\"", ":", "11", ",", "\"name\"", ":", "\"ChristmasDayHoliday\"", "}", ",", "{", "\"id\"", ":", "12", ",", "\"name\"", ":", "\"BoxingDayHoliday\"", "}", ",", "{", "\"id\"", ":", "13", ",", "\"name\"", ":", "\"NewYearsDayHoliday\"", "}", ",", "{", "\"id\"", ":", "14", ",", "\"name\"", ":", "\"ChristmasEve\"", "}", ",", "{", "\"id\"", ":", "15", ",", "\"name\"", ":", "\"NewYearsEve\"", "}", ",", "]", ")", "connection", ".", "execute", "(", "statement", ")" ]
Inserts bank holiday IDs and names after creating lookup table.
[ "Inserts", "bank", "holiday", "IDs", "and", "names", "after", "creating", "lookup", "table", "." ]
[ "\"\"\" Inserts bank holiday IDs and names after creating lookup table. \"\"\"" ]
[ { "param": "target", "type": null }, { "param": "connection", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "target", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "connection", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _insert_bank_holidays(target, connection, **kw):
    statement = target.insert().values([
        {"id": 1, "name": "NewYearsDay"},
        {"id": 2, "name": "Jan2ndScotland"},
        {"id": 3, "name": "GoodFriday"},
        {"id": 4, "name": "EasterMonday"},
        {"id": 5, "name": "MayDay"},
        {"id": 6, "name": "SpringBank"},
        {"id": 7, "name": "LateSummerBankHolidayNotScotland"},
        {"id": 8, "name": "AugustBankHolidayScotland"},
        {"id": 9, "name": "ChristmasDay"},
        {"id": 10, "name": "BoxingDay"},
        {"id": 11, "name": "ChristmasDayHoliday"},
        {"id": 12, "name": "BoxingDayHoliday"},
        {"id": 13, "name": "NewYearsDayHoliday"},
        {"id": 14, "name": "ChristmasEve"},
        {"id": 15, "name": "NewYearsEve"},
    ])
    connection.execute(statement)
e308b5520485f58c0a528ff53d5240b4450cc42c
macph/nextbus
nextbus/models/tables.py
[ "MIT" ]
Python
_insert_bank_holiday_dates
null
def _insert_bank_holiday_dates(target, connection, **kw):
    """ Inserts bank holiday dates after creating table. """
    statement = target.insert().values([
        {"holiday_ref": 13, "date": "2017-01-02"},
        {"holiday_ref": 2, "date": "2017-01-02"},
        {"holiday_ref": 3, "date": "2017-04-14"},
        {"holiday_ref": 4, "date": "2017-04-17"},
        {"holiday_ref": 5, "date": "2017-05-01"},
        {"holiday_ref": 6, "date": "2017-05-29"},
        {"holiday_ref": 8, "date": "2017-08-05"},
        {"holiday_ref": 7, "date": "2017-08-28"},
        {"holiday_ref": 9, "date": "2017-12-25"},
        {"holiday_ref": 10, "date": "2017-12-26"},
        {"holiday_ref": 1, "date": "2018-01-01"},
        {"holiday_ref": 2, "date": "2018-01-02"},
        {"holiday_ref": 3, "date": "2018-03-30"},
        {"holiday_ref": 4, "date": "2018-04-02"},
        {"holiday_ref": 5, "date": "2018-05-07"},
        {"holiday_ref": 6, "date": "2018-05-28"},
        {"holiday_ref": 8, "date": "2018-08-06"},
        {"holiday_ref": 7, "date": "2018-08-27"},
        {"holiday_ref": 9, "date": "2018-12-25"},
        {"holiday_ref": 10, "date": "2018-12-26"},
        {"holiday_ref": 1, "date": "2019-01-01"},
        {"holiday_ref": 2, "date": "2019-01-02"},
        {"holiday_ref": 3, "date": "2019-04-19"},
        {"holiday_ref": 4, "date": "2019-04-22"},
        {"holiday_ref": 5, "date": "2019-05-06"},
        {"holiday_ref": 6, "date": "2019-05-27"},
        {"holiday_ref": 8, "date": "2019-08-05"},
        {"holiday_ref": 7, "date": "2019-08-26"},
        {"holiday_ref": 9, "date": "2019-12-25"},
        {"holiday_ref": 10, "date": "2019-12-26"},
        {"holiday_ref": 1, "date": "2020-01-01"},
        {"holiday_ref": 2, "date": "2020-01-02"},
        {"holiday_ref": 3, "date": "2020-04-10"},
        {"holiday_ref": 4, "date": "2020-04-13"},
        {"holiday_ref": 5, "date": "2020-05-08"},
        {"holiday_ref": 6, "date": "2020-05-25"},
        {"holiday_ref": 7, "date": "2020-08-03"},
        {"holiday_ref": 8, "date": "2020-08-31"},
        {"holiday_ref": 14, "date": "2020-12-24"},
        {"holiday_ref": 9, "date": "2020-12-25"},
        {"holiday_ref": 12, "date": "2020-12-28"},
        {"holiday_ref": 15, "date": "2020-12-31"},
    ])
    connection.execute(statement)
Inserts bank holiday dates after creating table.
Inserts bank holiday dates after creating table.
[ "Inserts", "bank", "holiday", "dates", "after", "creating", "table", "." ]
def _insert_bank_holiday_dates(target, connection, **kw):
    statement = target.insert().values([
        {"holiday_ref": 13, "date": "2017-01-02"},
        {"holiday_ref": 2, "date": "2017-01-02"},
        {"holiday_ref": 3, "date": "2017-04-14"},
        {"holiday_ref": 4, "date": "2017-04-17"},
        {"holiday_ref": 5, "date": "2017-05-01"},
        {"holiday_ref": 6, "date": "2017-05-29"},
        {"holiday_ref": 8, "date": "2017-08-05"},
        {"holiday_ref": 7, "date": "2017-08-28"},
        {"holiday_ref": 9, "date": "2017-12-25"},
        {"holiday_ref": 10, "date": "2017-12-26"},
        {"holiday_ref": 1, "date": "2018-01-01"},
        {"holiday_ref": 2, "date": "2018-01-02"},
        {"holiday_ref": 3, "date": "2018-03-30"},
        {"holiday_ref": 4, "date": "2018-04-02"},
        {"holiday_ref": 5, "date": "2018-05-07"},
        {"holiday_ref": 6, "date": "2018-05-28"},
        {"holiday_ref": 8, "date": "2018-08-06"},
        {"holiday_ref": 7, "date": "2018-08-27"},
        {"holiday_ref": 9, "date": "2018-12-25"},
        {"holiday_ref": 10, "date": "2018-12-26"},
        {"holiday_ref": 1, "date": "2019-01-01"},
        {"holiday_ref": 2, "date": "2019-01-02"},
        {"holiday_ref": 3, "date": "2019-04-19"},
        {"holiday_ref": 4, "date": "2019-04-22"},
        {"holiday_ref": 5, "date": "2019-05-06"},
        {"holiday_ref": 6, "date": "2019-05-27"},
        {"holiday_ref": 8, "date": "2019-08-05"},
        {"holiday_ref": 7, "date": "2019-08-26"},
        {"holiday_ref": 9, "date": "2019-12-25"},
        {"holiday_ref": 10, "date": "2019-12-26"},
        {"holiday_ref": 1, "date": "2020-01-01"},
        {"holiday_ref": 2, "date": "2020-01-02"},
        {"holiday_ref": 3, "date": "2020-04-10"},
        {"holiday_ref": 4, "date": "2020-04-13"},
        {"holiday_ref": 5, "date": "2020-05-08"},
        {"holiday_ref": 6, "date": "2020-05-25"},
        {"holiday_ref": 7, "date": "2020-08-03"},
        {"holiday_ref": 8, "date": "2020-08-31"},
        {"holiday_ref": 14, "date": "2020-12-24"},
        {"holiday_ref": 9, "date": "2020-12-25"},
        {"holiday_ref": 12, "date": "2020-12-28"},
        {"holiday_ref": 15, "date": "2020-12-31"},
    ])
    connection.execute(statement)
[ "def", "_insert_bank_holiday_dates", "(", "target", ",", "connection", ",", "**", "kw", ")", ":", "statement", "=", "target", ".", "insert", "(", ")", ".", "values", "(", "[", "{", "\"holiday_ref\"", ":", "13", ",", "\"date\"", ":", "\"2017-01-02\"", "}", ",", "{", "\"holiday_ref\"", ":", "2", ",", "\"date\"", ":", "\"2017-01-02\"", "}", ",", "{", "\"holiday_ref\"", ":", "3", ",", "\"date\"", ":", "\"2017-04-14\"", "}", ",", "{", "\"holiday_ref\"", ":", "4", ",", "\"date\"", ":", "\"2017-04-17\"", "}", ",", "{", "\"holiday_ref\"", ":", "5", ",", "\"date\"", ":", "\"2017-05-01\"", "}", ",", "{", "\"holiday_ref\"", ":", "6", ",", "\"date\"", ":", "\"2017-05-29\"", "}", ",", "{", "\"holiday_ref\"", ":", "8", ",", "\"date\"", ":", "\"2017-08-05\"", "}", ",", "{", "\"holiday_ref\"", ":", "7", ",", "\"date\"", ":", "\"2017-08-28\"", "}", ",", "{", "\"holiday_ref\"", ":", "9", ",", "\"date\"", ":", "\"2017-12-25\"", "}", ",", "{", "\"holiday_ref\"", ":", "10", ",", "\"date\"", ":", "\"2017-12-26\"", "}", ",", "{", "\"holiday_ref\"", ":", "1", ",", "\"date\"", ":", "\"2018-01-01\"", "}", ",", "{", "\"holiday_ref\"", ":", "2", ",", "\"date\"", ":", "\"2018-01-02\"", "}", ",", "{", "\"holiday_ref\"", ":", "3", ",", "\"date\"", ":", "\"2018-03-30\"", "}", ",", "{", "\"holiday_ref\"", ":", "4", ",", "\"date\"", ":", "\"2018-04-02\"", "}", ",", "{", "\"holiday_ref\"", ":", "5", ",", "\"date\"", ":", "\"2018-05-07\"", "}", ",", "{", "\"holiday_ref\"", ":", "6", ",", "\"date\"", ":", "\"2018-05-28\"", "}", ",", "{", "\"holiday_ref\"", ":", "8", ",", "\"date\"", ":", "\"2018-08-06\"", "}", ",", "{", "\"holiday_ref\"", ":", "7", ",", "\"date\"", ":", "\"2018-08-27\"", "}", ",", "{", "\"holiday_ref\"", ":", "9", ",", "\"date\"", ":", "\"2018-12-25\"", "}", ",", "{", "\"holiday_ref\"", ":", "10", ",", "\"date\"", ":", "\"2018-12-26\"", "}", ",", "{", "\"holiday_ref\"", ":", "1", ",", "\"date\"", ":", "\"2019-01-01\"", "}", ",", "{", "\"holiday_ref\"", ":", "2", ",", "\"date\"", ":", "\"2019-01-02\"", "}", ",", "{", "\"holiday_ref\"", ":", "3", ",", "\"date\"", ":", "\"2019-04-19\"", "}", ",", "{", "\"holiday_ref\"", ":", "4", ",", "\"date\"", ":", "\"2019-04-22\"", "}", ",", "{", "\"holiday_ref\"", ":", "5", ",", "\"date\"", ":", "\"2019-05-06\"", "}", ",", "{", "\"holiday_ref\"", ":", "6", ",", "\"date\"", ":", "\"2019-05-27\"", "}", ",", "{", "\"holiday_ref\"", ":", "8", ",", "\"date\"", ":", "\"2019-08-05\"", "}", ",", "{", "\"holiday_ref\"", ":", "7", ",", "\"date\"", ":", "\"2019-08-26\"", "}", ",", "{", "\"holiday_ref\"", ":", "9", ",", "\"date\"", ":", "\"2019-12-25\"", "}", ",", "{", "\"holiday_ref\"", ":", "10", ",", "\"date\"", ":", "\"2019-12-26\"", "}", ",", "{", "\"holiday_ref\"", ":", "1", ",", "\"date\"", ":", "\"2020-01-01\"", "}", ",", "{", "\"holiday_ref\"", ":", "2", ",", "\"date\"", ":", "\"2020-01-02\"", "}", ",", "{", "\"holiday_ref\"", ":", "3", ",", "\"date\"", ":", "\"2020-04-10\"", "}", ",", "{", "\"holiday_ref\"", ":", "4", ",", "\"date\"", ":", "\"2020-04-13\"", "}", ",", "{", "\"holiday_ref\"", ":", "5", ",", "\"date\"", ":", "\"2020-05-08\"", "}", ",", "{", "\"holiday_ref\"", ":", "6", ",", "\"date\"", ":", "\"2020-05-25\"", "}", ",", "{", "\"holiday_ref\"", ":", "7", ",", "\"date\"", ":", "\"2020-08-03\"", "}", ",", "{", "\"holiday_ref\"", ":", "8", ",", "\"date\"", ":", "\"2020-08-31\"", "}", ",", "{", "\"holiday_ref\"", ":", "14", ",", "\"date\"", ":", "\"2020-12-24\"", "}", ",", "{", "\"holiday_ref\"", ":", "9", ",", "\"date\"", ":", "\"2020-12-25\"", "}", ",", "{", 
"\"holiday_ref\"", ":", "12", ",", "\"date\"", ":", "\"2020-12-28\"", "}", ",", "{", "\"holiday_ref\"", ":", "15", ",", "\"date\"", ":", "\"2020-12-31\"", "}", ",", "]", ")", "connection", ".", "execute", "(", "statement", ")" ]
Inserts bank holiday dates after creating table.
[ "Inserts", "bank", "holiday", "dates", "after", "creating", "table", "." ]
[ "\"\"\" Inserts bank holiday dates after creating table. \"\"\"" ]
[ { "param": "target", "type": null }, { "param": "connection", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "target", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "connection", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _insert_bank_holiday_dates(target, connection, **kw):
    statement = target.insert().values([
        {"holiday_ref": 13, "date": "2017-01-02"},
        {"holiday_ref": 2, "date": "2017-01-02"},
        {"holiday_ref": 3, "date": "2017-04-14"},
        {"holiday_ref": 4, "date": "2017-04-17"},
        {"holiday_ref": 5, "date": "2017-05-01"},
        {"holiday_ref": 6, "date": "2017-05-29"},
        {"holiday_ref": 8, "date": "2017-08-05"},
        {"holiday_ref": 7, "date": "2017-08-28"},
        {"holiday_ref": 9, "date": "2017-12-25"},
        {"holiday_ref": 10, "date": "2017-12-26"},
        {"holiday_ref": 1, "date": "2018-01-01"},
        {"holiday_ref": 2, "date": "2018-01-02"},
        {"holiday_ref": 3, "date": "2018-03-30"},
        {"holiday_ref": 4, "date": "2018-04-02"},
        {"holiday_ref": 5, "date": "2018-05-07"},
        {"holiday_ref": 6, "date": "2018-05-28"},
        {"holiday_ref": 8, "date": "2018-08-06"},
        {"holiday_ref": 7, "date": "2018-08-27"},
        {"holiday_ref": 9, "date": "2018-12-25"},
        {"holiday_ref": 10, "date": "2018-12-26"},
        {"holiday_ref": 1, "date": "2019-01-01"},
        {"holiday_ref": 2, "date": "2019-01-02"},
        {"holiday_ref": 3, "date": "2019-04-19"},
        {"holiday_ref": 4, "date": "2019-04-22"},
        {"holiday_ref": 5, "date": "2019-05-06"},
        {"holiday_ref": 6, "date": "2019-05-27"},
        {"holiday_ref": 8, "date": "2019-08-05"},
        {"holiday_ref": 7, "date": "2019-08-26"},
        {"holiday_ref": 9, "date": "2019-12-25"},
        {"holiday_ref": 10, "date": "2019-12-26"},
        {"holiday_ref": 1, "date": "2020-01-01"},
        {"holiday_ref": 2, "date": "2020-01-02"},
        {"holiday_ref": 3, "date": "2020-04-10"},
        {"holiday_ref": 4, "date": "2020-04-13"},
        {"holiday_ref": 5, "date": "2020-05-08"},
        {"holiday_ref": 6, "date": "2020-05-25"},
        {"holiday_ref": 7, "date": "2020-08-03"},
        {"holiday_ref": 8, "date": "2020-08-31"},
        {"holiday_ref": 14, "date": "2020-12-24"},
        {"holiday_ref": 9, "date": "2020-12-25"},
        {"holiday_ref": 12, "date": "2020-12-28"},
        {"holiday_ref": 15, "date": "2020-12-31"},
    ])
    connection.execute(statement)
016c673a5f440b4ae1b2683cf9387cf302f5a6d5
macph/nextbus
nextbus/populate/naptan.py
[ "MIT" ]
Python
_find_stop_area_mode
<not_specific>
def _find_stop_area_mode(query_result, ref):
    """ Finds the mode of references for each stop area.

    The query results must have 3 columns: primary key, foreign key
    reference and number of stop points within each area matching that
    reference, in that order.

    :param ref: Name of the reference column.
    :returns: Two lists; one to be used with `bulk_update_mappings`
    and the other strings for invalid areas.
    """
    # Group by stop area and reference
    stop_areas = collections.defaultdict(dict)
    for row in query_result:
        stop_areas[row[0]][row[1]] = row[2]

    # Check each area and find mode matching reference
    update_areas = []
    invalid_areas = {}
    for sa, count in stop_areas.items():
        max_count = [k for k, v in count.items() if v == max(count.values())]
        if len(max_count) == 1:
            update_areas.append({"code": sa, ref: max_count[0]})
        else:
            invalid_areas[sa] = max_count

    return update_areas, invalid_areas
Finds the mode of references for each stop area. The query results must have 3 columns: primary key, foreign key reference and number of stop points within each area matching that reference, in that order. :param ref: Name of the reference column. :returns: Two lists; one to be used with `bulk_update_mappings` and the other strings for invalid areas.
Finds the mode of references for each stop area. The query results must have 3 columns: primary key, foreign key reference and number of stop points within each area matching that reference, in that order.
[ "Finds", "the", "mode", "of", "references", "for", "each", "stop", "area", ".", "The", "query", "results", "must", "have", "3", "columns", ":", "primary", "key", "foreign", "key", "reference", "and", "number", "of", "stop", "points", "within", "each", "area", "matching", "that", "reference", "in", "that", "order", "." ]
def _find_stop_area_mode(query_result, ref): stop_areas = collections.defaultdict(dict) for row in query_result: stop_areas[row[0]][row[1]] = row[2] update_areas = [] invalid_areas = {} for sa, count in stop_areas.items(): max_count = [k for k, v in count.items() if v == max(count.values())] if len(max_count) == 1: update_areas.append({"code": sa, ref: max_count[0]}) else: invalid_areas[sa] = max_count return update_areas, invalid_areas
[ "def", "_find_stop_area_mode", "(", "query_result", ",", "ref", ")", ":", "stop_areas", "=", "collections", ".", "defaultdict", "(", "dict", ")", "for", "row", "in", "query_result", ":", "stop_areas", "[", "row", "[", "0", "]", "]", "[", "row", "[", "1", "]", "]", "=", "row", "[", "2", "]", "update_areas", "=", "[", "]", "invalid_areas", "=", "{", "}", "for", "sa", ",", "count", "in", "stop_areas", ".", "items", "(", ")", ":", "max_count", "=", "[", "k", "for", "k", ",", "v", "in", "count", ".", "items", "(", ")", "if", "v", "==", "max", "(", "count", ".", "values", "(", ")", ")", "]", "if", "len", "(", "max_count", ")", "==", "1", ":", "update_areas", ".", "append", "(", "{", "\"code\"", ":", "sa", ",", "ref", ":", "max_count", "[", "0", "]", "}", ")", "else", ":", "invalid_areas", "[", "sa", "]", "=", "max_count", "return", "update_areas", ",", "invalid_areas" ]
Finds the mode of references for each stop area.
[ "Finds", "the", "mode", "of", "references", "for", "each", "stop", "area", "." ]
[ "\"\"\" Finds the mode of references for each stop area.\n\n The query results must have 3 columns: primary key, foreign key\n reference and number of stop points within each area matching that\n reference, in that order.\n\n :param ref: Name of the reference column.\n :returns: Two lists; one to be to be used with `bulk_update_mappings`\n and the other strings for invalid areas.\n \"\"\"", "# Group by stop area and reference", "# Check each area and find mode matching reference" ]
[ { "param": "query_result", "type": null }, { "param": "ref", "type": null } ]
{ "returns": [ { "docstring": "Two lists; one to be to be used with `bulk_update_mappings`\nand the other strings for invalid areas.", "docstring_tokens": [ "Two", "lists", ";", "one", "to", "be", "to", "be", "used", "with", "`", "bulk_update_mappings", "`", "and", "the", "other", "strings", "for", "invalid", "areas", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "query_result", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "ref", "type": null, "docstring": "Name of the reference column.", "docstring_tokens": [ "Name", "of", "the", "reference", "column", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import collections def _find_stop_area_mode(query_result, ref): stop_areas = collections.defaultdict(dict) for row in query_result: stop_areas[row[0]][row[1]] = row[2] update_areas = [] invalid_areas = {} for sa, count in stop_areas.items(): max_count = [k for k, v in count.items() if v == max(count.values())] if len(max_count) == 1: update_areas.append({"code": sa, ref: max_count[0]}) else: invalid_areas[sa] = max_count return update_areas, invalid_areas
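A quick illustration with hand-made query rows (the column values are invented for the example):

rows = [
    ("area1", "localityA", 3),
    ("area1", "localityB", 1),
    ("area2", "localityC", 2),
    ("area2", "localityD", 2),
]
update, invalid = _find_stop_area_mode(rows, "locality_ref")
# update  -> [{"code": "area1", "locality_ref": "localityA"}]
# invalid -> {"area2": ["localityC", "localityD"]}  (tied counts, no clear mode)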
f1dfd1277ba810a4fdb1dd0e7b4ca3a004196f29
macph/nextbus
nextbus/views.py
[ "MIT" ]
Python
_display_operators
<not_specific>
def _display_operators(operators): """ Returns sorted list of operators with any information. """ def sort_name(o): return o.name def filter_op(o): return any([o.email, o.address, o.website, o.twitter]) return sorted(filter(filter_op, operators), key=sort_name)
Returns sorted list of operators with any information.
Returns sorted list of operators with any information.
[ "Returns", "sorted", "list", "of", "operators", "with", "any", "information", "." ]
def _display_operators(operators): def sort_name(o): return o.name def filter_op(o): return any([o.email, o.address, o.website, o.twitter]) return sorted(filter(filter_op, operators), key=sort_name)
[ "def", "_display_operators", "(", "operators", ")", ":", "def", "sort_name", "(", "o", ")", ":", "return", "o", ".", "name", "def", "filter_op", "(", "o", ")", ":", "return", "any", "(", "[", "o", ".", "email", ",", "o", ".", "address", ",", "o", ".", "website", ",", "o", ".", "twitter", "]", ")", "return", "sorted", "(", "filter", "(", "filter_op", ",", "operators", ")", ",", "key", "=", "sort_name", ")" ]
Returns sorted list of operators with any information.
[ "Returns", "sorted", "list", "of", "operators", "with", "any", "information", "." ]
[ "\"\"\" Returns sorted list of operators with any information. \"\"\"" ]
[ { "param": "operators", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "operators", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _display_operators(operators): def sort_name(o): return o.name def filter_op(o): return any([o.email, o.address, o.website, o.twitter]) return sorted(filter(filter_op, operators), key=sort_name)
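A minimal check with stand-in operator objects:

from types import SimpleNamespace

ops = [
    SimpleNamespace(name="Zebra Coaches", email=None, address=None,
                    website="https://example.com", twitter=None),
    SimpleNamespace(name="Alpha Buses", email=None, address=None,
                    website=None, twitter=None),
]
# Alpha Buses has no contact details at all, so only Zebra Coaches survives,
# and results come back sorted by name.
_display_operators(ops)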
b8af42c3035877a1083808f4d71d1c2518314a01
macph/nextbus
nextbus/graph.py
[ "MIT" ]
Python
_merge_forward
null
def _merge_forward(graph, sequence, path, index): """ Merges path into sequence, ensuring all new vertices follows the existing ones in the adjacency list. """ i = index for v in path: if v in sequence: continue # Check if any later vertices have this path and move index after = [j for j, w in enumerate(sequence[i:], i) if v in graph.following(w)] if after: i = after[-1] + 1 sequence.insert(i, v) i += 1
Merges path into sequence, ensuring all new vertices follow the existing ones in the adjacency list.
Merges path into sequence, ensuring all new vertices follow the existing ones in the adjacency list.
[ "Merges", "path", "into", "sequence", "ensuring", "all", "new", "vertices", "follows", "the", "existing", "ones", "in", "the", "adjacency", "list", "." ]
def _merge_forward(graph, sequence, path, index): i = index for v in path: if v in sequence: continue after = [j for j, w in enumerate(sequence[i:], i) if v in graph.following(w)] if after: i = after[-1] + 1 sequence.insert(i, v) i += 1
[ "def", "_merge_forward", "(", "graph", ",", "sequence", ",", "path", ",", "index", ")", ":", "i", "=", "index", "for", "v", "in", "path", ":", "if", "v", "in", "sequence", ":", "continue", "after", "=", "[", "j", "for", "j", ",", "w", "in", "enumerate", "(", "sequence", "[", "i", ":", "]", ",", "i", ")", "if", "v", "in", "graph", ".", "following", "(", "w", ")", "]", "if", "after", ":", "i", "=", "after", "[", "-", "1", "]", "+", "1", "sequence", ".", "insert", "(", "i", ",", "v", ")", "i", "+=", "1" ]
Merges path into sequence, ensuring all new vertices follow the existing ones in the adjacency list.
[ "Merges", "path", "into", "sequence", "ensuring", "all", "new", "vertices", "follows", "the", "existing", "ones", "in", "the", "adjacency", "list", "." ]
[ "\"\"\" Merges path into sequence, ensuring all new vertices follows the\n existing ones in the adjacency list.\n \"\"\"", "# Check if any later vertices have this path and move index" ]
[ { "param": "graph", "type": null }, { "param": "sequence", "type": null }, { "param": "path", "type": null }, { "param": "index", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "graph", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "sequence", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "path", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "index", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _merge_forward(graph, sequence, path, index): i = index for v in path: if v in sequence: continue after = [j for j, w in enumerate(sequence[i:], i) if v in graph.following(w)] if after: i = after[-1] + 1 sequence.insert(i, v) i += 1
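A sketch of the forward merge with a stub graph exposing only the `following` method the code relies on:

# Stand-in for the project's Graph class, assumed shape only.
class StubGraph:
    def __init__(self, adj):
        self._adj = adj

    def following(self, v):
        return self._adj.get(v, set())

g = StubGraph({"a": {"b", "x"}, "b": {"c"}, "x": {"c"}})
seq = ["a", "b", "c"]
_merge_forward(g, seq, ["a", "x", "c"], 0)
# seq is now ["a", "x", "b", "c"]: "x" is placed just after "a", which leads to it.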
b8af42c3035877a1083808f4d71d1c2518314a01
macph/nextbus
nextbus/graph.py
[ "MIT" ]
Python
_merge_backward
null
def _merge_backward(graph, sequence, path, index): """ Merges path into sequence, ensuring all new vertices precedes the existing ones in the adjacency list. """ i = index for v in path[::-1]: if v in sequence: continue # Check if any previous vertices have this path and move index after = [i - j for j, w in enumerate(sequence[i::-1]) if v in graph.preceding(w)] if after: i = after[-1] sequence.insert(i, v)
Merges path into sequence, ensuring all new vertices precede the existing ones in the adjacency list.
Merges path into sequence, ensuring all new vertices precede the existing ones in the adjacency list.
[ "Merges", "path", "into", "sequence", "ensuring", "all", "new", "vertices", "precedes", "the", "existing", "ones", "in", "the", "adjacency", "list", "." ]
def _merge_backward(graph, sequence, path, index): i = index for v in path[::-1]: if v in sequence: continue after = [i - j for j, w in enumerate(sequence[i::-1]) if v in graph.preceding(w)] if after: i = after[-1] sequence.insert(i, v)
[ "def", "_merge_backward", "(", "graph", ",", "sequence", ",", "path", ",", "index", ")", ":", "i", "=", "index", "for", "v", "in", "path", "[", ":", ":", "-", "1", "]", ":", "if", "v", "in", "sequence", ":", "continue", "after", "=", "[", "i", "-", "j", "for", "j", ",", "w", "in", "enumerate", "(", "sequence", "[", "i", ":", ":", "-", "1", "]", ")", "if", "v", "in", "graph", ".", "preceding", "(", "w", ")", "]", "if", "after", ":", "i", "=", "after", "[", "-", "1", "]", "sequence", ".", "insert", "(", "i", ",", "v", ")" ]
Merges path into sequence, ensuring all new vertices precede the existing ones in the adjacency list.
[ "Merges", "path", "into", "sequence", "ensuring", "all", "new", "vertices", "precedes", "the", "existing", "ones", "in", "the", "adjacency", "list", "." ]
[ "\"\"\" Merges path into sequence, ensuring all new vertices precedes the\n existing ones in the adjacency list.\n \"\"\"", "# Check if any previous vertices have this path and move index" ]
[ { "param": "graph", "type": null }, { "param": "sequence", "type": null }, { "param": "path", "type": null }, { "param": "index", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "graph", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "sequence", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "path", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "index", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _merge_backward(graph, sequence, path, index): i = index for v in path[::-1]: if v in sequence: continue after = [i - j for j, w in enumerate(sequence[i::-1]) if v in graph.preceding(w)] if after: i = after[-1] sequence.insert(i, v)
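The backward counterpart, again with a stub exposing only the `preceding` method:

class StubGraph:
    def __init__(self, pre):
        self._pre = pre

    def preceding(self, v):
        return self._pre.get(v, set())

g = StubGraph({"b": {"a"}, "x": {"a"}, "c": {"b", "x"}})
seq = ["a", "b", "c"]
_merge_backward(g, seq, ["a", "x", "c"], 2)
# seq is now ["a", "b", "x", "c"]: "x" slots in just before "c", which it precedes.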
b8af42c3035877a1083808f4d71d1c2518314a01
macph/nextbus
nextbus/graph.py
[ "MIT" ]
Python
_count_cycles
<not_specific>
def _count_cycles(graph, sequence): """ Counts number of cycles in a sequence by checking the preceding nodes for every vertex in order. """ cycles = set() indices = {v: i for i, v in enumerate(sequence)} for v in sequence: cycles |= {(u, v) for u in graph.preceding(v) if indices[u] > indices[v]} return cycles
Counts number of cycles in a sequence by checking the preceding nodes for every vertex in order.
Counts number of cycles in a sequence by checking the preceding nodes for every vertex in order.
[ "Counts", "number", "of", "cycles", "in", "a", "sequence", "by", "checking", "the", "preceding", "nodes", "for", "every", "vertex", "in", "order", "." ]
def _count_cycles(graph, sequence): cycles = set() indices = {v: i for i, v in enumerate(sequence)} for v in sequence: cycles |= {(u, v) for u in graph.preceding(v) if indices[u] > indices[v]} return cycles
[ "def", "_count_cycles", "(", "graph", ",", "sequence", ")", ":", "cycles", "=", "set", "(", ")", "indices", "=", "{", "v", ":", "i", "for", "i", ",", "v", "in", "enumerate", "(", "sequence", ")", "}", "for", "v", "in", "sequence", ":", "cycles", "|=", "{", "(", "u", ",", "v", ")", "for", "u", "in", "graph", ".", "preceding", "(", "v", ")", "if", "indices", "[", "u", "]", ">", "indices", "[", "v", "]", "}", "return", "cycles" ]
Counts number of cycles in a sequence by checking the preceding nodes for every vertex in order.
[ "Counts", "number", "of", "cycles", "in", "a", "sequence", "by", "checking", "the", "preceding", "nodes", "for", "every", "vertex", "in", "order", "." ]
[ "\"\"\" Counts number of cycles in a sequence by checking the preceding nodes\n for every vertex in order.\n \"\"\"" ]
[ { "param": "graph", "type": null }, { "param": "sequence", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "graph", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "sequence", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _count_cycles(graph, sequence): cycles = set() indices = {v: i for i, v in enumerate(sequence)} for v in sequence: cycles |= {(u, v) for u in graph.preceding(v) if indices[u] > indices[v]} return cycles
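Despite the name, the function returns the set of offending back edges rather than an integer; a small demonstration:

class StubGraph:
    def preceding(self, v):
        return {"a": {"c"}, "b": {"a"}, "c": {"b"}}.get(v, set())

_count_cycles(StubGraph(), ["a", "b", "c"])
# {("c", "a")}: the edge c -> a points backwards in this ordering, so taking
# len() of the result gives the actual count.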
b8af42c3035877a1083808f4d71d1c2518314a01
macph/nextbus
nextbus/graph.py
[ "MIT" ]
Python
_median
<not_specific>
def _median(collection): """ Calculates the median of an collection, eg a list. """ ordered = sorted(collection) len_ = len(collection) middle = len_ // 2 if not ordered: return -1 elif len_ % 2 == 1: return ordered[middle] else: return (ordered[middle - 1] + ordered[middle]) / 2
Calculates the median of a collection, e.g. a list.
Calculates the median of a collection, e.g. a list.
[ "Calculates", "the", "median", "of", "an", "collection", "eg", "a", "list", "." ]
def _median(collection): ordered = sorted(collection) len_ = len(collection) middle = len_ // 2 if not ordered: return -1 elif len_ % 2 == 1: return ordered[middle] else: return (ordered[middle - 1] + ordered[middle]) / 2
[ "def", "_median", "(", "collection", ")", ":", "ordered", "=", "sorted", "(", "collection", ")", "len_", "=", "len", "(", "collection", ")", "middle", "=", "len_", "//", "2", "if", "not", "ordered", ":", "return", "-", "1", "elif", "len_", "%", "2", "==", "1", ":", "return", "ordered", "[", "middle", "]", "else", ":", "return", "(", "ordered", "[", "middle", "-", "1", "]", "+", "ordered", "[", "middle", "]", ")", "/", "2" ]
Calculates the median of a collection, e.g. a list.
[ "Calculates", "the", "median", "of", "an", "collection", "eg", "a", "list", "." ]
[ "\"\"\" Calculates the median of an collection, eg a list. \"\"\"" ]
[ { "param": "collection", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "collection", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _median(collection): ordered = sorted(collection) len_ = len(collection) middle = len_ // 2 if not ordered: return -1 elif len_ % 2 == 1: return ordered[middle] else: return (ordered[middle - 1] + ordered[middle]) / 2
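A quick sanity check, including the empty-input sentinel (the stdlib's `statistics.median` would raise an exception there instead):

_median([5, 1, 3])     # 3
_median([4, 1, 3, 2])  # 2.5
_median([])            # -1, a sentinel rather than an exception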
b8af42c3035877a1083808f4d71d1c2518314a01
macph/nextbus
nextbus/graph.py
[ "MIT" ]
Python
_transpose_order
<not_specific>
def _transpose_order(row, forward=True): """ Swaps lines within a row to see if the number of crossings improve. """ len_ = len(row.end) if forward else len(row.start) order = list(range(len_)) if len_ < 2: return order crossings = row.count_crossings() improved = True while improved: improved = False for i in range(len_ - 1): new_order = order[:i] + [order[i + 1], order[i]] + order[i + 2:] if forward: temp = [set(row.end[j]) for j in new_order] new_crossings = row.count_crossings(end=temp) else: temp = [set(row.start[j]) for j in new_order] new_crossings = row.count_crossings(start=temp) if new_crossings < crossings: order = new_order crossings = new_crossings improved = True return order
Swaps lines within a row to see if the number of crossings improves.
Swaps lines within a row to see if the number of crossings improves.
[ "Swaps", "lines", "within", "a", "row", "to", "see", "if", "the", "number", "of", "crossings", "improve", "." ]
def _transpose_order(row, forward=True): len_ = len(row.end) if forward else len(row.start) order = list(range(len_)) if len_ < 2: return order crossings = row.count_crossings() improved = True while improved: improved = False for i in range(len_ - 1): new_order = order[:i] + [order[i + 1], order[i]] + order[i + 2:] if forward: temp = [set(row.end[j]) for j in new_order] new_crossings = row.count_crossings(end=temp) else: temp = [set(row.start[j]) for j in new_order] new_crossings = row.count_crossings(start=temp) if new_crossings < crossings: order = new_order crossings = new_crossings improved = True return order
[ "def", "_transpose_order", "(", "row", ",", "forward", "=", "True", ")", ":", "len_", "=", "len", "(", "row", ".", "end", ")", "if", "forward", "else", "len", "(", "row", ".", "start", ")", "order", "=", "list", "(", "range", "(", "len_", ")", ")", "if", "len_", "<", "2", ":", "return", "order", "crossings", "=", "row", ".", "count_crossings", "(", ")", "improved", "=", "True", "while", "improved", ":", "improved", "=", "False", "for", "i", "in", "range", "(", "len_", "-", "1", ")", ":", "new_order", "=", "order", "[", ":", "i", "]", "+", "[", "order", "[", "i", "+", "1", "]", ",", "order", "[", "i", "]", "]", "+", "order", "[", "i", "+", "2", ":", "]", "if", "forward", ":", "temp", "=", "[", "set", "(", "row", ".", "end", "[", "j", "]", ")", "for", "j", "in", "new_order", "]", "new_crossings", "=", "row", ".", "count_crossings", "(", "end", "=", "temp", ")", "else", ":", "temp", "=", "[", "set", "(", "row", ".", "start", "[", "j", "]", ")", "for", "j", "in", "new_order", "]", "new_crossings", "=", "row", ".", "count_crossings", "(", "start", "=", "temp", ")", "if", "new_crossings", "<", "crossings", ":", "order", "=", "new_order", "crossings", "=", "new_crossings", "improved", "=", "True", "return", "order" ]
Swaps lines within a row to see if the number of crossings improves.
[ "Swaps", "lines", "within", "a", "row", "to", "see", "if", "the", "number", "of", "crossings", "improve", "." ]
[ "\"\"\" Swaps lines within a row to see if the number of crossings improve. \"\"\"" ]
[ { "param": "row", "type": null }, { "param": "forward", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "row", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "forward", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _transpose_order(row, forward=True): len_ = len(row.end) if forward else len(row.start) order = list(range(len_)) if len_ < 2: return order crossings = row.count_crossings() improved = True while improved: improved = False for i in range(len_ - 1): new_order = order[:i] + [order[i + 1], order[i]] + order[i + 2:] if forward: temp = [set(row.end[j]) for j in new_order] new_crossings = row.count_crossings(end=temp) else: temp = [set(row.start[j]) for j in new_order] new_crossings = row.count_crossings(start=temp) if new_crossings < crossings: order = new_order crossings = new_crossings improved = True return order
b8af42c3035877a1083808f4d71d1c2518314a01
macph/nextbus
nextbus/graph.py
[ "MIT" ]
Python
_memoize_graph
<not_specific>
def _memoize_graph(graph, method): """ Wraps graph method in a function that remembers adjacency list and last result. """ adj = None result = None @functools.wraps(method) def _method(*args, **kwargs): nonlocal adj, result new_adj = graph.adj if adj != new_adj: result = method(*args, **kwargs) adj = new_adj return result return _method
Wraps graph method in a function that remembers adjacency list and last result.
Wraps graph method in a function that remembers adjacency list and last result.
[ "Wraps", "graph", "method", "in", "a", "function", "that", "remembers", "adjacency", "list", "and", "last", "result", "." ]
def _memoize_graph(graph, method): adj = None result = None @functools.wraps(method) def _method(*args, **kwargs): nonlocal adj, result new_adj = graph.adj if adj != new_adj: result = method(*args, **kwargs) adj = new_adj return result return _method
[ "def", "_memoize_graph", "(", "graph", ",", "method", ")", ":", "adj", "=", "None", "result", "=", "None", "@", "functools", ".", "wraps", "(", "method", ")", "def", "_method", "(", "*", "args", ",", "**", "kwargs", ")", ":", "nonlocal", "adj", ",", "result", "new_adj", "=", "graph", ".", "adj", "if", "adj", "!=", "new_adj", ":", "result", "=", "method", "(", "*", "args", ",", "**", "kwargs", ")", "adj", "=", "new_adj", "return", "result", "return", "_method" ]
Wraps graph method in a function that remembers adjacency list and last result.
[ "Wraps", "graph", "method", "in", "a", "function", "that", "remembers", "adjacency", "list", "and", "last", "result", "." ]
[ "\"\"\" Wraps graph method in a function that remembers adjacency list and last\n result.\n \"\"\"" ]
[ { "param": "graph", "type": null }, { "param": "method", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "graph", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "method", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import functools def _memoize_graph(graph, method): adj = None result = None @functools.wraps(method) def _method(*args, **kwargs): nonlocal adj, result new_adj = graph.adj if adj != new_adj: result = method(*args, **kwargs) adj = new_adj return result return _method
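A sketch of the caching behaviour with a stub graph; note the memoization relies on `graph.adj` comparing unequal after a change, so mutating the same dict in place would go unnoticed (presumably the real `Graph.adj` builds a fresh dict each time):

class StubGraph:
    def __init__(self):
        self.adj = {}
        self.calls = 0
        self.sequence = _memoize_graph(self, self._sequence)

    def _sequence(self):
        self.calls += 1
        return sorted(self.adj)

g = StubGraph()
g.adj = {"a": {"b"}}
g.sequence(); g.sequence()        # computed once, then served from cache
g.adj = {"a": {"b"}, "b": set()}  # rebinding adj invalidates the cache
g.sequence()
assert g.calls == 2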
b8af42c3035877a1083808f4d71d1c2518314a01
macph/nextbus
nextbus/graph.py
[ "MIT" ]
Python
from_adj
<not_specific>
def from_adj(cls, adj_list): """ Creates graph from adjacency list as a dict of vertices and iterables of following vertices. """ adj = {} for start, end in adj_list.items(): adj[start] = set(end) for v in set().union(*adj_list.values()): if v not in adj: adj[v] = set() new_graph = cls() new_graph._v = adj return new_graph
Creates graph from adjacency list as a dict of vertices and iterables of following vertices.
Creates graph from adjacency list as a dict of vertices and iterables of following vertices.
[ "Creates", "graph", "from", "adjacency", "list", "as", "a", "dict", "of", "vertices", "and", "iterables", "of", "following", "vertices", "." ]
def from_adj(cls, adj_list): adj = {} for start, end in adj_list.items(): adj[start] = set(end) for v in set().union(*adj_list.values()): if v not in adj: adj[v] = set() new_graph = cls() new_graph._v = adj return new_graph
[ "def", "from_adj", "(", "cls", ",", "adj_list", ")", ":", "adj", "=", "{", "}", "for", "start", ",", "end", "in", "adj_list", ".", "items", "(", ")", ":", "adj", "[", "start", "]", "=", "set", "(", "end", ")", "for", "v", "in", "set", "(", ")", ".", "union", "(", "*", "adj_list", ".", "values", "(", ")", ")", ":", "if", "v", "not", "in", "adj", ":", "adj", "[", "v", "]", "=", "set", "(", ")", "new_graph", "=", "cls", "(", ")", "new_graph", ".", "_v", "=", "adj", "return", "new_graph" ]
Creates graph from adjacency list as a dict of vertices and iterables of following vertices.
[ "Creates", "graph", "from", "adjacency", "list", "as", "a", "dict", "of", "vertices", "and", "iterables", "of", "following", "vertices", "." ]
[ "\"\"\" Creates graph from adjacency list as a dict of vertices and\n iterables of following vertices.\n \"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "adj_list", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "adj_list", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def from_adj(cls, adj_list): adj = {} for start, end in adj_list.items(): adj[start] = set(end) for v in set().union(*adj_list.values()): if v not in adj: adj[v] = set() new_graph = cls() new_graph._v = adj return new_graph
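A minimal host class for the classmethod (the real `Graph` presumably defines far more than `_v`):

class Graph:
    def __init__(self):
        self._v = {}

    from_adj = classmethod(from_adj)

g = Graph.from_adj({1: [2, 3], 2: [3]})
assert g._v == {1: {2, 3}, 2: {3}, 3: set()}  # vertex 3 gains an empty entry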
af385c977a333a00ce6bbe119e31f05e674dca99
macph/nextbus
nextbus/models/__init__.py
[ "MIT" ]
Python
define_collation
null
def define_collation(_, connection, **kw): """ Define the numeric collation required for some text columns. """ connection.execute( "CREATE COLLATION IF NOT EXISTS utf8_numeric " "(provider = icu, locale = 'en@colNumeric=yes')" )
Define the numeric collation required for some text columns.
Define the numeric collation required for some text columns.
[ "Define", "the", "numeric", "collation", "required", "for", "some", "text", "columns", "." ]
def define_collation(_, connection, **kw): connection.execute( "CREATE COLLATION IF NOT EXISTS utf8_numeric " "(provider = icu, locale = 'en@colNumeric=yes')" )
[ "def", "define_collation", "(", "_", ",", "connection", ",", "**", "kw", ")", ":", "connection", ".", "execute", "(", "\"CREATE COLLATION IF NOT EXISTS utf8_numeric \"", "\"(provider = icu, locale = 'en@colNumeric=yes')\"", ")" ]
Define the numeric collation required for some text columns.
[ "Define", "the", "numeric", "collation", "required", "for", "some", "text", "columns", "." ]
[ "\"\"\" Define the numeric collation required for some text columns. \"\"\"" ]
[ { "param": "_", "type": null }, { "param": "connection", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "_", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "connection", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def define_collation(_, connection, **kw): connection.execute( "CREATE COLLATION IF NOT EXISTS utf8_numeric " "(provider = icu, locale = 'en@colNumeric=yes')" )
215e59d85b1b9e6cbe706aaa01863855ea64dada
macph/nextbus
nextbus/resources.py
[ "MIT" ]
Python
_list_geojson
<not_specific>
def _list_geojson(list_stops): """ Creates a list of stop data in GeoJSON format. :param list_stops: List of StopPoint objects. :returns: JSON-serializable dict. """ geojson = { "type": "FeatureCollection", "features": [s.to_geojson() for s in list_stops] } return geojson
Creates a list of stop data in GeoJSON format. :param list_stops: List of StopPoint objects. :returns: JSON-serializable dict.
Creates a list of stop data in GeoJSON format.
[ "Creates", "a", "list", "of", "stop", "data", "in", "GeoJSON", "format", "." ]
def _list_geojson(list_stops): geojson = { "type": "FeatureCollection", "features": [s.to_geojson() for s in list_stops] } return geojson
[ "def", "_list_geojson", "(", "list_stops", ")", ":", "geojson", "=", "{", "\"type\"", ":", "\"FeatureCollection\"", ",", "\"features\"", ":", "[", "s", ".", "to_geojson", "(", ")", "for", "s", "in", "list_stops", "]", "}", "return", "geojson" ]
Creates a list of stop data in GeoJSON format.
[ "Creates", "a", "list", "of", "stop", "data", "in", "GeoJSON", "format", "." ]
[ "\"\"\" Creates a list of stop data in GeoJSON format.\n\n :param list_stops: List of StopPoint objects.\n :returns: JSON-serializable dict.\n \"\"\"" ]
[ { "param": "list_stops", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "list_stops", "type": null, "docstring": "List of StopPoint objects.", "docstring_tokens": [ "List", "of", "StopPoint", "objects", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _list_geojson(list_stops): geojson = { "type": "FeatureCollection", "features": [s.to_geojson() for s in list_stops] } return geojson
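A stand-in stop object is enough to see the output shape (the real `StopPoint.to_geojson` presumably returns a GeoJSON Feature dict; the fields below are illustrative only):

from types import SimpleNamespace

stop = SimpleNamespace(to_geojson=lambda: {
    "type": "Feature",
    "geometry": {"type": "Point", "coordinates": [-2.24, 53.48]},
    "properties": {"name": "Example stop"},
})
_list_geojson([stop])
# {"type": "FeatureCollection", "features": [<the feature above>]}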
5d7302ec41cec7840082d2f8888a4856f61a9e5b
macph/nextbus
nextbus/timetable.py
[ "MIT" ]
Python
from_row
<not_specific>
def from_row(cls, row): """ Creates TimetableStop instance from row returned from query. """ return cls( row.stop_point_ref, row.arrive, row.depart, row.timing_point, row.utc_arrive, row.utc_depart, )
Creates TimetableStop instance from row returned from query.
Creates TimetableStop instance from row returned from query.
[ "Creates", "TimetableStop", "instance", "from", "row", "returned", "from", "query", "." ]
def from_row(cls, row): return cls( row.stop_point_ref, row.arrive, row.depart, row.timing_point, row.utc_arrive, row.utc_depart, )
[ "def", "from_row", "(", "cls", ",", "row", ")", ":", "return", "cls", "(", "row", ".", "stop_point_ref", ",", "row", ".", "arrive", ",", "row", ".", "depart", ",", "row", ".", "timing_point", ",", "row", ".", "utc_arrive", ",", "row", ".", "utc_depart", ",", ")" ]
Creates TimetableStop instance from row returned from query.
[ "Creates", "TimetableStop", "instance", "from", "row", "returned", "from", "query", "." ]
[ "\"\"\" Creates TimetableStop instance from row returned from query. \"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "row", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "row", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def from_row(cls, row): return cls( row.stop_point_ref, row.arrive, row.depart, row.timing_point, row.utc_arrive, row.utc_depart, )
3d75a9c4b8e0c48f6643a1588804a95005dc7426
macph/nextbus
nextbus/populate/utils.py
[ "MIT" ]
Python
xml_as_dict
<not_specific>
def xml_as_dict(element): """ Creates a dictionary from a flat XML element. :param element: XML Element object :returns: A dictionary with keys matching subelement tags in the element. """ data = {} for e in element: if e.tag in data: raise ValueError(f"Multiple elements have the same tag {e.tag!r}.") default = e.get("default", None) data[e.tag] = default if e.text is None else e.text return data
Creates a dictionary from a flat XML element. :param element: XML Element object :returns: A dictionary with keys matching subelement tags in the element.
Creates a dictionary from a flat XML element.
[ "Creates", "a", "dictionary", "from", "a", "flat", "XML", "element", "." ]
def xml_as_dict(element): data = {} for e in element: if e.tag in data: raise ValueError(f"Multiple elements have the same tag {e.tag!r}.") default = e.get("default", None) data[e.tag] = default if e.text is None else e.text return data
[ "def", "xml_as_dict", "(", "element", ")", ":", "data", "=", "{", "}", "for", "e", "in", "element", ":", "if", "e", ".", "tag", "in", "data", ":", "raise", "ValueError", "(", "f\"Multiple elements have the same tag {e.tag!r}.\"", ")", "default", "=", "e", ".", "get", "(", "\"default\"", ",", "None", ")", "data", "[", "e", ".", "tag", "]", "=", "default", "if", "e", ".", "text", "is", "None", "else", "e", ".", "text", "return", "data" ]
Creates a dictionary from a flat XML element.
[ "Creates", "a", "dictionary", "from", "a", "flat", "XML", "element", "." ]
[ "\"\"\" Creates a dictionary from a flat XML element.\n\n :param element: XML Element object\n :returns: A dictionary with keys matching subelement tags in the\n element.\n \"\"\"" ]
[ { "param": "element", "type": null } ]
{ "returns": [ { "docstring": "A dictionary with keys matching subelement tags in the\nelement.", "docstring_tokens": [ "A", "dictionary", "with", "keys", "matching", "subelement", "tags", "in", "the", "element", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "element", "type": null, "docstring": "XML Element object", "docstring_tokens": [ "XML", "Element", "object" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def xml_as_dict(element): data = {} for e in element: if e.tag in data: raise ValueError(f"Multiple elements have the same tag {e.tag!r}.") default = e.get("default", None) data[e.tag] = default if e.text is None else e.text return data
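A short example with `xml.etree.ElementTree`; note the fallback reads an attribute literally named `default`:

import xml.etree.ElementTree as ET

elem = ET.fromstring(
    "<stop><name>High Street</name><indicator default='none'/></stop>"
)
xml_as_dict(elem)
# {"name": "High Street", "indicator": "none"}: the empty element falls back
# to its "default" attribute, and a repeated tag would raise ValueError.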
3d75a9c4b8e0c48f6643a1588804a95005dc7426
macph/nextbus
nextbus/populate/utils.py
[ "MIT" ]
Python
_convert_to_text
<not_specific>
def _convert_to_text(result): """ Takes first element from list and returns text or None. """ if isinstance(result, list) and not result: node = None elif isinstance(result, list) and len(result) == 1: node = result[0] elif isinstance(result, list): raise ValueError("XPath query returned multiple elements.") else: node = result try: return node.text except AttributeError: return node
Takes first element from list and returns text or None.
Takes first element from list and returns text or None.
[ "Takes", "first", "element", "from", "list", "and", "returns", "text", "or", "None", "." ]
def _convert_to_text(result): if isinstance(result, list) and not result: node = None elif isinstance(result, list) and len(result) == 1: node = result[0] elif isinstance(result, list): raise ValueError("XPath query returned multiple elements.") else: node = result try: return node.text except AttributeError: return node
[ "def", "_convert_to_text", "(", "result", ")", ":", "if", "isinstance", "(", "result", ",", "list", ")", "and", "not", "result", ":", "node", "=", "None", "elif", "isinstance", "(", "result", ",", "list", ")", "and", "len", "(", "result", ")", "==", "1", ":", "node", "=", "result", "[", "0", "]", "elif", "isinstance", "(", "result", ",", "list", ")", ":", "raise", "ValueError", "(", "\"XPath query returned multiple elements.\"", ")", "else", ":", "node", "=", "result", "try", ":", "return", "node", ".", "text", "except", "AttributeError", ":", "return", "node" ]
Takes first element from list and returns text or None.
[ "Takes", "first", "element", "from", "list", "and", "returns", "text", "or", "None", "." ]
[ "\"\"\" Takes first element from list and returns text or None. \"\"\"" ]
[ { "param": "result", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "result", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _convert_to_text(result): if isinstance(result, list) and not result: node = None elif isinstance(result, list) and len(result) == 1: node = result[0] elif isinstance(result, list): raise ValueError("XPath query returned multiple elements.") else: node = result try: return node.text except AttributeError: return node
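Behaviour at a glance:

_convert_to_text([])        # None for an empty XPath result
_convert_to_text(["text"])  # "text": plain strings have no .text attribute
# A single-element list of Elements yields that element's .text, while a
# multi-element list raises ValueError.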
3d75a9c4b8e0c48f6643a1588804a95005dc7426
macph/nextbus
nextbus/populate/utils.py
[ "MIT" ]
Python
capitalize
<not_specific>
def capitalize(_, text): """ Capitalises every word in a string, include these enclosed within brackets and excluding apostrophes. """ list_words = text.lower().split() for _w, word in enumerate(list_words): for _c, char in enumerate(word): if char.isalpha(): list_words[_w] = word[:_c] + char.upper() + word[_c+1:] break return " ".join(list_words)
Capitalises every word in a string, including those enclosed within brackets and excluding apostrophes.
Capitalises every word in a string, including those enclosed within brackets and excluding apostrophes.
[ "Capitalises", "every", "word", "in", "a", "string", "include", "these", "enclosed", "within", "brackets", "and", "excluding", "apostrophes", "." ]
def capitalize(_, text): list_words = text.lower().split() for _w, word in enumerate(list_words): for _c, char in enumerate(word): if char.isalpha(): list_words[_w] = word[:_c] + char.upper() + word[_c+1:] break return " ".join(list_words)
[ "def", "capitalize", "(", "_", ",", "text", ")", ":", "list_words", "=", "text", ".", "lower", "(", ")", ".", "split", "(", ")", "for", "_w", ",", "word", "in", "enumerate", "(", "list_words", ")", ":", "for", "_c", ",", "char", "in", "enumerate", "(", "word", ")", ":", "if", "char", ".", "isalpha", "(", ")", ":", "list_words", "[", "_w", "]", "=", "word", "[", ":", "_c", "]", "+", "char", ".", "upper", "(", ")", "+", "word", "[", "_c", "+", "1", ":", "]", "break", "return", "\" \"", ".", "join", "(", "list_words", ")" ]
Capitalises every word in a string, including those enclosed within brackets and excluding apostrophes.
[ "Capitalises", "every", "word", "in", "a", "string", "include", "these", "enclosed", "within", "brackets", "and", "excluding", "apostrophes", "." ]
[ "\"\"\" Capitalises every word in a string, include these enclosed within\n brackets and excluding apostrophes.\n \"\"\"" ]
[ { "param": "_", "type": null }, { "param": "text", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "_", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "text", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def capitalize(_, text): list_words = text.lower().split() for _w, word in enumerate(list_words): for _c, char in enumerate(word): if char.isalpha(): list_words[_w] = word[:_c] + char.upper() + word[_c+1:] break return " ".join(list_words)
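The unused first argument suggests these populate helpers are registered as XPath extension functions, where the first parameter is a context object; its value is ignored, so `None` works for a direct call:

capitalize(None, "st james's park (north)")
# "St James's Park (North)": the "(" is skipped, the apostrophe left alone.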
3d75a9c4b8e0c48f6643a1588804a95005dc7426
macph/nextbus
nextbus/populate/utils.py
[ "MIT" ]
Python
_iter_every
null
def _iter_every(iterable, length): """ Generator for iterable split into lists with maximum length. """ iterator = iter(iterable) section = list(itertools.islice(iterator, length)) while section: yield section section = list(itertools.islice(iterator, length))
Generator for iterable split into lists with maximum length.
Generator for iterable split into lists with maximum length.
[ "Generator", "for", "iterable", "split", "into", "lists", "with", "maximum", "length", "." ]
def _iter_every(iterable, length): iterator = iter(iterable) section = list(itertools.islice(iterator, length)) while section: yield section section = list(itertools.islice(iterator, length))
[ "def", "_iter_every", "(", "iterable", ",", "length", ")", ":", "iterator", "=", "iter", "(", "iterable", ")", "section", "=", "list", "(", "itertools", ".", "islice", "(", "iterator", ",", "length", ")", ")", "while", "section", ":", "yield", "section", "section", "=", "list", "(", "itertools", ".", "islice", "(", "iterator", ",", "length", ")", ")" ]
Generator for iterable split into lists with maximum length.
[ "Generator", "for", "iterable", "split", "into", "lists", "with", "maximum", "length", "." ]
[ "\"\"\" Generator for iterable split into lists with maximum length. \"\"\"" ]
[ { "param": "iterable", "type": null }, { "param": "length", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "iterable", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "length", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import itertools def _iter_every(iterable, length): iterator = iter(iterable) section = list(itertools.islice(iterator, length)) while section: yield section section = list(itertools.islice(iterator, length))
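For example:

list(_iter_every(range(10), 4))
# [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]: the final chunk may be shorter.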
cbc58f1846fbb518eafcb252345529fc66de3f4b
macph/nextbus
nextbus/models/derived.py
[ "MIT" ]
Python
_apply_filters
<not_specific>
def _apply_filters(cls, match, groups=None, areas=None): """ Apply filters to a search expression if they are specified. :param match: The original query expression :param groups: Groups, eg 'stop' or 'area' :param areas: Administrative area codes to filter by :returns: Query expression with added filters, if any """ if groups is not None: if set(groups) - cls.GROUP_NAMES.keys(): raise ValueError(f"Groups {groups!r} contain invalid values.") tables = [] for g in groups: tables.extend(cls.GROUPS[g]) match = match.filter(cls.table_name.in_(tables)) if areas is not None: match = match.filter(cls.admin_areas.overlap(areas)) return match
Apply filters to a search expression if they are specified. :param match: The original query expression :param groups: Groups, eg 'stop' or 'area' :param areas: Administrative area codes to filter by :returns: Query expression with added filters, if any
Apply filters to a search expression if they are specified.
[ "Apply", "filters", "to", "a", "search", "expression", "if", "they", "are", "specified", "." ]
def _apply_filters(cls, match, groups=None, areas=None): if groups is not None: if set(groups) - cls.GROUP_NAMES.keys(): raise ValueError(f"Groups {groups!r} contain invalid values.") tables = [] for g in groups: tables.extend(cls.GROUPS[g]) match = match.filter(cls.table_name.in_(tables)) if areas is not None: match = match.filter(cls.admin_areas.overlap(areas)) return match
[ "def", "_apply_filters", "(", "cls", ",", "match", ",", "groups", "=", "None", ",", "areas", "=", "None", ")", ":", "if", "groups", "is", "not", "None", ":", "if", "set", "(", "groups", ")", "-", "cls", ".", "GROUP_NAMES", ".", "keys", "(", ")", ":", "raise", "ValueError", "(", "f\"Groups {groups!r} contain invalid values.\"", ")", "tables", "=", "[", "]", "for", "g", "in", "groups", ":", "tables", ".", "extend", "(", "cls", ".", "GROUPS", "[", "g", "]", ")", "match", "=", "match", ".", "filter", "(", "cls", ".", "table_name", ".", "in_", "(", "tables", ")", ")", "if", "areas", "is", "not", "None", ":", "match", "=", "match", ".", "filter", "(", "cls", ".", "admin_areas", ".", "overlap", "(", "areas", ")", ")", "return", "match" ]
Apply filters to a search expression if they are specified.
[ "Apply", "filters", "to", "a", "search", "expression", "if", "they", "are", "specified", "." ]
[ "\"\"\" Apply filters to a search expression if they are specified.\n\n :param match: The original query expression\n :param groups: Groups, eg 'stop' or 'area'\n :param areas: Administrative area codes to filter by\n :returns: Query expression with added filters, if any\n \"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "match", "type": null }, { "param": "groups", "type": null }, { "param": "areas", "type": null } ]
{ "returns": [ { "docstring": "Query expression with added filters, if any", "docstring_tokens": [ "Query", "expression", "with", "added", "filters", "if", "any" ], "type": null } ], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "match", "type": null, "docstring": "The original query expression", "docstring_tokens": [ "The", "original", "query", "expression" ], "default": null, "is_optional": null }, { "identifier": "groups", "type": null, "docstring": "Groups, eg 'stop' or 'area'", "docstring_tokens": [ "Groups", "eg", "'", "stop", "'", "or", "'", "area", "'" ], "default": null, "is_optional": null }, { "identifier": "areas", "type": null, "docstring": "Administrative area codes to filter by", "docstring_tokens": [ "Administrative", "area", "codes", "to", "filter", "by" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _apply_filters(cls, match, groups=None, areas=None): if groups is not None: if set(groups) - cls.GROUP_NAMES.keys(): raise ValueError(f"Groups {groups!r} contain invalid values.") tables = [] for g in groups: tables.extend(cls.GROUPS[g]) match = match.filter(cls.table_name.in_(tables)) if areas is not None: match = match.filter(cls.admin_areas.overlap(areas)) return match
9a6ea9567ca64c8e62bbebcb44c40fa08660c859
macph/nextbus
nextbus/forms.py
[ "MIT" ]
Python
_date_long_form
<not_specific>
def _date_long_form(date): """ Displays a date in long form, eg 'Monday 29th April 2019'. """ second_last = (date.day // 10) % 10 last = date.day % 10 if second_last != 1 and last == 1: ordinal = "st" elif second_last != 1 and last == 2: ordinal = "nd" elif second_last != 1 and last == 3: ordinal = "rd" else: ordinal = "th" return f"{date:%A} {date.day}{ordinal} {date:%B} {date.year}"
Displays a date in long form, eg 'Monday 29th April 2019'.
Displays a date in long form, eg 'Monday 29th April 2019'.
[ "Displays", "a", "date", "in", "long", "form", "eg", "'", "Monday", "29th", "April", "2019", "'", "." ]
def _date_long_form(date): second_last = (date.day // 10) % 10 last = date.day % 10 if second_last != 1 and last == 1: ordinal = "st" elif second_last != 1 and last == 2: ordinal = "nd" elif second_last != 1 and last == 3: ordinal = "rd" else: ordinal = "th" return f"{date:%A} {date.day}{ordinal} {date:%B} {date.year}"
[ "def", "_date_long_form", "(", "date", ")", ":", "second_last", "=", "(", "date", ".", "day", "//", "10", ")", "%", "10", "last", "=", "date", ".", "day", "%", "10", "if", "second_last", "!=", "1", "and", "last", "==", "1", ":", "ordinal", "=", "\"st\"", "elif", "second_last", "!=", "1", "and", "last", "==", "2", ":", "ordinal", "=", "\"nd\"", "elif", "second_last", "!=", "1", "and", "last", "==", "3", ":", "ordinal", "=", "\"rd\"", "else", ":", "ordinal", "=", "\"th\"", "return", "f\"{date:%A} {date.day}{ordinal} {date:%B} {date.year}\"" ]
Displays a date in long form, eg 'Monday 29th April 2019'.
[ "Displays", "a", "date", "in", "long", "form", "eg", "'", "Monday", "29th", "April", "2019", "'", "." ]
[ "\"\"\" Displays a date in long form, eg 'Monday 29th April 2019'. \"\"\"" ]
[ { "param": "date", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "date", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _date_long_form(date): second_last = (date.day // 10) % 10 last = date.day % 10 if second_last != 1 and last == 1: ordinal = "st" elif second_last != 1 and last == 2: ordinal = "nd" elif second_last != 1 and last == 3: ordinal = "rd" else: ordinal = "th" return f"{date:%A} {date.day}{ordinal} {date:%B} {date.year}"
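A few sample dates, covering the teens special case:

import datetime

_date_long_form(datetime.date(2019, 4, 29))  # "Monday 29th April 2019"
_date_long_form(datetime.date(2019, 4, 11))  # "Thursday 11th April 2019" (teens take "th")
_date_long_form(datetime.date(2019, 4, 22))  # "Monday 22nd April 2019"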
10ea12e47bbfc326a8eff02a32b765fe37a42b11
macph/nextbus
nextbus/populate/file_ops.py
[ "MIT" ]
Python
_file_name
<not_specific>
def _file_name(response): """ Gets the file name from the response header or the URL name. """ content = response.headers.get("content-disposition") if content and "filename" in content: file_name = re.search(r"filename=(.+)", content).group(1) else: # Get the path and split it to get the rightmost part path = urllib.parse.urlparse(response.url)[2] file_name = path.split("/")[-1] return file_name
Gets the file name from the response header or the URL name.
Gets the file name from the response header or the URL name.
[ "Gets", "the", "file", "name", "from", "the", "response", "header", "or", "the", "URL", "name", "." ]
def _file_name(response): content = response.headers.get("content-disposition") if content and "filename" in content: file_name = re.search(r"filename=(.+)", content).group(1) else: path = urllib.parse.urlparse(response.url)[2] file_name = path.split("/")[-1] return file_name
[ "def", "_file_name", "(", "response", ")", ":", "content", "=", "response", ".", "headers", ".", "get", "(", "\"content-disposition\"", ")", "if", "content", "and", "\"filename\"", "in", "content", ":", "file_name", "=", "re", ".", "search", "(", "r\"filename=(.+)\"", ",", "content", ")", ".", "group", "(", "1", ")", "else", ":", "path", "=", "urllib", ".", "parse", ".", "urlparse", "(", "response", ".", "url", ")", "[", "2", "]", "file_name", "=", "path", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", "return", "file_name" ]
Gets the file name from the response header or the URL name.
[ "Gets", "the", "file", "name", "from", "the", "response", "header", "or", "the", "URL", "name", "." ]
[ "\"\"\" Gets the file name from the response header or the URL name. \"\"\"", "# Get the path and split it to get the rightmost part" ]
[ { "param": "response", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "response", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re import urllib def _file_name(response): content = response.headers.get("content-disposition") if content and "filename" in content: file_name = re.search(r"filename=(.+)", content).group(1) else: path = urllib.parse.urlparse(response.url)[2] file_name = path.split("/")[-1] return file_name
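Stand-in response objects (requests-style, with `headers` and `url`) show both branches:

from types import SimpleNamespace

with_header = SimpleNamespace(
    headers={"content-disposition": "attachment; filename=naptan.zip"},
    url="http://example.com/download?id=1",
)
no_header = SimpleNamespace(headers={}, url="http://example.com/data/nptg.zip")

_file_name(with_header)  # "naptan.zip", taken from the header
_file_name(no_header)    # "nptg.zip", falling back to the URL path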
10ea12e47bbfc326a8eff02a32b765fe37a42b11
macph/nextbus
nextbus/populate/file_ops.py
[ "MIT" ]
Python
iter_archive
null
def iter_archive(archive): """ Generator function iterating over all files in a zipped archive file. The generator will open each file, yielding its file-like object. This file will be closed before opening the next file. When the iteration is finished the archive is closed. :param archive: Path to the archive file. :returns: File-like object for current archived file. """ zip_ = zipfile.ZipFile(archive) for name in zip_.namelist(): with zip_.open(name) as current: yield current zip_.close()
Generator function iterating over all files in a zipped archive file. The generator will open each file, yielding its file-like object. This file will be closed before opening the next file. When the iteration is finished the archive is closed. :param archive: Path to the archive file. :returns: File-like object for current archived file.
Generator function iterating over all files in a zipped archive file. The generator will open each file, yielding its file-like object. This file will be closed before opening the next file. When the iteration is finished the archive is closed.
[ "Generator", "function", "iterating", "over", "all", "files", "in", "a", "zipped", "archive", "file", ".", "The", "generator", "will", "open", "each", "file", "yielding", "its", "file", "-", "like", "object", ".", "This", "file", "will", "be", "closed", "before", "opening", "the", "next", "file", ".", "When", "the", "iteration", "is", "finished", "the", "archive", "is", "closed", "." ]
def iter_archive(archive): zip_ = zipfile.ZipFile(archive) for name in zip_.namelist(): with zip_.open(name) as current: yield current zip_.close()
[ "def", "iter_archive", "(", "archive", ")", ":", "zip_", "=", "zipfile", ".", "ZipFile", "(", "archive", ")", "for", "name", "in", "zip_", ".", "namelist", "(", ")", ":", "with", "zip_", ".", "open", "(", "name", ")", "as", "current", ":", "yield", "current", "zip_", ".", "close", "(", ")" ]
Generator function iterating over all files in a zipped archive file.
[ "Generator", "function", "iterating", "over", "all", "files", "in", "a", "zipped", "archive", "file", "." ]
[ "\"\"\" Generator function iterating over all files in a zipped archive file.\n\n The generator will open each file, yielding its file-like object. This\n file will be closed before opening the next file. When the iteration\n is finished the archive is closed.\n\n :param archive: Path to the archive file.\n :returns: File-like object for current archived file.\n \"\"\"" ]
[ { "param": "archive", "type": null } ]
{ "returns": [ { "docstring": "File-like object for current archived file.", "docstring_tokens": [ "File", "-", "like", "object", "for", "current", "archived", "file", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "archive", "type": null, "docstring": "Path to the archive file.", "docstring_tokens": [ "Path", "to", "the", "archive", "file", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import zipfile def iter_archive(archive): zip_ = zipfile.ZipFile(archive) for name in zip_.namelist(): with zip_.open(name) as current: yield current zip_.close()
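An in-memory archive demonstrates the iteration; note the archive itself is only closed once the generator has been exhausted:

import io
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as zf:
    zf.writestr("a.txt", "first")
    zf.writestr("b.txt", "second")

for f in iter_archive(buf):
    print(f.read())  # b"first", then b"second"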
6a1c3ea6d5dc629b0e1f2d46d2f4f96c249a68ef
mikeatm/pythontutorial
science/02_vectorize.py
[ "Info-ZIP" ]
Python
convert_to_polar
<not_specific>
def convert_to_polar(N): """ Generate a random set of N (x,y) cartesian coordinates, convert them to polar coordinates. Hints tuple (a,b) in python is a sequence of immutable data. """ cartesian_set = [] a = 0 while a < N : cartesian_set.append( tuple (random.sample(range(1, 100), 2) ) ) a+=1 polar_set = [] index = 0 for coordinate in cartesian_set: x,y = coordinate # coordinate is a tuple, we can split it to x, y r = math.sqrt(x**2 + y**2) theta = math.atan2(float(y), x) polar_set.append ( tuple([r,theta])) return polar_set
Generate a random set of N (x, y) Cartesian coordinates and convert them to polar coordinates. Hint: a tuple (a, b) in Python is a sequence of immutable data.
Generate a random set of N (x, y) Cartesian coordinates and convert them to polar coordinates. Hint: a tuple (a, b) in Python is a sequence of immutable data.
[ "Generate", "a", "random", "set", "of", "N", "(", "x", "y", ")", "cartesian", "coordinates", "convert", "them", "to", "polar", "coordinates", ".", "Hints", "tuple", "(", "a", "b", ")", "in", "python", "is", "a", "sequence", "of", "immutable", "data", "." ]
def convert_to_polar(N): cartesian_set = [] a = 0 while a < N : cartesian_set.append( tuple (random.sample(range(1, 100), 2) ) ) a+=1 polar_set = [] index = 0 for coordinate in cartesian_set: x,y = coordinate r = math.sqrt(x**2 + y**2) theta = math.atan2(float(y), x) polar_set.append ( tuple([r,theta])) return polar_set
[ "def", "convert_to_polar", "(", "N", ")", ":", "cartesian_set", "=", "[", "]", "a", "=", "0", "while", "a", "<", "N", ":", "cartesian_set", ".", "append", "(", "tuple", "(", "random", ".", "sample", "(", "range", "(", "1", ",", "100", ")", ",", "2", ")", ")", ")", "a", "+=", "1", "polar_set", "=", "[", "]", "index", "=", "0", "for", "coordinate", "in", "cartesian_set", ":", "x", ",", "y", "=", "coordinate", "r", "=", "math", ".", "sqrt", "(", "x", "**", "2", "+", "y", "**", "2", ")", "theta", "=", "math", ".", "atan2", "(", "float", "(", "y", ")", ",", "x", ")", "polar_set", ".", "append", "(", "tuple", "(", "[", "r", ",", "theta", "]", ")", ")", "return", "polar_set" ]
Generate a random set of N (x, y) Cartesian coordinates and convert them to polar coordinates.
[ "Generate", "a", "random", "set", "of", "N", "(", "x", "y", ")", "cartesian", "coordinates", "convert", "them", "to", "polar", "coordinates", "." ]
[ "\"\"\"\n Generate a random set of N (x,y) cartesian coordinates, \n convert them to polar coordinates.\n Hints\n tuple (a,b) in python is a sequence of immutable data. \n \"\"\"", "# coordinate is a tuple, we can split it to x, y" ]
[ { "param": "N", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "N", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import math import random def convert_to_polar(N): cartesian_set = [] a = 0 while a < N : cartesian_set.append( tuple (random.sample(range(1, 100), 2) ) ) a+=1 polar_set = [] index = 0 for coordinate in cartesian_set: x,y = coordinate r = math.sqrt(x**2 + y**2) theta = math.atan2(float(y), x) polar_set.append ( tuple([r,theta])) return polar_set
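Given the file name `science/02_vectorize.py`, the exercise presumably asks for a vectorised version; a sketch with NumPy (the function name and the use of `randint` are our assumptions — unlike `random.sample`, values within a pair may repeat):

import numpy as np

def convert_to_polar_np(n):
    # Draw all n (x, y) pairs in one call instead of looping.
    xy = np.random.randint(1, 100, size=(n, 2))
    r = np.hypot(xy[:, 0], xy[:, 1])
    theta = np.arctan2(xy[:, 1], xy[:, 0])
    return np.column_stack((r, theta))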
b86d5068669ed95198fee33bb9790d5ef3512d27
tensorlayer/TLXZoo
tlxzoo/module/unet/unet.py
[ "Apache-2.0" ]
Python
crop_to_shape
<not_specific>
def crop_to_shape(data, shape: Tuple[int, int, int]): """ Crops the array to the given image shape by removing the border :param data: the array to crop, expects a tensor of shape [batches, nx, ny, channels] :param shape: the target shape [batches, nx, ny, channels] """ diff_nx = (data.shape[0] - shape[0]) diff_ny = (data.shape[1] - shape[1]) if diff_nx == 0 and diff_ny == 0: return data offset_nx_left = diff_nx // 2 offset_nx_right = diff_nx - offset_nx_left offset_ny_left = diff_ny // 2 offset_ny_right = diff_ny - offset_ny_left cropped = data[offset_nx_left:(-offset_nx_right), offset_ny_left:(-offset_ny_right)] assert cropped.shape[0] == shape[0] assert cropped.shape[1] == shape[1] return cropped
Crops the array to the given image shape by removing the border :param data: the array to crop, expects a tensor of shape [nx, ny, channels] :param shape: the target shape [nx, ny, channels]
Crops the array to the given image shape by removing the border
[ "Crops", "the", "array", "to", "the", "given", "image", "shape", "by", "removing", "the", "border" ]
def crop_to_shape(data, shape: Tuple[int, int, int]): diff_nx = (data.shape[0] - shape[0]) diff_ny = (data.shape[1] - shape[1]) if diff_nx == 0 and diff_ny == 0: return data offset_nx_left = diff_nx // 2 offset_nx_right = diff_nx - offset_nx_left offset_ny_left = diff_ny // 2 offset_ny_right = diff_ny - offset_ny_left cropped = data[offset_nx_left:(-offset_nx_right), offset_ny_left:(-offset_ny_right)] assert cropped.shape[0] == shape[0] assert cropped.shape[1] == shape[1] return cropped
[ "def", "crop_to_shape", "(", "data", ",", "shape", ":", "Tuple", "[", "int", ",", "int", ",", "int", "]", ")", ":", "diff_nx", "=", "(", "data", ".", "shape", "[", "0", "]", "-", "shape", "[", "0", "]", ")", "diff_ny", "=", "(", "data", ".", "shape", "[", "1", "]", "-", "shape", "[", "1", "]", ")", "if", "diff_nx", "==", "0", "and", "diff_ny", "==", "0", ":", "return", "data", "offset_nx_left", "=", "diff_nx", "//", "2", "offset_nx_right", "=", "diff_nx", "-", "offset_nx_left", "offset_ny_left", "=", "diff_ny", "//", "2", "offset_ny_right", "=", "diff_ny", "-", "offset_ny_left", "cropped", "=", "data", "[", "offset_nx_left", ":", "(", "-", "offset_nx_right", ")", ",", "offset_ny_left", ":", "(", "-", "offset_ny_right", ")", "]", "assert", "cropped", ".", "shape", "[", "0", "]", "==", "shape", "[", "0", "]", "assert", "cropped", ".", "shape", "[", "1", "]", "==", "shape", "[", "1", "]", "return", "cropped" ]
Crops the array to the given image shape by removing the border
[ "Crops", "the", "array", "to", "the", "given", "image", "shape", "by", "removing", "the", "border" ]
[ "\"\"\"\n Crops the array to the given image shape by removing the border\n\n :param data: the array to crop, expects a tensor of shape [batches, nx, ny, channels]\n :param shape: the target shape [batches, nx, ny, channels]\n \"\"\"" ]
[ { "param": "data", "type": null }, { "param": "shape", "type": "Tuple[int, int, int]" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "data", "type": null, "docstring": "the array to crop, expects a tensor of shape [batches, nx, ny, channels]", "docstring_tokens": [ "the", "array", "to", "crop", "expects", "a", "tensor", "of", "shape", "[", "batches", "nx", "ny", "channels", "]" ], "default": null, "is_optional": null }, { "identifier": "shape", "type": "Tuple[int, int, int]", "docstring": "the target shape [batches, nx, ny, channels]", "docstring_tokens": [ "the", "target", "shape", "[", "batches", "nx", "ny", "channels", "]" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def crop_to_shape(data, shape: Tuple[int, int, int]): diff_nx = (data.shape[0] - shape[0]) diff_ny = (data.shape[1] - shape[1]) if diff_nx == 0 and diff_ny == 0: return data offset_nx_left = diff_nx // 2 offset_nx_right = diff_nx - offset_nx_left offset_ny_left = diff_ny // 2 offset_ny_right = diff_ny - offset_ny_left cropped = data[offset_nx_left:(-offset_nx_right), offset_ny_left:(-offset_ny_right)] assert cropped.shape[0] == shape[0] assert cropped.shape[1] == shape[1] return cropped
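Despite the docstring's [batches, nx, ny, channels] wording, the arithmetic above crops axes 0 and 1, so a single [nx, ny, channels] array is what actually matches the code; note also that if exactly one of the two axes already matches, the resulting -0 slice empties that axis. A usage sketch under those observations, assuming numpy and that crop_to_shape is in scope (the record also omits the `from typing import Tuple` its annotation needs):

import numpy as np

data = np.zeros((572, 572, 3))                # hypothetical U-Net-sized feature map
cropped = crop_to_shape(data, (388, 388, 3))  # trims (572-388)//2 = 92 pixels per border
print(cropped.shape)                          # (388, 388, 3)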
a96271b249ae82bf9d2ee9253de822fda9bf61e8
tensorlayer/TLXZoo
tlxzoo/module/wav2vec2/transform.py
[ "Apache-2.0" ]
Python
clean_up_tokenization
str
def clean_up_tokenization(out_string: str) -> str: """ Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms. Args: out_string (:obj:`str`): The text to clean up. Returns: :obj:`str`: The cleaned-up string. """ out_string = ( out_string.replace(" .", ".") .replace(" ?", "?") .replace(" !", "!") .replace(" ,", ",") .replace(" ' ", "'") .replace(" n't", "n't") .replace(" 'm", "'m") .replace(" 's", "'s") .replace(" 've", "'ve") .replace(" 're", "'re") ) return out_string
Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms. Args: out_string (:obj:`str`): The text to clean up. Returns: :obj:`str`: The cleaned-up string.
Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms.
[ "Clean", "up", "a", "list", "of", "simple", "English", "tokenization", "artifacts", "like", "spaces", "before", "punctuations", "and", "abbreviated", "forms", "." ]
def clean_up_tokenization(out_string: str) -> str: out_string = ( out_string.replace(" .", ".") .replace(" ?", "?") .replace(" !", "!") .replace(" ,", ",") .replace(" ' ", "'") .replace(" n't", "n't") .replace(" 'm", "'m") .replace(" 's", "'s") .replace(" 've", "'ve") .replace(" 're", "'re") ) return out_string
[ "def", "clean_up_tokenization", "(", "out_string", ":", "str", ")", "->", "str", ":", "out_string", "=", "(", "out_string", ".", "replace", "(", "\" .\"", ",", "\".\"", ")", ".", "replace", "(", "\" ?\"", ",", "\"?\"", ")", ".", "replace", "(", "\" !\"", ",", "\"!\"", ")", ".", "replace", "(", "\" ,\"", ",", "\",\"", ")", ".", "replace", "(", "\" ' \"", ",", "\"'\"", ")", ".", "replace", "(", "\" n't\"", ",", "\"n't\"", ")", ".", "replace", "(", "\" 'm\"", ",", "\"'m\"", ")", ".", "replace", "(", "\" 's\"", ",", "\"'s\"", ")", ".", "replace", "(", "\" 've\"", ",", "\"'ve\"", ")", ".", "replace", "(", "\" 're\"", ",", "\"'re\"", ")", ")", "return", "out_string" ]
Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms.
[ "Clean", "up", "a", "list", "of", "simple", "English", "tokenization", "artifacts", "like", "spaces", "before", "punctuations", "and", "abbreviated", "forms", "." ]
[ "\"\"\"\n Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms.\n\n Args:\n out_string (:obj:`str`): The text to clean up.\n\n Returns:\n :obj:`str`: The cleaned-up string.\n \"\"\"" ]
[ { "param": "out_string", "type": "str" } ]
{ "returns": [ { "docstring": ":obj:`str`: The cleaned-up string.", "docstring_tokens": [ ":", "obj", ":", "`", "str", "`", ":", "The", "cleaned", "-", "up", "string", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "out_string", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [ { "identifier": "out_string (", "type": null, "docstring": "`str`): The text to clean up.", "docstring_tokens": [ "`", "str", "`", ")", ":", "The", "text", "to", "clean", "up", "." ], "default": null, "is_optional": null } ], "others": [] }
def clean_up_tokenization(out_string: str) -> str: out_string = ( out_string.replace(" .", ".") .replace(" ?", "?") .replace(" !", "!") .replace(" ,", ",") .replace(" ' ", "'") .replace(" n't", "n't") .replace(" 'm", "'m") .replace(" 's", "'s") .replace(" 've", "'ve") .replace(" 're", "'re") ) return out_string
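A small illustration of the replacements above on a made-up detokenization string, assuming clean_up_tokenization is in scope:

print(clean_up_tokenization("he 's right , is n't he ?"))
# -> "he's right, isn't he?"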
ffe01c3c27cc04b4f0477c55adeb7dc896d4af4f
dangvinh1406/CNNForSentenceClassification
cnn/Preprocessor.py
[ "MIT" ]
Python
tokenizeSentence
<not_specific>
def tokenizeSentence(raw): """ Function tokenizes a string to sentences based on the character "new line" """ if type(raw) is not str: return [] return raw.split("\n")
Function tokenizes a string to sentences based on the character "new line"
Function tokenizes a string to sentences based on the character "new line"
[ "Function", "tokenizes", "a", "string", "to", "sentences", "based", "on", "the", "character", "\"", "new", "line", "\"" ]
def tokenizeSentence(raw): if type(raw) is not str: return [] return raw.split("\n")
[ "def", "tokenizeSentence", "(", "raw", ")", ":", "if", "type", "(", "raw", ")", "is", "not", "str", ":", "return", "[", "]", "return", "raw", ".", "split", "(", "\"\\n\"", ")" ]
Function tokenizes a string to sentences based on the character "new line"
[ "Function", "tokenizes", "a", "string", "to", "sentences", "based", "on", "the", "character", "\"", "new", "line", "\"" ]
[ "\"\"\"\n Function tokenizes a string to sentences based on the character \"new line\"\n \"\"\"" ]
[ { "param": "raw", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "raw", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def tokenizeSentence(raw): if type(raw) is not str: return [] return raw.split("\n")
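A usage sketch, assuming tokenizeSentence from the record is in scope; note the type guard returns an empty list for non-strings rather than raising:

print(tokenizeSentence("First line.\nSecond line."))  # ['First line.', 'Second line.']
print(tokenizeSentence(None))                         # [] -- non-strings hit the guard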
ffe01c3c27cc04b4f0477c55adeb7dc896d4af4f
dangvinh1406/CNNForSentenceClassification
cnn/Preprocessor.py
[ "MIT" ]
Python
tokenizeWord
<not_specific>
def tokenizeWord(raw): """ Function tokenizes a string to words based on the non-word characters """ if type(raw) is not str: return [] return re.findall(r"[\w]+", raw)
Function tokenizes a string to words based on the non-word characters
Function tokenizes a string to words based on the non-word characters
[ "Function", "tokenizes", "a", "string", "to", "words", "based", "on", "the", "non", "-", "word", "characters" ]
def tokenizeWord(raw): if type(raw) is not str: return [] return re.findall(r"[\w]+", raw)
[ "def", "tokenizeWord", "(", "raw", ")", ":", "if", "type", "(", "raw", ")", "is", "not", "str", ":", "return", "[", "]", "return", "re", ".", "findall", "(", "r\"[\\w]+\"", ",", "raw", ")" ]
Function tokenizes a string to words based on the non-word characters
[ "Function", "tokenizes", "a", "string", "to", "words", "based", "on", "the", "non", "-", "word", "characters" ]
[ "\"\"\"\n Function tokenizes a string to words based on the non-word characters\n \"\"\"" ]
[ { "param": "raw", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "raw", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re def tokenizeWord(raw): if type(raw) is not str: return [] return re.findall(r"[\w]+", raw)
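An illustrative call, assuming the function and its re import are in scope; the \w+ pattern splits on apostrophes as well as spaces and punctuation:

print(tokenizeWord("Hello, world! It's 2020."))
# -> ['Hello', 'world', 'It', 's', '2020']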
ffe01c3c27cc04b4f0477c55adeb7dc896d4af4f
dangvinh1406/CNNForSentenceClassification
cnn/Preprocessor.py
[ "MIT" ]
Python
filterWord
<not_specific>
def filterWord(listOfWords, blackSet): """ Function filters out all stop words and numbers """ return [word for word in listOfWords if word not in blackSet and not word.isdigit()]
Function filters out all stop words and numbers
Function filters out all stop words and numbers
[ "Function", "filters", "out", "all", "stop", "words", "and", "numbers" ]
def filterWord(listOfWords, blackSet): return [word for word in listOfWords if word not in blackSet and not word.isdigit()]
[ "def", "filterWord", "(", "listOfWords", ",", "blackSet", ")", ":", "return", "[", "word", "for", "word", "in", "listOfWords", "if", "word", "not", "in", "blackSet", "and", "not", "word", ".", "isdigit", "(", ")", "]" ]
Function filters out all stop words and numbers
[ "Function", "filters", "out", "all", "stop", "words", "and", "numbers" ]
[ "\"\"\"\n Function filters out all stop words and numbers\n \"\"\"" ]
[ { "param": "listOfWords", "type": null }, { "param": "blackSet", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "listOfWords", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "blackSet", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def filterWord(listOfWords, blackSet): return [word for word in listOfWords if word not in blackSet and not word.isdigit()]
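A sketch with a made-up stop-word set, assuming filterWord is in scope:

stop_words = {"the", "a", "of"}  # hypothetical blacklist
print(filterWord(["the", "cat", "42", "of", "mice"], stop_words))
# -> ['cat', 'mice']  (stop words and pure-digit tokens are dropped)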
ffe01c3c27cc04b4f0477c55adeb7dc896d4af4f
dangvinh1406/CNNForSentenceClassification
cnn/Preprocessor.py
[ "MIT" ]
Python
filterSentence
<not_specific>
def filterSentence(listOfSentences, numberOfWordsPerSentence): """ Function filters out all sentences which have less than a number of words """ return [l for l in listOfSentences if len(l) > numberOfWordsPerSentence]
Function filters out all sentences which have less than a number of words
Function filters out all sentences which have less than a number of words
[ "Function", "filters", "out", "all", "sentences", "which", "have", "less", "than", "a", "number", "of", "words" ]
def filterSentence(listOfSentences, numberOfWordsPerSentence): return [l for l in listOfSentences if len(l) > numberOfWordsPerSentence]
[ "def", "filterSentence", "(", "listOfSentences", ",", "numberOfWordsPerSentence", ")", ":", "return", "[", "l", "for", "l", "in", "listOfSentences", "if", "len", "(", "l", ")", ">", "numberOfWordsPerSentence", "]" ]
Function filters out all sentences which have less than a number of words
[ "Function", "filters", "out", "all", "sentences", "which", "have", "less", "than", "a", "number", "of", "words" ]
[ "\"\"\"\n Function filters out all sentences which have less than a number of words\n \"\"\"" ]
[ { "param": "listOfSentences", "type": null }, { "param": "numberOfWordsPerSentence", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "listOfSentences", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "numberOfWordsPerSentence", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def filterSentence(listOfSentences, numberOfWordsPerSentence): return [l for l in listOfSentences if len(l) > numberOfWordsPerSentence]
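Worth noting: the comparison is strictly greater-than, so sentences with exactly numberOfWordsPerSentence words are dropped too, despite the "less than" wording. A sketch assuming the function is in scope:

sents = [["a", "b"], ["a", "b", "c"], ["a"]]
print(filterSentence(sents, 2))
# -> [['a', 'b', 'c']]  (the 2-word sentence is also filtered out)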
502017bd1c80f619871fcdcc57fa1095da039d36
carlosasj/gauss-jordan
project/aux_functions.py
[ "MIT" ]
Python
find_pivot
int
def find_pivot(matrix, col: int) -> int: """ Given the matrix and the column index, finds the line that should be swapped with the "current" pivot line. The number returned is the index of the line """ col_terms = (matrix[line][col] for line in range(col, len(matrix))) col_terms_abs = list(map(abs, col_terms)) max_abs = max(col_terms_abs) return col_terms_abs.index(max_abs) + col
Given the matrix and the column index, finds the line that should be swapped with the "current" pivot line. The number returned is the index of the line
Given the matrix and the column index, finds the line that should be swapped with the "current" pivot line. The number returned is the index of the line
[ "Given", "the", "matrix", "and", "the", "column", "index", "finds", "the", "line", "that", "should", "be", "swapped", "with", "the", "\"", "current", "\"", "pivot", "line", ".", "The", "number", "returned", "is", "the", "index", "of", "the", "line" ]
def find_pivot(matrix, col: int) -> int: col_terms = (matrix[line][col] for line in range(col, len(matrix))) col_terms_abs = list(map(abs, col_terms)) max_abs = max(col_terms_abs) return col_terms_abs.index(max_abs) + col
[ "def", "find_pivot", "(", "matrix", ",", "col", ":", "int", ")", "->", "int", ":", "col_terms", "=", "(", "matrix", "[", "line", "]", "[", "col", "]", "for", "line", "in", "range", "(", "col", ",", "len", "(", "matrix", ")", ")", ")", "col_terms_abs", "=", "list", "(", "map", "(", "abs", ",", "col_terms", ")", ")", "max_abs", "=", "max", "(", "col_terms_abs", ")", "return", "col_terms_abs", ".", "index", "(", "max_abs", ")", "+", "col" ]
Given the matrix and the column index, finds the line that should be swapped with the "current" pivot line.
[ "Given", "the", "matrix", "and", "the", "column", "index", "finds", "the", "line", "that", "should", "be", "swapped", "with", "the", "\"", "current", "\"", "pivot", "line", "." ]
[ "\"\"\"\n Given the matrix and the column index,\n finds the line that should be swapped with the \"current\" pivot line.\n\n The number returned is the index of the line\n \"\"\"" ]
[ { "param": "matrix", "type": null }, { "param": "col", "type": "int" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "matrix", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "col", "type": "int", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def find_pivot(matrix, col: int) -> int: col_terms = (matrix[line][col] for line in range(col, len(matrix))) col_terms_abs = list(map(abs, col_terms)) max_abs = max(col_terms_abs) return col_terms_abs.index(max_abs) + col
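A partial-pivoting sketch on a made-up 3x3 matrix, assuming find_pivot is in scope; the function scans column col from the diagonal down and returns the row index with the largest absolute value:

m = [[1.0, 2.0, 3.0],
     [4.0, 5.0, 6.0],
     [2.0, 1.0, 0.0]]
print(find_pivot(m, 0))  # 1 -- abs values in column 0 are 1, 4, 2, so row 1 wins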
ebe3d8b8a51bc99de7ac0eb0b09e23195a85a8f5
AtomCrafty/catsystem-py
src/catsys/crypt/mt19937.py
[ "MIT" ]
Python
temper
int
def temper(cls, y:int) -> int: """Returns the tempered state value y, called during genrand. """ y ^= (y >> cls._SHIFT_U) y ^= (y << cls._SHIFT_S) & cls._MASK_B y ^= (y << cls._SHIFT_T) & cls._MASK_C y ^= (y >> cls._SHIFT_L) return y & 0xffffffff
Returns the tempered state value y, called during genrand.
Returns the tempered state value y, called during genrand.
[ "Returns", "the", "tempered", "state", "value", "y", "called", "during", "genrand", "." ]
def temper(cls, y:int) -> int: y ^= (y >> cls._SHIFT_U) y ^= (y << cls._SHIFT_S) & cls._MASK_B y ^= (y << cls._SHIFT_T) & cls._MASK_C y ^= (y >> cls._SHIFT_L) return y & 0xffffffff
[ "def", "temper", "(", "cls", ",", "y", ":", "int", ")", "->", "int", ":", "y", "^=", "(", "y", ">>", "cls", ".", "_SHIFT_U", ")", "y", "^=", "(", "y", "<<", "cls", ".", "_SHIFT_S", ")", "&", "cls", ".", "_MASK_B", "y", "^=", "(", "y", "<<", "cls", ".", "_SHIFT_T", ")", "&", "cls", ".", "_MASK_C", "y", "^=", "(", "y", ">>", "cls", ".", "_SHIFT_L", ")", "return", "y", "&", "0xffffffff" ]
Returns the tempered state value y, called during genrand.
[ "Returns", "the", "tempered", "state", "value", "y", "called", "during", "genrand", "." ]
[ "\"\"\"Returns the tempered state value y, called during genrand.\n \"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "y", "type": "int" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "y", "type": "int", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def temper(cls, y:int) -> int: y ^= (y >> cls._SHIFT_U) y ^= (y << cls._SHIFT_S) & cls._MASK_B y ^= (y << cls._SHIFT_T) & cls._MASK_C y ^= (y >> cls._SHIFT_L) return y & 0xffffffff
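The shift and mask constants live on the class and are not part of this record; the self-contained sketch below substitutes the standard MT19937 tempering parameters, which is an assumption about what _SHIFT_U/_SHIFT_S/_SHIFT_T/_SHIFT_L and _MASK_B/_MASK_C hold:

U, S, T, L = 11, 7, 15, 18               # assumed standard MT19937 shift amounts
MASK_B, MASK_C = 0x9D2C5680, 0xEFC60000  # assumed tempering masks

def temper_sketch(y):
    # same four xor-shift steps as the classmethod above
    y ^= y >> U
    y ^= (y << S) & MASK_B
    y ^= (y << T) & MASK_C
    y ^= y >> L
    return y & 0xFFFFFFFF

print(hex(temper_sketch(0x12345678)))  # a tempered 32-bit output word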
ebe3d8b8a51bc99de7ac0eb0b09e23195a85a8f5
AtomCrafty/catsystem-py
src/catsys/crypt/mt19937.py
[ "MIT" ]
Python
untemper
int
def untemper(cls, y:int) -> int: """Returns the un-tempered original state value of y. (for reversing) """ y ^= (y >> cls._SHIFT_L) y ^= (y << cls._SHIFT_T) & cls._MASK_C for _ in range(7): y ^= (y << cls._SHIFT_S) & cls._MASK_B for _ in range(3): y ^= (y >> cls._SHIFT_U) return y & 0xffffffff
Returns the un-tempered original state value of y. (for reversing)
Returns the un-tempered original state value of y. (for reversing)
[ "Returns", "the", "un", "-", "tempered", "original", "state", "value", "of", "y", ".", "(", "for", "reversing", ")" ]
def untemper(cls, y:int) -> int: y ^= (y >> cls._SHIFT_L) y ^= (y << cls._SHIFT_T) & cls._MASK_C for _ in range(7): y ^= (y << cls._SHIFT_S) & cls._MASK_B for _ in range(3): y ^= (y >> cls._SHIFT_U) return y & 0xffffffff
[ "def", "untemper", "(", "cls", ",", "y", ":", "int", ")", "->", "int", ":", "y", "^=", "(", "y", ">>", "cls", ".", "_SHIFT_L", ")", "y", "^=", "(", "y", "<<", "cls", ".", "_SHIFT_T", ")", "&", "cls", ".", "_MASK_C", "for", "_", "in", "range", "(", "7", ")", ":", "y", "^=", "(", "y", "<<", "cls", ".", "_SHIFT_S", ")", "&", "cls", ".", "_MASK_B", "for", "_", "in", "range", "(", "3", ")", ":", "y", "^=", "(", "y", ">>", "cls", ".", "_SHIFT_U", ")", "return", "y", "&", "0xffffffff" ]
Returns the un-tempered original state value of y.
[ "Returns", "the", "un", "-", "tempered", "original", "state", "value", "of", "y", "." ]
[ "\"\"\"Returns the un-tempered original state value of y. (for reversing)\n \"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "y", "type": "int" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "y", "type": "int", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def untemper(cls, y:int) -> int: y ^= (y >> cls._SHIFT_L) y ^= (y << cls._SHIFT_T) & cls._MASK_C for _ in range(7): y ^= (y << cls._SHIFT_S) & cls._MASK_B for _ in range(3): y ^= (y >> cls._SHIFT_U) return y & 0xffffffff
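Untempering inverts tempering exactly: over GF(2) each xor-shift step is I+M with M nilpotent, so repeating the S-step 7 times gives (I+M)^7 = (I+M)^-1 (since (I+M)^8 = I for 32-bit words), and likewise 3 repeats invert the U-step. A self-contained round-trip check using the same assumed standard constants as the tempering sketch above:

U, S, T, L = 11, 7, 15, 18        # assumed MT19937 constants
B, C = 0x9D2C5680, 0xEFC60000

def temper(y):
    y ^= y >> U
    y ^= (y << S) & B
    y ^= (y << T) & C
    y ^= y >> L
    return y & 0xFFFFFFFF

def untemper(y):
    y ^= y >> L                   # L >= 16, so one application self-inverts
    y ^= (y << T) & C             # the T/C step is also its own inverse here
    for _ in range(7):
        y ^= (y << S) & B
    for _ in range(3):
        y ^= y >> U
    return y & 0xFFFFFFFF

y0 = 0xDEADBEEF
assert untemper(temper(y0)) == y0  # tempering round-trips exactly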
12158ebd66fa5889236500b9da66d041b68ccc24
tkphd/pycalphad
pycalphad/core/utils.py
[ "MIT" ]
Python
sizeof_fmt
<not_specific>
def sizeof_fmt(num, suffix='B'): """ Human-readable string for a number of bytes. http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size """ for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']: if abs(num) < 1000.0: return "%3.1f%s%s" % (num, unit, suffix) num /= 1000.0 return "%.1f%s%s" % (num, 'Y', suffix)
Human-readable string for a number of bytes. http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
Human-readable string for a number of bytes.
[ "Human", "-", "readable", "string", "for", "a", "number", "of", "bytes", "." ]
def sizeof_fmt(num, suffix='B'): for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']: if abs(num) < 1000.0: return "%3.1f%s%s" % (num, unit, suffix) num /= 1000.0 return "%.1f%s%s" % (num, 'Y', suffix)
[ "def", "sizeof_fmt", "(", "num", ",", "suffix", "=", "'B'", ")", ":", "for", "unit", "in", "[", "''", ",", "'K'", ",", "'M'", ",", "'G'", ",", "'T'", ",", "'P'", ",", "'E'", ",", "'Z'", "]", ":", "if", "abs", "(", "num", ")", "<", "1000.0", ":", "return", "\"%3.1f%s%s\"", "%", "(", "num", ",", "unit", ",", "suffix", ")", "num", "/=", "1000.0", "return", "\"%.1f%s%s\"", "%", "(", "num", ",", "'Y'", ",", "suffix", ")" ]
Human-readable string for a number of bytes.
[ "Human", "-", "readable", "string", "for", "a", "number", "of", "bytes", "." ]
[ "\"\"\"\n Human-readable string for a number of bytes.\n http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size\n \"\"\"" ]
[ { "param": "num", "type": null }, { "param": "suffix", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "num", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "suffix", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def sizeof_fmt(num, suffix='B'): for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']: if abs(num) < 1000.0: return "%3.1f%s%s" % (num, unit, suffix) num /= 1000.0 return "%.1f%s%s" % (num, 'Y', suffix)
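A few sample calls, assuming sizeof_fmt is in scope; note the function divides by 1000 (SI-style units) rather than 1024:

print(sizeof_fmt(0))          # 0.0B
print(sizeof_fmt(123456789))  # 123.5MB
print(sizeof_fmt(1.2e25))     # 12.0YB -- falls through to the yotta catch-all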
12158ebd66fa5889236500b9da66d041b68ccc24
tkphd/pycalphad
pycalphad/core/utils.py
[ "MIT" ]
Python
unpack_phases
<not_specific>
def unpack_phases(phases): "Convert a phases list/dict into a sorted list." active_phases = None if isinstance(phases, (list, tuple, set)): active_phases = sorted(phases) elif isinstance(phases, dict): active_phases = sorted(phases.keys()) elif type(phases) is str: active_phases = [phases] return active_phases
Convert a phases list/dict into a sorted list.
Convert a phases list/dict into a sorted list.
[ "Convert", "a", "phases", "list", "/", "dict", "into", "a", "sorted", "list", "." ]
def unpack_phases(phases): active_phases = None if isinstance(phases, (list, tuple, set)): active_phases = sorted(phases) elif isinstance(phases, dict): active_phases = sorted(phases.keys()) elif type(phases) is str: active_phases = [phases] return active_phases
[ "def", "unpack_phases", "(", "phases", ")", ":", "active_phases", "=", "None", "if", "isinstance", "(", "phases", ",", "(", "list", ",", "tuple", ",", "set", ")", ")", ":", "active_phases", "=", "sorted", "(", "phases", ")", "elif", "isinstance", "(", "phases", ",", "dict", ")", ":", "active_phases", "=", "sorted", "(", "phases", ".", "keys", "(", ")", ")", "elif", "type", "(", "phases", ")", "is", "str", ":", "active_phases", "=", "[", "phases", "]", "return", "active_phases" ]
Convert a phases list/dict into a sorted list.
[ "Convert", "a", "phases", "list", "/", "dict", "into", "a", "sorted", "list", "." ]
[ "\"Convert a phases list/dict into a sorted list.\"" ]
[ { "param": "phases", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "phases", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def unpack_phases(phases): active_phases = None if isinstance(phases, (list, tuple, set)): active_phases = sorted(phases) elif isinstance(phases, dict): active_phases = sorted(phases.keys()) elif type(phases) is str: active_phases = [phases] return active_phases
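A usage sketch with made-up phase names, assuming unpack_phases is in scope; lists, tuples, sets, dicts, and bare strings all normalize to a sorted list:

print(unpack_phases(('LIQUID', 'FCC_A1')))        # ['FCC_A1', 'LIQUID']
print(unpack_phases({'LIQUID': 0, 'FCC_A1': 1}))  # ['FCC_A1', 'LIQUID']
print(unpack_phases('LIQUID'))                    # ['LIQUID']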
12158ebd66fa5889236500b9da66d041b68ccc24
tkphd/pycalphad
pycalphad/core/utils.py
[ "MIT" ]
Python
filter_phases
<not_specific>
def filter_phases(dbf, comps, candidate_phases=None): """Return phases that are valid for equilibrium calculations for the given database and components Filters out phases that * Have no active components in any sublattice of a phase * Are disordered phases in an order-disorder model Parameters ---------- dbf : Database Thermodynamic database containing the relevant parameters. comps : list of v.Species Species to consider in the calculation. candidate_phases : list Names of phases to consider in the calculation, if not passed all phases from DBF will be considered Returns ------- list Sorted list of phases that are valid for the Database and components """ # TODO: filter phases that can not charge balance def all_sublattices_active(comps, phase): active_sublattices = [len(set(comps).intersection(subl)) > 0 for subl in phase.constituents] return all(active_sublattices) if candidate_phases == None: candidate_phases = dbf.phases.keys() else: candidate_phases = set(candidate_phases).intersection(dbf.phases.keys()) disordered_phases = [dbf.phases[phase].model_hints.get('disordered_phase') for phase in candidate_phases] phases = [phase for phase in candidate_phases if all_sublattices_active(comps, dbf.phases[phase]) and (phase not in disordered_phases or (phase in disordered_phases and dbf.phases[phase].model_hints.get('ordered_phase') not in candidate_phases))] return sorted(phases)
Return phases that are valid for equilibrium calculations for the given database and components Filters out phases that * Have no active components in any sublattice of a phase * Are disordered phases in an order-disorder model Parameters ---------- dbf : Database Thermodynamic database containing the relevant parameters. comps : list of v.Species Species to consider in the calculation. candidate_phases : list Names of phases to consider in the calculation, if not passed all phases from DBF will be considered Returns ------- list Sorted list of phases that are valid for the Database and components
Return phases that are valid for equilibrium calculations for the given database and components Filters out phases that Have no active components in any sublattice of a phase Are disordered phases in an order-disorder model Parameters dbf : Database Thermodynamic database containing the relevant parameters. comps : list of v.Species Species to consider in the calculation. candidate_phases : list Names of phases to consider in the calculation, if not passed all phases from DBF will be considered Returns list Sorted list of phases that are valid for the Database and components
[ "Return", "phases", "that", "are", "valid", "for", "equilibrium", "calculations", "for", "the", "given", "database", "and", "components", "Filters", "out", "phases", "that", "Have", "no", "active", "components", "in", "any", "sublattice", "of", "a", "phase", "Are", "disordered", "phases", "in", "an", "order", "-", "disorder", "model", "Parameters", "dbf", ":", "Database", "Thermodynamic", "database", "containing", "the", "relevant", "parameters", ".", "comps", ":", "list", "of", "v", ".", "Species", "Species", "to", "consider", "in", "the", "calculation", ".", "candidate_phases", ":", "list", "Names", "of", "phases", "to", "consider", "in", "the", "calculation", "if", "not", "passed", "all", "phases", "from", "DBF", "will", "be", "considered", "Returns", "list", "Sorted", "list", "of", "phases", "that", "are", "valid", "for", "the", "Database", "and", "components" ]
def filter_phases(dbf, comps, candidate_phases=None): def all_sublattices_active(comps, phase): active_sublattices = [len(set(comps).intersection(subl)) > 0 for subl in phase.constituents] return all(active_sublattices) if candidate_phases == None: candidate_phases = dbf.phases.keys() else: candidate_phases = set(candidate_phases).intersection(dbf.phases.keys()) disordered_phases = [dbf.phases[phase].model_hints.get('disordered_phase') for phase in candidate_phases] phases = [phase for phase in candidate_phases if all_sublattices_active(comps, dbf.phases[phase]) and (phase not in disordered_phases or (phase in disordered_phases and dbf.phases[phase].model_hints.get('ordered_phase') not in candidate_phases))] return sorted(phases)
[ "def", "filter_phases", "(", "dbf", ",", "comps", ",", "candidate_phases", "=", "None", ")", ":", "def", "all_sublattices_active", "(", "comps", ",", "phase", ")", ":", "active_sublattices", "=", "[", "len", "(", "set", "(", "comps", ")", ".", "intersection", "(", "subl", ")", ")", ">", "0", "for", "subl", "in", "phase", ".", "constituents", "]", "return", "all", "(", "active_sublattices", ")", "if", "candidate_phases", "==", "None", ":", "candidate_phases", "=", "dbf", ".", "phases", ".", "keys", "(", ")", "else", ":", "candidate_phases", "=", "set", "(", "candidate_phases", ")", ".", "intersection", "(", "dbf", ".", "phases", ".", "keys", "(", ")", ")", "disordered_phases", "=", "[", "dbf", ".", "phases", "[", "phase", "]", ".", "model_hints", ".", "get", "(", "'disordered_phase'", ")", "for", "phase", "in", "candidate_phases", "]", "phases", "=", "[", "phase", "for", "phase", "in", "candidate_phases", "if", "all_sublattices_active", "(", "comps", ",", "dbf", ".", "phases", "[", "phase", "]", ")", "and", "(", "phase", "not", "in", "disordered_phases", "or", "(", "phase", "in", "disordered_phases", "and", "dbf", ".", "phases", "[", "phase", "]", ".", "model_hints", ".", "get", "(", "'ordered_phase'", ")", "not", "in", "candidate_phases", ")", ")", "]", "return", "sorted", "(", "phases", ")" ]
Return phases that are valid for equilibrium calculations for the given database and components Filters out phases that Have no active components in any sublattice of a phase Are disordered phases in an order-disorder model
[ "Return", "phases", "that", "are", "valid", "for", "equilibrium", "calculations", "for", "the", "given", "database", "and", "components", "Filters", "out", "phases", "that", "Have", "no", "active", "components", "in", "any", "sublattice", "of", "a", "phase", "Are", "disordered", "phases", "in", "an", "order", "-", "disorder", "model" ]
[ "\"\"\"Return phases that are valid for equilibrium calculations for the given database and components\n\n Filters out phases that\n * Have no active components in any sublattice of a phase\n * Are disordered phases in an order-disorder model\n\n Parameters\n ----------\n dbf : Database\n Thermodynamic database containing the relevant parameters.\n comps : list of v.Species\n Species to consider in the calculation.\n candidate_phases : list\n Names of phases to consider in the calculation, if not passed all phases from DBF will be considered\n Returns\n -------\n list\n Sorted list of phases that are valid for the Database and components\n \"\"\"", "# TODO: filter phases that can not charge balance" ]
[ { "param": "dbf", "type": null }, { "param": "comps", "type": null }, { "param": "candidate_phases", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "dbf", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "comps", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "candidate_phases", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def filter_phases(dbf, comps, candidate_phases=None): def all_sublattices_active(comps, phase): active_sublattices = [len(set(comps).intersection(subl)) > 0 for subl in phase.constituents] return all(active_sublattices) if candidate_phases == None: candidate_phases = dbf.phases.keys() else: candidate_phases = set(candidate_phases).intersection(dbf.phases.keys()) disordered_phases = [dbf.phases[phase].model_hints.get('disordered_phase') for phase in candidate_phases] phases = [phase for phase in candidate_phases if all_sublattices_active(comps, dbf.phases[phase]) and (phase not in disordered_phases or (phase in disordered_phases and dbf.phases[phase].model_hints.get('ordered_phase') not in candidate_phases))] return sorted(phases)
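Running the full function needs a pycalphad Database, so the sketch below only exercises the sublattice-activity rule it applies, with a SimpleNamespace standing in for a Phase object; the constituents shown are invented:

from types import SimpleNamespace

# hypothetical two-sublattice phase, mimicking Phase.constituents
phase = SimpleNamespace(constituents=[{'AL', 'NI'}, {'VA'}])

def all_sublattices_active(comps, phase):
    # same rule as the nested helper above: every sublattice needs an active species
    return all(len(set(comps) & set(subl)) > 0 for subl in phase.constituents)

print(all_sublattices_active({'AL', 'VA'}, phase))  # True
print(all_sublattices_active({'AL', 'NI'}, phase))  # False -- nothing occupies the VA sublattice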
11a0d1dc11e5438da33e3e14b60167bea7fd105c
Mrpye/pictoplot
inkscape/svg_parser.py
[ "Apache-2.0" ]
Python
parseLengthWithUnits
<not_specific>
def parseLengthWithUnits( str ): ''' Parse an SVG value which may or may not have units attached This version is greatly simplified in that it only allows: no units, units of px, and units of %. Everything else, it returns None for. There is a more general routine to consider in scour.py if more generality is ever needed. ''' u = 'px' s = str.strip() if s[-2:] == 'px': s = s[:-2] elif s[-1:] == '%': u = '%' s = s[:-1] try: v = float( s ) except: return None, None return v, u
Parse an SVG value which may or may not have units attached This version is greatly simplified in that it only allows: no units, units of px, and units of %. Everything else, it returns None for. There is a more general routine to consider in scour.py if more generality is ever needed.
Parse an SVG value which may or may not have units attached This version is greatly simplified in that it only allows: no units, units of px, and units of %. Everything else, it returns None for. There is a more general routine to consider in scour.py if more generality is ever needed.
[ "Parse", "an", "SVG", "value", "which", "may", "or", "may", "not", "have", "units", "attached", "This", "version", "is", "greatly", "simplified", "in", "that", "it", "only", "allows", ":", "no", "units", "units", "of", "px", "and", "units", "of", "%", ".", "Everything", "else", "it", "returns", "None", "for", ".", "There", "is", "a", "more", "general", "routine", "to", "consider", "in", "scour", ".", "py", "if", "more", "generality", "is", "ever", "needed", "." ]
def parseLengthWithUnits( str ): u = 'px' s = str.strip() if s[-2:] == 'px': s = s[:-2] elif s[-1:] == '%': u = '%' s = s[:-1] try: v = float( s ) except: return None, None return v, u
[ "def", "parseLengthWithUnits", "(", "str", ")", ":", "u", "=", "'px'", "s", "=", "str", ".", "strip", "(", ")", "if", "s", "[", "-", "2", ":", "]", "==", "'px'", ":", "s", "=", "s", "[", ":", "-", "2", "]", "elif", "s", "[", "-", "1", ":", "]", "==", "'%'", ":", "u", "=", "'%'", "s", "=", "s", "[", ":", "-", "1", "]", "try", ":", "v", "=", "float", "(", "s", ")", "except", ":", "return", "None", ",", "None", "return", "v", ",", "u" ]
Parse an SVG value which may or may not have units attached This version is greatly simplified in that it only allows: no units, units of px, and units of %.
[ "Parse", "an", "SVG", "value", "which", "may", "or", "may", "not", "have", "units", "attached", "This", "version", "is", "greatly", "simplified", "in", "that", "it", "only", "allows", ":", "no", "units", "units", "of", "px", "and", "units", "of", "%", "." ]
[ "'''\n Parse an SVG value which may or may not have units attached\n This version is greatly simplified in that it only allows: no units,\n units of px, and units of %. Everything else, it returns None for.\n There is a more general routine to consider in scour.py if more\n generality is ever needed.\n '''" ]
[ { "param": "str", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "str", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def parseLengthWithUnits( str ): u = 'px' s = str.strip() if s[-2:] == 'px': s = s[:-2] elif s[-1:] == '%': u = '%' s = s[:-1] try: v = float( s ) except: return None, None return v, u
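Sample inputs and outputs, assuming the function is in scope (note it shadows the builtin str with its parameter name); anything other than bare numbers, px, or % yields (None, None):

print(parseLengthWithUnits('12.5px'))  # (12.5, 'px')
print(parseLengthWithUnits(' 40% '))   # (40.0, '%')
print(parseLengthWithUnits('3em'))     # (None, None) -- unsupported unit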
03a37e67d6478e0c29ef3b504472a33d937b063b
paul-shannon/slexil2
slexil/ijalLine.py
[ "MIT" ]
Python
replaceHyphensWithNDashes
<not_specific>
def replaceHyphensWithNDashes(list): ''' replace hyphens with n-dashes ''' newList = [] for text in list: text = text.replace('-', '–') newList.append(text) return (newList)
replace hyphens with n-dashes
replace hyphens with n-dashes
[ "replace", "hyphens", "with", "n", "-", "dashes" ]
def replaceHyphensWithNDashes(list): newList = [] for text in list: text = text.replace('-', '–') newList.append(text) return (newList)
[ "def", "replaceHyphensWithNDashes", "(", "list", ")", ":", "newList", "=", "[", "]", "for", "text", "in", "list", ":", "text", "=", "text", ".", "replace", "(", "'-'", ",", "'–')", "", "newList", ".", "append", "(", "text", ")", "return", "(", "newList", ")" ]
replace hyphens with n-dashes
[ "replace", "hyphens", "with", "n", "-", "dashes" ]
[ "''' replace hyphens with n-dashes\n '''" ]
[ { "param": "list", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "list", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def replaceHyphensWithNDashes(list): newList = [] for text in list: text = text.replace('-', '–') newList.append(text) return (newList)
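A quick demonstration, assuming the function is in scope; the replacement character is U+2013, the en dash that "n-dash" abbreviates (the parameter name shadows the builtin list):

print(replaceHyphensWithNDashes(['well-known', 'pages 3-5']))
# -> ['well–known', 'pages 3–5']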
4c506cf14e8e208370ea21563ac3a3d1681e6ee9
shubhsherl/sympy
sympy/core/compatibility.py
[ "BSD-3-Clause" ]
Python
unwrap
<not_specific>
def unwrap(func, stop=None): """Get the object wrapped by *func*. Follows the chain of :attr:`__wrapped__` attributes returning the last object in the chain. *stop* is an optional callback accepting an object in the wrapper chain as its sole argument that allows the unwrapping to be terminated early if the callback returns a true value. If the callback never returns a true value, the last object in the chain is returned as usual. For example, :func:`signature` uses this to stop unwrapping if any object in the chain has a ``__signature__`` attribute defined. :exc:`ValueError` is raised if a cycle is encountered. """ if stop is None: def _is_wrapper(f): return hasattr(f, '__wrapped__') else: def _is_wrapper(f): return hasattr(f, '__wrapped__') and not stop(f) f = func # remember the original func for error reporting memo = {id(f)} # Memoise by id to tolerate non-hashable objects while _is_wrapper(func): func = func.__wrapped__ id_func = id(func) if id_func in memo: raise ValueError('wrapper loop when unwrapping {!r}'.format(f)) memo.add(id_func) return func
Get the object wrapped by *func*. Follows the chain of :attr:`__wrapped__` attributes returning the last object in the chain. *stop* is an optional callback accepting an object in the wrapper chain as its sole argument that allows the unwrapping to be terminated early if the callback returns a true value. If the callback never returns a true value, the last object in the chain is returned as usual. For example, :func:`signature` uses this to stop unwrapping if any object in the chain has a ``__signature__`` attribute defined. :exc:`ValueError` is raised if a cycle is encountered.
Get the object wrapped by *func*. Follows the chain of :attr:`__wrapped__` attributes returning the last object in the chain. *stop* is an optional callback accepting an object in the wrapper chain as its sole argument that allows the unwrapping to be terminated early if the callback returns a true value. If the callback never returns a true value, the last object in the chain is returned as usual. For example.
[ "Get", "the", "object", "wrapped", "by", "*", "func", "*", ".", "Follows", "the", "chain", "of", ":", "attr", ":", "`", "__wrapped__", "`", "attributes", "returning", "the", "last", "object", "in", "the", "chain", ".", "*", "stop", "*", "is", "an", "optional", "callback", "accepting", "an", "object", "in", "the", "wrapper", "chain", "as", "its", "sole", "argument", "that", "allows", "the", "unwrapping", "to", "be", "terminated", "early", "if", "the", "callback", "returns", "a", "true", "value", ".", "If", "the", "callback", "never", "returns", "a", "true", "value", "the", "last", "object", "in", "the", "chain", "is", "returned", "as", "usual", ".", "For", "example", "." ]
def unwrap(func, stop=None): if stop is None: def _is_wrapper(f): return hasattr(f, '__wrapped__') else: def _is_wrapper(f): return hasattr(f, '__wrapped__') and not stop(f) f = func memo = {id(f)} while _is_wrapper(func): func = func.__wrapped__ id_func = id(func) if id_func in memo: raise ValueError('wrapper loop when unwrapping {!r}'.format(f)) memo.add(id_func) return func
[ "def", "unwrap", "(", "func", ",", "stop", "=", "None", ")", ":", "if", "stop", "is", "None", ":", "def", "_is_wrapper", "(", "f", ")", ":", "return", "hasattr", "(", "f", ",", "'__wrapped__'", ")", "else", ":", "def", "_is_wrapper", "(", "f", ")", ":", "return", "hasattr", "(", "f", ",", "'__wrapped__'", ")", "and", "not", "stop", "(", "f", ")", "f", "=", "func", "memo", "=", "{", "id", "(", "f", ")", "}", "while", "_is_wrapper", "(", "func", ")", ":", "func", "=", "func", ".", "__wrapped__", "id_func", "=", "id", "(", "func", ")", "if", "id_func", "in", "memo", ":", "raise", "ValueError", "(", "'wrapper loop when unwrapping {!r}'", ".", "format", "(", "f", ")", ")", "memo", ".", "add", "(", "id_func", ")", "return", "func" ]
Get the object wrapped by *func*.
[ "Get", "the", "object", "wrapped", "by", "*", "func", "*", "." ]
[ "\"\"\"Get the object wrapped by *func*.\n\n Follows the chain of :attr:`__wrapped__` attributes returning the last\n object in the chain.\n\n *stop* is an optional callback accepting an object in the wrapper chain\n as its sole argument that allows the unwrapping to be terminated early if\n the callback returns a true value. If the callback never returns a true\n value, the last object in the chain is returned as usual. For example,\n :func:`signature` uses this to stop unwrapping if any object in the\n chain has a ``__signature__`` attribute defined.\n\n :exc:`ValueError` is raised if a cycle is encountered.\n\n \"\"\"", "# remember the original func for error reporting", "# Memoise by id to tolerate non-hashable objects" ]
[ { "param": "func", "type": null }, { "param": "stop", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "func", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "stop", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [ { "identifier": "func", "docstring": "`signature` uses this to stop unwrapping if any object in the\nchain has a ``__signature__`` attribute defined.", "docstring_tokens": [ "`", "signature", "`", "uses", "this", "to", "stop", "unwrapping", "if", "any", "object", "in", "the", "chain", "has", "a", "`", "`", "__signature__", "`", "`", "attribute", "defined", "." ] }, { "identifier": "exc", "docstring": "`ValueError` is raised if a cycle is encountered.", "docstring_tokens": [ "`", "ValueError", "`", "is", "raised", "if", "a", "cycle", "is", "encountered", "." ] } ] }
def unwrap(func, stop=None): if stop is None: def _is_wrapper(f): return hasattr(f, '__wrapped__') else: def _is_wrapper(f): return hasattr(f, '__wrapped__') and not stop(f) f = func memo = {id(f)} while _is_wrapper(func): func = func.__wrapped__ id_func = id(func) if id_func in memo: raise ValueError('wrapper loop when unwrapping {!r}'.format(f)) memo.add(id_func) return func
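functools.wraps sets __wrapped__ on decorated functions, which is exactly the chain unwrap follows; a sketch assuming unwrap is in scope:

import functools

def base(x):
    return x

@functools.wraps(base)          # sets wrapper.__wrapped__ = base
def wrapper(x):
    return base(x)

print(unwrap(wrapper) is base)  # True -- the chain is followed to the original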
d60d52c7975e8401d07d203b07d59bad88c5c55a
zniper/test-blog
src/content/views.py
[ "MIT" ]
Python
normalize_query
<not_specific>
def normalize_query(query_string, findterms=re.compile(r'"([^"]+)"|(\S+)').findall, normspace=re.compile(r'\s{2,}').sub): """Find the term in query string and reduce redundant spaces.""" return [normspace(' ', (t[0] or t[1]).strip()) for t in findterms(query_string)]
Find the term in query string and reduce redundant spaces.
Find the term in query string and reduce redundant spaces.
[ "Find", "the", "term", "in", "query", "string", "and", "reduce", "redundant", "spaces", "." ]
def normalize_query(query_string, findterms=re.compile(r'"([^"]+)"|(\S+)').findall, normspace=re.compile(r'\s{2,}').sub): return [normspace(' ', (t[0] or t[1]).strip()) for t in findterms(query_string)]
[ "def", "normalize_query", "(", "query_string", ",", "findterms", "=", "re", ".", "compile", "(", "r'\"([^\"]+)\"|(\\S+)'", ")", ".", "findall", ",", "normspace", "=", "re", ".", "compile", "(", "r'\\s{2,}'", ")", ".", "sub", ")", ":", "return", "[", "normspace", "(", "' '", ",", "(", "t", "[", "0", "]", "or", "t", "[", "1", "]", ")", ".", "strip", "(", ")", ")", "for", "t", "in", "findterms", "(", "query_string", ")", "]" ]
Find the term in query string and reduce redundant spaces.
[ "Find", "the", "term", "in", "query", "string", "and", "reduce", "redundant", "spaces", "." ]
[ "\"\"\"Find the term in query string and reduce redundant spaces.\"\"\"" ]
[ { "param": "query_string", "type": null }, { "param": "findterms", "type": null }, { "param": "normspace", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "query_string", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "findterms", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "normspace", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re def normalize_query(query_string, findterms=re.compile(r'"([^"]+)"|(\S+)').findall, normspace=re.compile(r'\s{2,}').sub): return [normspace(' ', (t[0] or t[1]).strip()) for t in findterms(query_string)]
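Quoted phrases survive as single terms and runs of whitespace collapse; a sketch assuming normalize_query with its default regex arguments is in scope:

print(normalize_query('django  "python web"   framework'))
# -> ['django', 'python web', 'framework']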
f1201c77eb98f8ab3338ef2e28f887f61c466539
elliottd/imagination
nmt/utils.py
[ "BSD-3-Clause" ]
Python
warning
null
def warning(*objs): """ Prints warning text/object to stderr :param objs: :return: """ print(*objs, file=sys.stderr)
Prints warning text/object to stderr :param objs: :return:
Prints warning text/object to stderr
[ "Prints", "warning", "text", "/", "object", "to", "stderr" ]
def warning(*objs): print(*objs, file=sys.stderr)
[ "def", "warning", "(", "*", "objs", ")", ":", "print", "(", "*", "objs", ",", "file", "=", "sys", ".", "stderr", ")" ]
Prints warning text/object to stderr
[ "Prints", "warning", "text", "/", "object", "to", "stderr" ]
[ "\"\"\"\n Prints warning text/object to stderr\n\n :param objs:\n :return:\n \"\"\"" ]
[]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [], "outlier_params": [ { "identifier": "objs", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "others": [] }
import sys def warning(*objs): print(*objs, file=sys.stderr)
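Usage is just print redirected to stderr, assuming warning and its sys import are in scope:

warning('could not parse config,', 'falling back to defaults')
# writes to stderr: could not parse config, falling back to defaults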
f1201c77eb98f8ab3338ef2e28f887f61c466539
elliottd/imagination
nmt/utils.py
[ "BSD-3-Clause" ]
Python
zipp
null
def zipp(params, theano_params): """ Push parameters to Theano shared variables :param params: :param theano_params: :return: """ for kk, vv in params.items(): theano_params[kk].set_value(vv)
Push parameters to Theano shared variables :param params: :param theano_params: :return:
Push parameters to Theano shared variables
[ "Push", "parameters", "to", "Theano", "shared", "variables" ]
def zipp(params, theano_params): for kk, vv in params.items(): theano_params[kk].set_value(vv)
[ "def", "zipp", "(", "params", ",", "theano_params", ")", ":", "for", "kk", ",", "vv", "in", "params", ".", "items", "(", ")", ":", "theano_params", "[", "kk", "]", ".", "set_value", "(", "vv", ")" ]
Push parameters to Theano shared variables
[ "Push", "parameters", "to", "Theano", "shared", "variables" ]
[ "\"\"\"\n Push parameters to Theano shared variables\n\n :param params:\n :param theano_params:\n :return:\n \"\"\"" ]
[ { "param": "params", "type": null }, { "param": "theano_params", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "params", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "theano_params", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def zipp(params, theano_params): for kk, vv in params.items(): theano_params[kk].set_value(vv)
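Exercising this for real needs Theano shared variables; the sketch below swaps in a minimal invented stand-in with a set_value method, purely to show the shape of the call:

class FakeShared:
    # hypothetical stand-in for a theano.shared variable
    def __init__(self):
        self.value = None
    def set_value(self, v):
        self.value = v

theano_params = {'W': FakeShared()}
zipp({'W': [[0.1, 0.2]]}, theano_params)
print(theano_params['W'].value)  # [[0.1, 0.2]]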
f1201c77eb98f8ab3338ef2e28f887f61c466539
elliottd/imagination
nmt/utils.py
[ "BSD-3-Clause" ]
Python
load_pickle_dictionary
<not_specific>
def load_pickle_dictionary(dictionary_path): """ Load a dictionary and optionally also return the inverted dictionary :param dictionary_path: :param invert: :return dictionary: :return inverted_dictionary: """ with open(dictionary_path, mode='rb') as f: dictionary = pickle.load(f) return dictionary
Load a dictionary and optionally also return the inverted dictionary :param dictionary_path: :param invert: :return dictionary: :return inverted_dictionary:
Load a dictionary and optionally also return the inverted dictionary
[ "Load", "a", "dictionary", "and", "optionally", "also", "return", "the", "inverted", "dictionary" ]
def load_pickle_dictionary(dictionary_path): with open(dictionary_path, mode='rb') as f: dictionary = pickle.load(f) return dictionary
[ "def", "load_pickle_dictionary", "(", "dictionary_path", ")", ":", "with", "open", "(", "dictionary_path", ",", "mode", "=", "'rb'", ")", "as", "f", ":", "dictionary", "=", "pickle", ".", "load", "(", "f", ")", "return", "dictionary" ]
Load a dictionary and optionally also return the inverted dictionary
[ "Load", "a", "dictionary", "and", "optionally", "also", "return", "the", "inverted", "dictionary" ]
[ "\"\"\"\n Load a dictionary and optionally also return the inverted dictionary\n\n :param dictionary_path:\n :param invert:\n :return dictionary:\n :return inverted_dictionary:\n \"\"\"" ]
[ { "param": "dictionary_path", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": "dictionary" }, { "docstring": null, "docstring_tokens": [ "None" ], "type": "inverted_dictionary" } ], "raises": [], "params": [ { "identifier": "dictionary_path", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [ { "identifier": "invert", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "others": [] }
import pickle def load_pickle_dictionary(dictionary_path): with open(dictionary_path, mode='rb') as f: dictionary = pickle.load(f) return dictionary
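The docstring still mentions an invert option that the signature no longer takes; only the plain dictionary is returned. A round-trip sketch with a temporary file (the path and contents are made up):

import os
import pickle
import tempfile

path = os.path.join(tempfile.mkdtemp(), 'vocab.pkl')  # hypothetical dictionary file
with open(path, 'wb') as f:
    pickle.dump({'the': 2, 'cat': 3}, f)

print(load_pickle_dictionary(path))  # {'the': 2, 'cat': 3}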
f1201c77eb98f8ab3338ef2e28f887f61c466539
elliottd/imagination
nmt/utils.py
[ "BSD-3-Clause" ]
Python
load_json
<not_specific>
def load_json(filename): """ json loader to load Nematus vocabularies :param filename: :return: """ with open(filename, mode='rb') as f: # return unicode_to_utf8(json.load(f)) return json.load(f)
json loader to load Nematus vocabularies :param filename: :return:
json loader to load Nematus vocabularies
[ "json", "loader", "to", "load", "Nematus", "vocabularies" ]
def load_json(filename): with open(filename, mode='rb') as f: return json.load(f)
[ "def", "load_json", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "mode", "=", "'rb'", ")", "as", "f", ":", "return", "json", ".", "load", "(", "f", ")" ]
json loader to load Nematus vocabularies
[ "json", "loader", "to", "load", "Nematus", "vocabularies" ]
[ "\"\"\"\n json loader to load Nematus vocabularies\n :param filename:\n :return:\n \"\"\"", "# return unicode_to_utf8(json.load(f))" ]
[ { "param": "filename", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "filename", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import json def load_json(filename): with open(filename, mode='rb') as f: return json.load(f)
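A matching round-trip for the JSON loader, with a made-up vocabulary and path; opening in 'rb' works because json.load accepts bytes content on Python 3.6+:

import json
import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), 'vocab.json')  # hypothetical vocabulary file
with open(path, 'w') as f:
    json.dump({'eos': 0, 'UNK': 1}, f)

print(load_json(path))  # {'eos': 0, 'UNK': 1}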
f1201c77eb98f8ab3338ef2e28f887f61c466539
elliottd/imagination
nmt/utils.py
[ "BSD-3-Clause" ]
Python
idx_to_word
<not_specific>
def idx_to_word(seq, ivocab, remove_eos_token=True): """ Get the words for a sequence of word IDs :param seq: :param ivocab: :param unk_symbol: :param remove_eos_token: :return: """ # remove EOS token if seq[-1] == 0 and remove_eos_token: seq = seq[:-1] unk_symbol = ivocab[1] translation = ' '.join([ivocab.get(idx, unk_symbol) for idx in seq]) return translation
Get the words for a sequence of word IDs :param seq: :param ivocab: :param unk_symbol: :param remove_eos_token: :return:
Get the words for a sequence of word IDs
[ "Get", "the", "words", "for", "a", "sequence", "of", "word", "IDs" ]
def idx_to_word(seq, ivocab, remove_eos_token=True): if seq[-1] == 0 and remove_eos_token: seq = seq[:-1] unk_symbol = ivocab[1] translation = ' '.join([ivocab.get(idx, unk_symbol) for idx in seq]) return translation
[ "def", "idx_to_word", "(", "seq", ",", "ivocab", ",", "remove_eos_token", "=", "True", ")", ":", "if", "seq", "[", "-", "1", "]", "==", "0", "and", "remove_eos_token", ":", "seq", "=", "seq", "[", ":", "-", "1", "]", "unk_symbol", "=", "ivocab", "[", "1", "]", "translation", "=", "' '", ".", "join", "(", "[", "ivocab", ".", "get", "(", "idx", ",", "unk_symbol", ")", "for", "idx", "in", "seq", "]", ")", "return", "translation" ]
Get the words for a sequence of word IDs
[ "Get", "the", "words", "for", "a", "sequence", "of", "word", "IDs" ]
[ "\"\"\"\n Get the words for a sequence of word IDs\n :param seq:\n :param ivocab:\n :param unk_symbol:\n :param remove_eos_token:\n :return:\n \"\"\"", "# remove EOS token" ]
[ { "param": "seq", "type": null }, { "param": "ivocab", "type": null }, { "param": "remove_eos_token", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "seq", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "ivocab", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "remove_eos_token", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [ { "identifier": "unk_symbol", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "others": [] }
def idx_to_word(seq, ivocab, remove_eos_token=True): if seq[-1] == 0 and remove_eos_token: seq = seq[:-1] unk_symbol = ivocab[1] translation = ' '.join([ivocab.get(idx, unk_symbol) for idx in seq]) return translation
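A sketch with an invented inverse vocabulary, assuming idx_to_word is in scope; id 0 is treated as EOS and id 1 supplies the unknown-word symbol:

ivocab = {0: '<eos>', 1: '<unk>', 2: 'hello', 3: 'world'}  # hypothetical inverse vocab
print(idx_to_word([2, 3, 0], ivocab))  # 'hello world' -- trailing EOS stripped
print(idx_to_word([2, 9], ivocab))     # 'hello <unk>' -- unknown ids fall back to ivocab[1]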
ec724771b82e321d3d8028ccd33c291fb9863f9f
dokyungs/fuzzbench
fuzzers/utils.py
[ "Apache-2.0" ]
Python
append_flags
null
def append_flags(env_var, additional_flags, env=None): """Append |additional_flags| to those already set in the value of |env_var| and assign env_var to the result.""" if env is None: env = os.environ flags = env.get(env_var, '').split(' ') flags.extend(additional_flags) env[env_var] = ' '.join(flags)
Append |additional_flags| to those already set in the value of |env_var| and assign env_var to the result.
Append |additional_flags| to those already set in the value of |env_var| and assign env_var to the result.
[ "Append", "|additional_flags|", "to", "those", "already", "set", "in", "the", "value", "of", "|env_var|", "and", "assign", "env_var", "to", "the", "result", "." ]
def append_flags(env_var, additional_flags, env=None): if env is None: env = os.environ flags = env.get(env_var, '').split(' ') flags.extend(additional_flags) env[env_var] = ' '.join(flags)
[ "def", "append_flags", "(", "env_var", ",", "additional_flags", ",", "env", "=", "None", ")", ":", "if", "env", "is", "None", ":", "env", "=", "os", ".", "environ", "flags", "=", "env", ".", "get", "(", "env_var", ",", "''", ")", ".", "split", "(", "' '", ")", "flags", ".", "extend", "(", "additional_flags", ")", "env", "[", "env_var", "]", "=", "' '", ".", "join", "(", "flags", ")" ]
Append |additional_flags| to those already set in the value of |env_var| and assign env_var to the result.
[ "Append", "|additional_flags|", "to", "those", "already", "set", "in", "the", "value", "of", "|env_var|", "and", "assign", "env_var", "to", "the", "result", "." ]
[ "\"\"\"Append |additional_flags| to those already set in the value of |env_var|\n and assign env_var to the result.\"\"\"" ]
[ { "param": "env_var", "type": null }, { "param": "additional_flags", "type": null }, { "param": "env", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "env_var", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "additional_flags", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "env", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os def append_flags(env_var, additional_flags, env=None): if env is None: env = os.environ flags = env.get(env_var, '').split(' ') flags.extend(additional_flags) env[env_var] = ' '.join(flags)
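A sketch against an explicit env dict, assuming append_flags is in scope; note that when the variable is unset, ''.split(' ') yields [''], so the joined result starts with a space:

env = {'CFLAGS': '-O2'}
append_flags('CFLAGS', ['-fsanitize=address', '-g'], env=env)
print(repr(env['CFLAGS']))   # '-O2 -fsanitize=address -g'

append_flags('LDFLAGS', ['-g'], env=env)
print(repr(env['LDFLAGS']))  # ' -g' -- leading space from the empty default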
c671a2e96d283ae2b8fbb833a1c1cb6e356062a3
balabit-deps/balabit-os-7-walinuxagent
azurelinuxagent/common/cgroupapi.py
[ "Apache-2.0" ]
Python
_is_systemd
<not_specific>
def _is_systemd():
    """
    Determine if systemd is managing system services; the implementation follows the same strategy as, for example,
    sd_booted() in libsystemd, or /usr/sbin/service
    """
    return os.path.exists('/run/systemd/system/')
Determine if systemd is managing system services; the implementation follows the same strategy as, for example, sd_booted() in libsystemd, or /usr/sbin/service
Determine if systemd is managing system services; the implementation follows the same strategy as, for example, sd_booted() in libsystemd, or /usr/sbin/service
[ "Determine", "if", "systemd", "is", "managing", "system", "services", ";", "the", "implementation", "follows", "the", "same", "strategy", "as", "for", "example", "sd_booted", "()", "in", "libsystemd", "or", "/", "usr", "/", "sbin", "/", "service" ]
def _is_systemd():
    return os.path.exists('/run/systemd/system/')
[ "def", "_is_systemd", "(", ")", ":", "return", "os", ".", "path", ".", "exists", "(", "'/run/systemd/system/'", ")" ]
Determine if systemd is managing system services; the implementation follows the same strategy as, for example, sd_booted() in libsystemd, or /usr/sbin/service
[ "Determine", "if", "systemd", "is", "managing", "system", "services", ";", "the", "implementation", "follows", "the", "same", "strategy", "as", "for", "example", "sd_booted", "()", "in", "libsystemd", "or", "/", "usr", "/", "sbin", "/", "service" ]
[ "\"\"\"\n Determine if systemd is managing system services; the implementation follows the same strategy as, for example,\n sd_booted() in libsystemd, or /usr/sbin/service\n \"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
import os

def _is_systemd():
    return os.path.exists('/run/systemd/system/')
8daf0a21e73d423055933958dae6886001215fac
Acidburn0zzz/helloworld
packages/magicsig/__init__.py
[ "MIT" ]
Python
NormalizeUserIdToUri
<not_specific>
def NormalizeUserIdToUri(userid):
    """Normalizes a user-provided user id to a reasonable guess at a URI."""
    userid = userid.strip()
    # If already in a URI form, we're done:
    if (userid.startswith('http:') or
            userid.startswith('https:') or
            userid.startswith('acct:')):
        return userid
    if userid.find('@') > 0:
        return 'acct:'+userid
    # Catchall: Guess at http: if nothing else works.
    return 'http://'+userid
Normalizes a user-provided user id to a reasonable guess at a URI.
Normalizes a user-provided user id to a reasonable guess at a URI.
[ "Normalizes", "a", "user", "-", "provided", "user", "id", "to", "a", "reasonable", "guess", "at", "a", "URI", "." ]
def NormalizeUserIdToUri(userid):
    userid = userid.strip()
    if (userid.startswith('http:') or
            userid.startswith('https:') or
            userid.startswith('acct:')):
        return userid
    if userid.find('@') > 0:
        return 'acct:'+userid
    return 'http://'+userid
[ "def", "NormalizeUserIdToUri", "(", "userid", ")", ":", "userid", "=", "userid", ".", "strip", "(", ")", "if", "(", "userid", ".", "startswith", "(", "'http:'", ")", "or", "userid", ".", "startswith", "(", "'https:'", ")", "or", "userid", ".", "startswith", "(", "'acct:'", ")", ")", ":", "return", "userid", "if", "userid", ".", "find", "(", "'@'", ")", ">", "0", ":", "return", "'acct:'", "+", "userid", "return", "'http://'", "+", "userid" ]
Normalizes a user-provided user id to a reasonable guess at a URI.
[ "Normalizes", "a", "user", "-", "provided", "user", "id", "to", "a", "reasonable", "guess", "at", "a", "URI", "." ]
[ "\"\"\"Normalizes a user-provided user id to a reasonable guess at a URI.\"\"\"", "# If already in a URI form, we're done:", "# Catchall: Guess at http: if nothing else works." ]
[ { "param": "userid", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "userid", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def NormalizeUserIdToUri(userid):
    userid = userid.strip()
    if (userid.startswith('http:') or
            userid.startswith('https:') or
            userid.startswith('acct:')):
        return userid
    if userid.find('@') > 0:
        return 'acct:'+userid
    return 'http://'+userid
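The three normalization paths, sketched (illustrative inputs):

print(NormalizeUserIdToUri('  bob@example.com '))       # 'acct:bob@example.com'
print(NormalizeUserIdToUri('https://example.com/bob'))  # returned unchanged
print(NormalizeUserIdToUri('example.com/bob'))          # 'http://example.com/bob'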
8daf0a21e73d423055933958dae6886001215fac
Acidburn0zzz/helloworld
packages/magicsig/__init__.py
[ "MIT" ]
Python
_ToPretty
<not_specific>
def _ToPretty(text, indent, linelength):
    """Makes huge text lines pretty, or at least printable."""
    tl = linelength - indent
    output = ''
    for i in range(0, len(text), tl):
        if output:
            output += '\n'
        output += ' ' * indent + text[i:i+tl]
    return output
Makes huge text lines pretty, or at least printable.
Makes huge text lines pretty, or at least printable.
[ "Makes", "huge", "text", "lines", "pretty", "or", "at", "least", "printable", "." ]
def _ToPretty(text, indent, linelength):
    tl = linelength - indent
    output = ''
    for i in range(0, len(text), tl):
        if output:
            output += '\n'
        output += ' ' * indent + text[i:i+tl]
    return output
[ "def", "_ToPretty", "(", "text", ",", "indent", ",", "linelength", ")", ":", "tl", "=", "linelength", "-", "indent", "output", "=", "''", "for", "i", "in", "range", "(", "0", ",", "len", "(", "text", ")", ",", "tl", ")", ":", "if", "output", ":", "output", "+=", "'\\n'", "output", "+=", "' '", "*", "indent", "+", "text", "[", "i", ":", "i", "+", "tl", "]", "return", "output" ]
Makes huge text lines pretty, or at least printable.
[ "Makes", "huge", "text", "lines", "pretty", "or", "at", "least", "printable", "." ]
[ "\"\"\"Makes huge text lines pretty, or at least printable.\"\"\"" ]
[ { "param": "text", "type": null }, { "param": "indent", "type": null }, { "param": "linelength", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "text", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "indent", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "linelength", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _ToPretty(text, indent, linelength):
    tl = linelength - indent
    output = ''
    for i in range(0, len(text), tl):
        if output:
            output += '\n'
        output += ' ' * indent + text[i:i+tl]
    return output
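A small illustrative run; each output line holds linelength - indent characters:

print(_ToPretty('0123456789', 2, 6))
# prints three lines, each indented 2 spaces and at most 4 chars wide:
#   0123
#   4567
#   89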
fd0dd9be1e27f2195dd24e848ded26fe8b3db7f9
brianb1/2017Challenges
challenge_6/python/alexbotello/src/ranges.py
[ "Apache-2.0" ]
Python
ranges
<not_specific>
def ranges(int_list):
    """
    Given a sorted list of integers function will return
    an array of strings that represent the ranges
    """
    begin = 0
    end = 0
    ranges = []
    for i in int_list:
        # At the start of iteration set the value of
        # `begin` and `end` to equal the first element
        if begin == 0:
            begin = i
            end = i
        # Set the current element as the value of `end`
        # as long as the array is in sequence
        elif i-1 == end:
            end = i
        # Reset flags to current element when iterating through
        # multiple integers that are of broken sequence
        elif begin == end:
            begin = i
            end = i
        else:
            # Sequence of array has been broken, append current range
            # to `ranges` and set the value of `begin and `end` flags to
            # equal the current element
            ranges.append("{0}->{1}".format(begin, end))
            begin = i
            end = i
    # Grab the last range from the array
    if begin != end:
        ranges.append("{0}->{1}".format(begin, end))
    return ranges
Given a sorted list of integers function will return an array of strings that represent the ranges
Given a sorted list of integers function will return an array of strings that represent the ranges
[ "Given", "a", "sorted", "list", "of", "integers", "function", "will", "return", "an", "array", "of", "strings", "that", "represent", "the", "ranges" ]
def ranges(int_list):
    begin = 0
    end = 0
    ranges = []
    for i in int_list:
        if begin == 0:
            begin = i
            end = i
        elif i-1 == end:
            end = i
        elif begin == end:
            begin = i
            end = i
        else:
            ranges.append("{0}->{1}".format(begin, end))
            begin = i
            end = i
    if begin != end:
        ranges.append("{0}->{1}".format(begin, end))
    return ranges
[ "def", "ranges", "(", "int_list", ")", ":", "begin", "=", "0", "end", "=", "0", "ranges", "=", "[", "]", "for", "i", "in", "int_list", ":", "if", "begin", "==", "0", ":", "begin", "=", "i", "end", "=", "i", "elif", "i", "-", "1", "==", "end", ":", "end", "=", "i", "elif", "begin", "==", "end", ":", "begin", "=", "i", "end", "=", "i", "else", ":", "ranges", ".", "append", "(", "\"{0}->{1}\"", ".", "format", "(", "begin", ",", "end", ")", ")", "begin", "=", "i", "end", "=", "i", "if", "begin", "!=", "end", ":", "ranges", ".", "append", "(", "\"{0}->{1}\"", ".", "format", "(", "begin", ",", "end", ")", ")", "return", "ranges" ]
Given a sorted list of integers function will return an array of strings that represent the ranges
[ "Given", "a", "sorted", "list", "of", "integers", "function", "will", "return", "an", "array", "of", "strings", "that", "represent", "the", "ranges" ]
[ "\"\"\"\n Given a sorted list of integers function will return\n an array of strings that represent the ranges\n \"\"\"", "# At the start of iteration set the value of", "# `begin` and `end` to equal the first element", "# Set the current element as the value of `end`", "# as long as the array is in sequence", "# Reset flags to current element when iterating through", "# multiple integers that are of broken sequence", "# Sequence of array has been broken, append current range", "# to `ranges` and set the value of `begin and `end` flags to", "# equal the current element", "# Grab the last range from the array" ]
[ { "param": "int_list", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "int_list", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def ranges(int_list):
    begin = 0
    end = 0
    ranges = []
    for i in int_list:
        if begin == 0:
            begin = i
            end = i
        elif i-1 == end:
            end = i
        elif begin == end:
            begin = i
            end = i
        else:
            ranges.append("{0}->{1}".format(begin, end))
            begin = i
            end = i
    if begin != end:
        ranges.append("{0}->{1}".format(begin, end))
    return ranges
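Sketch of the observable behavior (illustrative inputs). One edge case follows from the final begin != end check:

print(ranges([1, 2, 3, 7, 8, 10, 11]))  # ['1->3', '7->8', '10->11']
print(ranges([1, 2, 5]))                # ['1->2'] -- a trailing singleton (5) is silently dropped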
4c058eee2b08930e6b1c6c41ecd808d99daaa892
vmonaco/enigma
break_enigma.py
[ "MIT" ]
Python
valid_cycle
<not_specific>
def valid_cycle(enigma, rotor_positions, E, perm_cycle):
    '''
    Check if the permutation cycle is valid for the given configuration
    '''
    c = E
    for P in perm_cycle:
        enigma.set_rotor_positions(rotor_positions)
        enigma.step_to(abs(P))
        c = enigma.encrypt(c)
    # reset the machine
    enigma.set_rotor_positions(rotor_positions)
    # the cycle holds if the input and output are the same
    if c == E:
        return True
    return False
Check if the permutation cycle is valid for the given configuration
Check if the permutation cycle is valid for the given configuration
[ "Check", "if", "the", "permutation", "cycle", "is", "valid", "for", "the", "given", "configuration" ]
def valid_cycle(enigma, rotor_positions, E, perm_cycle):
    c = E
    for P in perm_cycle:
        enigma.set_rotor_positions(rotor_positions)
        enigma.step_to(abs(P))
        c = enigma.encrypt(c)
    enigma.set_rotor_positions(rotor_positions)
    if c == E:
        return True
    return False
[ "def", "valid_cycle", "(", "enigma", ",", "rotor_positions", ",", "E", ",", "perm_cycle", ")", ":", "c", "=", "E", "for", "P", "in", "perm_cycle", ":", "enigma", ".", "set_rotor_positions", "(", "rotor_positions", ")", "enigma", ".", "step_to", "(", "abs", "(", "P", ")", ")", "c", "=", "enigma", ".", "encrypt", "(", "c", ")", "enigma", ".", "set_rotor_positions", "(", "rotor_positions", ")", "if", "c", "==", "E", ":", "return", "True", "return", "False" ]
Check if the permutation cycle is valid for the given configuration
[ "Check", "if", "the", "permutation", "cycle", "is", "valid", "for", "the", "given", "configuration" ]
[ "'''\n Check if the permutation cycle is valid for the given configuration\n '''", "# reset the machine", "# the cycle holds if the input and output are the same" ]
[ { "param": "enigma", "type": null }, { "param": "rotor_positions", "type": null }, { "param": "E", "type": null }, { "param": "perm_cycle", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "enigma", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "rotor_positions", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "E", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "perm_cycle", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def valid_cycle(enigma, rotor_positions, E, perm_cycle):
    c = E
    for P in perm_cycle:
        enigma.set_rotor_positions(rotor_positions)
        enigma.step_to(abs(P))
        c = enigma.encrypt(c)
    enigma.set_rotor_positions(rotor_positions)
    if c == E:
        return True
    return False
101a6c94b7fc225c94ac8af0a44131d5c445d3bc
S-Hanin/PyXmlMapper
pyxmlmapper/components/xpath_functions.py
[ "MIT" ]
Python
tag
<not_specific>
def tag(context):
    """:return str
    Returns tag without namespace. Just short replacement for xpath local-name() function
    without arguments"""
    ns_key = context.context_node.prefix
    ns_link = "{{{}}}".format(context.context_node.nsmap.get(ns_key))
    return context.context_node.tag.replace(ns_link, "")
:return str Returns tag without namespace. Just short replacement for xpath local-name() function without arguments
:return str Returns tag without namespace. Just short replacement for xpath local-name() function without arguments
[ ":", "return", "str", "Returns", "tag", "without", "namespace", ".", "Just", "short", "replacement", "for", "xpath", "local", "-", "name", "()", "function", "without", "arguments" ]
def tag(context):
    ns_key = context.context_node.prefix
    ns_link = "{{{}}}".format(context.context_node.nsmap.get(ns_key))
    return context.context_node.tag.replace(ns_link, "")
[ "def", "tag", "(", "context", ")", ":", "ns_key", "=", "context", ".", "context_node", ".", "prefix", "ns_link", "=", "\"{{{}}}\"", ".", "format", "(", "context", ".", "context_node", ".", "nsmap", ".", "get", "(", "ns_key", ")", ")", "return", "context", ".", "context_node", ".", "tag", ".", "replace", "(", "ns_link", ",", "\"\"", ")" ]
:return str Returns tag without namespace.
[ ":", "return", "str", "Returns", "tag", "without", "namespace", "." ]
[ "\"\"\":return str\n Returns tag without namespace. Just short replacement for xpath local-name() function\n without arguments\"\"\"" ]
[ { "param": "context", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "context", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def tag(context):
    ns_key = context.context_node.prefix
    ns_link = "{{{}}}".format(context.context_node.nsmap.get(ns_key))
    return context.context_node.tag.replace(ns_link, "")
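The record does not show the registration side; one plausible way to exercise such an XPath extension function, assuming lxml's FunctionNamespace API:

from lxml import etree

etree.FunctionNamespace(None)['tag'] = tag  # register in the default function namespace
root = etree.fromstring('<root xmlns:a="urn:x"><a:item/></root>')
print(root[0].xpath('tag()'))  # 'item' -- the '{urn:x}' namespace part is stripped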
101a6c94b7fc225c94ac8af0a44131d5c445d3bc
S-Hanin/PyXmlMapper
pyxmlmapper/components/xpath_functions.py
[ "MIT" ]
Python
match
<not_specific>
def match(context, tag, *search):
    """:return bool
    search exact match for tag from several variants
    """
    return any(pattern == tag for pattern in search)
:return bool search exact match for tag from several variants
:return bool search exact match for tag from several variants
[ ":", "return", "bool", "search", "exact", "match", "for", "tag", "from", "several", "variants" ]
def match(context, tag, *search):
    return any(pattern == tag for pattern in search)
[ "def", "match", "(", "context", ",", "tag", ",", "*", "search", ")", ":", "return", "any", "(", "pattern", "==", "tag", "for", "pattern", "in", "search", ")" ]
:return bool search exact match for tag from several variants
[ ":", "return", "bool", "search", "exact", "match", "for", "tag", "from", "several", "variants" ]
[ "\"\"\":return bool\n search exact match for tag from several variants\n \"\"\"" ]
[ { "param": "context", "type": null }, { "param": "tag", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "context", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "tag", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def match(context, tag, *search):
    return any(pattern == tag for pattern in search)
a29c58053eef33cd8b171c999c068b8eb9bd3e8b
Daphnis-z/nlp-ztools
nlp/entity/entity_utils.py
[ "MIT" ]
Python
full_to_half
<not_specific>
def full_to_half(s):
    """
    Convert full-width character to half-width one
    """
    n = []
    for char in s:
        try:
            num = ord(char)
            if num == 0x3000:
                num = 32
            elif 0xFF01 <= num <= 0xFF5E:
                num -= 0xfee0
            char = chr(num)
            n.append(char)
        except:
            pass
    return ''.join(n)
Convert full-width character to half-width one
Convert full-width character to half-width one
[ "Convert", "full", "-", "width", "character", "to", "half", "-", "width", "one" ]
def full_to_half(s):
    n = []
    for char in s:
        try:
            num = ord(char)
            if num == 0x3000:
                num = 32
            elif 0xFF01 <= num <= 0xFF5E:
                num -= 0xfee0
            char = chr(num)
            n.append(char)
        except:
            pass
    return ''.join(n)
[ "def", "full_to_half", "(", "s", ")", ":", "n", "=", "[", "]", "for", "char", "in", "s", ":", "try", ":", "num", "=", "ord", "(", "char", ")", "if", "num", "==", "0x3000", ":", "num", "=", "32", "elif", "0xFF01", "<=", "num", "<=", "0xFF5E", ":", "num", "-=", "0xfee0", "char", "=", "chr", "(", "num", ")", "n", ".", "append", "(", "char", ")", "except", ":", "pass", "return", "''", ".", "join", "(", "n", ")" ]
Convert full-width character to half-width one
[ "Convert", "full", "-", "width", "character", "to", "half", "-", "width", "one" ]
[ "\"\"\"\n Convert full-width character to half-width one \n \"\"\"" ]
[ { "param": "s", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "s", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def full_to_half(s):
    n = []
    for char in s:
        try:
            num = ord(char)
            if num == 0x3000:
                num = 32
            elif 0xFF01 <= num <= 0xFF5E:
                num -= 0xfee0
            char = chr(num)
            n.append(char)
        except:
            pass
    return ''.join(n)
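Illustrative round trip on full-width input (code points U+FF01 through U+FF5E map down by 0xFEE0; the ideographic space U+3000 becomes an ASCII space):

print(full_to_half('ＡＢＣ　１２３！'))  # 'ABC 123!'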
3d28bd011a280e2aba4cc16979984ef5778949a5
aws-samples/aws-autonomous-driving-data-lake-ros-bag-scene-detection-pipeline
infrastructure/emr_trigger/lambda_source/trigger.py
[ "MIT-0" ]
Python
initialize_table
<not_specific>
def initialize_table(table):
    """
    Initialize 'Latest' Item in DynamoDB if no LATEST item is found
    :param table:
    :return:
    """
    batch_id = str(int(datetime.datetime.now().timestamp()))
    table.put_item(
        Item={
            "BatchId": "LATEST",
            "Name": "LATEST",
            "FileSizeKb": 0,
            "NumFiles": 0,
            "BatchWindowStartTime": batch_id,
        }
    )
    return batch_id
Initialize 'Latest' Item in DynamoDB if no LATEST item is found :param table: :return:
Initialize 'Latest' Item in DynamoDB if no LATEST item is found
[ "Initialize", "'", "Latest", "'", "Item", "in", "DynamoDB", "if", "no", "LATEST", "item", "is", "found" ]
def initialize_table(table):
    batch_id = str(int(datetime.datetime.now().timestamp()))
    table.put_item(
        Item={
            "BatchId": "LATEST",
            "Name": "LATEST",
            "FileSizeKb": 0,
            "NumFiles": 0,
            "BatchWindowStartTime": batch_id,
        }
    )
    return batch_id
[ "def", "initialize_table", "(", "table", ")", ":", "batch_id", "=", "str", "(", "int", "(", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "timestamp", "(", ")", ")", ")", "table", ".", "put_item", "(", "Item", "=", "{", "\"BatchId\"", ":", "\"LATEST\"", ",", "\"Name\"", ":", "\"LATEST\"", ",", "\"FileSizeKb\"", ":", "0", ",", "\"NumFiles\"", ":", "0", ",", "\"BatchWindowStartTime\"", ":", "batch_id", ",", "}", ")", "return", "batch_id" ]
Initialize 'Latest' Item in DynamoDB if no LATEST item is found
[ "Initialize", "'", "Latest", "'", "Item", "in", "DynamoDB", "if", "no", "LATEST", "item", "is", "found" ]
[ "\"\"\"\n Initialize 'Latest' Item in DynamoDB if no LATEST item is found\n :param table:\n :return:\n \"\"\"" ]
[ { "param": "table", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "table", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import datetime

def initialize_table(table):
    batch_id = str(int(datetime.datetime.now().timestamp()))
    table.put_item(
        Item={
            "BatchId": "LATEST",
            "Name": "LATEST",
            "FileSizeKb": 0,
            "NumFiles": 0,
            "BatchWindowStartTime": batch_id,
        }
    )
    return batch_id
3d28bd011a280e2aba4cc16979984ef5778949a5
aws-samples/aws-autonomous-driving-data-lake-ros-bag-scene-detection-pipeline
infrastructure/emr_trigger/lambda_source/trigger.py
[ "MIT-0" ]
Python
reset_batch
null
def reset_batch(table, latest, pipeline_arn, execution_arn, cluster_name):
    """
    When a batch run is triggered, reset the LATEST item to start collecting files for the next batch run.
    Also add batch metadata to DynamoDB for the batch run just triggered
    :param table:
    :param latest:
    :param pipeline_arn:
    :param execution_arn:
    :param cluster_name:
    :return:
    """
    table.update_item(
        Key={
            "BatchId": "LATEST",
            "Name": "LATEST",
        },
        UpdateExpression="set FileSizeKb = :f, NumFiles = :n, BatchWindowStartTime = :t",
        ExpressionAttributeValues={
            ":f": 0,
            ":n": 0,
            ":t": int(datetime.datetime.now().timestamp()),
        },
    )
    table.put_item(
        Item={
            "BatchId": "BatchMetadata",
            "Name": str(latest["BatchWindowStartTime"]),
            "FileSizeKb": latest["FileSizeKb"],
            "NumFiles": latest["NumFiles"],
            "BatchWindowStartTime": latest["BatchWindowStartTime"],
            "BatchWindowEndTime": int(datetime.datetime.now().timestamp()),
            "PipelineArn": pipeline_arn,
            "ExecutionArn": execution_arn,
            "ClusterName": cluster_name,
        }
    )
When a batch run is triggered, reset the LATEST item to start collecting files for the next batch run. Also add batch metadata to DynamoDB for the batch run just triggered :param table: :param latest: :param pipeline_arn: :param execution_arn: :param cluster_name: :return:
When a batch run is triggered, reset the LATEST item to start collecting files for the next batch run. Also add batch metadata to DynamoDB for the batch run just triggered
[ "When", "a", "batch", "run", "is", "triggered", "reset", "the", "LATEST", "item", "to", "start", "collecting", "files", "for", "the", "next", "batch", "run", ".", "Also", "add", "batch", "metadata", "to", "DynamoDB", "for", "the", "batch", "run", "just", "triggered" ]
def reset_batch(table, latest, pipeline_arn, execution_arn, cluster_name):
    table.update_item(
        Key={
            "BatchId": "LATEST",
            "Name": "LATEST",
        },
        UpdateExpression="set FileSizeKb = :f, NumFiles = :n, BatchWindowStartTime = :t",
        ExpressionAttributeValues={
            ":f": 0,
            ":n": 0,
            ":t": int(datetime.datetime.now().timestamp()),
        },
    )
    table.put_item(
        Item={
            "BatchId": "BatchMetadata",
            "Name": str(latest["BatchWindowStartTime"]),
            "FileSizeKb": latest["FileSizeKb"],
            "NumFiles": latest["NumFiles"],
            "BatchWindowStartTime": latest["BatchWindowStartTime"],
            "BatchWindowEndTime": int(datetime.datetime.now().timestamp()),
            "PipelineArn": pipeline_arn,
            "ExecutionArn": execution_arn,
            "ClusterName": cluster_name,
        }
    )
[ "def", "reset_batch", "(", "table", ",", "latest", ",", "pipeline_arn", ",", "execution_arn", ",", "cluster_name", ")", ":", "table", ".", "update_item", "(", "Key", "=", "{", "\"BatchId\"", ":", "\"LATEST\"", ",", "\"Name\"", ":", "\"LATEST\"", ",", "}", ",", "UpdateExpression", "=", "\"set FileSizeKb = :f, NumFiles = :n, BatchWindowStartTime = :t\"", ",", "ExpressionAttributeValues", "=", "{", "\":f\"", ":", "0", ",", "\":n\"", ":", "0", ",", "\":t\"", ":", "int", "(", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "timestamp", "(", ")", ")", ",", "}", ",", ")", "table", ".", "put_item", "(", "Item", "=", "{", "\"BatchId\"", ":", "\"BatchMetadata\"", ",", "\"Name\"", ":", "str", "(", "latest", "[", "\"BatchWindowStartTime\"", "]", ")", ",", "\"FileSizeKb\"", ":", "latest", "[", "\"FileSizeKb\"", "]", ",", "\"NumFiles\"", ":", "latest", "[", "\"NumFiles\"", "]", ",", "\"BatchWindowStartTime\"", ":", "latest", "[", "\"BatchWindowStartTime\"", "]", ",", "\"BatchWindowEndTime\"", ":", "int", "(", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "timestamp", "(", ")", ")", ",", "\"PipelineArn\"", ":", "pipeline_arn", ",", "\"ExecutionArn\"", ":", "execution_arn", ",", "\"ClusterName\"", ":", "cluster_name", ",", "}", ")" ]
When a batch run is triggered, reset the LATEST item to start collecting files for the next batch run.
[ "When", "a", "batch", "run", "is", "triggered", "reset", "the", "LATEST", "item", "to", "start", "collecting", "files", "for", "the", "next", "batch", "run", "." ]
[ "\"\"\"\n When a batch run is triggered, reset the LATEST item to start collecting files for the next batch run.\n Also add batch metadata to DynamoDB for the batch run just triggered\n :param table:\n :param latest:\n :param pipeline_arn:\n :param execution_arn:\n :param cluster_name:\n :return:\n \"\"\"" ]
[ { "param": "table", "type": null }, { "param": "latest", "type": null }, { "param": "pipeline_arn", "type": null }, { "param": "execution_arn", "type": null }, { "param": "cluster_name", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "table", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "latest", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "pipeline_arn", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "execution_arn", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "cluster_name", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import datetime

def reset_batch(table, latest, pipeline_arn, execution_arn, cluster_name):
    table.update_item(
        Key={
            "BatchId": "LATEST",
            "Name": "LATEST",
        },
        UpdateExpression="set FileSizeKb = :f, NumFiles = :n, BatchWindowStartTime = :t",
        ExpressionAttributeValues={
            ":f": 0,
            ":n": 0,
            ":t": int(datetime.datetime.now().timestamp()),
        },
    )
    table.put_item(
        Item={
            "BatchId": "BatchMetadata",
            "Name": str(latest["BatchWindowStartTime"]),
            "FileSizeKb": latest["FileSizeKb"],
            "NumFiles": latest["NumFiles"],
            "BatchWindowStartTime": latest["BatchWindowStartTime"],
            "BatchWindowEndTime": int(datetime.datetime.now().timestamp()),
            "PipelineArn": pipeline_arn,
            "ExecutionArn": execution_arn,
            "ClusterName": cluster_name,
        }
    )
3d28bd011a280e2aba4cc16979984ef5778949a5
aws-samples/aws-autonomous-driving-data-lake-ros-bag-scene-detection-pipeline
infrastructure/emr_trigger/lambda_source/trigger.py
[ "MIT-0" ]
Python
should_lambda_trigger_pipeline
<not_specific>
def should_lambda_trigger_pipeline(latest_batch, latest_bag_file):
    """
    return true if pipeline should be triggered, else false
    based on values in LATEST item
    :param latest:
    :return:
    """
    # FIXME: Trigger EMR if the latest bag_file has all of the topics in DynamoDB AND X+ number of bagfiles to process
    num_topics = int(os.environ["NUM_TOPICS"])
    min_num_bags_to_process = 2
    all_topics_in_dynamo = len(list(set(latest_bag_file["topics"]))) == num_topics
    number_of_bag_files_in_batch = latest_batch["NumFiles"] / num_topics
    return (
        all_topics_in_dynamo
        and number_of_bag_files_in_batch >= min_num_bags_to_process
    )
return true if pipeline should be triggered, else false based on values in LATEST item :param latest: :return:
return true if pipeline should be triggered, else false based on values in LATEST item
[ "return", "true", "if", "pipeline", "should", "be", "triggered", "else", "false", "based", "on", "values", "in", "LATEST", "item" ]
def should_lambda_trigger_pipeline(latest_batch, latest_bag_file):
    num_topics = int(os.environ["NUM_TOPICS"])
    min_num_bags_to_process = 2
    all_topics_in_dynamo = len(list(set(latest_bag_file["topics"]))) == num_topics
    number_of_bag_files_in_batch = latest_batch["NumFiles"] / num_topics
    return (
        all_topics_in_dynamo
        and number_of_bag_files_in_batch >= min_num_bags_to_process
    )
[ "def", "should_lambda_trigger_pipeline", "(", "latest_batch", ",", "latest_bag_file", ")", ":", "num_topics", "=", "int", "(", "os", ".", "environ", "[", "\"NUM_TOPICS\"", "]", ")", "min_num_bags_to_process", "=", "2", "all_topics_in_dynamo", "=", "len", "(", "list", "(", "set", "(", "latest_bag_file", "[", "\"topics\"", "]", ")", ")", ")", "==", "num_topics", "number_of_bag_files_in_batch", "=", "latest_batch", "[", "\"NumFiles\"", "]", "/", "num_topics", "return", "(", "all_topics_in_dynamo", "and", "number_of_bag_files_in_batch", ">=", "min_num_bags_to_process", ")" ]
return true if pipeline should be triggered, else false based on values in LATEST item
[ "return", "true", "if", "pipeline", "should", "be", "triggered", "else", "false", "based", "on", "values", "in", "LATEST", "item" ]
[ "\"\"\"\n return true if pipeline should be triggered, else false\n based on values in LATEST item\n :param latest:\n :return:\n \"\"\"", "# FIXME: Trigger EMR if the latest bag_file has all of the topics in DynamoDB AND X+ number of bagfiles to process" ]
[ { "param": "latest_batch", "type": null }, { "param": "latest_bag_file", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "latest_batch", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "latest_bag_file", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [ { "identifier": "latest", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "others": [] }
import os

def should_lambda_trigger_pipeline(latest_batch, latest_bag_file):
    num_topics = int(os.environ["NUM_TOPICS"])
    min_num_bags_to_process = 2
    all_topics_in_dynamo = len(list(set(latest_bag_file["topics"]))) == num_topics
    number_of_bag_files_in_batch = latest_batch["NumFiles"] / num_topics
    return (
        all_topics_in_dynamo
        and number_of_bag_files_in_batch >= min_num_bags_to_process
    )
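A sketch with hypothetical values (the topic names and counts below are made up):

import os

os.environ['NUM_TOPICS'] = '3'
latest_batch = {'NumFiles': 9}
latest_bag_file = {'topics': ['imu', 'gps', 'camera']}
print(should_lambda_trigger_pipeline(latest_batch, latest_bag_file))  # True: all 3 topics seen, 9/3 = 3 >= 2 bags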
20e0e2be4786ea38651b62d3950c08efb7fc7c9e
kaeawc/django-auth-example
app/controllers/decorator.py
[ "MIT" ]
Python
logged_out
<not_specific>
def logged_out(func):
    """
    Controllers decorated with @logged_out deny users who have the 'user_id' cookie.
    :param func:
    :return:
    """
    @functools.wraps(func)
    def wrap(*args, **kwargs):
        request = args[0]
        if request.user and request.user.is_authenticated():
            controller = request.resolver_match.url_name
            return {u"ok": False, u"status": 401,
                    u"reason": u"You must be logged out to access %s." % controller}
        response = func(*args, **kwargs)
        return response
    return wrap
Controllers decorated with @logged_out deny users who have the 'user_id' cookie. :param func: :return:
Controllers decorated with @logged_out deny users who have the 'user_id' cookie.
[ "Controllers", "decorated", "with", "@logged_out", "deny", "users", "who", "have", "the", "'", "user_id", "'", "cookie", "." ]
def logged_out(func):
    @functools.wraps(func)
    def wrap(*args, **kwargs):
        request = args[0]
        if request.user and request.user.is_authenticated():
            controller = request.resolver_match.url_name
            return {u"ok": False, u"status": 401,
                    u"reason": u"You must be logged out to access %s." % controller}
        response = func(*args, **kwargs)
        return response
    return wrap
[ "def", "logged_out", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrap", "(", "*", "args", ",", "**", "kwargs", ")", ":", "request", "=", "args", "[", "0", "]", "if", "request", ".", "user", "and", "request", ".", "user", ".", "is_authenticated", "(", ")", ":", "controller", "=", "request", ".", "resolver_match", ".", "url_name", "return", "{", "u\"ok\"", ":", "False", ",", "u\"status\"", ":", "401", ",", "u\"reason\"", ":", "u\"You must be logged out to access %s.\"", "%", "controller", "}", "response", "=", "func", "(", "*", "args", ",", "**", "kwargs", ")", "return", "response", "return", "wrap" ]
Controllers decorated with @logged_out deny users who have the 'user_id' cookie.
[ "Controllers", "decorated", "with", "@logged_out", "deny", "users", "who", "have", "the", "'", "user_id", "'", "cookie", "." ]
[ "\"\"\"\n Controllers decorated with @logged_out deny users who have the 'user_id' cookie.\n :param func:\n :return:\n \"\"\"" ]
[ { "param": "func", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "func", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import functools

def logged_out(func):
    @functools.wraps(func)
    def wrap(*args, **kwargs):
        request = args[0]
        if request.user and request.user.is_authenticated():
            controller = request.resolver_match.url_name
            return {u"ok": False, u"status": 401,
                    u"reason": u"You must be logged out to access %s." % controller}
        response = func(*args, **kwargs)
        return response
    return wrap
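How such a decorator would typically be applied to a Django view (the view name is hypothetical; the dict return value suggests the project serializes controller responses elsewhere):

@logged_out
def register(request):
    ...  # only reachable while request.user is anonymous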
903cba3fa9ca3e51a30ce333e539ec2b8e4b613a
Tythos/sdsu
__init__.py
[ "BSD-2-Clause" ]
Python
isIPv4
<not_specific>
def isIPv4(ip):
    """Returns *True* if the given string is a dotted quad (four integers
    seperated by a period).
    """
    return re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip) is not None
Returns *True* if the given string is a dotted quad (four integers seperated by a period).
Returns *True* if the given string is a dotted quad (four integers seperated by a period).
[ "Returns", "*", "True", "*", "if", "the", "given", "string", "is", "a", "dotted", "quad", "(", "four", "integers", "seperated", "by", "a", "period", ")", "." ]
def isIPv4(ip):
    return re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip) is not None
[ "def", "isIPv4", "(", "ip", ")", ":", "return", "re", ".", "match", "(", "r\"^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$\"", ",", "ip", ")", "is", "not", "None" ]
Returns *True* if the given string is a dotted quad (four integers seperated by a period).
[ "Returns", "*", "True", "*", "if", "the", "given", "string", "is", "a", "dotted", "quad", "(", "four", "integers", "seperated", "by", "a", "period", ")", "." ]
[ "\"\"\"Returns *True* if the given string is a dotted quad (four integers\r\n seperated by a period).\r\n \"\"\"" ]
[ { "param": "ip", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "ip", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re

def isIPv4(ip):
    return re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip) is not None
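Illustrative checks; note the pattern validates shape only, not octet ranges:

print(isIPv4('192.168.0.1'))      # True
print(isIPv4('999.999.999.999'))  # True -- each quad only has to be 1-3 digits
print(isIPv4('example.com'))      # False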
1ab316708e7107536d21ebd26ced4f3b7c6a5f3e
CarliJoy/RoWoOekostromDB
anbieter/conv_helpers.py
[ "MIT" ]
Python
conv_bool
bool
def conv_bool(input_value: Optional[Union[str, int, bool]]) -> bool:
    """
    Convert anything that is not explicit false (like empty, 0 or false)
    """
    if input_value is None:
        return False
    elif input_value is False or str(input_value).lower() in ("false", "", "no"):
        return False
    elif input_value == 0:
        return False
    else:
        return True
Convert anything that is not explicit false (like empty, 0 or false)
Convert anything that is not explicit false (like empty, 0 or false)
[ "Convert", "anything", "that", "is", "not", "explicit", "false", "(", "like", "empty", "0", "or", "false", ")" ]
def conv_bool(input_value: Optional[Union[str, int, bool]]) -> bool:
    if input_value is None:
        return False
    elif input_value is False or str(input_value).lower() in ("false", "", "no"):
        return False
    elif input_value == 0:
        return False
    else:
        return True
[ "def", "conv_bool", "(", "input_value", ":", "Optional", "[", "Union", "[", "str", ",", "int", ",", "bool", "]", "]", ")", "->", "bool", ":", "if", "input_value", "is", "None", ":", "return", "False", "elif", "input_value", "is", "False", "or", "str", "(", "input_value", ")", ".", "lower", "(", ")", "in", "(", "\"false\"", ",", "\"\"", ",", "\"no\"", ")", ":", "return", "False", "elif", "input_value", "==", "0", ":", "return", "False", "else", ":", "return", "True" ]
Convert anything that is not explicit false (like empty, 0 or false)
[ "Convert", "anything", "that", "is", "not", "explicit", "false", "(", "like", "empty", "0", "or", "false", ")" ]
[ "\"\"\"\n Convert anything that is not explicit false (like empty, 0 or false)\n \"\"\"" ]
[ { "param": "input_value", "type": "Optional[Union[str, int, bool]]" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "input_value", "type": "Optional[Union[str, int, bool]]", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
from typing import Optional, Union

def conv_bool(input_value: Optional[Union[str, int, bool]]) -> bool:
    if input_value is None:
        return False
    elif input_value is False or str(input_value).lower() in ("false", "", "no"):
        return False
    elif input_value == 0:
        return False
    else:
        return True
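The resulting truth table, sketched (illustrative inputs):

print(conv_bool(None))   # False
print(conv_bool('No'))   # False -- lowercased match against ('false', '', 'no')
print(conv_bool(0.0))    # False -- caught by the == 0 comparison
print(conv_bool('0'))    # True  -- the string '0' is not treated as falsy
print(conv_bool('yes'))  # True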
ae51d94ecf2f57dd647e5c580a5883d0f5d05219
PhantomInsights/tweet-transcriber
bot.py
[ "MIT" ]
Python
load_log
<not_specific>
def load_log(log_file):
    """Reads the processed comments/posts log file and creates it if it doesn't exist.

    Returns
    -------
    list
        A list of Reddit comments/posts ids.

    """
    try:
        with open(log_file, "r", encoding="utf-8") as temp_file:
            return temp_file.read().splitlines()
    except FileNotFoundError:
        with open(log_file, "a", encoding="utf-8") as temp_file:
            return []
Reads the processed comments/posts log file and creates it if it doesn't exist. Returns ------- list A list of Reddit comments/posts ids.
Reads the processed comments/posts log file and creates it if it doesn't exist. Returns list A list of Reddit comments/posts ids.
[ "Reads", "the", "processed", "comments", "/", "posts", "log", "file", "and", "creates", "it", "if", "it", "doesn", "'", "t", "exist", ".", "Returns", "list", "A", "list", "of", "Reddit", "comments", "/", "posts", "ids", "." ]
def load_log(log_file):
    try:
        with open(log_file, "r", encoding="utf-8") as temp_file:
            return temp_file.read().splitlines()
    except FileNotFoundError:
        with open(log_file, "a", encoding="utf-8") as temp_file:
            return []
[ "def", "load_log", "(", "log_file", ")", ":", "try", ":", "with", "open", "(", "log_file", ",", "\"r\"", ",", "encoding", "=", "\"utf-8\"", ")", "as", "temp_file", ":", "return", "temp_file", ".", "read", "(", ")", ".", "splitlines", "(", ")", "except", "FileNotFoundError", ":", "with", "open", "(", "log_file", ",", "\"a\"", ",", "encoding", "=", "\"utf-8\"", ")", "as", "temp_file", ":", "return", "[", "]" ]
Reads the processed comments/posts log file and creates it if it doesn't exist.
[ "Reads", "the", "processed", "comments", "/", "posts", "log", "file", "and", "creates", "it", "if", "it", "doesn", "'", "t", "exist", "." ]
[ "\"\"\"Reads the processed comments/posts log file and creates it if it doesn't exist.\n\n Returns\n -------\n list\n A list of Reddit comments/posts ids.\n\n \"\"\"" ]
[ { "param": "log_file", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "log_file", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def load_log(log_file):
    try:
        with open(log_file, "r", encoding="utf-8") as temp_file:
            return temp_file.read().splitlines()
    except FileNotFoundError:
        with open(log_file, "a", encoding="utf-8") as temp_file:
            return []
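A minimal usage sketch (the path and id are hypothetical; opening in 'a' mode creates the file when missing):

processed = load_log('processed_posts.txt')
if 'abc123' not in processed:
    pass  # handle the new item, then record its id in the log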
10c509284bbdccd3c54701c5610a2a0d3fb6599c
PhantomInsights/tweet-transcriber
bot_sitewide.py
[ "MIT" ]
Python
update_log
null
def update_log(log_file, item_id):
    """Updates the processed posts log with the given post id.

    Parameters
    ----------
    comment_id : str
        A Reddit post id.

    """
    with open(log_file, "a", encoding="utf-8") as temp_file:
        temp_file.write("{}\n".format(item_id))
Updates the processed posts log with the given post id. Parameters ---------- comment_id : str A Reddit post id.
Updates the processed posts log with the given post id. Parameters comment_id : str A Reddit post id.
[ "Updates", "the", "processed", "posts", "log", "with", "the", "given", "post", "id", ".", "Parameters", "comment_id", ":", "str", "A", "Reddit", "post", "id", "." ]
def update_log(log_file, item_id):
    with open(log_file, "a", encoding="utf-8") as temp_file:
        temp_file.write("{}\n".format(item_id))
[ "def", "update_log", "(", "log_file", ",", "item_id", ")", ":", "with", "open", "(", "log_file", ",", "\"a\"", ",", "encoding", "=", "\"utf-8\"", ")", "as", "temp_file", ":", "temp_file", ".", "write", "(", "\"{}\\n\"", ".", "format", "(", "item_id", ")", ")" ]
Updates the processed posts log with the given post id.
[ "Updates", "the", "processed", "posts", "log", "with", "the", "given", "post", "id", "." ]
[ "\"\"\"Updates the processed posts log with the given post id.\n\n Parameters\n ----------\n comment_id : str\n A Reddit post id.\n\n \"\"\"" ]
[ { "param": "log_file", "type": null }, { "param": "item_id", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "log_file", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "item_id", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def update_log(log_file, item_id):
    with open(log_file, "a", encoding="utf-8") as temp_file:
        temp_file.write("{}\n".format(item_id))
a75485139097ce6c4109c0d24d3f28c2bca64842
tinahuang1994/data-pre-processing
upload_worldbank_data/contents/misc.py
[ "MIT" ]
Python
pick_wanted_entities
<not_specific>
def pick_wanted_entities(entities, drop_patterns=drop_patterns):
    """
    Input:
    * a list of entities that correspond to a dataframe of observations for which these may be in the index
    * a list of which entities you'd like to eliminate

    Output: which indices to keep from the originating dataframe to eliminate the desired entities
    """
    ix_to_keep = [ix for ix, entity in enumerate(entities) if entity not in drop_patterns]
    return(ix_to_keep)
Input: * a list of entities that correspond to a dataframe of observations for which these may be in the index * a list of which entities you'd like to eliminate Output: which indices to keep from the originating dataframe to eliminate the desired entities
a list of entities that correspond to a dataframe of observations for which these may be in the index a list of which entities you'd like to eliminate which indices to keep from the originating dataframe to eliminate the desired entities
[ "a", "list", "of", "entities", "that", "correspond", "to", "a", "dataframe", "of", "observations", "for", "which", "these", "may", "be", "in", "the", "index", "a", "list", "of", "which", "entities", "you", "'", "d", "like", "to", "eliminate", "which", "indices", "to", "keep", "from", "the", "originating", "dataframe", "to", "eliminate", "the", "desired", "entities" ]
def pick_wanted_entities(entities, drop_patterns=drop_patterns):
    ix_to_keep = [ix for ix, entity in enumerate(entities) if entity not in drop_patterns]
    return(ix_to_keep)
[ "def", "pick_wanted_entities", "(", "entities", ",", "drop_patterns", "=", "drop_patterns", ")", ":", "ix_to_keep", "=", "[", "ix", "for", "ix", ",", "entity", "in", "enumerate", "(", "entities", ")", "if", "entity", "not", "in", "drop_patterns", "]", "return", "(", "ix_to_keep", ")" ]
Input: a list of entities that correspond to a dataframe of observations for which these may be in the index a list of which entities you'd like to eliminate
[ "Input", ":", "a", "list", "of", "entities", "that", "correspond", "to", "a", "dataframe", "of", "observations", "for", "which", "these", "may", "be", "in", "the", "index", "a", "list", "of", "which", "entities", "you", "'", "d", "like", "to", "eliminate" ]
[ "\"\"\"\n Input: \n * a list of entities that correspond to a dataframe of observations for which these may be in the index\n * a list of which entities you'd like to eliminate\n \n Output: which indices to keep from the originating dataframe to eliminate the desired entities\n \"\"\"" ]
[ { "param": "entities", "type": null }, { "param": "drop_patterns", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "entities", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "drop_patterns", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
drop_patterns = []  # placeholder only: the module-level default list is not included in this record

def pick_wanted_entities(entities, drop_patterns=drop_patterns):
    ix_to_keep = [ix for ix, entity in enumerate(entities) if entity not in drop_patterns]
    return(ix_to_keep)
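Sketch with an explicit drop list (entity names are hypothetical):

entities = ['World', 'Germany', 'Arab World', 'France']
print(pick_wanted_entities(entities, drop_patterns=['World', 'Arab World']))  # [1, 3]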
fcf6fef3eec1f0db7698e7f5e27aa080fe4d98cc
tinahuang1994/data-pre-processing
upload_metadata_to_api/contents/src/__init__.py
[ "MIT" ]
Python
create_source_object
<not_specific>
def create_source_object(sources):
    """Format the source information as appropriate for the api"""
    if sources:
        source_object = []
        srcs = sources.split("/")
        for ix, src in enumerate(srcs):
            source_object.append({
                "source-name": src,
                "id": ix,
                "source-description": ""
            })
        return source_object
    return None
Format the source information as appropriate for the api
Format the source information as appropriate for the api
[ "Format", "the", "source", "information", "as", "appropriate", "for", "the", "api" ]
def create_source_object(sources):
    if sources:
        source_object = []
        srcs = sources.split("/")
        for ix, src in enumerate(srcs):
            source_object.append({
                "source-name": src,
                "id": ix,
                "source-description": ""
            })
        return source_object
    return None
[ "def", "create_source_object", "(", "sources", ")", ":", "if", "sources", ":", "source_object", "=", "[", "]", "srcs", "=", "sources", ".", "split", "(", "\"/\"", ")", "for", "ix", ",", "src", "in", "enumerate", "(", "srcs", ")", ":", "source_object", ".", "append", "(", "{", "\"source-name\"", ":", "src", ",", "\"id\"", ":", "ix", ",", "\"source-description\"", ":", "\"\"", "}", ")", "return", "source_object", "return", "None" ]
Format the source information as appropriate for the api
[ "Format", "the", "source", "information", "as", "appropriate", "for", "the", "api" ]
[ "\"\"\"Format the source information as appropriate for the api\"\"\"" ]
[ { "param": "sources", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "sources", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def create_source_object(sources):
    if sources:
        source_object = []
        srcs = sources.split("/")
        for ix, src in enumerate(srcs):
            source_object.append({
                "source-name": src,
                "id": ix,
                "source-description": ""
            })
        return source_object
    return None
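Illustrative call (source names are hypothetical):

print(create_source_object('NASA/NOAA'))
# [{'source-name': 'NASA', 'id': 0, 'source-description': ''},
#  {'source-name': 'NOAA', 'id': 1, 'source-description': ''}]
print(create_source_object(''))  # None -- falsy input short-circuits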
967901e8e5171002a42f981bcf2b815c6e52acb7
devvyn/knowledge-mapper
devvyn/cache/file_cache.py
[ "MIT" ]
Python
sanitize_filename
str
def sanitize_filename(filename: str) -> str:
    """
    Make the given string into a filename by removing
    non-descriptive characters.

    :param filename:
    :return:
    """
    return re.sub(r'(?u)[^-\w.]', '', filename)
Make the given string into a filename by removing non-descriptive characters. :param filename: :return:
Make the given string into a filename by removing non-descriptive characters.
[ "Make", "the", "given", "string", "into", "a", "filename", "by", "removing", "non", "-", "descriptive", "characters", "." ]
def sanitize_filename(filename: str) -> str:
    return re.sub(r'(?u)[^-\w.]', '', filename)
[ "def", "sanitize_filename", "(", "filename", ":", "str", ")", "->", "str", ":", "return", "re", ".", "sub", "(", "r'(?u)[^-\\w.]'", ",", "''", ",", "filename", ")" ]
Make the given string into a filename by removing non-descriptive characters.
[ "Make", "the", "given", "string", "into", "a", "filename", "by", "removing", "non", "-", "descriptive", "characters", "." ]
[ "\"\"\"\n Make the given string into a filename by removing\n non-descriptive characters.\n\n :param filename:\n :return:\n \"\"\"" ]
[ { "param": "filename", "type": "str" } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "filename", "type": "str", "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re

def sanitize_filename(filename: str) -> str:
    return re.sub(r'(?u)[^-\w.]', '', filename)
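Illustrative call; only word characters, '-' and '.' survive:

print(sanitize_filename('my file (v2).txt'))  # 'myfilev2.txt'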
557bd90cabee968968091a1b3336c777feb3cb98
devvyn/knowledge-mapper
devvyn/scrape/parse.py
[ "MIT" ]
Python
clean_whitespace
str
def clean_whitespace(text: str) -> str:
    """
    Replace all contiguous whitespace with single space character,
    strip leading and trailing whitespace.
    """
    text = str(text or '')
    stripped = text.strip()
    sub = re.sub(r'\s+', ' ', stripped, )
    return sub
Replace all contiguous whitespace with single space character, strip leading and trailing whitespace.
Replace all contiguous whitespace with single space character, strip leading and trailing whitespace.
[ "Replace", "all", "contiguous", "whitespace", "with", "single", "space", "character", "strip", "leading", "and", "trailing", "whitespace", "." ]
def clean_whitespace(text: str) -> str:
    text = str(text or '')
    stripped = text.strip()
    sub = re.sub(r'\s+', ' ', stripped, )
    return sub
[ "def", "clean_whitespace", "(", "text", ":", "str", ")", "->", "str", ":", "text", "=", "str", "(", "text", "or", "''", ")", "stripped", "=", "text", ".", "strip", "(", ")", "sub", "=", "re", ".", "sub", "(", "r'\\s+'", ",", "' '", ",", "stripped", ",", ")", "return", "sub" ]
Replace all contiguous whitespace with single space character, strip leading and trailing whitespace.
[ "Replace", "all", "contiguous", "whitespace", "with", "single", "space", "character", "strip", "leading", "and", "trailing", "whitespace", "." ]
[ "\"\"\"\n Replace all contiguous whitespace with single space character,\n strip leading and trailing whitespace.\n \"\"\"" ]
[ { "param": "text", "type": "str" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "text", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re

def clean_whitespace(text: str) -> str:
    text = str(text or '')
    stripped = text.strip()
    sub = re.sub(r'\s+', ' ', stripped, )
    return sub
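Illustrative calls; the `text or ''` guard also coerces None to an empty string despite the str annotation:

print(clean_whitespace('  foo\n\tbar  '))  # 'foo bar'
print(clean_whitespace(None))             # ''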
85d94d73635f4c767c895501c8081fbf115b1914
Othernet-Project/ndb-utils
ndb_utils/models.py
[ "MIT" ]
Python
_get_key
<not_specific>
def _get_key(cls, owner):
    """ Ensures owner is a key and not entity """
    if hasattr(owner, 'key'):
        return owner.key
    return owner
Ensures owner is a key and not entity
Ensures owner is a key and not entity
[ "Ensures", "owner", "is", "a", "key", "and", "not", "entity" ]
def _get_key(cls, owner):
    if hasattr(owner, 'key'):
        return owner.key
    return owner
[ "def", "_get_key", "(", "cls", ",", "owner", ")", ":", "if", "hasattr", "(", "owner", ",", "'key'", ")", ":", "return", "owner", ".", "key", "return", "owner" ]
Ensures owner is a key and not entity
[ "Ensures", "owner", "is", "a", "key", "and", "not", "entity" ]
[ "\"\"\" Ensures owner is a key and not entity \"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "owner", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "owner", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _get_key(cls, owner):
    if hasattr(owner, 'key'):
        return owner.key
    return owner
35c15191d355e744f278b2c130d9468837593d91
flipcoder/siege-tools
sgmake.py
[ "MIT" ]
Python
is_project
<not_specific>
def is_project(project):
    """
    Checks if a project meets the minimum step standards
    """
    for step in project.steps:
        if step.type in ("make","package"): # at least one make or package step
            return True
    return False
Checks if a project meets the minimum step standards
Checks if a project meets the minimum step standards
[ "Checks", "if", "a", "project", "meets", "the", "minimum", "step", "standards" ]
def is_project(project):
    for step in project.steps:
        if step.type in ("make","package"):
            return True
    return False
[ "def", "is_project", "(", "project", ")", ":", "for", "step", "in", "project", ".", "steps", ":", "if", "step", ".", "type", "in", "(", "\"make\"", ",", "\"package\"", ")", ":", "return", "True", "return", "False" ]
Checks if a project meets the minimum step standards
[ "Checks", "if", "a", "project", "meets", "the", "minimum", "step", "standards" ]
[ "\"\"\"\n Checks if a project meets the minimum step standards\n \"\"\"", "# at least one make or package step" ]
[ { "param": "project", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "project", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def is_project(project):
    for step in project.steps:
        if step.type in ("make","package"):
            return True
    return False
acb6a9e46d5b30e1876001422d42d343ee1ad856
LemonJust/synapse-redistribution
sbr/cohort.py
[ "MIT" ]
Python
import_imgpairstudy
<not_specific>
def import_imgpairstudy(fish_id, cohort_df, syn=None, resolution=None):
    """
    Imports the coordinates , xyz in pixels, and intensity , int_core & int_vcn,
    from the image-pair-study csv produced by the synspy.
    To get the cohort_df run load_imgpairstudy_csv.

    resolution ( xyz) : get coordinates in pixels if resolution is provided
    syn : if None creates a new syn, if given adds to existing dictionary
    TODO : finish describtion
    """

    def create_syn_type(syn, df, syn_type, xyz_cols, int_core_col, int_vcn_col, resolution=None):
        """ Helper function : populates syn[fish_id][syn_type] dictionary"""
        syn[fish_id][syn_type] = {}
        syn[fish_id][syn_type]["xyz"] = df.loc[:, xyz_cols].values
        syn[fish_id][syn_type]["int_core"] = df.loc[:, int_core_col].values
        syn[fish_id][syn_type]["int_vcn"] = df.loc[:, int_vcn_col].values
        # get coordinates in pixels if resolution is provided
        if resolution is not None:
            syn[fish_id][syn_type]["xyz_pix"] = syn[fish_id][syn_type]["xyz"] / resolution
        return syn

    # create new syn if none provided
    if syn is None:
        syn = {}
    # create fish entry
    syn[fish_id] = {}

    # get all the data for this fish
    is_unch = cohort_df.loc[(cohort_df['study_id'] == fish_id) & (cohort_df['t'] == 0.0)]
    is_lost = cohort_df.loc[(cohort_df['study_id'] == fish_id) & (cohort_df['t'] == 1.0)]
    is_gain = cohort_df.loc[(cohort_df['study_id'] == fish_id) & (cohort_df['t'] == 2.0)]

    # populate syn
    syn = create_syn_type(syn, is_lost, "lost", ["x1", "y1", "z1"], ["core1"], ["vcn1"], resolution)
    syn = create_syn_type(syn, is_gain, "gain", ["x2", "y2", "z2"], ["core2"], ["vcn2"], resolution)
    syn = create_syn_type(syn, is_unch, "uncB", ["x1", "y1", "z1"], ["core1"], ["vcn1"], resolution)
    syn = create_syn_type(syn, is_unch, "uncA", ["x2", "y2", "z2"], ["core2"], ["vcn2"], resolution)

    return syn
Imports the coordinates , xyz in pixels, and intensity , int_core & int_vcn, from the image-pair-study csv produced by the synspy. To get the cohort_df run load_imgpairstudy_csv. resolution ( xyz) : get coordinates in pixels if resolution is provided syn : if None creates a new syn, if given adds to existing dictionary TODO : finish describtion
Imports the coordinates , xyz in pixels, and intensity , int_core & int_vcn, from the image-pair-study csv produced by the synspy. resolution ( xyz) : get coordinates in pixels if resolution is provided syn : if None creates a new syn, if given adds to existing dictionary TODO : finish describtion
[ "Imports", "the", "coordinates", "xyz", "in", "pixels", "and", "intensity", "int_core", "&", "int_vcn", "from", "the", "image", "-", "pair", "-", "study", "csv", "produced", "by", "the", "synspy", ".", "resolution", "(", "xyz", ")", ":", "get", "coordinates", "in", "pixels", "if", "resolution", "is", "provided", "syn", ":", "if", "None", "creates", "a", "new", "syn", "if", "given", "adds", "to", "existing", "dictionary", "TODO", ":", "finish", "describtion" ]
def import_imgpairstudy(fish_id, cohort_df, syn=None, resolution=None):
    def create_syn_type(syn, df, syn_type, xyz_cols, int_core_col, int_vcn_col, resolution=None):
        syn[fish_id][syn_type] = {}
        syn[fish_id][syn_type]["xyz"] = df.loc[:, xyz_cols].values
        syn[fish_id][syn_type]["int_core"] = df.loc[:, int_core_col].values
        syn[fish_id][syn_type]["int_vcn"] = df.loc[:, int_vcn_col].values
        if resolution is not None:
            syn[fish_id][syn_type]["xyz_pix"] = syn[fish_id][syn_type]["xyz"] / resolution
        return syn

    if syn is None:
        syn = {}
    syn[fish_id] = {}
    is_unch = cohort_df.loc[(cohort_df['study_id'] == fish_id) & (cohort_df['t'] == 0.0)]
    is_lost = cohort_df.loc[(cohort_df['study_id'] == fish_id) & (cohort_df['t'] == 1.0)]
    is_gain = cohort_df.loc[(cohort_df['study_id'] == fish_id) & (cohort_df['t'] == 2.0)]
    syn = create_syn_type(syn, is_lost, "lost", ["x1", "y1", "z1"], ["core1"], ["vcn1"], resolution)
    syn = create_syn_type(syn, is_gain, "gain", ["x2", "y2", "z2"], ["core2"], ["vcn2"], resolution)
    syn = create_syn_type(syn, is_unch, "uncB", ["x1", "y1", "z1"], ["core1"], ["vcn1"], resolution)
    syn = create_syn_type(syn, is_unch, "uncA", ["x2", "y2", "z2"], ["core2"], ["vcn2"], resolution)
    return syn
[ "def", "import_imgpairstudy", "(", "fish_id", ",", "cohort_df", ",", "syn", "=", "None", ",", "resolution", "=", "None", ")", ":", "def", "create_syn_type", "(", "syn", ",", "df", ",", "syn_type", ",", "xyz_cols", ",", "int_core_col", ",", "int_vcn_col", ",", "resolution", "=", "None", ")", ":", "\"\"\" Helper function : populates syn[fish_id][syn_type] dictionary\"\"\"", "syn", "[", "fish_id", "]", "[", "syn_type", "]", "=", "{", "}", "syn", "[", "fish_id", "]", "[", "syn_type", "]", "[", "\"xyz\"", "]", "=", "df", ".", "loc", "[", ":", ",", "xyz_cols", "]", ".", "values", "syn", "[", "fish_id", "]", "[", "syn_type", "]", "[", "\"int_core\"", "]", "=", "df", ".", "loc", "[", ":", ",", "int_core_col", "]", ".", "values", "syn", "[", "fish_id", "]", "[", "syn_type", "]", "[", "\"int_vcn\"", "]", "=", "df", ".", "loc", "[", ":", ",", "int_vcn_col", "]", ".", "values", "if", "resolution", "is", "not", "None", ":", "syn", "[", "fish_id", "]", "[", "syn_type", "]", "[", "\"xyz_pix\"", "]", "=", "syn", "[", "fish_id", "]", "[", "syn_type", "]", "[", "\"xyz\"", "]", "/", "resolution", "return", "syn", "if", "syn", "is", "None", ":", "syn", "=", "{", "}", "syn", "[", "fish_id", "]", "=", "{", "}", "is_unch", "=", "cohort_df", ".", "loc", "[", "(", "cohort_df", "[", "'study_id'", "]", "==", "fish_id", ")", "&", "(", "cohort_df", "[", "'t'", "]", "==", "0.0", ")", "]", "is_lost", "=", "cohort_df", ".", "loc", "[", "(", "cohort_df", "[", "'study_id'", "]", "==", "fish_id", ")", "&", "(", "cohort_df", "[", "'t'", "]", "==", "1.0", ")", "]", "is_gain", "=", "cohort_df", ".", "loc", "[", "(", "cohort_df", "[", "'study_id'", "]", "==", "fish_id", ")", "&", "(", "cohort_df", "[", "'t'", "]", "==", "2.0", ")", "]", "syn", "=", "create_syn_type", "(", "syn", ",", "is_lost", ",", "\"lost\"", ",", "[", "\"x1\"", ",", "\"y1\"", ",", "\"z1\"", "]", ",", "[", "\"core1\"", "]", ",", "[", "\"vcn1\"", "]", ",", "resolution", ")", "syn", "=", "create_syn_type", "(", "syn", ",", "is_gain", ",", "\"gain\"", ",", "[", "\"x2\"", ",", "\"y2\"", ",", "\"z2\"", "]", ",", "[", "\"core2\"", "]", ",", "[", "\"vcn2\"", "]", ",", "resolution", ")", "syn", "=", "create_syn_type", "(", "syn", ",", "is_unch", ",", "\"uncB\"", ",", "[", "\"x1\"", ",", "\"y1\"", ",", "\"z1\"", "]", ",", "[", "\"core1\"", "]", ",", "[", "\"vcn1\"", "]", ",", "resolution", ")", "syn", "=", "create_syn_type", "(", "syn", ",", "is_unch", ",", "\"uncA\"", ",", "[", "\"x2\"", ",", "\"y2\"", ",", "\"z2\"", "]", ",", "[", "\"core2\"", "]", ",", "[", "\"vcn2\"", "]", ",", "resolution", ")", "return", "syn" ]
Imports the coordinates, xyz in pixels, and intensity, int_core & int_vcn, from the image-pair-study csv produced by the synspy.
[ "Imports", "the", "coordinates", "xyz", "in", "pixels", "and", "intensity", "int_core", "&", "int_vcn", "from", "the", "image", "-", "pair", "-", "study", "csv", "produced", "by", "the", "synspy", "." ]
[ "\"\"\"\n Imports the coordinates , xyz in pixels, and intensity , int_core & int_vcn,\n from the image-pair-study csv produced by the synspy.\n To get the cohort_df run load_imgpairstudy_csv.\n\n resolution ( xyz) : get coordinates in pixels if resolution is provided\n syn : if None creates a new syn, if given adds to existing dictionary\n TODO : finish describtion\n \"\"\"", "\"\"\" Helper function : populates syn[fish_id][syn_type] dictionary\"\"\"", "# get coordinates in pixels if resolution is provided", "# create new syn if none provided", "# create fish entry", "# get all the data for this fish", "# populate syn" ]
[ { "param": "fish_id", "type": null }, { "param": "cohort_df", "type": null }, { "param": "syn", "type": null }, { "param": "resolution", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "fish_id", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "cohort_df", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "syn", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "resolution", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def import_imgpairstudy(fish_id, cohort_df, syn=None, resolution=None): def create_syn_type(syn, df, syn_type, xyz_cols, int_core_col, int_vcn_col, resolution=None): syn[fish_id][syn_type] = {} syn[fish_id][syn_type]["xyz"] = df.loc[:, xyz_cols].values syn[fish_id][syn_type]["int_core"] = df.loc[:, int_core_col].values syn[fish_id][syn_type]["int_vcn"] = df.loc[:, int_vcn_col].values if resolution is not None: syn[fish_id][syn_type]["xyz_pix"] = syn[fish_id][syn_type]["xyz"] / resolution return syn if syn is None: syn = {} syn[fish_id] = {} is_unch = cohort_df.loc[(cohort_df['study_id'] == fish_id) & (cohort_df['t'] == 0.0)] is_lost = cohort_df.loc[(cohort_df['study_id'] == fish_id) & (cohort_df['t'] == 1.0)] is_gain = cohort_df.loc[(cohort_df['study_id'] == fish_id) & (cohort_df['t'] == 2.0)] syn = create_syn_type(syn, is_lost, "lost", ["x1", "y1", "z1"], ["core1"], ["vcn1"], resolution) syn = create_syn_type(syn, is_gain, "gain", ["x2", "y2", "z2"], ["core2"], ["vcn2"], resolution) syn = create_syn_type(syn, is_unch, "uncB", ["x1", "y1", "z1"], ["core1"], ["vcn1"], resolution) syn = create_syn_type(syn, is_unch, "uncA", ["x2", "y2", "z2"], ["core2"], ["vcn2"], resolution) return syn
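A minimal usage sketch for import_imgpairstudy, with the function above in scope. The column values and the resolution below are assumptions inferred from the columns the function indexes, not a documented schema; the real cohort_df would come from load_imgpairstudy_csv.

import numpy as np
import pandas as pd

# Hypothetical cohort frame; 't' encodes 0.0 = unchanged, 1.0 = lost, 2.0 = gained.
cohort_df = pd.DataFrame({
    'study_id': ['fish1', 'fish1', 'fish1'],
    't': [0.0, 1.0, 2.0],
    'x1': [1.0, 2.0, 3.0], 'y1': [1.0, 2.0, 3.0], 'z1': [0.5, 0.5, 0.5],
    'x2': [1.1, 2.1, 3.1], 'y2': [1.1, 2.1, 3.1], 'z2': [0.6, 0.6, 0.6],
    'core1': [10.0, 11.0, 12.0], 'vcn1': [1.0, 1.0, 1.0],
    'core2': [10.5, 11.5, 12.5], 'vcn2': [2.0, 2.0, 2.0],
})

syn = import_imgpairstudy('fish1', cohort_df,
                          resolution=np.array([0.4, 0.4, 1.0]))  # assumed um per pixel
print(sorted(syn['fish1']))             # ['gain', 'lost', 'uncA', 'uncB']
print(syn['fish1']['gain']['xyz_pix'])  # the xyz coordinates divided by the resolution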
f7db062f33eef35965a90842ec792cf3b1ed925d
LemonJust/synapse-redistribution
sbr/build_features.py
[ "MIT" ]
Python
subtract_bg
<not_specific>
def subtract_bg(signal, bg): """ returns normalised intensity as (signal - bg) """ return signal - bg
returns normalised intensity as (signal - bg)
returns normalised intensity as (signal - bg)
[ "returns", "normalised", "intensity", "as", "(", "signal", "-", "bg", ")" ]
def subtract_bg(signal, bg): return signal - bg
[ "def", "subtract_bg", "(", "signal", ",", "bg", ")", ":", "return", "signal", "-", "bg" ]
returns normalised intensity as (signal - bg)
[ "returns", "normalised", "intensity", "as", "(", "signal", "-", "bg", ")" ]
[ "\"\"\"\n returns normalised intensity as (signal - bg)\n \"\"\"" ]
[ { "param": "signal", "type": null }, { "param": "bg", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "signal", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "bg", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def subtract_bg(signal, bg): return signal - bg
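subtract_bg is plain elementwise subtraction, so it works unchanged on scalars and NumPy arrays; a one-line sanity check:

import numpy as np

print(subtract_bg(np.array([120.0, 150.0, 90.0]), 80.0))  # [40. 70. 10.]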
d293cd6ca7cba3191bf87d01e567b296c8eb5d94
otosense/hear
hear/regular_panel_data.py
[ "Apache-2.0" ]
Python
_random_data_and_serialization_params
null
def _random_data_and_serialization_params( n_samples=100, n_channels=1, value_range=(-2000, 2000), dtype='float64' ): """ Get random data and serialization params (i.e. how to map to bytes)""" raise NotImplementedError('Not implemented yet')
Get random data and serialization params (i.e. how to map to bytes)
Get random data and serialization params
[ "Get", "random", "data", "and", "serialization", "params" ]
def _random_data_and_serialization_params( n_samples=100, n_channels=1, value_range=(-2000, 2000), dtype='float64' ): raise NotImplementedError('Not implemented yet')
[ "def", "_random_data_and_serialization_params", "(", "n_samples", "=", "100", ",", "n_channels", "=", "1", ",", "value_range", "=", "(", "-", "2000", ",", "2000", ")", ",", "dtype", "=", "'float64'", ")", ":", "raise", "NotImplementedError", "(", "'Not implemented yet'", ")" ]
Get random data and serialization params (i.e. how to map to bytes)
[ "Get", "random", "data", "and", "serialization", "params", "(", "i", ".", "e", "." ]
[ "\"\"\" Get random data and serialization params (i.e. how to map to bytes)\"\"\"" ]
[ { "param": "n_samples", "type": null }, { "param": "n_channels", "type": null }, { "param": "value_range", "type": null }, { "param": "dtype", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "n_samples", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "n_channels", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "value_range", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "dtype", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _random_data_and_serialization_params( n_samples=100, n_channels=1, value_range=(-2000, 2000), dtype='float64' ): raise NotImplementedError('Not implemented yet')
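The function above is a stub: calling it only raises NotImplementedError. Purely to illustrate what the signature suggests, here is one hedged guess at an implementation; treating the serialization params as just a dtype plus a shape is my assumption, not the hear library's actual contract.

import numpy as np

def _random_data_and_serialization_params_sketch(
    n_samples=100, n_channels=1, value_range=(-2000, 2000), dtype='float64'
):
    # Draw uniform samples with the requested range, shape and dtype.
    low, high = value_range
    data = np.random.uniform(low, high, size=(n_samples, n_channels)).astype(dtype)
    # Record what a reader needs to rebuild the array from raw bytes (assumed contract).
    params = {'dtype': dtype, 'shape': data.shape}
    return data, params

data, params = _random_data_and_serialization_params_sketch()
roundtrip = np.frombuffer(data.tobytes(), dtype=params['dtype']).reshape(params['shape'])
assert (roundtrip == data).all()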
72eaf5629265978a5ac7fe27dd95d966292d767e
jastemborski/JIRA-Import
utilities.py
[ "MIT" ]
Python
write_story
null
def write_story(wb, col, key, filename): """ Writes Stories to Excel Workbook. Args: wb: A variable storing the Excel Workbook in memory. col: A variable containing the column being updated. key: A variable containing the JIRA Story Key. """ try: jira_sheet = wb.get_sheet_by_name('JIRA Stories') jira_sheet[col + "2"] = key wb.save(filename) except Exception: print("""Unable to save workbook. Please close excel spreadsheet then try again.""")
Writes Stories to Excel Workbook. Args: wb: A variable storing the Excel Workbook in memory. col: A variable containing the column being updated. key: A variable containing the JIRA Story Key.
Writes Stories to Excel Workbook.
[ "Writes", "Stories", "to", "Excel", "Workbook", "." ]
def write_story(wb, col, key, filename): try: jira_sheet = wb.get_sheet_by_name('JIRA Stories') jira_sheet[col + "2"] = key wb.save(filename) except Exception: print("""Unable to save workbook. Please close excel spreadsheet then try again.""")
[ "def", "write_story", "(", "wb", ",", "col", ",", "key", ",", "filename", ")", ":", "try", ":", "jira_sheet", "=", "wb", ".", "get_sheet_by_name", "(", "'JIRA Stories'", ")", "jira_sheet", "[", "col", "+", "\"2\"", "]", "=", "key", "wb", ".", "save", "(", "filename", ")", "except", "Exception", ":", "print", "(", "\"\"\"Unable to save workbook. Please close excel spreadsheet then\n try again.\"\"\"", ")" ]
Writes Stories to Excel Workbook.
[ "Writes", "Stories", "to", "Excel", "Workbook", "." ]
[ "\"\"\" Writes Stories to Excel Workbook.\n\n Args:\n wb: A variable storing the Excel Workbook in memory.\n col: A variable containing the column being updated.\n key: A variable containing the JIRA Story Key.\n \"\"\"" ]
[ { "param": "wb", "type": null }, { "param": "col", "type": null }, { "param": "key", "type": null }, { "param": "filename", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "wb", "type": null, "docstring": "A variable storing the Excel Workbook in memory.", "docstring_tokens": [ "A", "variable", "storing", "the", "Excel", "Workbook", "in", "memory", "." ], "default": null, "is_optional": null }, { "identifier": "col", "type": null, "docstring": "A variable containing the column being updated.", "docstring_tokens": [ "A", "variable", "containing", "the", "column", "being", "updated", "." ], "default": null, "is_optional": null }, { "identifier": "key", "type": null, "docstring": "A variable containing the JIRA Story Key.", "docstring_tokens": [ "A", "variable", "containing", "the", "JIRA", "Story", "Key", "." ], "default": null, "is_optional": null }, { "identifier": "filename", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def write_story(wb, col, key, filename): try: jira_sheet = wb.get_sheet_by_name('JIRA Stories') jira_sheet[col + "2"] = key wb.save(filename) except Exception: print("""Unable to save workbook. Please close excel spreadsheet then try again.""")
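A usage sketch with openpyxl, assuming the workbook already contains a 'JIRA Stories' sheet. The recorded code uses the legacy get_sheet_by_name accessor; on recent openpyxl releases, where that accessor is deprecated or removed (the modern spelling is wb['JIRA Stories']), the bare except here would simply print the save error message.

from openpyxl import Workbook

wb = Workbook()
wb.create_sheet('JIRA Stories')
write_story(wb, 'A', 'PROJ-123', 'stories.xlsx')  # writes PROJ-123 into cell A2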
72eaf5629265978a5ac7fe27dd95d966292d767e
jastemborski/JIRA-Import
utilities.py
[ "MIT" ]
Python
extract_comments
<not_specific>
def extract_comments(comments):
    """ Utility method for parsing JIRA comments represented as JSON
    Args:
        comments: A variable containing JIRA comments in JSON
        representation.
    Returns:
        A string containing all of the JIRA comments tied to an issue
    """
    size = len(comments)
    additional_notes = ""
    for n in range(0, size):
        additional_notes = additional_notes + comments[n]['body'] + "\n"
    return additional_notes
Utility method for parsing JIRA comments represented as JSON Args: comments: A variable containing JIRA comments in JSON representation. Returns: A string containing all of the JIRA comments tied to an issue
Utility method for parsing JIRA comments represented as JSON
[ "Utility", "method", "for", "parsing", "JIRA", "comments", "represented", "as", "JSON" ]
def extract_comments(comments):
    size = len(comments)
    additional_notes = ""
    for n in range(0, size):
        additional_notes = additional_notes + comments[n]['body'] + "\n"
    return additional_notes
[ "def", "extract_comments", "(", "comments", ")", ":", "size", "=", "len", "(", "comments", ")", "addtional_notes", "=", "\"\"", "for", "n", "in", "range", "(", "0", ",", "size", ")", ":", "addtional_notes", "=", "addtional_notes", "+", "comments", "[", "n", "]", "[", "'body'", "]", "+", "\"\\n\"", "return", "addtional_notes" ]
Utility method for parsing JIRA comments represented as JSON
[ "Utility", "method", "for", "parsing", "JIRA", "comments", "represented", "as", "JSON" ]
[ "\"\"\" Utility method for parsing JIRA comments represented as JSON\n Args:\n comments: A variable containing JIRA comments in JSON\n representation.\n Returns:\n A string containing all of the JIRA comments tied to an issue\n \"\"\"" ]
[ { "param": "comments", "type": null } ]
{ "returns": [ { "docstring": "A string containing all of the JIRA comments tied to an issue", "docstring_tokens": [ "A", "string", "containing", "all", "of", "the", "JIRA", "comments", "tied", "to", "an", "issue" ], "type": null } ], "raises": [], "params": [ { "identifier": "comments", "type": null, "docstring": "A variable containing JIRA comments in JSON\nrepresentation.", "docstring_tokens": [ "A", "variable", "containing", "JIRA", "comments", "in", "JSON", "representation", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def extract_comments(comments):
    size = len(comments)
    additional_notes = ""
    for n in range(0, size):
        additional_notes = additional_notes + comments[n]['body'] + "\n"
    return additional_notes
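extract_comments only assumes each comment is a mapping with a 'body' key, matching the shape of JIRA's REST comment payload; a quick sketch:

comments = [{'body': 'Needs QA review.'}, {'body': 'Blocked on PROJ-42.'}]
print(extract_comments(comments))
# Needs QA review.
# Blocked on PROJ-42.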
5f2b8f167098601f57e356078a0c664ef5c28741
cdeil/ccdproc
ccdproc/ccdproc.py
[ "BSD-3-Clause" ]
Python
flat_correct
<not_specific>
def flat_correct(ccd, flat): """Correct the image for flatfielding Parameters ---------- ccd : CCDData object Data to be flatfield corrected flat : CCDData object Flatfield to apply to the data {log} Returns ------- ccd : CCDData object CCDData object with flat corrected """ # normalize the flat flat.data = flat.data / flat.data.mean() if flat.uncertainty is not None: flat.uncertainty.array = flat.uncertainty.array / flat.data.mean() # divide through the flat ccd.divide(flat) return ccd
Correct the image for flatfielding Parameters ---------- ccd : CCDData object Data to be flatfield corrected flat : CCDData object Flatfield to apply to the data {log} Returns ------- ccd : CCDData object CCDData object with flat corrected
Correct the image for flatfielding Parameters ccd : CCDData object Data to be flatfield corrected flat : CCDData object Flatfield to apply to the data {log} Returns ccd : CCDData object CCDData object with flat corrected
[ "Correct", "the", "image", "for", "flatfielding", "Parameters", "ccd", ":", "CCDData", "object", "Data", "to", "be", "flatfield", "corrected", "flat", ":", "CCDData", "object", "Flatfield", "to", "apply", "to", "the", "data", "{", "log", "}", "Returns", "ccd", ":", "CCDData", "object", "CCDData", "object", "with", "flat", "corrected" ]
def flat_correct(ccd, flat): flat.data = flat.data / flat.data.mean() if flat.uncertainty is not None: flat.uncertainty.array = flat.uncertainty.array / flat.data.mean() ccd.divide(flat) return ccd
[ "def", "flat_correct", "(", "ccd", ",", "flat", ")", ":", "flat", ".", "data", "=", "flat", ".", "data", "/", "flat", ".", "data", ".", "mean", "(", ")", "if", "flat", ".", "uncertainty", "is", "not", "None", ":", "flat", ".", "uncertainty", ".", "array", "=", "flat", ".", "uncertainty", ".", "array", "/", "flat", ".", "data", ".", "mean", "(", ")", "ccd", ".", "divide", "(", "flat", ")", "return", "ccd" ]
Correct the image for flatfielding Parameters
[ "Correct", "the", "image", "for", "flatfielding", "Parameters" ]
[ "\"\"\"Correct the image for flatfielding\n\n Parameters\n ----------\n ccd : CCDData object\n Data to be flatfield corrected\n\n flat : CCDData object\n Flatfield to apply to the data\n\n {log}\n\n Returns\n -------\n ccd : CCDData object\n CCDData object with flat corrected\n \"\"\"", "# normalize the flat", "# divide through the flat" ]
[ { "param": "ccd", "type": null }, { "param": "flat", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "ccd", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "flat", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def flat_correct(ccd, flat): flat.data = flat.data / flat.data.mean() if flat.uncertainty is not None: flat.uncertainty.array = flat.uncertainty.array / flat.data.mean() ccd.divide(flat) return ccd
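A usage sketch with astropy's CCDData standing in for the CCDData object the docstring names. Two caveats, since this snapshot of ccdproc predates the current API: flat.data is normalised before the uncertainty is rescaled, so the second flat.data.mean() call sees the already-normalised flat (mean roughly 1); and under current astropy, divide returns a new object rather than mutating ccd, so the returned ccd may come back unchanged.

import numpy as np
from astropy.nddata import CCDData

ccd = CCDData(np.full((4, 4), 100.0), unit='adu')
flat = CCDData(np.random.uniform(0.9, 1.1, (4, 4)), unit='adu')
corrected = flat_correct(ccd, flat)  # flat is scaled to unit mean, then divided out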
0c76efa1f4b48899c9729b497d1310785491746d
bloomsburyai/cape-slack
slack.py
[ "MIT" ]
Python
parse_slack_output
<not_specific>
def parse_slack_output(slack_rtm_output, bot): """ The Slack Real Time Messaging API is an events firehose. this parsing function returns None unless a message is directed at the Bot, based on its ID. """ output_list = slack_rtm_output if output_list and len(output_list) > 0: for output in output_list: at_bot = "<@%s>" % bot['bot_id'] if output and 'text' in output and at_bot in output['text'] and 'channel' in output: # return text after the @ mention, whitespace removed return output['text'].split(at_bot)[1].strip(), \ output['channel'] return None, None
The Slack Real Time Messaging API is an events firehose. this parsing function returns None unless a message is directed at the Bot, based on its ID.
The Slack Real Time Messaging API is an events firehose. this parsing function returns None unless a message is directed at the Bot, based on its ID.
[ "The", "Slack", "Real", "Time", "Messaging", "API", "is", "an", "events", "firehose", ".", "this", "parsing", "function", "returns", "None", "unless", "a", "message", "is", "directed", "at", "the", "Bot", "based", "on", "its", "ID", "." ]
def parse_slack_output(slack_rtm_output, bot): output_list = slack_rtm_output if output_list and len(output_list) > 0: for output in output_list: at_bot = "<@%s>" % bot['bot_id'] if output and 'text' in output and at_bot in output['text'] and 'channel' in output: return output['text'].split(at_bot)[1].strip(), \ output['channel'] return None, None
[ "def", "parse_slack_output", "(", "slack_rtm_output", ",", "bot", ")", ":", "output_list", "=", "slack_rtm_output", "if", "output_list", "and", "len", "(", "output_list", ")", ">", "0", ":", "for", "output", "in", "output_list", ":", "at_bot", "=", "\"<@%s>\"", "%", "bot", "[", "'bot_id'", "]", "if", "output", "and", "'text'", "in", "output", "and", "at_bot", "in", "output", "[", "'text'", "]", "and", "'channel'", "in", "output", ":", "return", "output", "[", "'text'", "]", ".", "split", "(", "at_bot", ")", "[", "1", "]", ".", "strip", "(", ")", ",", "output", "[", "'channel'", "]", "return", "None", ",", "None" ]
The Slack Real Time Messaging API is an events firehose.
[ "The", "Slack", "Real", "Time", "Messaging", "API", "is", "an", "events", "firehose", "." ]
[ "\"\"\"\n The Slack Real Time Messaging API is an events firehose.\n this parsing function returns None unless a message is\n directed at the Bot, based on its ID.\n \"\"\"", "# return text after the @ mention, whitespace removed" ]
[ { "param": "slack_rtm_output", "type": null }, { "param": "bot", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "slack_rtm_output", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "bot", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def parse_slack_output(slack_rtm_output, bot): output_list = slack_rtm_output if output_list and len(output_list) > 0: for output in output_list: at_bot = "<@%s>" % bot['bot_id'] if output and 'text' in output and at_bot in output['text'] and 'channel' in output: return output['text'].split(at_bot)[1].strip(), \ output['channel'] return None, None
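A sketch of the RTM event shape the parser expects: a list of message dicts carrying 'text' and 'channel', plus a bot dict whose 'bot_id' holds the bot's user ID.

bot = {'bot_id': 'U123ABC'}
rtm_output = [{'type': 'message', 'channel': 'C42',
               'text': '<@U123ABC> status please'}]
text, channel = parse_slack_output(rtm_output, bot)
print(text, channel)  # status please C42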
285c94fb046591b3e627b1753b3d620cff33333c
sourcery-ai-bot/streamlit
lib/tests/testutil.py
[ "Apache-2.0" ]
Python
normalize_md
<not_specific>
def normalize_md(txt): """Replace newlines *inside paragraphs* with spaces. Consecutive lines of text are considered part of the same paragraph in Markdown. So this function joins those into a single line to make the test robust to changes in text wrapping. NOTE: This function doesn't attempt to be 100% grammatically correct Markdown! It's just supposed to be "correct enough" for tests to pass. For example, when we guard "\n\n" from being converted, we really should be guarding for RegEx("\n\n+") instead. But that doesn't matter for our tests. """ # Two newlines in a row should NOT be replaced with a space. txt = txt.replace("\n\n", "OMG_NEWLINE") # Lists should NOT be replaced with a space. txt = txt.replace("\n*", "OMG_STAR") txt = txt.replace("\n-", "OMG_HYPHEN") # Links broken over two lines should not get an extra space. txt = txt.replace("]\n(", "OMG_LINK") # Convert all remaining newlines into spaces. txt = txt.replace("\n", " ") # Restore everything else. txt = txt.replace("OMG_NEWLINE", "\n\n") txt = txt.replace("OMG_STAR", "\n*") txt = txt.replace("OMG_HYPHEN", "\n-") txt = txt.replace("OMG_LINK", "](") return txt.strip()
Replace newlines *inside paragraphs* with spaces. Consecutive lines of text are considered part of the same paragraph in Markdown. So this function joins those into a single line to make the test robust to changes in text wrapping. NOTE: This function doesn't attempt to be 100% grammatically correct Markdown! It's just supposed to be "correct enough" for tests to pass. For example, when we guard "\n\n" from being converted, we really should be guarding for RegEx("\n\n+") instead. But that doesn't matter for our tests.
Replace newlines *inside paragraphs* with spaces. Consecutive lines of text are considered part of the same paragraph in Markdown. So this function joins those into a single line to make the test robust to changes in text wrapping. This function doesn't attempt to be 100% grammatically correct Markdown. It's just supposed to be "correct enough" for tests to pass. For example, when we guard "\n\n" from being converted, we really should be guarding for RegEx("\n\n+") instead. But that doesn't matter for our tests.
[ "Replace", "newlines", "*", "inside", "paragraphs", "*", "with", "spaces", ".", "Consecutive", "lines", "of", "text", "are", "considered", "part", "of", "the", "same", "paragraph", "in", "Markdown", ".", "So", "this", "function", "joins", "those", "into", "a", "single", "line", "to", "make", "the", "test", "robust", "to", "changes", "in", "text", "wrapping", ".", "This", "function", "doesn", "'", "t", "attempt", "to", "be", "100%", "grammatically", "correct", "Markdown", ".", "It", "'", "s", "just", "supposed", "to", "be", "\"", "correct", "enough", "\"", "for", "tests", "to", "pass", ".", "For", "example", "when", "we", "guard", "\"", "\\", "n", "\\", "n", "\"", "from", "being", "converted", "we", "really", "should", "be", "guarding", "for", "RegEx", "(", "\"", "\\", "n", "\\", "n", "+", "\"", ")", "instead", ".", "But", "that", "doesn", "'", "t", "matter", "for", "our", "tests", "." ]
def normalize_md(txt): txt = txt.replace("\n\n", "OMG_NEWLINE") txt = txt.replace("\n*", "OMG_STAR") txt = txt.replace("\n-", "OMG_HYPHEN") txt = txt.replace("]\n(", "OMG_LINK") txt = txt.replace("\n", " ") txt = txt.replace("OMG_NEWLINE", "\n\n") txt = txt.replace("OMG_STAR", "\n*") txt = txt.replace("OMG_HYPHEN", "\n-") txt = txt.replace("OMG_LINK", "](") return txt.strip()
[ "def", "normalize_md", "(", "txt", ")", ":", "txt", "=", "txt", ".", "replace", "(", "\"\\n\\n\"", ",", "\"OMG_NEWLINE\"", ")", "txt", "=", "txt", ".", "replace", "(", "\"\\n*\"", ",", "\"OMG_STAR\"", ")", "txt", "=", "txt", ".", "replace", "(", "\"\\n-\"", ",", "\"OMG_HYPHEN\"", ")", "txt", "=", "txt", ".", "replace", "(", "\"]\\n(\"", ",", "\"OMG_LINK\"", ")", "txt", "=", "txt", ".", "replace", "(", "\"\\n\"", ",", "\" \"", ")", "txt", "=", "txt", ".", "replace", "(", "\"OMG_NEWLINE\"", ",", "\"\\n\\n\"", ")", "txt", "=", "txt", ".", "replace", "(", "\"OMG_STAR\"", ",", "\"\\n*\"", ")", "txt", "=", "txt", ".", "replace", "(", "\"OMG_HYPHEN\"", ",", "\"\\n-\"", ")", "txt", "=", "txt", ".", "replace", "(", "\"OMG_LINK\"", ",", "\"](\"", ")", "return", "txt", ".", "strip", "(", ")" ]
Replace newlines *inside paragraphs* with spaces.
[ "Replace", "newlines", "*", "inside", "paragraphs", "*", "with", "spaces", "." ]
[ "\"\"\"Replace newlines *inside paragraphs* with spaces.\n\n Consecutive lines of text are considered part of the same paragraph\n in Markdown. So this function joins those into a single line to make the\n test robust to changes in text wrapping.\n\n NOTE: This function doesn't attempt to be 100% grammatically correct\n Markdown! It's just supposed to be \"correct enough\" for tests to pass. For\n example, when we guard \"\\n\\n\" from being converted, we really should be\n guarding for RegEx(\"\\n\\n+\") instead. But that doesn't matter for our tests.\n \"\"\"", "# Two newlines in a row should NOT be replaced with a space.", "# Lists should NOT be replaced with a space.", "# Links broken over two lines should not get an extra space.", "# Convert all remaining newlines into spaces.", "# Restore everything else." ]
[ { "param": "txt", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "txt", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def normalize_md(txt): txt = txt.replace("\n\n", "OMG_NEWLINE") txt = txt.replace("\n*", "OMG_STAR") txt = txt.replace("\n-", "OMG_HYPHEN") txt = txt.replace("]\n(", "OMG_LINK") txt = txt.replace("\n", " ") txt = txt.replace("OMG_NEWLINE", "\n\n") txt = txt.replace("OMG_STAR", "\n*") txt = txt.replace("OMG_HYPHEN", "\n-") txt = txt.replace("OMG_LINK", "](") return txt.strip()
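A quick round trip showing the behaviour the docstring promises: wrapped lines inside a paragraph are joined, while paragraph breaks and list items survive.

wrapped = "This sentence is wrapped\nacross two lines.\n\nA list:\n* item one\n* item two"
print(normalize_md(wrapped))
# This sentence is wrapped across two lines.
#
# A list:
# * item one
# * item two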
d6e49f650cac2b1b91194827d4ab6b62476c5f3e
skirpichev/diofant
diofant/simplify/sqrtdenest.py
[ "BSD-3-Clause" ]
Python
_subsets
<not_specific>
def _subsets(n): """ Returns all possible subsets of the set (0, 1, ..., n-1) except the empty set, listed in reversed lexicographical order according to binary representation, so that the case of the fourth root is treated last. Examples ======== >>> _subsets(2) [[1, 0], [0, 1], [1, 1]] """ if n == 1: a = [[1]] elif n == 2: a = [[1, 0], [0, 1], [1, 1]] elif n == 3: a = [[1, 0, 0], [0, 1, 0], [1, 1, 0], [0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 1]] else: b = _subsets(n - 1) a0 = [x + [0] for x in b] a1 = [x + [1] for x in b] a = a0 + [[0]*(n - 1) + [1]] + a1 return a
Returns all possible subsets of the set (0, 1, ..., n-1) except the empty set, listed in reversed lexicographical order according to binary representation, so that the case of the fourth root is treated last. Examples ======== >>> _subsets(2) [[1, 0], [0, 1], [1, 1]]
Returns all possible subsets of the set (0, 1, ..., n-1) except the empty set, listed in reversed lexicographical order according to binary representation, so that the case of the fourth root is treated last. Examples
[ "Returns", "all", "possible", "subsets", "of", "the", "set", "(", "0", "1", "...", "n", "-", "1", ")", "except", "the", "empty", "set", "listed", "in", "reversed", "lexicographical", "order", "according", "to", "binary", "representation", "so", "that", "the", "case", "of", "the", "fourth", "root", "is", "treated", "last", ".", "Examples" ]
def _subsets(n): if n == 1: a = [[1]] elif n == 2: a = [[1, 0], [0, 1], [1, 1]] elif n == 3: a = [[1, 0, 0], [0, 1, 0], [1, 1, 0], [0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 1]] else: b = _subsets(n - 1) a0 = [x + [0] for x in b] a1 = [x + [1] for x in b] a = a0 + [[0]*(n - 1) + [1]] + a1 return a
[ "def", "_subsets", "(", "n", ")", ":", "if", "n", "==", "1", ":", "a", "=", "[", "[", "1", "]", "]", "elif", "n", "==", "2", ":", "a", "=", "[", "[", "1", ",", "0", "]", ",", "[", "0", ",", "1", "]", ",", "[", "1", ",", "1", "]", "]", "elif", "n", "==", "3", ":", "a", "=", "[", "[", "1", ",", "0", ",", "0", "]", ",", "[", "0", ",", "1", ",", "0", "]", ",", "[", "1", ",", "1", ",", "0", "]", ",", "[", "0", ",", "0", ",", "1", "]", ",", "[", "1", ",", "0", ",", "1", "]", ",", "[", "0", ",", "1", ",", "1", "]", ",", "[", "1", ",", "1", ",", "1", "]", "]", "else", ":", "b", "=", "_subsets", "(", "n", "-", "1", ")", "a0", "=", "[", "x", "+", "[", "0", "]", "for", "x", "in", "b", "]", "a1", "=", "[", "x", "+", "[", "1", "]", "for", "x", "in", "b", "]", "a", "=", "a0", "+", "[", "[", "0", "]", "*", "(", "n", "-", "1", ")", "+", "[", "1", "]", "]", "+", "a1", "return", "a" ]
Returns all possible subsets of the set (0, 1, ..., n-1) except the empty set, listed in reversed lexicographical order according to binary representation, so that the case of the fourth root is treated last.
[ "Returns", "all", "possible", "subsets", "of", "the", "set", "(", "0", "1", "...", "n", "-", "1", ")", "except", "the", "empty", "set", "listed", "in", "reversed", "lexicographical", "order", "according", "to", "binary", "representation", "so", "that", "the", "case", "of", "the", "fourth", "root", "is", "treated", "last", "." ]
[ "\"\"\"\n Returns all possible subsets of the set (0, 1, ..., n-1) except the\n empty set, listed in reversed lexicographical order according to binary\n representation, so that the case of the fourth root is treated last.\n\n Examples\n ========\n\n >>> _subsets(2)\n [[1, 0], [0, 1], [1, 1]]\n\n \"\"\"" ]
[ { "param": "n", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "n", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _subsets(n): if n == 1: a = [[1]] elif n == 2: a = [[1, 0], [0, 1], [1, 1]] elif n == 3: a = [[1, 0, 0], [0, 1, 0], [1, 1, 0], [0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 1]] else: b = _subsets(n - 1) a0 = [x + [0] for x in b] a1 = [x + [1] for x in b] a = a0 + [[0]*(n - 1) + [1]] + a1 return a
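Two properties worth checking against the docstring: _subsets(n) yields all 2**n - 1 non-empty subsets as 0/1 indicator vectors, and the full set (the fourth-root case the docstring mentions) always comes last.

subsets = _subsets(4)
print(len(subsets))  # 15, i.e. 2**4 - 1
print(subsets[-1])   # [1, 1, 1, 1]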
d64096b5c0b44d9dab578dfcc5f15dc961618725
skirpichev/diofant
diofant/functions/elementary/miscellaneous.py
[ "BSD-3-Clause" ]
Python
_find_localzeros
<not_specific>
def _find_localzeros(cls, values, **options): """ Sequentially allocate values to localzeros. When a value is identified as being more extreme than another member it replaces that member; if this is never true, then the value is simply appended to the localzeros. """ localzeros = set() for v in values: is_newzero = True localzeros_ = list(localzeros) for z in localzeros_: assert v != z con = cls._is_connected(v, z) if con: is_newzero = False if con is True or con == cls: localzeros.remove(z) localzeros.update([v]) if is_newzero: localzeros.update([v]) return localzeros
Sequentially allocate values to localzeros. When a value is identified as being more extreme than another member it replaces that member; if this is never true, then the value is simply appended to the localzeros.
Sequentially allocate values to localzeros. When a value is identified as being more extreme than another member it replaces that member; if this is never true, then the value is simply appended to the localzeros.
[ "Sequentially", "allocate", "values", "to", "localzeros", ".", "When", "a", "value", "is", "identified", "as", "being", "more", "extreme", "than", "another", "member", "it", "replaces", "that", "member", ";", "if", "this", "is", "never", "true", "then", "the", "value", "is", "simply", "appended", "to", "the", "localzeros", "." ]
def _find_localzeros(cls, values, **options): localzeros = set() for v in values: is_newzero = True localzeros_ = list(localzeros) for z in localzeros_: assert v != z con = cls._is_connected(v, z) if con: is_newzero = False if con is True or con == cls: localzeros.remove(z) localzeros.update([v]) if is_newzero: localzeros.update([v]) return localzeros
[ "def", "_find_localzeros", "(", "cls", ",", "values", ",", "**", "options", ")", ":", "localzeros", "=", "set", "(", ")", "for", "v", "in", "values", ":", "is_newzero", "=", "True", "localzeros_", "=", "list", "(", "localzeros", ")", "for", "z", "in", "localzeros_", ":", "assert", "v", "!=", "z", "con", "=", "cls", ".", "_is_connected", "(", "v", ",", "z", ")", "if", "con", ":", "is_newzero", "=", "False", "if", "con", "is", "True", "or", "con", "==", "cls", ":", "localzeros", ".", "remove", "(", "z", ")", "localzeros", ".", "update", "(", "[", "v", "]", ")", "if", "is_newzero", ":", "localzeros", ".", "update", "(", "[", "v", "]", ")", "return", "localzeros" ]
Sequentially allocate values to localzeros.
[ "Sequentially", "allocate", "values", "to", "localzeros", "." ]
[ "\"\"\"\n Sequentially allocate values to localzeros.\n\n When a value is identified as being more extreme than another member it\n replaces that member; if this is never true, then the value is simply\n appended to the localzeros.\n\n \"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "values", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "values", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _find_localzeros(cls, values, **options): localzeros = set() for v in values: is_newzero = True localzeros_ = list(localzeros) for z in localzeros_: assert v != z con = cls._is_connected(v, z) if con: is_newzero = False if con is True or con == cls: localzeros.remove(z) localzeros.update([v]) if is_newzero: localzeros.update([v]) return localzeros
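The helper defers entirely to cls._is_connected, so it can be exercised with a stand-in class. The MaxLike stub below only illustrates the protocol (a truthy result means the values are comparable; True or cls itself means v supersedes z) and is not diofant's actual Max machinery.

class MaxLike:
    @classmethod
    def _is_connected(cls, x, y):
        if x == y:
            return True
        return cls if x > y else object  # 'object' stands in for the dual (Min-like) class

MaxLike._find_localzeros = classmethod(_find_localzeros)
print(MaxLike._find_localzeros([3, 1, 4, 1, 5]))  # {5}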
9a05b802f23aa9344e90029fdc4bda14f1403d54
skirpichev/diofant
diofant/integrals/prde.py
[ "BSD-3-Clause" ]
Python
real_imag
<not_specific>
def real_imag(ba, bd, gen): """ Helper function, to get the real and imaginary part of a rational function evaluated at sqrt(-1) without actually evaluating it at sqrt(-1) Separates the even and odd power terms by checking the degree of terms wrt mod 4. Returns a tuple (ba[0], ba[1], bd) where ba[0] is real part of the numerator ba[1] is the imaginary part and bd is the denominator of the rational function. """ bd = bd.as_poly(gen).as_dict() ba = ba.as_poly(gen).as_dict() denom_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2 else 0 for key, value in bd.items()] denom_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3 else 0 for key, value in bd.items()] bd_real = sum(r for r in denom_real) bd_imag = sum(r for r in denom_imag) num_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2 else 0 for key, value in ba.items()] num_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3 else 0 for key, value in ba.items()] ba_real = sum(r for r in num_real) ba_imag = sum(r for r in num_imag) ba = ((ba_real*bd_real + ba_imag*bd_imag).as_poly(gen), (ba_imag*bd_real - ba_real*bd_imag).as_poly(gen)) bd = (bd_real*bd_real + bd_imag*bd_imag).as_poly(gen) return ba[0], ba[1], bd
Helper function, to get the real and imaginary part of a rational function evaluated at sqrt(-1) without actually evaluating it at sqrt(-1) Separates the even and odd power terms by checking the degree of terms wrt mod 4. Returns a tuple (ba[0], ba[1], bd) where ba[0] is real part of the numerator ba[1] is the imaginary part and bd is the denominator of the rational function.
Helper function, to get the real and imaginary part of a rational function evaluated at sqrt(-1) without actually evaluating it at sqrt(-1). Separates the even and odd power terms by checking the degree of terms wrt mod 4. Returns a tuple (ba[0], ba[1], bd) where ba[0] is real part of the numerator ba[1] is the imaginary part and bd is the denominator of the rational function.
[ "Helper", "function", "to", "get", "the", "real", "and", "imaginary", "part", "of", "a", "rational", "function", "evaluated", "at", "sqrt", "(", "-", "1", ")", "without", "actually", "evaluating", "it", "at", "sqrt", "(", "-", "1", ")", "Separates", "the", "even", "and", "odd", "power", "terms", "by", "checking", "the", "degree", "of", "terms", "wrt", "mod", "4", ".", "Returns", "a", "tuple", "(", "ba", "[", "0", "]", "ba", "[", "1", "]", "bd", ")", "where", "ba", "[", "0", "]", "is", "real", "part", "of", "the", "numerator", "ba", "[", "1", "]", "is", "the", "imaginary", "part", "and", "bd", "is", "the", "denominator", "of", "the", "rational", "function", "." ]
def real_imag(ba, bd, gen): bd = bd.as_poly(gen).as_dict() ba = ba.as_poly(gen).as_dict() denom_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2 else 0 for key, value in bd.items()] denom_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3 else 0 for key, value in bd.items()] bd_real = sum(r for r in denom_real) bd_imag = sum(r for r in denom_imag) num_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2 else 0 for key, value in ba.items()] num_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3 else 0 for key, value in ba.items()] ba_real = sum(r for r in num_real) ba_imag = sum(r for r in num_imag) ba = ((ba_real*bd_real + ba_imag*bd_imag).as_poly(gen), (ba_imag*bd_real - ba_real*bd_imag).as_poly(gen)) bd = (bd_real*bd_real + bd_imag*bd_imag).as_poly(gen) return ba[0], ba[1], bd
[ "def", "real_imag", "(", "ba", ",", "bd", ",", "gen", ")", ":", "bd", "=", "bd", ".", "as_poly", "(", "gen", ")", ".", "as_dict", "(", ")", "ba", "=", "ba", ".", "as_poly", "(", "gen", ")", ".", "as_dict", "(", ")", "denom_real", "=", "[", "value", "if", "key", "[", "0", "]", "%", "4", "==", "0", "else", "-", "value", "if", "key", "[", "0", "]", "%", "4", "==", "2", "else", "0", "for", "key", ",", "value", "in", "bd", ".", "items", "(", ")", "]", "denom_imag", "=", "[", "value", "if", "key", "[", "0", "]", "%", "4", "==", "1", "else", "-", "value", "if", "key", "[", "0", "]", "%", "4", "==", "3", "else", "0", "for", "key", ",", "value", "in", "bd", ".", "items", "(", ")", "]", "bd_real", "=", "sum", "(", "r", "for", "r", "in", "denom_real", ")", "bd_imag", "=", "sum", "(", "r", "for", "r", "in", "denom_imag", ")", "num_real", "=", "[", "value", "if", "key", "[", "0", "]", "%", "4", "==", "0", "else", "-", "value", "if", "key", "[", "0", "]", "%", "4", "==", "2", "else", "0", "for", "key", ",", "value", "in", "ba", ".", "items", "(", ")", "]", "num_imag", "=", "[", "value", "if", "key", "[", "0", "]", "%", "4", "==", "1", "else", "-", "value", "if", "key", "[", "0", "]", "%", "4", "==", "3", "else", "0", "for", "key", ",", "value", "in", "ba", ".", "items", "(", ")", "]", "ba_real", "=", "sum", "(", "r", "for", "r", "in", "num_real", ")", "ba_imag", "=", "sum", "(", "r", "for", "r", "in", "num_imag", ")", "ba", "=", "(", "(", "ba_real", "*", "bd_real", "+", "ba_imag", "*", "bd_imag", ")", ".", "as_poly", "(", "gen", ")", ",", "(", "ba_imag", "*", "bd_real", "-", "ba_real", "*", "bd_imag", ")", ".", "as_poly", "(", "gen", ")", ")", "bd", "=", "(", "bd_real", "*", "bd_real", "+", "bd_imag", "*", "bd_imag", ")", ".", "as_poly", "(", "gen", ")", "return", "ba", "[", "0", "]", ",", "ba", "[", "1", "]", ",", "bd" ]
Helper function, to get the real and imaginary part of a rational function evaluated at sqrt(-1) without actually evaluating it at sqrt(-1)
[ "Helper", "function", "to", "get", "the", "real", "and", "imaginary", "part", "of", "a", "rational", "function", "evaluated", "at", "sqrt", "(", "-", "1", ")", "without", "actually", "evaluating", "it", "at", "sqrt", "(", "-", "1", ")" ]
[ "\"\"\"\n Helper function, to get the real and imaginary part of a rational function\n evaluated at sqrt(-1) without actually evaluating it at sqrt(-1)\n\n Separates the even and odd power terms by checking the degree of terms wrt\n mod 4. Returns a tuple (ba[0], ba[1], bd) where ba[0] is real part\n of the numerator ba[1] is the imaginary part and bd is the denominator\n of the rational function.\n \"\"\"" ]
[ { "param": "ba", "type": null }, { "param": "bd", "type": null }, { "param": "gen", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "ba", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "bd", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "gen", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def real_imag(ba, bd, gen): bd = bd.as_poly(gen).as_dict() ba = ba.as_poly(gen).as_dict() denom_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2 else 0 for key, value in bd.items()] denom_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3 else 0 for key, value in bd.items()] bd_real = sum(r for r in denom_real) bd_imag = sum(r for r in denom_imag) num_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2 else 0 for key, value in ba.items()] num_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3 else 0 for key, value in ba.items()] ba_real = sum(r for r in num_real) ba_imag = sum(r for r in num_imag) ba = ((ba_real*bd_real + ba_imag*bd_imag).as_poly(gen), (ba_imag*bd_real - ba_real*bd_imag).as_poly(gen)) bd = (bd_real*bd_real + bd_imag*bd_imag).as_poly(gen) return ba[0], ba[1], bd
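A sketch using sympy as a stand-in for diofant; the function only needs expressions exposing .as_poly, and the two APIs agree closely enough here, though that substitution is an assumption. With the generator t playing the role of sqrt(-1), (t + 1)/t evaluates to (1 + I)/I == 1 - I, so the real part is 1 and the imaginary part is -1.

from sympy import symbols

t = symbols('t')  # t stands for sqrt(-1)
re_num, im_num, den = real_imag(t + 1, t, t)
print(re_num.as_expr() / den.as_expr(),   # 1
      im_num.as_expr() / den.as_expr())   # -1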
dd43098e1a0e8ffc912cf31cd3bdfb5fb69f0f90
skirpichev/diofant
diofant/interactive/printing.py
[ "BSD-3-Clause" ]
Python
_init_python_printing
null
def _init_python_printing(stringify_func): """Setup printing in Python interactive session.""" def _displayhook(arg): """Python's pretty-printer display hook. This function was adapted from PEP 217. """ if arg is not None: builtins._ = None if isinstance(arg, str): print(repr(arg)) else: print(stringify_func(arg)) builtins._ = arg sys.displayhook = _displayhook
Setup printing in Python interactive session.
Setup printing in Python interactive session.
[ "Setup", "printing", "in", "Python", "interactive", "session", "." ]
def _init_python_printing(stringify_func): def _displayhook(arg): if arg is not None: builtins._ = None if isinstance(arg, str): print(repr(arg)) else: print(stringify_func(arg)) builtins._ = arg sys.displayhook = _displayhook
[ "def", "_init_python_printing", "(", "stringify_func", ")", ":", "def", "_displayhook", "(", "arg", ")", ":", "\"\"\"Python's pretty-printer display hook.\n\n This function was adapted from PEP 217.\n\n \"\"\"", "if", "arg", "is", "not", "None", ":", "builtins", ".", "_", "=", "None", "if", "isinstance", "(", "arg", ",", "str", ")", ":", "print", "(", "repr", "(", "arg", ")", ")", "else", ":", "print", "(", "stringify_func", "(", "arg", ")", ")", "builtins", ".", "_", "=", "arg", "sys", ".", "displayhook", "=", "_displayhook" ]
Setup printing in Python interactive session.
[ "Setup", "printing", "in", "Python", "interactive", "session", "." ]
[ "\"\"\"Setup printing in Python interactive session.\"\"\"", "\"\"\"Python's pretty-printer display hook.\n\n This function was adapted from PEP 217.\n\n \"\"\"" ]
[ { "param": "stringify_func", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "stringify_func", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import builtins import sys def _init_python_printing(stringify_func): def _displayhook(arg): if arg is not None: builtins._ = None if isinstance(arg, str): print(repr(arg)) else: print(stringify_func(arg)) builtins._ = arg sys.displayhook = _displayhook
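The hook only changes behaviour inside an interactive session, but it can be poked directly through sys.displayhook; plain str stands in here for the pretty-printing stringify_func diofant would pass.

import builtins
import sys

_init_python_printing(str)
sys.displayhook({'a': 1})  # prints {'a': 1} via str() and rebinds builtins._
print(builtins._)          # {'a': 1}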
ee13c0001ad1e5cdccbc81d714e8be5ac848ab40
skirpichev/diofant
diofant/core/compatibility.py
[ "BSD-3-Clause" ]
Python
as_int
<not_specific>
def as_int(n): """ Convert the argument to a builtin integer. The return value is guaranteed to be equal to the input. ValueError is raised if the input has a non-integral value. Examples ======== >>> 3.0 3.0 >>> as_int(3.0) # convert to int and test for equality 3 >>> int(sqrt(10)) 3 >>> as_int(sqrt(10)) Traceback (most recent call last): ... ValueError: ... is not an integer """ try: result = int(n) if result != n: raise TypeError except TypeError: raise ValueError(f'{n} is not an integer') return result
Convert the argument to a builtin integer. The return value is guaranteed to be equal to the input. ValueError is raised if the input has a non-integral value. Examples ======== >>> 3.0 3.0 >>> as_int(3.0) # convert to int and test for equality 3 >>> int(sqrt(10)) 3 >>> as_int(sqrt(10)) Traceback (most recent call last): ... ValueError: ... is not an integer
Convert the argument to a builtin integer. The return value is guaranteed to be equal to the input. ValueError is raised if the input has a non-integral value. Examples
[ "Convert", "the", "argument", "to", "a", "builtin", "integer", ".", "The", "return", "value", "is", "guaranteed", "to", "be", "equal", "to", "the", "input", ".", "ValueError", "is", "raised", "if", "the", "input", "has", "a", "non", "-", "integral", "value", ".", "Examples" ]
def as_int(n): try: result = int(n) if result != n: raise TypeError except TypeError: raise ValueError(f'{n} is not an integer') return result
[ "def", "as_int", "(", "n", ")", ":", "try", ":", "result", "=", "int", "(", "n", ")", "if", "result", "!=", "n", ":", "raise", "TypeError", "except", "TypeError", ":", "raise", "ValueError", "(", "f'{n} is not an integer'", ")", "return", "result" ]
Convert the argument to a builtin integer.
[ "Convert", "the", "argument", "to", "a", "builtin", "integer", "." ]
[ "\"\"\"\n Convert the argument to a builtin integer.\n\n The return value is guaranteed to be equal to the input. ValueError is\n raised if the input has a non-integral value.\n\n Examples\n ========\n\n >>> 3.0\n 3.0\n >>> as_int(3.0) # convert to int and test for equality\n 3\n >>> int(sqrt(10))\n 3\n >>> as_int(sqrt(10))\n Traceback (most recent call last):\n ...\n ValueError: ... is not an integer\n\n \"\"\"" ]
[ { "param": "n", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "n", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def as_int(n): try: result = int(n) if result != n: raise TypeError except TypeError: raise ValueError(f'{n} is not an integer') return result
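A behaviour check: floats equal to an integer pass through, anything with a fractional part is rejected with the documented ValueError.

print(as_int(3.0))  # 3
try:
    as_int(3.5)
except ValueError as exc:
    print(exc)      # 3.5 is not an integer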
724d7041d4b792c274dbcd77b578bcb869116219
skirpichev/diofant
diofant/polys/rootoftools.py
[ "BSD-3-Clause" ]
Python
_get_reals
<not_specific>
def _get_reals(cls, factors): """Compute real root isolating intervals for a list of factors.""" reals = [] for factor, k in factors: real_part = cls._get_reals_sqf(factor) reals.extend([(root, factor, k) for root in real_part]) return reals
Compute real root isolating intervals for a list of factors.
Compute real root isolating intervals for a list of factors.
[ "Compute", "real", "root", "isolating", "intervals", "for", "a", "list", "of", "factors", "." ]
def _get_reals(cls, factors): reals = [] for factor, k in factors: real_part = cls._get_reals_sqf(factor) reals.extend([(root, factor, k) for root in real_part]) return reals
[ "def", "_get_reals", "(", "cls", ",", "factors", ")", ":", "reals", "=", "[", "]", "for", "factor", ",", "k", "in", "factors", ":", "real_part", "=", "cls", ".", "_get_reals_sqf", "(", "factor", ")", "reals", ".", "extend", "(", "[", "(", "root", ",", "factor", ",", "k", ")", "for", "root", "in", "real_part", "]", ")", "return", "reals" ]
Compute real root isolating intervals for a list of factors.
[ "Compute", "real", "root", "isolating", "intervals", "for", "a", "list", "of", "factors", "." ]
[ "\"\"\"Compute real root isolating intervals for a list of factors.\"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "factors", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "factors", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _get_reals(cls, factors): reals = [] for factor, k in factors: real_part = cls._get_reals_sqf(factor) reals.extend([(root, factor, k) for root in real_part]) return reals
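The classmethod just fans cls._get_reals_sqf out over (factor, multiplicity) pairs and tags each result; a stub isolator makes the bookkeeping visible (the stub and its fake roots are illustrative, not diofant's real root isolation).

class IsolatorStub:
    @classmethod
    def _get_reals_sqf(cls, factor):
        return [f'{factor}:r0', f'{factor}:r1']  # pretend each factor has two real roots

IsolatorStub._get_reals = classmethod(_get_reals)
print(IsolatorStub._get_reals([('f', 1), ('g', 3)]))
# [('f:r0', 'f', 1), ('f:r1', 'f', 1), ('g:r0', 'g', 3), ('g:r1', 'g', 3)]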
724d7041d4b792c274dbcd77b578bcb869116219
skirpichev/diofant
diofant/polys/rootoftools.py
[ "BSD-3-Clause" ]
Python
_get_complexes
<not_specific>
def _get_complexes(cls, factors): """Compute complex root isolating intervals for a list of factors.""" complexes = [] for factor, k in factors: complex_part = cls._get_complexes_sqf(factor) complexes.extend([(root, factor, k) for root in complex_part]) return complexes
Compute complex root isolating intervals for a list of factors.
Compute complex root isolating intervals for a list of factors.
[ "Compute", "complex", "root", "isolating", "intervals", "for", "a", "list", "of", "factors", "." ]
def _get_complexes(cls, factors): complexes = [] for factor, k in factors: complex_part = cls._get_complexes_sqf(factor) complexes.extend([(root, factor, k) for root in complex_part]) return complexes
[ "def", "_get_complexes", "(", "cls", ",", "factors", ")", ":", "complexes", "=", "[", "]", "for", "factor", ",", "k", "in", "factors", ":", "complex_part", "=", "cls", ".", "_get_complexes_sqf", "(", "factor", ")", "complexes", ".", "extend", "(", "[", "(", "root", ",", "factor", ",", "k", ")", "for", "root", "in", "complex_part", "]", ")", "return", "complexes" ]
Compute complex root isolating intervals for a list of factors.
[ "Compute", "complex", "root", "isolating", "intervals", "for", "a", "list", "of", "factors", "." ]
[ "\"\"\"Compute complex root isolating intervals for a list of factors.\"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "factors", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "factors", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _get_complexes(cls, factors): complexes = [] for factor, k in factors: complex_part = cls._get_complexes_sqf(factor) complexes.extend([(root, factor, k) for root in complex_part]) return complexes
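_get_complexes mirrors _get_reals exactly, just over the complex-plane isolator; the same stub pattern applies.

class IsolatorStub:
    @classmethod
    def _get_complexes_sqf(cls, factor):
        return [f'{factor}:c0']  # pretend one complex interval per factor

IsolatorStub._get_complexes = classmethod(_get_complexes)
print(IsolatorStub._get_complexes([('f', 2)]))  # [('f:c0', 'f', 2)]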
724d7041d4b792c274dbcd77b578bcb869116219
skirpichev/diofant
diofant/polys/rootoftools.py
[ "BSD-3-Clause" ]
Python
_reals_index
<not_specific>
def _reals_index(cls, reals, index): """Map initial real root index to an index in a factor where the root belongs.""" i = 0 for j, (_, factor, k) in enumerate(reals): # pragma: no branch if index < i + k: poly, index = factor, 0 for _, factor, _ in reals[:j]: if factor == poly: index += 1 return poly, index else: i += k
Map initial real root index to an index in a factor where the root belongs.
Map initial real root index to an index in a factor where the root belongs.
[ "Map", "initial", "real", "root", "index", "to", "an", "index", "in", "a", "factor", "where", "the", "root", "belongs", "." ]
def _reals_index(cls, reals, index): i = 0 for j, (_, factor, k) in enumerate(reals): if index < i + k: poly, index = factor, 0 for _, factor, _ in reals[:j]: if factor == poly: index += 1 return poly, index else: i += k
[ "def", "_reals_index", "(", "cls", ",", "reals", ",", "index", ")", ":", "i", "=", "0", "for", "j", ",", "(", "_", ",", "factor", ",", "k", ")", "in", "enumerate", "(", "reals", ")", ":", "if", "index", "<", "i", "+", "k", ":", "poly", ",", "index", "=", "factor", ",", "0", "for", "_", ",", "factor", ",", "_", "in", "reals", "[", ":", "j", "]", ":", "if", "factor", "==", "poly", ":", "index", "+=", "1", "return", "poly", ",", "index", "else", ":", "i", "+=", "k" ]
Map initial real root index to an index in a factor where the root belongs.
[ "Map", "initial", "real", "root", "index", "to", "an", "index", "in", "a", "factor", "where", "the", "root", "belongs", "." ]
[ "\"\"\"Map initial real root index to an index in a factor where the root belongs.\"\"\"", "# pragma: no branch" ]
[ { "param": "cls", "type": null }, { "param": "reals", "type": null }, { "param": "index", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "reals", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "index", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _reals_index(cls, reals, index): i = 0 for j, (_, factor, k) in enumerate(reals): if index < i + k: poly, index = factor, 0 for _, factor, _ in reals[:j]: if factor == poly: index += 1 return poly, index else: i += k
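cls is unused in the body, so the mapping can be exercised directly. Each (interval, factor, k) entry occupies k consecutive global indices, and the local index counts earlier intervals of the same factor; the entries below are hypothetical.

reals = [('iv0', 'p', 2), ('iv1', 'q', 1), ('iv2', 'p', 2)]
print(_reals_index(None, reals, 0))  # ('p', 0): global indices 0 and 1 map here
print(_reals_index(None, reals, 2))  # ('q', 0)
print(_reals_index(None, reals, 3))  # ('p', 1): second interval belonging to p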
724d7041d4b792c274dbcd77b578bcb869116219
skirpichev/diofant
diofant/polys/rootoftools.py
[ "BSD-3-Clause" ]
Python
_complexes_index
<not_specific>
def _complexes_index(cls, complexes, index): """Map initial complex root index to an index in a factor where the root belongs.""" i = 0 for j, (_, factor, k) in enumerate(complexes): # pragma: no branch if index < i + k: poly, index = factor, 0 for _, factor, _ in complexes[:j]: if factor == poly: index += 1 index += poly.count_roots() return poly, index else: i += k
Map initial complex root index to an index in a factor where the root belongs.
Map initial complex root index to an index in a factor where the root belongs.
[ "Map", "initial", "complex", "root", "index", "to", "an", "index", "in", "a", "factor", "where", "the", "root", "belongs", "." ]
def _complexes_index(cls, complexes, index): i = 0 for j, (_, factor, k) in enumerate(complexes): if index < i + k: poly, index = factor, 0 for _, factor, _ in complexes[:j]: if factor == poly: index += 1 index += poly.count_roots() return poly, index else: i += k
[ "def", "_complexes_index", "(", "cls", ",", "complexes", ",", "index", ")", ":", "i", "=", "0", "for", "j", ",", "(", "_", ",", "factor", ",", "k", ")", "in", "enumerate", "(", "complexes", ")", ":", "if", "index", "<", "i", "+", "k", ":", "poly", ",", "index", "=", "factor", ",", "0", "for", "_", ",", "factor", ",", "_", "in", "complexes", "[", ":", "j", "]", ":", "if", "factor", "==", "poly", ":", "index", "+=", "1", "index", "+=", "poly", ".", "count_roots", "(", ")", "return", "poly", ",", "index", "else", ":", "i", "+=", "k" ]
Map initial complex root index to an index in a factor where the root belongs.
[ "Map", "initial", "complex", "root", "index", "to", "an", "index", "in", "a", "factor", "where", "the", "root", "belongs", "." ]
[ "\"\"\"Map initial complex root index to an index in a factor where the root belongs.\"\"\"", "# pragma: no branch" ]
[ { "param": "cls", "type": null }, { "param": "complexes", "type": null }, { "param": "index", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "complexes", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "index", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _complexes_index(cls, complexes, index): i = 0 for j, (_, factor, k) in enumerate(complexes): if index < i + k: poly, index = factor, 0 for _, factor, _ in complexes[:j]: if factor == poly: index += 1 index += poly.count_roots() return poly, index else: i += k
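Same idea as _reals_index, except the local index is additionally shifted past the factor's real roots via poly.count_roots(); FakePoly below is an illustration, not diofant's Poly.

class FakePoly:
    def __init__(self, name, n_real):
        self.name, self.n_real = name, n_real
    def count_roots(self):
        return self.n_real  # number of real roots of this factor
    def __repr__(self):
        return self.name

p = FakePoly('p', 1)
complexes = [('c0', p, 1), ('c1', p, 1)]
print(_complexes_index(None, complexes, 1))  # (p, 2): past p's one real root and past c0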