Dataset Viewer
hexsha
stringlengths 40
40
| repo
stringlengths 5
121
| path
stringlengths 4
227
| license
sequence | language
stringclasses 1
value | identifier
stringlengths 1
160
| return_type
stringlengths 2
354
โ | original_string
stringlengths 57
438k
| original_docstring
stringlengths 13
88.1k
| docstring
stringlengths 13
2.86k
| docstring_tokens
sequence | code
stringlengths 16
437k
| code_tokens
sequence | short_docstring
stringlengths 1
1.58k
| short_docstring_tokens
sequence | comment
sequence | parameters
list | docstring_params
dict | code_with_imports
stringlengths 16
437k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
31bd537a3d1572c9fa6aeb3baecb55a4e485344d | fasaxc/clowder | calicoctl/tests/st/utils/utils.py | [
"Apache-2.0"
def clean_calico_data(data, extra_keys_to_remove=None):
    """
    Clean the data returned from a calicoctl get command to remove empty
    structs, null values and non-configurable fields. This makes comparison
    with the input data much simpler.

    Args:
        data: The data to clean.
        extra_keys_to_remove: more keys to remove if needed.

    Returns: The cleaned data.
    """
    new = copy.deepcopy(data)

    # Recursively delete empty structs / nil values and non-configurable
    # fields.
    def clean_elem(elem, extra_keys):
        if isinstance(elem, list):
            # Loop through each element in the list
            for i in elem:
                clean_elem(i, extra_keys)
        if isinstance(elem, dict):
            # Remove non-settable fields, and recursively clean each value of
            # the dictionary, removing nil values or values that are empty
            # dicts after cleaning.
            del_keys = ['creationTimestamp', 'resourceVersion', 'uid']
            if extra_keys is not None:
                del_keys.extend(extra_keys)
            # NOTE: dict.iteritems() was Python 2 only; items() is required on
            # Python 3. Keys are only deleted after iteration completes, so
            # iterating the live view here is safe.
            for k, v in elem.items():
                clean_elem(v, extra_keys)
                if v is None or v == {}:
                    del_keys.append(k)
            for k in del_keys:
                if k in elem:
                    del elem[k]

    clean_elem(new, extra_keys_to_remove)
    return new
Clean the data returned from a calicoctl get command to remove empty
structs, null values and non-configurable fields. This makes comparison
with the input data much simpler.
Args:
data: The data to clean.
extra_keys_to_remove: more keys to remove if needed.
Returns: The cleaned data.
| Clean the data returned from a calicoctl get command to remove empty
structs, null values and non-configurable fields. This makes comparison
with the input data much simpler. | [
"Clean",
"the",
"data",
"returned",
"from",
"a",
"calicoctl",
"get",
"command",
"to",
"remove",
"empty",
"structs",
"null",
"values",
"and",
"non",
"-",
"configurable",
"fields",
".",
"This",
"makes",
"comparison",
"with",
"the",
"input",
"data",
"much",
"simpler",
"."
] | def clean_calico_data(data, extra_keys_to_remove=None):
new = copy.deepcopy(data)
def clean_elem(elem, extra_keys):
if isinstance(elem, list):
for i in elem:
clean_elem(i, extra_keys)
if isinstance(elem, dict):
del_keys = ['creationTimestamp', 'resourceVersion', 'uid']
if extra_keys is not None:
for extra_key in extra_keys:
del_keys.append(extra_key)
for k, v in elem.iteritems():
clean_elem(v, extra_keys)
if v is None or v == {}:
del_keys.append(k)
for k in del_keys:
if k in elem:
del(elem[k])
clean_elem(new, extra_keys_to_remove)
return new | [
"def",
"clean_calico_data",
"(",
"data",
",",
"extra_keys_to_remove",
"=",
"None",
")",
":",
"new",
"=",
"copy",
".",
"deepcopy",
"(",
"data",
")",
"def",
"clean_elem",
"(",
"elem",
",",
"extra_keys",
")",
":",
"if",
"isinstance",
"(",
"elem",
",",
"list",
")",
":",
"for",
"i",
"in",
"elem",
":",
"clean_elem",
"(",
"i",
",",
"extra_keys",
")",
"if",
"isinstance",
"(",
"elem",
",",
"dict",
")",
":",
"del_keys",
"=",
"[",
"'creationTimestamp'",
",",
"'resourceVersion'",
",",
"'uid'",
"]",
"if",
"extra_keys",
"is",
"not",
"None",
":",
"for",
"extra_key",
"in",
"extra_keys",
":",
"del_keys",
".",
"append",
"(",
"extra_key",
")",
"for",
"k",
",",
"v",
"in",
"elem",
".",
"iteritems",
"(",
")",
":",
"clean_elem",
"(",
"v",
",",
"extra_keys",
")",
"if",
"v",
"is",
"None",
"or",
"v",
"==",
"{",
"}",
":",
"del_keys",
".",
"append",
"(",
"k",
")",
"for",
"k",
"in",
"del_keys",
":",
"if",
"k",
"in",
"elem",
":",
"del",
"(",
"elem",
"[",
"k",
"]",
")",
"clean_elem",
"(",
"new",
",",
"extra_keys_to_remove",
")",
"return",
"new"
] | Clean the data returned from a calicoctl get command to remove empty
structs, null values and non-configurable fields. | [
"Clean",
"the",
"data",
"returned",
"from",
"a",
"calicoctl",
"get",
"command",
"to",
"remove",
"empty",
"structs",
"null",
"values",
"and",
"non",
"-",
"configurable",
"fields",
"."
] | [
"\"\"\"\n Clean the data returned from a calicoctl get command to remove empty\n structs, null values and non-configurable fields. This makes comparison\n with the input data much simpler.\n\n Args:\n data: The data to clean.\n extra_keys_to_remove: more keys to remove if needed.\n\n Returns: The cleaned data.\n\n \"\"\"",
"# Recursively delete empty structs / nil values and non-configurable",
"# fields.",
"# Loop through each element in the list",
"# Remove non-settable fields, and recursively clean each value of",
"# the dictionary, removing nil values or values that are empty",
"# dicts after cleaning."
] | [
{
"param": "data",
"type": null
},
{
"param": "extra_keys_to_remove",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "data",
"type": null,
"docstring": "The data to clean.",
"docstring_tokens": [
"The",
"data",
"to",
"clean",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "extra_keys_to_remove",
"type": null,
"docstring": "more keys to remove if needed.",
"docstring_tokens": [
"more",
"keys",
"to",
"remove",
"if",
"needed",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import copy
def clean_calico_data(data, extra_keys_to_remove=None):
new = copy.deepcopy(data)
def clean_elem(elem, extra_keys):
if isinstance(elem, list):
for i in elem:
clean_elem(i, extra_keys)
if isinstance(elem, dict):
del_keys = ['creationTimestamp', 'resourceVersion', 'uid']
if extra_keys is not None:
for extra_key in extra_keys:
del_keys.append(extra_key)
for k, v in elem.iteritems():
clean_elem(v, extra_keys)
if v is None or v == {}:
del_keys.append(k)
for k in del_keys:
if k in elem:
del(elem[k])
clean_elem(new, extra_keys_to_remove)
return new |
31bd537a3d1572c9fa6aeb3baecb55a4e485344d | fasaxc/clowder | calicoctl/tests/st/utils/utils.py | [
"Apache-2.0"
def name(data):
    """
    Return the name of the resource in the supplied data.

    Args:
        data: A dictionary containing the resource.

    Returns: The resource name.
    """
    metadata = data['metadata']
    return metadata['name']
Returns the name of the resource in the supplied data
Args:
data: A dictionary containing the resource.
Returns: The resource name.
| Returns the name of the resource in the supplied data | [
"Returns",
"the",
"name",
"of",
"the",
"resource",
"in",
"the",
"supplied",
"data"
] | def name(data):
return data['metadata']['name'] | [
"def",
"name",
"(",
"data",
")",
":",
"return",
"data",
"[",
"'metadata'",
"]",
"[",
"'name'",
"]"
] | Returns the name of the resource in the supplied data | [
"Returns",
"the",
"name",
"of",
"the",
"resource",
"in",
"the",
"supplied",
"data"
] | [
"\"\"\"\n Returns the name of the resource in the supplied data\n Args:\n data: A dictionary containing the resource.\n\n Returns: The resource name.\n \"\"\""
] | [
{
"param": "data",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "data",
"type": null,
"docstring": "A dictionary containing the resource.",
"docstring_tokens": [
"A",
"dictionary",
"containing",
"the",
"resource",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def name(data):
return data['metadata']['name'] |
31bd537a3d1572c9fa6aeb3baecb55a4e485344d | fasaxc/clowder | calicoctl/tests/st/utils/utils.py | [
"Apache-2.0"
def namespace(data):
    """
    Returns the namespace of the resource in the supplied data
    Args:
        data: A dictionary containing the resource.

    Returns: The resource namespace.
    """
    return data['metadata']['namespace']
Returns the namespace of the resource in the supplied data
Args:
data: A dictionary containing the resource.
Returns: The resource name.
| Returns the namespace of the resource in the supplied data | [
"Returns",
"the",
"namespace",
"of",
"the",
"resource",
"in",
"the",
"supplied",
"data"
] | def namespace(data):
return data['metadata']['namespace'] | [
"def",
"namespace",
"(",
"data",
")",
":",
"return",
"data",
"[",
"'metadata'",
"]",
"[",
"'namespace'",
"]"
] | Returns the namespace of the resource in the supplied data | [
"Returns",
"the",
"namespace",
"of",
"the",
"resource",
"in",
"the",
"supplied",
"data"
] | [
"\"\"\"\n Returns the namespace of the resource in the supplied data\n Args:\n data: A dictionary containing the resource.\n\n Returns: The resource name.\n \"\"\""
] | [
{
"param": "data",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "data",
"type": null,
"docstring": "A dictionary containing the resource.",
"docstring_tokens": [
"A",
"dictionary",
"containing",
"the",
"resource",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def namespace(data):
return data['metadata']['namespace'] |
9ebc8da0ad2a9f6b5b1079c09e6e80593a1a6bac | OdiaNLP/spelling-correction | utils.py | [
"MIT"
def edit_distance(s1: str, s2: str) -> int:
    """Compute edit distance between two strings using dynamic programming.

    Lifted from: https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python
    """
    if len(s1) < len(s2):
        return edit_distance(s2, s1)

    # len(s1) >= len(s2)
    if len(s2) == 0:
        return len(s1)

    previous_row = range(len(s2) + 1)
    for i, c1 in enumerate(s1):
        current_row = [i + 1]
        for j, c2 in enumerate(s2):
            insertions = previous_row[j + 1] + 1  # j+1 instead of j since previous_row and
            # current_row are one character longer than s2
            deletions = current_row[j] + 1
            substitutions = previous_row[j] + (c1 != c2)
            current_row.append(min(insertions, deletions, substitutions))
        previous_row = current_row

    return previous_row[-1]
Lifted from: https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python | Compute edit distance between two strings using dynamic programmic. | [
"Compute",
"edit",
"distance",
"between",
"two",
"strings",
"using",
"dynamic",
"programmic",
"."
] | def edit_distance(s1: str, s2: str) -> int:
if len(s1) < len(s2):
return edit_distance(s2, s1)
if len(s2) == 0:
return len(s1)
previous_row = range(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1
deletions = current_row[j] + 1
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1] | [
"def",
"edit_distance",
"(",
"s1",
":",
"str",
",",
"s2",
":",
"str",
")",
"->",
"int",
":",
"if",
"len",
"(",
"s1",
")",
"<",
"len",
"(",
"s2",
")",
":",
"return",
"edit_distance",
"(",
"s2",
",",
"s1",
")",
"if",
"len",
"(",
"s2",
")",
"==",
"0",
":",
"return",
"len",
"(",
"s1",
")",
"previous_row",
"=",
"range",
"(",
"len",
"(",
"s2",
")",
"+",
"1",
")",
"for",
"i",
",",
"c1",
"in",
"enumerate",
"(",
"s1",
")",
":",
"current_row",
"=",
"[",
"i",
"+",
"1",
"]",
"for",
"j",
",",
"c2",
"in",
"enumerate",
"(",
"s2",
")",
":",
"insertions",
"=",
"previous_row",
"[",
"j",
"+",
"1",
"]",
"+",
"1",
"deletions",
"=",
"current_row",
"[",
"j",
"]",
"+",
"1",
"substitutions",
"=",
"previous_row",
"[",
"j",
"]",
"+",
"(",
"c1",
"!=",
"c2",
")",
"current_row",
".",
"append",
"(",
"min",
"(",
"insertions",
",",
"deletions",
",",
"substitutions",
")",
")",
"previous_row",
"=",
"current_row",
"return",
"previous_row",
"[",
"-",
"1",
"]"
] | Compute edit distance between two strings using dynamic programmic. | [
"Compute",
"edit",
"distance",
"between",
"two",
"strings",
"using",
"dynamic",
"programmic",
"."
] | [
"\"\"\"Compute edit distance between two strings using dynamic programmic.\n Lifted from: https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python\"\"\"",
"# len(s1) >= len(s2)",
"# j+1 instead of j since previous_row and",
"# current_row are one character longer than s2"
] | [
{
"param": "s1",
"type": "str"
},
{
"param": "s2",
"type": "str"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "s1",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "s2",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def edit_distance(s1: str, s2: str) -> int:
if len(s1) < len(s2):
return edit_distance(s2, s1)
if len(s2) == 0:
return len(s1)
previous_row = range(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1
deletions = current_row[j] + 1
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1] |
f8914201b858c40768ea60a99d03e878d6b81db8 | nataliyah123/phageParser | util/acc.py | [
"MIT"
def read_accession_file(f):
    """
    Read an open accession file, yielding the accession numbers it contains.

    This automatically skips blank lines and comments (lines starting with
    '#'). Note this is a generator, not a list.
    """
    for raw_line in f:
        line = raw_line.strip()
        if not line or line.startswith('#'):
            continue
        yield line
Read an open accession file, returning the list of accession numbers it
contains.
This automatically skips blank lines and comments.
| Read an open accession file, returning the list of accession numbers it
contains.
This automatically skips blank lines and comments. | [
"Read",
"an",
"open",
"accession",
"file",
"returning",
"the",
"list",
"of",
"accession",
"numbers",
"it",
"contains",
".",
"This",
"automatically",
"skips",
"blank",
"lines",
"and",
"comments",
"."
] | def read_accession_file(f):
for line in f:
line = line.strip()
if not line or line.startswith('#'):
continue
yield line | [
"def",
"read_accession_file",
"(",
"f",
")",
":",
"for",
"line",
"in",
"f",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"not",
"line",
"or",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"continue",
"yield",
"line"
] | Read an open accession file, returning the list of accession numbers it
contains. | [
"Read",
"an",
"open",
"accession",
"file",
"returning",
"the",
"list",
"of",
"accession",
"numbers",
"it",
"contains",
"."
] | [
"\"\"\"\n Read an open accession file, returning the list of accession numbers it\n contains.\n\n This automatically skips blank lines and comments.\n \"\"\""
] | [
{
"param": "f",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "f",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def read_accession_file(f):
for line in f:
line = line.strip()
if not line or line.startswith('#'):
continue
yield line |
963db2c08d1590debdaf46085464e8392c243870 | xolox/python-rsync-system-backup | rsync_system_backup/__init__.py | [
"MIT"
def ensure_trailing_slash(expression):
    """
    Add a trailing slash to rsync source/destination locations.

    :param expression: The rsync source/destination expression (a string).
    :returns: The same expression with exactly one trailing slash.
    """
    if not expression:
        # Empty or None expressions are passed through untouched.
        return expression
    # Collapse any existing trailing slashes down to exactly one.
    return expression.rstrip('/') + '/'
Add a trailing slash to rsync source/destination locations.
:param expression: The rsync source/destination expression (a string).
:returns: The same expression with exactly one trailing slash.
| Add a trailing slash to rsync source/destination locations. | [
"Add",
"a",
"trailing",
"slash",
"to",
"rsync",
"source",
"/",
"destination",
"locations",
"."
] | def ensure_trailing_slash(expression):
if expression:
expression = expression.rstrip('/')
expression += '/'
return expression | [
"def",
"ensure_trailing_slash",
"(",
"expression",
")",
":",
"if",
"expression",
":",
"expression",
"=",
"expression",
".",
"rstrip",
"(",
"'/'",
")",
"expression",
"+=",
"'/'",
"return",
"expression"
] | Add a trailing slash to rsync source/destination locations. | [
"Add",
"a",
"trailing",
"slash",
"to",
"rsync",
"source",
"/",
"destination",
"locations",
"."
] | [
"\"\"\"\n Add a trailing slash to rsync source/destination locations.\n\n :param expression: The rsync source/destination expression (a string).\n :returns: The same expression with exactly one trailing slash.\n \"\"\"",
"# Strip any existing trailing slashes.",
"# Add exactly one trailing slash."
] | [
{
"param": "expression",
"type": null
}
] | {
"returns": [
{
"docstring": "The same expression with exactly one trailing slash.",
"docstring_tokens": [
"The",
"same",
"expression",
"with",
"exactly",
"one",
"trailing",
"slash",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "expression",
"type": null,
"docstring": "The rsync source/destination expression (a string).",
"docstring_tokens": [
"The",
"rsync",
"source",
"/",
"destination",
"expression",
"(",
"a",
"string",
")",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def ensure_trailing_slash(expression):
if expression:
expression = expression.rstrip('/')
expression += '/'
return expression |
6bcf19cc2ef1c9616b663c229fa983de85a420fa | petrpavlu/storepass | storepass/utils.py | [
"MIT"
def escape_bytes(bytes_):
    """
    Convert a bytes object to an escaped string.

    Convert bytes to an ASCII string. Non-printable characters and a single
    quote (') are escaped. This allows to format bytes in messages as
    f"b'{utils.escape_bytes(bytes)}'".
    """
    printable = string.digits + string.ascii_letters + string.punctuation + ' '
    parts = []
    for byte in bytes_:
        char = chr(byte)
        if char == '\\':
            parts.append("\\\\")
        elif char == "'":
            parts.append("\\'")
        elif char in printable:
            parts.append(char)
        else:
            # Anything non-printable becomes a two-digit hex escape.
            parts.append("\\x%0.2x" % byte)
    return "".join(parts)
Convert a bytes object to an escaped string.
Convert bytes to an ASCII string. Non-printable characters and a single
quote (') are escaped. This allows to format bytes in messages as
f"b'{utils.escape_bytes(bytes)}'".
| Convert a bytes object to an escaped string.
Convert bytes to an ASCII string. Non-printable characters and a single
quote (') are escaped. | [
"Convert",
"a",
"bytes",
"object",
"to",
"an",
"escaped",
"string",
".",
"Convert",
"bytes",
"to",
"an",
"ASCII",
"string",
".",
"Non",
"-",
"printable",
"characters",
"and",
"a",
"single",
"quote",
"(",
"'",
")",
"are",
"escaped",
"."
] | def escape_bytes(bytes_):
res = ""
for byte in bytes_:
char = chr(byte)
if char == '\\':
res += "\\\\"
elif char == '\'':
res += "\\'"
elif char in (string.digits + string.ascii_letters +
string.punctuation + ' '):
res += char
else:
res += "\\x%0.2x" % byte
return res | [
"def",
"escape_bytes",
"(",
"bytes_",
")",
":",
"res",
"=",
"\"\"",
"for",
"byte",
"in",
"bytes_",
":",
"char",
"=",
"chr",
"(",
"byte",
")",
"if",
"char",
"==",
"'\\\\'",
":",
"res",
"+=",
"\"\\\\\\\\\"",
"elif",
"char",
"==",
"'\\''",
":",
"res",
"+=",
"\"\\\\'\"",
"elif",
"char",
"in",
"(",
"string",
".",
"digits",
"+",
"string",
".",
"ascii_letters",
"+",
"string",
".",
"punctuation",
"+",
"' '",
")",
":",
"res",
"+=",
"char",
"else",
":",
"res",
"+=",
"\"\\\\x%0.2x\"",
"%",
"byte",
"return",
"res"
] | Convert a bytes object to an escaped string. | [
"Convert",
"a",
"bytes",
"object",
"to",
"an",
"escaped",
"string",
"."
] | [
"\"\"\"\n Convert a bytes object to an escaped string.\n\n Convert bytes to an ASCII string. Non-printable characters and a single\n quote (') are escaped. This allows to format bytes in messages as\n f\"b'{utils.escape_bytes(bytes)}'\".\n \"\"\""
] | [
{
"param": "bytes_",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "bytes_",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import string
def escape_bytes(bytes_):
res = ""
for byte in bytes_:
char = chr(byte)
if char == '\\':
res += "\\\\"
elif char == '\'':
res += "\\'"
elif char in (string.digits + string.ascii_letters +
string.punctuation + ' '):
res += char
else:
res += "\\x%0.2x" % byte
return res |
46c1ced6778e7bf0021180efba652ba8cf0721e3 | petrpavlu/storepass | storepass/cli/__main__.py | [
"MIT"
] | Python | _check_entry_name | <not_specific> | def _check_entry_name(args):
"""Validate an entry name specified on the command line."""
# Reject an empty entry name.
if args.entry == '':
print("Specified entry name is empty", file=sys.stderr)
return 1
return 0 | Validate an entry name specified on the command line. | Validate an entry name specified on the command line. | [
"Validate",
"an",
"entry",
"name",
"specified",
"on",
"the",
"command",
"line",
"."
] | def _check_entry_name(args):
if args.entry == '':
print("Specified entry name is empty", file=sys.stderr)
return 1
return 0 | [
"def",
"_check_entry_name",
"(",
"args",
")",
":",
"if",
"args",
".",
"entry",
"==",
"''",
":",
"print",
"(",
"\"Specified entry name is empty\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"return",
"1",
"return",
"0"
] | Validate an entry name specified on the command line. | [
"Validate",
"an",
"entry",
"name",
"specified",
"on",
"the",
"command",
"line",
"."
] | [
"\"\"\"Validate an entry name specified on the command line.\"\"\"",
"# Reject an empty entry name."
] | [
{
"param": "args",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "args",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import sys
def _check_entry_name(args):
if args.entry == '':
print("Specified entry name is empty", file=sys.stderr)
return 1
return 0 |
46c1ced6778e7bf0021180efba652ba8cf0721e3 | petrpavlu/storepass | storepass/cli/__main__.py | [
"MIT"
] | Python | _process_init_command | <not_specific> | def _process_init_command(args, _model):
"""Handle the init command: create an empty password database."""
assert args.command == 'init'
# Keep the model empty and let the main() function write out the database.
return 0 | Handle the init command: create an empty password database. | Handle the init command: create an empty password database. | [
"Handle",
"the",
"init",
"command",
":",
"create",
"an",
"empty",
"password",
"database",
"."
] | def _process_init_command(args, _model):
assert args.command == 'init'
return 0 | [
"def",
"_process_init_command",
"(",
"args",
",",
"_model",
")",
":",
"assert",
"args",
".",
"command",
"==",
"'init'",
"return",
"0"
] | Handle the init command: create an empty password database. | [
"Handle",
"the",
"init",
"command",
":",
"create",
"an",
"empty",
"password",
"database",
"."
] | [
"\"\"\"Handle the init command: create an empty password database.\"\"\"",
"# Keep the model empty and let the main() function write out the database."
] | [
{
"param": "args",
"type": null
},
{
"param": "_model",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "args",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "_model",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _process_init_command(args, _model):
assert args.command == 'init'
return 0 |
54ac2b165f2db32a16fb2e82e078d1d199bae23c | petrpavlu/storepass | tests/utils.py | [
"MIT"
def dedent2(text):
    """
    Remove any common leading whitespace + '|' from every line in a given text.

    Remove any common leading whitespace + character '|' from every line in a
    given text.
    """
    pieces = []
    for line in textwrap.dedent(text).splitlines(True):
        # After dedenting, every line must begin with the '|' marker.
        assert line[:1] == '|'
        pieces.append(line[1:])
    return ''.join(pieces)
Remove any common leading whitespace + '|' from every line in a given text.
Remove any common leading whitespace + character '|' from every line in a
given text.
| Remove any common leading whitespace + '|' from every line in a given text.
Remove any common leading whitespace + character '|' from every line in a
given text. | [
"Remove",
"any",
"common",
"leading",
"whitespace",
"+",
"'",
"|",
"'",
"from",
"every",
"line",
"in",
"a",
"given",
"text",
".",
"Remove",
"any",
"common",
"leading",
"whitespace",
"+",
"character",
"'",
"|",
"'",
"from",
"every",
"line",
"in",
"a",
"given",
"text",
"."
] | def dedent2(text):
output = ''
lines = textwrap.dedent(text).splitlines(True)
for line in lines:
assert line[:1] == '|'
output += line[1:]
return output | [
"def",
"dedent2",
"(",
"text",
")",
":",
"output",
"=",
"''",
"lines",
"=",
"textwrap",
".",
"dedent",
"(",
"text",
")",
".",
"splitlines",
"(",
"True",
")",
"for",
"line",
"in",
"lines",
":",
"assert",
"line",
"[",
":",
"1",
"]",
"==",
"'|'",
"output",
"+=",
"line",
"[",
"1",
":",
"]",
"return",
"output"
] | Remove any common leading whitespace + '|' from every line in a given text. | [
"Remove",
"any",
"common",
"leading",
"whitespace",
"+",
"'",
"|",
"'",
"from",
"every",
"line",
"in",
"a",
"given",
"text",
"."
] | [
"\"\"\"\n Remove any common leading whitespace + '|' from every line in a given text.\n\n Remove any common leading whitespace + character '|' from every line in a\n given text.\n \"\"\""
] | [
{
"param": "text",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "text",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import textwrap
def dedent2(text):
output = ''
lines = textwrap.dedent(text).splitlines(True)
for line in lines:
assert line[:1] == '|'
output += line[1:]
return output |
4fd414247668b7d588591bb43cc1842d26b71ad0 | petrpavlu/storepass | storepass/model.py | [
"MIT"
def path_element_to_string(path_element):
    """Convert a single path element to its escaped string representation."""
    escaped = []
    for char in path_element:
        if char == '\\':
            escaped.append("\\\\")
        elif char == '/':
            escaped.append("\\/")
        else:
            escaped.append(char)
    return "".join(escaped)
"Convert",
"a",
"single",
"path",
"element",
"to",
"its",
"escaped",
"string",
"representation",
"."
] | def path_element_to_string(path_element):
res = ""
for char in path_element:
if char == '\\':
res += "\\\\"
elif char == '/':
res += "\\/"
else:
res += char
return res | [
"def",
"path_element_to_string",
"(",
"path_element",
")",
":",
"res",
"=",
"\"\"",
"for",
"char",
"in",
"path_element",
":",
"if",
"char",
"==",
"'\\\\'",
":",
"res",
"+=",
"\"\\\\\\\\\"",
"elif",
"char",
"==",
"'/'",
":",
"res",
"+=",
"\"\\\\/\"",
"else",
":",
"res",
"+=",
"char",
"return",
"res"
] | Convert a single path element to its escaped string representation. | [
"Convert",
"a",
"single",
"path",
"element",
"to",
"its",
"escaped",
"string",
"representation",
"."
] | [
"\"\"\"Convert a single path element to its escaped string representation.\"\"\""
] | [
{
"param": "path_element",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "path_element",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def path_element_to_string(path_element):
res = ""
for char in path_element:
if char == '\\':
res += "\\\\"
elif char == '/':
res += "\\/"
else:
res += char
return res |
15ae12f0046127583343ca0ead7a202117484ca8 | eyangs/transferNILM | model_structure.py | [
"MIT"
def save_model(model, network_type, algorithm, appliance, save_model_dir):
    """Save a model to a specified location.

    Parameters:
        model (tensorflow.keras.Model): The Keras model to save.
        network_type (string): The architecture of the model ('', 'reduced',
            'dropout', or 'reduced_dropout'). Currently unused; kept for
            backward compatibility with existing callers.
        algorithm (string): The pruning algorithm applied to the model.
            Currently unused; kept for backward compatibility.
        appliance (string): The appliance the model was trained with.
            Currently unused; kept for backward compatibility.
        save_model_dir (string): Full path of the file to save the model to.
    """
    # The file name used to be derived from appliance/algorithm/network_type;
    # the caller now supplies the full path directly.
    model_path = save_model_dir
    if not os.path.exists(model_path):
        # Ensure the target file exists before Keras writes to it.
        open(model_path, 'a').close()
    model.save(model_path)
target appliance, architecture, and pruning algorithm.
Parameters:
model (tensorflow.keras.Model): The Keras model to save.
network_type (string): The architecture of the model ('', 'reduced', 'dropout', or 'reduced_dropout').
algorithm (string): The pruning algorithm applied to the model.
appliance (string): The appliance the model was trained with.
| Saves a model to a specified location. Models are named using a combination of their
target appliance, architecture, and pruning algorithm.
model (tensorflow.keras.Model): The Keras model to save. | [
"Saves",
"a",
"model",
"to",
"a",
"specified",
"location",
".",
"Models",
"are",
"named",
"using",
"a",
"combination",
"of",
"their",
"target",
"appliance",
"architecture",
"and",
"pruning",
"algorithm",
".",
"model",
"(",
"tensorflow",
".",
"keras",
".",
"Model",
")",
":",
"The",
"Keras",
"model",
"to",
"save",
"."
] | def save_model(model, network_type, algorithm, appliance, save_model_dir):
model_path = save_model_dir
if not os.path.exists (model_path):
open((model_path), 'a').close()
model.save(model_path) | [
"def",
"save_model",
"(",
"model",
",",
"network_type",
",",
"algorithm",
",",
"appliance",
",",
"save_model_dir",
")",
":",
"model_path",
"=",
"save_model_dir",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"model_path",
")",
":",
"open",
"(",
"(",
"model_path",
")",
",",
"'a'",
")",
".",
"close",
"(",
")",
"model",
".",
"save",
"(",
"model_path",
")"
] | Saves a model to a specified location. | [
"Saves",
"a",
"model",
"to",
"a",
"specified",
"location",
"."
] | [
"\"\"\" Saves a model to a specified location. Models are named using a combination of their \n target appliance, architecture, and pruning algorithm.\n\n Parameters:\n model (tensorflow.keras.Model): The Keras model to save.\n network_type (string): The architecture of the model ('', 'reduced', 'dropout', or 'reduced_dropout').\n algorithm (string): The pruning algorithm applied to the model.\n appliance (string): The appliance the model was trained with.\n\n \"\"\"",
"#model_path = \"saved_models/\" + appliance + \"_\" + algorithm + \"_\" + network_type + \"_model.h5\""
] | [
{
"param": "model",
"type": null
},
{
"param": "network_type",
"type": null
},
{
"param": "algorithm",
"type": null
},
{
"param": "appliance",
"type": null
},
{
"param": "save_model_dir",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "model",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "network_type",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "algorithm",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "appliance",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "save_model_dir",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import os
def save_model(model, network_type, algorithm, appliance, save_model_dir):
model_path = save_model_dir
if not os.path.exists (model_path):
open((model_path), 'a').close()
model.save(model_path) |
d20ef7f7ae603259ed23e254994e98c70370287c | WojciechMula/canvas2svg | canvasvg.py | [
"BSD-3-Clause"
] | Python | parse_dash | <not_specific> | def parse_dash(string, width):
"parse dash pattern specified with string"
# DashConvert from {tk-sources}/generic/tkCanvUtil.c
w = max(1, int(width + 0.5))
n = len(string)
result = []
for i, c in enumerate(string):
if c == " " and len(result):
result[-1] += w + 1
elif c == "_":
result.append(8*w)
result.append(4*w)
elif c == "-":
result.append(6*w)
result.append(4*w)
elif c == ",":
result.append(4*w)
result.append(4*w)
elif c == ".":
result.append(2*w)
result.append(4*w)
return result | parse dash pattern specified with string | parse dash pattern specified with string | [
"parse",
"dash",
"pattern",
"specified",
"with",
"string"
] | def parse_dash(string, width):
w = max(1, int(width + 0.5))
n = len(string)
result = []
for i, c in enumerate(string):
if c == " " and len(result):
result[-1] += w + 1
elif c == "_":
result.append(8*w)
result.append(4*w)
elif c == "-":
result.append(6*w)
result.append(4*w)
elif c == ",":
result.append(4*w)
result.append(4*w)
elif c == ".":
result.append(2*w)
result.append(4*w)
return result | [
"def",
"parse_dash",
"(",
"string",
",",
"width",
")",
":",
"w",
"=",
"max",
"(",
"1",
",",
"int",
"(",
"width",
"+",
"0.5",
")",
")",
"n",
"=",
"len",
"(",
"string",
")",
"result",
"=",
"[",
"]",
"for",
"i",
",",
"c",
"in",
"enumerate",
"(",
"string",
")",
":",
"if",
"c",
"==",
"\" \"",
"and",
"len",
"(",
"result",
")",
":",
"result",
"[",
"-",
"1",
"]",
"+=",
"w",
"+",
"1",
"elif",
"c",
"==",
"\"_\"",
":",
"result",
".",
"append",
"(",
"8",
"*",
"w",
")",
"result",
".",
"append",
"(",
"4",
"*",
"w",
")",
"elif",
"c",
"==",
"\"-\"",
":",
"result",
".",
"append",
"(",
"6",
"*",
"w",
")",
"result",
".",
"append",
"(",
"4",
"*",
"w",
")",
"elif",
"c",
"==",
"\",\"",
":",
"result",
".",
"append",
"(",
"4",
"*",
"w",
")",
"result",
".",
"append",
"(",
"4",
"*",
"w",
")",
"elif",
"c",
"==",
"\".\"",
":",
"result",
".",
"append",
"(",
"2",
"*",
"w",
")",
"result",
".",
"append",
"(",
"4",
"*",
"w",
")",
"return",
"result"
] | parse dash pattern specified with string | [
"parse",
"dash",
"pattern",
"specified",
"with",
"string"
] | [
"\"parse dash pattern specified with string\"",
"# DashConvert from {tk-sources}/generic/tkCanvUtil.c"
] | [
{
"param": "string",
"type": null
},
{
"param": "width",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "string",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "width",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def parse_dash(string, width):
w = max(1, int(width + 0.5))
n = len(string)
result = []
for i, c in enumerate(string):
if c == " " and len(result):
result[-1] += w + 1
elif c == "_":
result.append(8*w)
result.append(4*w)
elif c == "-":
result.append(6*w)
result.append(4*w)
elif c == ",":
result.append(4*w)
result.append(4*w)
elif c == ".":
result.append(2*w)
result.append(4*w)
return result |
241c36d99c353c53d5ed55f9a59808bea1330510 | chrisk27/fastdifgrow | fastdifgrow/fastdifgrow_main.py | [
"MIT"
] | Python | sim_parameters | null | def sim_parameters():
"""This function defines the initial parameters used in simulations"""
global rows, cols, h, per_cycle, num_cycles
rows = 100
cols = 100
h = 15
per_cycle = 10**7
num_cycles = 10**2 | This function defines the initial parameters used in simulations | This function defines the initial parameters used in simulations | [
"This",
"function",
"defines",
"the",
"initial",
"parameters",
"used",
"in",
"simulations"
] | def sim_parameters():
global rows, cols, h, per_cycle, num_cycles
rows = 100
cols = 100
h = 15
per_cycle = 10**7
num_cycles = 10**2 | [
"def",
"sim_parameters",
"(",
")",
":",
"global",
"rows",
",",
"cols",
",",
"h",
",",
"per_cycle",
",",
"num_cycles",
"rows",
"=",
"100",
"cols",
"=",
"100",
"h",
"=",
"15",
"per_cycle",
"=",
"10",
"**",
"7",
"num_cycles",
"=",
"10",
"**",
"2"
] | This function defines the initial parameters used in simulations | [
"This",
"function",
"defines",
"the",
"initial",
"parameters",
"used",
"in",
"simulations"
] | [
"\"\"\"This function defines the initial parameters used in simulations\"\"\""
] | [] | {
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
} | def sim_parameters():
global rows, cols, h, per_cycle, num_cycles
rows = 100
cols = 100
h = 15
per_cycle = 10**7
num_cycles = 10**2 |
241c36d99c353c53d5ed55f9a59808bea1330510 | chrisk27/fastdifgrow | fastdifgrow/fastdifgrow_main.py | [
"MIT"
] | Python | reaction_rates | <not_specific> | def reaction_rates():
"""This function defines the reaction rates for each process"""
global bx, bm, dx, dm, sm, sx, lx
bx = 1 # birth of xantophores
bm = 0 # birth of melanophores
dx = 0 # death of xantophores
dm = 0 # death of melanophores
sm = 1 # short-range killing of xantophore by melanophore
sx = 1 # short-range killing of melanophore by xantophore
lx = 2.5 # long-range activation/birth strength
return | This function defines the reaction rates for each process | This function defines the reaction rates for each process | [
"This",
"function",
"defines",
"the",
"reaction",
"rates",
"for",
"each",
"process"
] | def reaction_rates():
global bx, bm, dx, dm, sm, sx, lx
bx = 1
bm = 0
dx = 0
dm = 0
sm = 1
sx = 1
lx = 2.5
return | [
"def",
"reaction_rates",
"(",
")",
":",
"global",
"bx",
",",
"bm",
",",
"dx",
",",
"dm",
",",
"sm",
",",
"sx",
",",
"lx",
"bx",
"=",
"1",
"bm",
"=",
"0",
"dx",
"=",
"0",
"dm",
"=",
"0",
"sm",
"=",
"1",
"sx",
"=",
"1",
"lx",
"=",
"2.5",
"return"
] | This function defines the reaction rates for each process | [
"This",
"function",
"defines",
"the",
"reaction",
"rates",
"for",
"each",
"process"
] | [
"\"\"\"This function defines the reaction rates for each process\"\"\"",
"# birth of xantophores",
"# birth of melanophores",
"# death of xantophores",
"# death of melanophores",
"# short-range killing of xantophore by melanophore",
"# short-range killing of melanophore by xantophore",
"# long-range activation/birth strength"
] | [] | {
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
} | def reaction_rates():
global bx, bm, dx, dm, sm, sx, lx
bx = 1
bm = 0
dx = 0
dm = 0
sm = 1
sx = 1
lx = 2.5
return |
e308b5520485f58c0a528ff53d5240b4450cc42c | macph/nextbus | nextbus/models/tables.py | [
"MIT"
] | Python | _insert_service_modes | null | def _insert_service_modes(target, connection, **kw):
""" Inserts service mode IDs and names after creating lookup table. """
statement = target.insert().values([
{"id": 1, "name": "bus"},
{"id": 2, "name": "coach"},
{"id": 3, "name": "tram"},
{"id": 4, "name": "metro"},
{"id": 5, "name": "underground"}
])
connection.execute(statement) | Inserts service mode IDs and names after creating lookup table. | Inserts service mode IDs and names after creating lookup table. | [
"Inserts",
"service",
"mode",
"IDs",
"and",
"names",
"after",
"creating",
"lookup",
"table",
"."
] | def _insert_service_modes(target, connection, **kw):
statement = target.insert().values([
{"id": 1, "name": "bus"},
{"id": 2, "name": "coach"},
{"id": 3, "name": "tram"},
{"id": 4, "name": "metro"},
{"id": 5, "name": "underground"}
])
connection.execute(statement) | [
"def",
"_insert_service_modes",
"(",
"target",
",",
"connection",
",",
"**",
"kw",
")",
":",
"statement",
"=",
"target",
".",
"insert",
"(",
")",
".",
"values",
"(",
"[",
"{",
"\"id\"",
":",
"1",
",",
"\"name\"",
":",
"\"bus\"",
"}",
",",
"{",
"\"id\"",
":",
"2",
",",
"\"name\"",
":",
"\"coach\"",
"}",
",",
"{",
"\"id\"",
":",
"3",
",",
"\"name\"",
":",
"\"tram\"",
"}",
",",
"{",
"\"id\"",
":",
"4",
",",
"\"name\"",
":",
"\"metro\"",
"}",
",",
"{",
"\"id\"",
":",
"5",
",",
"\"name\"",
":",
"\"underground\"",
"}",
"]",
")",
"connection",
".",
"execute",
"(",
"statement",
")"
] | Inserts service mode IDs and names after creating lookup table. | [
"Inserts",
"service",
"mode",
"IDs",
"and",
"names",
"after",
"creating",
"lookup",
"table",
"."
] | [
"\"\"\" Inserts service mode IDs and names after creating lookup table. \"\"\""
] | [
{
"param": "target",
"type": null
},
{
"param": "connection",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "target",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "connection",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _insert_service_modes(target, connection, **kw):
statement = target.insert().values([
{"id": 1, "name": "bus"},
{"id": 2, "name": "coach"},
{"id": 3, "name": "tram"},
{"id": 4, "name": "metro"},
{"id": 5, "name": "underground"}
])
connection.execute(statement) |
e308b5520485f58c0a528ff53d5240b4450cc42c | macph/nextbus | nextbus/models/tables.py | [
"MIT"
] | Python | _insert_bank_holidays | null | def _insert_bank_holidays(target, connection, **kw):
""" Inserts bank holiday IDs and names after creating lookup table. """
statement = target.insert().values([
{"id": 1, "name": "NewYearsDay"},
{"id": 2, "name": "Jan2ndScotland"},
{"id": 3, "name": "GoodFriday"},
{"id": 4, "name": "EasterMonday"},
{"id": 5, "name": "MayDay"},
{"id": 6, "name": "SpringBank"},
{"id": 7, "name": "LateSummerBankHolidayNotScotland"},
{"id": 8, "name": "AugustBankHolidayScotland"},
{"id": 9, "name": "ChristmasDay"},
{"id": 10, "name": "BoxingDay"},
{"id": 11, "name": "ChristmasDayHoliday"},
{"id": 12, "name": "BoxingDayHoliday"},
{"id": 13, "name": "NewYearsDayHoliday"},
{"id": 14, "name": "ChristmasEve"},
{"id": 15, "name": "NewYearsEve"},
])
connection.execute(statement) | Inserts bank holiday IDs and names after creating lookup table. | Inserts bank holiday IDs and names after creating lookup table. | [
"Inserts",
"bank",
"holiday",
"IDs",
"and",
"names",
"after",
"creating",
"lookup",
"table",
"."
] | def _insert_bank_holidays(target, connection, **kw):
statement = target.insert().values([
{"id": 1, "name": "NewYearsDay"},
{"id": 2, "name": "Jan2ndScotland"},
{"id": 3, "name": "GoodFriday"},
{"id": 4, "name": "EasterMonday"},
{"id": 5, "name": "MayDay"},
{"id": 6, "name": "SpringBank"},
{"id": 7, "name": "LateSummerBankHolidayNotScotland"},
{"id": 8, "name": "AugustBankHolidayScotland"},
{"id": 9, "name": "ChristmasDay"},
{"id": 10, "name": "BoxingDay"},
{"id": 11, "name": "ChristmasDayHoliday"},
{"id": 12, "name": "BoxingDayHoliday"},
{"id": 13, "name": "NewYearsDayHoliday"},
{"id": 14, "name": "ChristmasEve"},
{"id": 15, "name": "NewYearsEve"},
])
connection.execute(statement) | [
"def",
"_insert_bank_holidays",
"(",
"target",
",",
"connection",
",",
"**",
"kw",
")",
":",
"statement",
"=",
"target",
".",
"insert",
"(",
")",
".",
"values",
"(",
"[",
"{",
"\"id\"",
":",
"1",
",",
"\"name\"",
":",
"\"NewYearsDay\"",
"}",
",",
"{",
"\"id\"",
":",
"2",
",",
"\"name\"",
":",
"\"Jan2ndScotland\"",
"}",
",",
"{",
"\"id\"",
":",
"3",
",",
"\"name\"",
":",
"\"GoodFriday\"",
"}",
",",
"{",
"\"id\"",
":",
"4",
",",
"\"name\"",
":",
"\"EasterMonday\"",
"}",
",",
"{",
"\"id\"",
":",
"5",
",",
"\"name\"",
":",
"\"MayDay\"",
"}",
",",
"{",
"\"id\"",
":",
"6",
",",
"\"name\"",
":",
"\"SpringBank\"",
"}",
",",
"{",
"\"id\"",
":",
"7",
",",
"\"name\"",
":",
"\"LateSummerBankHolidayNotScotland\"",
"}",
",",
"{",
"\"id\"",
":",
"8",
",",
"\"name\"",
":",
"\"AugustBankHolidayScotland\"",
"}",
",",
"{",
"\"id\"",
":",
"9",
",",
"\"name\"",
":",
"\"ChristmasDay\"",
"}",
",",
"{",
"\"id\"",
":",
"10",
",",
"\"name\"",
":",
"\"BoxingDay\"",
"}",
",",
"{",
"\"id\"",
":",
"11",
",",
"\"name\"",
":",
"\"ChristmasDayHoliday\"",
"}",
",",
"{",
"\"id\"",
":",
"12",
",",
"\"name\"",
":",
"\"BoxingDayHoliday\"",
"}",
",",
"{",
"\"id\"",
":",
"13",
",",
"\"name\"",
":",
"\"NewYearsDayHoliday\"",
"}",
",",
"{",
"\"id\"",
":",
"14",
",",
"\"name\"",
":",
"\"ChristmasEve\"",
"}",
",",
"{",
"\"id\"",
":",
"15",
",",
"\"name\"",
":",
"\"NewYearsEve\"",
"}",
",",
"]",
")",
"connection",
".",
"execute",
"(",
"statement",
")"
] | Inserts bank holiday IDs and names after creating lookup table. | [
"Inserts",
"bank",
"holiday",
"IDs",
"and",
"names",
"after",
"creating",
"lookup",
"table",
"."
] | [
"\"\"\" Inserts bank holiday IDs and names after creating lookup table. \"\"\""
] | [
{
"param": "target",
"type": null
},
{
"param": "connection",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "target",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "connection",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _insert_bank_holidays(target, connection, **kw):
statement = target.insert().values([
{"id": 1, "name": "NewYearsDay"},
{"id": 2, "name": "Jan2ndScotland"},
{"id": 3, "name": "GoodFriday"},
{"id": 4, "name": "EasterMonday"},
{"id": 5, "name": "MayDay"},
{"id": 6, "name": "SpringBank"},
{"id": 7, "name": "LateSummerBankHolidayNotScotland"},
{"id": 8, "name": "AugustBankHolidayScotland"},
{"id": 9, "name": "ChristmasDay"},
{"id": 10, "name": "BoxingDay"},
{"id": 11, "name": "ChristmasDayHoliday"},
{"id": 12, "name": "BoxingDayHoliday"},
{"id": 13, "name": "NewYearsDayHoliday"},
{"id": 14, "name": "ChristmasEve"},
{"id": 15, "name": "NewYearsEve"},
])
connection.execute(statement) |
e308b5520485f58c0a528ff53d5240b4450cc42c | macph/nextbus | nextbus/models/tables.py | [
"MIT"
] | Python | _insert_bank_holiday_dates | null | def _insert_bank_holiday_dates(target, connection, **kw):
""" Inserts bank holiday dates after creating table. """
statement = target.insert().values([
{"holiday_ref": 13, "date": "2017-01-02"},
{"holiday_ref": 2, "date": "2017-01-02"},
{"holiday_ref": 3, "date": "2017-04-14"},
{"holiday_ref": 4, "date": "2017-04-17"},
{"holiday_ref": 5, "date": "2017-05-01"},
{"holiday_ref": 6, "date": "2017-05-29"},
{"holiday_ref": 8, "date": "2017-08-05"},
{"holiday_ref": 7, "date": "2017-08-28"},
{"holiday_ref": 9, "date": "2017-12-25"},
{"holiday_ref": 10, "date": "2017-12-26"},
{"holiday_ref": 1, "date": "2018-01-01"},
{"holiday_ref": 2, "date": "2018-01-02"},
{"holiday_ref": 3, "date": "2018-03-30"},
{"holiday_ref": 4, "date": "2018-04-02"},
{"holiday_ref": 5, "date": "2018-05-07"},
{"holiday_ref": 6, "date": "2018-05-28"},
{"holiday_ref": 8, "date": "2018-08-06"},
{"holiday_ref": 7, "date": "2018-08-27"},
{"holiday_ref": 9, "date": "2018-12-25"},
{"holiday_ref": 10, "date": "2018-12-26"},
{"holiday_ref": 1, "date": "2019-01-01"},
{"holiday_ref": 2, "date": "2019-01-02"},
{"holiday_ref": 3, "date": "2019-04-19"},
{"holiday_ref": 4, "date": "2019-04-22"},
{"holiday_ref": 5, "date": "2019-05-06"},
{"holiday_ref": 6, "date": "2019-05-27"},
{"holiday_ref": 8, "date": "2019-08-05"},
{"holiday_ref": 7, "date": "2019-08-26"},
{"holiday_ref": 9, "date": "2019-12-25"},
{"holiday_ref": 10, "date": "2019-12-26"},
{"holiday_ref": 1, "date": "2020-01-01"},
{"holiday_ref": 2, "date": "2020-01-02"},
{"holiday_ref": 3, "date": "2020-04-10"},
{"holiday_ref": 4, "date": "2020-04-13"},
{"holiday_ref": 5, "date": "2020-05-08"},
{"holiday_ref": 6, "date": "2020-05-25"},
{"holiday_ref": 7, "date": "2020-08-03"},
{"holiday_ref": 8, "date": "2020-08-31"},
{"holiday_ref": 14, "date": "2020-12-24"},
{"holiday_ref": 9, "date": "2020-12-25"},
{"holiday_ref": 12, "date": "2020-12-28"},
{"holiday_ref": 15, "date": "2020-12-31"},
])
connection.execute(statement) | Inserts bank holiday dates after creating table. | Inserts bank holiday dates after creating table. | [
"Inserts",
"bank",
"holiday",
"dates",
"after",
"creating",
"table",
"."
] | def _insert_bank_holiday_dates(target, connection, **kw):
statement = target.insert().values([
{"holiday_ref": 13, "date": "2017-01-02"},
{"holiday_ref": 2, "date": "2017-01-02"},
{"holiday_ref": 3, "date": "2017-04-14"},
{"holiday_ref": 4, "date": "2017-04-17"},
{"holiday_ref": 5, "date": "2017-05-01"},
{"holiday_ref": 6, "date": "2017-05-29"},
{"holiday_ref": 8, "date": "2017-08-05"},
{"holiday_ref": 7, "date": "2017-08-28"},
{"holiday_ref": 9, "date": "2017-12-25"},
{"holiday_ref": 10, "date": "2017-12-26"},
{"holiday_ref": 1, "date": "2018-01-01"},
{"holiday_ref": 2, "date": "2018-01-02"},
{"holiday_ref": 3, "date": "2018-03-30"},
{"holiday_ref": 4, "date": "2018-04-02"},
{"holiday_ref": 5, "date": "2018-05-07"},
{"holiday_ref": 6, "date": "2018-05-28"},
{"holiday_ref": 8, "date": "2018-08-06"},
{"holiday_ref": 7, "date": "2018-08-27"},
{"holiday_ref": 9, "date": "2018-12-25"},
{"holiday_ref": 10, "date": "2018-12-26"},
{"holiday_ref": 1, "date": "2019-01-01"},
{"holiday_ref": 2, "date": "2019-01-02"},
{"holiday_ref": 3, "date": "2019-04-19"},
{"holiday_ref": 4, "date": "2019-04-22"},
{"holiday_ref": 5, "date": "2019-05-06"},
{"holiday_ref": 6, "date": "2019-05-27"},
{"holiday_ref": 8, "date": "2019-08-05"},
{"holiday_ref": 7, "date": "2019-08-26"},
{"holiday_ref": 9, "date": "2019-12-25"},
{"holiday_ref": 10, "date": "2019-12-26"},
{"holiday_ref": 1, "date": "2020-01-01"},
{"holiday_ref": 2, "date": "2020-01-02"},
{"holiday_ref": 3, "date": "2020-04-10"},
{"holiday_ref": 4, "date": "2020-04-13"},
{"holiday_ref": 5, "date": "2020-05-08"},
{"holiday_ref": 6, "date": "2020-05-25"},
{"holiday_ref": 7, "date": "2020-08-03"},
{"holiday_ref": 8, "date": "2020-08-31"},
{"holiday_ref": 14, "date": "2020-12-24"},
{"holiday_ref": 9, "date": "2020-12-25"},
{"holiday_ref": 12, "date": "2020-12-28"},
{"holiday_ref": 15, "date": "2020-12-31"},
])
connection.execute(statement) | [
"def",
"_insert_bank_holiday_dates",
"(",
"target",
",",
"connection",
",",
"**",
"kw",
")",
":",
"statement",
"=",
"target",
".",
"insert",
"(",
")",
".",
"values",
"(",
"[",
"{",
"\"holiday_ref\"",
":",
"13",
",",
"\"date\"",
":",
"\"2017-01-02\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"2",
",",
"\"date\"",
":",
"\"2017-01-02\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"3",
",",
"\"date\"",
":",
"\"2017-04-14\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"4",
",",
"\"date\"",
":",
"\"2017-04-17\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"5",
",",
"\"date\"",
":",
"\"2017-05-01\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"6",
",",
"\"date\"",
":",
"\"2017-05-29\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"8",
",",
"\"date\"",
":",
"\"2017-08-05\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"7",
",",
"\"date\"",
":",
"\"2017-08-28\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"9",
",",
"\"date\"",
":",
"\"2017-12-25\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"10",
",",
"\"date\"",
":",
"\"2017-12-26\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"1",
",",
"\"date\"",
":",
"\"2018-01-01\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"2",
",",
"\"date\"",
":",
"\"2018-01-02\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"3",
",",
"\"date\"",
":",
"\"2018-03-30\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"4",
",",
"\"date\"",
":",
"\"2018-04-02\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"5",
",",
"\"date\"",
":",
"\"2018-05-07\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"6",
",",
"\"date\"",
":",
"\"2018-05-28\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"8",
",",
"\"date\"",
":",
"\"2018-08-06\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"7",
",",
"\"date\"",
":",
"\"2018-08-27\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"9",
",",
"\"date\"",
":",
"\"2018-12-25\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"10",
",",
"\"date\"",
":",
"\"2018-12-26\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"1",
",",
"\"date\"",
":",
"\"2019-01-01\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"2",
",",
"\"date\"",
":",
"\"2019-01-02\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"3",
",",
"\"date\"",
":",
"\"2019-04-19\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"4",
",",
"\"date\"",
":",
"\"2019-04-22\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"5",
",",
"\"date\"",
":",
"\"2019-05-06\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"6",
",",
"\"date\"",
":",
"\"2019-05-27\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"8",
",",
"\"date\"",
":",
"\"2019-08-05\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"7",
",",
"\"date\"",
":",
"\"2019-08-26\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"9",
",",
"\"date\"",
":",
"\"2019-12-25\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"10",
",",
"\"date\"",
":",
"\"2019-12-26\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"1",
",",
"\"date\"",
":",
"\"2020-01-01\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"2",
",",
"\"date\"",
":",
"\"2020-01-02\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"3",
",",
"\"date\"",
":",
"\"2020-04-10\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"4",
",",
"\"date\"",
":",
"\"2020-04-13\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"5",
",",
"\"date\"",
":",
"\"2020-05-08\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"6",
",",
"\"date\"",
":",
"\"2020-05-25\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"7",
",",
"\"date\"",
":",
"\"2020-08-03\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"8",
",",
"\"date\"",
":",
"\"2020-08-31\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"14",
",",
"\"date\"",
":",
"\"2020-12-24\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"9",
",",
"\"date\"",
":",
"\"2020-12-25\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"12",
",",
"\"date\"",
":",
"\"2020-12-28\"",
"}",
",",
"{",
"\"holiday_ref\"",
":",
"15",
",",
"\"date\"",
":",
"\"2020-12-31\"",
"}",
",",
"]",
")",
"connection",
".",
"execute",
"(",
"statement",
")"
] | Inserts bank holiday dates after creating table. | [
"Inserts",
"bank",
"holiday",
"dates",
"after",
"creating",
"table",
"."
] | [
"\"\"\" Inserts bank holiday dates after creating table. \"\"\""
] | [
{
"param": "target",
"type": null
},
{
"param": "connection",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "target",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "connection",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _insert_bank_holiday_dates(target, connection, **kw):
statement = target.insert().values([
{"holiday_ref": 13, "date": "2017-01-02"},
{"holiday_ref": 2, "date": "2017-01-02"},
{"holiday_ref": 3, "date": "2017-04-14"},
{"holiday_ref": 4, "date": "2017-04-17"},
{"holiday_ref": 5, "date": "2017-05-01"},
{"holiday_ref": 6, "date": "2017-05-29"},
{"holiday_ref": 8, "date": "2017-08-05"},
{"holiday_ref": 7, "date": "2017-08-28"},
{"holiday_ref": 9, "date": "2017-12-25"},
{"holiday_ref": 10, "date": "2017-12-26"},
{"holiday_ref": 1, "date": "2018-01-01"},
{"holiday_ref": 2, "date": "2018-01-02"},
{"holiday_ref": 3, "date": "2018-03-30"},
{"holiday_ref": 4, "date": "2018-04-02"},
{"holiday_ref": 5, "date": "2018-05-07"},
{"holiday_ref": 6, "date": "2018-05-28"},
{"holiday_ref": 8, "date": "2018-08-06"},
{"holiday_ref": 7, "date": "2018-08-27"},
{"holiday_ref": 9, "date": "2018-12-25"},
{"holiday_ref": 10, "date": "2018-12-26"},
{"holiday_ref": 1, "date": "2019-01-01"},
{"holiday_ref": 2, "date": "2019-01-02"},
{"holiday_ref": 3, "date": "2019-04-19"},
{"holiday_ref": 4, "date": "2019-04-22"},
{"holiday_ref": 5, "date": "2019-05-06"},
{"holiday_ref": 6, "date": "2019-05-27"},
{"holiday_ref": 8, "date": "2019-08-05"},
{"holiday_ref": 7, "date": "2019-08-26"},
{"holiday_ref": 9, "date": "2019-12-25"},
{"holiday_ref": 10, "date": "2019-12-26"},
{"holiday_ref": 1, "date": "2020-01-01"},
{"holiday_ref": 2, "date": "2020-01-02"},
{"holiday_ref": 3, "date": "2020-04-10"},
{"holiday_ref": 4, "date": "2020-04-13"},
{"holiday_ref": 5, "date": "2020-05-08"},
{"holiday_ref": 6, "date": "2020-05-25"},
{"holiday_ref": 7, "date": "2020-08-03"},
{"holiday_ref": 8, "date": "2020-08-31"},
{"holiday_ref": 14, "date": "2020-12-24"},
{"holiday_ref": 9, "date": "2020-12-25"},
{"holiday_ref": 12, "date": "2020-12-28"},
{"holiday_ref": 15, "date": "2020-12-31"},
])
connection.execute(statement) |
016c673a5f440b4ae1b2683cf9387cf302f5a6d5 | macph/nextbus | nextbus/populate/naptan.py | [
"MIT"
] | Python | _find_stop_area_mode | <not_specific> | def _find_stop_area_mode(query_result, ref):
""" Finds the mode of references for each stop area.
The query results must have 3 columns: primary key, foreign key
reference and number of stop points within each area matching that
reference, in that order.
:param ref: Name of the reference column.
:returns: Two lists; one to be to be used with `bulk_update_mappings`
and the other strings for invalid areas.
"""
# Group by stop area and reference
stop_areas = collections.defaultdict(dict)
for row in query_result:
stop_areas[row[0]][row[1]] = row[2]
# Check each area and find mode matching reference
update_areas = []
invalid_areas = {}
for sa, count in stop_areas.items():
max_count = [k for k, v in count.items() if v == max(count.values())]
if len(max_count) == 1:
update_areas.append({"code": sa, ref: max_count[0]})
else:
invalid_areas[sa] = max_count
return update_areas, invalid_areas | Finds the mode of references for each stop area.
The query results must have 3 columns: primary key, foreign key
reference and number of stop points within each area matching that
reference, in that order.
:param ref: Name of the reference column.
:returns: Two lists; one to be to be used with `bulk_update_mappings`
and the other strings for invalid areas.
| Finds the mode of references for each stop area.
The query results must have 3 columns: primary key, foreign key
reference and number of stop points within each area matching that
reference, in that order. | [
"Finds",
"the",
"mode",
"of",
"references",
"for",
"each",
"stop",
"area",
".",
"The",
"query",
"results",
"must",
"have",
"3",
"columns",
":",
"primary",
"key",
"foreign",
"key",
"reference",
"and",
"number",
"of",
"stop",
"points",
"within",
"each",
"area",
"matching",
"that",
"reference",
"in",
"that",
"order",
"."
] | def _find_stop_area_mode(query_result, ref):
stop_areas = collections.defaultdict(dict)
for row in query_result:
stop_areas[row[0]][row[1]] = row[2]
update_areas = []
invalid_areas = {}
for sa, count in stop_areas.items():
max_count = [k for k, v in count.items() if v == max(count.values())]
if len(max_count) == 1:
update_areas.append({"code": sa, ref: max_count[0]})
else:
invalid_areas[sa] = max_count
return update_areas, invalid_areas | [
"def",
"_find_stop_area_mode",
"(",
"query_result",
",",
"ref",
")",
":",
"stop_areas",
"=",
"collections",
".",
"defaultdict",
"(",
"dict",
")",
"for",
"row",
"in",
"query_result",
":",
"stop_areas",
"[",
"row",
"[",
"0",
"]",
"]",
"[",
"row",
"[",
"1",
"]",
"]",
"=",
"row",
"[",
"2",
"]",
"update_areas",
"=",
"[",
"]",
"invalid_areas",
"=",
"{",
"}",
"for",
"sa",
",",
"count",
"in",
"stop_areas",
".",
"items",
"(",
")",
":",
"max_count",
"=",
"[",
"k",
"for",
"k",
",",
"v",
"in",
"count",
".",
"items",
"(",
")",
"if",
"v",
"==",
"max",
"(",
"count",
".",
"values",
"(",
")",
")",
"]",
"if",
"len",
"(",
"max_count",
")",
"==",
"1",
":",
"update_areas",
".",
"append",
"(",
"{",
"\"code\"",
":",
"sa",
",",
"ref",
":",
"max_count",
"[",
"0",
"]",
"}",
")",
"else",
":",
"invalid_areas",
"[",
"sa",
"]",
"=",
"max_count",
"return",
"update_areas",
",",
"invalid_areas"
] | Finds the mode of references for each stop area. | [
"Finds",
"the",
"mode",
"of",
"references",
"for",
"each",
"stop",
"area",
"."
] | [
"\"\"\" Finds the mode of references for each stop area.\n\n The query results must have 3 columns: primary key, foreign key\n reference and number of stop points within each area matching that\n reference, in that order.\n\n :param ref: Name of the reference column.\n :returns: Two lists; one to be to be used with `bulk_update_mappings`\n and the other strings for invalid areas.\n \"\"\"",
"# Group by stop area and reference",
"# Check each area and find mode matching reference"
] | [
{
"param": "query_result",
"type": null
},
{
"param": "ref",
"type": null
}
] | {
"returns": [
{
"docstring": "Two lists; one to be to be used with `bulk_update_mappings`\nand the other strings for invalid areas.",
"docstring_tokens": [
"Two",
"lists",
";",
"one",
"to",
"be",
"to",
"be",
"used",
"with",
"`",
"bulk_update_mappings",
"`",
"and",
"the",
"other",
"strings",
"for",
"invalid",
"areas",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "query_result",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "ref",
"type": null,
"docstring": "Name of the reference column.",
"docstring_tokens": [
"Name",
"of",
"the",
"reference",
"column",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import collections
def _find_stop_area_mode(query_result, ref):
stop_areas = collections.defaultdict(dict)
for row in query_result:
stop_areas[row[0]][row[1]] = row[2]
update_areas = []
invalid_areas = {}
for sa, count in stop_areas.items():
max_count = [k for k, v in count.items() if v == max(count.values())]
if len(max_count) == 1:
update_areas.append({"code": sa, ref: max_count[0]})
else:
invalid_areas[sa] = max_count
return update_areas, invalid_areas |
f1dfd1277ba810a4fdb1dd0e7b4ca3a004196f29 | macph/nextbus | nextbus/views.py | [
"MIT"
] | Python | _display_operators | <not_specific> | def _display_operators(operators):
""" Returns sorted list of operators with any information. """
def sort_name(o): return o.name
def filter_op(o): return any([o.email, o.address, o.website, o.twitter])
return sorted(filter(filter_op, operators), key=sort_name) | Returns sorted list of operators with any information. | Returns sorted list of operators with any information. | [
"Returns",
"sorted",
"list",
"of",
"operators",
"with",
"any",
"information",
"."
] | def _display_operators(operators):
def sort_name(o): return o.name
def filter_op(o): return any([o.email, o.address, o.website, o.twitter])
return sorted(filter(filter_op, operators), key=sort_name) | [
"def",
"_display_operators",
"(",
"operators",
")",
":",
"def",
"sort_name",
"(",
"o",
")",
":",
"return",
"o",
".",
"name",
"def",
"filter_op",
"(",
"o",
")",
":",
"return",
"any",
"(",
"[",
"o",
".",
"email",
",",
"o",
".",
"address",
",",
"o",
".",
"website",
",",
"o",
".",
"twitter",
"]",
")",
"return",
"sorted",
"(",
"filter",
"(",
"filter_op",
",",
"operators",
")",
",",
"key",
"=",
"sort_name",
")"
] | Returns sorted list of operators with any information. | [
"Returns",
"sorted",
"list",
"of",
"operators",
"with",
"any",
"information",
"."
] | [
"\"\"\" Returns sorted list of operators with any information. \"\"\""
] | [
{
"param": "operators",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "operators",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _display_operators(operators):
def sort_name(o): return o.name
def filter_op(o): return any([o.email, o.address, o.website, o.twitter])
return sorted(filter(filter_op, operators), key=sort_name) |
b8af42c3035877a1083808f4d71d1c2518314a01 | macph/nextbus | nextbus/graph.py | [
"MIT"
] | Python | _merge_forward | null | def _merge_forward(graph, sequence, path, index):
""" Merges path into sequence, ensuring all new vertices follows the
existing ones in the adjacency list.
"""
i = index
for v in path:
if v in sequence:
continue
# Check if any later vertices have this path and move index
after = [j for j, w in enumerate(sequence[i:], i)
if v in graph.following(w)]
if after:
i = after[-1] + 1
sequence.insert(i, v)
i += 1 | Merges path into sequence, ensuring all new vertices follows the
existing ones in the adjacency list.
| Merges path into sequence, ensuring all new vertices follows the
existing ones in the adjacency list. | [
"Merges",
"path",
"into",
"sequence",
"ensuring",
"all",
"new",
"vertices",
"follows",
"the",
"existing",
"ones",
"in",
"the",
"adjacency",
"list",
"."
] | def _merge_forward(graph, sequence, path, index):
i = index
for v in path:
if v in sequence:
continue
after = [j for j, w in enumerate(sequence[i:], i)
if v in graph.following(w)]
if after:
i = after[-1] + 1
sequence.insert(i, v)
i += 1 | [
"def",
"_merge_forward",
"(",
"graph",
",",
"sequence",
",",
"path",
",",
"index",
")",
":",
"i",
"=",
"index",
"for",
"v",
"in",
"path",
":",
"if",
"v",
"in",
"sequence",
":",
"continue",
"after",
"=",
"[",
"j",
"for",
"j",
",",
"w",
"in",
"enumerate",
"(",
"sequence",
"[",
"i",
":",
"]",
",",
"i",
")",
"if",
"v",
"in",
"graph",
".",
"following",
"(",
"w",
")",
"]",
"if",
"after",
":",
"i",
"=",
"after",
"[",
"-",
"1",
"]",
"+",
"1",
"sequence",
".",
"insert",
"(",
"i",
",",
"v",
")",
"i",
"+=",
"1"
] | Merges path into sequence, ensuring all new vertices follows the
existing ones in the adjacency list. | [
"Merges",
"path",
"into",
"sequence",
"ensuring",
"all",
"new",
"vertices",
"follows",
"the",
"existing",
"ones",
"in",
"the",
"adjacency",
"list",
"."
] | [
"\"\"\" Merges path into sequence, ensuring all new vertices follows the\n existing ones in the adjacency list.\n \"\"\"",
"# Check if any later vertices have this path and move index"
] | [
{
"param": "graph",
"type": null
},
{
"param": "sequence",
"type": null
},
{
"param": "path",
"type": null
},
{
"param": "index",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "graph",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "sequence",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "path",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "index",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _merge_forward(graph, sequence, path, index):
i = index
for v in path:
if v in sequence:
continue
after = [j for j, w in enumerate(sequence[i:], i)
if v in graph.following(w)]
if after:
i = after[-1] + 1
sequence.insert(i, v)
i += 1 |
b8af42c3035877a1083808f4d71d1c2518314a01 | macph/nextbus | nextbus/graph.py | [
"MIT"
] | Python | _merge_backward | null | def _merge_backward(graph, sequence, path, index):
""" Merges path into sequence, ensuring all new vertices precedes the
existing ones in the adjacency list.
"""
i = index
for v in path[::-1]:
if v in sequence:
continue
# Check if any previous vertices have this path and move index
after = [i - j for j, w in enumerate(sequence[i::-1])
if v in graph.preceding(w)]
if after:
i = after[-1]
sequence.insert(i, v) | Merges path into sequence, ensuring all new vertices precedes the
existing ones in the adjacency list.
| Merges path into sequence, ensuring all new vertices precedes the
existing ones in the adjacency list. | [
"Merges",
"path",
"into",
"sequence",
"ensuring",
"all",
"new",
"vertices",
"precedes",
"the",
"existing",
"ones",
"in",
"the",
"adjacency",
"list",
"."
] | def _merge_backward(graph, sequence, path, index):
i = index
for v in path[::-1]:
if v in sequence:
continue
after = [i - j for j, w in enumerate(sequence[i::-1])
if v in graph.preceding(w)]
if after:
i = after[-1]
sequence.insert(i, v) | [
"def",
"_merge_backward",
"(",
"graph",
",",
"sequence",
",",
"path",
",",
"index",
")",
":",
"i",
"=",
"index",
"for",
"v",
"in",
"path",
"[",
":",
":",
"-",
"1",
"]",
":",
"if",
"v",
"in",
"sequence",
":",
"continue",
"after",
"=",
"[",
"i",
"-",
"j",
"for",
"j",
",",
"w",
"in",
"enumerate",
"(",
"sequence",
"[",
"i",
":",
":",
"-",
"1",
"]",
")",
"if",
"v",
"in",
"graph",
".",
"preceding",
"(",
"w",
")",
"]",
"if",
"after",
":",
"i",
"=",
"after",
"[",
"-",
"1",
"]",
"sequence",
".",
"insert",
"(",
"i",
",",
"v",
")"
] | Merges path into sequence, ensuring all new vertices precedes the
existing ones in the adjacency list. | [
"Merges",
"path",
"into",
"sequence",
"ensuring",
"all",
"new",
"vertices",
"precedes",
"the",
"existing",
"ones",
"in",
"the",
"adjacency",
"list",
"."
] | [
"\"\"\" Merges path into sequence, ensuring all new vertices precedes the\n existing ones in the adjacency list.\n \"\"\"",
"# Check if any previous vertices have this path and move index"
] | [
{
"param": "graph",
"type": null
},
{
"param": "sequence",
"type": null
},
{
"param": "path",
"type": null
},
{
"param": "index",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "graph",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "sequence",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "path",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "index",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _merge_backward(graph, sequence, path, index):
i = index
for v in path[::-1]:
if v in sequence:
continue
after = [i - j for j, w in enumerate(sequence[i::-1])
if v in graph.preceding(w)]
if after:
i = after[-1]
sequence.insert(i, v) |
b8af42c3035877a1083808f4d71d1c2518314a01 | macph/nextbus | nextbus/graph.py | [
"MIT"
] | Python | _count_cycles | <not_specific> | def _count_cycles(graph, sequence):
""" Counts number of cycles in a sequence by checking the preceding nodes
for every vertex in order.
"""
cycles = set()
indices = {v: i for i, v in enumerate(sequence)}
for v in sequence:
cycles |= {(u, v) for u in graph.preceding(v)
if indices[u] > indices[v]}
return cycles | Counts number of cycles in a sequence by checking the preceding nodes
for every vertex in order.
| Counts number of cycles in a sequence by checking the preceding nodes
for every vertex in order. | [
"Counts",
"number",
"of",
"cycles",
"in",
"a",
"sequence",
"by",
"checking",
"the",
"preceding",
"nodes",
"for",
"every",
"vertex",
"in",
"order",
"."
] | def _count_cycles(graph, sequence):
cycles = set()
indices = {v: i for i, v in enumerate(sequence)}
for v in sequence:
cycles |= {(u, v) for u in graph.preceding(v)
if indices[u] > indices[v]}
return cycles | [
"def",
"_count_cycles",
"(",
"graph",
",",
"sequence",
")",
":",
"cycles",
"=",
"set",
"(",
")",
"indices",
"=",
"{",
"v",
":",
"i",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"sequence",
")",
"}",
"for",
"v",
"in",
"sequence",
":",
"cycles",
"|=",
"{",
"(",
"u",
",",
"v",
")",
"for",
"u",
"in",
"graph",
".",
"preceding",
"(",
"v",
")",
"if",
"indices",
"[",
"u",
"]",
">",
"indices",
"[",
"v",
"]",
"}",
"return",
"cycles"
] | Counts number of cycles in a sequence by checking the preceding nodes
for every vertex in order. | [
"Counts",
"number",
"of",
"cycles",
"in",
"a",
"sequence",
"by",
"checking",
"the",
"preceding",
"nodes",
"for",
"every",
"vertex",
"in",
"order",
"."
] | [
"\"\"\" Counts number of cycles in a sequence by checking the preceding nodes\n for every vertex in order.\n \"\"\""
] | [
{
"param": "graph",
"type": null
},
{
"param": "sequence",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "graph",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "sequence",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _count_cycles(graph, sequence):
cycles = set()
indices = {v: i for i, v in enumerate(sequence)}
for v in sequence:
cycles |= {(u, v) for u in graph.preceding(v)
if indices[u] > indices[v]}
return cycles |
b8af42c3035877a1083808f4d71d1c2518314a01 | macph/nextbus | nextbus/graph.py | [
"MIT"
] | Python | _median | <not_specific> | def _median(collection):
""" Calculates the median of an collection, eg a list. """
ordered = sorted(collection)
len_ = len(collection)
middle = len_ // 2
if not ordered:
return -1
elif len_ % 2 == 1:
return ordered[middle]
else:
return (ordered[middle - 1] + ordered[middle]) / 2 | Calculates the median of an collection, eg a list. | Calculates the median of an collection, eg a list. | [
"Calculates",
"the",
"median",
"of",
"an",
"collection",
"eg",
"a",
"list",
"."
] | def _median(collection):
ordered = sorted(collection)
len_ = len(collection)
middle = len_ // 2
if not ordered:
return -1
elif len_ % 2 == 1:
return ordered[middle]
else:
return (ordered[middle - 1] + ordered[middle]) / 2 | [
"def",
"_median",
"(",
"collection",
")",
":",
"ordered",
"=",
"sorted",
"(",
"collection",
")",
"len_",
"=",
"len",
"(",
"collection",
")",
"middle",
"=",
"len_",
"//",
"2",
"if",
"not",
"ordered",
":",
"return",
"-",
"1",
"elif",
"len_",
"%",
"2",
"==",
"1",
":",
"return",
"ordered",
"[",
"middle",
"]",
"else",
":",
"return",
"(",
"ordered",
"[",
"middle",
"-",
"1",
"]",
"+",
"ordered",
"[",
"middle",
"]",
")",
"/",
"2"
] | Calculates the median of an collection, eg a list. | [
"Calculates",
"the",
"median",
"of",
"an",
"collection",
"eg",
"a",
"list",
"."
] | [
"\"\"\" Calculates the median of an collection, eg a list. \"\"\""
] | [
{
"param": "collection",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "collection",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _median(collection):
ordered = sorted(collection)
len_ = len(collection)
middle = len_ // 2
if not ordered:
return -1
elif len_ % 2 == 1:
return ordered[middle]
else:
return (ordered[middle - 1] + ordered[middle]) / 2 |
b8af42c3035877a1083808f4d71d1c2518314a01 | macph/nextbus | nextbus/graph.py | [
"MIT"
] | Python | _transpose_order | <not_specific> | def _transpose_order(row, forward=True):
""" Swaps lines within a row to see if the number of crossings improve. """
len_ = len(row.end) if forward else len(row.start)
order = list(range(len_))
if len_ < 2:
return order
crossings = row.count_crossings()
improved = True
while improved:
improved = False
for i in range(len_ - 1):
new_order = order[:i] + [order[i + 1], order[i]] + order[i + 2:]
if forward:
temp = [set(row.end[j]) for j in new_order]
new_crossings = row.count_crossings(end=temp)
else:
temp = [set(row.start[j]) for j in new_order]
new_crossings = row.count_crossings(start=temp)
if new_crossings < crossings:
order = new_order
crossings = new_crossings
improved = True
return order | Swaps lines within a row to see if the number of crossings improve. | Swaps lines within a row to see if the number of crossings improve. | [
"Swaps",
"lines",
"within",
"a",
"row",
"to",
"see",
"if",
"the",
"number",
"of",
"crossings",
"improve",
"."
] | def _transpose_order(row, forward=True):
len_ = len(row.end) if forward else len(row.start)
order = list(range(len_))
if len_ < 2:
return order
crossings = row.count_crossings()
improved = True
while improved:
improved = False
for i in range(len_ - 1):
new_order = order[:i] + [order[i + 1], order[i]] + order[i + 2:]
if forward:
temp = [set(row.end[j]) for j in new_order]
new_crossings = row.count_crossings(end=temp)
else:
temp = [set(row.start[j]) for j in new_order]
new_crossings = row.count_crossings(start=temp)
if new_crossings < crossings:
order = new_order
crossings = new_crossings
improved = True
return order | [
"def",
"_transpose_order",
"(",
"row",
",",
"forward",
"=",
"True",
")",
":",
"len_",
"=",
"len",
"(",
"row",
".",
"end",
")",
"if",
"forward",
"else",
"len",
"(",
"row",
".",
"start",
")",
"order",
"=",
"list",
"(",
"range",
"(",
"len_",
")",
")",
"if",
"len_",
"<",
"2",
":",
"return",
"order",
"crossings",
"=",
"row",
".",
"count_crossings",
"(",
")",
"improved",
"=",
"True",
"while",
"improved",
":",
"improved",
"=",
"False",
"for",
"i",
"in",
"range",
"(",
"len_",
"-",
"1",
")",
":",
"new_order",
"=",
"order",
"[",
":",
"i",
"]",
"+",
"[",
"order",
"[",
"i",
"+",
"1",
"]",
",",
"order",
"[",
"i",
"]",
"]",
"+",
"order",
"[",
"i",
"+",
"2",
":",
"]",
"if",
"forward",
":",
"temp",
"=",
"[",
"set",
"(",
"row",
".",
"end",
"[",
"j",
"]",
")",
"for",
"j",
"in",
"new_order",
"]",
"new_crossings",
"=",
"row",
".",
"count_crossings",
"(",
"end",
"=",
"temp",
")",
"else",
":",
"temp",
"=",
"[",
"set",
"(",
"row",
".",
"start",
"[",
"j",
"]",
")",
"for",
"j",
"in",
"new_order",
"]",
"new_crossings",
"=",
"row",
".",
"count_crossings",
"(",
"start",
"=",
"temp",
")",
"if",
"new_crossings",
"<",
"crossings",
":",
"order",
"=",
"new_order",
"crossings",
"=",
"new_crossings",
"improved",
"=",
"True",
"return",
"order"
] | Swaps lines within a row to see if the number of crossings improve. | [
"Swaps",
"lines",
"within",
"a",
"row",
"to",
"see",
"if",
"the",
"number",
"of",
"crossings",
"improve",
"."
] | [
"\"\"\" Swaps lines within a row to see if the number of crossings improve. \"\"\""
] | [
{
"param": "row",
"type": null
},
{
"param": "forward",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "row",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "forward",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _transpose_order(row, forward=True):
len_ = len(row.end) if forward else len(row.start)
order = list(range(len_))
if len_ < 2:
return order
crossings = row.count_crossings()
improved = True
while improved:
improved = False
for i in range(len_ - 1):
new_order = order[:i] + [order[i + 1], order[i]] + order[i + 2:]
if forward:
temp = [set(row.end[j]) for j in new_order]
new_crossings = row.count_crossings(end=temp)
else:
temp = [set(row.start[j]) for j in new_order]
new_crossings = row.count_crossings(start=temp)
if new_crossings < crossings:
order = new_order
crossings = new_crossings
improved = True
return order |
b8af42c3035877a1083808f4d71d1c2518314a01 | macph/nextbus | nextbus/graph.py | [
"MIT"
] | Python | _memoize_graph | <not_specific> | def _memoize_graph(graph, method):
""" Wraps graph method in a function that remembers adjacency list and last
result.
"""
adj = None
result = None
@functools.wraps(method)
def _method(*args, **kwargs):
nonlocal adj, result
new_adj = graph.adj
if adj != new_adj:
result = method(*args, **kwargs)
adj = new_adj
return result
return _method | Wraps graph method in a function that remembers adjacency list and last
result.
| Wraps graph method in a function that remembers adjacency list and last
result. | [
"Wraps",
"graph",
"method",
"in",
"a",
"function",
"that",
"remembers",
"adjacency",
"list",
"and",
"last",
"result",
"."
] | def _memoize_graph(graph, method):
adj = None
result = None
@functools.wraps(method)
def _method(*args, **kwargs):
nonlocal adj, result
new_adj = graph.adj
if adj != new_adj:
result = method(*args, **kwargs)
adj = new_adj
return result
return _method | [
"def",
"_memoize_graph",
"(",
"graph",
",",
"method",
")",
":",
"adj",
"=",
"None",
"result",
"=",
"None",
"@",
"functools",
".",
"wraps",
"(",
"method",
")",
"def",
"_method",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"nonlocal",
"adj",
",",
"result",
"new_adj",
"=",
"graph",
".",
"adj",
"if",
"adj",
"!=",
"new_adj",
":",
"result",
"=",
"method",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"adj",
"=",
"new_adj",
"return",
"result",
"return",
"_method"
] | Wraps graph method in a function that remembers adjacency list and last
result. | [
"Wraps",
"graph",
"method",
"in",
"a",
"function",
"that",
"remembers",
"adjacency",
"list",
"and",
"last",
"result",
"."
] | [
"\"\"\" Wraps graph method in a function that remembers adjacency list and last\n result.\n \"\"\""
] | [
{
"param": "graph",
"type": null
},
{
"param": "method",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "graph",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "method",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import functools
def _memoize_graph(graph, method):
adj = None
result = None
@functools.wraps(method)
def _method(*args, **kwargs):
nonlocal adj, result
new_adj = graph.adj
if adj != new_adj:
result = method(*args, **kwargs)
adj = new_adj
return result
return _method |
b8af42c3035877a1083808f4d71d1c2518314a01 | macph/nextbus | nextbus/graph.py | [
"MIT"
] | Python | from_adj | <not_specific> | def from_adj(cls, adj_list):
""" Creates graph from adjacency list as a dict of vertices and
iterables of following vertices.
"""
adj = {}
for start, end in adj_list.items():
adj[start] = set(end)
for v in set().union(*adj_list.values()):
if v not in adj:
adj[v] = set()
new_graph = cls()
new_graph._v = adj
return new_graph | Creates graph from adjacency list as a dict of vertices and
iterables of following vertices.
| Creates graph from adjacency list as a dict of vertices and
iterables of following vertices. | [
"Creates",
"graph",
"from",
"adjacency",
"list",
"as",
"a",
"dict",
"of",
"vertices",
"and",
"iterables",
"of",
"following",
"vertices",
"."
] | def from_adj(cls, adj_list):
adj = {}
for start, end in adj_list.items():
adj[start] = set(end)
for v in set().union(*adj_list.values()):
if v not in adj:
adj[v] = set()
new_graph = cls()
new_graph._v = adj
return new_graph | [
"def",
"from_adj",
"(",
"cls",
",",
"adj_list",
")",
":",
"adj",
"=",
"{",
"}",
"for",
"start",
",",
"end",
"in",
"adj_list",
".",
"items",
"(",
")",
":",
"adj",
"[",
"start",
"]",
"=",
"set",
"(",
"end",
")",
"for",
"v",
"in",
"set",
"(",
")",
".",
"union",
"(",
"*",
"adj_list",
".",
"values",
"(",
")",
")",
":",
"if",
"v",
"not",
"in",
"adj",
":",
"adj",
"[",
"v",
"]",
"=",
"set",
"(",
")",
"new_graph",
"=",
"cls",
"(",
")",
"new_graph",
".",
"_v",
"=",
"adj",
"return",
"new_graph"
] | Creates graph from adjacency list as a dict of vertices and
iterables of following vertices. | [
"Creates",
"graph",
"from",
"adjacency",
"list",
"as",
"a",
"dict",
"of",
"vertices",
"and",
"iterables",
"of",
"following",
"vertices",
"."
] | [
"\"\"\" Creates graph from adjacency list as a dict of vertices and\n iterables of following vertices.\n \"\"\""
] | [
{
"param": "cls",
"type": null
},
{
"param": "adj_list",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "adj_list",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def from_adj(cls, adj_list):
adj = {}
for start, end in adj_list.items():
adj[start] = set(end)
for v in set().union(*adj_list.values()):
if v not in adj:
adj[v] = set()
new_graph = cls()
new_graph._v = adj
return new_graph |
af385c977a333a00ce6bbe119e31f05e674dca99 | macph/nextbus | nextbus/models/__init__.py | [
"MIT"
] | Python | define_collation | null | def define_collation(_, connection, **kw):
""" Define the numeric collation required for some text columns. """
connection.execute(
"CREATE COLLATION IF NOT EXISTS utf8_numeric "
"(provider = icu, locale = 'en@colNumeric=yes')"
) | Define the numeric collation required for some text columns. | Define the numeric collation required for some text columns. | [
"Define",
"the",
"numeric",
"collation",
"required",
"for",
"some",
"text",
"columns",
"."
] | def define_collation(_, connection, **kw):
connection.execute(
"CREATE COLLATION IF NOT EXISTS utf8_numeric "
"(provider = icu, locale = 'en@colNumeric=yes')"
) | [
"def",
"define_collation",
"(",
"_",
",",
"connection",
",",
"**",
"kw",
")",
":",
"connection",
".",
"execute",
"(",
"\"CREATE COLLATION IF NOT EXISTS utf8_numeric \"",
"\"(provider = icu, locale = 'en@colNumeric=yes')\"",
")"
] | Define the numeric collation required for some text columns. | [
"Define",
"the",
"numeric",
"collation",
"required",
"for",
"some",
"text",
"columns",
"."
] | [
"\"\"\" Define the numeric collation required for some text columns. \"\"\""
] | [
{
"param": "_",
"type": null
},
{
"param": "connection",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "_",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "connection",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def define_collation(_, connection, **kw):
connection.execute(
"CREATE COLLATION IF NOT EXISTS utf8_numeric "
"(provider = icu, locale = 'en@colNumeric=yes')"
) |
215e59d85b1b9e6cbe706aaa01863855ea64dada | macph/nextbus | nextbus/resources.py | [
"MIT"
] | Python | _list_geojson | <not_specific> | def _list_geojson(list_stops):
""" Creates a list of stop data in GeoJSON format.
:param list_stops: List of StopPoint objects.
:returns: JSON-serializable dict.
"""
geojson = {
"type": "FeatureCollection",
"features": [s.to_geojson() for s in list_stops]
}
return geojson | Creates a list of stop data in GeoJSON format.
:param list_stops: List of StopPoint objects.
:returns: JSON-serializable dict.
| Creates a list of stop data in GeoJSON format. | [
"Creates",
"a",
"list",
"of",
"stop",
"data",
"in",
"GeoJSON",
"format",
"."
] | def _list_geojson(list_stops):
geojson = {
"type": "FeatureCollection",
"features": [s.to_geojson() for s in list_stops]
}
return geojson | [
"def",
"_list_geojson",
"(",
"list_stops",
")",
":",
"geojson",
"=",
"{",
"\"type\"",
":",
"\"FeatureCollection\"",
",",
"\"features\"",
":",
"[",
"s",
".",
"to_geojson",
"(",
")",
"for",
"s",
"in",
"list_stops",
"]",
"}",
"return",
"geojson"
] | Creates a list of stop data in GeoJSON format. | [
"Creates",
"a",
"list",
"of",
"stop",
"data",
"in",
"GeoJSON",
"format",
"."
] | [
"\"\"\" Creates a list of stop data in GeoJSON format.\n\n :param list_stops: List of StopPoint objects.\n :returns: JSON-serializable dict.\n \"\"\""
] | [
{
"param": "list_stops",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "list_stops",
"type": null,
"docstring": "List of StopPoint objects.",
"docstring_tokens": [
"List",
"of",
"StopPoint",
"objects",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _list_geojson(list_stops):
geojson = {
"type": "FeatureCollection",
"features": [s.to_geojson() for s in list_stops]
}
return geojson |
5d7302ec41cec7840082d2f8888a4856f61a9e5b | macph/nextbus | nextbus/timetable.py | [
"MIT"
] | Python | from_row | <not_specific> | def from_row(cls, row):
""" Creates TimetableStop instance from row returned from query. """
return cls(
row.stop_point_ref,
row.arrive,
row.depart,
row.timing_point,
row.utc_arrive,
row.utc_depart,
) | Creates TimetableStop instance from row returned from query. | Creates TimetableStop instance from row returned from query. | [
"Creates",
"TimetableStop",
"instance",
"from",
"row",
"returned",
"from",
"query",
"."
] | def from_row(cls, row):
return cls(
row.stop_point_ref,
row.arrive,
row.depart,
row.timing_point,
row.utc_arrive,
row.utc_depart,
) | [
"def",
"from_row",
"(",
"cls",
",",
"row",
")",
":",
"return",
"cls",
"(",
"row",
".",
"stop_point_ref",
",",
"row",
".",
"arrive",
",",
"row",
".",
"depart",
",",
"row",
".",
"timing_point",
",",
"row",
".",
"utc_arrive",
",",
"row",
".",
"utc_depart",
",",
")"
] | Creates TimetableStop instance from row returned from query. | [
"Creates",
"TimetableStop",
"instance",
"from",
"row",
"returned",
"from",
"query",
"."
] | [
"\"\"\" Creates TimetableStop instance from row returned from query. \"\"\""
] | [
{
"param": "cls",
"type": null
},
{
"param": "row",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "row",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def from_row(cls, row):
return cls(
row.stop_point_ref,
row.arrive,
row.depart,
row.timing_point,
row.utc_arrive,
row.utc_depart,
) |
3d75a9c4b8e0c48f6643a1588804a95005dc7426 | macph/nextbus | nextbus/populate/utils.py | [
"MIT"
] | Python | xml_as_dict | <not_specific> | def xml_as_dict(element):
""" Creates a dictionary from a flat XML element.
:param element: XML Element object
:returns: A dictionary with keys matching subelement tags in the
element.
"""
data = {}
for e in element:
if e.tag in data:
raise ValueError(f"Multiple elements have the same tag {e.tag!r}.")
default = e.get("default", None)
data[e.tag] = default if e.text is None else e.text
return data | Creates a dictionary from a flat XML element.
:param element: XML Element object
:returns: A dictionary with keys matching subelement tags in the
element.
| Creates a dictionary from a flat XML element. | [
"Creates",
"a",
"dictionary",
"from",
"a",
"flat",
"XML",
"element",
"."
] | def xml_as_dict(element):
data = {}
for e in element:
if e.tag in data:
raise ValueError(f"Multiple elements have the same tag {e.tag!r}.")
default = e.get("default", None)
data[e.tag] = default if e.text is None else e.text
return data | [
"def",
"xml_as_dict",
"(",
"element",
")",
":",
"data",
"=",
"{",
"}",
"for",
"e",
"in",
"element",
":",
"if",
"e",
".",
"tag",
"in",
"data",
":",
"raise",
"ValueError",
"(",
"f\"Multiple elements have the same tag {e.tag!r}.\"",
")",
"default",
"=",
"e",
".",
"get",
"(",
"\"default\"",
",",
"None",
")",
"data",
"[",
"e",
".",
"tag",
"]",
"=",
"default",
"if",
"e",
".",
"text",
"is",
"None",
"else",
"e",
".",
"text",
"return",
"data"
] | Creates a dictionary from a flat XML element. | [
"Creates",
"a",
"dictionary",
"from",
"a",
"flat",
"XML",
"element",
"."
] | [
"\"\"\" Creates a dictionary from a flat XML element.\n\n :param element: XML Element object\n :returns: A dictionary with keys matching subelement tags in the\n element.\n \"\"\""
] | [
{
"param": "element",
"type": null
}
] | {
"returns": [
{
"docstring": "A dictionary with keys matching subelement tags in the\nelement.",
"docstring_tokens": [
"A",
"dictionary",
"with",
"keys",
"matching",
"subelement",
"tags",
"in",
"the",
"element",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "element",
"type": null,
"docstring": "XML Element object",
"docstring_tokens": [
"XML",
"Element",
"object"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def xml_as_dict(element):
data = {}
for e in element:
if e.tag in data:
raise ValueError(f"Multiple elements have the same tag {e.tag!r}.")
default = e.get("default", None)
data[e.tag] = default if e.text is None else e.text
return data |
3d75a9c4b8e0c48f6643a1588804a95005dc7426 | macph/nextbus | nextbus/populate/utils.py | [
"MIT"
] | Python | _convert_to_text | <not_specific> | def _convert_to_text(result):
""" Takes first element from list and returns text or None. """
if isinstance(result, list) and not result:
node = None
elif isinstance(result, list) and len(result) == 1:
node = result[0]
elif isinstance(result, list):
raise ValueError("XPath query returned multiple elements.")
else:
node = result
try:
return node.text
except AttributeError:
return node | Takes first element from list and returns text or None. | Takes first element from list and returns text or None. | [
"Takes",
"first",
"element",
"from",
"list",
"and",
"returns",
"text",
"or",
"None",
"."
] | def _convert_to_text(result):
if isinstance(result, list) and not result:
node = None
elif isinstance(result, list) and len(result) == 1:
node = result[0]
elif isinstance(result, list):
raise ValueError("XPath query returned multiple elements.")
else:
node = result
try:
return node.text
except AttributeError:
return node | [
"def",
"_convert_to_text",
"(",
"result",
")",
":",
"if",
"isinstance",
"(",
"result",
",",
"list",
")",
"and",
"not",
"result",
":",
"node",
"=",
"None",
"elif",
"isinstance",
"(",
"result",
",",
"list",
")",
"and",
"len",
"(",
"result",
")",
"==",
"1",
":",
"node",
"=",
"result",
"[",
"0",
"]",
"elif",
"isinstance",
"(",
"result",
",",
"list",
")",
":",
"raise",
"ValueError",
"(",
"\"XPath query returned multiple elements.\"",
")",
"else",
":",
"node",
"=",
"result",
"try",
":",
"return",
"node",
".",
"text",
"except",
"AttributeError",
":",
"return",
"node"
] | Takes first element from list and returns text or None. | [
"Takes",
"first",
"element",
"from",
"list",
"and",
"returns",
"text",
"or",
"None",
"."
] | [
"\"\"\" Takes first element from list and returns text or None. \"\"\""
] | [
{
"param": "result",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "result",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _convert_to_text(result):
if isinstance(result, list) and not result:
node = None
elif isinstance(result, list) and len(result) == 1:
node = result[0]
elif isinstance(result, list):
raise ValueError("XPath query returned multiple elements.")
else:
node = result
try:
return node.text
except AttributeError:
return node |
3d75a9c4b8e0c48f6643a1588804a95005dc7426 | macph/nextbus | nextbus/populate/utils.py | [
"MIT"
] | Python | capitalize | <not_specific> | def capitalize(_, text):
""" Capitalises every word in a string, include these enclosed within
brackets and excluding apostrophes.
"""
list_words = text.lower().split()
for _w, word in enumerate(list_words):
for _c, char in enumerate(word):
if char.isalpha():
list_words[_w] = word[:_c] + char.upper() + word[_c+1:]
break
return " ".join(list_words) | Capitalises every word in a string, include these enclosed within
brackets and excluding apostrophes.
| Capitalises every word in a string, include these enclosed within
brackets and excluding apostrophes. | [
"Capitalises",
"every",
"word",
"in",
"a",
"string",
"include",
"these",
"enclosed",
"within",
"brackets",
"and",
"excluding",
"apostrophes",
"."
] | def capitalize(_, text):
list_words = text.lower().split()
for _w, word in enumerate(list_words):
for _c, char in enumerate(word):
if char.isalpha():
list_words[_w] = word[:_c] + char.upper() + word[_c+1:]
break
return " ".join(list_words) | [
"def",
"capitalize",
"(",
"_",
",",
"text",
")",
":",
"list_words",
"=",
"text",
".",
"lower",
"(",
")",
".",
"split",
"(",
")",
"for",
"_w",
",",
"word",
"in",
"enumerate",
"(",
"list_words",
")",
":",
"for",
"_c",
",",
"char",
"in",
"enumerate",
"(",
"word",
")",
":",
"if",
"char",
".",
"isalpha",
"(",
")",
":",
"list_words",
"[",
"_w",
"]",
"=",
"word",
"[",
":",
"_c",
"]",
"+",
"char",
".",
"upper",
"(",
")",
"+",
"word",
"[",
"_c",
"+",
"1",
":",
"]",
"break",
"return",
"\" \"",
".",
"join",
"(",
"list_words",
")"
] | Capitalises every word in a string, include these enclosed within
brackets and excluding apostrophes. | [
"Capitalises",
"every",
"word",
"in",
"a",
"string",
"include",
"these",
"enclosed",
"within",
"brackets",
"and",
"excluding",
"apostrophes",
"."
] | [
"\"\"\" Capitalises every word in a string, include these enclosed within\n brackets and excluding apostrophes.\n \"\"\""
] | [
{
"param": "_",
"type": null
},
{
"param": "text",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "_",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "text",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def capitalize(_, text):
list_words = text.lower().split()
for _w, word in enumerate(list_words):
for _c, char in enumerate(word):
if char.isalpha():
list_words[_w] = word[:_c] + char.upper() + word[_c+1:]
break
return " ".join(list_words) |
3d75a9c4b8e0c48f6643a1588804a95005dc7426 | macph/nextbus | nextbus/populate/utils.py | [
"MIT"
] | Python | _iter_every | null | def _iter_every(iterable, length):
""" Generator for iterable split into lists with maximum length. """
iterator = iter(iterable)
section = list(itertools.islice(iterator, length))
while section:
yield section
section = list(itertools.islice(iterator, length)) | Generator for iterable split into lists with maximum length. | Generator for iterable split into lists with maximum length. | [
"Generator",
"for",
"iterable",
"split",
"into",
"lists",
"with",
"maximum",
"length",
"."
] | def _iter_every(iterable, length):
iterator = iter(iterable)
section = list(itertools.islice(iterator, length))
while section:
yield section
section = list(itertools.islice(iterator, length)) | [
"def",
"_iter_every",
"(",
"iterable",
",",
"length",
")",
":",
"iterator",
"=",
"iter",
"(",
"iterable",
")",
"section",
"=",
"list",
"(",
"itertools",
".",
"islice",
"(",
"iterator",
",",
"length",
")",
")",
"while",
"section",
":",
"yield",
"section",
"section",
"=",
"list",
"(",
"itertools",
".",
"islice",
"(",
"iterator",
",",
"length",
")",
")"
] | Generator for iterable split into lists with maximum length. | [
"Generator",
"for",
"iterable",
"split",
"into",
"lists",
"with",
"maximum",
"length",
"."
] | [
"\"\"\" Generator for iterable split into lists with maximum length. \"\"\""
] | [
{
"param": "iterable",
"type": null
},
{
"param": "length",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "iterable",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "length",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import itertools
def _iter_every(iterable, length):
iterator = iter(iterable)
section = list(itertools.islice(iterator, length))
while section:
yield section
section = list(itertools.islice(iterator, length)) |
cbc58f1846fbb518eafcb252345529fc66de3f4b | macph/nextbus | nextbus/models/derived.py | [
"MIT"
] | Python | _apply_filters | <not_specific> | def _apply_filters(cls, match, groups=None, areas=None):
""" Apply filters to a search expression if they are specified.
:param match: The original query expression
:param groups: Groups, eg 'stop' or 'area'
:param areas: Administrative area codes to filter by
:returns: Query expression with added filters, if any
"""
if groups is not None:
if set(groups) - cls.GROUP_NAMES.keys():
raise ValueError(f"Groups {groups!r} contain invalid values.")
tables = []
for g in groups:
tables.extend(cls.GROUPS[g])
match = match.filter(cls.table_name.in_(tables))
if areas is not None:
match = match.filter(cls.admin_areas.overlap(areas))
return match | Apply filters to a search expression if they are specified.
:param match: The original query expression
:param groups: Groups, eg 'stop' or 'area'
:param areas: Administrative area codes to filter by
:returns: Query expression with added filters, if any
| Apply filters to a search expression if they are specified. | [
"Apply",
"filters",
"to",
"a",
"search",
"expression",
"if",
"they",
"are",
"specified",
"."
] | def _apply_filters(cls, match, groups=None, areas=None):
if groups is not None:
if set(groups) - cls.GROUP_NAMES.keys():
raise ValueError(f"Groups {groups!r} contain invalid values.")
tables = []
for g in groups:
tables.extend(cls.GROUPS[g])
match = match.filter(cls.table_name.in_(tables))
if areas is not None:
match = match.filter(cls.admin_areas.overlap(areas))
return match | [
"def",
"_apply_filters",
"(",
"cls",
",",
"match",
",",
"groups",
"=",
"None",
",",
"areas",
"=",
"None",
")",
":",
"if",
"groups",
"is",
"not",
"None",
":",
"if",
"set",
"(",
"groups",
")",
"-",
"cls",
".",
"GROUP_NAMES",
".",
"keys",
"(",
")",
":",
"raise",
"ValueError",
"(",
"f\"Groups {groups!r} contain invalid values.\"",
")",
"tables",
"=",
"[",
"]",
"for",
"g",
"in",
"groups",
":",
"tables",
".",
"extend",
"(",
"cls",
".",
"GROUPS",
"[",
"g",
"]",
")",
"match",
"=",
"match",
".",
"filter",
"(",
"cls",
".",
"table_name",
".",
"in_",
"(",
"tables",
")",
")",
"if",
"areas",
"is",
"not",
"None",
":",
"match",
"=",
"match",
".",
"filter",
"(",
"cls",
".",
"admin_areas",
".",
"overlap",
"(",
"areas",
")",
")",
"return",
"match"
] | Apply filters to a search expression if they are specified. | [
"Apply",
"filters",
"to",
"a",
"search",
"expression",
"if",
"they",
"are",
"specified",
"."
] | [
"\"\"\" Apply filters to a search expression if they are specified.\n\n :param match: The original query expression\n :param groups: Groups, eg 'stop' or 'area'\n :param areas: Administrative area codes to filter by\n :returns: Query expression with added filters, if any\n \"\"\""
] | [
{
"param": "cls",
"type": null
},
{
"param": "match",
"type": null
},
{
"param": "groups",
"type": null
},
{
"param": "areas",
"type": null
}
] | {
"returns": [
{
"docstring": "Query expression with added filters, if any",
"docstring_tokens": [
"Query",
"expression",
"with",
"added",
"filters",
"if",
"any"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "match",
"type": null,
"docstring": "The original query expression",
"docstring_tokens": [
"The",
"original",
"query",
"expression"
],
"default": null,
"is_optional": null
},
{
"identifier": "groups",
"type": null,
"docstring": "Groups, eg 'stop' or 'area'",
"docstring_tokens": [
"Groups",
"eg",
"'",
"stop",
"'",
"or",
"'",
"area",
"'"
],
"default": null,
"is_optional": null
},
{
"identifier": "areas",
"type": null,
"docstring": "Administrative area codes to filter by",
"docstring_tokens": [
"Administrative",
"area",
"codes",
"to",
"filter",
"by"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _apply_filters(cls, match, groups=None, areas=None):
if groups is not None:
if set(groups) - cls.GROUP_NAMES.keys():
raise ValueError(f"Groups {groups!r} contain invalid values.")
tables = []
for g in groups:
tables.extend(cls.GROUPS[g])
match = match.filter(cls.table_name.in_(tables))
if areas is not None:
match = match.filter(cls.admin_areas.overlap(areas))
return match |
9a6ea9567ca64c8e62bbebcb44c40fa08660c859 | macph/nextbus | nextbus/forms.py | [
"MIT"
] | Python | _date_long_form | <not_specific> | def _date_long_form(date):
""" Displays a date in long form, eg 'Monday 29th April 2019'. """
second_last = (date.day // 10) % 10
last = date.day % 10
if second_last != 1 and last == 1:
ordinal = "st"
elif second_last != 1 and last == 2:
ordinal = "nd"
elif second_last != 1 and last == 3:
ordinal = "rd"
else:
ordinal = "th"
return f"{date:%A} {date.day}{ordinal} {date:%B} {date.year}" | Displays a date in long form, eg 'Monday 29th April 2019'. | Displays a date in long form, eg 'Monday 29th April 2019'. | [
"Displays",
"a",
"date",
"in",
"long",
"form",
"eg",
"'",
"Monday",
"29th",
"April",
"2019",
"'",
"."
] | def _date_long_form(date):
second_last = (date.day // 10) % 10
last = date.day % 10
if second_last != 1 and last == 1:
ordinal = "st"
elif second_last != 1 and last == 2:
ordinal = "nd"
elif second_last != 1 and last == 3:
ordinal = "rd"
else:
ordinal = "th"
return f"{date:%A} {date.day}{ordinal} {date:%B} {date.year}" | [
"def",
"_date_long_form",
"(",
"date",
")",
":",
"second_last",
"=",
"(",
"date",
".",
"day",
"//",
"10",
")",
"%",
"10",
"last",
"=",
"date",
".",
"day",
"%",
"10",
"if",
"second_last",
"!=",
"1",
"and",
"last",
"==",
"1",
":",
"ordinal",
"=",
"\"st\"",
"elif",
"second_last",
"!=",
"1",
"and",
"last",
"==",
"2",
":",
"ordinal",
"=",
"\"nd\"",
"elif",
"second_last",
"!=",
"1",
"and",
"last",
"==",
"3",
":",
"ordinal",
"=",
"\"rd\"",
"else",
":",
"ordinal",
"=",
"\"th\"",
"return",
"f\"{date:%A} {date.day}{ordinal} {date:%B} {date.year}\""
] | Displays a date in long form, eg 'Monday 29th April 2019'. | [
"Displays",
"a",
"date",
"in",
"long",
"form",
"eg",
"'",
"Monday",
"29th",
"April",
"2019",
"'",
"."
] | [
"\"\"\" Displays a date in long form, eg 'Monday 29th April 2019'. \"\"\""
] | [
{
"param": "date",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "date",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def _date_long_form(date):
second_last = (date.day // 10) % 10
last = date.day % 10
if second_last != 1 and last == 1:
ordinal = "st"
elif second_last != 1 and last == 2:
ordinal = "nd"
elif second_last != 1 and last == 3:
ordinal = "rd"
else:
ordinal = "th"
return f"{date:%A} {date.day}{ordinal} {date:%B} {date.year}" |
10ea12e47bbfc326a8eff02a32b765fe37a42b11 | macph/nextbus | nextbus/populate/file_ops.py | [
"MIT"
] | Python | _file_name | <not_specific> | def _file_name(response):
""" Gets the file name from the response header or the URL name. """
content = response.headers.get("content-disposition")
if content and "filename" in content:
file_name = re.search(r"filename=(.+)", content).group(1)
else:
# Get the path and split it to get the rightmost part
path = urllib.parse.urlparse(response.url)[2]
file_name = path.split("/")[-1]
return file_name | Gets the file name from the response header or the URL name. | Gets the file name from the response header or the URL name. | [
"Gets",
"the",
"file",
"name",
"from",
"the",
"response",
"header",
"or",
"the",
"URL",
"name",
"."
] | def _file_name(response):
content = response.headers.get("content-disposition")
if content and "filename" in content:
file_name = re.search(r"filename=(.+)", content).group(1)
else:
path = urllib.parse.urlparse(response.url)[2]
file_name = path.split("/")[-1]
return file_name | [
"def",
"_file_name",
"(",
"response",
")",
":",
"content",
"=",
"response",
".",
"headers",
".",
"get",
"(",
"\"content-disposition\"",
")",
"if",
"content",
"and",
"\"filename\"",
"in",
"content",
":",
"file_name",
"=",
"re",
".",
"search",
"(",
"r\"filename=(.+)\"",
",",
"content",
")",
".",
"group",
"(",
"1",
")",
"else",
":",
"path",
"=",
"urllib",
".",
"parse",
".",
"urlparse",
"(",
"response",
".",
"url",
")",
"[",
"2",
"]",
"file_name",
"=",
"path",
".",
"split",
"(",
"\"/\"",
")",
"[",
"-",
"1",
"]",
"return",
"file_name"
] | Gets the file name from the response header or the URL name. | [
"Gets",
"the",
"file",
"name",
"from",
"the",
"response",
"header",
"or",
"the",
"URL",
"name",
"."
] | [
"\"\"\" Gets the file name from the response header or the URL name. \"\"\"",
"# Get the path and split it to get the rightmost part"
] | [
{
"param": "response",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "response",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import re
import urllib
def _file_name(response):
content = response.headers.get("content-disposition")
if content and "filename" in content:
file_name = re.search(r"filename=(.+)", content).group(1)
else:
path = urllib.parse.urlparse(response.url)[2]
file_name = path.split("/")[-1]
return file_name |
10ea12e47bbfc326a8eff02a32b765fe37a42b11 | macph/nextbus | nextbus/populate/file_ops.py | [
"MIT"
] | Python | iter_archive | null | def iter_archive(archive):
""" Generator function iterating over all files in a zipped archive file.
The generator will open each file, yielding its file-like object. This
file will be closed before opening the next file. When the iteration
is finished the archive is closed.
:param archive: Path to the archive file.
:returns: File-like object for current archived file.
"""
zip_ = zipfile.ZipFile(archive)
for name in zip_.namelist():
with zip_.open(name) as current:
yield current
zip_.close() | Generator function iterating over all files in a zipped archive file.
The generator will open each file, yielding its file-like object. This
file will be closed before opening the next file. When the iteration
is finished the archive is closed.
:param archive: Path to the archive file.
:returns: File-like object for current archived file.
| Generator function iterating over all files in a zipped archive file.
The generator will open each file, yielding its file-like object. This
file will be closed before opening the next file. When the iteration
is finished the archive is closed. | [
"Generator",
"function",
"iterating",
"over",
"all",
"files",
"in",
"a",
"zipped",
"archive",
"file",
".",
"The",
"generator",
"will",
"open",
"each",
"file",
"yielding",
"its",
"file",
"-",
"like",
"object",
".",
"This",
"file",
"will",
"be",
"closed",
"before",
"opening",
"the",
"next",
"file",
".",
"When",
"the",
"iteration",
"is",
"finished",
"the",
"archive",
"is",
"closed",
"."
] | def iter_archive(archive):
zip_ = zipfile.ZipFile(archive)
for name in zip_.namelist():
with zip_.open(name) as current:
yield current
zip_.close() | [
"def",
"iter_archive",
"(",
"archive",
")",
":",
"zip_",
"=",
"zipfile",
".",
"ZipFile",
"(",
"archive",
")",
"for",
"name",
"in",
"zip_",
".",
"namelist",
"(",
")",
":",
"with",
"zip_",
".",
"open",
"(",
"name",
")",
"as",
"current",
":",
"yield",
"current",
"zip_",
".",
"close",
"(",
")"
] | Generator function iterating over all files in a zipped archive file. | [
"Generator",
"function",
"iterating",
"over",
"all",
"files",
"in",
"a",
"zipped",
"archive",
"file",
"."
] | [
"\"\"\" Generator function iterating over all files in a zipped archive file.\n\n The generator will open each file, yielding its file-like object. This\n file will be closed before opening the next file. When the iteration\n is finished the archive is closed.\n\n :param archive: Path to the archive file.\n :returns: File-like object for current archived file.\n \"\"\""
] | [
{
"param": "archive",
"type": null
}
] | {
"returns": [
{
"docstring": "File-like object for current archived file.",
"docstring_tokens": [
"File",
"-",
"like",
"object",
"for",
"current",
"archived",
"file",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "archive",
"type": null,
"docstring": "Path to the archive file.",
"docstring_tokens": [
"Path",
"to",
"the",
"archive",
"file",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import zipfile
def iter_archive(archive):
zip_ = zipfile.ZipFile(archive)
for name in zip_.namelist():
with zip_.open(name) as current:
yield current
zip_.close() |
6a1c3ea6d5dc629b0e1f2d46d2f4f96c249a68ef | mikeatm/pythontutorial | science/02_vectorize.py | [
"Info-ZIP"
] | Python | convert_to_polar | <not_specific> | def convert_to_polar(N):
"""
Generate a random set of N (x,y) cartesian coordinates,
convert them to polar coordinates.
Hints
tuple (a,b) in python is a sequence of immutable data.
"""
cartesian_set = []
a = 0
while a < N :
cartesian_set.append( tuple (random.sample(range(1, 100), 2) ) )
a+=1
polar_set = []
index = 0
for coordinate in cartesian_set:
x,y = coordinate # coordinate is a tuple, we can split it to x, y
r = math.sqrt(x**2 + y**2)
theta = math.atan2(float(y), x)
polar_set.append ( tuple([r,theta]))
return polar_set |
Generate a random set of N (x,y) cartesian coordinates,
convert them to polar coordinates.
Hints
tuple (a,b) in python is a sequence of immutable data.
| Generate a random set of N (x,y) cartesian coordinates,
convert them to polar coordinates.
Hints
tuple (a,b) in python is a sequence of immutable data. | [
"Generate",
"a",
"random",
"set",
"of",
"N",
"(",
"x",
"y",
")",
"cartesian",
"coordinates",
"convert",
"them",
"to",
"polar",
"coordinates",
".",
"Hints",
"tuple",
"(",
"a",
"b",
")",
"in",
"python",
"is",
"a",
"sequence",
"of",
"immutable",
"data",
"."
] | def convert_to_polar(N):
cartesian_set = []
a = 0
while a < N :
cartesian_set.append( tuple (random.sample(range(1, 100), 2) ) )
a+=1
polar_set = []
index = 0
for coordinate in cartesian_set:
x,y = coordinate
r = math.sqrt(x**2 + y**2)
theta = math.atan2(float(y), x)
polar_set.append ( tuple([r,theta]))
return polar_set | [
"def",
"convert_to_polar",
"(",
"N",
")",
":",
"cartesian_set",
"=",
"[",
"]",
"a",
"=",
"0",
"while",
"a",
"<",
"N",
":",
"cartesian_set",
".",
"append",
"(",
"tuple",
"(",
"random",
".",
"sample",
"(",
"range",
"(",
"1",
",",
"100",
")",
",",
"2",
")",
")",
")",
"a",
"+=",
"1",
"polar_set",
"=",
"[",
"]",
"index",
"=",
"0",
"for",
"coordinate",
"in",
"cartesian_set",
":",
"x",
",",
"y",
"=",
"coordinate",
"r",
"=",
"math",
".",
"sqrt",
"(",
"x",
"**",
"2",
"+",
"y",
"**",
"2",
")",
"theta",
"=",
"math",
".",
"atan2",
"(",
"float",
"(",
"y",
")",
",",
"x",
")",
"polar_set",
".",
"append",
"(",
"tuple",
"(",
"[",
"r",
",",
"theta",
"]",
")",
")",
"return",
"polar_set"
] | Generate a random set of N (x,y) cartesian coordinates,
convert them to polar coordinates. | [
"Generate",
"a",
"random",
"set",
"of",
"N",
"(",
"x",
"y",
")",
"cartesian",
"coordinates",
"convert",
"them",
"to",
"polar",
"coordinates",
"."
] | [
"\"\"\"\n Generate a random set of N (x,y) cartesian coordinates, \n convert them to polar coordinates.\n Hints\n tuple (a,b) in python is a sequence of immutable data. \n \"\"\"",
"# coordinate is a tuple, we can split it to x, y"
] | [
{
"param": "N",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "N",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import math
import random
def convert_to_polar(N):
cartesian_set = []
a = 0
while a < N :
cartesian_set.append( tuple (random.sample(range(1, 100), 2) ) )
a+=1
polar_set = []
index = 0
for coordinate in cartesian_set:
x,y = coordinate
r = math.sqrt(x**2 + y**2)
theta = math.atan2(float(y), x)
polar_set.append ( tuple([r,theta]))
return polar_set |
b86d5068669ed95198fee33bb9790d5ef3512d27 | tensorlayer/TLXZoo | tlxzoo/module/unet/unet.py | [
"Apache-2.0"
] | Python | crop_to_shape | <not_specific> | def crop_to_shape(data, shape: Tuple[int, int, int]):
"""
Crops the array to the given image shape by removing the border
:param data: the array to crop, expects a tensor of shape [batches, nx, ny, channels]
:param shape: the target shape [batches, nx, ny, channels]
"""
diff_nx = (data.shape[0] - shape[0])
diff_ny = (data.shape[1] - shape[1])
if diff_nx == 0 and diff_ny == 0:
return data
offset_nx_left = diff_nx // 2
offset_nx_right = diff_nx - offset_nx_left
offset_ny_left = diff_ny // 2
offset_ny_right = diff_ny - offset_ny_left
cropped = data[offset_nx_left:(-offset_nx_right), offset_ny_left:(-offset_ny_right)]
assert cropped.shape[0] == shape[0]
assert cropped.shape[1] == shape[1]
return cropped |
Crops the array to the given image shape by removing the border
:param data: the array to crop, expects a tensor of shape [batches, nx, ny, channels]
:param shape: the target shape [batches, nx, ny, channels]
| Crops the array to the given image shape by removing the border | [
"Crops",
"the",
"array",
"to",
"the",
"given",
"image",
"shape",
"by",
"removing",
"the",
"border"
] | def crop_to_shape(data, shape: Tuple[int, int, int]):
diff_nx = (data.shape[0] - shape[0])
diff_ny = (data.shape[1] - shape[1])
if diff_nx == 0 and diff_ny == 0:
return data
offset_nx_left = diff_nx // 2
offset_nx_right = diff_nx - offset_nx_left
offset_ny_left = diff_ny // 2
offset_ny_right = diff_ny - offset_ny_left
cropped = data[offset_nx_left:(-offset_nx_right), offset_ny_left:(-offset_ny_right)]
assert cropped.shape[0] == shape[0]
assert cropped.shape[1] == shape[1]
return cropped | [
"def",
"crop_to_shape",
"(",
"data",
",",
"shape",
":",
"Tuple",
"[",
"int",
",",
"int",
",",
"int",
"]",
")",
":",
"diff_nx",
"=",
"(",
"data",
".",
"shape",
"[",
"0",
"]",
"-",
"shape",
"[",
"0",
"]",
")",
"diff_ny",
"=",
"(",
"data",
".",
"shape",
"[",
"1",
"]",
"-",
"shape",
"[",
"1",
"]",
")",
"if",
"diff_nx",
"==",
"0",
"and",
"diff_ny",
"==",
"0",
":",
"return",
"data",
"offset_nx_left",
"=",
"diff_nx",
"//",
"2",
"offset_nx_right",
"=",
"diff_nx",
"-",
"offset_nx_left",
"offset_ny_left",
"=",
"diff_ny",
"//",
"2",
"offset_ny_right",
"=",
"diff_ny",
"-",
"offset_ny_left",
"cropped",
"=",
"data",
"[",
"offset_nx_left",
":",
"(",
"-",
"offset_nx_right",
")",
",",
"offset_ny_left",
":",
"(",
"-",
"offset_ny_right",
")",
"]",
"assert",
"cropped",
".",
"shape",
"[",
"0",
"]",
"==",
"shape",
"[",
"0",
"]",
"assert",
"cropped",
".",
"shape",
"[",
"1",
"]",
"==",
"shape",
"[",
"1",
"]",
"return",
"cropped"
] | Crops the array to the given image shape by removing the border | [
"Crops",
"the",
"array",
"to",
"the",
"given",
"image",
"shape",
"by",
"removing",
"the",
"border"
] | [
"\"\"\"\n Crops the array to the given image shape by removing the border\n\n :param data: the array to crop, expects a tensor of shape [batches, nx, ny, channels]\n :param shape: the target shape [batches, nx, ny, channels]\n \"\"\""
] | [
{
"param": "data",
"type": null
},
{
"param": "shape",
"type": "Tuple[int, int, int]"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "data",
"type": null,
"docstring": "the array to crop, expects a tensor of shape [batches, nx, ny, channels]",
"docstring_tokens": [
"the",
"array",
"to",
"crop",
"expects",
"a",
"tensor",
"of",
"shape",
"[",
"batches",
"nx",
"ny",
"channels",
"]"
],
"default": null,
"is_optional": null
},
{
"identifier": "shape",
"type": "Tuple[int, int, int]",
"docstring": "the target shape [batches, nx, ny, channels]",
"docstring_tokens": [
"the",
"target",
"shape",
"[",
"batches",
"nx",
"ny",
"channels",
"]"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def crop_to_shape(data, shape: Tuple[int, int, int]):
diff_nx = (data.shape[0] - shape[0])
diff_ny = (data.shape[1] - shape[1])
if diff_nx == 0 and diff_ny == 0:
return data
offset_nx_left = diff_nx // 2
offset_nx_right = diff_nx - offset_nx_left
offset_ny_left = diff_ny // 2
offset_ny_right = diff_ny - offset_ny_left
cropped = data[offset_nx_left:(-offset_nx_right), offset_ny_left:(-offset_ny_right)]
assert cropped.shape[0] == shape[0]
assert cropped.shape[1] == shape[1]
return cropped |
a96271b249ae82bf9d2ee9253de822fda9bf61e8 | tensorlayer/TLXZoo | tlxzoo/module/wav2vec2/transform.py | [
"Apache-2.0"
] | Python | clean_up_tokenization | str | def clean_up_tokenization(out_string: str) -> str:
"""
Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms.
Args:
out_string (:obj:`str`): The text to clean up.
Returns:
:obj:`str`: The cleaned-up string.
"""
out_string = (
out_string.replace(" .", ".")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ,", ",")
.replace(" ' ", "'")
.replace(" n't", "n't")
.replace(" 'm", "'m")
.replace(" 's", "'s")
.replace(" 've", "'ve")
.replace(" 're", "'re")
)
return out_string |
Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms.
Args:
out_string (:obj:`str`): The text to clean up.
Returns:
:obj:`str`: The cleaned-up string.
| Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms. | [
"Clean",
"up",
"a",
"list",
"of",
"simple",
"English",
"tokenization",
"artifacts",
"like",
"spaces",
"before",
"punctuations",
"and",
"abbreviated",
"forms",
"."
] | def clean_up_tokenization(out_string: str) -> str:
out_string = (
out_string.replace(" .", ".")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ,", ",")
.replace(" ' ", "'")
.replace(" n't", "n't")
.replace(" 'm", "'m")
.replace(" 's", "'s")
.replace(" 've", "'ve")
.replace(" 're", "'re")
)
return out_string | [
"def",
"clean_up_tokenization",
"(",
"out_string",
":",
"str",
")",
"->",
"str",
":",
"out_string",
"=",
"(",
"out_string",
".",
"replace",
"(",
"\" .\"",
",",
"\".\"",
")",
".",
"replace",
"(",
"\" ?\"",
",",
"\"?\"",
")",
".",
"replace",
"(",
"\" !\"",
",",
"\"!\"",
")",
".",
"replace",
"(",
"\" ,\"",
",",
"\",\"",
")",
".",
"replace",
"(",
"\" ' \"",
",",
"\"'\"",
")",
".",
"replace",
"(",
"\" n't\"",
",",
"\"n't\"",
")",
".",
"replace",
"(",
"\" 'm\"",
",",
"\"'m\"",
")",
".",
"replace",
"(",
"\" 's\"",
",",
"\"'s\"",
")",
".",
"replace",
"(",
"\" 've\"",
",",
"\"'ve\"",
")",
".",
"replace",
"(",
"\" 're\"",
",",
"\"'re\"",
")",
")",
"return",
"out_string"
] | Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms. | [
"Clean",
"up",
"a",
"list",
"of",
"simple",
"English",
"tokenization",
"artifacts",
"like",
"spaces",
"before",
"punctuations",
"and",
"abbreviated",
"forms",
"."
] | [
"\"\"\"\n Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms.\n\n Args:\n out_string (:obj:`str`): The text to clean up.\n\n Returns:\n :obj:`str`: The cleaned-up string.\n \"\"\""
] | [
{
"param": "out_string",
"type": "str"
}
] | {
"returns": [
{
"docstring": ":obj:`str`: The cleaned-up string.",
"docstring_tokens": [
":",
"obj",
":",
"`",
"str",
"`",
":",
"The",
"cleaned",
"-",
"up",
"string",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "out_string",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [
{
"identifier": "out_string (",
"type": null,
"docstring": "`str`): The text to clean up.",
"docstring_tokens": [
"`",
"str",
"`",
")",
":",
"The",
"text",
"to",
"clean",
"up",
"."
],
"default": null,
"is_optional": null
}
],
"others": []
} | def clean_up_tokenization(out_string: str) -> str:
out_string = (
out_string.replace(" .", ".")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ,", ",")
.replace(" ' ", "'")
.replace(" n't", "n't")
.replace(" 'm", "'m")
.replace(" 's", "'s")
.replace(" 've", "'ve")
.replace(" 're", "'re")
)
return out_string |
ffe01c3c27cc04b4f0477c55adeb7dc896d4af4f | dangvinh1406/CNNForSentenceClassification | cnn/Preprocessor.py | [
"MIT"
] | Python | tokenizeSentence | <not_specific> | def tokenizeSentence(raw):
"""
Function tokenizes a string to sentences based the character "new line"
"""
if type(raw) is not str:
return []
return raw.split("\n") |
Function tokenizes a string to sentences based the character "new line"
| Function tokenizes a string to sentences based the character "new line" | [
"Function",
"tokenizes",
"a",
"string",
"to",
"sentences",
"based",
"the",
"character",
"\"",
"new",
"line",
"\""
] | def tokenizeSentence(raw):
if type(raw) is not str:
return []
return raw.split("\n") | [
"def",
"tokenizeSentence",
"(",
"raw",
")",
":",
"if",
"type",
"(",
"raw",
")",
"is",
"not",
"str",
":",
"return",
"[",
"]",
"return",
"raw",
".",
"split",
"(",
"\"\\n\"",
")"
] | Function tokenizes a string to sentences based the character "new line" | [
"Function",
"tokenizes",
"a",
"string",
"to",
"sentences",
"based",
"the",
"character",
"\"",
"new",
"line",
"\""
] | [
"\"\"\"\n Function tokenizes a string to sentences based the character \"new line\"\n \"\"\""
] | [
{
"param": "raw",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "raw",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def tokenizeSentence(raw):
if type(raw) is not str:
return []
return raw.split("\n") |
ffe01c3c27cc04b4f0477c55adeb7dc896d4af4f | dangvinh1406/CNNForSentenceClassification | cnn/Preprocessor.py | [
"MIT"
] | Python | tokenizeWord | <not_specific> | def tokenizeWord(raw):
"""
Function tokenizes a string to words based the non-word characters
"""
if type(raw) is not str:
return []
return re.findall(r"[\w]+", raw) |
Function tokenizes a string to words based the non-word characters
| Function tokenizes a string to words based the non-word characters | [
"Function",
"tokenizes",
"a",
"string",
"to",
"words",
"based",
"the",
"non",
"-",
"word",
"characters"
] | def tokenizeWord(raw):
if type(raw) is not str:
return []
return re.findall(r"[\w]+", raw) | [
"def",
"tokenizeWord",
"(",
"raw",
")",
":",
"if",
"type",
"(",
"raw",
")",
"is",
"not",
"str",
":",
"return",
"[",
"]",
"return",
"re",
".",
"findall",
"(",
"r\"[\\w]+\"",
",",
"raw",
")"
] | Function tokenizes a string to words based the non-word characters | [
"Function",
"tokenizes",
"a",
"string",
"to",
"words",
"based",
"the",
"non",
"-",
"word",
"characters"
] | [
"\"\"\"\n Function tokenizes a string to words based the non-word characters\n \"\"\""
] | [
{
"param": "raw",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "raw",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import re
def tokenizeWord(raw):
if type(raw) is not str:
return []
return re.findall(r"[\w]+", raw) |
ffe01c3c27cc04b4f0477c55adeb7dc896d4af4f | dangvinh1406/CNNForSentenceClassification | cnn/Preprocessor.py | [
"MIT"
] | Python | filterWord | <not_specific> | def filterWord(listOfWords, blackSet):
"""
Function filters out all stop words and numbers
"""
return [word for word in listOfWords
if word not in blackSet
and not word.isdigit()] |
Function filters out all stop words and numbers
| Function filters out all stop words and numbers | [
"Function",
"filters",
"out",
"all",
"stop",
"words",
"and",
"numbers"
] | def filterWord(listOfWords, blackSet):
return [word for word in listOfWords
if word not in blackSet
and not word.isdigit()] | [
"def",
"filterWord",
"(",
"listOfWords",
",",
"blackSet",
")",
":",
"return",
"[",
"word",
"for",
"word",
"in",
"listOfWords",
"if",
"word",
"not",
"in",
"blackSet",
"and",
"not",
"word",
".",
"isdigit",
"(",
")",
"]"
] | Function filters out all stop words and numbers | [
"Function",
"filters",
"out",
"all",
"stop",
"words",
"and",
"numbers"
] | [
"\"\"\"\n Function filters out all stop words and numbers\n \"\"\""
] | [
{
"param": "listOfWords",
"type": null
},
{
"param": "blackSet",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "listOfWords",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "blackSet",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def filterWord(listOfWords, blackSet):
return [word for word in listOfWords
if word not in blackSet
and not word.isdigit()] |
ffe01c3c27cc04b4f0477c55adeb7dc896d4af4f | dangvinh1406/CNNForSentenceClassification | cnn/Preprocessor.py | [
"MIT"
] | Python | filterSentence | <not_specific> | def filterSentence(listOfSentences, numberOfWordsPerSentence):
"""
Function filters out all sentences which have less than a number of words
"""
return [l for l in listOfSentences if len(l) > numberOfWordsPerSentence] |
Function filters out all sentences which have less than a number of words
| Function filters out all sentences which have less than a number of words | [
"Function",
"filters",
"out",
"all",
"sentences",
"which",
"have",
"less",
"than",
"a",
"number",
"of",
"words"
] | def filterSentence(listOfSentences, numberOfWordsPerSentence):
return [l for l in listOfSentences if len(l) > numberOfWordsPerSentence] | [
"def",
"filterSentence",
"(",
"listOfSentences",
",",
"numberOfWordsPerSentence",
")",
":",
"return",
"[",
"l",
"for",
"l",
"in",
"listOfSentences",
"if",
"len",
"(",
"l",
")",
">",
"numberOfWordsPerSentence",
"]"
] | Function filters out all sentences which have less than a number of words | [
"Function",
"filters",
"out",
"all",
"sentences",
"which",
"have",
"less",
"than",
"a",
"number",
"of",
"words"
] | [
"\"\"\"\n Function filters out all sentences which have less than a number of words\n \"\"\""
] | [
{
"param": "listOfSentences",
"type": null
},
{
"param": "numberOfWordsPerSentence",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "listOfSentences",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "numberOfWordsPerSentence",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def filterSentence(listOfSentences, numberOfWordsPerSentence):
return [l for l in listOfSentences if len(l) > numberOfWordsPerSentence] |
502017bd1c80f619871fcdcc57fa1095da039d36 | carlosasj/gauss-jordan | project/aux_functions.py | [
"MIT"
] | Python | find_pivot | int | def find_pivot(matrix, col: int) -> int:
"""
Given the matrix and the column index,
finds the line that should be swaped with the "current" pivot line.
The number returned is the index of the line
"""
col_terms = (matrix[line][col] for line in range(col, len(matrix)))
col_terms_abs = list(map(abs, col_terms))
max_abs = max(col_terms_abs)
return col_terms_abs.index(max_abs) + col |
Given the matrix and the column index,
finds the line that should be swaped with the "current" pivot line.
The number returned is the index of the line
| Given the matrix and the column index,
finds the line that should be swaped with the "current" pivot line.
The number returned is the index of the line | [
"Given",
"the",
"matrix",
"and",
"the",
"column",
"index",
"finds",
"the",
"line",
"that",
"should",
"be",
"swaped",
"with",
"the",
"\"",
"current",
"\"",
"pivot",
"line",
".",
"The",
"number",
"returned",
"is",
"the",
"index",
"of",
"the",
"line"
] | def find_pivot(matrix, col: int) -> int:
col_terms = (matrix[line][col] for line in range(col, len(matrix)))
col_terms_abs = list(map(abs, col_terms))
max_abs = max(col_terms_abs)
return col_terms_abs.index(max_abs) + col | [
"def",
"find_pivot",
"(",
"matrix",
",",
"col",
":",
"int",
")",
"->",
"int",
":",
"col_terms",
"=",
"(",
"matrix",
"[",
"line",
"]",
"[",
"col",
"]",
"for",
"line",
"in",
"range",
"(",
"col",
",",
"len",
"(",
"matrix",
")",
")",
")",
"col_terms_abs",
"=",
"list",
"(",
"map",
"(",
"abs",
",",
"col_terms",
")",
")",
"max_abs",
"=",
"max",
"(",
"col_terms_abs",
")",
"return",
"col_terms_abs",
".",
"index",
"(",
"max_abs",
")",
"+",
"col"
] | Given the matrix and the column index,
finds the line that should be swaped with the "current" pivot line. | [
"Given",
"the",
"matrix",
"and",
"the",
"column",
"index",
"finds",
"the",
"line",
"that",
"should",
"be",
"swaped",
"with",
"the",
"\"",
"current",
"\"",
"pivot",
"line",
"."
] | [
"\"\"\"\n Given the matrix and the column index,\n finds the line that should be swaped with the \"current\" pivot line.\n\n The number returned is the index of the line\n \"\"\""
] | [
{
"param": "matrix",
"type": null
},
{
"param": "col",
"type": "int"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "matrix",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "col",
"type": "int",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def find_pivot(matrix, col: int) -> int:
col_terms = (matrix[line][col] for line in range(col, len(matrix)))
col_terms_abs = list(map(abs, col_terms))
max_abs = max(col_terms_abs)
return col_terms_abs.index(max_abs) + col |
ebe3d8b8a51bc99de7ac0eb0b09e23195a85a8f5 | AtomCrafty/catsystem-py | src/catsys/crypt/mt19937.py | [
"MIT"
] | Python | temper | int | def temper(cls, y:int) -> int:
"""Returns the tempered state value y, called during genrand.
"""
y ^= (y >> cls._SHIFT_U)
y ^= (y << cls._SHIFT_S) & cls._MASK_B
y ^= (y << cls._SHIFT_T) & cls._MASK_C
y ^= (y >> cls._SHIFT_L)
return y & 0xffffffff | Returns the tempered state value y, called during genrand.
| Returns the tempered state value y, called during genrand. | [
"Returns",
"the",
"tempered",
"state",
"value",
"y",
"called",
"during",
"genrand",
"."
] | def temper(cls, y:int) -> int:
y ^= (y >> cls._SHIFT_U)
y ^= (y << cls._SHIFT_S) & cls._MASK_B
y ^= (y << cls._SHIFT_T) & cls._MASK_C
y ^= (y >> cls._SHIFT_L)
return y & 0xffffffff | [
"def",
"temper",
"(",
"cls",
",",
"y",
":",
"int",
")",
"->",
"int",
":",
"y",
"^=",
"(",
"y",
">>",
"cls",
".",
"_SHIFT_U",
")",
"y",
"^=",
"(",
"y",
"<<",
"cls",
".",
"_SHIFT_S",
")",
"&",
"cls",
".",
"_MASK_B",
"y",
"^=",
"(",
"y",
"<<",
"cls",
".",
"_SHIFT_T",
")",
"&",
"cls",
".",
"_MASK_C",
"y",
"^=",
"(",
"y",
">>",
"cls",
".",
"_SHIFT_L",
")",
"return",
"y",
"&",
"0xffffffff"
] | Returns the tempered state value y, called during genrand. | [
"Returns",
"the",
"tempered",
"state",
"value",
"y",
"called",
"during",
"genrand",
"."
] | [
"\"\"\"Returns the tempered state value y, called during genrand.\n \"\"\""
] | [
{
"param": "cls",
"type": null
},
{
"param": "y",
"type": "int"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "y",
"type": "int",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def temper(cls, y:int) -> int:
y ^= (y >> cls._SHIFT_U)
y ^= (y << cls._SHIFT_S) & cls._MASK_B
y ^= (y << cls._SHIFT_T) & cls._MASK_C
y ^= (y >> cls._SHIFT_L)
return y & 0xffffffff |
ebe3d8b8a51bc99de7ac0eb0b09e23195a85a8f5 | AtomCrafty/catsystem-py | src/catsys/crypt/mt19937.py | [
"MIT"
] | Python | untemper | int | def untemper(cls, y:int) -> int:
"""Returns the un-tempered original state value of y. (for reversing)
"""
y ^= (y >> cls._SHIFT_L)
y ^= (y << cls._SHIFT_T) & cls._MASK_C
for _ in range(7):
y ^= (y << cls._SHIFT_S) & cls._MASK_B
for _ in range(3):
y ^= (y >> cls._SHIFT_U)
return y & 0xffffffff | Returns the un-tempered original state value of y. (for reversing)
| Returns the un-tempered original state value of y. (for reversing) | [
"Returns",
"the",
"un",
"-",
"tempered",
"original",
"state",
"value",
"of",
"y",
".",
"(",
"for",
"reversing",
")"
] | def untemper(cls, y:int) -> int:
y ^= (y >> cls._SHIFT_L)
y ^= (y << cls._SHIFT_T) & cls._MASK_C
for _ in range(7):
y ^= (y << cls._SHIFT_S) & cls._MASK_B
for _ in range(3):
y ^= (y >> cls._SHIFT_U)
return y & 0xffffffff | [
"def",
"untemper",
"(",
"cls",
",",
"y",
":",
"int",
")",
"->",
"int",
":",
"y",
"^=",
"(",
"y",
">>",
"cls",
".",
"_SHIFT_L",
")",
"y",
"^=",
"(",
"y",
"<<",
"cls",
".",
"_SHIFT_T",
")",
"&",
"cls",
".",
"_MASK_C",
"for",
"_",
"in",
"range",
"(",
"7",
")",
":",
"y",
"^=",
"(",
"y",
"<<",
"cls",
".",
"_SHIFT_S",
")",
"&",
"cls",
".",
"_MASK_B",
"for",
"_",
"in",
"range",
"(",
"3",
")",
":",
"y",
"^=",
"(",
"y",
">>",
"cls",
".",
"_SHIFT_U",
")",
"return",
"y",
"&",
"0xffffffff"
] | Returns the un-tempered original state value of y. | [
"Returns",
"the",
"un",
"-",
"tempered",
"original",
"state",
"value",
"of",
"y",
"."
] | [
"\"\"\"Returns the un-tempered original state value of y. (for reversing)\n \"\"\""
] | [
{
"param": "cls",
"type": null
},
{
"param": "y",
"type": "int"
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "y",
"type": "int",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def untemper(cls, y:int) -> int:
y ^= (y >> cls._SHIFT_L)
y ^= (y << cls._SHIFT_T) & cls._MASK_C
for _ in range(7):
y ^= (y << cls._SHIFT_S) & cls._MASK_B
for _ in range(3):
y ^= (y >> cls._SHIFT_U)
return y & 0xffffffff |
12158ebd66fa5889236500b9da66d041b68ccc24 | tkphd/pycalphad | pycalphad/core/utils.py | [
"MIT"
] | Python | sizeof_fmt | <not_specific> | def sizeof_fmt(num, suffix='B'):
"""
Human-readable string for a number of bytes.
http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
"""
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1000.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1000.0
return "%.1f%s%s" % (num, 'Y', suffix) |
Human-readable string for a number of bytes.
http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
| Human-readable string for a number of bytes. | [
"Human",
"-",
"readable",
"string",
"for",
"a",
"number",
"of",
"bytes",
"."
] | def sizeof_fmt(num, suffix='B'):
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1000.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1000.0
return "%.1f%s%s" % (num, 'Y', suffix) | [
"def",
"sizeof_fmt",
"(",
"num",
",",
"suffix",
"=",
"'B'",
")",
":",
"for",
"unit",
"in",
"[",
"''",
",",
"'K'",
",",
"'M'",
",",
"'G'",
",",
"'T'",
",",
"'P'",
",",
"'E'",
",",
"'Z'",
"]",
":",
"if",
"abs",
"(",
"num",
")",
"<",
"1000.0",
":",
"return",
"\"%3.1f%s%s\"",
"%",
"(",
"num",
",",
"unit",
",",
"suffix",
")",
"num",
"/=",
"1000.0",
"return",
"\"%.1f%s%s\"",
"%",
"(",
"num",
",",
"'Y'",
",",
"suffix",
")"
] | Human-readable string for a number of bytes. | [
"Human",
"-",
"readable",
"string",
"for",
"a",
"number",
"of",
"bytes",
"."
] | [
"\"\"\"\n Human-readable string for a number of bytes.\n http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size\n \"\"\""
] | [
{
"param": "num",
"type": null
},
{
"param": "suffix",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "num",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "suffix",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def sizeof_fmt(num, suffix='B'):
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1000.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1000.0
return "%.1f%s%s" % (num, 'Y', suffix) |
12158ebd66fa5889236500b9da66d041b68ccc24 | tkphd/pycalphad | pycalphad/core/utils.py | [
"MIT"
] | Python | unpack_phases | <not_specific> | def unpack_phases(phases):
"Convert a phases list/dict into a sorted list."
active_phases = None
if isinstance(phases, (list, tuple, set)):
active_phases = sorted(phases)
elif isinstance(phases, dict):
active_phases = sorted(phases.keys())
elif type(phases) is str:
active_phases = [phases]
return active_phases | Convert a phases list/dict into a sorted list. | Convert a phases list/dict into a sorted list. | [
"Convert",
"a",
"phases",
"list",
"/",
"dict",
"into",
"a",
"sorted",
"list",
"."
] | def unpack_phases(phases):
active_phases = None
if isinstance(phases, (list, tuple, set)):
active_phases = sorted(phases)
elif isinstance(phases, dict):
active_phases = sorted(phases.keys())
elif type(phases) is str:
active_phases = [phases]
return active_phases | [
"def",
"unpack_phases",
"(",
"phases",
")",
":",
"active_phases",
"=",
"None",
"if",
"isinstance",
"(",
"phases",
",",
"(",
"list",
",",
"tuple",
",",
"set",
")",
")",
":",
"active_phases",
"=",
"sorted",
"(",
"phases",
")",
"elif",
"isinstance",
"(",
"phases",
",",
"dict",
")",
":",
"active_phases",
"=",
"sorted",
"(",
"phases",
".",
"keys",
"(",
")",
")",
"elif",
"type",
"(",
"phases",
")",
"is",
"str",
":",
"active_phases",
"=",
"[",
"phases",
"]",
"return",
"active_phases"
] | Convert a phases list/dict into a sorted list. | [
"Convert",
"a",
"phases",
"list",
"/",
"dict",
"into",
"a",
"sorted",
"list",
"."
] | [
"\"Convert a phases list/dict into a sorted list.\""
] | [
{
"param": "phases",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "phases",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def unpack_phases(phases):
active_phases = None
if isinstance(phases, (list, tuple, set)):
active_phases = sorted(phases)
elif isinstance(phases, dict):
active_phases = sorted(phases.keys())
elif type(phases) is str:
active_phases = [phases]
return active_phases |
12158ebd66fa5889236500b9da66d041b68ccc24 | tkphd/pycalphad | pycalphad/core/utils.py | [
"MIT"
] | Python | filter_phases | <not_specific> | def filter_phases(dbf, comps, candidate_phases=None):
"""Return phases that are valid for equilibrium calculations for the given database and components
Filters out phases that
* Have no active components in any sublattice of a phase
* Are disordered phases in an order-disorder model
Parameters
----------
dbf : Database
Thermodynamic database containing the relevant parameters.
comps : list of v.Species
Species to consider in the calculation.
candidate_phases : list
Names of phases to consider in the calculation, if not passed all phases from DBF will be considered
Returns
-------
list
Sorted list of phases that are valid for the Database and components
"""
# TODO: filter phases that can not charge balance
def all_sublattices_active(comps, phase):
active_sublattices = [len(set(comps).intersection(subl)) > 0 for
subl in phase.constituents]
return all(active_sublattices)
if candidate_phases == None:
candidate_phases = dbf.phases.keys()
else:
candidate_phases = set(candidate_phases).intersection(dbf.phases.keys())
disordered_phases = [dbf.phases[phase].model_hints.get('disordered_phase') for phase in candidate_phases]
phases = [phase for phase in candidate_phases if
all_sublattices_active(comps, dbf.phases[phase]) and
(phase not in disordered_phases or (phase in disordered_phases and
dbf.phases[phase].model_hints.get('ordered_phase') not in candidate_phases))]
return sorted(phases) | Return phases that are valid for equilibrium calculations for the given database and components
Filters out phases that
* Have no active components in any sublattice of a phase
* Are disordered phases in an order-disorder model
Parameters
----------
dbf : Database
Thermodynamic database containing the relevant parameters.
comps : list of v.Species
Species to consider in the calculation.
candidate_phases : list
Names of phases to consider in the calculation, if not passed all phases from DBF will be considered
Returns
-------
list
Sorted list of phases that are valid for the Database and components
| Return phases that are valid for equilibrium calculations for the given database and components
Filters out phases that
Have no active components in any sublattice of a phase
Are disordered phases in an order-disorder model
Parameters
dbf : Database
Thermodynamic database containing the relevant parameters.
comps : list of v.Species
Species to consider in the calculation.
candidate_phases : list
Names of phases to consider in the calculation, if not passed all phases from DBF will be considered
Returns
list
Sorted list of phases that are valid for the Database and components | [
"Return",
"phases",
"that",
"are",
"valid",
"for",
"equilibrium",
"calculations",
"for",
"the",
"given",
"database",
"and",
"components",
"Filters",
"out",
"phases",
"that",
"Have",
"no",
"active",
"components",
"in",
"any",
"sublattice",
"of",
"a",
"phase",
"Are",
"disordered",
"phases",
"in",
"an",
"order",
"-",
"disorder",
"model",
"Parameters",
"dbf",
":",
"Database",
"Thermodynamic",
"database",
"containing",
"the",
"relevant",
"parameters",
".",
"comps",
":",
"list",
"of",
"v",
".",
"Species",
"Species",
"to",
"consider",
"in",
"the",
"calculation",
".",
"candidate_phases",
":",
"list",
"Names",
"of",
"phases",
"to",
"consider",
"in",
"the",
"calculation",
"if",
"not",
"passed",
"all",
"phases",
"from",
"DBF",
"will",
"be",
"considered",
"Returns",
"list",
"Sorted",
"list",
"of",
"phases",
"that",
"are",
"valid",
"for",
"the",
"Database",
"and",
"components"
] | def filter_phases(dbf, comps, candidate_phases=None):
def all_sublattices_active(comps, phase):
active_sublattices = [len(set(comps).intersection(subl)) > 0 for
subl in phase.constituents]
return all(active_sublattices)
if candidate_phases == None:
candidate_phases = dbf.phases.keys()
else:
candidate_phases = set(candidate_phases).intersection(dbf.phases.keys())
disordered_phases = [dbf.phases[phase].model_hints.get('disordered_phase') for phase in candidate_phases]
phases = [phase for phase in candidate_phases if
all_sublattices_active(comps, dbf.phases[phase]) and
(phase not in disordered_phases or (phase in disordered_phases and
dbf.phases[phase].model_hints.get('ordered_phase') not in candidate_phases))]
return sorted(phases) | [
"def",
"filter_phases",
"(",
"dbf",
",",
"comps",
",",
"candidate_phases",
"=",
"None",
")",
":",
"def",
"all_sublattices_active",
"(",
"comps",
",",
"phase",
")",
":",
"active_sublattices",
"=",
"[",
"len",
"(",
"set",
"(",
"comps",
")",
".",
"intersection",
"(",
"subl",
")",
")",
">",
"0",
"for",
"subl",
"in",
"phase",
".",
"constituents",
"]",
"return",
"all",
"(",
"active_sublattices",
")",
"if",
"candidate_phases",
"==",
"None",
":",
"candidate_phases",
"=",
"dbf",
".",
"phases",
".",
"keys",
"(",
")",
"else",
":",
"candidate_phases",
"=",
"set",
"(",
"candidate_phases",
")",
".",
"intersection",
"(",
"dbf",
".",
"phases",
".",
"keys",
"(",
")",
")",
"disordered_phases",
"=",
"[",
"dbf",
".",
"phases",
"[",
"phase",
"]",
".",
"model_hints",
".",
"get",
"(",
"'disordered_phase'",
")",
"for",
"phase",
"in",
"candidate_phases",
"]",
"phases",
"=",
"[",
"phase",
"for",
"phase",
"in",
"candidate_phases",
"if",
"all_sublattices_active",
"(",
"comps",
",",
"dbf",
".",
"phases",
"[",
"phase",
"]",
")",
"and",
"(",
"phase",
"not",
"in",
"disordered_phases",
"or",
"(",
"phase",
"in",
"disordered_phases",
"and",
"dbf",
".",
"phases",
"[",
"phase",
"]",
".",
"model_hints",
".",
"get",
"(",
"'ordered_phase'",
")",
"not",
"in",
"candidate_phases",
")",
")",
"]",
"return",
"sorted",
"(",
"phases",
")"
] | Return phases that are valid for equilibrium calculations for the given database and components
Filters out phases that
Have no active components in any sublattice of a phase
Are disordered phases in an order-disorder model | [
"Return",
"phases",
"that",
"are",
"valid",
"for",
"equilibrium",
"calculations",
"for",
"the",
"given",
"database",
"and",
"components",
"Filters",
"out",
"phases",
"that",
"Have",
"no",
"active",
"components",
"in",
"any",
"sublattice",
"of",
"a",
"phase",
"Are",
"disordered",
"phases",
"in",
"an",
"order",
"-",
"disorder",
"model"
] | [
"\"\"\"Return phases that are valid for equilibrium calculations for the given database and components\n\n Filters out phases that\n * Have no active components in any sublattice of a phase\n * Are disordered phases in an order-disorder model\n\n Parameters\n ----------\n dbf : Database\n Thermodynamic database containing the relevant parameters.\n comps : list of v.Species\n Species to consider in the calculation.\n candidate_phases : list\n Names of phases to consider in the calculation, if not passed all phases from DBF will be considered\n Returns\n -------\n list\n Sorted list of phases that are valid for the Database and components\n \"\"\"",
"# TODO: filter phases that can not charge balance"
] | [
{
"param": "dbf",
"type": null
},
{
"param": "comps",
"type": null
},
{
"param": "candidate_phases",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "dbf",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "comps",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "candidate_phases",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def filter_phases(dbf, comps, candidate_phases=None):
def all_sublattices_active(comps, phase):
active_sublattices = [len(set(comps).intersection(subl)) > 0 for
subl in phase.constituents]
return all(active_sublattices)
if candidate_phases == None:
candidate_phases = dbf.phases.keys()
else:
candidate_phases = set(candidate_phases).intersection(dbf.phases.keys())
disordered_phases = [dbf.phases[phase].model_hints.get('disordered_phase') for phase in candidate_phases]
phases = [phase for phase in candidate_phases if
all_sublattices_active(comps, dbf.phases[phase]) and
(phase not in disordered_phases or (phase in disordered_phases and
dbf.phases[phase].model_hints.get('ordered_phase') not in candidate_phases))]
return sorted(phases) |
11a0d1dc11e5438da33e3e14b60167bea7fd105c | Mrpye/pictoplot | inkscape/svg_parser.py | [
"Apache-2.0"
] | Python | parseLengthWithUnits | <not_specific> | def parseLengthWithUnits( str ):
'''
Parse an SVG value which may or may not have units attached
This version is greatly simplified in that it only allows: no units,
units of px, and units of %. Everything else, it returns None for.
There is a more general routine to consider in scour.py if more
generality is ever needed.
'''
u = 'px'
s = str.strip()
if s[-2:] == 'px':
s = s[:-2]
elif s[-1:] == '%':
u = '%'
s = s[:-1]
try:
v = float( s )
except:
return None, None
return v, u |
Parse an SVG value which may or may not have units attached
This version is greatly simplified in that it only allows: no units,
units of px, and units of %. Everything else, it returns None for.
There is a more general routine to consider in scour.py if more
generality is ever needed.
| Parse an SVG value which may or may not have units attached
This version is greatly simplified in that it only allows: no units,
units of px, and units of %. Everything else, it returns None for.
There is a more general routine to consider in scour.py if more
generality is ever needed. | [
"Parse",
"an",
"SVG",
"value",
"which",
"may",
"or",
"may",
"not",
"have",
"units",
"attached",
"This",
"version",
"is",
"greatly",
"simplified",
"in",
"that",
"it",
"only",
"allows",
":",
"no",
"units",
"units",
"of",
"px",
"and",
"units",
"of",
"%",
".",
"Everything",
"else",
"it",
"returns",
"None",
"for",
".",
"There",
"is",
"a",
"more",
"general",
"routine",
"to",
"consider",
"in",
"scour",
".",
"py",
"if",
"more",
"generality",
"is",
"ever",
"needed",
"."
] | def parseLengthWithUnits( str ):
u = 'px'
s = str.strip()
if s[-2:] == 'px':
s = s[:-2]
elif s[-1:] == '%':
u = '%'
s = s[:-1]
try:
v = float( s )
except:
return None, None
return v, u | [
"def",
"parseLengthWithUnits",
"(",
"str",
")",
":",
"u",
"=",
"'px'",
"s",
"=",
"str",
".",
"strip",
"(",
")",
"if",
"s",
"[",
"-",
"2",
":",
"]",
"==",
"'px'",
":",
"s",
"=",
"s",
"[",
":",
"-",
"2",
"]",
"elif",
"s",
"[",
"-",
"1",
":",
"]",
"==",
"'%'",
":",
"u",
"=",
"'%'",
"s",
"=",
"s",
"[",
":",
"-",
"1",
"]",
"try",
":",
"v",
"=",
"float",
"(",
"s",
")",
"except",
":",
"return",
"None",
",",
"None",
"return",
"v",
",",
"u"
] | Parse an SVG value which may or may not have units attached
This version is greatly simplified in that it only allows: no units,
units of px, and units of %. | [
"Parse",
"an",
"SVG",
"value",
"which",
"may",
"or",
"may",
"not",
"have",
"units",
"attached",
"This",
"version",
"is",
"greatly",
"simplified",
"in",
"that",
"it",
"only",
"allows",
":",
"no",
"units",
"units",
"of",
"px",
"and",
"units",
"of",
"%",
"."
] | [
"'''\n Parse an SVG value which may or may not have units attached\n This version is greatly simplified in that it only allows: no units,\n units of px, and units of %. Everything else, it returns None for.\n There is a more general routine to consider in scour.py if more\n generality is ever needed.\n '''"
] | [
{
"param": "str",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "str",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def parseLengthWithUnits( str ):
u = 'px'
s = str.strip()
if s[-2:] == 'px':
s = s[:-2]
elif s[-1:] == '%':
u = '%'
s = s[:-1]
try:
v = float( s )
except:
return None, None
return v, u |
03a37e67d6478e0c29ef3b504472a33d937b063b | paul-shannon/slexil2 | slexil/ijalLine.py | [
"MIT"
] | Python | replaceHyphensWithNDashes | <not_specific> | def replaceHyphensWithNDashes(list):
''' replace hyphens with n-dashes
'''
newList = []
for text in list:
text = text.replace('-', 'โ')
newList.append(text)
return (newList) | replace hyphens with n-dashes
| replace hyphens with n-dashes | [
"replace",
"hyphens",
"with",
"n",
"-",
"dashes"
] | def replaceHyphensWithNDashes(list):
newList = []
for text in list:
text = text.replace('-', 'โ')
newList.append(text)
return (newList) | [
"def",
"replaceHyphensWithNDashes",
"(",
"list",
")",
":",
"newList",
"=",
"[",
"]",
"for",
"text",
"in",
"list",
":",
"text",
"=",
"text",
".",
"replace",
"(",
"'-'",
",",
"'โ')",
"",
"newList",
".",
"append",
"(",
"text",
")",
"return",
"(",
"newList",
")"
] | replace hyphens with n-dashes | [
"replace",
"hyphens",
"with",
"n",
"-",
"dashes"
] | [
"''' replace hyphens with n-dashes\n '''"
] | [
{
"param": "list",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "list",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def replaceHyphensWithNDashes(list):
newList = []
for text in list:
text = text.replace('-', 'โ')
newList.append(text)
return (newList) |
4c506cf14e8e208370ea21563ac3a3d1681e6ee9 | shubhsherl/sympy | sympy/core/compatibility.py | [
"BSD-3-Clause"
] | Python | unwrap | <not_specific> | def unwrap(func, stop=None):
"""Get the object wrapped by *func*.
Follows the chain of :attr:`__wrapped__` attributes returning the last
object in the chain.
*stop* is an optional callback accepting an object in the wrapper chain
as its sole argument that allows the unwrapping to be terminated early if
the callback returns a true value. If the callback never returns a true
value, the last object in the chain is returned as usual. For example,
:func:`signature` uses this to stop unwrapping if any object in the
chain has a ``__signature__`` attribute defined.
:exc:`ValueError` is raised if a cycle is encountered.
"""
if stop is None:
def _is_wrapper(f):
return hasattr(f, '__wrapped__')
else:
def _is_wrapper(f):
return hasattr(f, '__wrapped__') and not stop(f)
f = func # remember the original func for error reporting
memo = {id(f)} # Memoise by id to tolerate non-hashable objects
while _is_wrapper(func):
func = func.__wrapped__
id_func = id(func)
if id_func in memo:
raise ValueError('wrapper loop when unwrapping {!r}'.format(f))
memo.add(id_func)
return func | Get the object wrapped by *func*.
Follows the chain of :attr:`__wrapped__` attributes returning the last
object in the chain.
*stop* is an optional callback accepting an object in the wrapper chain
as its sole argument that allows the unwrapping to be terminated early if
the callback returns a true value. If the callback never returns a true
value, the last object in the chain is returned as usual. For example,
:func:`signature` uses this to stop unwrapping if any object in the
chain has a ``__signature__`` attribute defined.
:exc:`ValueError` is raised if a cycle is encountered.
| Get the object wrapped by *func*.
Follows the chain of :attr:`__wrapped__` attributes returning the last
object in the chain.
stop* is an optional callback accepting an object in the wrapper chain
as its sole argument that allows the unwrapping to be terminated early if
the callback returns a true value. If the callback never returns a true
value, the last object in the chain is returned as usual. For example. | [
"Get",
"the",
"object",
"wrapped",
"by",
"*",
"func",
"*",
".",
"Follows",
"the",
"chain",
"of",
":",
"attr",
":",
"`",
"__wrapped__",
"`",
"attributes",
"returning",
"the",
"last",
"object",
"in",
"the",
"chain",
".",
"stop",
"*",
"is",
"an",
"optional",
"callback",
"accepting",
"an",
"object",
"in",
"the",
"wrapper",
"chain",
"as",
"its",
"sole",
"argument",
"that",
"allows",
"the",
"unwrapping",
"to",
"be",
"terminated",
"early",
"if",
"the",
"callback",
"returns",
"a",
"true",
"value",
".",
"If",
"the",
"callback",
"never",
"returns",
"a",
"true",
"value",
"the",
"last",
"object",
"in",
"the",
"chain",
"is",
"returned",
"as",
"usual",
".",
"For",
"example",
"."
] | def unwrap(func, stop=None):
if stop is None:
def _is_wrapper(f):
return hasattr(f, '__wrapped__')
else:
def _is_wrapper(f):
return hasattr(f, '__wrapped__') and not stop(f)
f = func
memo = {id(f)}
while _is_wrapper(func):
func = func.__wrapped__
id_func = id(func)
if id_func in memo:
raise ValueError('wrapper loop when unwrapping {!r}'.format(f))
memo.add(id_func)
return func | [
"def",
"unwrap",
"(",
"func",
",",
"stop",
"=",
"None",
")",
":",
"if",
"stop",
"is",
"None",
":",
"def",
"_is_wrapper",
"(",
"f",
")",
":",
"return",
"hasattr",
"(",
"f",
",",
"'__wrapped__'",
")",
"else",
":",
"def",
"_is_wrapper",
"(",
"f",
")",
":",
"return",
"hasattr",
"(",
"f",
",",
"'__wrapped__'",
")",
"and",
"not",
"stop",
"(",
"f",
")",
"f",
"=",
"func",
"memo",
"=",
"{",
"id",
"(",
"f",
")",
"}",
"while",
"_is_wrapper",
"(",
"func",
")",
":",
"func",
"=",
"func",
".",
"__wrapped__",
"id_func",
"=",
"id",
"(",
"func",
")",
"if",
"id_func",
"in",
"memo",
":",
"raise",
"ValueError",
"(",
"'wrapper loop when unwrapping {!r}'",
".",
"format",
"(",
"f",
")",
")",
"memo",
".",
"add",
"(",
"id_func",
")",
"return",
"func"
] | Get the object wrapped by *func*. | [
"Get",
"the",
"object",
"wrapped",
"by",
"*",
"func",
"*",
"."
] | [
"\"\"\"Get the object wrapped by *func*.\n\n Follows the chain of :attr:`__wrapped__` attributes returning the last\n object in the chain.\n\n *stop* is an optional callback accepting an object in the wrapper chain\n as its sole argument that allows the unwrapping to be terminated early if\n the callback returns a true value. If the callback never returns a true\n value, the last object in the chain is returned as usual. For example,\n :func:`signature` uses this to stop unwrapping if any object in the\n chain has a ``__signature__`` attribute defined.\n\n :exc:`ValueError` is raised if a cycle is encountered.\n\n \"\"\"",
"# remember the original func for error reporting",
"# Memoise by id to tolerate non-hashable objects"
] | [
{
"param": "func",
"type": null
},
{
"param": "stop",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "func",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "stop",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": [
{
"identifier": "func",
"docstring": "`signature` uses this to stop unwrapping if any object in the\nchain has a ``__signature__`` attribute defined.",
"docstring_tokens": [
"`",
"signature",
"`",
"uses",
"this",
"to",
"stop",
"unwrapping",
"if",
"any",
"object",
"in",
"the",
"chain",
"has",
"a",
"`",
"`",
"__signature__",
"`",
"`",
"attribute",
"defined",
"."
]
},
{
"identifier": "exc",
"docstring": "`ValueError` is raised if a cycle is encountered.",
"docstring_tokens": [
"`",
"ValueError",
"`",
"is",
"raised",
"if",
"a",
"cycle",
"is",
"encountered",
"."
]
}
]
} | def unwrap(func, stop=None):
if stop is None:
def _is_wrapper(f):
return hasattr(f, '__wrapped__')
else:
def _is_wrapper(f):
return hasattr(f, '__wrapped__') and not stop(f)
f = func
memo = {id(f)}
while _is_wrapper(func):
func = func.__wrapped__
id_func = id(func)
if id_func in memo:
raise ValueError('wrapper loop when unwrapping {!r}'.format(f))
memo.add(id_func)
return func |
d60d52c7975e8401d07d203b07d59bad88c5c55a | zniper/test-blog | src/content/views.py | [
"MIT"
] | Python | normalize_query | <not_specific> | def normalize_query(query_string,
findterms=re.compile(r'"([^"]+)"|(\S+)').findall,
normspace=re.compile(r'\s{2,}').sub):
"""Find the term in query string and reduce redundant spaces."""
return [normspace(' ', (t[0] or t[1]).strip())
for t in findterms(query_string)] | Find the term in query string and reduce redundant spaces. | Find the term in query string and reduce redundant spaces. | [
"Find",
"the",
"term",
"in",
"query",
"string",
"and",
"reduce",
"redundant",
"spaces",
"."
] | def normalize_query(query_string,
findterms=re.compile(r'"([^"]+)"|(\S+)').findall,
normspace=re.compile(r'\s{2,}').sub):
return [normspace(' ', (t[0] or t[1]).strip())
for t in findterms(query_string)] | [
"def",
"normalize_query",
"(",
"query_string",
",",
"findterms",
"=",
"re",
".",
"compile",
"(",
"r'\"([^\"]+)\"|(\\S+)'",
")",
".",
"findall",
",",
"normspace",
"=",
"re",
".",
"compile",
"(",
"r'\\s{2,}'",
")",
".",
"sub",
")",
":",
"return",
"[",
"normspace",
"(",
"' '",
",",
"(",
"t",
"[",
"0",
"]",
"or",
"t",
"[",
"1",
"]",
")",
".",
"strip",
"(",
")",
")",
"for",
"t",
"in",
"findterms",
"(",
"query_string",
")",
"]"
] | Find the term in query string and reduce redundant spaces. | [
"Find",
"the",
"term",
"in",
"query",
"string",
"and",
"reduce",
"redundant",
"spaces",
"."
] | [
"\"\"\"Find the term in query string and reduce redundant spaces.\"\"\""
] | [
{
"param": "query_string",
"type": null
},
{
"param": "findterms",
"type": null
},
{
"param": "normspace",
"type": null
}
] | {
"returns": [],
"raises": [],
"params": [
{
"identifier": "query_string",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "findterms",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "normspace",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import re
def normalize_query(query_string,
findterms=re.compile(r'"([^"]+)"|(\S+)').findall,
normspace=re.compile(r'\s{2,}').sub):
return [normspace(' ', (t[0] or t[1]).strip())
for t in findterms(query_string)] |
f1201c77eb98f8ab3338ef2e28f887f61c466539 | elliottd/imagination | nmt/utils.py | [
"BSD-3-Clause"
] | Python | warning | null | def warning(*objs):
"""
Prints warning text/object to stderr
:param objs:
:return:
"""
print(*objs, file=sys.stderr) |
Prints warning text/object to stderr
:param objs:
:return:
| Prints warning text/object to stderr | [
"Prints",
"warning",
"text",
"/",
"object",
"to",
"stderr"
] | def warning(*objs):
print(*objs, file=sys.stderr) | [
"def",
"warning",
"(",
"*",
"objs",
")",
":",
"print",
"(",
"*",
"objs",
",",
"file",
"=",
"sys",
".",
"stderr",
")"
] | Prints warning text/object to stderr | [
"Prints",
"warning",
"text",
"/",
"object",
"to",
"stderr"
] | [
"\"\"\"\n Prints warning text/object to stderr\n\n :param objs:\n :return:\n \"\"\""
] | [] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [],
"outlier_params": [
{
"identifier": "objs",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"others": []
} | import sys
def warning(*objs):
print(*objs, file=sys.stderr) |
f1201c77eb98f8ab3338ef2e28f887f61c466539 | elliottd/imagination | nmt/utils.py | [
"BSD-3-Clause"
] | Python | zipp | null | def zipp(params, theano_params):
"""
Push parameters to Theano shared variables
:param params:
:param theano_params:
:return:
"""
for kk, vv in params.items():
theano_params[kk].set_value(vv) |
Push parameters to Theano shared variables
:param params:
:param theano_params:
:return:
| Push parameters to Theano shared variables | [
"Push",
"parameters",
"to",
"Theano",
"shared",
"variables"
] | def zipp(params, theano_params):
for kk, vv in params.items():
theano_params[kk].set_value(vv) | [
"def",
"zipp",
"(",
"params",
",",
"theano_params",
")",
":",
"for",
"kk",
",",
"vv",
"in",
"params",
".",
"items",
"(",
")",
":",
"theano_params",
"[",
"kk",
"]",
".",
"set_value",
"(",
"vv",
")"
] | Push parameters to Theano shared variables | [
"Push",
"parameters",
"to",
"Theano",
"shared",
"variables"
] | [
"\"\"\"\n Push parameters to Theano shared variables\n\n :param params:\n :param theano_params:\n :return:\n \"\"\""
] | [
{
"param": "params",
"type": null
},
{
"param": "theano_params",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "params",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
},
{
"identifier": "theano_params",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | def zipp(params, theano_params):
for kk, vv in params.items():
theano_params[kk].set_value(vv) |
f1201c77eb98f8ab3338ef2e28f887f61c466539 | elliottd/imagination | nmt/utils.py | [
"BSD-3-Clause"
] | Python | load_pickle_dictionary | <not_specific> | def load_pickle_dictionary(dictionary_path):
"""
Load a dictionary and optionally also return the inverted dictionary
:param dictionary_path:
:param invert:
:return dictionary:
:return inverted_dictionary:
"""
with open(dictionary_path, mode='rb') as f:
dictionary = pickle.load(f)
return dictionary |
Load a dictionary and optionally also return the inverted dictionary
:param dictionary_path:
:param invert:
:return dictionary:
:return inverted_dictionary:
| Load a dictionary and optionally also return the inverted dictionary | [
"Load",
"a",
"dictionary",
"and",
"optionally",
"also",
"return",
"the",
"inverted",
"dictionary"
] | def load_pickle_dictionary(dictionary_path):
with open(dictionary_path, mode='rb') as f:
dictionary = pickle.load(f)
return dictionary | [
"def",
"load_pickle_dictionary",
"(",
"dictionary_path",
")",
":",
"with",
"open",
"(",
"dictionary_path",
",",
"mode",
"=",
"'rb'",
")",
"as",
"f",
":",
"dictionary",
"=",
"pickle",
".",
"load",
"(",
"f",
")",
"return",
"dictionary"
] | Load a dictionary and optionally also return the inverted dictionary | [
"Load",
"a",
"dictionary",
"and",
"optionally",
"also",
"return",
"the",
"inverted",
"dictionary"
] | [
"\"\"\"\n Load a dictionary and optionally also return the inverted dictionary\n\n :param dictionary_path:\n :param invert:\n :return dictionary:\n :return inverted_dictionary:\n \"\"\""
] | [
{
"param": "dictionary_path",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": "dictionary"
},
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": "inverted_dictionary"
}
],
"raises": [],
"params": [
{
"identifier": "dictionary_path",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [
{
"identifier": "invert",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"others": []
} | import pickle
def load_pickle_dictionary(dictionary_path):
with open(dictionary_path, mode='rb') as f:
dictionary = pickle.load(f)
return dictionary |
f1201c77eb98f8ab3338ef2e28f887f61c466539 | elliottd/imagination | nmt/utils.py | [
"BSD-3-Clause"
] | Python | load_json | <not_specific> | def load_json(filename):
"""
json loader to load Nematus vocabularies
:param filename:
:return:
"""
with open(filename, mode='rb') as f:
# return unicode_to_utf8(json.load(f))
return json.load(f) |
json loader to load Nematus vocabularies
:param filename:
:return:
| json loader to load Nematus vocabularies | [
"json",
"loader",
"to",
"load",
"Nematus",
"vocabularies"
] | def load_json(filename):
with open(filename, mode='rb') as f:
return json.load(f) | [
"def",
"load_json",
"(",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"mode",
"=",
"'rb'",
")",
"as",
"f",
":",
"return",
"json",
".",
"load",
"(",
"f",
")"
] | json loader to load Nematus vocabularies | [
"json",
"loader",
"to",
"load",
"Nematus",
"vocabularies"
] | [
"\"\"\"\n json loader to load Nematus vocabularies\n :param filename:\n :return:\n \"\"\"",
"# return unicode_to_utf8(json.load(f))"
] | [
{
"param": "filename",
"type": null
}
] | {
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "filename",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
} | import json
def load_json(filename):
with open(filename, mode='rb') as f:
return json.load(f) |
End of preview. Expand
in Data Studio
- Downloads last month
- 7