content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
---|---|---|
from typing import Dict

def dataset_is_open_data(dataset: Dict) -> bool:
    """Check if dataset is tagged as open data."""
    is_open_data = dataset.get("isOpenData")
    if is_open_data:
        return is_open_data["value"] == "true"
    return False | fc1591d4a045ba904658bb93577a364145492465 | 3,659,600 |
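A minimal usage sketch; the nested `{"value": "true"}` record shape is an assumption read off the lookup above, not a documented schema:

# Hypothetical dataset records shaped the way the function expects.
open_ds = {"isOpenData": {"value": "true"}}
closed_ds = {"isOpenData": {"value": "false"}}
assert dataset_is_open_data(open_ds) is True
assert dataset_is_open_data(closed_ds) is False
assert dataset_is_open_data({}) is False  # untagged datasets default to False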
def _remove_suffix_apple(path):
    """
    Strip off .so or .dylib.
    >>> _remove_suffix_apple("libpython.so")
    'libpython'
    >>> _remove_suffix_apple("libpython.dylib")
    'libpython'
    >>> _remove_suffix_apple("libpython3.7")
    'libpython3.7'
    """
    if path.endswith(".dylib"):
        return path[:-len(".dylib")]
    if path.endswith(".so"):
        return path[:-len(".so")]
    return path | c5526b0f3420625c2efeba225187f72c7a51fb4b | 3,659,601 |
def sparsenet201(**kwargs):
    """
    SparseNet-201 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for the model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sparsenet(num_layers=201, model_name="sparsenet201", **kwargs) | 83de415e043876ae90dd3f79c3ca26dd82d8c5df | 3,659,602 |
def alt_text_to_curly_bracket(text):
    """
    Converts the text that appears in the alt attribute of image tags from gatherer
    to a curly-bracket mana notation.
    ex: 'Green'->{G}, 'Blue or Red'->{U/R}
    'Variable Colorless' -> {X}
    'Colorless' -> {C}
    'N colorless' -> {N}, where N is some number
    """
    def convert_color_to_letter(color):
        if color.lower() not in ('red', 'white', 'blue', 'green', 'black', 'colorless', 'tap', 'energy'):
            # some cards have weird split mana costs where you can pay N colorless
            # or one of a specific color.
            # Since we're ending up here and what we're given isn't a color, let's assume it's N
            return color
        else:
            if color.lower() == 'blue': return 'U'
            else: return color[0].upper()
    try:
        int(text, 10)
    except ValueError:
        pass
    else:
        # This is just a number. Easy enough.
        return f"{{{text}}}"
    if ' or ' in text:
        # this is a compound color, not as easy to deal with.
        # Replace only the word ' or ' so color names containing 'or' (e.g. 'Colorless') stay intact.
        text = text.replace(' or ', ' ')
        text = '/'.join([convert_color_to_letter(x) for x in text.split()])
    else:
        if 'Variable' in text:
            text = 'X'
        else:
            # hopefully all that's left is just simple color symbols.
            text = convert_color_to_letter(text)
    # at this point we've hopefully reduced the text to a bare symbol (or symbols)
    return f"{{{text}}}" | c604b236a8d0baeff244e0e246176a406674c9e2 | 3,659,603 |
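A quick usage sketch of the conversions the docstring promises:

for alt in ("Green", "Blue or Red", "Variable Colorless", "Colorless", "2"):
    print(alt, "->", alt_text_to_curly_bracket(alt))
# Green -> {G}
# Blue or Red -> {U/R}
# Variable Colorless -> {X}
# Colorless -> {C}
# 2 -> {2}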
def massage_primary(repo_primary, src_cache, cdt):
    """
    Massages the result of dictify() into a less cumbersome form.
    In particular:
    1. There are many lists that can only be of length one that
       don't need to be lists at all.
    2. The '_text' entries need to go away.
    3. The real information starts at ['metadata']['package'].
    4. We want the top-level key to be the package name and under
       that, an entry for each arch for which the package exists.
    """
    new_dict = {}
    for package in repo_primary['metadata']['package']:
        name = package['name'][0]['_text']
        arch = package['arch'][0]['_text']
        if arch == 'src':
            continue
        checksum = package['checksum'][0]['_text']
        source = package['format'][0]['{rpm}sourcerpm'][0]['_text']
        # If you need to check if the sources exist (perhaps you've got the source URL wrong
        # or the distro has forgotten to copy them?):
        # import requests
        # sbase_url = cdt['sbase_url']
        # surl = sbase_url + source
        # print("{} {}".format(requests.head(surl).status_code, surl))
        location = package['location'][0]['href']
        version = package['version'][0]
        summary = package['summary'][0]['_text']
        try:
            description = package['description'][0]['_text']
        except (KeyError, IndexError):
            description = "NA"
        if '_text' in package['url'][0]:
            url = package['url'][0]['_text']
        else:
            url = ''
        license = package['format'][0]['{rpm}license'][0]['_text']
        try:
            provides = package['format'][0]['{rpm}provides'][0]['{rpm}entry']
            provides = massage_primary_requires(provides, cdt)
        except (KeyError, IndexError):
            provides = []
        try:
            requires = package['format'][0]['{rpm}requires'][0]['{rpm}entry']
            requires = massage_primary_requires(requires, cdt)
        except (KeyError, IndexError):
            requires = []
        new_package = {'checksum': checksum,
                       'location': location,
                       'home': url,
                       'source': source,
                       'version': version,
                       'summary': yaml_quote_string(summary),
                       'description': description,
                       'license': license,
                       'provides': provides,
                       'requires': requires}
        if name in new_dict:
            if arch in new_dict[name]:
                print("WARNING: Duplicate packages exist for {} for arch {}".format(name, arch))
            new_dict[name][arch] = new_package
        else:
            new_dict[name] = {arch: new_package}
    return new_dict | fd57ff925b46eb5adddee2c180fbc01b3c60ec7c | 3,659,604 |
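For orientation, a minimal sketch of the dictify()-style input the function walks. The shape is inferred from the lookups above, not from any documented schema, and every field value here is made up:

repo_primary = {'metadata': {'package': [{
    'name': [{'_text': 'zlib'}],
    'arch': [{'_text': 'x86_64'}],
    'checksum': [{'_text': 'abc123'}],
    'location': [{'href': 'Packages/z/zlib.rpm'}],
    'version': [{'ver': '1.2.11'}],
    'summary': [{'_text': 'Compression library'}],
    'description': [{'_text': 'The zlib compression library.'}],
    'url': [{'_text': 'https://zlib.net'}],
    'format': [{
        '{rpm}sourcerpm': [{'_text': 'zlib-1.2.11.src.rpm'}],
        '{rpm}license': [{'_text': 'zlib'}],
    }],
}]}}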
def ansi_color_name_to_escape_code(name, style="default", cmap=None):
    """Converts a color name to the inner part of an ANSI escape code"""
    cmap = _ensure_color_map(style=style, cmap=cmap)
    if name in cmap:
        return cmap[name]
    m = RE_XONSH_COLOR.match(name)
    if m is None:
        raise ValueError("{!r} is not a color!".format(name))
    parts = m.groupdict()
    # convert regex match into actual ANSI colors
    if parts["reset"] is not None:
        if parts["reset"] == "NO_COLOR":
            warn_deprecated_no_color()
        res = "0"
    elif parts["bghex"] is not None:
        res = "48;5;" + rgb_to_256(parts["bghex"][3:])[0]
    elif parts["background"] is not None:
        color = parts["color"]
        if "#" in color:
            res = "48;5;" + rgb_to_256(color[1:])[0]
        else:
            fgcolor = cmap[color]
            if fgcolor.isdecimal():
                res = str(int(fgcolor) + 10)
            elif fgcolor.startswith("38;"):
                res = "4" + fgcolor[1:]
            elif fgcolor == "DEFAULT":
                res = "39"
            else:
                msg = (
                    "when converting {!r}, did not recognize {!r} within "
                    "the following color map as a valid color:\n\n{!r}"
                )
                raise ValueError(msg.format(name, fgcolor, cmap))
    else:
        # have regular, non-background color
        mods = parts["modifiers"]
        if mods is None:
            mods = []
        else:
            mods = mods.strip("_").split("_")
            mods = [ANSI_ESCAPE_MODIFIERS[mod] for mod in mods]
        color = parts["color"]
        if "#" in color:
            mods.append("38;5;" + rgb_to_256(color[1:])[0])
            res = ";".join(mods)
        elif color == "DEFAULT":
            res = "39"
        else:
            mods.append(cmap[color])
            res = ";".join(mods)
    cmap[name] = res
    return res | 70b8fe19fc34d14c678c9e54890a4da7e0e37c24 | 3,659,605 |
import shlex
import getopt
from datetime import datetime, timedelta

def twitter(bot, message):
    """#twitter [-p days]
    -p : how many days ago
    """
    try:
        cmd, *args = shlex.split(message.text)
    except ValueError:
        return False
    if cmd[0] not in config['trigger']:
        return False
    if cmd[1:] != 'twitter':
        return False
    try:
        options, args = getopt.gnu_getopt(args, 'hp:')
    except getopt.GetoptError:
        # malformed options
        reply(bot, message, twitter.__doc__)
        return True
    days = 0
    for o, a in options:
        if o == '-p':
            # how many days ago
            try:
                days = int(a)
                if days < 0:
                    raise ValueError
            except ValueError:
                reply(bot, message, twitter.__doc__)
                return True
        elif o == '-h':
            # help
            reply(bot, message, twitter.__doc__)
            return True
    tweets = Twitter.objects(Q(date__gte=datetime.now().date() + timedelta(days=-days))
                             & Q(date__lte=datetime.now().date() + timedelta(days=-days + 1)))
    if tweets:
        reply(bot, message, '\n---------\n'.join([str(tweet) for tweet in tweets]))
        return True
    else:
        reply(bot, message, 'Anna said nothing...')
        return True | ba02cebb5a680f26f2eb17c32b62ead9ac3995a3 | 3,659,606 |
import os

def checkdir(*args: str) -> bool:
    """
    Guard for checking directories
    Returns:
        bool -- True if every non-empty argument is an absolute path to an existing directory
    """
    for a in args:
        if a and not os.path.isdir(a):
            return False
        if a and a[0] != '/':
            return False
    return True | f1fc1c99e9bb6d4fd12129f1287923fc2f3325eb | 3,659,607 |
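A quick sanity check, assuming a typical Unix layout where /tmp and /usr exist:

assert checkdir('/tmp', '/usr') is True    # absolute paths to existing directories
assert checkdir('relative/path') is False  # not absolute (and likely not a directory)
assert checkdir('/no/such/dir') is False   # absolute but nonexistent
assert checkdir('', None) is True          # falsy arguments are skipped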
import logging

def get_zones(request):
    """Returns preprocessed thermal data for a given request or None."""
    logging.info("received zone request: %s", request.building)
    zones, err = _get_zones(request.building)
    if err is not None:
        return None, err
    grpc_zones = []
    for zone in zones:
        grpc_zones.append(
            building_zone_names_pb2.NamePoint(name=zone))
    return building_zone_names_pb2.Reply(names=grpc_zones), None | b04dca4da5b68faea64744c9c7093a977eb120c1 | 3,659,608 |
import numpy as np
import wisps  # assumed import: the domain package providing Spectrum and classify below

def proper_classification(sp):
    """
    Uses splat.classifyByStandard to classify spectra using spex standards
    """
    #sp.slitpixelwidth=1
    #sp.slitwidth=1
    #sp.toInstrument('WFC3-G141')
    wsp = wisps.Spectrum(wave=sp.wave.value,
                         flux=sp.flux.value,
                         noise=sp.noise.value,
                         contam=np.ones_like(sp.noise.value))
    val = wisps.classify(wsp, stripunits=True)
    return val | 31529d96fbc4fec69a5996fb33829be4caf51529 | 3,659,609 |
from typing import Tuple
import torch

def sum_last_4_layers(sequence_outputs: Tuple[torch.Tensor]) -> torch.Tensor:
    """Sums the last 4 hidden representations of a sequence output of BERT.
    Args:
    -----
    sequence_outputs: Tuple of tensors of shape (batch, seq_length, hidden_size).
        For BERT base, the Tuple has length 13.
    Returns:
    --------
    summed_layers: Tensor of shape (batch, seq_length, hidden_size)
    """
    last_layers = sequence_outputs[-4:]
    return torch.stack(last_layers, dim=0).sum(dim=0) | 14bba441a116712d1431b1ee6dda33dc5ec4142c | 3,659,610 |
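A shape check with dummy tensors standing in for the 13 hidden-state layers of BERT base:

hidden_states = tuple(torch.randn(2, 8, 768) for _ in range(13))  # (batch=2, seq=8, hidden=768)
summed = sum_last_4_layers(hidden_states)
assert summed.shape == (2, 8, 768)
# Same result as summing the last four layers by hand:
assert torch.allclose(summed, sum(hidden_states[-4:]))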
def TotalCust():
    """(read-only) Total Number of customers served from this line section."""
    return lib.Lines_Get_TotalCust() | 58984b853cdd9587c7db5ff6c30b7af20a64985a | 3,659,611 |
import re

def extra_normalize(text_orig: str):
    """
    This function applies a simple normalization to the original text to make
    the aligning process possible.
    The replacement_patterns were obtained during experimentation with real text;
    it is possible to add more, or to get some errors without new rules.
    :Note: very important, every rule in replacement_patterns must not change the
    length of the original text, only replace patterns with same-length strings.
    This process is different to preProcessFlow.
    """
    replacement_patterns = [(r'[:](?=\s*?\n)', '##1'),
                            (r'\xc2|\xa0', ' '),
                            (r'(\w\s*?):(?=\s+?[A-Z]+?)|(\w\s*?):(?=\s*?"+?[A-Z]+?)', r'\g<1>##2'),
                            (r'[?!]', '##3'),
                            # any alphanumeric char followed by \n followed by any number of
                            # punctuation signs followed by a capital letter: replace by alphanumeric+.
                            (r'(\w+?)(\n)(?=["$%()*+&,-/;:¿¡<=>@[\\]^`{|}~\t\s]*(?=.*[A-Z0-9]))', r'\g<1>##4'),
                            # any alphanumeric char followed by \n followed by any number of
                            # punctuation signs followed by a letter: replace by alphanumeric+.
                            (r'(\w+?)(\n)(?=["$%()*+&,-/;:¿¡<=>@[\\]^`{|}~\t\s\n]*(?=[a-zA-Z0-9]))', r'\g<1>##5'),
                            (r'[:](?=\s*?)(?=["$%()*+&,-/;:¿¡<=>@[\\]^`{|}~\t\s]*[A-Z]+?)', '##6'),
                            (r'(\w+?\s*?)\|', r'\g<1>##7'),
                            (r'\n(?=\s*?[A-Z]+?)', '##8'),
                            (r'##\d', 'apdbx'),
                            ]
    for (pattern, repl) in replacement_patterns:
        (text_orig, count) = re.subn(pattern, repl, text_orig)
    text_orig = replace_dot_sequence(text_orig)
    text_orig = multipart_words(text_orig)
    text_orig = abbreviations(text_orig)
    text_orig = re.sub(r'apdbx+', '.', text_orig)
    text_orig = add_doc_ending_point(text_orig)  # append a final '.' if the last character has none; avoids an infinite loop at the end.
    return text_orig | d06ee939c8035cd7b83ed7f1577b383bfcaf203d | 3,659,612 |
def list2str(lst: list) -> str:
    """
    Convert the elements of a list to strings so that printing shows one element
    per line, each prefixed with its 1-based index.
    e.g.
    In:
        lst = [a, b, c]
        str = list2str(lst)
        print(str)
    Out:
        1. a
        2. b
        3. c
    """
    res_list = []
    for i, x in enumerate(lst, start=1):
        res_list.append(str(i) + '. ' + str(x))
    res_str = '\n'.join(res_list)
    return res_str | 3da11748d650e234c082255b8d7dff5e56e65732 | 3,659,613 |
def _prompt_save():  # pragma: no cover
    """Show a prompt asking the user whether they want to save or not.
    Output is 'save', 'cancel', or 'close'.
    """
    b = prompt(
        "Do you want to save your changes before quitting?",
        buttons=['save', 'cancel', 'close'], title='Save')
    return show_box(b) | 859cbbe94ef35bf434b1c4f6cac9ec61a6311fb8 | 3,659,614 |
def plot_dataset_samples_1d(
    dataset,
    n_samples=10,
    title="Dataset",
    figsize=DFLT_FIGSIZE,
    ax=None,
    plot_config_kwargs={},
    seed=123,
):
    """Plot `n_samples` samples of the dataset."""
    np.random.seed(seed)
    with plot_config(plot_config_kwargs):
        if ax is None:
            fig, ax = plt.subplots(1, 1, figsize=figsize)
        alpha = 0.5 + 1 / (n_samples ** 0.5 + 1)
        for i in range(n_samples):
            x, y = dataset[np.random.randint(len(dataset))]
            x = rescale_range(x, (-1, 1), dataset.min_max)
            ax.plot(x.numpy(), y.numpy(), alpha=alpha)
        ax.set_xlim(*dataset.min_max)
        if title is not None:
            ax.set_title(title, fontsize=14)
    return ax | 41b34e276a0236d46e13d7b0f24797e739384661 | 3,659,615 |
def list_versions(namespace, name, provider):
    """List versions for a module.
    Args:
        namespace (str): namespace for the version
        name (str): Name of the module
        provider (str): Provider for the module
    Returns:
        response: JSON formatted response
    """
    try:
        return make_response(backend.get_versions(namespace, name, provider),
                             200)
    except ModuleNotFoundException as module_not_found:
        return make_response(module_not_found.message, 404) | dca0c24f391cce69a10fe7e61165647c9ce1cf66 | 3,659,616 |
import requests

def script_cbor(self, script_hash: str, **kwargs):
    """
    CBOR representation of a plutus script
    https://docs.blockfrost.io/#tag/Cardano-Scripts/paths/~1scripts~1{script_hash}~1cbor/get
    :param script_hash: Hash of the script.
    :type script_hash: str
    :param return_type: Optional. "object", "json" or "pandas". Default: "object".
    :type return_type: str
    :returns: A list of ScriptCborResponse objects.
    :rtype: [ScriptCborResponse]
    :raises ApiError: If API fails
    :raises Exception: If the API response is somehow malformed.
    """
    return requests.get(
        url=f"{self.url}/scripts/{script_hash}/cbor",
        headers=self.default_headers
    ) | fdb71d1e95d67da4a18552f4e42a28e27c7ab95a | 3,659,617 |
import math

def ithOfNPointsOnCircleY(i, n, r):
    """
    return y coordinate of ith value of n points on circle of radius r
    points are numbered from 0 through n-1, spread counterclockwise around circle
    point 0 is at angle 0, as on a unit circle, i.e. at point (r, 0)
    """
    # Hints: similar to ithOfNPointsOnCircleX, but use r sin(theta)
    return r * math.sin(2 * math.pi * i / n) | d4e697145423146b085f8423315c795745498afd | 3,659,618 |
def get_tags(ec2id, ec2type, region):
    """
    Get the tags attached to an EC2 volume or snapshot.
    Returns the tags as a list of {'Key': ..., 'Value': ...} dicts.
    """
    mytags = []
    ec2 = connect('ec2', region)
    if ec2type == 'volume':
        response = ec2.describe_volumes(VolumeIds=[ec2id])
        if 'Tags' in response['Volumes'][0]:
            mytags = response['Volumes'][0]['Tags']
    elif ec2type == 'snapshot':
        response = ec2.describe_snapshots(SnapshotIds=[ec2id])
        if 'Tags' in response['Snapshots'][0]:
            mytags = response['Snapshots'][0]['Tags']
    return mytags | c150d83b6563f79140febb65a4ea7e50bc733286 | 3,659,619 |
def parse(data, raw=False, quiet=False):
    """
    Main text parsing function
    Parameters:
        data:   (string)  text data to parse
        raw:    (boolean) unprocessed output if True
        quiet:  (boolean) suppress warning messages if True
    Returns:
        Dictionary. Raw or processed structured data.
    """
    jc.utils.compatibility(__name__, info.compatible, quiet)
    jc.utils.input_type_check(data)
    raw_output = {}
    if jc.utils.has_data(data):
        for line in filter(None, data.splitlines()):
            linedata = line.split(':', maxsplit=1)
            key = linedata[0].strip().lower().replace(' ', '_').replace('.', '_')
            value = linedata[1].strip()
            raw_output[key] = value
    if raw:
        return raw_output
    else:
        return _process(raw_output) | dd7da8a23a0691dc2df75391c77fa1448362330a | 3,659,620 |
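The heart of the parser is the key/value normalization loop; here is that core as a standalone sketch, runnable without the jc plumbing:

sample = "Product Name: Widget\nSerial No.: 12345"
parsed = {}
for line in filter(None, sample.splitlines()):
    key, value = line.split(':', maxsplit=1)
    parsed[key.strip().lower().replace(' ', '_').replace('.', '_')] = value.strip()
print(parsed)  # {'product_name': 'Widget', 'serial_no_': '12345'}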
import os

def download_pretrained_model(model: str, target_path: str = None) -> str:
    """Downloads the pretrained model to the given target path;
    if target path is None, it will use the model cache path.
    If the model already exists in the given target path, it does nothing.
    Args:
        model (str): pretrained model name to download
        target_path (str, optional): target directory to download model. Defaults to None.
    Returns:
        str: file path of the model
    """
    if target_path is None:
        target_path = get_model_cache_dir()
    registry = get_registry()
    assert model in registry, f"given model: {model} is not in the registry"
    assert os.path.exists(target_path), f"given target path: {target_path} does not exist"
    assert os.path.isdir(target_path), "given target path must be directory not a file"
    adapter = registry[model]["adapter"]
    file_name = registry[model]["adapter"]["kwargs"]["file_name"]
    model_path = os.path.join(target_path, file_name)
    if not os.path.isfile(model_path):
        # download only if the model does not exist yet
        download_object(adapter['type'],
                        dest_path=target_path, **adapter['kwargs'])
    return model_path | b9fcbec011136929e8240944f5f8c595f941d29e | 3,659,621 |
def callNasaApi(date='empty'):
    """calls the NASA APOD API
    Args:
        date (str, optional): date for the NASA APOD API. Defaults to 'empty'.
    Returns:
        Dict: custom API response
    """
    print('calling nasa APOD API...')
    url = nasaInfo['nasa_apod_api_uri']
    if date != 'empty':
        params = getApodEndpointParams('True', date)
    else:
        params = getApodEndpointParams('True')
    response = makeApiCall(url, params, HttpMethods.get.value)
    return response | 5eaa7fe9434c608df47828c8ce19d4e5e5cfe799 | 3,659,622 |
import numpy as np
import sklearn.base
import sklearn.linear_model
import sklearn.pipeline
import sklearn.random_projection

def train_reduced_model(x_values: np.ndarray, y_values: np.ndarray, n_components: int,
                        seed: int, max_iter: int = 10000) -> sklearn.base.BaseEstimator:
    """
    Train a reduced-quality model by putting a Gaussian random projection in
    front of the multinomial logistic regression stage of the pipeline.
    :param x_values: input embeddings for training set
    :param y_values: integer labels corresponding to embeddings
    :param n_components: Number of dimensions to reduce the embeddings to
    :param seed: Random seed to drive Gaussian random projection
    :param max_iter: Maximum number of iterations of L-BFGS to run. The default
        value of 10000 will achieve a tight fit but takes a while.
    :returns: A model (Python object with a `predict()` method) fit on the
        input training data with the specified level of dimension reduction
        by random projection.
    """
    reduce_pipeline = sklearn.pipeline.Pipeline([
        ("dimred", sklearn.random_projection.GaussianRandomProjection(
            n_components=n_components,
            random_state=seed
        )),
        ("mlogreg", sklearn.linear_model.LogisticRegression(
            multi_class="multinomial",
            max_iter=max_iter
        ))
    ])
    print(f"Training model with n_components={n_components} and seed={seed}.")
    return reduce_pipeline.fit(x_values, y_values) | ab2871875c751b5d7abb56991a55607e79c17e6e | 3,659,623 |
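A small end-to-end run on synthetic embeddings (sizes chosen arbitrarily for illustration):

rng = np.random.RandomState(0)
x_train = rng.randn(200, 128)          # 200 fake 128-dimensional embeddings
y_train = rng.randint(0, 3, size=200)  # 3 integer class labels
model = train_reduced_model(x_train, y_train, n_components=16, seed=42, max_iter=500)
print(model.predict(x_train[:5]))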
import numpy as np

def pv(array):
    """Return the peak-to-valley (PV) value of the valid elements of an array.
    Parameters
    ----------
    array : `numpy.ndarray`
        array of values
    Returns
    -------
    `float`
        PV of the array
    """
    non_nan = np.isfinite(array)
    return array[non_nan].max() - array[non_nan].min() | 987ae80fa68cd1dee3e179975b283bb7f48dd2aa | 3,659,624 |
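NaNs and infs are masked out before taking the spread:

data = np.array([1.0, np.nan, 3.5, -2.0, np.inf])
print(pv(data))  # 5.5, i.e. 3.5 - (-2.0); nan and inf are ignored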
import numpy as np

def format_bad_frames(bad_frames):
    """Create an array of bad frame indices from a string loaded from a yml file."""
    if bad_frames == "":
        bads = []
    else:
        try:
            bads = [x.split("-") for x in bad_frames.split(",")]
            bads = [[int(x) for x in y] for y in bads]
            bads = np.concatenate(
                [
                    np.array(x) if len(x) == 1 else np.arange(x[0], x[1] + 1)
                    for x in bads
                ]
            )
        except (ValueError, IndexError):
            bads = []
    bads = list(bads)
    bads = [x.item() for x in bads]
    return bads | 433bcba8cc8bf7985a8103d595759d04099dce6a | 3,659,625 |
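A round trip for the string format the function expects ('-' ranges are inclusive, ',' separates entries):

print(format_bad_frames("1-3,7,10-11"))  # [1, 2, 3, 7, 10, 11]
print(format_bad_frames(""))             # []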