content (string, lengths 35 – 762k) | sha1 (string, length 40) | id (int64, 0 – 3.66M)
---|---|---|
def offset_zero_by_one(feature):
"""Sets the start coordinate to 1 if it is actually 0.
Required for the flanking to work properly in those cases.
"""
if feature.start == 0:
feature.start += 1
return feature | 3c8fb9754bde7b7efaa5d092e8239aeb099e26a4 | 200 |
def smilesToMolecule(smiles):
"""
Convert a SMILES string to a CDK Molecule object.
    Returns: the Molecule object, or None if parsing fails
"""
mol = None
try:
smilesParser = cdk.smiles.SmilesParser(silentChemObjectBuilder)
mol = smilesParser.parseSmiles(smiles)
except cdk.exception.InvalidSmilesException as e:
        System.err.println('An error occurred while parsing the SMILES')
e.printStackTrace()
return mol | 9a50a21c77a5306de47b39d2290e3e6c04184acc | 201 |
from collections import OrderedDict
def build_pathmatcher(name, defaultServiceUrl):
"""
This builds and returns a full pathMatcher entry, for appending to an existing URL map.
Parameters:
name: The name of the pathMatcher.
defaultServiceUrl: Denotes the URL requests should go to if none of the path patterns match.
"""
matcher = OrderedDict()
matcher['defaultService'] = defaultServiceUrl
matcher['name'] = name
return matcher | e21a79d51b41bd393a8fa2e254c6db7cf61bd441 | 202 |
from scipy.ndimage import gaussian_filter1d
def gaussian1D_smoothing(input_array, sigma, window_size):
"""
Function to smooth input array using 1D gaussian smoothing
Args:
input_array (numpy.array): input array of values
sigma (float): sigma value for gaussian smoothing
window_size (int): window size for gaussian smoothing
Returns:
numpy.array: smoothed output array
"""
    # compute the truncate value (number of standard deviations to keep)
truncate = (((window_size - 1)/2)-0.5)/sigma
return gaussian_filter1d(input_array, sigma=sigma, truncate=truncate) | 1e7185e358c3dba77c584e072537c7c3b5d9ca4c | 203 |
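A minimal usage sketch for the smoothing helper above; the signal, sigma, and window size are illustrative values, and SciPy is assumed to be installed.

```python
import numpy as np

rng = np.random.default_rng(0)
noisy = np.sin(np.linspace(0, 4 * np.pi, 200)) + rng.normal(0, 0.3, 200)

# window_size=25, sigma=3.0 -> truncate = ((25 - 1) / 2 - 0.5) / 3.0 = 11.5 / 3.0,
# i.e. the Gaussian kernel is cut off after roughly +/- 12 samples.
smoothed = gaussian1D_smoothing(noisy, sigma=3.0, window_size=25)
print(noisy.shape, smoothed.shape)  # (200,) (200,)
```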
import re
def add_whitespace(c_fn):
""" Add two spaces between all tokens of a C function
"""
tok = re.compile(r'[a-zA-Z0-9_]+|\*|\(|\)|\,|\[|\]')
return ' ' + ' '.join(tok.findall(c_fn)) + ' ' | 57d59a5956c3914fa01587b6262e7d4348d77446 | 204 |
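A quick usage sketch for the tokenizer above; the C signature is just an illustrative input.

```python
# The regex treats identifiers, '*', parentheses, commas and brackets as tokens,
# then re-joins them with whitespace and pads both ends.
print(add_whitespace("int foo(char *bar, size_t n)"))
# e.g. ' int foo ( char * bar , size_t n ) '
```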
import numpy as np
def readFlow(fn):
""" Read .flo file in Middlebury format"""
with open(fn, 'rb') as f:
magic = np.fromfile(f, np.float32, count=1)
if 202021.25 != magic:
print('Magic number incorrect. Invalid .flo file')
return None
else:
w = np.fromfile(f, np.int32, count=1)
h = np.fromfile(f, np.int32, count=1)
#print('Reading %d x %d flo file\n' % (w, h))
data = np.fromfile(f, np.float32, count=2*int(w)*int(h))
# Reshape data into 3D array (columns, rows, bands)
# The reshape here is for visualization, the original code is (w,h,2)
x=np.resize(data, (int(h), int(w), 2))
return x | d9e5ab6f661d904755c0457827e3cfed87752f95 | 205 |
def plot_umap_list(adata, title, color_groups):
"""
Plots UMAPS based with different coloring groups
:param adata: Adata Object containing a latent space embedding
:param title: Figure title
:param color_groups: Column name in adata.obs used for coloring the UMAP
:return:
"""
try:
if adata.X.shape[1] == 2:
adata.obsm['X_umap'] = adata.X
else:
sc.pp.neighbors(adata, use_rep='X')
sc.tl.umap(adata)
figures = []
for group in color_groups:
fig = sc.pl.umap(adata, color=group, title=title+'_'+group, return_fig=True)
fig.tight_layout()
figures.append(fig)
return figures
except ValueError as e:
print(e)
return [] | a5fc70fb507b575a4b8ab2b0a57bb01f55e390ff | 206 |
import re
import os
def _filename(url, headers):
"""Given the URL and the HTTP headers received while fetching it,
generate a reasonable name for the file. If no suitable name can be
found, return None. (Either uses the Content-Disposition explicit
filename or a filename from the URL.)
"""
filename = None
# Try to get filename from Content-Disposition header.
heads = re.findall(r'^Content-Disposition:\s*(.*?)\r\n',
headers, re.I | re.M)
if heads:
cdisp = rfc6266.parse_headers(heads[-1], relaxed=True)
filename = cdisp.filename_unsafe
# Get filename from URL.
if not filename:
parts = urlparse.urlparse(url).path.split('/')
if parts:
filename = parts[-1]
# Strip unsafe characters from path.
if filename:
filename = filename.strip()
for sep in (os.sep, os.altsep):
if sep:
filename = filename.replace(sep, '_')
for pat in FILENAME_REPLACE:
filename = pat.sub('_', filename)
if filename:
return filename | 84492ec86a704221cb029c4172a9b42faa075593 | 207 |
import numpy as np
def MatrixCrossProduct(Mat1, Mat2):
"""
Returns the cross products of Mat1 and Mat2.
:param:
- Mat1 & Mat2 - Required : 5D matrix with shape (3,1,nz,ny,nx).
:return:
- Mat3 : 5D matrix with shape (3,1,nz,ny,nx).
"""
Mat3 = np.zeros_like(Mat1)
Mat3[0] = Mat1[1]*Mat2[2]-Mat1[2]*Mat2[1]
Mat3[1] = Mat1[2]*Mat2[0]-Mat1[0]*Mat2[2]
Mat3[2] = Mat1[0]*Mat2[1]-Mat1[1]*Mat2[0]
return Mat3 | 5789209c1fbd8bfacff3e48e844aa0454f94958d | 208 |
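A small self-check for the helper above, assuming NumPy; the shapes follow the (3,1,nz,ny,nx) convention from the docstring.

```python
import numpy as np

nz, ny, nx = 2, 3, 4
rng = np.random.default_rng(0)
a = rng.random((3, 1, nz, ny, nx))
b = rng.random((3, 1, nz, ny, nx))

c = MatrixCrossProduct(a, b)
# Compare a single voxel against NumPy's own cross product.
assert np.allclose(c[:, 0, 0, 0, 0], np.cross(a[:, 0, 0, 0, 0], b[:, 0, 0, 0, 0]))
```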
import collections
import json
def partition_preds_by_scrape_type(verify_predictions,
evidence_predictions,
val_examples):
"""Partition predictions by which scrape_type they come from.
The validation fold contains four sets of evidence: drqa, lucene, ukp_pred,
and ukp_wiki. The intention is in this function to partition these into
four sets so that they can each be scored separately to measure the
difference between them on models that are trained on one of these
(train_scrape).
Args:
verify_predictions: Claim verification predictions to partition, a 3-dim
tensor of probabilities (one for each class)
evidence_predictions: Evidence predictions to partition, a scalar
probability of matching
val_examples: Validation examples, typically all of
FeverMetricsCallback._validation_flat
Returns:
Predictions and examples partitioned by scrape type
"""
partitioned_verify = collections.defaultdict(list)
partitioned_match = collections.defaultdict(list)
partitioned_example = collections.defaultdict(list)
for verify_probs, match_prob, example in zip(verify_predictions,
evidence_predictions,
val_examples):
struct, _ = example
metadata = json.loads(unwrap_tensor(struct['metadata']))
scrape_type = metadata['scrape_type']
partitioned_verify[scrape_type].append(verify_probs)
partitioned_match[scrape_type].append(match_prob)
partitioned_example[scrape_type].append(example)
return partitioned_verify, partitioned_match, partitioned_example | 137fdfb4bf1f837c087f597eedd4ce4229b33a02 | 209 |
import numpy as np
def apply_delay_turbulence(signal, delay, fs):
"""Apply phase delay due to turbulence.
:param signal: Signal
:param delay: Delay
:param fs: Sample frequency
"""
k_r = np.arange(0, len(signal), 1) # Create vector of indices
k = k_r - delay * fs # Create vector of warped indices
kf = np.floor(k).astype(int) # Floor the warped indices. Convert to integers so we can use them as indices.
dk = kf - k
ko = np.copy(kf)
kf[ko<0] = 0
kf[ko+1>=len(ko)] = 0
R = ( (1.0 + dk) * signal[kf] + (-dk) * signal[kf+1] ) * (ko >= 0) * (ko+1 < len(k)) #+ 0.0 * (kf<0)
return R | f5801b3888867b05c890e4dba8f64d0cd273f610 | 210 |
def binaryContext():
"""Return the registered context for the binary functions.
Return Value:
Ctor() for the binary function context
"""
return bin_func_class | 93ed6627d90e4dfb493b8b851c35b59d56fd558f | 211 |
from pathlib import Path
def validate_vm_file(file_name: Path, nx: int, ny: int, nz: int):
"""
Validates that a velocity model file has the correct size, and no 0 values in a sample of the layers
:param file_name: A Path object representing the file to test
:param nx, ny, nz: The size of the VM in grid spaces (nx*ny*nz)
:return: A possibly empty list of issues with the VM file
"""
errors = []
vm_size = nx * ny * nz
size = file_name.stat().st_size
if size != vm_size * SIZE_FLOAT:
errors.append(
f"VM filesize for {file_name} expected: {vm_size * SIZE_FLOAT} found: {size}"
)
with VelocityModelFile(nx, ny, nz, file_name, writable=False, memmap=True) as vmf:
min_v = vmf.get_values().min()
if min_v <= 0.0:
errors.append(f"File {file_name} has minimum value of {min_v}")
return errors | 0f0cd5a1bb13038ca0455770f4c240973775b891 | 212 |
import traceback
def format_assignment_html(recording, debug=False):
"""Given a single recording, format it into an HTML file.
Each recording will only have one student.
Returns a {content: str, student: str, type: str, assignment: str} dict.
"""
try:
files = format_files_list(recording.get('files', {}))
warnings = format_warnings(recording.get('warnings', {}).items())
header = format_header(recording, warnings)
output = (header + files) + '\n\n'
except Exception as err:
if debug:
raise err
output = format_as_code(traceback.format_exc())
return {
'assignment': recording['spec'],
'content': output,
'student': recording['student'],
'type': 'html',
} | 474c9b3ec95f8b217fe413c2619fa37fb450d649 | 213 |
def compute_xlabel_confusion_matrix(y_true, y_pred, labels_train=None, labels_test=None,
normalize=True, sample_weight=None):
"""Computes confusion matrix when the labels used to train the classifier are
different than those of the test set.
Args:
y_true: Ground truth.
y_pred: Estimated labels.
labels_train: List of labels used to train the classifier. This may be used to reorder
or select a subset of labels. If none is given, those that
appear at least once in y_pred are used in sorted order.
labels_test: List of labels of the test set. This may be used to reorder
or select a subset of labels. If none is given, those that
appear at least once in y_true are used in sorted order.
sample_weight: Sample weights.
Returns:
Confusion matrix (num_classes_test x num_classes_train)
"""
y_true = list2ndarray(y_true)
y_pred = list2ndarray(y_pred)
if labels_train is None:
labels_train = np.unique(y_pred)
else:
labels_train = list2ndarray(labels_train)
if labels_test is None:
labels_test = np.unique(y_true)
else:
labels_test = list2ndarray(labels_test)
    assert y_true.dtype == y_pred.dtype, 'y_true and y_pred do not have the same dtype'
    assert labels_train.dtype == labels_test.dtype, 'train and test labels do not have the same dtype'
    assert labels_train.dtype == y_pred.dtype, 'labels and predictions do not have the same dtype'
num_classes_test = len(labels_test)
if issubclass(y_true.dtype.type, np.integer):
y_pred += num_classes_test
    elif issubclass(y_true.dtype.type, (np.str_, np.bytes_)):
y_true = np.asarray(['TEST_' + s for s in y_true])
y_pred = np.asarray(['TRAIN_' + s for s in y_pred])
else:
raise Exception()
if issubclass(labels_train.dtype.type, np.integer):
labels_train += num_classes_test
    elif issubclass(labels_train.dtype.type, (np.str_, np.bytes_)):
labels_test = np.asarray(['TEST_' + s for s in labels_test])
labels_train = np.asarray(['TRAIN_' + s for s in labels_train])
else:
raise Exception()
labels = np.concatenate((labels_test, labels_train))
C = confusion_matrix(y_true, y_pred, labels, sample_weight)
C = C[:num_classes_test, num_classes_test:]
if normalize:
C = C/np.sum(C, axis=1, keepdims=True)
return C | 10f8e8767b98979d0d07dcb6ccfdddfaa8b78c1c | 214 |
import numpy as np
import scipy.stats as ss
def generate_synthetic_data(n=50):
    """Create two sets of points from bivariate normal distributions.
    n is the number of points drawn from each distribution.
    """
    # Draw n 2D points from N(0, 1) and n 2D points from N(1, 1) (ss.norm(mean, std)),
    # then stack them along the rows, giving a (2n, 2) array.
    points = np.concatenate((ss.norm(0, 1).rvs((n, 2)), ss.norm(1, 1).rvs((n, 2))), axis=0)
    # Class labels: 0 for the first n points, 1 for the second n points.
    outcomes = np.concatenate((np.repeat(0, n), np.repeat(1, n)), axis=0)
return (points, outcomes) | e63bc114a1b69dc841f439486fc0b455698a4529 | 215 |
def mask_array(array, idx, n_behind, n_ahead):
    """Return a copy of the window of `array` centred on `idx`.
    Args:
        array (numpy.ndarray or list): input array
        idx (int): index at the centre of the window
        n_behind (int): number of elements to keep before `idx`
        n_ahead (int): number of elements to keep after `idx`
    Returns:
        copy of array[idx - n_behind : idx + n_ahead + 1], clipped to the array bounds
    """
first = max(0, idx - n_behind)
last = min(idx + n_ahead + 1, len(array))
array_masked = array[first:last].copy()
return array_masked | 04781f75bd1b0cae5b690759b5da475f59a43fe8 | 216 |
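Usage sketch for the windowing helper above; note how the slice is clipped at the array boundaries.

```python
import numpy as np

a = np.arange(10)
print(mask_array(a, idx=5, n_behind=2, n_ahead=2))  # [3 4 5 6 7]
print(mask_array(a, idx=1, n_behind=3, n_ahead=1))  # [0 1 2] -- clipped at the start
```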
from typing import Set
def get_nfs_acl(path: str, user: str) -> str:
"""
Retrieve the complete list of access control permissions assigned to a file or directory.
"""
raw = command(["/usr/bin/nfs4_getfacl", path], output=True).stdout.decode("utf-8")
allowed: Set[str] = set()
denied: Set[str] = set()
for line in raw.splitlines():
if line.startswith("#"):
continue
type_, _, principal, perms = line.split(":")
if principal != user:
continue
if type_ == "A":
allowed.update(perms)
elif type_ == "D":
denied.update(perms)
return "".join(sorted(allowed - denied)) | bca401e9da9ddcb9419359024268362082c3f64b | 217 |
def PolyMult(p1, p2, debug=False):
"""
    Multiply two numbers in the GF(2^8) finite field defined by the AES
    reduction polynomial x^8 + x^4 + x^3 + x + 1 (0x11B).
    See http://stackoverflow.com/questions/13202758/multiplying-two-polynomials
    for more information.
    """
binP2 = bin(p2)[2:].zfill(8)
mult = 0
if p1 == 0 or p2 == 0:
return 0
for i in range(8):
bit = binP2[i]
if bit == "1":
mult ^= (p1 << (7 - i))
reducPoly = int("100011011", 2)
while True:
if GetMSBIndex(mult) < GetMSBIndex(reducPoly):
break
elif GetMSBIndex(mult) == GetMSBIndex(reducPoly):
mult ^= reducPoly
else:
degreeDiff = GetMSBIndex(mult) - GetMSBIndex(reducPoly)
mult ^= (reducPoly << degreeDiff)
return mult | 30e9f7b9d567ab93e27702d6db5813e8b650442a | 218 |
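`GetMSBIndex` is used above but not shown. Below is a minimal sketch consistent with how it is called (index of the most significant set bit), offered as an assumption about the missing helper rather than its original definition, followed by a standard GF(2^8) check.

```python
def GetMSBIndex(n):
    # Index of the most significant set bit; -1 for n == 0.
    return n.bit_length() - 1

# FIPS-197 worked example: {57} x {83} = {C1} under x^8 + x^4 + x^3 + x + 1.
assert PolyMult(0x57, 0x83) == 0xC1
```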
from hdbscan import HDBSCAN
def run_hdbscan(X_df, X_tsne, output_dir, transparent):
"""Cluster using density estimation
Parameters
----------
X_df: DataFrame
X_tsne: array-like, [n_samples, 2]
output_dir: str, path
transparent: bool
Returns
-------
clusterer: HDBSCAN object
assignments: numpy array of shape [n_samples,]
"""
clusterer = HDBSCAN(
core_dist_n_jobs=-1,
cluster_selection_method="eom", # 'leaf',
approx_min_span_tree=False,
min_cluster_size=100,
min_samples=1,
leaf_size=100,
gen_min_span_tree=True,
# alpha=10.,
memory=Memory(cachedir=None, verbose=0),
)
assignments = clusterer.fit_predict(X_df)
centroid_labels, counts = np.unique(assignments, return_counts=True)
n_clusters = len(centroid_labels)
assignments[assignments == -1] = n_clusters - 1
logger.info("[HDBSCAN] Found {} clusters".format(n_clusters))
logger.info("[HDBSCAN] Cluster assignments:\n{}".format(counts))
logger.info(
"[HDBSCAN] Cluster persistence:\n{}".format(clusterer.cluster_persistence_)
)
return assignments, clusterer.exemplars_, n_clusters, clusterer | 5b5b89f792cbf5acc3ab3681e0ac8d9ea6ce1705 | 219 |
def check_min_sample_periods(X, time_column, min_sample_periods):
"""
Check if all periods contained in a dataframe for a certain time_column
contain at least min_sample_periods examples.
"""
return (X[time_column].value_counts() >= min_sample_periods).prod() | 074c196a169d65582dbb32cc57c86c82ce4cb9c9 | 220 |
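Usage sketch for the check above, assuming pandas; the result is truthy only if every period meets the minimum count.

```python
import pandas as pd

X = pd.DataFrame({"month": ["Jan", "Jan", "Feb", "Feb", "Feb"]})
print(check_min_sample_periods(X, "month", min_sample_periods=2))  # truthy: every month has >= 2 rows
print(check_min_sample_periods(X, "month", min_sample_periods=3))  # 0: "Jan" has only 2 rows
```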
def get_quest_stat(cards): # pylint: disable=R0912,R0915
""" Get quest statistics.
"""
res = {}
encounter_sets = set()
keywords = set()
card_types = {}
for card in cards:
if card.get(lotr.CARD_KEYWORDS):
keywords = keywords.union(
lotr.extract_keywords(card[lotr.CARD_KEYWORDS]))
if (card.get(lotr.CARD_TEXT) and
(' Restricted.' in card[lotr.CARD_TEXT] or
'\nRestricted.' in card[lotr.CARD_TEXT])):
keywords.add('Restricted')
if card.get(lotr.CARD_ENCOUNTER_SET):
encounter_sets.add(card[lotr.CARD_ENCOUNTER_SET])
if card.get(lotr.CARD_ADDITIONAL_ENCOUNTER_SETS):
encounter_sets = encounter_sets.union(
[s.strip() for s in
str(card[lotr.CARD_ADDITIONAL_ENCOUNTER_SETS]).split(';')])
card_type = card[lotr.CARD_TYPE]
if card.get(lotr.CARD_SPHERE) in ('Boon', 'Burden'):
card_type = '{} ({})'.format(card_type, card[lotr.CARD_SPHERE])
card_types[card_type] = (
card_types.get(card_type, 0) + card[lotr.CARD_QUANTITY])
if encounter_sets:
res['encounter_sets'] = '*Encounter Sets*: {}\n'.format(
', '.join(sorted(encounter_sets)))
else:
res['encounter_sets'] = ''
if keywords:
res['keywords'] = '*Keywords*: {}\n'.format(
', '.join(sorted(keywords)))
else:
res['keywords'] = ''
card_types = sorted(list(card_types.items()), key=lambda t: t[0])
card_types = sorted(card_types, key=lambda t: t[1], reverse=True)
res['total'] = '*Cards*: {}\n'.format(sum(t[1] for t in card_types))
res['card_types'] = '\n'.join('*{}*: {}'.format(
t[0], t[1]) for t in card_types)
card_types = {}
threat = 0
max_threat = 0
shadow = 0
surge = 0
res['encounter_deck'] = ''
deck = [card for card in cards if card[CARD_DECK_SECTION] == 'Encounter']
for card in deck:
card_type = card[lotr.CARD_TYPE]
if card.get(lotr.CARD_SPHERE) in ('Boon', 'Burden'):
card_type = '{} ({})'.format(card_type, card[lotr.CARD_SPHERE])
card_types[card_type] = (
card_types.get(card_type, 0) + card[lotr.CARD_QUANTITY])
if lotr.is_positive_int(card.get(lotr.CARD_THREAT)):
threat += int(card[lotr.CARD_THREAT]) * card[lotr.CARD_QUANTITY]
max_threat = max(max_threat, int(card[lotr.CARD_THREAT]))
if card.get(lotr.CARD_SHADOW):
shadow += card[lotr.CARD_QUANTITY]
if card.get(lotr.CARD_KEYWORDS):
if 'Surge' in lotr.extract_keywords(card[lotr.CARD_KEYWORDS]):
surge += card[lotr.CARD_QUANTITY]
if not card_types:
return res
card_types = sorted(list(card_types.items()), key=lambda t: t[0])
card_types = sorted(card_types, key=lambda t: t[1], reverse=True)
total = sum(t[1] for t in card_types)
card_types = [(t[0], '{} ({}%)'.format(t[1], round(t[1] * 100 / total)))
for t in card_types]
res['encounter_deck'] = '**Encounter Deck**\n*Cards*: {}\n\n{}\n\n'.format(
total, '\n'.join('*{}*: {}'.format(t[0], t[1]) for t in card_types))
if shadow:
res['encounter_deck'] += '*Shadow*: {} ({}%)\n'.format(
shadow, round(shadow * 100 / total))
if surge:
res['encounter_deck'] += '*Surge*: {} ({}%)\n'.format(
surge, round(surge * 100 / total))
res['encounter_deck'] += '*Threat*: {} (Avg), {} (Max)\n\n'.format(
round(threat / total, 1), max_threat)
return res | 33de1c65288a82c91dd3ee6f4e31c2ea54f938d8 | 221 |
def bind_type(python_value):
"""Return a Gibica type derived from a Python type."""
binding_table = {'bool': Bool, 'int': Int, 'float': Float}
if python_value is None:
return NoneType()
python_type = type(python_value)
gibica_type = binding_table.get(python_type.__name__)
if gibica_type is None:
raise TypeError('Impossible to recognize underlying type.')
return gibica_type(python_value) | ff1ac8d907a90584694408b8e60996fb7be25eab | 222 |
import traceback
import requests
from requests.auth import HTTPDigestAuth
def delete_server(hostname, instance_id):
"""
Deletes a server by hostname and instance_id.
"""
host = get_host_by_hostname(hostname)
if not host or not instance_id:
return None
try:
r = requests.delete("%s/servers/%i" % (host['uri'], instance_id),
auth=HTTPDigestAuth(host['username'], host['password']),
timeout=(CONNECT_TIMEOUT, READ_TIMEOUT))
if r.ok:
return r.json()
except requests.exceptions.ConnectionError as e:
traceback.print_exc()
return None
return None | 9456e45d49be61672b89427c93542374ff0359e2 | 223 |
def quote_ident(val):
"""
This method returns a new string replacing " with "",
and adding a " at the start and end of the string.
"""
return '"' + val.replace('"', '""') + '"' | 452058861fb5be138db3599755fbf3c6d715c0a8 | 224 |
def TFC_TDF(in_channels, num_layers, gr, kt, kf, f, bn_factor=16, bias=False):
"""
    Wrapper Function: -> TFC_TIF
in_channels: number of input channels
num_layers: number of densely connected conv layers
gr: growth rate
kt: kernel size of the temporal axis.
kf: kernel size of the freq. axis
f: num of frequency bins
below are params for TDF
bn_factor: bottleneck factor. if None: single layer. else: MLP that maps f => f//bn_factor => f
bias: bias setting of linear layers
"""
return TFC_TIF(in_channels, num_layers, gr, kt, kf, f, bn_factor, bias) | b1d4aa007b40c920f4c985d102a4094821fbf228 | 225 |
import json
def barplot_data(gene_values, gene_names, cluster_name, x_label,
title=None):
"""
Converts data for top genes into a json for building the
bar plot. Output should be formatted in a way that can be plugged into
Plotly.
Args:
gene_values (list): list of tuples (gene_id, gene_value)
gene_names (list): list of gene names corresponding to
the genes in gene_values.
cluster_name: name of the cluster from which the top genes are drawn.
x_label: label for the x-axis.
title: plot title
"""
if gene_values is None:
gene_values = [(1,1), (2,2), (3,3)]
if gene_names is None:
gene_names = ['placeholder 1', 'placeholder 2', 'placeholder 3']
if title is None:
title = 'Top genes for cluster {0}'.format(cluster_name)
return json.dumps({
'data': [{
'x': list(x[1] for x in gene_values),
'y': gene_names,
'orientation': 'h',
'type': 'bar',
}],
'layout': {
'title': title,
'xaxis': {'title': x_label},
'margin': {'t': 40},
},
}, cls=SimpleEncoder) | 67879df5d4918dddc8f46fd6fa975f3bf53de2b4 | 226 |
def logic_not(operand: ValueOrExpression) -> Expression:
"""
Constructs a logical negation expression.
"""
return Not(operators.NotOperator.NOT, ensure_expr(operand)) | 9b3755e00afc9aa8a843358ef83442614bda0feb | 227 |
def webpage_attribute_getter(attr):
""" Helper function for defining getters for web_page attributes, e.g.
``get_foo_enabled = webpage_attribute_getter("foo")`` returns
a value of ``webpage.foo`` attribute.
"""
def _getter(self):
return getattr(self.web_page, attr)
return _getter | 3626f8e2d8c6fb7fbb490dc72f796599cdbc874e | 228 |
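A minimal sketch of how the factory above might be used on a class that wraps a `web_page` object; the classes here are hypothetical stand-ins.

```python
class FakeWebPage:
    foo = True  # hypothetical attribute

class Wrapper:
    def __init__(self, web_page):
        self.web_page = web_page
    get_foo_enabled = webpage_attribute_getter("foo")

print(Wrapper(FakeWebPage()).get_foo_enabled())  # True
```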
import numpy as np
def diff_with_step(a:np.ndarray, step:int=1, **kwargs) -> np.ndarray:
""" finished, checked,
compute a[n+step] - a[n] for all valid n
Parameters
----------
a: ndarray,
the input data
step: int, default 1,
the step to compute the difference
kwargs: dict,
Returns
-------
d: ndarray:
the difference array
"""
if step >= len(a):
raise ValueError(f"step ({step}) should be less than the length ({len(a)}) of `a`")
d = a[step:] - a[:-step]
return d | 8475ec66a983f32d4ed7c06348a8607d335dbdca | 229 |
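Usage sketch for the stepped difference above, assuming NumPy.

```python
import numpy as np

a = np.array([1, 4, 9, 16, 25])
print(diff_with_step(a, step=1))  # [3 5 7 9]
print(diff_with_step(a, step=2))  # [ 8 12 16]
```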
import numpy as np
from sklearn.metrics import mean_squared_error
def rmse(y_true: np.ndarray, y_pred: np.ndarray):
    """
    Returns the root mean squared error between y_true and y_pred.
    :param y_true: numpy.ndarray with the ground truth values.
    :param y_pred: numpy.ndarray with the predicted values.
:return: root mean squared error (float).
"""
return np.sqrt(mean_squared_error(y_true, y_pred)) | 42d08e8bfd218d1a9dc9702ca45417b6c502d4c5 | 230 |
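Usage sketch for the metric above.

```python
import numpy as np

y_true = np.array([1.0, 2.0, 3.0])
y_pred = np.array([1.0, 2.0, 5.0])
print(rmse(y_true, y_pred))  # ~1.155 (square root of mean squared error 4/3)
```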
def party_name_from_key(party_key):
"""returns the relevant party name"""
relevant_parties = {0: 'Alternativet',
1: 'Dansk Folkeparti',
2: 'Det Konservative Folkeparti',
3: 'Enhedslisten - De Rød-Grønne',
4: 'Liberal Alliance',
5: 'Nye Borgerlige',
6: 'Radikale Venstre',
7: 'SF - Socialistisk Folkeparti',
8: 'Socialdemokratiet',
9: 'Venstre, Danmarks Liberale Parti'}
return relevant_parties[party_key] | 86041235738017ae3dbd2a5042c5038c0a3ae786 | 231 |
import os
def GetOutDirectory():
"""Returns the Chromium build output directory.
NOTE: This is determined in the following way:
- From a previous call to SetOutputDirectory()
- Otherwise, from the CHROMIUM_OUTPUT_DIR env variable, if it is defined.
- Otherwise, from the current Chromium source directory, and a previous
call to SetBuildType() or the BUILDTYPE env variable, in combination
with the optional CHROMIUM_OUT_DIR env variable.
"""
if 'CHROMIUM_OUTPUT_DIR' in os.environ:
return os.path.abspath(os.path.join(
DIR_SOURCE_ROOT, os.environ.get('CHROMIUM_OUTPUT_DIR')))
build_type = os.environ.get('BUILDTYPE')
if not build_type:
raise EnvironmentError(_MISSING_OUTPUT_DIR_MESSAGE)
return os.path.abspath(os.path.join(
DIR_SOURCE_ROOT, os.environ.get('CHROMIUM_OUT_DIR', 'out'),
build_type)) | 57f48d9d34a82997e98c8a74e44f799e8f3a6736 | 232 |
def __imul__(self,n) :
"""Concatenate the bitstring to itself |n| times, bitreversed if n < 0"""
if not isint(n) :
raise TypeError("Can't multiply bitstring by non int");
if n <= 0 :
if n :
n = -n;
l = self._l;
for i in xrange(l//2) :
self[i],self[l-1-i] = self[l-1-i],self[i];
else :
self._x = 0;
self._l = 0;
if n > 1 :
y = type(self)(self);
for _ in xrange(n-1) :
self.iconcat(y);
return self; | 3305fd98899d0444aea91056712bae1fd4a6db2f | 233 |
import os
def update_cache(force=False, cache_file=None):
"""
Load a build cache, updating it if necessary.
A cache is considered outdated if any of its inputs have changed.
Arguments
force -- Consider a cache outdated regardless of whether its inputs have
been modified.
"""
if not cache_file:
cache_file = find_config()
cache_config = devpipeline_configure.parser.read_config(cache_file)
cache = devpipeline_configure.cache._CachedConfig(cache_config, cache_file)
if force or _is_outdated(cache_file, cache):
cache = devpipeline_configure.config.process_config(
cache_config.get("DEFAULT", "dp.build_config"),
os.path.dirname(cache_file),
"build.cache",
profiles=cache_config.get("DEFAULT", "dp.profile_name", fallback=None),
overrides=cache_config.get("DEFAULT", "dp.overrides", fallback=None),
src_root=cache_config.get("DEFAULT", "dp.src_root"),
)
devpipeline_core.sanitizer.sanitize(
cache, lambda n, m: print("{} [{}]".format(m, n))
)
return cache | 31fc3419e18fa08a124f5424db2753ef8513a310 | 234 |
from pyrado.environments.pysim.quanser_qube import QQubeSim
def create_uniform_masses_lengths_randomizer_qq(frac_halfspan: float):
"""
Get a uniform randomizer that applies to all masses and lengths of the Quanser Qube according to a fraction of their
nominal parameter values
:param frac_halfspan: fraction of the nominal parameter value
:return: `DomainRandomizer` with uniformly distributed masses and lengths
"""
dp_nom = QQubeSim.get_nominal_domain_param()
return DomainRandomizer(
UniformDomainParam(
name="mass_pend_pole",
mean=dp_nom["mass_pend_pole"],
halfspan=dp_nom["mass_pend_pole"] / frac_halfspan,
clip_lo=1e-3,
),
UniformDomainParam(
name="mass_rot_pole",
mean=dp_nom["mass_rot_pole"],
halfspan=dp_nom["mass_rot_pole"] / frac_halfspan,
clip_lo=1e-3,
),
UniformDomainParam(
name="length_rot_pole",
mean=dp_nom["length_rot_pole"],
halfspan=dp_nom["length_rot_pole"] / frac_halfspan,
clip_lo=1e-2,
),
UniformDomainParam(
name="length_pend_pole",
mean=dp_nom["length_pend_pole"],
halfspan=dp_nom["length_pend_pole"] / frac_halfspan,
clip_lo=1e-2,
),
) | 87fc94d17b3fab77b175139d2329c0d67611d402 | 235 |
def compress_table(tbl, condition, blen=None, storage=None, create='table',
**kwargs):
"""Return selected rows of a table."""
# setup
storage = _util.get_storage(storage)
names, columns = _util.check_table_like(tbl)
blen = _util.get_blen_table(tbl, blen)
_util.check_equal_length(columns[0], condition)
length = len(columns[0])
nnz = count_nonzero(condition)
# block iteration
out = None
for i in range(0, length, blen):
j = min(i+blen, length)
bcond = np.asanyarray(condition[i:j])
# don't access any data unless we have to
if np.any(bcond):
bcolumns = [np.asanyarray(c[i:j]) for c in columns]
res = [np.compress(bcond, c, axis=0) for c in bcolumns]
if out is None:
out = getattr(storage, create)(res, names=names,
expectedlen=nnz, **kwargs)
else:
out.append(res)
return out | eb675913da51b48fc6a663ddb858e70abee3f1ce | 236 |
def validate_schedule():
"""Helper routine to report issues with the schedule"""
all_items = prefetch_schedule_items()
errors = []
for validator, _type, msg in SCHEDULE_ITEM_VALIDATORS:
for item in validator(all_items):
errors.append('%s: %s' % (msg, item))
all_slots = prefetch_slots()
for validator, _type, msg in SLOT_VALIDATORS:
for slot in validator(all_slots):
errors.append('%s: %s' % (msg, slot))
return errors | 8f6d0f9670b25c22e4518b53327dffc4fa897a6e | 237 |
from typing import Any
from typing import Dict
from typing import Union
from typing import Callable
from typing import Tuple
def train_gridsearchcv_model(base_model: Any,
X: np.array,
y: np.array,
cv_splitter,
hyperparameter_grid: Dict[str, Any],
scoring: Union[str, Callable[[Any, np.array, np.array], int]]="f1_weighted",
n_jobs: int=4,
verbose: int=3,
) -> Tuple[Dict[str, Any], pd.DataFrame]:
"""Trains given model using gridsearch crossvalidation.
X - numpy array of input vectors
y - numpy array of input labels
    cv_splitter - splitter that splits X and y into train and validation splits
    hyperparameter_grid - hyperparameters used for grid search
scoring - scoring function which is used to evaluate
n_jobs - number of cores to use
verbose - level of verboseness used for GridSearchCV, see scikit-learn
returns (best_parameters, scores_df) where
best_parameters are best hyperparameters found
scores_df is dataframe with scores over all hyperparameter combinations
"""
model = GridSearchCV(
base_model, hyperparameter_grid,
scoring=scoring,
n_jobs=n_jobs, cv=cv_splitter,
refit=False, verbose=verbose,
return_train_score=True
)
return train_cv_model(model, X, y) | 7fe8677f985db7d3518c7b68e5005fde1dee91c6 | 238 |
import numpy as np
def set_resolmatrix(nspec,nwave):
""" Generate a Resolution Matrix
Args:
nspec: int
nwave: int
Returns:
Rdata: np.array
"""
sigma = np.linspace(2,10,nwave*nspec)
ndiag = 21
xx = np.linspace(-ndiag/2.0, +ndiag/2.0, ndiag)
Rdata = np.zeros( (nspec, len(xx), nwave) )
for i in range(nspec):
for j in range(nwave):
kernel = np.exp(-xx**2/(2*sigma[i*nwave+j]**2))
kernel /= sum(kernel)
Rdata[i,:,j] = kernel
return Rdata | 49aac12441c1ef793fa2ded4c5ac031df7ce8049 | 239 |
def assembleR(X, W, fct):
"""
"""
M = W * fct(X)
return M | c792da453b981cc3974e32aa353124f5a5e9c46d | 240 |
import logging
def generate_dictionary_variable_types(
dict_name, key_name, search_dict, indent_level=0
):
"""Generate a dictionary from config with values from either function, variable, or static"""
out_str = []
# Don't escape these:
types_used = ["None", "True", "False", None, True, False]
if len(search_dict) < 1:
logging.warning("Can't search 0 len dict")
return None
if key_exists("function", search_dict):
logging.info("Found funciton in dict")
out_str = f'{dict_name}["{key_name}"] = {search_dict["function"]}'
elif key_exists("variable", search_dict):
logging.info("Found variable in dict")
out_str = f'{dict_name}["{key_name}"] = {search_dict["variable"]}'
elif key_exists("static", search_dict):
if (
isinstance(search_dict["static"], int)
or search_dict["static"] in types_used
):
logging.info("Found static (None / Bool) in dict")
out_str = f'{dict_name}["{key_name}"] = {search_dict["static"]}'
else:
logging.info("Found static (string) in dict")
out_str = f'{dict_name}["{key_name}"] = "{search_dict["static"]}"'
else:
logging.warning("Unable to find function, variable, or static string")
return None
return indent(out_str, indent_level) | 82a7282bc999dcf049ff6fbe6329271577908775 | 241 |
import uuid
def make_uuid(value):
"""Converts a value into a python uuid object."""
if isinstance(value, uuid.UUID):
return value
return uuid.UUID(value) | b65b5739151d84bedd39bc994441d1daa33d1b51 | 242 |
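Usage sketch for the coercion helper above.

```python
u = make_uuid("12345678-1234-5678-1234-567812345678")
print(type(u).__name__)   # UUID
print(make_uuid(u) is u)  # True: existing UUID instances pass through unchanged
```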
def create_config(config_data,
aliases=False,
prefix=False,
multiple_displays=False,
look_info=None,
custom_output_info=None,
custom_lut_dir=None):
"""
Create the *OCIO* config based on the configuration data
Parameters
----------
config_data : dict
Colorspaces and transforms converting between those colorspaces and
the reference colorspace, *ACES*, along with other data needed to
generate a complete *OCIO* configuration
aliases : bool, optional
Whether or not to include Alias colorspaces
prefix : bool, optional
Whether or not to prefix the colorspace names with their Family names
multiple_displays : bool, optional
Whether to create a single display named *ACES* with Views for each
Output Transform or multiple displays, one for each Output Transform
look_info : array of str or unicode, optional
Paths and names for look data
custom_lut_dir : str or unicode, optional
Directory to use for storing custom look files
Returns
-------
*OCIO* config
The constructed OCIO configuration
"""
if look_info is None:
look_info = []
if custom_output_info is None:
custom_output_info = []
prefixed_names = {}
alias_colorspaces = []
config = ocio.Config()
config.setDescription('An ACES config generated from python')
search_path = ['luts']
if custom_lut_dir:
search_path.append('custom')
config.setSearchPath(':'.join(search_path))
reference_data = config_data['referenceColorSpace']
# Adding the colorspace *Family* into the name which helps with
    # applications that present colorspaces as one flat list.
if prefix:
prefixed_name = colorspace_prefixed_name(reference_data)
prefixed_names[reference_data.name] = prefixed_name
reference_data.name = prefixed_name
print('Adding the reference color space : %s' % reference_data.name)
reference = ocio.ColorSpace(
name=reference_data.name,
bitDepth=reference_data.bit_depth,
description=reference_data.description,
equalityGroup=reference_data.equality_group,
family=reference_data.family,
isData=reference_data.is_data,
allocation=reference_data.allocation_type,
allocationVars=reference_data.allocation_vars)
config.addColorSpace(reference)
if aliases:
if reference_data.aliases:
# Deferring adding alias colorspaces until end, which helps with
# applications listing the colorspaces in the order that they were
# defined in the configuration: alias colorspaces are usually named
# lower case with spaces but normal colorspaces names are longer
# and more verbose, thus it becomes harder for user to visually
# parse the list of colorspaces when there are names such as
# "crv_canonlog" interspersed with names like
# "Input - Canon - Curve - Canon-Log".
# Moving the alias colorspace definitions to the end of the
# configuration avoids the above problem.
alias_colorspaces.append(
[reference_data, reference_data, reference_data.aliases])
print('')
if look_info:
print('Adding looks')
config_data['looks'] = []
for look in look_info:
add_look(config,
look,
custom_lut_dir,
reference_data.name,
config_data)
add_looks_to_views(look_info,
reference_data.name,
config_data,
multiple_displays)
print('')
if custom_output_info:
print('Adding custom output transforms')
for custom_output in custom_output_info:
add_custom_output(config,
custom_output,
custom_lut_dir,
reference_data,
config_data,
alias_colorspaces,
prefix)
print('')
print('Adding regular colorspaces')
for colorspace in sorted(config_data['colorSpaces'],
cmp=lambda x,y: cmp(x.family.lower(), y.family.lower())):
# Adding the colorspace *Family* into the name which helps with
        # applications that present colorspaces as one flat list.
if prefix:
prefixed_name = colorspace_prefixed_name(colorspace)
prefixed_names[colorspace.name] = prefixed_name
colorspace.name = prefixed_name
print('Creating new color space : %s' % colorspace.name)
description = colorspace.description
if colorspace.aces_transform_id:
description += (
'\n\nACES Transform ID : %s' % colorspace.aces_transform_id)
ocio_colorspace = ocio.ColorSpace(
name=colorspace.name,
bitDepth=colorspace.bit_depth,
description=description,
equalityGroup=colorspace.equality_group,
family=colorspace.family,
isData=colorspace.is_data,
allocation=colorspace.allocation_type,
allocationVars=colorspace.allocation_vars)
if colorspace.to_reference_transforms:
print('\tGenerating To-Reference transforms')
ocio_transform = create_ocio_transform(
colorspace.to_reference_transforms)
ocio_colorspace.setTransform(
ocio_transform,
ocio.Constants.COLORSPACE_DIR_TO_REFERENCE)
if colorspace.from_reference_transforms:
print('\tGenerating From-Reference transforms')
ocio_transform = create_ocio_transform(
colorspace.from_reference_transforms)
ocio_colorspace.setTransform(
ocio_transform,
ocio.Constants.COLORSPACE_DIR_FROM_REFERENCE)
config.addColorSpace(ocio_colorspace)
if aliases:
if colorspace.aliases:
# Deferring adding alias colorspaces until end, which helps
# with applications listing the colorspaces in the order that
# they were defined in the configuration.
alias_colorspaces.append(
[reference_data, colorspace, colorspace.aliases])
print('')
print('')
# Adding roles early so that alias colorspaces can be created
# with roles names before remaining colorspace aliases are added
# to the configuration.
print('Setting the roles')
if prefix:
set_config_roles(
config,
color_picking=prefixed_names[
config_data['roles']['color_picking']],
color_timing=prefixed_names[config_data['roles']['color_timing']],
compositing_log=prefixed_names[
config_data['roles']['compositing_log']],
data=prefixed_names[config_data['roles']['data']],
default=prefixed_names[config_data['roles']['default']],
matte_paint=prefixed_names[config_data['roles']['matte_paint']],
reference=prefixed_names[config_data['roles']['reference']],
scene_linear=prefixed_names[config_data['roles']['scene_linear']],
compositing_linear=prefixed_names[config_data['roles']['scene_linear']],
rendering=prefixed_names[config_data['roles']['scene_linear']],
texture_paint=prefixed_names[
config_data['roles']['texture_paint']])
# Add the aliased colorspaces for each role
for role_name, role_colorspace_name in config_data['roles'].iteritems():
role_colorspace_prefixed_name = prefixed_names[role_colorspace_name]
#print( 'Finding colorspace : %s' % role_colorspace_prefixed_name )
# Find the colorspace pointed to by the role
role_colorspaces = [colorspace
for colorspace in config_data['colorSpaces']
if colorspace.name == role_colorspace_prefixed_name]
role_colorspace = None
if len(role_colorspaces) > 0:
role_colorspace = role_colorspaces[0]
else:
if reference_data.name == role_colorspace_prefixed_name:
role_colorspace = reference_data
if role_colorspace:
# The alias colorspace shouldn't match the role name exactly
role_name_alias1 = "role_%s" % role_name
role_name_alias2 = "Role - %s" % role_name
print( 'Adding a role colorspace named %s, pointing to %s' % (
role_name_alias2, role_colorspace.name))
alias_colorspaces.append(
(reference_data, role_colorspace, [role_name_alias1]))
add_colorspace_aliases(
config, reference_data, role_colorspace, [role_name_alias2],
'Utility/Roles')
else:
set_config_roles(
config,
color_picking=config_data['roles']['color_picking'],
color_timing=config_data['roles']['color_timing'],
compositing_log=config_data['roles']['compositing_log'],
data=config_data['roles']['data'],
default=config_data['roles']['default'],
matte_paint=config_data['roles']['matte_paint'],
reference=config_data['roles']['reference'],
scene_linear=config_data['roles']['scene_linear'],
compositing_linear=config_data['roles']['scene_linear'],
rendering=config_data['roles']['scene_linear'],
texture_paint=config_data['roles']['texture_paint'])
# Add the aliased colorspaces for each role
for role_name, role_colorspace_name in config_data['roles'].iteritems():
# Find the colorspace pointed to by the role
role_colorspaces = [colorspace
for colorspace in config_data['colorSpaces']
if colorspace.name == role_colorspace_name]
role_colorspace = None
if len(role_colorspaces) > 0:
role_colorspace = role_colorspaces[0]
else:
if reference_data.name == role_colorspace_name:
role_colorspace = reference_data
if role_colorspace:
# The alias colorspace shouldn't match the role name exactly
role_name_alias1 = "role_%s" % role_name
role_name_alias2 = "Role - %s" % role_name
print('Adding a role colorspace named %s, pointing to %s' % (
role_name_alias2, role_colorspace.name))
alias_colorspaces.append(
(reference_data, role_colorspace, [role_name_alias1]))
add_colorspace_aliases(
config, reference_data, role_colorspace, [role_name_alias2],
'Utility/Roles')
print('')
# Adding alias colorspaces at the end as some applications use
# colorspaces definitions order of the configuration to order
# the colorspaces in their selection lists, some applications
# use alphabetical ordering.
# This should keep the alias colorspaces out of the way for applications
# using the configuration order.
print('Adding the alias colorspaces')
for reference, colorspace, aliases in alias_colorspaces:
add_colorspace_aliases(config, reference, colorspace, aliases,
'Utility/Aliases')
print('')
    print('Adding the displays and views')
# Setting the *color_picking* role to be the first *Display*'s
# *Output Transform* *View*.
default_display_name = config_data['defaultDisplay']
default_display_views = config_data['displays'][default_display_name]
default_display_colorspace = default_display_views['Output Transform']
# Defining *Displays* and *Views*.
displays, views = [], []
# Defining a generic *Display* and *View* setup.
if multiple_displays:
looks = config_data['looks'] if ('looks' in config_data) else []
looks = ', '.join(looks)
print('Creating multiple displays, with looks : %s' % looks)
# *Displays* are not reordered to put the *defaultDisplay* first
# because *OCIO* will order them alphabetically when the configuration
# is written to disk.
for display, view_list in config_data['displays'].iteritems():
for view_name, colorspace in view_list.iteritems():
config.addDisplay(display, view_name, colorspace.name, looks)
if 'Output Transform' in view_name and looks != '':
# *Views* without *Looks*.
config.addDisplay(display, view_name, colorspace.name)
# *Views* with *Looks*.
view_name_with_looks = '%s with %s' % (view_name, looks)
config.addDisplay(display, view_name_with_looks,
colorspace.name, looks)
else:
config.addDisplay(display, view_name, colorspace.name)
if not (view_name in views):
views.append(view_name)
displays.append(display)
# *Displays* and *Views* useful in a *GUI* context.
else:
single_display_name = 'ACES'
displays.append(single_display_name)
# Ensuring the *defaultDisplay* is first.
display_names = sorted(config_data['displays'])
display_names.insert(0, display_names.pop(
display_names.index(default_display_name)))
looks = config_data['looks'] if ('looks' in config_data) else []
look_names = ', '.join(looks)
displays_views_colorspaces = []
for display in display_names:
view_list = config_data['displays'][display]
for view_name, colorspace in view_list.iteritems():
if 'Output Transform' in view_name:
# We use the *Display* names as the *View* names in this
# case as there is a single *Display* containing all the
# *Views*.
                    # This works for more applications than not, as of the time
# of this implementation.
# Autodesk Maya 2016 doesn't support parentheses in
# *View* names.
sanitised_display = replace(display, {')': '', '(': ''})
# *View* with *Looks*.
if 'with' in view_name:
sanitised_display = '%s with %s' % (
sanitised_display, look_names)
views_with_looks_at_end = False
# Storing combo of *Display*, *View* and *Colorspace*
# name so they can be added to the end of the list.
if views_with_looks_at_end:
displays_views_colorspaces.append(
[single_display_name, sanitised_display,
colorspace.name])
else:
config.addDisplay(single_display_name,
sanitised_display,
colorspace.name)
if not (sanitised_display in views):
views.append(sanitised_display)
# *View* without *Looks*.
else:
config.addDisplay(single_display_name,
sanitised_display,
colorspace.name)
if not (sanitised_display in views):
views.append(sanitised_display)
# Adding to the configuration any *Display*, *View* combinations that
# were saved for later.
# This list should be empty unless `views_with_looks_at_end` is
# set `True` above.
for display_view_colorspace in displays_views_colorspaces:
single_display_name, sanitised_display, colorspace_name = (
display_view_colorspace)
config.addDisplay(single_display_name,
sanitised_display,
colorspace_name)
if not (sanitised_display in views):
views.append(sanitised_display)
raw_display_space_name = config_data['roles']['data']
log_display_space_name = config_data['roles']['compositing_log']
if prefix:
raw_display_space_name = prefixed_names[raw_display_space_name]
log_display_space_name = prefixed_names[log_display_space_name]
config.addDisplay(single_display_name, 'Raw', raw_display_space_name)
views.append('Raw')
config.addDisplay(single_display_name, 'Log', log_display_space_name)
views.append('Log')
config.setActiveDisplays(','.join(sorted(displays)))
config.setActiveViews(','.join(views))
print('')
# Ensuring the configuration is valid.
config.sanityCheck()
# Resetting colorspace names to their non-prefixed versions.
if prefix:
prefixed_names_inverse = {}
for original, prefixed in prefixed_names.iteritems():
prefixed_names_inverse[prefixed] = original
reference_data.name = prefixed_names_inverse[reference_data.name]
try:
for colorspace in config_data['colorSpaces']:
colorspace.name = prefixed_names_inverse[colorspace.name]
except:
print('Error with Prefixed names')
for original, prefixed in prefixed_names.iteritems():
print('%s, %s' % (original, prefixed))
print('\n')
print('Inverse Lookup of Prefixed names')
for prefixed, original in prefixed_names_inverse.iteritems():
print('%s, %s' % (prefixed, original))
raise
return config | c036094b3a8a3debc80d2e66141f8b95e51a41d0 | 243 |
import re
from pathlib import Path
import json
def parse_json_with_comments(pathlike):
"""
Parse a JSON file after removing any comments.
Comments can use either ``//`` for single-line
    comments or ``/* ... */`` for multi-line comments.
The input filepath can be a string or ``pathlib.Path``.
Parameters
----------
    pathlike : str or os.PathLike
Path to the input JSON file either as a string
or as a ``pathlib.Path`` object.
Returns
-------
obj : dict
JSON object representing the input file.
Note
----
This code was adapted from:
https://web.archive.org/web/20150520154859/http://www.lifl.fr/~riquetd/parse-a-json-file-with-comments.html
"""
# Regular expression to identify comments
comment_re = re.compile(r'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?',
re.DOTALL | re.MULTILINE)
# if we passed in a string, convert it to a Path
if isinstance(pathlike, str):
pathlike = Path(pathlike)
with open(pathlike, 'r') as file_buff:
content = ''.join(file_buff.readlines())
# Looking for comments
match = comment_re.search(content)
while match:
# single line comment
content = content[:match.start()] + content[match.end():]
match = comment_re.search(content)
# Return JSON object
config = json.loads(content)
return config | e79a461c210879d66b699fe49e84d0d2c58a964b | 244 |
import itertools as it
def _unpack_available_edges(avail, weight=None, G=None):
"""Helper to separate avail into edges and corresponding weights"""
if weight is None:
weight = "weight"
if isinstance(avail, dict):
avail_uv = list(avail.keys())
avail_w = list(avail.values())
else:
def _try_getitem(d):
try:
return d[weight]
except TypeError:
return d
avail_uv = [tup[0:2] for tup in avail]
avail_w = [1 if len(tup) == 2 else _try_getitem(tup[-1]) for tup in avail]
if G is not None:
# Edges already in the graph are filtered
flags = [not G.has_edge(u, v) for u, v in avail_uv]
avail_uv = list(it.compress(avail_uv, flags))
avail_w = list(it.compress(avail_w, flags))
return avail_uv, avail_w | 0c4ac0afc209544e385f9214f141cde6f75daa4a | 245 |
import uuid
def triple_str_to_dict(clause):
"""
converts a triple (for a where_clause) in the form
<<#subj, pred_text, #obj/obj_text>>
    to dictionary form. It is assumed that one of the three entries is
replaced by a "?"
if the obj memid is fixed (as opposed to the obj_text),
use a "#" in front of the memid. subj_text is not a valid
possibility for the first entry of the triple; still, if a query uses
a fixed subj, it should be preceded with a "#".
the order is assumed to be subj, pred, obj.
examples:
"find me a record whose name is bob":
<< ?, has_name, bob >> --> {"pred_text": "has_name", "obj_text": "bob"}
"find me a record who is a friend of the entity with memid
dd2ca5a4c5204fc09c71279f8956a2b1":
<< ?, friend_of, #dd2ca5a4c5204fc09c71279f8956a2b1 >> -->
{"pred_text": "friend_of", "obj": "dd2ca5a4c5204fc09c71279f8956a2b1"}
"find me a record x for which the entity with memid
dd2ca5a4c5204fc09c71279f8956a2b1" is a parent_of x:
<< #dd2ca5a4c5204fc09c71279f8956a2b1, parent_of, ? >> -->
{"pred_text": "parent_of", "subj": "dd2ca5a4c5204fc09c71279f8956a2b1"}
    commas in obj text or subj text need to be escaped with \
    "find me a record whose name is bob, the sailor":
    << ?, has_name, bob\, the sailor >> --> {"pred_text": "has_name", "obj_text": "bob, the sailor"}
TODO:
This does not currently handle nested queries.
This does not currently handle multiple "?"
moar escapes?
"""
comma = uuid.uuid4().hex
clause = clause.replace("\,", comma)
terms = remove_enclosing_symbol(clause, ("<<", ">>")).split(",")
terms = [t.replace(comma, ",") for t in terms]
terms = [t.strip() for t in terms]
assert terms[1] and terms[1] != "?"
out = {"pred_text": terms[1]}
if terms[0] == "?":
if terms[2] == "?":
raise Exception(
"queries with both subj and obj unfixed in a triple are not yet supported"
)
assert terms[2] != "?"
if terms[2][0] == "#":
out["obj"] = terms[2][1:]
else:
out["obj_text"] = terms[2]
else:
if terms[0][0] == "#":
out["subj"] = terms[0][1:]
else:
raise Exception(
'queries with a "subj_text" (as opposed to subj memid) in a triple are not supported'
)
return out | 48c2dff6f0d4cceb7f4d3edca6a498573360b45a | 246 |
import json
def reddit_data(subreddit, time_request = -9999):
"""
    @brief function to retrieve cached post data for a given subreddit
:param subreddit: the name of the subreddit
:param time_request: unix timestamp of when requested subreddit was generated
:return: a list of reddit objects with the data of the posts
"""
base_url = get_reddit_url()
url = f"{base_url}/cache?subreddit={subreddit}&time_resquest={time_request}"
content = server_request(url)
data = json.loads(content.decode("utf-8"))
reddit_posts = []
for n in data:
post = reddit.reddit
post.id = data[n]["id"]
post.title = data[n]["title"]
post.author = data[n]["author"]
post.score = int(data[n]["score"])
post.vote_ratio = int(data[n]["vote_ratio"])
post.comment_count = int(data[n]["comment_count"])
post.subreddit = data[n]["subreddit"]
post.post_time = int(data[n]["post_time"])
post.url = data[n]["url"]
post.text = data[n]["text"]
reddit_posts.append(post)
return reddit_posts | 78d79e2e917aaa892b4122d657c30a7cd13dfc4b | 247 |
def write_velocity_files(U_25_RHS_str, U_50_RHS_str, U_100_RHS_str, U_125_RHS_str, U_150_RHS_str, U_25_LHS_str, U_50_LHS_str, U_100_LHS_str, U_125_LHS_str, U_150_LHS_str, path_0_100, path_0_125, path_0_150, path_0_25, path_0_50):
"""Create the details file for the surrounding cases, and write the velocities in line two"""
fname = "details" # Filename
file_25_path = path_0_25
file_50_path = path_0_50
file_100_path = path_0_100
file_125_path = path_0_125
file_150_path = path_0_150
details_file_25 = file_25_path + fname
details_file_50 = file_50_path + fname
details_file_100 = file_100_path + fname
details_file_125 = file_125_path + fname
details_file_150 = file_150_path + fname
with open(details_file_25, 'w+') as f:
f.write('Velocity' +'\n')
f.write(U_25_RHS_str)
with open(details_file_50, 'w+') as f:
f.write('Velocity' +'\n')
f.write(U_50_RHS_str)
with open(details_file_100, 'w+') as f:
f.write('Velocity' +'\n')
f.write(U_100_RHS_str)
with open(details_file_125, 'w+') as f:
f.write('Velocity' +'\n')
f.write(U_125_RHS_str)
with open(details_file_150, 'w+') as f:
f.write('Velocity' +'\n')
f.write(U_150_RHS_str)
return details_file_25, details_file_50, details_file_100, details_file_125, details_file_150 | 6c4af67ea659c09669f7294ec453db5e4e9fb9df | 248 |
from struct import pack
def datetime_to_bytes(value):
"""Return bytes representing UTC time in microseconds."""
return pack('>Q', int(value.timestamp() * 1e6)) | e8b1d78615a84fb4279563d948ca807b0f0f7310 | 249 |
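Usage sketch for the helper above with a timezone-aware datetime; the value packs into eight big-endian bytes of microseconds since the Unix epoch.

```python
from datetime import datetime, timezone
from struct import unpack

b = datetime_to_bytes(datetime(2020, 1, 1, tzinfo=timezone.utc))
print(len(b))              # 8
print(unpack('>Q', b)[0])  # 1577836800000000 microseconds since the epoch
```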
from typing import Sequence
from typing import List
def _f7(seq: Sequence) -> List:
"""order preserving de-duplicate sequence"""
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))] | f4dde886503754a09ac4ff545638750bf8fc6d94 | 250 |
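Usage sketch for the order-preserving de-duplication above.

```python
print(_f7([3, 1, 3, 2, 1]))  # [3, 1, 2]
print(_f7("abracadabra"))    # ['a', 'b', 'r', 'c', 'd']
```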
def get_config(cfg):
"""
    Sets the hyperparameters for the optimizer and experiment using the config file
Args:
cfg: A YACS config object.
"""
config_params = {
"train_params": {
"adapt_lambda": cfg.SOLVER.AD_LAMBDA,
"adapt_lr": cfg.SOLVER.AD_LR,
"lambda_init": cfg.SOLVER.INIT_LAMBDA,
"nb_adapt_epochs": cfg.SOLVER.MAX_EPOCHS,
"nb_init_epochs": cfg.SOLVER.MIN_EPOCHS,
"init_lr": cfg.SOLVER.BASE_LR,
"batch_size": cfg.SOLVER.TRAIN_BATCH_SIZE,
"optimizer": {
"type": cfg.SOLVER.TYPE,
"optim_params": {
"momentum": cfg.SOLVER.MOMENTUM,
"weight_decay": cfg.SOLVER.WEIGHT_DECAY,
"nesterov": cfg.SOLVER.NESTEROV
}
}
},
"data_params": {
"dataset_group": cfg.DATASET.NAME,
"dataset_name": cfg.DATASET.SOURCE + '2' + cfg.DATASET.TARGET,
"source": cfg.DATASET.SOURCE,
"target": cfg.DATASET.TARGET,
"size_type": cfg.DATASET.SIZE_TYPE,
"weight_type": cfg.DATASET.WEIGHT_TYPE
}
}
return config_params | a88bc3c8057d969998ab286aaa15ee5e8768c838 | 251 |
def get_nas_transforms():
""" Returns trajectory transformations for NAS. """
return [
PadActions(),
AsArray(),
RewardsAsValueTargets(),
TileValueTargets()
] | f323ef2cd40af81fdd230e4bbb53cfa2ba6e4450 | 252 |
from datetime import datetime
def epoch_to_datetime(epoch):
"""
    :param epoch: epoch timestamp in milliseconds, as a string or number
    :return: the corresponding datetime object
"""
    return datetime.fromtimestamp(float(epoch) / 1000) | 59d9b85489320f5b1db93e6513fc375b9b58b151 | 253 |
def qe_m4(px,mlmax,Talm=None,fTalm=None):
"""
px is a pixelization object, initialized like this:
px = pixelization(shape=shape,wcs=wcs) # for CAR
px = pixelization(nside=nside) # for healpix
output: curved sky multipole=4 estimator
"""
ells = np.arange(mlmax)
#prepare temperature map
rmapT=px.alm2map(np.stack((Talm,Talm)),spin=0,ncomp=1,mlmax=mlmax)[0]
#find tbarf
t_alm=cs.almxfl(fTalm,np.sqrt((ells-3.)*(ells-2.)*(ells-1.)*ells*(ells+1.)*(ells+2.)*(ells+3.)*(ells+4.)))
alms=np.stack((t_alm,t_alm))
rmap=px.alm2map_spin(alms,0,4,ncomp=2,mlmax=mlmax)
#multiply the two fields together
rmap=np.nan_to_num(rmap)
prodmap=rmap*rmapT
prodmap=np.nan_to_num(prodmap)
if not(px.hpix): prodmap=enmap.enmap(prodmap,px.wcs)
realsp2=prodmap[0] #spin +4 real space real space field
if not(px.hpix): realsp2 = enmap.enmap(realsp2,px.wcs)
#convert the above spin4 fields to spin pm 4 alms
res1 = px.map2alm_spin(realsp2,mlmax,4,4) #will return pm4
#spin 4 ylm
ttalmsp2=rot2dalm(res1,4)[0] #pick up the spin 4 alm of the first one
ttalmsm2=rot2dalm(res1,4)[1] #pick up the spin -4 alm of the second one
m4_alm=ttalmsp2+ttalmsm2
return m4_alm | f2b6c7fe03a5dae34aaa58738684e37cf30efc01 | 254 |
import pandas as pd
def seasonality_plot_df(m, ds):
"""Prepare dataframe for plotting seasonal components.
Parameters
----------
m: Prophet model.
ds: List of dates for column ds.
Returns
-------
A dataframe with seasonal components on ds.
"""
df_dict = {'ds': ds, 'cap': 1., 'floor': 0.}
for name in m.extra_regressors:
df_dict[name] = 0.
# Activate all conditional seasonality columns
for props in m.seasonalities.values():
if props['condition_name'] is not None:
df_dict[props['condition_name']] = True
df = pd.DataFrame(df_dict)
df = m.setup_dataframe(df)
return df | ae362631659ec1652eb1798a73dce786cd269ee5 | 255 |
async def response(request: DiscoveryRequest, xds_type: DiscoveryTypes, host: str = 'none'):
"""
A Discovery **Request** typically looks something like:
.. code-block:: json
{
"version_info": "0",
"node": {
"cluster": "T1",
"build_version": "<revision hash>/<version>/Clean/RELEASE",
"metadata": {
"auth": "..."
}
}
}
When we receive this, we give the client the latest configuration via a
Discovery **Response** that looks something like this:
.. code-block:: json
{
"version_info": "abcdef1234567890",
"resources": []
}
The version_info is derived from :func:`sovereign.discovery.version_hash`
:param request: An envoy Discovery Request
:param xds_type: what type of XDS template to use when rendering
:param host: the host header that was received from the envoy client
:return: An envoy Discovery Response
"""
template: XdsTemplate = XDS_TEMPLATES.get(request.envoy_version, default_templates)[xds_type]
context = make_context(
node_value=extract_node_key(request.node),
template=template,
)
# If the discovery request came from a mock, it will
# typically contain this metadata key.
# This means we should prevent any decryptable data
# from ending up in the response.
if request.node.metadata.get('hide_private_keys'):
context['crypto'] = disabled_suite
config_version = '0'
if config.cache_strategy == 'context':
config_version = version_hash(context, template.checksum, request.node.common, request.resources)
if config_version == request.version_info:
return {'version_info': config_version}
kwargs = dict(
discovery_request=request,
host_header=host,
resource_names=request.resources,
**context
)
if template.is_python_source:
content = {'resources': list(template.code.call(**kwargs))}
else:
content = await template.content.render_async(**kwargs)
if config.cache_strategy == 'content':
config_version = version_hash(content)
if config_version == request.version_info:
return {'version_info': config_version}
# This is the most expensive operation, I think, so it's performed as late as possible.
if not template.is_python_source:
content = deserialize_config(content)
content['version_info'] = config_version
return remove_unwanted_resources(content, request.resources) | 3ffa2ec8c64dd479ea6ecf3494a2db23b95f2ef2 | 256 |
def count_inner_bags(content, start_color):
"""Count inner bags"""
rules = process_content(content)
bags = rules[start_color]
count = len(bags)
while len(bags) != 0:
new_bags = []
for bag in bags:
count += len(rules[bag])
new_bags += rules[bag]
bags = new_bags
return count | f6e188d548beaa5f1b24d96e6394c2bdbfaefd0b | 257 |
def build_generation_data(
egrid_facilities_to_include=None, generation_years=None
):
"""
Build a dataset of facility-level generation using EIA923. This
function will apply filters for positive generation, generation
efficiency within a given range, and a minimum percent of generation
from the primary fuel (if set in the config file). The returned
dataframe also includes the balancing authority for every power
plant.
Parameters
----------
egrid_facilities_to_include : list, optional
List of plant codes to include (default is None, which builds a list)
generation_years : list, optional
Years of generation data to include in the output (default is None,
which builds a list from the inventories of interest and eia_gen_year
parameters)
Returns
----------
DataFrame
Dataframe columns include:
['FacilityID', 'Electricity', 'Year']
"""
if not generation_years:
# Use the years from inventories of interest
generation_years = set(
list(inventories_of_interest.values()) + [eia_gen_year]
)
df_list = []
for year in generation_years:
gen_fuel_data = eia923_download_extract(year)
primary_fuel = eia923_primary_fuel(gen_fuel_data)
gen_efficiency = calculate_plant_efficiency(gen_fuel_data)
final_gen_df = gen_efficiency.merge(primary_fuel, on="Plant Id")
if not egrid_facilities_to_include:
if include_only_egrid_facilities_with_positive_generation:
final_gen_df = final_gen_df.loc[
final_gen_df["Net Generation (Megawatthours)"] >= 0, :
]
if filter_on_efficiency:
final_gen_df = efficiency_filter(final_gen_df)
if filter_on_min_plant_percent_generation_from_primary_fuel and not keep_mixed_plant_category:
final_gen_df = final_gen_df.loc[
final_gen_df["primary fuel percent gen"]
>= min_plant_percent_generation_from_primary_fuel_category,
:,
]
# if filter_non_egrid_emission_on_NAICS:
# # Check with Wes to see what the filter here is supposed to be
# final_gen_df = final_gen_df.loc[
# final_gen_df['NAICS Code'] == '22', :
# ]
else:
final_gen_df = final_gen_df.loc[
final_gen_df["Plant Id"].isin(egrid_facilities_to_include), :
]
ba_match = eia860_balancing_authority(year)
ba_match["Plant Id"] = ba_match["Plant Id"].astype(int)
final_gen_df["Plant Id"] = final_gen_df["Plant Id"].astype(int)
final_gen_df = final_gen_df.merge(ba_match, on="Plant Id", how="left")
final_gen_df["Year"] = int(year)
df_list.append(final_gen_df)
all_years_gen = pd.concat(df_list)
all_years_gen = all_years_gen.rename(
columns={
"Plant Id": "FacilityID",
"Net Generation (Megawatthours)": "Electricity",
}
)
all_years_gen = all_years_gen.loc[:, ["FacilityID", "Electricity", "Year"]]
all_years_gen.reset_index(drop=True, inplace=True)
all_years_gen["Year"] = all_years_gen["Year"].astype("int32")
return all_years_gen | 32a2f1757419e52b7d8ea5b198a70bfd36f7dd4c | 258 |
def get_nessus_scans():
"""Return a paginated list of Nessus scan reports.
**Example request**:
.. sourcecode:: http
GET /api/1.0/analysis/nessus?page=1 HTTP/1.1
Host: do.cert.europa.eu
Accept: application/json
**Example response**:
.. sourcecode:: http
HTTP/1.0 200 OK
Content-Type: application/json
Link: <.../api/1.0/analysis/nessus?page=1&per_page=20>; rel="First",
<.../api/1.0/analysis/nessus?page=0&per_page=20>; rel="Last"
{
"count": 3,
"items": [
{
"created": "2016-03-21T16:52:52",
"id": 4,
"report": "...",
"type": "Nessus scan"
},
{
"created": "2016-03-21T16:51:49",
"id": 3,
"report": "...",
"type": "Nessus scan"
},
{
"created": "2016-03-20T17:09:03",
"id": 2,
"report": "...",
"type": "Nessus scan"
}
],
"page": 1
}
:reqheader Accept: Content type(s) accepted by the client
:resheader Content-Type: this depends on `Accept` header or request
:resheader Link: Describe relationship with other resources
:>json array items: Nessus scan reports
:>jsonarr integer id: Scan unique ID
:>jsonarr object report: Scan report
:>json integer page: Current page number
:>json integer count: Total number of items
:status 200: Reports found
:status 404: Resource not found
"""
return ApiPagedResponse(Report.query.filter_by(type_id=4)) | 1828d42baff7e16c8dac6b1e5ab6c66c14834c3c | 259 |
def count_failures(runner):
    """Count number of failures in a doctest runner.
    Code modeled after the summarize() method in doctest.
    """
    try:
        from doctest import TestResults
    except ImportError:
        from _doctest26 import TestResults
    return [TestResults(f, t) for f, t in runner._name2ft.values() if f > 0] | 0e755114f5c23be0bdac11876f32bd2ac4ad9625 | 260 |
def fnv1_64(data, hval_init=FNV1_64_INIT):
"""
Returns the 64 bit FNV-1 hash value for the given data.
"""
return fnv(data, hval_init, FNV_64_PRIME, 2**64) | 3981677c02317f63ae62cab75ee0d5db2fec7dc2 | 261 |
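# Usage sketch for fnv1_64 above (not part of the original snippet). The `fnv` core
# and the two constants are not shown in the snippet; the values below are the
# standard FNV-1 64-bit parameters, and `fnv` is a plausible multiply-then-XOR
# implementation of the FNV-1 loop.
FNV_64_PRIME = 0x100000001b3
FNV1_64_INIT = 0xcbf29ce484222325
def fnv(data, hval_init, fnv_prime, fnv_size):
    # FNV-1: multiply the running hash by the prime, then XOR in each byte.
    hval = hval_init
    for byte in data:
        hval = (hval * fnv_prime) % fnv_size
        hval = hval ^ byte
    return hval
print(hex(fnv1_64(b"hello world")))  # deterministic 64-bit FNV-1 digest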
import os
import traceback
def recongnize_image_file(args, image_file, output_dir):
"""
遍历图片文件
:param input_dir:
:return:
"""
temp_name = image_file.split('/')[-1].split('.')[0]
textract_json = None
label_file = os.path.join( output_dir, temp_name + '.txt')
#print("label_file ", label_file)
#print("output_dir {} label_file {}".format(output_dir, label_file))
if os.path.exists(label_file):
try:
textract_json = recongnize_sub_image_file(args, image_file, label_file, output_dir)
except Exception as exception:
print("【Error】 图片[{}] 没有解析成功 ".format(image_file))
print('【Error】 exception [{}]'.format(exception))
traceback.print_exc()
else:
print("【Error】 图片[{}] 没有生成对应的label文件 [{}]".format(image_file, label_file))
return textract_json | 09dbf852e3243468b34901c50cff8ad5e2034d31 | 262 |
from collections import defaultdict
from pathlib import Path
from tqdm import tqdm
def parse_data_sp(source_root, parallel_roots, glob_str="**/*.wav", add_source=False):
"""
    Assumes that each parallel_root contains folders with the following structure:
PARALLEL_ROOT/record_17/IPhone 12 Pro Max/JBL CLIP3/distance=60-loudness=15-recording_mode=default/RELATIVE_PATH_TO_WAV_FROM_SOURCE
"""
data = defaultdict(list)
source_root = Path(source_root).resolve()
parallel_roots = [Path(parallel_root) for parallel_root in parallel_roots]
# print(parallel_roots)
_class_ind_maps = defaultdict(list)
source_pathes = list(source_root.glob(glob_str))
if add_source:
_class_ind_maps["spoofing"] = ["genuine", "spoof"]
for source_path in tqdm(source_pathes):
for parallel_root in parallel_roots:
playback_device = parallel_root.parts[-2].lower().replace(" ", "")
recording_device = parallel_root.parts[-3].lower().replace(" ", "")
# print(f"{playback_device}, {recording_device}")
if not (playback_device in _class_ind_maps["playback_device"]):
_class_ind_maps["playback_device"].append(playback_device)
if not (recording_device in _class_ind_maps["recording_device"]):
_class_ind_maps["recording_device"].append(recording_device)
source_rlp = source_path.relative_to(source_root)
parallel_path = parallel_root / source_rlp
if parallel_path.exists():
data[source_path].append({
"path": parallel_path,
"spoofing": "spoof",
"playback_device": playback_device,
"recording_device": recording_device
})
if add_source:
if len(data[source_path]) > 0:
data[source_path].insert(
0, {
"path": source_path,
"spoofing": "genuine",
"playback_device": None,
"recording_device": None
})
class_ind_maps = defaultdict(dict)
print(_class_ind_maps)
for task_name, task_classes in _class_ind_maps.items():
for cls_ind, cls_name in enumerate(sorted(task_classes)):
class_ind_maps[task_name][cls_name] = cls_ind
return data, class_ind_maps | d23613c63d904ca5adb23c10c670ddab2d4148e7 | 263 |
def heat_diffusion(A, t, L, k, eps=0.0001):
"""
Computes the heat diffusion equation
Parameters
----------
A : Tensor or SparseTensor
the (N,N,) density matrix
t : float
the diffusion time
L : Tensor or SparseTensor
the (N,N,) Laplacian matrix
k : Tensor
the (N,D,) initial heat tensor
eps : float (optional)
a regularizer value (default is 0.0001)
Returns
-------
Tensor
the (N,D,) heat tensor
"""
return poisson_equation(A+t*L, k, eps=eps) | 56ee07ed473463116b045700e4923218d72b5aca | 264 |
from typing import Optional
def unpack_ad_info(ad_info: dict, param_name: str) -> Optional[bytes]:
    """Check that the expected structure is present and return the value."""
    # This cannot be written more elegantly, because the conditions must be applied sequentially.
if (
isinstance(ad_info, dict)
and ad_info.get(param_name) # noqa: W503
and isinstance(ad_info[param_name], list) # noqa: W503
and isinstance(ad_info[param_name][0], bytes) # noqa: W503
):
return ad_info[param_name][0]
return None | 85a6c95bac7e35bed4f478b352b2a56203818139 | 265 |
def _read_file(file, sheet_name=0):
"""
Helper function used to read the file and return a pandas dataframe.
Checks if file type is a .csv or excel. If not,
returns a ValueError.
Parameters
----------
file : str
the name of the file, including the filetype extension
sheet_name : int, optional
if passing an excel file, the name of the sheet to analyze,
by default 0
Returns
-------
pandas.Dataframe
pandas dataframe containing data from file
"""
if file.endswith('.csv'):
df = pd.read_csv(file)
else:
try:
df = pd.read_excel(file, sheet_name=sheet_name)
except XLRDError:
raise ValueError("Please use a valid csv or excel file.")
return df | fbe9212084062233ca2073af57b401afc9532701 | 266 |
def get_school_total_students(school_id, aug_school_info):
"""
Gets total number of students associated with a school.
Args:
        school_id (str): NCES ID of the target school.
aug_school_info (pandas.DataFrame): Target augmented school information
(as formatted by `auxiliary.data_handler.DataHandler`).
Returns:
int: Single number comprising school-level data.
"""
return int(aug_school_info.loc[school_id]["total_students"]) | d0d2ea36a2e3f4b47992aea9cc0c18c5ba7e0ff3 | 267 |
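# A hedged usage sketch: `aug_school_info` is assumed to be indexed by school ID
# with a "total_students" column, which is what the .loc lookup above expects.
# The IDs and counts below are made up for illustration.
import pandas as pd
aug_school_info = pd.DataFrame(
    {"total_students": [850.0, 1200.0]},
    index=["school_a", "school_b"],
)
print(get_school_total_students("school_a", aug_school_info))  # 850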
def loci_adjust(ds, *, group, thresh, interp):
"""LOCI: Adjust on one block.
Dataset variables:
hist_thresh : Hist's equivalent thresh from ref
sim : Data to adjust
"""
sth = u.broadcast(ds.hist_thresh, ds.sim, group=group, interp=interp)
factor = u.broadcast(ds.af, ds.sim, group=group, interp=interp)
with xr.set_options(keep_attrs=True):
scen = (factor * (ds.sim - sth) + thresh).clip(min=0)
return scen.rename("scen").to_dataset() | 2bb833a33bf32ed308137342007f7c62acfabe82 | 268 |
def _ClientThread(client_ip, client_user, client_pass, mvip, username, password, purge):
"""delete the volumes for a client, run as a thread"""
log = GetLogger()
SetThreadLogPrefix(client_ip)
log.info("Connecting to client")
client = SFClient(client_ip, client_user, client_pass)
account_name = client.HostnameToAccountName()
cluster = SFCluster(mvip, username, password)
try:
match_volumes = cluster.SearchForVolumes(accountName=account_name)
except UnknownObjectError:
log.passed("Account is already deleted")
return True
if len(list(match_volumes.keys())) <= 0:
log.passed("No volumes to delete")
return True
log.info("Deleting {} volumes".format(len(list(match_volumes.keys()))))
cluster.DeleteVolumes(volumeIDs=list(match_volumes.keys()), purge=purge)
log.passed("Successfully deleted volumes") | 258531ac383271c1a637b38ed3ff4f7a358c1dbc | 269 |
import json
def objective(args: Namespace, trial: optuna.trial._trial.Trial) -> float:
"""Objective function for optimization trials.
Args:
args (Namespace): Input arguments for each trial (see `config/args.json`) for argument names.
trial (optuna.trial._trial.Trial): Optuna optimization trial.
Returns:
F1 score from evaluating the trained model on the test data split.
"""
    # Parameters (to tune)
args.embedding_dim = trial.suggest_int("embedding_dim", 128, 512)
args.num_filters = trial.suggest_int("num_filters", 128, 512)
args.hidden_dim = trial.suggest_int("hidden_dim", 128, 512)
args.dropout_p = trial.suggest_uniform("dropout_p", 0.3, 0.8)
args.lr = trial.suggest_loguniform("lr", 5e-5, 5e-4)
# Train (can move some of these outside for efficiency)
logger.info(f"\nTrial {trial.number}:")
logger.info(json.dumps(trial.params, indent=2))
artifacts = run(args=args, trial=trial)
# Set additional attributes
args = artifacts["args"]
performance = artifacts["performance"]
logger.info(json.dumps(performance["overall"], indent=2))
trial.set_user_attr("threshold", args.threshold)
trial.set_user_attr("precision", performance["overall"]["precision"])
trial.set_user_attr("recall", performance["overall"]["recall"])
trial.set_user_attr("f1", performance["overall"]["f1"])
return performance["overall"]["f1"] | 629711996034664654430fba8af541fc934e143a | 270 |
def read_gdwarfs(file=_GDWARFALLFILE,logg=False,ug=False,ri=False,sn=True,
ebv=True,nocoords=False):
"""
NAME:
read_gdwarfs
PURPOSE:
read the spectroscopic G dwarf sample
INPUT:
logg= if True, cut on logg, if number, cut on logg > the number (>4.2)
ug= if True, cut on u-g, if list/array cut to ug[0] < u-g< ug[1]
ri= if True, cut on r-i, if list/array cut to ri[0] < r-i< ri[1]
sn= if False, don't cut on SN, if number cut on SN > the number (15)
ebv= if True, cut on E(B-V), if number cut on EBV < the number (0.3)
nocoords= if True, don't calculate distances or transform coordinates
OUTPUT:
cut data, returns numpy.recarray
HISTORY:
2011-07-08 - Written - Bovy@MPIA (NYU)
"""
raw= _load_fits(file)
#First cut on r
indx= (raw.field('dered_r') < 20.2)*(raw.field('dered_r') > 14.5)
raw= raw[indx]
#Then cut on g-r
indx= ((raw.field('dered_g')-raw.field('dered_r')) < 0.55)\
*((raw.field('dered_g')-raw.field('dered_r')) > .48)
raw= raw[indx]
#Cut on velocity errs
indx= (raw.field('pmra_err') > 0.)*(raw.field('pmdec_err') > 0.)\
*(raw.field('vr_err') > 0.)
raw= raw[indx]
#Cut on logg?
if (isinstance(logg,bool) and logg):
indx= (raw.field('logga') > 4.2)
raw= raw[indx]
elif not isinstance(logg,bool):
indx= (raw.field('logga') > logg)
raw= raw[indx]
if isinstance(ug,bool) and ug:
indx= ((raw.field('dered_u')-raw.field('dered_g')) < 2.)\
*((raw.field('dered_u')-raw.field('dered_g')) > .6)
raw= raw[indx]
if not isinstance(ug,bool):
indx= ((raw.field('dered_u')-raw.field('dered_g')) < ug[1])\
*((raw.field('dered_u')-raw.field('dered_g')) > ug[0])
raw= raw[indx]
if isinstance(ri,bool) and ri:
indx= ((raw.field('dered_r')-raw.field('dered_i')) < .4)\
*((raw.field('dered_r')-raw.field('dered_i')) > -.1)
raw= raw[indx]
elif not isinstance(ri,bool):
indx= ((raw.field('dered_r')-raw.field('dered_i')) < ri[1])\
*((raw.field('dered_r')-raw.field('dered_i')) > ri[0])
raw= raw[indx]
if (isinstance(sn,bool) and sn):
indx= (raw.field('sna') > 15.)
raw= raw[indx]
elif not isinstance(sn,bool):
indx= (raw.field('sna') > sn)
raw= raw[indx]
if isinstance(ebv,bool) and ebv:
indx= (raw.field('ebv') < .3)
raw= raw[indx]
elif not isinstance(ebv,bool):
indx= (raw.field('ebv') < ebv)
raw= raw[indx]
if nocoords: return raw
raw= _add_distances(raw)
raw= _add_velocities(raw)
return raw | da073917d825dac283d8157dec771036588b0cec | 271 |
def add_item(category_slug=None):
"""
Add a new Item Form.
:param category_slug: The category slug
"""
# Get the current category using the slug
current_category = Category.where('slug', category_slug).first()
return render_template(
'items/add.html',
categories=Category.all(),
current_category=current_category
) | 6bbaabdab6da6290c1de17ecbc36e849b7fd4c5e | 272 |
def track_type(time, lat, tmax=1):
"""
Determines ascending and descending tracks.
Defines unique tracks as segments with time breaks > tmax,
and tests whether lat increases or decreases w/time.
"""
# Generate track segment
tracks = np.zeros(lat.shape)
# Set values for segment
tracks[0:np.argmax(np.abs(lat))] = 1
# Output index array
i_asc = np.zeros(tracks.shape, dtype=bool)
# Loop trough individual tracks
for track in np.unique(tracks):
# Get all points from an individual track
i_track, = np.where(track == tracks)
# Test tracks length
if len(i_track) < 2:
continue
# Test if lat increases (asc) or decreases (des) w/time
i_min = time[i_track].argmin()
i_max = time[i_track].argmax()
lat_diff = lat[i_track][i_max] - lat[i_track][i_min]
# Determine track type
if lat_diff > 0:
i_asc[i_track] = True
# Output index vector's
return i_asc, np.invert(i_asc) | 5872deaf6ff5d5e651705f40b8ad3df192ec98de | 273 |
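# A small usage sketch with synthetic data (not from the original module): latitude
# rises over the first part of the pass and falls afterwards, so the first segment
# is flagged ascending and the second descending.
import numpy as np
time = np.linspace(0.0, 10.0, 11)
lat = np.concatenate([np.linspace(0.0, 5.0, 6), np.linspace(4.0, 0.0, 5)])
i_asc, i_des = track_type(time, lat)
print(i_asc.sum(), i_des.sum())  # number of ascending / descending samples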
def get_installed_procnames():
"""Get a list of procs currently on the file system."""
return set(get_procs()) | 6f3f83d579033ef407c72ccbd8d973f39d41ae22 | 274 |
def pam_bw_as_matrix(buff, border):
"""\
Returns the QR code as list of [0, 1] lists.
:param io.BytesIO buff: Buffer to read the matrix from.
:param int border: The QR code border
"""
res = []
data, size = _image_data(buff)
for i, offset in enumerate(range(0, len(data), size)):
if i < border:
continue
if i >= size - border:
break
row_data = bytearray(data[offset + border:offset + size - border])
# Invert bytes since PAM uses 0x0 = black, 0x1 = white
res.append([b ^ 0x1 for b in row_data])
return res | 0360ee9d9e22fd667bc80063bd799fbaa2cb3a44 | 275 |
def delete_task(task_id: int):
"""Remove task with associated ID from the database."""
send_to_login = ensure_login()
if send_to_login:
return send_to_login
else:
old_task = Task.delete(task_id)
flash(f'You deleted "{old_task.title}"', "info")
return redirect(url_for("task.view_task_list")) | 976c8aedc47ca342809d82904c4a1eab31e8886f | 276 |
def KDPReboot(cmd_args=None):
""" Restart the remote target
"""
if "kdp" != GetConnectionProtocol():
print "Target is not connected over kdp. Nothing to do here."
return False
print "Rebooting the remote machine."
lldb.debugger.HandleCommand('process plugin packet send --command 0x13')
lldb.debugger.HandleCommand('detach')
return True | bcd4bab8fcb3abb1f512b349ce8468e7a09ceab0 | 277 |
def get_version(): # noqa: E501
"""API version
The API version # noqa: E501
:rtype: str
"""
return '1.0.0' | 75df6627bb2aaec205a0679d86c190d7b861baf5 | 278 |
import numpy as np
from tqdm import tqdm
import torch
def bert_evaluate(model, eval_dataloader, device):
"""Evaluation of trained checkpoint."""
model.to(device)
model.eval()
predictions = []
true_labels = []
data_iterator = tqdm(eval_dataloader, desc="Iteration")
for step, batch in enumerate(data_iterator):
input_ids, input_mask, labels = batch
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
with torch.no_grad():
outputs = model(input_ids, token_type_ids=None, attention_mask=input_mask)
#loss is only output when labels are provided as input to the model ... real smooth
logits = outputs[0]
print(type(logits))
logits = logits.to('cpu').numpy()
label_ids = labels.to('cpu').numpy()
for label, logit in zip(label_ids, logits):
true_labels.append(label)
predictions.append(np.argmax(logit))
#print(predictions)
#print(true_labels)
metrics = get_metrics(true_labels, predictions)
return metrics | 3534796f06a89378dec9c23788cb52f75d088423 | 279 |
def get_cached_scts(hex_ee_hash):
""" get_cached_scts returns previously fetched valid SCT from this certificate. The key to perform this search is
the hex-encoded hash of the end-entity certificate
:param hex_ee_hash: the hex-encoded hash of the end-entity certificate
:return: a dictionary of SCTs where the keys are the log URL
"""
c = dbconn.cursor()
c.execute('''
SELECT logs.log, scts.sct
FROM certs
INNER JOIN scts
ON certs.id = scts.cert_id
INNER JOIN logs
ON scts.log_id = logs.id
WHERE certs.ee_hash = ?
AND scts.valid = 1
''', (hex_ee_hash,))
return {
log: {'sct': sct, 'valid': True}
for (log, sct) in c.fetchall()
} | 5f86f1ccd7488f9712b16d1c71077ceb73098ea3 | 280 |
import torch
def all_gather_multigpu(
output_tensor_lists, input_tensor_list, group=None, async_op=False
):
"""
Gathers tensors from the whole group in a list.
Each tensor in ``tensor_list`` should reside on a separate GPU
Only nccl backend is currently supported
tensors should only be GPU tensors
Complex tensors are supported.
Args:
output_tensor_lists (List[List[Tensor]]): Output lists. It should
contain correctly-sized tensors on each GPU to be used for output
of the collective, e.g. ``output_tensor_lists[i]`` contains the
all_gather result that resides on the GPU of
``input_tensor_list[i]``.
Note that each element of ``output_tensor_lists`` has the size of
``world_size * len(input_tensor_list)``, since the function all
gathers the result from every single GPU in the group. To interpret
each element of ``output_tensor_lists[i]``, note that
``input_tensor_list[j]`` of rank k will be appear in
``output_tensor_lists[i][k * world_size + j]``
Also note that ``len(output_tensor_lists)``, and the size of each
element in ``output_tensor_lists`` (each element is a list,
therefore ``len(output_tensor_lists[i])``) need to be the same
for all the distributed processes calling this function.
input_tensor_list (List[Tensor]): List of tensors(on different GPUs) to
be broadcast from current process.
Note that ``len(input_tensor_list)`` needs to be the same for
all the distributed processes calling this function.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
async_op (bool, optional): Whether this op should be an async op
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group
"""
if _rank_not_in_group(group):
return
output_tensor_lists = [
[t if not t.is_complex() else torch.view_as_real(t) for t in l]
for l in output_tensor_lists
]
input_tensor_list = [
t if not t.is_complex() else torch.view_as_real(t) for t in input_tensor_list
]
if group is None:
default_pg = _get_default_group()
work = default_pg.allgather(output_tensor_lists, input_tensor_list)
else:
work = group.allgather(output_tensor_lists, input_tensor_list)
if async_op:
return work
else:
work.wait() | e948709a209877c0d994699106e06bd13ddb46a7 | 281 |
import uuid
def get_uuid_from_str(input_id: str) -> str:
"""
Returns an uuid3 string representation generated from an input string.
:param input_id:
:return: uuid3 string representation
"""
return str(uuid.uuid3(uuid.NAMESPACE_DNS, input_id)) | 51ce9ceab7c4f9d63d45fbee93286711bcba3093 | 282 |
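# Quick usage check (illustrative input string): uuid3 is deterministic, so equal
# inputs always yield the same identifier.
print(get_uuid_from_str("example-record-001"))
print(get_uuid_from_str("example-record-001") == get_uuid_from_str("example-record-001"))  # True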
import requests
def extend_request(request_id=None, workload_id=None, lifetime=30):
"""
extend an request's lifetime.
:param request_id: The id of the request.
:param workload_id: The workload_id of the request.
    :param lifetime: The lifetime as number of days.
"""
return requests.extend_request(request_id=request_id, workload_id=workload_id, lifetime=lifetime) | 4b5c523f1af2b1c7c6f55bf522bb2c32e0f14995 | 283 |
def from_bytes(buf: bytes) -> str:
"""Return MIME type from content in form of bytes-like type.
Example:
>>> import defity
>>> defity.from_bytes(b'some-binary-content')
'image/png'
"""
_guard_buf_arg(buf)
# We accept many input data types just for user's convenience. We still convert
# it to immutable bytes to pass down to Rust function.
return _mod.from_bytes(bytes(buf)) | 5b997bc8d9b6d5e3fc7e38c5956bcefe3f0244cc | 284 |
def pl__5__create_train_frame_sequences(ctvusts_by_tcp__lte_1, frame_sequences__by__tcpctvustsfs, train_tcpctvustsfs__gt__1):
"""
returns:
train_tcpctvustsfs__all
(
<TokenID>,
<CameraPerspective>,
<ASLConsultantID>,
<TargetVideoFilename>,
<UtteranceSequence>,
<TokenSequence>,
<FrameSequence>
)
"""
train__ctvusts_by_tcp__lte_1__keys = (
ctvusts_by_tcp__lte_1
| "Beam PL: extract ((TokenID,CameraPerspective,ASLConsultantID,TargetVideoFilename,UtteranceSequence,TokenSequence), '<ctvusts_by_tcp__lte_1_tpl__has_key>') for join to tcpctvustsfs" >> beam.Map(
lambda ctvusts_by_tcp__lte_1_tpl : (
(
ctvusts_by_tcp__lte_1_tpl[0], # TokenID
ctvusts_by_tcp__lte_1_tpl[1], # CameraPerspective
ctvusts_by_tcp__lte_1_tpl[2], # ASLConsultantID
ctvusts_by_tcp__lte_1_tpl[3], # TargetVideoFilename
ctvusts_by_tcp__lte_1_tpl[4], # UtteranceSequence
ctvusts_by_tcp__lte_1_tpl[5] # TokenSequence
),
"<ctvusts_by_tcp__lte_1_tpl__has_key>"
)
)
)
train_tcpctvustsfs__lte_1 = (
({
'has_key': train__ctvusts_by_tcp__lte_1__keys,
'frame_sequences': frame_sequences__by__tcpctvustsfs
})
| "Beam PL: join ctvusts_by_tcp__lte_1 to tcpctvustsfs" >> beam.CoGroupByKey()
# the above produces tuples of the form:
# (
# (
# <TokenID>,
# <CameraPerspective>,
# <ASLConsultantID>,
# <TargetVideoFilename>,
# <UtteranceSequence>,
# <TokenSequence>
# ),
# {
# 'has_key': listof('<ctvusts_by_tcp__lte_1_tpl__has_key>'), # should have only one/single element
# 'frame_sequences': listof(<FrameSequence>) # many
# }
# )
| "Beam PL: filter out mismatches from joined train__ctvusts_by_tcp__lte_1 to tcpctvustsfs" >> beam.Filter(
lambda joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl:
len(joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[1]['has_key'])>0 and \
len(joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[1]['frame_sequences'])>0
)
| "Beam PL: 'explode' listof(<FrameSequence>) from joined train__ctvusts_by_tcp__lte_1 to tcpctvustsfs to list of tuples" >> beam.Map(
lambda joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl: [
(
joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[0][0], # TokenID
joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[0][1], # CameraPerspective
joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[0][2], # ASLConsultantID
joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[0][3], # TargetVideoFilename
joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[0][4], # UtteranceSequence
joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[0][5], # TokenSequence
frame_seq
) for frame_seq in sorted(joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[1]['frame_sequences'])
]
)
| "Beam PL: 'explode' listof((TokenID,CameraPerspective,ASLConsultantID,TargetVideoFilename,UtteranceSequence,TokenSequence, FrameSequence)) from joined ttrain__ctvusts_by_tcp__lte_1 to tcpctvustsfs" >> beam.FlatMap(
lambda list_joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl: list_joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl
)
)
train_tcpctvustsfs__all = (
(train_tcpctvustsfs__gt__1, train_tcpctvustsfs__lte_1)
| f"Beam PL: merge train_tcpctvustsfs__gt__1 with train_tcpctvustsfs__lte_1" >> beam.Flatten()
)
return train_tcpctvustsfs__all | 1824b26a449a24ae02e72c1fe1fc6931c9658875 | 285 |
def createList(value, n):
"""
@param value: value to initialize the list
@param n: list size to be created
@return: size n list initialized to value
"""
    return [value for i in range(n)] | ff419e6c816f9b916a156e21c68fd66b36de9cfb | 286 |
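# Usage sketch for createList above. Note that `[value] * n` is the idiomatic
# equivalent; like the comprehension, it repeats the same object n times rather
# than copying it.
print(createList(0, 5))    # [0, 0, 0, 0, 0]
print(createList("x", 3))  # ['x', 'x', 'x']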
def label_class_num(label):
"""
标签的种类
:param label:
:return:
"""
return label.shape[1] | d3b9f6e7b84c10af289878587d7f36bf18147b9e | 287 |
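# Example for label_class_num above (illustrative data): a batch of four one-hot
# labels over three classes.
import numpy as np
label = np.eye(3)[[0, 2, 1, 0]]   # shape (4, 3)
print(label_class_num(label))     # 3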
def heur(puzzle, item_total_calc, total_calc):
"""
Heuristic template that provides the current and target position for each number and the
total function.
Parameters:
puzzle - the puzzle
item_total_calc - takes 4 parameters: current row, target row, current col, target col.
Returns int.
total_calc - takes 1 parameter, the sum of item_total_calc over all entries, and returns int.
This is the value of the heuristic function
"""
t = 0
for row in range(3):
for col in range(3):
val = puzzle.peek(row, col) - 1
target_col = val % 3
            target_row = val // 3  # integer division; val / 3 would give a float in Python 3
# account for 0 as blank
if target_row < 0:
target_row = 2
t += item_total_calc(row, target_row, col, target_col)
return total_calc(t) | bed67110858733a20b89bc1aacd6c5dc3ea04e13 | 288 |
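# A hedged example of how this template is typically instantiated: per-tile
# Manhattan distance with an identity total gives the classic 8-puzzle heuristic.
# `puzzle` is assumed to expose peek(row, col) returning the tile value, as the
# loop above implies; no puzzle class is defined in this snippet.
def manhattan_heuristic(puzzle):
    return heur(
        puzzle,
        lambda row, target_row, col, target_col: abs(target_row - row) + abs(target_col - col),
        lambda total: total,
    )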
def make_argparse_help_safe(s):
"""Make strings safe for argparse's help.
    Argparse treats '%' as the start of a format template, which is not always wanted.
    Escape user-supplied strings so they are safe to use as help text.
"""
return s.replace('%', '%%').replace('%%%', '%%') | 3a1e6e072a8307df884e39b5b3a0218678d08462 | 289 |
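# Usage sketch for make_argparse_help_safe above: a literal percent sign in
# user-supplied help text would otherwise be treated as the start of an argparse
# format template.
print(make_argparse_help_safe("keep 5% of samples"))  # 'keep 5%% of samples'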
import os
def _file_content_hash(file_name, encoding, database=None, newline=None):
"""
Returns the file content as well as the hash of the content
Use the database to keep a persistent cache of the last content
hash. If the file modification date has not changed assume the
hash is the same and do not re-open the file.
"""
if database is None:
content = read_file(file_name, encoding=encoding, newline=newline)
return content, hash_string(content)
key = f"cached._file_content_hash({file_name!s}, newline={newline!s})".encode()
if key not in database:
content = read_file(file_name, encoding=encoding, newline=newline)
content_hash = hash_string(content)
timestamp = os.path.getmtime(file_name)
database[key] = timestamp, content_hash
return content, content_hash
timestamp = os.path.getmtime(file_name)
last_timestamp, last_content_hash = database[key]
if timestamp != last_timestamp:
content = read_file(file_name, encoding=encoding, newline=newline)
content_hash = hash_string(content)
database[key] = timestamp, content_hash
return content, content_hash
return None, last_content_hash | eb544ea82ce6baeb8260627d15da3bd5b8e964ba | 290 |
import tempfile
import os
def leaderboard(avatars, usernames, levels):
"""
Draw the leaderboard. Return the path of the image.
The path points to a temporary file with extension png. The caller
is responsible for removing this temporary file.
avatars is 10 top users' avatar images. It should be a list of
BytesIO or path openable by PIL.Image.open()
usernames is 10 top users' usernames.
levels is 10 top users' levels.
"""
template = Image.open("assets/leaderboard_template.png")
canvas = ImageDraw.Draw(template)
iterator = enumerate(zip(avatars, usernames, levels))
for i, (avatar, username, level) in iterator:
offset_y = 75 * i
avatar_img = Image.open(avatar).resize((66, 66))
template.paste(avatar_img, (5, 99 + offset_y))
template.paste(AVATAR_MASK_66, (5, 99 + offset_y), AVATAR_MASK_66)
canvas.text((175, 113 + offset_y), username, font=UBUNTU_31)
canvas.text((565, 115 + offset_y), f"Level: {level}", font=UBUNTU_25)
fd, filename = tempfile.mkstemp(suffix=".png")
os.close(fd)
template.save(filename)
template.close()
return filename | a8607ab75856a208a1e950ddde7b8253173db34c | 291 |
def create_pysm_commands(
mapfile,
nside,
bandcenter_ghz,
bandwidth_ghz,
beam_arcmin,
coord,
mpi_launch,
mpi_procs,
mpi_nodes,
):
"""
Return lines of shell code to generate the precomputed input sky map.
"""
mpistr = "{}".format(mpi_launch)
if mpi_procs != "":
mpistr = "{} {} 1".format(mpistr, mpi_procs)
if mpi_nodes != "":
mpistr = "{} {} 1".format(mpistr, mpi_nodes)
outstr = "# Create sky model\n"
outstr = '{}if [ ! -e "{}" ]; then\n'.format(outstr, mapfile)
outstr = '{} echo "Creating sky model {} ..."\n'.format(outstr, mapfile)
outstr = '{} {} ./pysm_sky.py --output "{}" --nside {} --bandcenter_ghz {} --bandwidth_ghz {} --beam_arcmin {} --coord {}\n'.format(
outstr,
mpistr,
mapfile,
nside,
bandcenter_ghz,
bandwidth_ghz,
beam_arcmin,
coord,
)
outstr = "{}fi\n".format(outstr)
outstr = "{}\n".format(outstr)
return outstr | f0528968096f41a291a369477d8e2071f4b52339 | 292 |
def edits1(word):
""" All edits that are one edit away from `word`. """
letters = 'abcdefghijklmnopqrstuvwxyz'
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R)>1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts) | ec78ba3648e04c59b380cd37760b7865e5f364ea | 293 |
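# Usage sketch for edits1 above: the candidate set contains every string one edit
# away from the input (deletes, transposes, replaces and inserts, deduplicated by
# the set).
candidates = edits1("cat")
print("hat" in candidates, "cart" in candidates, "ct" in candidates)  # True True True
print(len(candidates))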
def process_text_cn(text: str):
"""中文文本处理"""
text = del_white_chars(text)
text = sub_punctuation(text)
return text | 18fd44ee2c5929fd6fe3c4aef9fde332773b016c | 294 |
import math
def total_elastic_cross_section_browning1994_cm2(atomic_number, energy_keV):
"""
From browning1994
Valid in the range 100 eV to 30 keV for elements 1 to 92.
"""
Z = atomic_number
E = energy_keV
factor = 3.0e-18
power_z = math.pow(Z, 1.7)
power_e = math.pow(E, 0.5)
nominator = factor*power_z
denominator = E + 0.005 * power_z * power_e + 0.0007 * Z * Z / power_e
cross_section_cm2 = nominator/denominator
return cross_section_cm2 | bf12a49e3aba07a44e44bfb6df87212745fd5ed3 | 295 |
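# Example evaluation inside the stated validity range (Z = 1..92, 0.1-30 keV):
# the total elastic cross section for carbon (Z = 6) at 10 keV, in cm^2.
sigma_cm2 = total_elastic_cross_section_browning1994_cm2(atomic_number=6, energy_keV=10.0)
print("{:.3e} cm2".format(sigma_cm2))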
def plot_fancy(nodes, elems, phi=None, charge=None, u=None, charge_max=None,
show=False, save=None, num_intp=100, title=None, clabel=None,
animation_mode=True, latex=False):
""" Plots fancily. """
if animation_mode:
fig = Figure(colorbar=False, tight_layout=True, show=show,
xlabel="", ylabel="", save=save, ticks=False, latex=latex)
else:
fig = Figure(colorbar=True, tight_layout=False, show=show,
xlabel=tex_escape("x"), ylabel=tex_escape("y"),
save=save, ticks=True, latex=latex)
if phi is None:
phi = -np.ones(len(nodes))
if charge is None:
charge = np.zeros(len(nodes))
if charge_max is None:
charge_max = max(np.max(np.abs(charge)), 1e-10)
cmap = plt.cm.get_cmap('Greys')
cmap._init()
cmap._lut[:, :] = 0.
length = len(cmap._lut[:, -1])
# cmap._lut[:, -1] = np.linspace(0., 1.0, length)
cmap._lut[:length//2, -1] = 0.
cmap._lut[length//2:, -1] = 1.
phi[phi > 1.] = 1.
phi[phi < -1.] = -1.
plt.tripcolor(nodes[:, 0], nodes[:, 1], elems, charge,
cmap=plt.get_cmap("coolwarm"), shading="gouraud",
vmin=-charge_max, vmax=charge_max)
plt.tricontourf(nodes[:, 0], nodes[:, 1], elems, phi,
cmap=cmap, levels=[-2.0, 0., 2.0], antialiased=True)
if u is not None:
Lx = nodes[:, 0].max()-nodes[:, 0].min()
Ly = nodes[:, 1].max()-nodes[:, 1].min()
dx = max(Lx, Ly)/num_intp
Nx = int(Lx/dx)
Ny = int(Ly/dx)
x_i, y_i = np.meshgrid(
np.linspace(dx+nodes[:, 0].min(),
nodes[:, 0].max()-dx, Nx),
np.linspace(dx+nodes[:, 1].min(),
nodes[:, 1].max()-dx, Ny))
triang = mtri.Triangulation(nodes[:, 0], nodes[:, 1], elems)
ux_interp = mtri.LinearTriInterpolator(triang, u[:, 0])
uy_interp = mtri.LinearTriInterpolator(triang, u[:, 1])
phi_interp = mtri.LinearTriInterpolator(triang, phi)
ux_i = ux_interp(x_i, y_i)
uy_i = uy_interp(x_i, y_i)
phi_i = phi_interp(x_i, y_i)
ux_i = np.array(ux_i.filled(0.))
uy_i = np.array(uy_i.filled(0.))
phi_i = np.array(phi_i.filled(0.))
u_norm = np.sqrt(ux_i**2 + uy_i**2)
lw = np.zeros_like(ux_i)
lw[:] += 5*u_norm/(u_norm.max() + 1e-10)
mask = np.zeros(ux_i.shape, dtype=bool)
mask[phi_i > 0.] = True
ux_i_2 = np.ma.array(ux_i, mask=mask)
fig.ax.streamplot(x_i, y_i,
ux_i_2, uy_i,
color="k",
density=0.6,
linewidth=lw)
mask = np.zeros(ux_i.shape, dtype=bool)
mask[phi_i < 0.] = True
ux_i_2 = np.ma.array(ux_i, mask=mask)
fig.ax.streamplot(x_i, y_i,
ux_i_2, uy_i,
color="w",
density=0.6,
linewidth=lw)
return fig | a334c581b9601c73a1b003aec14f4f179dab5202 | 296 |
def buildModelGPT(modelType='gpt2-medium'):
"""
This function builds the model of the function und returns it based on GPT
"""
## Create Model
# Load pre-trained model tokenizer (vocabulary)
tokenizer = GPT2Tokenizer.from_pretrained(modelType)
# Load pre-trained model (weights)
model = GPT2LMHeadModel.from_pretrained(modelType)
# Set the model in evaluation mode to deactivate the DropOut modules
# This is IMPORTANT to have reproducible results during evaluation!
model.eval()
return model, tokenizer | 51b2dca333a06ed9168d3056b681b1ed192c5761 | 297 |
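# A hedged usage sketch for buildModelGPT above, assuming the Hugging Face
# `transformers` versions of GPT2Tokenizer/GPT2LMHeadModel (weights are downloaded
# on first use). The prompt text is arbitrary.
import torch
model, tokenizer = buildModelGPT(modelType="gpt2")  # smaller checkpoint for a quick test
input_ids = tokenizer.encode("The weather today is", return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(input_ids, max_length=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))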
import cv2 as cv
def vid_to_list(filepath):
"""
Converts a video file to a list of 3d arrays of dim (h, w, c)
Input:
filepath: (str) full filepath of video
Output:
        list_of_frames: (list) list of 3d numpy arrays, each of shape (height, width, color)
"""
cap = cv.VideoCapture(filepath)
list_of_frames = []
while True:
ret, frame = cap.read()
if ret:
frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
list_of_frames.append(frame)
else:
break
return list_of_frames | 062423bcf10749705e767edf6721dd20903653ef | 298 |
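# Usage sketch for vid_to_list above ("sample_video.mp4" is a hypothetical local
# file): frames come back as RGB uint8 arrays, one per video frame.
frames = vid_to_list("sample_video.mp4")
if frames:
    print(len(frames), frames[0].shape)  # e.g. number of frames and (height, width, 3)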
import pickle
def from_pickle(fname=interpolator_path):
"""Loads grid inperpolator from pickle located at `fname`.
"""
with open(fname, "rb") as f:
grid = pickle.load(f)
return grid | 5b4f2a94ba3024ea63a5859284f1b6877bac1623 | 299 |