Schema (field: dtype, observed range):

hexsha: stringlengths, 40 to 40
size: int64, 2 to 1.02M
ext: stringclasses, 10 values
lang: stringclasses, 1 value
max_stars_repo_path: stringlengths, 4 to 245
max_stars_repo_name: stringlengths, 6 to 130
max_stars_repo_head_hexsha: stringlengths, 40 to 40
max_stars_repo_licenses: listlengths, 1 to 10
max_stars_count: int64, 1 to 191k
max_stars_repo_stars_event_min_datetime: stringlengths, 24 to 24
max_stars_repo_stars_event_max_datetime: stringlengths, 24 to 24
max_issues_repo_path: stringlengths, 4 to 245
max_issues_repo_name: stringlengths, 6 to 130
max_issues_repo_head_hexsha: stringlengths, 40 to 40
max_issues_repo_licenses: listlengths, 1 to 10
max_issues_count: int64, 1 to 67k
max_issues_repo_issues_event_min_datetime: stringlengths, 24 to 24
max_issues_repo_issues_event_max_datetime: stringlengths, 24 to 24
max_forks_repo_path: stringlengths, 4 to 245
max_forks_repo_name: stringlengths, 6 to 130
max_forks_repo_head_hexsha: stringlengths, 40 to 40
max_forks_repo_licenses: listlengths, 1 to 10
max_forks_count: int64, 1 to 105k
max_forks_repo_forks_event_min_datetime: stringlengths, 24 to 24
max_forks_repo_forks_event_max_datetime: stringlengths, 24 to 24
content: stringlengths, 2 to 1.02M
avg_line_length: float64, 1 to 958k
max_line_length: int64, 1 to 987k
alphanum_fraction: float64, 0 to 1
content_no_comment: stringlengths, 0 to 1.01M
is_comment_constant_removed: bool, 2 classes
is_sharp_comment_removed: bool, 1 class
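Each record below describes one source file using the fields above, in the same order. As a rough illustration of how such a dump might be sliced, a minimal pandas sketch follows; the parquet file name and storage format are assumptions, and only columns named in the schema are referenced.

import pandas as pd

# Hypothetical file name; the dump's actual storage format is not shown here.
df = pd.read_parquet("code_files.parquet")

# Keep Python files whose text statistics look sane (thresholds are illustrative).
mask = (
    (df["lang"] == "Python")
    & (df["alphanum_fraction"] > 0.5)
    & (df["max_line_length"] < 1000)
)
subset = df.loc[mask, ["max_stars_repo_name", "max_stars_repo_path", "size", "content"]]
print(subset.head())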
f700c6cfc5f9d8f51a0ee57bbf7174a70d5c088e
1,413
py
Python
hamgr/hamgr/db/versions/001_add_initial_tables.py
platform9/pf9-ha
7d64f9fe6b72fb4c1e5ed5d23e372a62c9e218a8
[ "Apache-2.0" ]
11
2016-09-06T09:59:29.000Z
2021-10-02T07:24:07.000Z
hamgr/hamgr/db/versions/001_add_initial_tables.py
platform9/pf9-ha
7d64f9fe6b72fb4c1e5ed5d23e372a62c9e218a8
[ "Apache-2.0" ]
5
2017-10-16T06:47:14.000Z
2020-07-06T07:20:13.000Z
hamgr/hamgr/db/versions/001_add_initial_tables.py
platform9/pf9-ha
7d64f9fe6b72fb4c1e5ed5d23e372a62c9e218a8
[ "Apache-2.0" ]
3
2016-09-01T06:20:51.000Z
2017-10-16T02:27:07.000Z
# Copyright (c) 2019 Platform9 Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table

meta = MetaData()

cluster = Table(
    'clusters', meta,
    Column('id', Integer, primary_key=True),
    Column('deleted', Integer, default=None),
    Column('name', String(255), default=None),
    Column('enabled', Boolean, default=False),
    Column('status', String(36), default=1),
    Column('updated_at', DateTime, default=None),
    Column('created_at', DateTime, default=None),
    Column('deleted_at', DateTime, default=None)
)


def upgrade(migrate_engine):
    meta.bind = migrate_engine
    cluster.create()


def downgrade(migrate_engine):
    meta.bind = migrate_engine
    cluster.drop()
30.06383
74
0.740269
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table

meta = MetaData()

cluster = Table(
    'clusters', meta,
    Column('id', Integer, primary_key=True),
    Column('deleted', Integer, default=None),
    Column('name', String(255), default=None),
    Column('enabled', Boolean, default=False),
    Column('status', String(36), default=1),
    Column('updated_at', DateTime, default=None),
    Column('created_at', DateTime, default=None),
    Column('deleted_at', DateTime, default=None)
)


def upgrade(migrate_engine):
    meta.bind = migrate_engine
    cluster.create()


def downgrade(migrate_engine):
    meta.bind = migrate_engine
    cluster.drop()
true
true
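The trailing numeric fields of each record (avg_line_length, max_line_length, alphanum_fraction) read as simple per-file text statistics over the content string. A minimal sketch of how such statistics might be computed is below; the exact definitions used by the dataset are an assumption, not something the dump confirms, although size divided by avg_line_length does come out to a whole line count for the sample records.

def file_stats(content: str) -> dict:
    # Assumes non-empty content; the schema's minimum content length of 2 guarantees this.
    lines = content.splitlines()
    return {
        "size": len(content),
        # Size divided by line count; consistent with the sample records,
        # e.g. 1413 / 30.06 = 47 lines for the record above.
        "avg_line_length": len(content) / len(lines),
        "max_line_length": max(len(line) for line in lines),
        "alphanum_fraction": sum(c.isalnum() for c in content) / len(content),
    }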
f700c70aaa457cdd5bf6ddadd89ea5f4c679594c
2,113
py
Python
scons/scons-local-2.3.3/SCons/Tool/sunf77.py
pedrishi/pdb2pqr_pypka
74f64948658d021a8bfc8fd78936ce4186ffc88e
[ "BSD-3-Clause" ]
null
null
null
scons/scons-local-2.3.3/SCons/Tool/sunf77.py
pedrishi/pdb2pqr_pypka
74f64948658d021a8bfc8fd78936ce4186ffc88e
[ "BSD-3-Clause" ]
null
null
null
scons/scons-local-2.3.3/SCons/Tool/sunf77.py
pedrishi/pdb2pqr_pypka
74f64948658d021a8bfc8fd78936ce4186ffc88e
[ "BSD-3-Clause" ]
null
null
null
"""SCons.Tool.sunf77 Tool-specific initialization for sunf77, the Sun Studio F77 compiler. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001 - 2014 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/sunf77.py 2014/08/24 12:12:31 garyo" import SCons.Util from FortranCommon import add_all_to_env compilers = ['sunf77', 'f77'] def generate(env): """Add Builders and construction variables for sunf77 to an Environment.""" add_all_to_env(env) fcomp = env.Detect(compilers) or 'f77' env['FORTRAN'] = fcomp env['F77'] = fcomp env['SHFORTRAN'] = '$FORTRAN' env['SHF77'] = '$F77' env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS -KPIC') env['SHF77FLAGS'] = SCons.Util.CLVar('$F77FLAGS -KPIC') def exists(env): return env.Detect(compilers) # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
33.015625
79
0.737814
__revision__ = "src/engine/SCons/Tool/sunf77.py 2014/08/24 12:12:31 garyo" import SCons.Util from FortranCommon import add_all_to_env compilers = ['sunf77', 'f77'] def generate(env): add_all_to_env(env) fcomp = env.Detect(compilers) or 'f77' env['FORTRAN'] = fcomp env['F77'] = fcomp env['SHFORTRAN'] = '$FORTRAN' env['SHF77'] = '$F77' env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS -KPIC') env['SHF77FLAGS'] = SCons.Util.CLVar('$F77FLAGS -KPIC') def exists(env): return env.Detect(compilers)
true
true
f700c767ff92c13aef1a23a878df02eea4e86053
3,656
py
Python
src/Application/PythonScriptModule/pymodules_old/circuits/core/values.py
antont/tundra
5c9b0a3957071f08ab425dff701cdbb34f9e1868
[ "Apache-2.0" ]
1
2018-04-02T15:38:10.000Z
2018-04-02T15:38:10.000Z
src/Application/PythonScriptModule/pymodules_old/circuits/core/values.py
antont/tundra
5c9b0a3957071f08ab425dff701cdbb34f9e1868
[ "Apache-2.0" ]
null
null
null
src/Application/PythonScriptModule/pymodules_old/circuits/core/values.py
antont/tundra
5c9b0a3957071f08ab425dff701cdbb34f9e1868
[ "Apache-2.0" ]
1
2021-09-04T12:37:34.000Z
2021-09-04T12:37:34.000Z
# Package: values # Date: 11th April 2010 # Author: James Mills, prologic at shortcircuit dot net dot au """Values This defines the Value object used by components and events. """ from types import ListType from itertools import imap from events import Event class ValueChanged(Event): """Value Changed Event This Event is triggered when the return Value of an Event Handler has changed it's value. """ def __init__(self, value): "x.__init__(...) initializes x; see x.__class__.__doc__ for signature" super(ValueChanged, self).__init__(value) class Value(object): """Create a new future Value Object Creates a new future Value Object which is used by Event Objects and the Manager to store the result(s) of an Event Handler's exeuction of some Event in the system. :param event: The Event this Value is associated with. :type event: Event instance :param manager: The Manager/Component used to trigger notifications. :type manager: A Manager/Component instance. :param onSet: The channel used when triggering ValueChagned events. :type onSet: A (channel, target) tuple. :ivar result: True if this value has been changed. :ivar errors: True if while setting this value an exception occured. This is a Future/Promise implementation. """ def __init__(self, event=None, manager=None, onSet=None): "x.__init__(...) initializes x; see x.__class__.__doc__ for signature" self.event = event self.manager = manager self.onSet = onSet self.result = False self.errors = False self._parent = self self._value = None def __getstate__(self): keys = ("event", "onSet", "result", "errors", "_value") return dict([(k, getattr(self, k, None)) for k in keys]) def __contains__(self, y): value = self.value return y in value if type(value) is ListType else y == value def __getitem__(self, y): v = self.value[y] if isinstance(v, Value): return v.value else: return v def __iter__(self): return imap(lambda v: v.value if isinstance(v, Value) else v, self.value) def __repr__(self): "x.__repr__() <==> repr(x)" value = "" if self.result: value = repr(self.value) format = "<Value (%s) result: %r errors: %r for %r" return format % (value, self.result, self.errors, self.event) def __str__(self): "x.__str__() <==> str(x)" return str(self.value) def getValue(self): value = self._value while isinstance(value, Value): value = value._value return value def setValue(self, value): if isinstance(value, Value): value._parent = self if self.result and type(self._value) is ListType: self._value.append(value) elif self.result: self._value = [self._value] self._value.append(value) else: self._value = value def notify(o, v): if not isinstance(v, Value) and v is not None: o.result = True if o.manager is not None and o.onSet is not None: o.manager.fireEvent(ValueChanged(o), *o.onSet) elif isinstance(v, Value): o.errors = v.errors o.result = v.result if not o._parent == o: notify(o._parent, v) notify(self, value) value = property(getValue, setValue, None, "Value of this Value")
28.341085
78
0.602298
from types import ListType from itertools import imap from events import Event class ValueChanged(Event): def __init__(self, value): super(ValueChanged, self).__init__(value) class Value(object): def __init__(self, event=None, manager=None, onSet=None): self.event = event self.manager = manager self.onSet = onSet self.result = False self.errors = False self._parent = self self._value = None def __getstate__(self): keys = ("event", "onSet", "result", "errors", "_value") return dict([(k, getattr(self, k, None)) for k in keys]) def __contains__(self, y): value = self.value return y in value if type(value) is ListType else y == value def __getitem__(self, y): v = self.value[y] if isinstance(v, Value): return v.value else: return v def __iter__(self): return imap(lambda v: v.value if isinstance(v, Value) else v, self.value) def __repr__(self): value = "" if self.result: value = repr(self.value) format = "<Value (%s) result: %r errors: %r for %r" return format % (value, self.result, self.errors, self.event) def __str__(self): return str(self.value) def getValue(self): value = self._value while isinstance(value, Value): value = value._value return value def setValue(self, value): if isinstance(value, Value): value._parent = self if self.result and type(self._value) is ListType: self._value.append(value) elif self.result: self._value = [self._value] self._value.append(value) else: self._value = value def notify(o, v): if not isinstance(v, Value) and v is not None: o.result = True if o.manager is not None and o.onSet is not None: o.manager.fireEvent(ValueChanged(o), *o.onSet) elif isinstance(v, Value): o.errors = v.errors o.result = v.result if not o._parent == o: notify(o._parent, v) notify(self, value) value = property(getValue, setValue, None, "Value of this Value")
true
true
f700c828e1ae0ff1deb8636e189c09f5c64ea253
20,605
py
Python
pumml/learners.py
ncfrey/mlmsynth
99fc8fabba511aefd6f0a0be4e85c78c54dd3648
[ "MIT" ]
20
2019-08-22T16:29:37.000Z
2021-12-14T10:35:57.000Z
pumml/learners.py
ncfrey/mlmsynth
99fc8fabba511aefd6f0a0be4e85c78c54dd3648
[ "MIT" ]
5
2020-04-25T02:59:03.000Z
2020-11-13T21:05:02.000Z
pumml/learners.py
ncfrey/mlmsynth
99fc8fabba511aefd6f0a0be4e85c78c54dd3648
[ "MIT" ]
10
2019-08-19T14:29:21.000Z
2022-01-22T03:08:00.000Z
""" Deploy semi-supervised PU machine learning models. This module provides classes for training, testing, and deploying a PU learning model for predicting material synthesizability. Utility functions for plotting aid in visualizing and analyzing results. References: [1] DOI: 10.1021/acsnano.8b08014 [2] DOI: 10.1145/1401890.1401920 [3] DOI: 10.1016/j.patrec.2013.06.010 """ from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import precision_recall_fscore_support from sklearn.cluster import KMeans from sklearn.mixture import GaussianMixture, BayesianGaussianMixture from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import precision_recall_curve from sklearn.model_selection import RepeatedKFold from sklearn.utils import resample from mpl_toolkits.mplot3d import Axes3D from monty.serialization import dumpfn import pandas as pd import seaborn as sns import os import pickle import numpy as np import matplotlib.pyplot as plt from pylab import rcParams __author__ = "Nathan C. Frey, Jin Wang" __copyright__ = "MIT License" __version__ = "0.0.1" __maintainer__ = "Nathan C. Frey" __email__ = "[email protected]" __status__ = "Development" __date__ = "Aug 2017" class PULearner: def __init__(self): """A machine learning model that predicts material synthesizability. Positive samples are experimentally synthesized materials. Unlabeled samples are not-yet synthesized materials. Features for training data might be generated by first-principles (density functional theory) calculations, or structural or chemical data looked up from a table. Hyperparameters are initialized with sensible defaults, but any newly trained model should have hyperparams carefully converged. Attributes: pu_stats (dict): Outputs of cv_baggingDT df_U (DataFrame): Unlabeled data. df_P (DataFrame): Positive data. synth_scores (list): Synthesizability scores (between 0 and 1) of unlabeled samples. labels (list): Likely synthesizable (1) or not (0) feat_importances (DataFrame): Feature importances from trained decision tree classifiers. Index corresponds to feature index in original data. """ self.pu_stats = None self.df_U = None self.df_P = None self.synth_scores = None self.labels = None self.feat_importances = None def cv_baggingDT(self, pu_data, splits=10, repeats=10, bags=100, filename=""): """ Train bagged decision tree base classifiers and do repeated k-fold CV. Synthesizability scores (0 = not synthesizable, 1 = already synthesized) are generated for an unlabeled sample by averaging the scores from the ensemble of decision tree classifiers that have not been trained on that sample. Args: pu_data (json): A file where each row describes a material. There MUST be a column called "PU_label" where a 1 value indicates a synthesized (positive) compound and a 0 value indicates an unlabeled compound. splits (int): Number of splits in k-fold CV. repeats (int): Number of repeated k-fold CV. bags (int): Number of bags in bootstrap aggregation. filename (string): Save model training results to file with filename ending in .json or .pkl. Returns: pu_stats (dict): Metrics and outputs of PU learning model training. 
""" print("Start PU Learning.") # Preprocess data and set attributes df = pd.read_json(pu_data) df_P, df_U, X_P, X_U = self._process_pu_data(df) self.df_P = df_P self.df_U = df_U # Split data into training and test splits for k-fold CV kfold = RepeatedKFold(n_splits=splits, n_repeats=repeats, random_state=42) # Scores for PU learning (tpr = True Positive Rate) scores = [] tprs = [] # Predicted synthesis probability of CVed P and U sets prob_P = np.ones(shape=(X_P.shape[0], splits * repeats)) prob_U = -np.ones(shape=(X_U.shape[0], splits * repeats)) # Feature importance feat_rank = np.zeros(shape=(X_P.shape[1], splits * repeats)) idsp = 0 # index of repeated k splits # Loop over P and U training/test samples for (ptrain, ptest), (utrain, utest) in zip(kfold.split(X_P), kfold.split(X_U)): # Number of P and U training samples N_ptrain = X_P[ptrain].shape[0] N_utrain = X_U[utrain].shape[0] d = X_P.shape[1] K = N_ptrain train_label = np.zeros(shape=(N_ptrain + K,)) train_label[:N_ptrain] = 1.0 # Synthesized (positive) # Out of bag samples n_oob = np.zeros(shape=(N_utrain,)) f_oob = np.zeros(shape=(N_utrain, 2)) # Sums of probabilities of test sets f_ptest = np.zeros(shape=(X_P[ptest].shape[0], 2)) f_utest = np.zeros(shape=(X_U[utest].shape[0], 2)) # Bootstrap resampling for each bag for i in range(bags): bootstrap_sample = np.random.choice( np.arange(N_utrain), replace=True, size=K ) # Positive samples and bootstrapped unlabeled samples data_bootstrap = np.concatenate( (X_P[ptrain], X_U[bootstrap_sample, :]), axis=0 ) # Train decision tree classifier model = DecisionTreeClassifier( max_depth=None, max_features=None, criterion="gini", class_weight="balanced", ) model.fit(data_bootstrap, train_label) # Index for the oob samples idx_oob = sorted( set(range(N_utrain)) - set(np.unique(bootstrap_sample)) ) # Transductive learning on oob samples f_oob[idx_oob] += model.predict_proba(X_U[utrain][idx_oob]) n_oob[idx_oob] += 1 f_ptest += model.predict_proba(X_P[ptest]) f_utest += model.predict_proba(X_U[utest]) feat_rank[:, idsp] = model.feature_importances_ # Predicted synthesis probabilities of unlabeled samples predict_utrain = f_oob[:, 1] / n_oob # Predicted probabilities for P and U test sets predict_ptest = f_ptest[:, 1] / bags predict_utest = f_utest[:, 1] / bags # Find predicted positives true_pos = predict_ptest[np.where(predict_ptest > 0.5)].shape[0] u_pos = predict_utest[np.where(predict_utest > 0.5)].shape[0] N_ptest = X_P[ptest].shape[0] N_utest = X_U[utest].shape[0] # Predicted positive ratio in test set p_pred_pos = (true_pos + u_pos) / (N_ptest + N_utest) + 0.0001 # Compute PU recall (TPR) and score metrics recall = true_pos / N_ptest score = recall ** 2 / p_pred_pos scores.append(score) tprs.append(recall) # Predicted probabilities prob_P[ptest, idsp] = predict_ptest prob_U[utrain, idsp] = predict_utrain prob_U[utest, idsp] = predict_utest idsp += 1 # Progress update if (idsp + 1) % splits == 0: tpr_tmp = np.asarray(tprs[-splits - 1 : -1]) print( "Performed Repeated " + str(splits) + "-fold: " + str(idsp // splits + 1) + " out of " + str(repeats) ) print( "True Positive Rate: %0.2f (+/- %0.2f)" % (tpr_tmp.mean(), tpr_tmp.std() * 2) ) # Predicted labels from k-fold CV label_U = np.zeros(shape=(X_U.shape[0], splits * repeats + 1), dtype=int) label_U[:, : splits * repeats][np.where(prob_U > 0.5)] = 1 label_U[:, splits * repeats] = np.sum( label_U[:, : splits * repeats + 1], axis=1 ) tprs = np.asarray(tprs) scores = np.asarray(scores) # Metrics for each model in the k-folds label_U_rp = 
np.zeros(shape=(X_U.shape[0], repeats), dtype=int) prob_U_rp = np.zeros(shape=(X_U.shape[0], repeats)) feat_rank_rp = np.zeros(shape=(X_U.shape[1], repeats)) tpr_rp = np.zeros(shape=(repeats,)) scores_rp = np.zeros(shape=(repeats,)) labels = np.zeros(shape=(X_U.shape[0],)) for i in range(repeats): prob_U_rp[:, i] = prob_U[:, i * splits : (i + 1) * splits].mean(axis=1) feat_rank_rp[:, i] = feat_rank[:, i * splits : (i + 1) * splits].mean( axis=1 ) tpr_rp[i] = tprs[i * splits : (i + 1) * splits].mean() scores_rp[i] = scores[i * splits : (i + 1) * splits].mean() label_U_rp[np.where(prob_U_rp > 0.5)] = 1 prob = prob_U_rp.mean(axis=1) labels[np.where(prob > 0.5)] = 1 # Get confidence interval of TPR for each kfold tpr_low, tpr_up = self.bootstrapCI(tpr_rp) scores_low, scores_up = self.bootstrapCI(scores_rp) # PU learning metrics metrics = np.asarray( [tpr_rp.mean(), tpr_low, tpr_up, scores_rp.mean(), scores_low, scores_up] ) print("Accuracy: %0.2f" % (tpr_rp.mean())) print("95%% confidence interval: [%0.2f, %0.2f]" % (tpr_low, tpr_up)) # Metrics and results from training / testing pu_stats = { "prob": prob, "labels": labels, "metrics": metrics, "prob_rp": prob_U_rp, "label_rp": label_U_rp, "tpr_rp": tpr_rp, "scores_rp": scores_rp, "feat_rank_rp": feat_rank_rp, } # Save results if filename: if filename.endswith(".json"): dumpfn(pu_stats, filename) if filename.endswith(".pkl"): with open(filename, "wb") as file: pickle.dump(pu_stats, file, protocol=pickle.HIGHEST_PROTOCOL) self.pu_stats = pu_stats return pu_stats def bootstrapCI(self, data, ci=95, ns=10000): """Compute confidence interval of the TPR. Args: data (array): Array of TPRs for each kfold. ci (int): Confidence interval. ns (int): Number of bootstrap resamplings. Returns: lower (float): Lower endpoint of CI. upper (float): Upper endpoint of CI. """ bs_rsample = [] for _ in range(ns): rsample = resample(data, n_samples=len(data)) bs_rsample.append(np.mean(rsample)) bs_rsample = np.asarray(bs_rsample) lower = np.percentile(bs_rsample, (100 - ci) / 2) upper = np.percentile(bs_rsample, ci + (100 - ci) / 2) return lower, upper def corr_heatmap(self, num_feats=10, fname=""): """Plot correlation matrix between synthesizability and features. cv_baggingDT must be run first. Args: num_feats (int): How many features to consider. fname (str): Filename if correlation plot should be saved. Returns: None (generates plots) """ pu_stats = self.pu_stats df_U = self.df_U df_U_copy = df_U.drop(columns=["PU_label"]) # Get normalized, sorted & ranked list of most important features synth_scores = pu_stats["prob"] df_U_copy["synth_score"] = synth_scores # Make correlation matrix of top "num_feats" features corrmat = df_U_copy.corr() cols = corrmat.nlargest(num_feats, "synth_score")["synth_score"].index cm = np.corrcoef(df_U_copy[cols].values.T) sns.set(style='ticks') rcParams['figure.dpi'] = 300 fig, ax = plt.subplots(1, 1) hm = sns.heatmap( cm, ax=ax, cbar=True, annot=True, square=True, fmt=".2f", annot_kws={"size": 7}, yticklabels=cols.values, xticklabels=cols.values, ) if fname: self.save_plot(fname + ".png", fig, ax) def get_feat_importances(self, plot_format=""): """Process output from PU learning k-fold cross validation. cv_baggingDT must be run first. If plot_format is specified, a feature importance plot will be saved. Args: plot_format (str): svg, png, or pdf file format for saving simple visualizations of feature importance and correlation. 
""" pu_stats = self.pu_stats # Feature importances for individual repetitions of kfold CV feat_rank_rp = pu_stats["feat_rank_rp"] feat_importances = np.sum(feat_rank_rp, axis=1) df_U = self.df_U df_U = df_U._get_numeric_data() df_U_copy = df_U.drop(columns=["PU_label"]) feat_names = df_U_copy.columns # Index corresponds to feature in original data df_feat = pd.DataFrame(columns=["feature", "importance"]) df_feat["feature"] = feat_names df_feat["importance"] = feat_importances # Sort by importance df_feat_sort = df_feat.sort_values(by="importance", ascending=False) max_value = df_feat["importance"].max() # Normalize to 1 df_feat_sort["importance"] = df_feat_sort["importance"] / max_value # Set feature importance attribute self.feat_importances = df_feat if plot_format in ["svg", "pdf", "png"]: # Feature importance plot fig, ax = plt.subplots(figsize=(10, 4)) with sns.axes_style(style="ticks"): sns.barplot(x="feature", y="importance", data=df_feat_sort) ax.set_xticklabels( ax.get_xticklabels(), rotation=45, ha="right", fontsize=7 ) filename = "feat_importance." + plot_format self.save_plot(filename, fig, ax) @staticmethod def _process_pu_data(data): """Utility method for processing input data. Args: data (DataFrame): Data with positive and unlabeled samples. Returns: X_P (array): Positive sample set. X_U (array): Unlabeled sample set. """ df_P = data.query("PU_label == 1") # Positive value is 1 df_U = data.query("PU_label == 0") # Unlabeled value is 0 # Chop off PU label and drop non-numeric columns for sklearn X_P = np.asarray(df_P.drop(columns=["PU_label"])._get_numeric_data()) X_U = np.asarray(df_U.drop(columns=["PU_label"])._get_numeric_data()) return df_P, df_U, X_P, X_U @staticmethod def save_plot(filename, fig, ax): """Utility method for saving simple visualizations. Args: filename (str): Name ending in .svg, .png, or .pdf fig, ax (objects): Matplotlib objects. Returns: None """ sns.set_style("ticks") fig.tight_layout() fig.savefig(filename) class PUInteract: def __init__(self, df_parent, pu_parent, df_child, pu_child, merge_on=(), feats=()): """Consider parent and child phase PU learning scores. This class looks at PU learning scores for parent bulk compounds (e.g. layered h-BN) and scores of the child phases along with descriptors like exfoliation energy and changes in structural/electronic properties to predict (parent, child) pairs that can be synthesized. Parent and child must be linked by a column that allows the dataframes to be merged. There should also be additional features that characterize the structural and chemical differences between parents and children, e.g. changes in bond lengths, etc. Unsupervised clustering models are used to identify synthesizable (parent/child) pairs. Args: df_parent (str): Parent data filename. pu_parent (dict): Output from PULearner.cv_baggingDT. df_child (str): Child data filename. pu_child (dict): Output from PULearner.cv_baggingDT. merge_on (tuple): Column name(s) on which to merge. feats (tuple): Column names to use as features. If empty, use all possible columns. Attributes: merged_df (DataFrame): (Parent, child) pair data. X (array): Array representation of merged_df. 
Returns: None """ df_parent = pd.read_json(df_parent) df_child = pd.read_json(df_child) # Set scores from PULearner df_parent["synth_score"] = 1 df_child["synth_score"] = 1 df_parent.loc[df_parent.eval("PU_label == 0"), "synth_score"] = pu_parent[ "prob" ] df_child.loc[df_child.eval("PU_label == 0"), "synth_score"] = pu_child["prob"] # Merge parent and child dfs merge_on = list(merge_on) df = pd.merge( df_parent, df_child, on=merge_on, how="outer", suffixes=["_p", "_c"] ) df.drop(columns=["PU_label_p", "PU_label_c"], inplace=True, axis=1) if feats: feat_names = [f + "_p" for f in feats] + [f + "_c" for f in feats] df = df[feat_names] self.merged_df = df self.X = np.array(df) def do_kmeans(self, n_clusters=2, seed=42): """Do k-means clustering on (parent, child) pairs. Args: n_clusters (int): Number of clusters. seed (int): Fix random seed for kmeans reproducibility. Returns: kmeans_output (dict): kmeans cluster centers, cluster labels for each (parent, child) """ np.random.seed(seed) km = KMeans(n_clusters=n_clusters, random_state=seed) km.fit(self.X) kmeans_output = { "cluster_centers": km.cluster_centers_, "cluster_labels": km.labels_, } return kmeans_output def do_gmixture(self, n_components=2, seed=42): """ Estimate parameters of a Gaussian mixture distribution of (parent, child) data. Args: n_components (int): Number of components in GMM. seed (int): Random seed. Returns: gmm_output (dict): Predicted labels of (parent, child) pairs and predicted posterior probabilities of each component. """ np.random.seed(seed) gmm = GaussianMixture( n_components=n_components, random_state=seed, covariance_type="full" ) gmm.fit(self.X) gmm_labels = gmm.predict(self.X) gmm_prob = gmm.predict_proba(self.X)[:, 0] gmm_output = {"gmm_labels": gmm_labels, "gmm_prob": gmm_prob} return gmm_output def do_bgm(self, n_components=6, seed=42): """Bayesian Gaussian Mixture. Infer the effective number of components in a Gaussian Mixture Model via variational Bayesian estimation. n_effective_componenents < n_components if the model sets some weights close to 0. Args: n_components (int): Number of components in GMM. seed (int): Random seed. Returns: bgm_output (dict): Labels and probabilities. """ np.random.seed(seed) bgm = BayesianGaussianMixture( n_components=n_components, covariance_type="full", weight_concentration_prior=1e-2, weight_concentration_prior_type="dirichlet_process", mean_precision_prior=1e-2, init_params="random", max_iter=100, random_state=seed, ) bgm.fit(self.X) bgm_labels = bgm.predict(self.X) bgm_prob = bgm.predict_proba(self.X)[:, 0] bgm_output = {"bgm_labels": bgm_labels, "bgm_prob": bgm_prob} return bgm_output
33.889803
88
0.586168
from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import precision_recall_fscore_support from sklearn.cluster import KMeans from sklearn.mixture import GaussianMixture, BayesianGaussianMixture from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import precision_recall_curve from sklearn.model_selection import RepeatedKFold from sklearn.utils import resample from mpl_toolkits.mplot3d import Axes3D from monty.serialization import dumpfn import pandas as pd import seaborn as sns import os import pickle import numpy as np import matplotlib.pyplot as plt from pylab import rcParams __author__ = "Nathan C. Frey, Jin Wang" __copyright__ = "MIT License" __version__ = "0.0.1" __maintainer__ = "Nathan C. Frey" __email__ = "[email protected]" __status__ = "Development" __date__ = "Aug 2017" class PULearner: def __init__(self): self.pu_stats = None self.df_U = None self.df_P = None self.synth_scores = None self.labels = None self.feat_importances = None def cv_baggingDT(self, pu_data, splits=10, repeats=10, bags=100, filename=""): print("Start PU Learning.") df = pd.read_json(pu_data) df_P, df_U, X_P, X_U = self._process_pu_data(df) self.df_P = df_P self.df_U = df_U kfold = RepeatedKFold(n_splits=splits, n_repeats=repeats, random_state=42) scores = [] tprs = [] prob_P = np.ones(shape=(X_P.shape[0], splits * repeats)) prob_U = -np.ones(shape=(X_U.shape[0], splits * repeats)) feat_rank = np.zeros(shape=(X_P.shape[1], splits * repeats)) idsp = 0 for (ptrain, ptest), (utrain, utest) in zip(kfold.split(X_P), kfold.split(X_U)): N_ptrain = X_P[ptrain].shape[0] N_utrain = X_U[utrain].shape[0] d = X_P.shape[1] K = N_ptrain train_label = np.zeros(shape=(N_ptrain + K,)) train_label[:N_ptrain] = 1.0 n_oob = np.zeros(shape=(N_utrain,)) f_oob = np.zeros(shape=(N_utrain, 2)) f_ptest = np.zeros(shape=(X_P[ptest].shape[0], 2)) f_utest = np.zeros(shape=(X_U[utest].shape[0], 2)) for i in range(bags): bootstrap_sample = np.random.choice( np.arange(N_utrain), replace=True, size=K ) data_bootstrap = np.concatenate( (X_P[ptrain], X_U[bootstrap_sample, :]), axis=0 ) model = DecisionTreeClassifier( max_depth=None, max_features=None, criterion="gini", class_weight="balanced", ) model.fit(data_bootstrap, train_label) idx_oob = sorted( set(range(N_utrain)) - set(np.unique(bootstrap_sample)) ) f_oob[idx_oob] += model.predict_proba(X_U[utrain][idx_oob]) n_oob[idx_oob] += 1 f_ptest += model.predict_proba(X_P[ptest]) f_utest += model.predict_proba(X_U[utest]) feat_rank[:, idsp] = model.feature_importances_ predict_utrain = f_oob[:, 1] / n_oob predict_ptest = f_ptest[:, 1] / bags predict_utest = f_utest[:, 1] / bags true_pos = predict_ptest[np.where(predict_ptest > 0.5)].shape[0] u_pos = predict_utest[np.where(predict_utest > 0.5)].shape[0] N_ptest = X_P[ptest].shape[0] N_utest = X_U[utest].shape[0] p_pred_pos = (true_pos + u_pos) / (N_ptest + N_utest) + 0.0001 recall = true_pos / N_ptest score = recall ** 2 / p_pred_pos scores.append(score) tprs.append(recall) prob_P[ptest, idsp] = predict_ptest prob_U[utrain, idsp] = predict_utrain prob_U[utest, idsp] = predict_utest idsp += 1 if (idsp + 1) % splits == 0: tpr_tmp = np.asarray(tprs[-splits - 1 : -1]) print( "Performed Repeated " + str(splits) + "-fold: " + str(idsp // splits + 1) + " out of " + str(repeats) ) print( "True Positive Rate: %0.2f (+/- %0.2f)" % (tpr_tmp.mean(), tpr_tmp.std() * 2) ) label_U = np.zeros(shape=(X_U.shape[0], splits * repeats + 1), dtype=int) label_U[:, : splits * repeats][np.where(prob_U > 0.5)] = 1 label_U[:, splits * 
repeats] = np.sum( label_U[:, : splits * repeats + 1], axis=1 ) tprs = np.asarray(tprs) scores = np.asarray(scores) label_U_rp = np.zeros(shape=(X_U.shape[0], repeats), dtype=int) prob_U_rp = np.zeros(shape=(X_U.shape[0], repeats)) feat_rank_rp = np.zeros(shape=(X_U.shape[1], repeats)) tpr_rp = np.zeros(shape=(repeats,)) scores_rp = np.zeros(shape=(repeats,)) labels = np.zeros(shape=(X_U.shape[0],)) for i in range(repeats): prob_U_rp[:, i] = prob_U[:, i * splits : (i + 1) * splits].mean(axis=1) feat_rank_rp[:, i] = feat_rank[:, i * splits : (i + 1) * splits].mean( axis=1 ) tpr_rp[i] = tprs[i * splits : (i + 1) * splits].mean() scores_rp[i] = scores[i * splits : (i + 1) * splits].mean() label_U_rp[np.where(prob_U_rp > 0.5)] = 1 prob = prob_U_rp.mean(axis=1) labels[np.where(prob > 0.5)] = 1 tpr_low, tpr_up = self.bootstrapCI(tpr_rp) scores_low, scores_up = self.bootstrapCI(scores_rp) metrics = np.asarray( [tpr_rp.mean(), tpr_low, tpr_up, scores_rp.mean(), scores_low, scores_up] ) print("Accuracy: %0.2f" % (tpr_rp.mean())) print("95%% confidence interval: [%0.2f, %0.2f]" % (tpr_low, tpr_up)) pu_stats = { "prob": prob, "labels": labels, "metrics": metrics, "prob_rp": prob_U_rp, "label_rp": label_U_rp, "tpr_rp": tpr_rp, "scores_rp": scores_rp, "feat_rank_rp": feat_rank_rp, } if filename: if filename.endswith(".json"): dumpfn(pu_stats, filename) if filename.endswith(".pkl"): with open(filename, "wb") as file: pickle.dump(pu_stats, file, protocol=pickle.HIGHEST_PROTOCOL) self.pu_stats = pu_stats return pu_stats def bootstrapCI(self, data, ci=95, ns=10000): bs_rsample = [] for _ in range(ns): rsample = resample(data, n_samples=len(data)) bs_rsample.append(np.mean(rsample)) bs_rsample = np.asarray(bs_rsample) lower = np.percentile(bs_rsample, (100 - ci) / 2) upper = np.percentile(bs_rsample, ci + (100 - ci) / 2) return lower, upper def corr_heatmap(self, num_feats=10, fname=""): pu_stats = self.pu_stats df_U = self.df_U df_U_copy = df_U.drop(columns=["PU_label"]) synth_scores = pu_stats["prob"] df_U_copy["synth_score"] = synth_scores corrmat = df_U_copy.corr() cols = corrmat.nlargest(num_feats, "synth_score")["synth_score"].index cm = np.corrcoef(df_U_copy[cols].values.T) sns.set(style='ticks') rcParams['figure.dpi'] = 300 fig, ax = plt.subplots(1, 1) hm = sns.heatmap( cm, ax=ax, cbar=True, annot=True, square=True, fmt=".2f", annot_kws={"size": 7}, yticklabels=cols.values, xticklabels=cols.values, ) if fname: self.save_plot(fname + ".png", fig, ax) def get_feat_importances(self, plot_format=""): pu_stats = self.pu_stats feat_rank_rp = pu_stats["feat_rank_rp"] feat_importances = np.sum(feat_rank_rp, axis=1) df_U = self.df_U df_U = df_U._get_numeric_data() df_U_copy = df_U.drop(columns=["PU_label"]) feat_names = df_U_copy.columns df_feat = pd.DataFrame(columns=["feature", "importance"]) df_feat["feature"] = feat_names df_feat["importance"] = feat_importances df_feat_sort = df_feat.sort_values(by="importance", ascending=False) max_value = df_feat["importance"].max() df_feat_sort["importance"] = df_feat_sort["importance"] / max_value self.feat_importances = df_feat if plot_format in ["svg", "pdf", "png"]: fig, ax = plt.subplots(figsize=(10, 4)) with sns.axes_style(style="ticks"): sns.barplot(x="feature", y="importance", data=df_feat_sort) ax.set_xticklabels( ax.get_xticklabels(), rotation=45, ha="right", fontsize=7 ) filename = "feat_importance." 
+ plot_format self.save_plot(filename, fig, ax) @staticmethod def _process_pu_data(data): df_P = data.query("PU_label == 1") df_U = data.query("PU_label == 0") X_P = np.asarray(df_P.drop(columns=["PU_label"])._get_numeric_data()) X_U = np.asarray(df_U.drop(columns=["PU_label"])._get_numeric_data()) return df_P, df_U, X_P, X_U @staticmethod def save_plot(filename, fig, ax): sns.set_style("ticks") fig.tight_layout() fig.savefig(filename) class PUInteract: def __init__(self, df_parent, pu_parent, df_child, pu_child, merge_on=(), feats=()): df_parent = pd.read_json(df_parent) df_child = pd.read_json(df_child) df_parent["synth_score"] = 1 df_child["synth_score"] = 1 df_parent.loc[df_parent.eval("PU_label == 0"), "synth_score"] = pu_parent[ "prob" ] df_child.loc[df_child.eval("PU_label == 0"), "synth_score"] = pu_child["prob"] merge_on = list(merge_on) df = pd.merge( df_parent, df_child, on=merge_on, how="outer", suffixes=["_p", "_c"] ) df.drop(columns=["PU_label_p", "PU_label_c"], inplace=True, axis=1) if feats: feat_names = [f + "_p" for f in feats] + [f + "_c" for f in feats] df = df[feat_names] self.merged_df = df self.X = np.array(df) def do_kmeans(self, n_clusters=2, seed=42): np.random.seed(seed) km = KMeans(n_clusters=n_clusters, random_state=seed) km.fit(self.X) kmeans_output = { "cluster_centers": km.cluster_centers_, "cluster_labels": km.labels_, } return kmeans_output def do_gmixture(self, n_components=2, seed=42): np.random.seed(seed) gmm = GaussianMixture( n_components=n_components, random_state=seed, covariance_type="full" ) gmm.fit(self.X) gmm_labels = gmm.predict(self.X) gmm_prob = gmm.predict_proba(self.X)[:, 0] gmm_output = {"gmm_labels": gmm_labels, "gmm_prob": gmm_prob} return gmm_output def do_bgm(self, n_components=6, seed=42): np.random.seed(seed) bgm = BayesianGaussianMixture( n_components=n_components, covariance_type="full", weight_concentration_prior=1e-2, weight_concentration_prior_type="dirichlet_process", mean_precision_prior=1e-2, init_params="random", max_iter=100, random_state=seed, ) bgm.fit(self.X) bgm_labels = bgm.predict(self.X) bgm_prob = bgm.predict_proba(self.X)[:, 0] bgm_output = {"bgm_labels": bgm_labels, "bgm_prob": bgm_prob} return bgm_output
true
true
f700c852a705d112cf9cacca70852bbd27ce7263
1,925
py
Python
tools/validators/instance_validator/tests/generate_universe_test.py
ljulliar/digitalbuildings
5b5be8db9e00d967911065f5247a8d39512e6504
[ "Apache-2.0" ]
null
null
null
tools/validators/instance_validator/tests/generate_universe_test.py
ljulliar/digitalbuildings
5b5be8db9e00d967911065f5247a8d39512e6504
[ "Apache-2.0" ]
null
null
null
tools/validators/instance_validator/tests/generate_universe_test.py
ljulliar/digitalbuildings
5b5be8db9e00d967911065f5247a8d39512e6504
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for generate_universe.py."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from os import path

from absl.testing import absltest
from tests import test_constants
from validate import generate_universe

_DEFAULT_ONTOLOGY_LOCATION = test_constants.ONTOLOGY_ROOT
_BAD_MODIFIED_ONTOLOGY = path.join(test_constants.TEST_RESOURCES, 'BAD',
                                   'BAD_FORMAT')
_NONEXISTENT_LOCATION = path.join(test_constants.TEST_ROOT, 'nonexistent')
_EMPTY_FOLDER = path.join(test_constants.TEST_RESOURCES, 'BAD', 'BAD_EMPTY')


class GenerateUniverseTest(absltest.TestCase):

  def testCanGenerateUniverse(self):
    universe = generate_universe.BuildUniverse(_DEFAULT_ONTOLOGY_LOCATION)
    self.assertTrue(universe)

  def testCatchInvalidModifiedOntology(self):
    with self.assertRaises(Exception) as context:
      generate_universe.BuildUniverse(_BAD_MODIFIED_ONTOLOGY)
    self.assertIn('no longer valid', str(context.exception))

  def testModifiedTypesCatchesNonexistent(self):
    self.assertRaises(Exception,
                      generate_universe.BuildUniverse(_NONEXISTENT_LOCATION))

  def testModifiedTypesCatchesEmpty(self):
    self.assertRaises(Exception,
                      generate_universe.BuildUniverse(_EMPTY_FOLDER))


if __name__ == '__main__':
  absltest.main()
35
80
0.78026
from __future__ import absolute_import from __future__ import division from __future__ import print_function from os import path from absl.testing import absltest from tests import test_constants from validate import generate_universe _DEFAULT_ONTOLOGY_LOCATION = test_constants.ONTOLOGY_ROOT _BAD_MODIFIED_ONTOLOGY = path.join(test_constants.TEST_RESOURCES, 'BAD', 'BAD_FORMAT') _NONEXISTENT_LOCATION = path.join(test_constants.TEST_ROOT, 'nonexistent') _EMPTY_FOLDER = path.join(test_constants.TEST_RESOURCES, 'BAD', 'BAD_EMPTY') class GenerateUniverseTest(absltest.TestCase): def testCanGenerateUniverse(self): universe = generate_universe.BuildUniverse(_DEFAULT_ONTOLOGY_LOCATION) self.assertTrue(universe) def testCatchInvalidModifiedOntology(self): with self.assertRaises(Exception) as context: generate_universe.BuildUniverse(_BAD_MODIFIED_ONTOLOGY) self.assertIn('no longer valid', str(context.exception)) def testModifiedTypesCatchesNonexistent(self): self.assertRaises(Exception, generate_universe.BuildUniverse(_NONEXISTENT_LOCATION)) def testModifiedTypesCatchesEmpty(self): self.assertRaises(Exception, generate_universe.BuildUniverse(_EMPTY_FOLDER)) if __name__ == '__main__': absltest.main()
true
true
f700c9c005ed5bc2dfc7fc9791084a3f9be88c91
3,443
py
Python
tests/learners/scikit_learn/test_gpr_skl.py
CitrineInformatics/smlb
28a3689bd36aa8d51031b4faf7e2331bbd8148a9
[ "Apache-2.0" ]
6
2020-07-27T21:08:55.000Z
2021-05-04T07:00:29.000Z
tests/learners/scikit_learn/test_gpr_skl.py
CitrineInformatics/smlb
28a3689bd36aa8d51031b4faf7e2331bbd8148a9
[ "Apache-2.0" ]
18
2020-09-01T00:47:04.000Z
2021-09-15T22:16:56.000Z
tests/learners/scikit_learn/test_gpr_skl.py
CitrineInformatics/smlb
28a3689bd36aa8d51031b4faf7e2331bbd8148a9
[ "Apache-2.0" ]
2
2020-08-24T21:50:16.000Z
2020-12-06T05:18:57.000Z
"""GaussianProcessRegressionSklearn tests. Scientific Machine Learning Benchmark: A benchmark of regression models in chem- and materials informatics. """ import pytest import numpy as np skl = pytest.importorskip("sklearn") import smlb from smlb.learners.scikit_learn.gaussian_process_regression_sklearn import GaussianProcessRegressionSklearn def test_GaussianProcessRegressionSklearn_1(): """Simple examples.""" # linear function with linear kernel kernel = skl.gaussian_process.kernels.DotProduct(sigma_0=0, sigma_0_bounds="fixed") gpr = GaussianProcessRegressionSklearn(kernel=kernel, optimizer=None, rng=1) train_data = smlb.TabularData(data=np.array([[-1], [1]]), labels=np.array([-1, 1])) valid_data = smlb.TabularData(data=np.array([[-2], [-1], [0], [1], [2]])) preds = gpr.fit(train_data).apply(valid_data) mean, stddev = preds.mean, preds.stddev assert np.allclose(mean, [-2, -1, 0, 1, 2]) assert stddev[0] > stddev[1] > stddev[2] < stddev[3] < stddev[4] def test_GaussianProcessRegressionSklearn_2(): """All predictive distributions. Linear noise-free function, linear kernel + white noise kernel. The optimized noise level is expected to go to its lower bound. """ kernel = skl.gaussian_process.kernels.DotProduct( sigma_0=0, sigma_0_bounds="fixed" ) + skl.gaussian_process.kernels.WhiteKernel(noise_level=0.1, noise_level_bounds=(1e-5, 1e-5)) gpr = GaussianProcessRegressionSklearn(kernel=kernel, rng=1) n = 100 train_data = smlb.TabularData( data=np.ones(shape=(n, 1)) * 2, labels=np.ones(shape=n) * 3 ) valid_data = smlb.TabularData(data=train_data.samples()) preds = gpr.fit(train_data).apply(valid_data) assert preds.has_signal_part and preds.has_noise_part conf, noise = preds.signal_part, preds.noise_part assert np.allclose(conf.mean, train_data.labels()) assert np.allclose(conf.stddev, np.ones(n) * np.sqrt(1e-5), atol=1e-3) assert (preds.mean == conf.mean).all() assert np.allclose(preds.stddev, np.ones(n) * np.sqrt(np.square(conf.stddev) + 1e-5)) assert np.allclose(noise.mean, np.zeros(shape=n)) assert np.allclose(noise.stddev, np.sqrt(1e-5)) def test_GaussianProcessRegressionSklearn_3(): """All predictive distributions. Linear noisy function, linear kernel + white noise kernel. The optimized noise level is expected to go to its true value. """ kernel = skl.gaussian_process.kernels.DotProduct( sigma_0=0, sigma_0_bounds="fixed" ) + skl.gaussian_process.kernels.WhiteKernel(noise_level=1, noise_level_bounds=(1e-5, 1e5)) gpr = GaussianProcessRegressionSklearn(kernel=kernel, rng=1) n, nlsd = 100, 0.5 data = smlb.TabularData(data=np.ones(shape=(n, 1)) * 2, labels=np.ones(shape=n) * 3) data = smlb.LabelNoise(noise=smlb.NormalNoise(stddev=nlsd, rng=1)).fit(data).apply(data) preds = gpr.fit(data).apply(data) assert preds.has_signal_part and preds.has_noise_part conf, noise = preds.signal_part, preds.noise_part assert np.allclose(conf.mean, np.ones(n) * 3, atol=1e-1) assert np.allclose(conf.stddev, np.ones(n) * nlsd, atol=1e-1) assert (preds.mean == conf.mean).all() assert np.allclose(preds.stddev, np.sqrt(np.square(conf.stddev) + np.square(nlsd)), atol=1e-1) assert np.allclose(noise.mean, np.zeros(shape=n)) assert np.allclose(noise.stddev, nlsd, atol=1e-1)
38.255556
107
0.711008
import pytest import numpy as np skl = pytest.importorskip("sklearn") import smlb from smlb.learners.scikit_learn.gaussian_process_regression_sklearn import GaussianProcessRegressionSklearn def test_GaussianProcessRegressionSklearn_1(): kernel = skl.gaussian_process.kernels.DotProduct(sigma_0=0, sigma_0_bounds="fixed") gpr = GaussianProcessRegressionSklearn(kernel=kernel, optimizer=None, rng=1) train_data = smlb.TabularData(data=np.array([[-1], [1]]), labels=np.array([-1, 1])) valid_data = smlb.TabularData(data=np.array([[-2], [-1], [0], [1], [2]])) preds = gpr.fit(train_data).apply(valid_data) mean, stddev = preds.mean, preds.stddev assert np.allclose(mean, [-2, -1, 0, 1, 2]) assert stddev[0] > stddev[1] > stddev[2] < stddev[3] < stddev[4] def test_GaussianProcessRegressionSklearn_2(): kernel = skl.gaussian_process.kernels.DotProduct( sigma_0=0, sigma_0_bounds="fixed" ) + skl.gaussian_process.kernels.WhiteKernel(noise_level=0.1, noise_level_bounds=(1e-5, 1e-5)) gpr = GaussianProcessRegressionSklearn(kernel=kernel, rng=1) n = 100 train_data = smlb.TabularData( data=np.ones(shape=(n, 1)) * 2, labels=np.ones(shape=n) * 3 ) valid_data = smlb.TabularData(data=train_data.samples()) preds = gpr.fit(train_data).apply(valid_data) assert preds.has_signal_part and preds.has_noise_part conf, noise = preds.signal_part, preds.noise_part assert np.allclose(conf.mean, train_data.labels()) assert np.allclose(conf.stddev, np.ones(n) * np.sqrt(1e-5), atol=1e-3) assert (preds.mean == conf.mean).all() assert np.allclose(preds.stddev, np.ones(n) * np.sqrt(np.square(conf.stddev) + 1e-5)) assert np.allclose(noise.mean, np.zeros(shape=n)) assert np.allclose(noise.stddev, np.sqrt(1e-5)) def test_GaussianProcessRegressionSklearn_3(): kernel = skl.gaussian_process.kernels.DotProduct( sigma_0=0, sigma_0_bounds="fixed" ) + skl.gaussian_process.kernels.WhiteKernel(noise_level=1, noise_level_bounds=(1e-5, 1e5)) gpr = GaussianProcessRegressionSklearn(kernel=kernel, rng=1) n, nlsd = 100, 0.5 data = smlb.TabularData(data=np.ones(shape=(n, 1)) * 2, labels=np.ones(shape=n) * 3) data = smlb.LabelNoise(noise=smlb.NormalNoise(stddev=nlsd, rng=1)).fit(data).apply(data) preds = gpr.fit(data).apply(data) assert preds.has_signal_part and preds.has_noise_part conf, noise = preds.signal_part, preds.noise_part assert np.allclose(conf.mean, np.ones(n) * 3, atol=1e-1) assert np.allclose(conf.stddev, np.ones(n) * nlsd, atol=1e-1) assert (preds.mean == conf.mean).all() assert np.allclose(preds.stddev, np.sqrt(np.square(conf.stddev) + np.square(nlsd)), atol=1e-1) assert np.allclose(noise.mean, np.zeros(shape=n)) assert np.allclose(noise.stddev, nlsd, atol=1e-1)
true
true
f700cb5fa6b3a65b0f27e583671a9cfd14e15279
486
py
Python
Algorithms/Implementation/picking-numbers.py
Owngithub10101/Hackerrank-Problem-Solving
4e35b609c9f5b94c5bda292b9991baa054a944b6
[ "MIT" ]
23
2020-02-28T16:18:48.000Z
2021-12-21T11:51:07.000Z
Algorithms/Implementation/picking-numbers.py
ramanagali/Hackerrank-Problem-Solving
98f654f984013140d52b9a344146e9e38e46fb81
[ "MIT" ]
null
null
null
Algorithms/Implementation/picking-numbers.py
ramanagali/Hackerrank-Problem-Solving
98f654f984013140d52b9a344146e9e38e46fb81
[ "MIT" ]
16
2020-04-08T10:46:39.000Z
2021-11-15T03:46:56.000Z
# Picking Numbers
# Developer: Murillo Grubler
# Link: https://www.hackerrank.com/challenges/picking-numbers/problem

def picking_number(n, arr):
    max_combinations = 0
    for i in range(n):
        combination = arr.count(arr[i]) + arr.count(arr[i] + 1)
        if combination > max_combinations:
            max_combinations = combination
    return max_combinations

n = int(input().strip())
a = [int(a_temp) for a_temp in input().strip().split(' ')]
print (picking_number(n, a))
32.4
69
0.674897
def picking_number(n, arr):
    max_combinations = 0
    for i in range(n):
        combination = arr.count(arr[i]) + arr.count(arr[i] + 1)
        if combination > max_combinations:
            max_combinations = combination
    return max_combinations

n = int(input().strip())
a = [int(a_temp) for a_temp in input().strip().split(' ')]
print (picking_number(n, a))
true
true
f700cc1f7004dfa3ecb59471f0af8c146b7edf34
1,280
py
Python
python/script/run_cleaning_cache.py
aachenmax/vmaf
e65143f36ac9324a1242614bdd6256861d4f46f6
[ "BSD-2-Clause-Patent" ]
2,874
2016-06-06T16:11:37.000Z
2022-03-31T10:10:22.000Z
python/script/run_cleaning_cache.py
aachenmax/vmaf
e65143f36ac9324a1242614bdd6256861d4f46f6
[ "BSD-2-Clause-Patent" ]
619
2016-06-07T19:30:53.000Z
2022-03-31T16:36:05.000Z
python/script/run_cleaning_cache.py
aachenmax/vmaf
e65143f36ac9324a1242614bdd6256861d4f46f6
[ "BSD-2-Clause-Patent" ]
723
2016-06-05T02:44:33.000Z
2022-03-31T03:29:12.000Z
#!/usr/bin/env python3

import os
import sys

from vmaf.core.quality_runner import QualityRunner
from vmaf.core.result_store import FileSystemResultStore
from vmaf.routine import run_remove_results_for_dataset
from vmaf.tools.misc import import_python_file

__copyright__ = "Copyright 2016-2020, Netflix, Inc."
__license__ = "BSD+Patent"


def print_usage():
    quality_runner_types = ['VMAF', 'PSNR', 'SSIM', 'MS_SSIM']
    print("usage: " + os.path.basename(sys.argv[0]) + \
          " quality_type dataset_filepath\n")
    print("quality_type:\n\t" + "\n\t".join(quality_runner_types) +"\n")


def main():
    if len(sys.argv) < 3:
        print_usage()
        return 2

    try:
        quality_type = sys.argv[1]
        dataset_filepath = sys.argv[2]
    except ValueError:
        print_usage()
        return 2

    try:
        dataset = import_python_file(dataset_filepath)
    except Exception as e:
        print("Error: " + str(e))
        return 1

    try:
        runner_class = QualityRunner.find_subclass(quality_type)
    except:
        print_usage()
        return 2

    result_store = FileSystemResultStore()

    run_remove_results_for_dataset(result_store, dataset, runner_class)

    return 0


if __name__ == '__main__':
    ret = main()
    exit(ret)
23.703704
72
0.666406
import os import sys from vmaf.core.quality_runner import QualityRunner from vmaf.core.result_store import FileSystemResultStore from vmaf.routine import run_remove_results_for_dataset from vmaf.tools.misc import import_python_file __copyright__ = "Copyright 2016-2020, Netflix, Inc." __license__ = "BSD+Patent" def print_usage(): quality_runner_types = ['VMAF', 'PSNR', 'SSIM', 'MS_SSIM'] print("usage: " + os.path.basename(sys.argv[0]) + \ " quality_type dataset_filepath\n") print("quality_type:\n\t" + "\n\t".join(quality_runner_types) +"\n") def main(): if len(sys.argv) < 3: print_usage() return 2 try: quality_type = sys.argv[1] dataset_filepath = sys.argv[2] except ValueError: print_usage() return 2 try: dataset = import_python_file(dataset_filepath) except Exception as e: print("Error: " + str(e)) return 1 try: runner_class = QualityRunner.find_subclass(quality_type) except: print_usage() return 2 result_store = FileSystemResultStore() run_remove_results_for_dataset(result_store, dataset, runner_class) return 0 if __name__ == '__main__': ret = main() exit(ret)
true
true
f700cc2db4300e6b6c079786b6be91828b98ea16
2,146
py
Python
src/cupyopt/nuggets/dataframe.py
d33bs/cupyopt
b29724d574667be8023d50ffc80113e0a7bb218e
[ "Apache-2.0" ]
3
2021-08-10T16:38:23.000Z
2022-01-03T19:12:11.000Z
src/cupyopt/nuggets/dataframe.py
d33bs/cupyopt
b29724d574667be8023d50ffc80113e0a7bb218e
[ "Apache-2.0" ]
7
2022-01-03T19:45:05.000Z
2022-03-16T19:48:16.000Z
src/cupyopt/nuggets/dataframe.py
UCBoulder/cupyopt
fcfcef33553c2e453cd222bb6c02492f1e4b1963
[ "Apache-2.0" ]
null
null
null
""" Dataframe functions """ import logging import os from tempfile import mkstemp import pandas as pd from box import Box # pylint: disable=too-many-arguments logger = logging.getLogger(__name__) # pylint: disable=C0103 def pd_export( dataframe: pd.DataFrame, export_type: str, df_name: str, temp_name: bool = False, df_name_prefix: str = "", df_name_suffix: str = "", dir_name: str = ".", config_box: Box = None, index=True, header=True, ) -> str: """ Exports dataframe to file formats using various options Return a filepaths for the exported Dataframe """ if temp_name and dir_name != "": filepath = mkstemp(suffix=df_name_suffix, prefix=df_name_prefix, dir=dir_name)[ 1 ] elif config_box and dir_name == "": filepath = os.path.join( config_box.extracttempdir, f"{df_name_prefix}{df_name}{df_name_suffix}.{export_type}", ) else: filename = f"{df_name_prefix}{df_name}{df_name_suffix}.{export_type}" filepath = os.path.join(dir_name, filename) logger.info("Creating %s file %s from dataframe.", export_type, filepath) if export_type == "parquet": dataframe.to_parquet(path=filepath, index=index) elif export_type == "csv": dataframe.to_csv(filepath, index=index, header=header) return filepath def pd_colupdate(dataframe: pd.DataFrame, coldict: dict) -> pd.DataFrame: """ Rename and filter Pandas Dataframe columns using python dictionary. Column names provided in coldict follow the same format as expected by pd.DataFrame.rename(columns=dict). For example: {"current":"new", "current2":"new2"} Columns in returned dataframe are filtered by those provided to be renamed. Returns a modified pd.Dataframe copy """ logger.info("Renaming and filtering dataframe columns using coldict key:values.") # Remap column names dataframe = dataframe.rename(columns=coldict) # Filter columns based on the new names dataframe = dataframe[[val for key, val in coldict.items()]].copy() return dataframe
27.87013
88
0.676142
import logging import os from tempfile import mkstemp import pandas as pd from box import Box logger = logging.getLogger(__name__) def pd_export( dataframe: pd.DataFrame, export_type: str, df_name: str, temp_name: bool = False, df_name_prefix: str = "", df_name_suffix: str = "", dir_name: str = ".", config_box: Box = None, index=True, header=True, ) -> str: if temp_name and dir_name != "": filepath = mkstemp(suffix=df_name_suffix, prefix=df_name_prefix, dir=dir_name)[ 1 ] elif config_box and dir_name == "": filepath = os.path.join( config_box.extracttempdir, f"{df_name_prefix}{df_name}{df_name_suffix}.{export_type}", ) else: filename = f"{df_name_prefix}{df_name}{df_name_suffix}.{export_type}" filepath = os.path.join(dir_name, filename) logger.info("Creating %s file %s from dataframe.", export_type, filepath) if export_type == "parquet": dataframe.to_parquet(path=filepath, index=index) elif export_type == "csv": dataframe.to_csv(filepath, index=index, header=header) return filepath def pd_colupdate(dataframe: pd.DataFrame, coldict: dict) -> pd.DataFrame: logger.info("Renaming and filtering dataframe columns using coldict key:values.") dataframe = dataframe.rename(columns=coldict) dataframe = dataframe[[val for key, val in coldict.items()]].copy() return dataframe
true
true
f700ccaf9f6d6e05af8e6c7133accb6e5c06ec6f
23
py
Python
utils/__init__.py
alexandrepoirier/XB1_SYNTH
b97ed09829e54c15ff64d8881dce3f8ff7dc38bc
[ "MIT" ]
null
null
null
utils/__init__.py
alexandrepoirier/XB1_SYNTH
b97ed09829e54c15ff64d8881dce3f8ff7dc38bc
[ "MIT" ]
null
null
null
utils/__init__.py
alexandrepoirier/XB1_SYNTH
b97ed09829e54c15ff64d8881dce3f8ff7dc38bc
[ "MIT" ]
null
null
null
from .MidiInfo import *
23
23
0.782609
from .MidiInfo import *
true
true
f700ccd0c37daf9880b1808354c4f39fac9ad540
948
py
Python
location.py
TonyJenkins/redesigned-octo-palm-tree
a88b751a9ad023731ddecd1f30cc13a4dea8e434
[ "Unlicense" ]
1
2017-03-30T16:19:05.000Z
2017-03-30T16:19:05.000Z
location.py
TonyJenkins/redesigned-octo-palm-tree
a88b751a9ad023731ddecd1f30cc13a4dea8e434
[ "Unlicense" ]
null
null
null
location.py
TonyJenkins/redesigned-octo-palm-tree
a88b751a9ad023731ddecd1f30cc13a4dea8e434
[ "Unlicense" ]
null
null
null
#!/usr/bin/env python3 import argparse import json import urllib.request if __name__ == '__main__': parser = argparse.ArgumentParser () parser.add_argument ('-v', '--verbose', help = 'Enable Verbose Mode', action = 'store_true') parser.add_argument ('-ip', help = 'IP Address to Test') args = parser.parse_args () if args.ip: location_url = 'http://ipinfo.io/{:}/json'.format(args.ip) else: location_url = 'http://ipinfo.io/json' if args.verbose: print ('Retrieving location information ...') location_facts = json.loads ((urllib.request.urlopen (location_url).read ()) .decode ("utf-8")) print ('This IP is in {:}, {:}, {:}.'.format (location_facts ['city'], location_facts ['region'], location_facts ['country'])) if args.verbose: print ('All done.')
30.580645
94
0.556962
import argparse import json import urllib.request if __name__ == '__main__': parser = argparse.ArgumentParser () parser.add_argument ('-v', '--verbose', help = 'Enable Verbose Mode', action = 'store_true') parser.add_argument ('-ip', help = 'IP Address to Test') args = parser.parse_args () if args.ip: location_url = 'http://ipinfo.io/{:}/json'.format(args.ip) else: location_url = 'http://ipinfo.io/json' if args.verbose: print ('Retrieving location information ...') location_facts = json.loads ((urllib.request.urlopen (location_url).read ()) .decode ("utf-8")) print ('This IP is in {:}, {:}, {:}.'.format (location_facts ['city'], location_facts ['region'], location_facts ['country'])) if args.verbose: print ('All done.')
true
true
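The location.py record above performs the ipinfo.io lookup without any error handling. A minimal sketch, assuming network access to the same public endpoint, of wrapping that lookup so connection or decoding failures are reported instead of raising a bare traceback (the function name and structure are illustrative, not part of the original script):

import json
import urllib.error
import urllib.request

def lookup(ip=None, timeout=5):
    # Same ipinfo.io URL scheme as the script above.
    url = 'http://ipinfo.io/{}/json'.format(ip) if ip else 'http://ipinfo.io/json'
    try:
        with urllib.request.urlopen(url, timeout=timeout) as response:
            return json.loads(response.read().decode('utf-8'))
    except (urllib.error.URLError, json.JSONDecodeError) as exc:
        # Report the failure instead of letting the traceback escape.
        print('Lookup failed: {}'.format(exc))
        return None

if __name__ == '__main__':
    facts = lookup()
    if facts:
        print('This IP is in {}, {}, {}.'.format(facts.get('city'), facts.get('region'), facts.get('country')))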
f700ce19f231a90c836b91d616c94f8404fc8fd4
17,895
py
Python
core/model.py
superstap/jimi
d921b815c726e169c5a35f01a81eea8a75b8321d
[ "Apache-2.0" ]
null
null
null
core/model.py
superstap/jimi
d921b815c726e169c5a35f01a81eea8a75b8321d
[ "Apache-2.0" ]
null
null
null
core/model.py
superstap/jimi
d921b815c726e169c5a35f01a81eea8a75b8321d
[ "Apache-2.0" ]
null
null
null
import os import json from pathlib import Path import jimi # Initialize dbCollectionName = "model" class _model(jimi.db._document): name = str() className = str() classType = str() location = str() hidden = bool() manifest = dict() _dbCollection = jimi.db.db[dbCollectionName] def new(self,name,className,classType,location,hidden): self.name = name self.className = className self.classType = classType self.location = location self.hidden = hidden self.acl = { "ids":[ { "accessID":"0","delete": True,"read": True,"write": True } ] } return super(_model, self).new() def classObject(self): # ClassID wont exist if the className is model try: mod = __import__("{0}".format(self.location), fromlist=["{0}".format(self.className)]) except ModuleNotFoundError: jimi.logging.debug("Error unable to find class='{0}', className='{1}', classType='{2}', location='{3}'".format(self.classID,self.className,self.classType,self.location),-1) if self.classType == "_action": return jimi.action._action elif self.classType == "_trigger": return jimi.trigger._trigger else: return jimi.db._document class_ = getattr(mod, "{0}".format(self.className)) # Injecting manifest from model into the loaded class - this is only held in memory and never committed to the database class_.manifest__ = self.manifest return class_ def registerModel(name,className,classType,location,hidden=False): # Checking that a model with the same name does not already exist ( this is due to identification within GUI, future changes could be made to allow this?? ) results = _model(False).query(query={ "name" : name })["results"] if len(results) == 0: return _model().new(name,className,classType,location,hidden) else: if jimi.logging.debugEnabled: jimi.logging.debug("Register model failed as it already exists modelName='{0}', className='{1}', classType='{2}', location='{3}'".format(name,className,classType,location),4) def deregisterModel(name,className,classType,location): loadModels = _model(False).query(query={ "name" : name})["results"] if loadModels: loadModels = loadModels[0] # This really does need to clean up the models objects that are left #from core.models import trigger, action #trigger._action().api_delete(query={"classID" : ObjectId(loadModels["_id"]) }) #action._action().api_delete(query={"classID" : ObjectId(loadModels["_id"]) }) results = _model().api_delete(query={ "name" : name, "classType" : classType }) if results["result"]: return True if jimi.logging.debugEnabled: jimi.logging.debug("deregister model failed modelName='{0}', className='{1}', classType='{2}', location='{3}'".format(name,className,classType,location),4) def getClassID(name): loadModels = _model(False).query(query={ "name" : name})["results"] if loadModels: loadModels = loadModels[0] return loadModels["_id"] return None def loadModel(modelName): results = _model(False).query(query={ "name" : modelName })["results"] if len(results) == 1: results = results[0] _class = _model().get(results["_id"]) return _class return None def getClassObject(classID,sessionData): return _model().getAsClass(id=classID) ######### --------- API --------- ######### if jimi.api.webServer: if not jimi.api.webServer.got_first_request: if jimi.api.webServer.name == "jimi_web": @jimi.api.webServer.route(jimi.api.base+"models/", methods=["GET"]) def getModels(): result = [] jimi.api.g.sessionData models = _model(False).query(jimi.api.g.sessionData,query={ "_id" : { "$exists": True } })["results"] for model in models: result.append(model["name"]) return { "models" : result }, 200 
@jimi.api.webServer.route(jimi.api.base+"models/<modelName>/", methods=["GET"]) def getModel(modelName): class_ = loadModel(modelName).classObject() if class_: results = _model(False).query(jimi.api.g.sessionData,query={ "className" : class_.__name__ })["results"] if len(results) == 1: results = results[0] return class_().query(jimi.api.g.sessionData,query={ "classID" : results["_id"] },fields=["_id","name","classType"]), 200 return {}, 404 @jimi.api.webServer.route(jimi.api.base+"models/<modelName>/extra/", methods=["GET"]) def getModelExtra(modelName): class_ = loadModel(modelName).classObject() if class_: results = _model(False).query(jimi.api.g.sessionData,query={ "className" : class_.__name__ })["results"] if len(results) == 1: results = results[0] results = class_(False).query(jimi.api.g.sessionData,query={ "classID" : results["_id"] },fields=["_id","name","classType","lastUpdateTime"])["results"] ids = [ x["_id"] for x in results ] # Possible for ID trigger and action to be the same ( although unlikey but keep in mind this could be an issue in future ) ConductsCache = jimi.conduct._conduct().query(query={ "$or" : [ { "flow.triggerID" : { "$in" : ids } }, { "flow.actionID" : { "$in" : ids } } ] },fields=["_id","name","flow"])["results"] for result in results: usedIn = [] for ConductCache in ConductsCache: for flow in ConductCache["flow"]: if "triggerID" in flow: if flow["triggerID"] == result["_id"]: usedIn.append({ "conductID" : ConductCache["_id"], "conductName" : ConductCache["name"] }) if "actionID" in flow: if flow["actionID"] == result["_id"]: usedIn.append({ "conductID" : ConductCache["_id"], "conductName" : ConductCache["name"] }) result["whereUsed"] = usedIn return { "results" : results }, 200 return {}, 404 @jimi.api.webServer.route(jimi.api.base+"models/<modelName>/all/", methods=["GET"]) def getModelAndChildren(modelName): class_ = loadModel(modelName).classObject() classIDs = [] if class_: results = _model(False).query(jimi.api.g.sessionData,query={ "className" : class_.__name__ })["results"] if len(results) == 1: results = results[0] classIDs.append(results["_id"]) results = _model(False).query(jimi.api.g.sessionData,query={ "classType" : results["className"] })["results"] for result in results: classIDs.append(result["_id"]) result = [] for classID in classIDs: for foundObject in class_(False).query(jimi.api.g.sessionData,query={ "classID" : classID })["results"]: result.append(foundObject) return { "results" : result}, 200 else: return {}, 404 @jimi.api.webServer.route(jimi.api.base+"models/<modelName>/schema/", methods=["GET"]) def getModelSchema(modelName): class_ = loadModel(modelName) if class_: access = jimi.db.ACLAccess(jimi.api.g.sessionData,class_.acl,"read") if access: return class_.classObject()(False).api_getSchema(), 200 else: return {}, 403 else: return {}, 404 @jimi.api.webServer.route(jimi.api.base+"models/<modelName>/<objectID>/", methods=["GET"]) def getModelObject(modelName,objectID): class_ = loadModel(modelName).classObject() if class_: classObject = class_(False).getAsClass(jimi.api.g.sessionData,id=objectID) if classObject: classObject = classObject[0] members = jimi.helpers.classToJson(classObject) return members, 200 else: return {}, 404 else: return {}, 404 @jimi.api.webServer.route(jimi.api.base+"models/<modelName>/<objectID>/", methods=["DELETE"]) def deleteModelObject(modelName,objectID): class_ = loadModel(modelName) if class_: _class = class_.classObject()(False).getAsClass(jimi.api.g.sessionData,id=objectID) if len(_class) == 1: _class = 
_class[0] access = jimi.db.ACLAccess(jimi.api.g.sessionData,_class.acl,"delete") if access: if "_id" in jimi.api.g.sessionData: jimi.audit._audit().add("model","delete",{ "_id" : jimi.api.g.sessionData["_id"], "user" : jimi.api.g.sessionData["user"], "modelName" : modelName, "objectID" : objectID }) else: jimi.audit._audit().add("model","delete",{ "user" : "system", "objectID" : objectID }) result = class_.classObject()(False).api_delete(id=objectID) if result["result"]: return result, 200 else: return {}, 403 return {}, 404 @jimi.api.webServer.route(jimi.api.base+"models/<modelName>/", methods=["PUT"]) def newModelObject(modelName): class_ = loadModel(modelName) if class_: access = jimi.db.ACLAccess(jimi.api.g.sessionData,class_.acl,"read") if access: class_ = class_.classObject()(False) if jimi.api.g.sessionData: class_.acl = { "ids" : [ { "accessID" : jimi.api.g.sessionData["primaryGroup"], "read" : True, "write" : True, "delete" : True } ] } newObjectID = super(type(class_), class_).new().inserted_id if "_id" in jimi.api.g.sessionData: jimi.audit._audit().add("model","create",{ "_id" : jimi.api.g.sessionData["_id"], "user" : jimi.api.g.sessionData["user"], "modelName" : modelName, "objectID" : str(newObjectID) }) else: jimi.audit._audit().add("model","create",{ "user" : "system", "objectID" : str(newObjectID) }) return { "_id" : str(newObjectID) }, 200 return {}, 404 @jimi.api.webServer.route(jimi.api.base+"models/<modelName>/<objectID>/", methods=["POST"]) def updateModelObject(modelName,objectID): class_ = loadModel(modelName) if class_: data = json.loads(jimi.api.request.data) updateItemsList = [] changeLog = {} _class = class_.classObject()(False).getAsClass(jimi.api.g.sessionData,id=objectID) if len(_class) == 1: _class = _class[0] # Builds list of permitted ACL access = jimi.db.ACLAccess(jimi.api.g.sessionData,_class.acl,"write") adminBypass = False if "admin" in jimi.api.g.sessionData: if jimi.api.g.sessionData["admin"]: adminBypass = True if access: for dataKey, dataValue in data.items(): fieldAccessPermitted = True # Checking if sessionData is permitted field level access if _class.acl != {} and not adminBypass: fieldAccessPermitted = jimi.db.fieldACLAccess(jimi.api.g.sessionData,_class.acl,dataKey,"write") if fieldAccessPermitted: # _id is a protected mongodb object and cant be updated if dataKey != "_id": if hasattr(_class, dataKey): changeLog[dataKey] = {} changeLog[dataKey]["currentValue"] = getattr(_class, dataKey) if type(getattr(_class, dataKey)) is str: if _class.setAttribute(dataKey, str(dataValue),sessionData=jimi.api.g.sessionData): updateItemsList.append(dataKey) changeLog[dataKey]["newValue"] = getattr(_class, dataKey) elif type(getattr(_class, dataKey)) is int: try: if _class.setAttribute(dataKey, int(dataValue),sessionData=jimi.api.g.sessionData): updateItemsList.append(dataKey) changeLog[dataKey]["newValue"] = getattr(_class, dataKey) except ValueError: if _class.setAttribute(dataKey, 0,sessionData=jimi.api.g.sessionData): updateItemsList.append(dataKey) changeLog[dataKey]["newValue"] = getattr(_class, dataKey) elif type(getattr(_class, dataKey)) is float: try: if _class.setAttribute(dataKey, float(dataValue),sessionData=jimi.api.g.sessionData): updateItemsList.append(dataKey) changeLog[dataKey]["newValue"] = getattr(_class, dataKey) except ValueError: if _class.setAttribute(dataKey, 0,sessionData=jimi.api.g.sessionData): updateItemsList.append(dataKey) changeLog[dataKey]["newValue"] = getattr(_class, dataKey) elif type(getattr(_class, dataKey)) is bool: # 
Convert string object to bool if type(dataValue) is str: if dataValue.lower() == "true": dataValue = True else: dataValue = False if _class.setAttribute(dataKey, dataValue,sessionData=jimi.api.g.sessionData): updateItemsList.append(dataKey) changeLog[dataKey]["newValue"] = getattr(_class, dataKey) elif type(getattr(_class, dataKey)) is dict or type(getattr(_class, dataKey)) is list: if dataValue: if _class.setAttribute(dataKey, json.loads(dataValue),sessionData=jimi.api.g.sessionData): updateItemsList.append(dataKey) changeLog[dataKey]["newValue"] = getattr(_class, dataKey) # Commit back to database if updateItemsList: # Adding audit record if "_id" in jimi.api.g.sessionData: jimi.audit._audit().add("model","update",{ "_id" : jimi.api.g.sessionData["_id"], "user" : jimi.api.g.sessionData["user"], "objects" : changeLog, "modelName" : modelName, "objectID" : objectID }) else: jimi.audit._audit().add("model","update",{ "user" : "system", "objects" : changeLog, "modelName" : modelName, "objectID" : objectID }) _class.update(updateItemsList,sessionData=jimi.api.g.sessionData,revisioning=True) return {}, 200 else: return {}, 403 return {}, 404
59.65
231
0.484325
import os import json from pathlib import Path import jimi dbCollectionName = "model" class _model(jimi.db._document): name = str() className = str() classType = str() location = str() hidden = bool() manifest = dict() _dbCollection = jimi.db.db[dbCollectionName] def new(self,name,className,classType,location,hidden): self.name = name self.className = className self.classType = classType self.location = location self.hidden = hidden self.acl = { "ids":[ { "accessID":"0","delete": True,"read": True,"write": True } ] } return super(_model, self).new() def classObject(self): try: mod = __import__("{0}".format(self.location), fromlist=["{0}".format(self.className)]) except ModuleNotFoundError: jimi.logging.debug("Error unable to find class='{0}', className='{1}', classType='{2}', location='{3}'".format(self.classID,self.className,self.classType,self.location),-1) if self.classType == "_action": return jimi.action._action elif self.classType == "_trigger": return jimi.trigger._trigger else: return jimi.db._document class_ = getattr(mod, "{0}".format(self.className)) class_.manifest__ = self.manifest return class_ def registerModel(name,className,classType,location,hidden=False): results = _model(False).query(query={ "name" : name })["results"] if len(results) == 0: return _model().new(name,className,classType,location,hidden) else: if jimi.logging.debugEnabled: jimi.logging.debug("Register model failed as it already exists modelName='{0}', className='{1}', classType='{2}', location='{3}'".format(name,className,classType,location),4) def deregisterModel(name,className,classType,location): loadModels = _model(False).query(query={ "name" : name})["results"] if loadModels: loadModels = loadModels[0] results = _model().api_delete(query={ "name" : name, "classType" : classType }) if results["result"]: return True if jimi.logging.debugEnabled: jimi.logging.debug("deregister model failed modelName='{0}', className='{1}', classType='{2}', location='{3}'".format(name,className,classType,location),4) def getClassID(name): loadModels = _model(False).query(query={ "name" : name})["results"] if loadModels: loadModels = loadModels[0] return loadModels["_id"] return None def loadModel(modelName): results = _model(False).query(query={ "name" : modelName })["results"] if len(results) == 1: results = results[0] _class = _model().get(results["_id"]) return _class return None def getClassObject(classID,sessionData): return _model().getAsClass(id=classID) if jimi.api.webServer: if not jimi.api.webServer.got_first_request: if jimi.api.webServer.name == "jimi_web": @jimi.api.webServer.route(jimi.api.base+"models/", methods=["GET"]) def getModels(): result = [] jimi.api.g.sessionData models = _model(False).query(jimi.api.g.sessionData,query={ "_id" : { "$exists": True } })["results"] for model in models: result.append(model["name"]) return { "models" : result }, 200 @jimi.api.webServer.route(jimi.api.base+"models/<modelName>/", methods=["GET"]) def getModel(modelName): class_ = loadModel(modelName).classObject() if class_: results = _model(False).query(jimi.api.g.sessionData,query={ "className" : class_.__name__ })["results"] if len(results) == 1: results = results[0] return class_().query(jimi.api.g.sessionData,query={ "classID" : results["_id"] },fields=["_id","name","classType"]), 200 return {}, 404 @jimi.api.webServer.route(jimi.api.base+"models/<modelName>/extra/", methods=["GET"]) def getModelExtra(modelName): class_ = loadModel(modelName).classObject() if class_: results = 
_model(False).query(jimi.api.g.sessionData,query={ "className" : class_.__name__ })["results"] if len(results) == 1: results = results[0] results = class_(False).query(jimi.api.g.sessionData,query={ "classID" : results["_id"] },fields=["_id","name","classType","lastUpdateTime"])["results"] ids = [ x["_id"] for x in results ] ConductsCache = jimi.conduct._conduct().query(query={ "$or" : [ { "flow.triggerID" : { "$in" : ids } }, { "flow.actionID" : { "$in" : ids } } ] },fields=["_id","name","flow"])["results"] for result in results: usedIn = [] for ConductCache in ConductsCache: for flow in ConductCache["flow"]: if "triggerID" in flow: if flow["triggerID"] == result["_id"]: usedIn.append({ "conductID" : ConductCache["_id"], "conductName" : ConductCache["name"] }) if "actionID" in flow: if flow["actionID"] == result["_id"]: usedIn.append({ "conductID" : ConductCache["_id"], "conductName" : ConductCache["name"] }) result["whereUsed"] = usedIn return { "results" : results }, 200 return {}, 404 @jimi.api.webServer.route(jimi.api.base+"models/<modelName>/all/", methods=["GET"]) def getModelAndChildren(modelName): class_ = loadModel(modelName).classObject() classIDs = [] if class_: results = _model(False).query(jimi.api.g.sessionData,query={ "className" : class_.__name__ })["results"] if len(results) == 1: results = results[0] classIDs.append(results["_id"]) results = _model(False).query(jimi.api.g.sessionData,query={ "classType" : results["className"] })["results"] for result in results: classIDs.append(result["_id"]) result = [] for classID in classIDs: for foundObject in class_(False).query(jimi.api.g.sessionData,query={ "classID" : classID })["results"]: result.append(foundObject) return { "results" : result}, 200 else: return {}, 404 @jimi.api.webServer.route(jimi.api.base+"models/<modelName>/schema/", methods=["GET"]) def getModelSchema(modelName): class_ = loadModel(modelName) if class_: access = jimi.db.ACLAccess(jimi.api.g.sessionData,class_.acl,"read") if access: return class_.classObject()(False).api_getSchema(), 200 else: return {}, 403 else: return {}, 404 @jimi.api.webServer.route(jimi.api.base+"models/<modelName>/<objectID>/", methods=["GET"]) def getModelObject(modelName,objectID): class_ = loadModel(modelName).classObject() if class_: classObject = class_(False).getAsClass(jimi.api.g.sessionData,id=objectID) if classObject: classObject = classObject[0] members = jimi.helpers.classToJson(classObject) return members, 200 else: return {}, 404 else: return {}, 404 @jimi.api.webServer.route(jimi.api.base+"models/<modelName>/<objectID>/", methods=["DELETE"]) def deleteModelObject(modelName,objectID): class_ = loadModel(modelName) if class_: _class = class_.classObject()(False).getAsClass(jimi.api.g.sessionData,id=objectID) if len(_class) == 1: _class = _class[0] access = jimi.db.ACLAccess(jimi.api.g.sessionData,_class.acl,"delete") if access: if "_id" in jimi.api.g.sessionData: jimi.audit._audit().add("model","delete",{ "_id" : jimi.api.g.sessionData["_id"], "user" : jimi.api.g.sessionData["user"], "modelName" : modelName, "objectID" : objectID }) else: jimi.audit._audit().add("model","delete",{ "user" : "system", "objectID" : objectID }) result = class_.classObject()(False).api_delete(id=objectID) if result["result"]: return result, 200 else: return {}, 403 return {}, 404 @jimi.api.webServer.route(jimi.api.base+"models/<modelName>/", methods=["PUT"]) def newModelObject(modelName): class_ = loadModel(modelName) if class_: access = 
jimi.db.ACLAccess(jimi.api.g.sessionData,class_.acl,"read") if access: class_ = class_.classObject()(False) if jimi.api.g.sessionData: class_.acl = { "ids" : [ { "accessID" : jimi.api.g.sessionData["primaryGroup"], "read" : True, "write" : True, "delete" : True } ] } newObjectID = super(type(class_), class_).new().inserted_id if "_id" in jimi.api.g.sessionData: jimi.audit._audit().add("model","create",{ "_id" : jimi.api.g.sessionData["_id"], "user" : jimi.api.g.sessionData["user"], "modelName" : modelName, "objectID" : str(newObjectID) }) else: jimi.audit._audit().add("model","create",{ "user" : "system", "objectID" : str(newObjectID) }) return { "_id" : str(newObjectID) }, 200 return {}, 404 @jimi.api.webServer.route(jimi.api.base+"models/<modelName>/<objectID>/", methods=["POST"]) def updateModelObject(modelName,objectID): class_ = loadModel(modelName) if class_: data = json.loads(jimi.api.request.data) updateItemsList = [] changeLog = {} _class = class_.classObject()(False).getAsClass(jimi.api.g.sessionData,id=objectID) if len(_class) == 1: _class = _class[0] access = jimi.db.ACLAccess(jimi.api.g.sessionData,_class.acl,"write") adminBypass = False if "admin" in jimi.api.g.sessionData: if jimi.api.g.sessionData["admin"]: adminBypass = True if access: for dataKey, dataValue in data.items(): fieldAccessPermitted = True if _class.acl != {} and not adminBypass: fieldAccessPermitted = jimi.db.fieldACLAccess(jimi.api.g.sessionData,_class.acl,dataKey,"write") if fieldAccessPermitted: if dataKey != "_id": if hasattr(_class, dataKey): changeLog[dataKey] = {} changeLog[dataKey]["currentValue"] = getattr(_class, dataKey) if type(getattr(_class, dataKey)) is str: if _class.setAttribute(dataKey, str(dataValue),sessionData=jimi.api.g.sessionData): updateItemsList.append(dataKey) changeLog[dataKey]["newValue"] = getattr(_class, dataKey) elif type(getattr(_class, dataKey)) is int: try: if _class.setAttribute(dataKey, int(dataValue),sessionData=jimi.api.g.sessionData): updateItemsList.append(dataKey) changeLog[dataKey]["newValue"] = getattr(_class, dataKey) except ValueError: if _class.setAttribute(dataKey, 0,sessionData=jimi.api.g.sessionData): updateItemsList.append(dataKey) changeLog[dataKey]["newValue"] = getattr(_class, dataKey) elif type(getattr(_class, dataKey)) is float: try: if _class.setAttribute(dataKey, float(dataValue),sessionData=jimi.api.g.sessionData): updateItemsList.append(dataKey) changeLog[dataKey]["newValue"] = getattr(_class, dataKey) except ValueError: if _class.setAttribute(dataKey, 0,sessionData=jimi.api.g.sessionData): updateItemsList.append(dataKey) changeLog[dataKey]["newValue"] = getattr(_class, dataKey) elif type(getattr(_class, dataKey)) is bool: if type(dataValue) is str: if dataValue.lower() == "true": dataValue = True else: dataValue = False if _class.setAttribute(dataKey, dataValue,sessionData=jimi.api.g.sessionData): updateItemsList.append(dataKey) changeLog[dataKey]["newValue"] = getattr(_class, dataKey) elif type(getattr(_class, dataKey)) is dict or type(getattr(_class, dataKey)) is list: if dataValue: if _class.setAttribute(dataKey, json.loads(dataValue),sessionData=jimi.api.g.sessionData): updateItemsList.append(dataKey) changeLog[dataKey]["newValue"] = getattr(_class, dataKey) if updateItemsList: if "_id" in jimi.api.g.sessionData: jimi.audit._audit().add("model","update",{ "_id" : jimi.api.g.sessionData["_id"], "user" : jimi.api.g.sessionData["user"], "objects" : changeLog, "modelName" : modelName, "objectID" : objectID }) else: 
jimi.audit._audit().add("model","update",{ "user" : "system", "objects" : changeLog, "modelName" : modelName, "objectID" : objectID }) _class.update(updateItemsList,sessionData=jimi.api.g.sessionData,revisioning=True) return {}, 200 else: return {}, 403 return {}, 404
true
true
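classObject in the core/model.py record above resolves a class from a dotted module path with __import__/getattr and attaches the model's manifest to it in memory. A minimal sketch of that dynamic-loading pattern using importlib; the module and class names below are stand-ins from the standard library, not jimi models:

import importlib

def load_class(location, class_name, manifest=None):
    # Import the module by its dotted path, then fetch the class by name.
    module = importlib.import_module(location)
    cls = getattr(module, class_name)
    if manifest is not None:
        # Held only on the in-memory class object, as the record notes for
        # manifest__; nothing is written back to a database here.
        cls.manifest__ = manifest
    return cls

decoder_cls = load_class('json.decoder', 'JSONDecoder', manifest={'name': 'demo'})
print(decoder_cls.__name__, decoder_cls.manifest__)  # JSONDecoder {'name': 'demo'}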
f700cf5b133d1f6bcaf30ab756470f1a226e2fa5
7,671
py
Python
cs3api4lab/tests/test_locks.py
SoftwareMind-ScienceMesh-org/cs3api4lab
3de2e3d9fcb920110697686fa37ac06257cd650e
[ "Apache-2.0" ]
null
null
null
cs3api4lab/tests/test_locks.py
SoftwareMind-ScienceMesh-org/cs3api4lab
3de2e3d9fcb920110697686fa37ac06257cd650e
[ "Apache-2.0" ]
null
null
null
cs3api4lab/tests/test_locks.py
SoftwareMind-ScienceMesh-org/cs3api4lab
3de2e3d9fcb920110697686fa37ac06257cd650e
[ "Apache-2.0" ]
null
null
null
import json from unittest import TestCase from time import sleep from cs3api4lab.tests.share_test_base import ShareTestBase from traitlets.config import LoggingConfigurable import urllib.parse class TestLocks(ShareTestBase, TestCase): einstein_id = '4c510ada-c86b-4815-8820-42cdf82c3d51' einstein_idp = 'cernbox.cern.ch' marie_id = 'f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c' marie_idp = 'cesnet.cz' richard_id = '932b4540-8d16-481e-8ef4-588e4b6b151c' richard_idp = 'example.org' receiver_role = 'viewer' receiver_grantee_type = 'user' file_path = '/home/test_locks.txt' shared_file_path = '/reva/einstein/test_locks.txt' storage_id = '123e4567-e89b-12d3-a456-426655440000' share_id = None conflict_name = None def test_lock_created_when_file_written(self): self.file_name = self.file_path + self.get_random_suffix() try: created_share = self.create_share('einstein', self.richard_id, self.richard_idp, self.file_name) self.share_id = created_share['opaque_id'] self.file_api.write_file(self.file_name, 'content') file_ref = self.storage_logic.get_unified_file_ref(self.file_name, '/') file_info = self.storage_logic._stat_internal(file_ref).info self.assertTrue(file_info.arbitrary_metadata.metadata) self.assertIn("lock_einstein_cernbox.cern.ch_4c510ada-c86b-4815-8820-42cdf82c3d51", file_info.arbitrary_metadata.metadata) lock = json.loads(urllib.parse.unquote(file_info.arbitrary_metadata.metadata["lock_einstein_cernbox.cern.ch_4c510ada-c86b-4815-8820-42cdf82c3d51"])) self.assertEquals(lock['username'], 'einstein') self.assertEquals(lock['idp'], 'cernbox.cern.ch') self.assertEquals(lock['opaque_id'], '4c510ada-c86b-4815-8820-42cdf82c3d51') finally: if self.share_id: self.remove_test_share('einstein', self.share_id) self.remove_test_file('einstein', self.file_name) def test_lock_created_when_file_read(self): self.file_name = self.file_path + self.get_random_suffix() try: created_share = self.create_share('einstein', self.richard_id, self.richard_idp, self.file_name) self.share_id = created_share['opaque_id'] for chunk in self.file_api.read_file(self.file_name): continue file_ref = self.storage_logic.get_unified_file_ref(self.file_name, '/') file_info = self.storage_logic._stat_internal(file_ref).info self.assertTrue(file_info.arbitrary_metadata.metadata) self.assertIn("lock_einstein_cernbox.cern.ch_4c510ada-c86b-4815-8820-42cdf82c3d51", file_info.arbitrary_metadata.metadata) lock = json.loads(urllib.parse.unquote(file_info.arbitrary_metadata.metadata["lock_einstein_cernbox.cern.ch_4c510ada-c86b-4815-8820-42cdf82c3d51"])) self.assertEquals(lock['username'], 'einstein') self.assertEquals(lock['idp'], 'cernbox.cern.ch') self.assertEquals(lock['opaque_id'], '4c510ada-c86b-4815-8820-42cdf82c3d51') finally: if self.share_id: self.remove_test_share('einstein', self.share_id) self.remove_test_file('einstein', self.file_name) def test_write_file_locked_conflict_created(self): suffix = self.get_random_suffix() self.file_name = self.file_path + suffix shared_name = self.shared_file_path + suffix try: created_share = self.create_share('einstein', self.richard_id, self.richard_idp, self.file_name) self.share_id = created_share['opaque_id'] self.file_api.write_file(self.file_name, 'content') self.conflict_name = self.richard_file_api.write_file(shared_name, "richard_content") lock_stat = self.richard_file_api.stat(self.conflict_name) self.assertEqual(lock_stat['filepath'], self.conflict_name) content = self.read_file_content(self.richard_file_api, self.conflict_name) self.assertEqual(content, 'richard_content', 'File ' + 
self.file_name + ' should contain the string: ' + 'richard_content') finally: if self.share_id: self.remove_test_share('einstein', self.share_id) self.remove_test_file('einstein', self.file_name) if self.conflict_name: self.remove_test_file('richard', self.conflict_name) def test_write_dir_file_locked(self): suffix = self.get_random_suffix() self.file_name = '/home/testdir/test_locks.txt' + suffix shared_name = '/reva/einstein/testdir/test_locks.txt' + suffix try: try: self.file_api.create_directory('/home/testdir') except: pass #ignore already existing directory created_share = self.create_share('einstein', self.richard_id, self.richard_idp, self.file_name) self.share_id = created_share['opaque_id'] self.file_api.write_file(self.file_name, 'content') self.conflict_name = self.richard_file_api.write_file(shared_name, "richard_content") lock_stat = self.richard_file_api.stat(self.conflict_name) self.assertEqual(lock_stat['filepath'], self.conflict_name) content = self.read_file_content(self.richard_file_api, self.conflict_name) self.assertEqual(content, 'richard_content', 'File ' + self.file_name + ' should contain the string: ' + 'richard_content') finally: if self.share_id: self.remove_test_share('einstein', self.share_id) self.remove_test_file('einstein', self.file_name) if self.conflict_name: self.remove_test_file('richard', self.conflict_name) def test_write_file_lock_expired(self): suffix = self.get_random_suffix() self.file_name = self.file_path + suffix shared_name = self.shared_file_path + suffix try: created_share = self.create_share('einstein', self.richard_id, self.richard_idp, self.file_name) self.share_id = created_share['opaque_id'] self.file_api.write_file(self.file_name, 'content') sleep(12) self.richard_file_api.write_file(shared_name, "richard_content") content = self.read_file_content(self.richard_file_api, shared_name) self.assertEqual(content, 'richard_content', 'File ' + self.file_name + ' should contain the string: ' + 'richard_content') finally: if self.share_id: self.remove_test_share('einstein', self.share_id) self.remove_test_file('einstein', self.file_name) def test_write_by_lock_owner_file_locked(self): self.file_name = self.file_path + self.get_random_suffix() try: created_share = self.create_share('einstein', self.richard_id, self.richard_idp, self.file_name) self.share_id = created_share['opaque_id'] self.file_api.write_file(self.file_name, 'content') self.file_api.write_file(self.file_name, 'new_content') content = self.read_file_content(self.file_api, self.file_name) self.assertEqual(content, 'new_content', 'File ' + self.file_name + ' should contain the string: ' + 'new_content') finally: if self.share_id: self.remove_test_share('einstein', self.share_id) self.remove_test_file('einstein', self.file_name)
47.06135
160
0.671229
import json from unittest import TestCase from time import sleep from cs3api4lab.tests.share_test_base import ShareTestBase from traitlets.config import LoggingConfigurable import urllib.parse class TestLocks(ShareTestBase, TestCase): einstein_id = '4c510ada-c86b-4815-8820-42cdf82c3d51' einstein_idp = 'cernbox.cern.ch' marie_id = 'f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c' marie_idp = 'cesnet.cz' richard_id = '932b4540-8d16-481e-8ef4-588e4b6b151c' richard_idp = 'example.org' receiver_role = 'viewer' receiver_grantee_type = 'user' file_path = '/home/test_locks.txt' shared_file_path = '/reva/einstein/test_locks.txt' storage_id = '123e4567-e89b-12d3-a456-426655440000' share_id = None conflict_name = None def test_lock_created_when_file_written(self): self.file_name = self.file_path + self.get_random_suffix() try: created_share = self.create_share('einstein', self.richard_id, self.richard_idp, self.file_name) self.share_id = created_share['opaque_id'] self.file_api.write_file(self.file_name, 'content') file_ref = self.storage_logic.get_unified_file_ref(self.file_name, '/') file_info = self.storage_logic._stat_internal(file_ref).info self.assertTrue(file_info.arbitrary_metadata.metadata) self.assertIn("lock_einstein_cernbox.cern.ch_4c510ada-c86b-4815-8820-42cdf82c3d51", file_info.arbitrary_metadata.metadata) lock = json.loads(urllib.parse.unquote(file_info.arbitrary_metadata.metadata["lock_einstein_cernbox.cern.ch_4c510ada-c86b-4815-8820-42cdf82c3d51"])) self.assertEquals(lock['username'], 'einstein') self.assertEquals(lock['idp'], 'cernbox.cern.ch') self.assertEquals(lock['opaque_id'], '4c510ada-c86b-4815-8820-42cdf82c3d51') finally: if self.share_id: self.remove_test_share('einstein', self.share_id) self.remove_test_file('einstein', self.file_name) def test_lock_created_when_file_read(self): self.file_name = self.file_path + self.get_random_suffix() try: created_share = self.create_share('einstein', self.richard_id, self.richard_idp, self.file_name) self.share_id = created_share['opaque_id'] for chunk in self.file_api.read_file(self.file_name): continue file_ref = self.storage_logic.get_unified_file_ref(self.file_name, '/') file_info = self.storage_logic._stat_internal(file_ref).info self.assertTrue(file_info.arbitrary_metadata.metadata) self.assertIn("lock_einstein_cernbox.cern.ch_4c510ada-c86b-4815-8820-42cdf82c3d51", file_info.arbitrary_metadata.metadata) lock = json.loads(urllib.parse.unquote(file_info.arbitrary_metadata.metadata["lock_einstein_cernbox.cern.ch_4c510ada-c86b-4815-8820-42cdf82c3d51"])) self.assertEquals(lock['username'], 'einstein') self.assertEquals(lock['idp'], 'cernbox.cern.ch') self.assertEquals(lock['opaque_id'], '4c510ada-c86b-4815-8820-42cdf82c3d51') finally: if self.share_id: self.remove_test_share('einstein', self.share_id) self.remove_test_file('einstein', self.file_name) def test_write_file_locked_conflict_created(self): suffix = self.get_random_suffix() self.file_name = self.file_path + suffix shared_name = self.shared_file_path + suffix try: created_share = self.create_share('einstein', self.richard_id, self.richard_idp, self.file_name) self.share_id = created_share['opaque_id'] self.file_api.write_file(self.file_name, 'content') self.conflict_name = self.richard_file_api.write_file(shared_name, "richard_content") lock_stat = self.richard_file_api.stat(self.conflict_name) self.assertEqual(lock_stat['filepath'], self.conflict_name) content = self.read_file_content(self.richard_file_api, self.conflict_name) self.assertEqual(content, 'richard_content', 'File ' + 
self.file_name + ' should contain the string: ' + 'richard_content') finally: if self.share_id: self.remove_test_share('einstein', self.share_id) self.remove_test_file('einstein', self.file_name) if self.conflict_name: self.remove_test_file('richard', self.conflict_name) def test_write_dir_file_locked(self): suffix = self.get_random_suffix() self.file_name = '/home/testdir/test_locks.txt' + suffix shared_name = '/reva/einstein/testdir/test_locks.txt' + suffix try: try: self.file_api.create_directory('/home/testdir') except: pass created_share = self.create_share('einstein', self.richard_id, self.richard_idp, self.file_name) self.share_id = created_share['opaque_id'] self.file_api.write_file(self.file_name, 'content') self.conflict_name = self.richard_file_api.write_file(shared_name, "richard_content") lock_stat = self.richard_file_api.stat(self.conflict_name) self.assertEqual(lock_stat['filepath'], self.conflict_name) content = self.read_file_content(self.richard_file_api, self.conflict_name) self.assertEqual(content, 'richard_content', 'File ' + self.file_name + ' should contain the string: ' + 'richard_content') finally: if self.share_id: self.remove_test_share('einstein', self.share_id) self.remove_test_file('einstein', self.file_name) if self.conflict_name: self.remove_test_file('richard', self.conflict_name) def test_write_file_lock_expired(self): suffix = self.get_random_suffix() self.file_name = self.file_path + suffix shared_name = self.shared_file_path + suffix try: created_share = self.create_share('einstein', self.richard_id, self.richard_idp, self.file_name) self.share_id = created_share['opaque_id'] self.file_api.write_file(self.file_name, 'content') sleep(12) self.richard_file_api.write_file(shared_name, "richard_content") content = self.read_file_content(self.richard_file_api, shared_name) self.assertEqual(content, 'richard_content', 'File ' + self.file_name + ' should contain the string: ' + 'richard_content') finally: if self.share_id: self.remove_test_share('einstein', self.share_id) self.remove_test_file('einstein', self.file_name) def test_write_by_lock_owner_file_locked(self): self.file_name = self.file_path + self.get_random_suffix() try: created_share = self.create_share('einstein', self.richard_id, self.richard_idp, self.file_name) self.share_id = created_share['opaque_id'] self.file_api.write_file(self.file_name, 'content') self.file_api.write_file(self.file_name, 'new_content') content = self.read_file_content(self.file_api, self.file_name) self.assertEqual(content, 'new_content', 'File ' + self.file_name + ' should contain the string: ' + 'new_content') finally: if self.share_id: self.remove_test_share('einstein', self.share_id) self.remove_test_file('einstein', self.file_name)
true
true
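The lock assertions in the test_locks.py record above read a URL-quoted JSON blob stored under a per-user metadata key. A minimal round-trip sketch of that encoding scheme; the key and field names are taken from the record's assertions, while the dictionary holding them is illustrative:

import json
import urllib.parse

key = 'lock_einstein_cernbox.cern.ch_4c510ada-c86b-4815-8820-42cdf82c3d51'
lock = {'username': 'einstein', 'idp': 'cernbox.cern.ch',
        'opaque_id': '4c510ada-c86b-4815-8820-42cdf82c3d51'}

# Store the lock the way the tests expect to find it: JSON, then URL-quoted.
metadata = {key: urllib.parse.quote(json.dumps(lock))}

# Decode it back, mirroring json.loads(urllib.parse.unquote(...)) in the tests.
decoded = json.loads(urllib.parse.unquote(metadata[key]))
assert decoded['username'] == 'einstein'
assert decoded['idp'] == 'cernbox.cern.ch'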
f700d08368e70d12a9fe5d31f3bb701d4f1002f4
7,826
py
Python
vissl/data/ssl_transforms/__init__.py
pcanas/vissl
d293b8295f03a4caeaebd25f3e5ed38866dd4d10
[ "MIT" ]
null
null
null
vissl/data/ssl_transforms/__init__.py
pcanas/vissl
d293b8295f03a4caeaebd25f3e5ed38866dd4d10
[ "MIT" ]
null
null
null
vissl/data/ssl_transforms/__init__.py
pcanas/vissl
d293b8295f03a4caeaebd25f3e5ed38866dd4d10
[ "MIT" ]
null
null
null
# Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from pathlib import Path from typing import Any, Dict import torchvision.transforms as pth_transforms from classy_vision.dataset.transforms import build_transform, register_transform from classy_vision.dataset.transforms.classy_transform import ClassyTransform from classy_vision.generic.registry_utils import import_all_modules # Below the transforms that require passing the labels as well. This is specifc # to SSL only where we automatically generate the labels for training. All other # transforms (including torchvision) require passing image only as input. _TRANSFORMS_WITH_LABELS = ["ImgRotatePil", "ShuffleImgPatches"] _TRANSFORMS_WITH_COPIES = [ "ImgReplicatePil", "ImgPilToPatchesAndImage", "ImgPilToMultiCrop", ] _TRANSFORMS_WITH_GROUPING = ["ImgPilMultiCropRandomApply"] # we wrap around transforms so that they work with the multimodal input @register_transform("SSLTransformsWrapper") class SSLTransformsWrapper(ClassyTransform): """ VISSL wraps around transforms so that they work with the multimodal input. VISSL supports batches that come from several datasets and sources. Hence the input batch (images, labels) always is a list. To apply the user defined transforms, VISSL takes "indices" as input which defines on what dataset/source data in the sample should the transform be applied to. For example: Assuming input sample is { "data": [dataset1_imgX, dataset2_imgY], "label": [dataset1_lblX, dataset2_lblY] } and the transform is: TRANSFORMS: - name: RandomGrayscale p: 0.2 indices: 0 then the transform is applied only on dataset1_imgX. If however, the indices are either not specified or set to 0, 1 then the transform is applied on both dataset1_imgX and dataset2_imgY Since this structure of data is introduced by vissl, the SSLTransformsWrapper takes care of dealing with the multi-modality input by wrapping the original transforms (pytorch transforms or custom transforms defined by user) and calling each transform on each index. VISSL also supports _TRANSFORMS_WITH_LABELS transforms that modify the label or are used to generate the labels used in self-supervised learning tasks like Jigsaw. When the transforms in _TRANSFORMS_WITH_LABELS are called, the new label is also returned besides the transformed image. VISSL also supports the _TRANSFORMS_WITH_COPIES which are transforms that basically generate several copies of image. Common example of self-supervised training methods that do this is SimCLR, SwAV, MoCo etc When a transform from _TRANSFORMS_WITH_COPIES is used, the SSLTransformsWrapper will flatten the transform output. For example for the input [img1], if we apply ImgReplicatePil to replicate the image 2 times: SSLTransformsWrapper( ImgReplicatePil(num_times=2), [img1] ) will output [img1_1, img1_2] instead of nested list [[img1_1, img1_2]]. The benefit of this is that the next set of transforms specified by user can now operate on img1_1 and img1_2 as the input becomes multi-modal nature. VISSL also supports _TRANSFORMS_WITH_GROUPING which essentially means that a single transform should be applied on the full multi-modal input together instead of separately. 
This is common transform used in BYOL/ For example: SSLTransformsWrapper( ImgPilMultiCropRandomApply( RandomApply, prob=[0.0, 0.2] ), [img1_1, img1_2] ) this will apply RandomApply on img1_1 with prob=0.0 and on img1_2 with prob=0.2 """ def __init__(self, indices, **args): """ Args: indices (List[int]) (Optional): the indices list on which transform should be applied for the input which is always a list Example: minibatch of size=2 looks like [[img1], [img2]]). If indices is not specified, transform is applied to all the multi-modal input. args (dict): the arguments that the transform takes """ self.indices = set(indices) self.name = args["name"] self.transform = build_transform(args) def _is_transform_with_labels(self): """ _TRANSFORMS_WITH_LABELS = ["ImgRotatePil", "ShuffleImgPatches"] """ if self.name in _TRANSFORMS_WITH_LABELS: return True return False def _is_transform_with_copies(self): """ _TRANSFORMS_WITH_COPIES = [ "ImgReplicatePil", "ImgPilToPatchesAndImage", "ImgPilToMultiCrop", ] """ if self.name in _TRANSFORMS_WITH_COPIES: return True return False def _is_grouping_transform(self): """ _TRANSFORMS_WITH_GROUPING = ["ImgPilMultiCropRandomApply"] """ if self.name in _TRANSFORMS_WITH_GROUPING: return True return False def __call__(self, sample): """ Apply each transform on the specified indices of each entry in the input sample. """ # Run on all indices if empty set is passed. indices = self.indices if self.indices else set(range(len(sample["data"]))) if self._is_grouping_transform(): # if the transform needs to be applied to all the indices # together. For example: one might want to vary the intensity # of a transform across several crops of an image as in BYOL. output = self.transform(sample["data"]) sample["data"] = output else: for idx in indices: output = self.transform(sample["data"][idx]) if self._is_transform_with_labels(): sample["data"][idx] = output[0] sample["label"][-1] = output[1] else: sample["data"][idx] = output if self._is_transform_with_copies(): # if the transform makes copies of the data, we just flatten the list # so the next set of transforms will operate on more indices sample["data"] = [val for sublist in sample["data"] for val in sublist] # now we replicate the rest of the metadata as well num_times = len(sample["data"]) sample["label"] = sample["label"] * num_times sample["data_valid"] = sample["data_valid"] * num_times sample["data_idx"] = sample["data_idx"] * num_times return sample @classmethod def from_config(cls, config: Dict[str, Any]) -> "SSLTransformsWrapper": indices = config.get("indices", []) return cls(indices, **config) def get_transform(input_transforms_list): """ Given the list of user specified transforms, return the torchvision.transforms.Compose() version of the transforms. Each transform in the composition is SSLTransformsWrapper which wraps the original transforms to handle multi-modal nature of input. """ output_transforms = [] for transform_config in input_transforms_list: transform = SSLTransformsWrapper.from_config(transform_config) output_transforms.append(transform) return pth_transforms.Compose(output_transforms) FILE_ROOT = Path(__file__).parent import_all_modules(FILE_ROOT, "vissl.data.ssl_transforms") __all__ = ["SSLTransformsWrapper", "get_transform"]
41.407407
91
0.669307
from pathlib import Path from typing import Any, Dict import torchvision.transforms as pth_transforms from classy_vision.dataset.transforms import build_transform, register_transform from classy_vision.dataset.transforms.classy_transform import ClassyTransform from classy_vision.generic.registry_utils import import_all_modules _TRANSFORMS_WITH_LABELS = ["ImgRotatePil", "ShuffleImgPatches"] _TRANSFORMS_WITH_COPIES = [ "ImgReplicatePil", "ImgPilToPatchesAndImage", "ImgPilToMultiCrop", ] _TRANSFORMS_WITH_GROUPING = ["ImgPilMultiCropRandomApply"] @register_transform("SSLTransformsWrapper") class SSLTransformsWrapper(ClassyTransform): def __init__(self, indices, **args): self.indices = set(indices) self.name = args["name"] self.transform = build_transform(args) def _is_transform_with_labels(self): if self.name in _TRANSFORMS_WITH_LABELS: return True return False def _is_transform_with_copies(self): if self.name in _TRANSFORMS_WITH_COPIES: return True return False def _is_grouping_transform(self): if self.name in _TRANSFORMS_WITH_GROUPING: return True return False def __call__(self, sample): indices = self.indices if self.indices else set(range(len(sample["data"]))) if self._is_grouping_transform(): output = self.transform(sample["data"]) sample["data"] = output else: for idx in indices: output = self.transform(sample["data"][idx]) if self._is_transform_with_labels(): sample["data"][idx] = output[0] sample["label"][-1] = output[1] else: sample["data"][idx] = output if self._is_transform_with_copies(): sample["data"] = [val for sublist in sample["data"] for val in sublist] num_times = len(sample["data"]) sample["label"] = sample["label"] * num_times sample["data_valid"] = sample["data_valid"] * num_times sample["data_idx"] = sample["data_idx"] * num_times return sample @classmethod def from_config(cls, config: Dict[str, Any]) -> "SSLTransformsWrapper": indices = config.get("indices", []) return cls(indices, **config) def get_transform(input_transforms_list): output_transforms = [] for transform_config in input_transforms_list: transform = SSLTransformsWrapper.from_config(transform_config) output_transforms.append(transform) return pth_transforms.Compose(output_transforms) FILE_ROOT = Path(__file__).parent import_all_modules(FILE_ROOT, "vissl.data.ssl_transforms") __all__ = ["SSLTransformsWrapper", "get_transform"]
true
true
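SSLTransformsWrapper in the record above flattens the nested list produced by copy-making transforms and then replicates the sample's metadata to match. A minimal pure-Python sketch of that flatten-and-replicate step; replicate() is a stand-in for a transform like ImgReplicatePil, and the field values are illustrative:

def replicate(x, num_times=2):
    # Stand-in for a copy-making transform such as ImgReplicatePil.
    return [x for _ in range(num_times)]

sample = {'data': ['img1'], 'label': [7], 'data_valid': [1], 'data_idx': [0]}

# Apply the copy-making transform, producing a nested list per entry.
sample['data'] = [replicate(item) for item in sample['data']]

# Flatten, then replicate the metadata, as the wrapper's __call__ does.
sample['data'] = [val for sublist in sample['data'] for val in sublist]
num_times = len(sample['data'])
sample['label'] = sample['label'] * num_times
sample['data_valid'] = sample['data_valid'] * num_times
sample['data_idx'] = sample['data_idx'] * num_times

print(sample['data'], sample['label'])  # ['img1', 'img1'] [7, 7]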
f700d1f41ebb403d3a84638d399c5f99730dafef
470
py
Python
plotly/validators/layout/scene/zaxis/_categoryarraysrc.py
faezs/plotly.py
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
[ "MIT" ]
2
2020-03-24T11:41:14.000Z
2021-01-14T07:59:43.000Z
plotly/validators/layout/scene/zaxis/_categoryarraysrc.py
faezs/plotly.py
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
[ "MIT" ]
null
null
null
plotly/validators/layout/scene/zaxis/_categoryarraysrc.py
faezs/plotly.py
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
[ "MIT" ]
4
2019-06-03T14:49:12.000Z
2022-01-06T01:05:12.000Z
import _plotly_utils.basevalidators class CategoryarraysrcValidator(_plotly_utils.basevalidators.SrcValidator): def __init__( self, plotly_name='categoryarraysrc', parent_name='layout.scene.zaxis', **kwargs ): super(CategoryarraysrcValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type='none', role='info', **kwargs )
24.736842
75
0.617021
import _plotly_utils.basevalidators class CategoryarraysrcValidator(_plotly_utils.basevalidators.SrcValidator): def __init__( self, plotly_name='categoryarraysrc', parent_name='layout.scene.zaxis', **kwargs ): super(CategoryarraysrcValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type='none', role='info', **kwargs )
true
true
f700d29025437917f4c7b0d0e788e73f4ece2f27
1,295
py
Python
setup.py
fabiobatalha/wayta
e4d2b9f619f38ec31055bae918ff02046f27825e
[ "BSD-2-Clause" ]
null
null
null
setup.py
fabiobatalha/wayta
e4d2b9f619f38ec31055bae918ff02046f27825e
[ "BSD-2-Clause" ]
null
null
null
setup.py
fabiobatalha/wayta
e4d2b9f619f38ec31055bae918ff02046f27825e
[ "BSD-2-Clause" ]
null
null
null
import os from setuptools import setup, find_packages here = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, 'README.txt')) as f: README = f.read() with open(os.path.join(here, 'CHANGES.txt')) as f: CHANGES = f.read() requires = [ 'elasticsearch', 'pyramid', 'pyramid_chameleon', 'pyramid_debugtoolbar', 'gunicorn', ] tests_requires = [ 'mocker' ] setup(name='wayta', version='1.0b', description='A tool to suggest the name of an institution or country in the original form and language.', long_description=README + '\n\n' + CHANGES, classifiers=[ "Programming Language :: Python", "Framework :: Pyramid", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: WSGI :: Application", ], author='SciELO', author_email='[email protected]', url='http://docs.scielo.org', keywords='web pyramid pylons', packages=find_packages(), include_package_data=True, zip_safe=False, install_requires=requires, setup_requires=["nose>=1.0", "coverage"], tests_require=tests_requires, test_suite="nose.collector", entry_points="""\ [paste.app_factory] main = wayta:main """, )
26.428571
111
0.620077
import os from setuptools import setup, find_packages here = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, 'README.txt')) as f: README = f.read() with open(os.path.join(here, 'CHANGES.txt')) as f: CHANGES = f.read() requires = [ 'elasticsearch', 'pyramid', 'pyramid_chameleon', 'pyramid_debugtoolbar', 'gunicorn', ] tests_requires = [ 'mocker' ] setup(name='wayta', version='1.0b', description='A tool to suggest the name of an institution or country in the original form and language.', long_description=README + '\n\n' + CHANGES, classifiers=[ "Programming Language :: Python", "Framework :: Pyramid", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: WSGI :: Application", ], author='SciELO', author_email='[email protected]', url='http://docs.scielo.org', keywords='web pyramid pylons', packages=find_packages(), include_package_data=True, zip_safe=False, install_requires=requires, setup_requires=["nose>=1.0", "coverage"], tests_require=tests_requires, test_suite="nose.collector", entry_points="""\ [paste.app_factory] main = wayta:main """, )
true
true
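The setup.py record above wires the package to PasteDeploy through the [paste.app_factory] entry point main = wayta:main. A minimal sketch of the factory signature that entry point assumes, using the pyramid dependency listed in requires; this is a generic stub, not wayta's actual implementation:

from pyramid.config import Configurator

def main(global_config, **settings):
    # PasteDeploy resolves "main = wayta:main" to a callable like this,
    # passes in the [app:main] settings from the .ini file, and expects
    # a WSGI application back.
    config = Configurator(settings=settings)
    return config.make_wsgi_app()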
f700d349785d32abf8d10d9e46ea09cced369e49
12,762
py
Python
tensorflow/python/platform/default/_gfile.py
ln0119/tensorflow-fast-rcnn
e937e6394818c9a320754237651d7fe083b1020d
[ "Apache-2.0" ]
73
2017-01-05T09:06:08.000Z
2021-11-06T14:00:50.000Z
tensorflow/python/platform/default/_gfile.py
minhhoai2/tensorflow
da88903d5e29230d68d861053aa1dea1432c0696
[ "Apache-2.0" ]
8
2017-04-10T10:36:20.000Z
2021-02-07T01:02:32.000Z
tensorflow/python/platform/default/_gfile.py
minhhoai2/tensorflow
da88903d5e29230d68d861053aa1dea1432c0696
[ "Apache-2.0" ]
151
2016-11-10T09:01:15.000Z
2022-01-18T08:13:49.000Z
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """File processing utilities.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import errno import functools import glob as _glob import os import shutil import threading import six class _GFileBase(six.Iterator): """Base I/O wrapper class. Similar semantics to Python's file object.""" # pylint: disable=protected-access def _synchronized(fn): """Synchronizes file I/O for methods in GFileBase.""" @functools.wraps(fn) def sync(self, *args, **kwargs): # Sometimes a GFileBase method is called before the instance # has been properly initialized. Check that _locker is available. if hasattr(self, '_locker'): self._locker.lock() try: return fn(self, *args, **kwargs) finally: if hasattr(self, '_locker'): self._locker.unlock() return sync # pylint: enable=protected-access def __init__(self, name, mode, locker): """Create the GFileBase object with the given filename, mode, and locker. Args: name: string, the filename. mode: string, the mode to open the file with (e.g. "r", "w", "a+"). locker: the thread locking object (e.g. _PythonLocker) for controlling thread access to the I/O methods of this class. """ self._name = name self._mode = mode self._locker = locker self._fp = open(name, mode) def __enter__(self): """Make GFileBase usable with "with" statement.""" return self def __exit__(self, unused_type, unused_value, unused_traceback): """Make GFileBase usable with "with" statement.""" self.close() @_synchronized def __del__(self): # __del__ is sometimes called before initialization, in which # case the object is not fully constructed. Check for this here # before trying to close the file handle. if hasattr(self, '_fp'): self._fp.close() @_synchronized def flush(self): """Flush the underlying file handle.""" return self._fp.flush() @property @_synchronized def closed(self): """Returns "True" if the file handle is closed. Otherwise False.""" return self._fp.closed @_synchronized def write(self, data): """Write data to the underlying file handle. Args: data: The string to write to the file handle. """ self._fp.write(data) @_synchronized def writelines(self, seq): """Write a sequence of strings to the underlying file handle.""" self._fp.writelines(seq) @_synchronized def tell(self): """Return the location from the underlying file handle. Returns: An integer location (which can be used in e.g., seek). """ return self._fp.tell() @_synchronized def seek(self, offset, whence=0): """Seek to offset (conditioned on whence) in the underlying file handle. Args: offset: int, the offset within the file to seek to. whence: 0, 1, or 2. See python's seek() documentation for details. """ self._fp.seek(offset, whence) @_synchronized def truncate(self, new_size=None): """Truncate the underlying file handle to new_size. Args: new_size: Size after truncation. 
If None, the file handle is truncated to 0 bytes. """ self._fp.truncate(new_size) @_synchronized def readline(self, max_length=-1): """Read a single line (up to max_length) from the underlying file handle. Args: max_length: The maximum number of chsaracters to read. Returns: A string, including any newline at the end, or empty string if at EOF. """ return self._fp.readline(max_length) @_synchronized def readlines(self, sizehint=None): """Read lines from the underlying file handle. Args: sizehint: See the python file.readlines() documentation. Returns: A list of strings from the underlying file handle. """ if sizehint is not None: return self._fp.readlines(sizehint) else: return self._fp.readlines() def __iter__(self): """Enable line iteration on the underlying handle (not synchronized).""" return self # Not synchronized def __next__(self): """Enable line iteration on the underlying handle (not synchronized). Returns: An line iterator from the underlying handle. Example: # read a file's lines by consuming the iterator with a list with open("filename", "r") as fp: lines = list(fp) """ return next(self._fp) @_synchronized def Size(self): # pylint: disable=invalid-name """Get byte size of the file from the underlying file handle.""" cur = self.tell() try: self.seek(0, 2) size = self.tell() finally: self.seek(cur) return size @_synchronized def read(self, n=-1): """Read n bytes from the underlying file handle. Args: n: Number of bytes to read (if negative, read to end of file handle.) Returns: A string of the bytes read, up to the end of file. """ return self._fp.read(n) @_synchronized def close(self): """Close the underlying file handle.""" self._fp.close() # Declare wrappers as staticmethods at the end so that we can # use them as decorators. _synchronized = staticmethod(_synchronized) class GFile(_GFileBase): """File I/O wrappers with thread locking.""" def __init__(self, name, mode='r'): super(GFile, self).__init__(name, mode, _Pythonlocker()) class FastGFile(_GFileBase): """File I/O wrappers without thread locking.""" def __init__(self, name, mode='r'): super(FastGFile, self).__init__(name, mode, _Nulllocker()) # locker classes. Note that locks must be reentrant, so that multiple # lock() calls by the owning thread will not block. class _Pythonlocker(object): """A locking strategy that uses standard locks from the thread module.""" def __init__(self): self._lock = threading.RLock() def lock(self): self._lock.acquire() def unlock(self): self._lock.release() class _Nulllocker(object): """A locking strategy where lock() and unlock() methods are no-ops.""" def lock(self): pass def unlock(self): pass def Exists(path): # pylint: disable=invalid-name """Returns True iff "path" exists (as a dir, file, non-broken symlink).""" return os.path.exists(path) def IsDirectory(path): # pylint: disable=invalid-name """Return True iff "path" exists and is a directory.""" return os.path.isdir(path) def Glob(glob): # pylint: disable=invalid-name """Return a list of filenames matching the glob "glob".""" return _glob.glob(glob) def MkDir(path, mode=0o755): # pylint: disable=invalid-name """Create the directory "path" with the given mode. Args: path: The directory path mode: The file mode for the directory Returns: None Raises: OSError: if the path already exists """ os.mkdir(path, mode) def MakeDirs(path, mode=0o755): # pylint: disable=invalid-name """Recursively create the directory "path" with the given mode. Args: path: The directory path. 
mode: The file mode for the created directories Raises: OSError: if the path already exists """ # NOTE(mrry): MakeDirs("") should be a no-op to match other # implementations of tf.gfile. if path: os.makedirs(path, mode) def RmDir(directory): # pylint: disable=invalid-name """Removes the directory "directory" iff the directory is empty. Args: directory: The directory to remove. Raises: OSError: If the directory does not exist or is not empty. """ os.rmdir(directory) def Remove(path): # pylint: disable=invalid-name """Delete the (non-directory) file "path". Args: path: The file to remove. Raises: OSError: If "path" does not exist, is a directory, or cannot be deleted. """ os.remove(path) def Rename(oldpath, newpath, overwrite=False): """Rename or move a file, or a local directory. Args: oldpath: string; a pathname of a file. newpath: string; a pathname to which the file will be moved. overwrite: boolean; if false, it is an error for newpath to be occupied by an existing file. Raises: OSError: If "newpath" is occupied by an existing file and overwrite=False. """ if not overwrite and Exists(newpath) and not IsDirectory(newpath): raise OSError(errno.EEXIST, os.strerror(errno.EEXIST), newpath) os.rename(oldpath, newpath) def DeleteRecursively(path): # pylint: disable=invalid-name """Delete the file or directory "path" recursively. Args: path: The path to remove (may be a non-empty directory). Raises: OSError: If the path does not exist or cannot be deleted. """ if IsDirectory(path): shutil.rmtree(path) else: Remove(path) def ListDirectory(directory, return_dotfiles=False): # pylint: disable=invalid-name """Returns a list of files in dir. As with the standard os.listdir(), the filenames in the returned list will be the basenames of the files in dir (not absolute paths). To get a list of absolute paths of files in a directory, a client could do: file_list = gfile.ListDir(my_dir) file_list = [os.path.join(my_dir, f) for f in file_list] (assuming that my_dir itself specified an absolute path to a directory). Args: directory: the directory to list return_dotfiles: if True, dotfiles will be returned as well. Even if this arg is True, '.' and '..' will not be returned. Returns: ['list', 'of', 'files']. The entries '.' and '..' are never returned. Other entries starting with a dot will only be returned if return_dotfiles is True. Raises: OSError: if there is an error retrieving the directory listing. """ files = os.listdir(directory) if not return_dotfiles: files = [f for f in files if not f.startswith('.')] return files def Walk(top, topdown=1, onerror=None): """Recursive directory tree generator. Args: top: string, a pathname. topdown: bool, should traversal be pre-order (True) or post-order (False) onerror: function, optional callback for errors. By default, errors that occur when listing a directory are ignored. (This is the same semantics as Python's os.walk() generator.) If the optional argument "onerror" is specified, it should be a function. It will be called with one argument, an os.error instance. It can return to continue with the walk, or reraise the exception to abort the walk. Yields: # Each yield is a 3-tuple: the pathname of a directory, followed # by lists of all its subdirectories and leaf files. (dirname, [subdirname, subdirname, ...], [filename, filename, ...]) """ return os.walk(top, topdown=topdown, onerror=onerror) def Stat(path): # pylint: disable=invalid-name """Gets the status of a file. Args: path: The file to call Stat() on. 
Does the equivalent of Stat() on the specified "path" and return file properties. Returns: An object whose attributes give information on the file. Raises: OSError: If "path" does not exist. """ statinfo = os.stat(path) filestat = collections.namedtuple('FileStat', ['mtime']) filestat.mtime = statinfo.st_mtime return filestat def Copy(oldpath, newpath, overwrite=False): """Copy a file. Args: oldpath: string; a pathname of a file. newpath: string; a pathname to which the file will be copied. overwrite: boolean; if false, it is an error for newpath to be occupied by an existing file. Raises: OSError: If "newpath" is occupied by an existing file and overwrite=False, or any error thrown by shutil.copy. """ if not overwrite and Exists(newpath): raise OSError(errno.EEXIST, os.strerror(errno.EEXIST), newpath) shutil.copy(oldpath, newpath) def Open(name, mode='r'): """Exact API match to the standard open. Args: name: a file name, either local or a gfile compatible. mode: for example "w" to open the file for writing. Returns: A threadsafe gfile.GFile object. """ return GFile(name, mode=mode)
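Note: the record above documents a small file-wrapper API (GFile, FastGFile, plus module helpers such as Exists, MakeDirs and DeleteRecursively). A minimal usage sketch follows; the import path, the temporary directory name and the sample lines are illustrative assumptions, not part of the record.

import os
import tempfile

# Hedged sketch: "gfile" is assumed to be the module stored in the record
# above; it is passed in explicitly so the sketch does not assert a
# particular import path.
def gfile_demo(gfile):
    workdir = os.path.join(tempfile.gettempdir(), "gfile_demo")  # assumed path
    if not gfile.Exists(workdir):
        gfile.MakeDirs(workdir)              # recursive mkdir, default mode 0o755

    path = os.path.join(workdir, "notes.txt")
    with gfile.GFile(path, "w") as f:        # thread-locked wrapper
        f.writelines(["first line\n", "second line\n"])

    with gfile.FastGFile(path, "r") as f:    # same API, no locking
        lines = list(f)                      # iteration delegates to the handle

    gfile.DeleteRecursively(workdir)         # clean up the directory tree
    return lines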
28.172185
84
0.681555
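The three derived fields above (avg_line_length, max_line_length, alphanum_fraction) can in principle be recomputed from the content field. The sketch below shows one plausible definition; it is an assumption about the pipeline, not a statement of how these numbers were actually produced.

def line_stats(content):
    """Approximate the per-file statistics stored alongside each record.

    The exact definitions used by the original pipeline are not given in
    this document, so treat these as rough reimplementations.
    """
    lines = content.split("\n")
    lengths = [len(line) for line in lines]
    alnum = sum(ch.isalnum() for ch in content)
    return {
        "avg_line_length": sum(lengths) / len(lengths) if lengths else 0.0,
        "max_line_length": max(lengths) if lengths else 0,
        "alphanum_fraction": alnum / len(content) if content else 0.0,
    }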
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import errno import functools import glob as _glob import os import shutil import threading import six class _GFileBase(six.Iterator): def _synchronized(fn): @functools.wraps(fn) def sync(self, *args, **kwargs): if hasattr(self, '_locker'): self._locker.lock() try: return fn(self, *args, **kwargs) finally: if hasattr(self, '_locker'): self._locker.unlock() return sync def __init__(self, name, mode, locker): self._name = name self._mode = mode self._locker = locker self._fp = open(name, mode) def __enter__(self): return self def __exit__(self, unused_type, unused_value, unused_traceback): self.close() @_synchronized def __del__(self): if hasattr(self, '_fp'): self._fp.close() @_synchronized def flush(self): return self._fp.flush() @property @_synchronized def closed(self): return self._fp.closed @_synchronized def write(self, data): self._fp.write(data) @_synchronized def writelines(self, seq): self._fp.writelines(seq) @_synchronized def tell(self): return self._fp.tell() @_synchronized def seek(self, offset, whence=0): self._fp.seek(offset, whence) @_synchronized def truncate(self, new_size=None): self._fp.truncate(new_size) @_synchronized def readline(self, max_length=-1): return self._fp.readline(max_length) @_synchronized def readlines(self, sizehint=None): if sizehint is not None: return self._fp.readlines(sizehint) else: return self._fp.readlines() def __iter__(self): return self def __next__(self): return next(self._fp) @_synchronized def Size(self): cur = self.tell() try: self.seek(0, 2) size = self.tell() finally: self.seek(cur) return size @_synchronized def read(self, n=-1): return self._fp.read(n) @_synchronized def close(self): self._fp.close() _synchronized = staticmethod(_synchronized) class GFile(_GFileBase): def __init__(self, name, mode='r'): super(GFile, self).__init__(name, mode, _Pythonlocker()) class FastGFile(_GFileBase): def __init__(self, name, mode='r'): super(FastGFile, self).__init__(name, mode, _Nulllocker()) class _Pythonlocker(object): def __init__(self): self._lock = threading.RLock() def lock(self): self._lock.acquire() def unlock(self): self._lock.release() class _Nulllocker(object): def lock(self): pass def unlock(self): pass def Exists(path): return os.path.exists(path) def IsDirectory(path): return os.path.isdir(path) def Glob(glob): return _glob.glob(glob) def MkDir(path, mode=0o755): os.mkdir(path, mode) def MakeDirs(path, mode=0o755): if path: os.makedirs(path, mode) def RmDir(directory): os.rmdir(directory) def Remove(path): os.remove(path) def Rename(oldpath, newpath, overwrite=False): if not overwrite and Exists(newpath) and not IsDirectory(newpath): raise OSError(errno.EEXIST, os.strerror(errno.EEXIST), newpath) os.rename(oldpath, newpath) def DeleteRecursively(path): if IsDirectory(path): shutil.rmtree(path) else: Remove(path) def ListDirectory(directory, return_dotfiles=False): files = os.listdir(directory) if not return_dotfiles: files = [f for f in files if not f.startswith('.')] return files def Walk(top, topdown=1, onerror=None): return os.walk(top, topdown=topdown, onerror=onerror) def Stat(path): statinfo = os.stat(path) filestat = collections.namedtuple('FileStat', ['mtime']) filestat.mtime = statinfo.st_mtime return filestat def Copy(oldpath, newpath, overwrite=False): if not overwrite and Exists(newpath): raise OSError(errno.EEXIST, os.strerror(errno.EEXIST), newpath) shutil.copy(oldpath, newpath) def 
Open(name, mode='r'): return GFile(name, mode=mode)
true
true
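The content_no_comment field and the two boolean flags (is_comment_constant_removed, is_sharp_comment_removed) indicate a comment-stripping pass over each source file. A hedged sketch of such a pass using the standard tokenize module is below; the real pipeline may differ, and docstring removal (which content_no_comment also appears to perform) would need an extra AST-based step not shown here.

import io
import tokenize

def strip_hash_comments(source):
    """Drop '#' comments from Python source, keeping code and strings.

    Sketch only: lines that contained nothing but a comment are left blank
    rather than removed, and docstrings are not touched.
    """
    tokens = tokenize.generate_tokens(io.StringIO(source).readline)
    kept = [tok for tok in tokens if tok.type != tokenize.COMMENT]
    return tokenize.untokenize(kept)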
f700d35286f0c2a1efc2a733ed08780f7bb2901e
3,846
py
Python
azure-mgmt-cdn/azure/mgmt/cdn/operations/edge_nodes_operations.py
Christina-Kang/azure-sdk-for-python
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
[ "MIT" ]
1
2022-03-30T22:39:15.000Z
2022-03-30T22:39:15.000Z
azure-mgmt-cdn/azure/mgmt/cdn/operations/edge_nodes_operations.py
Christina-Kang/azure-sdk-for-python
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
[ "MIT" ]
54
2016-03-25T17:25:01.000Z
2018-10-22T17:27:54.000Z
azure-mgmt-cdn/azure/mgmt/cdn/operations/edge_nodes_operations.py
Christina-Kang/azure-sdk-for-python
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
[ "MIT" ]
2
2017-01-20T18:25:46.000Z
2017-05-12T21:31:47.000Z
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- import uuid from msrest.pipeline import ClientRawResponse from .. import models class EdgeNodesOperations(object): """EdgeNodesOperations operations. :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. :ivar api_version: Version of the API to be used with the client request. Current version is 2017-04-02. Constant value: "2017-04-02". """ models = models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self.api_version = "2017-04-02" self.config = config def list( self, custom_headers=None, raw=False, **operation_config): """Edgenodes are the global Point of Presence (POP) locations used to deliver CDN content to end users. :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: An iterator like instance of EdgeNode :rtype: ~azure.mgmt.cdn.models.EdgeNodePaged[~azure.mgmt.cdn.models.EdgeNode] :raises: :class:`ErrorResponseException<azure.mgmt.cdn.models.ErrorResponseException>` """ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL url = self.list.metadata['url'] # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters) response = self._client.send( request, header_parameters, stream=False, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) return response # Deserialize response deserialized = models.EdgeNodePaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = models.EdgeNodePaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized list.metadata = {'url': '/providers/Microsoft.Cdn/edgenodes'}
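The list operation above returns a paged iterator (EdgeNodePaged). A hedged sketch of consuming it follows; the client object, the edge_nodes attribute name and the printed name attribute are assumptions based on the usual layout of generated Azure management clients, not details stated in this record.

def print_edge_node_names(client):
    # "client" stands for whatever generated management client exposes
    # EdgeNodesOperations (commonly as client.edge_nodes); constructing it
    # (credentials, subscription id) is outside the scope of this record.
    for edge_node in client.edge_nodes.list():   # EdgeNodePaged is iterable
        # Each item deserializes to an EdgeNode model; 'name' is assumed to
        # be one of its attributes, as is typical for Azure resource models.
        print(edge_node.name)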
38.848485
144
0.632865
import uuid from msrest.pipeline import ClientRawResponse from .. import models class EdgeNodesOperations(object): models = models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self.api_version = "2017-04-02" self.config = config def list( self, custom_headers=None, raw=False, **operation_config): def internal_paging(next_link=None, raw=False): if not next_link: url = self.list.metadata['url'] query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') else: url = next_link query_parameters = {} header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') request = self._client.get(url, query_parameters) response = self._client.send( request, header_parameters, stream=False, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) return response deserialized = models.EdgeNodePaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = models.EdgeNodePaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized list.metadata = {'url': '/providers/Microsoft.Cdn/edgenodes'}
true
true
f700d4316842c3ceb0981946e2d5fd713d664c46
190,376
py
Python
pandas/core/frame.py
lmarti/pandas
fdfd66cdf3f357fb52831eb644897e144a0d7f30
[ "PSF-2.0", "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause" ]
null
null
null
pandas/core/frame.py
lmarti/pandas
fdfd66cdf3f357fb52831eb644897e144a0d7f30
[ "PSF-2.0", "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause" ]
null
null
null
pandas/core/frame.py
lmarti/pandas
fdfd66cdf3f357fb52831eb644897e144a0d7f30
[ "PSF-2.0", "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause" ]
2
2016-02-26T05:47:12.000Z
2020-01-08T18:05:00.000Z
""" DataFrame --------- An efficient 2D container for potentially mixed-type time series or other labeled data series. Similar to its R counterpart, data.frame, except providing automatic data alignment and a host of useful data manipulation methods having to do with the labeling information """ from __future__ import division # pylint: disable=E1101,E1103 # pylint: disable=W0212,W0231,W0703,W0622 import functools import collections import itertools import sys import types import warnings from numpy import nan as NA import numpy as np import numpy.ma as ma from pandas.core.common import (isnull, notnull, PandasError, _try_sort, _default_index, _maybe_upcast, is_sequence, _infer_dtype_from_scalar, _values_from_object, is_list_like, _get_dtype, _maybe_box_datetimelike, is_categorical_dtype, is_object_dtype, _possibly_infer_to_datetimelike) from pandas.core.generic import NDFrame, _shared_docs from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable, check_bool_indexer) from pandas.core.internals import (BlockManager, create_block_manager_from_arrays, create_block_manager_from_blocks) from pandas.core.series import Series from pandas.core.categorical import Categorical import pandas.computation.expressions as expressions from pandas.computation.eval import eval as _eval from numpy import percentile as _quantile from pandas.compat import(range, zip, lrange, lmap, lzip, StringIO, u, OrderedDict, raise_with_traceback) from pandas import compat from pandas.sparse.array import SparseArray from pandas.util.decorators import deprecate, Appender, Substitution, \ deprecate_kwarg from pandas.tseries.period import PeriodIndex from pandas.tseries.index import DatetimeIndex import pandas.core.algorithms as algos import pandas.core.common as com import pandas.core.format as fmt import pandas.core.nanops as nanops import pandas.core.ops as ops import pandas.lib as lib import pandas.algos as _algos from pandas.core.config import get_option #---------------------------------------------------------------------- # Docstring templates _shared_doc_kwargs = dict(axes='index, columns', klass='DataFrame', axes_single_arg="{0,1,'index','columns'}") _numeric_only_doc = """numeric_only : boolean, default None Include only float, int, boolean data. If None, will attempt to use everything, then use only numeric data """ _merge_doc = """ Merge DataFrame objects by performing a database-style join operation by columns or indexes. If joining columns on columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes on indexes or indexes on a column or columns, the index will be passed on. Parameters ----------%s right : DataFrame how : {'left', 'right', 'outer', 'inner'}, default 'inner' * left: use only keys from left frame (SQL: left outer join) * right: use only keys from right frame (SQL: right outer join) * outer: use union of keys from both frames (SQL: full outer join) * inner: use intersection of keys from both frames (SQL: inner join) on : label or list Field names to join on. Must be found in both DataFrames. If on is None and not merging on indexes, then it merges on the intersection of the columns by default. left_on : label or list, or array-like Field names to join on in left DataFrame. 
Can be a vector or list of vectors of the length of the DataFrame to use a particular vector as the join key instead of columns right_on : label or list, or array-like Field names to join on in right DataFrame or vector/list of vectors per left_on docs left_index : boolean, default False Use the index from the left DataFrame as the join key(s). If it is a MultiIndex, the number of keys in the other DataFrame (either the index or a number of columns) must match the number of levels right_index : boolean, default False Use the index from the right DataFrame as the join key. Same caveats as left_index sort : boolean, default False Sort the join keys lexicographically in the result DataFrame suffixes : 2-length sequence (tuple, list, ...) Suffix to apply to overlapping column names in the left and right side, respectively copy : boolean, default True If False, do not copy data unnecessarily Examples -------- >>> A >>> B lkey value rkey value 0 foo 1 0 foo 5 1 bar 2 1 bar 6 2 baz 3 2 qux 7 3 foo 4 3 bar 8 >>> merge(A, B, left_on='lkey', right_on='rkey', how='outer') lkey value_x rkey value_y 0 foo 1 foo 5 1 foo 4 foo 5 2 bar 2 bar 6 3 bar 2 bar 8 4 baz 3 NaN NaN 5 NaN NaN qux 7 Returns ------- merged : DataFrame The output type will the be same as 'left', if it is a subclass of DataFrame. """ #---------------------------------------------------------------------- # DataFrame class class DataFrame(NDFrame): """ Two-dimensional size-mutable, potentially heterogeneous tabular data structure with labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure Parameters ---------- data : numpy ndarray (structured or homogeneous), dict, or DataFrame Dict can contain Series, arrays, constants, or list-like objects index : Index or array-like Index to use for resulting frame. Will default to np.arange(n) if no indexing information part of input data and no index provided columns : Index or array-like Column labels to use for resulting frame. Will default to np.arange(n) if no column labels are provided dtype : dtype, default None Data type to force, otherwise infer copy : boolean, default False Copy data from inputs. Only affects DataFrame / 2d ndarray input Examples -------- >>> d = {'col1': ts1, 'col2': ts2} >>> df = DataFrame(data=d, index=index) >>> df2 = DataFrame(np.random.randn(10, 5)) >>> df3 = DataFrame(np.random.randn(10, 5), ... 
columns=['a', 'b', 'c', 'd', 'e']) See also -------- DataFrame.from_records : constructor from tuples, also record arrays DataFrame.from_dict : from dicts of Series, arrays, or dicts DataFrame.from_csv : from CSV files DataFrame.from_items : from sequence of (key, value) pairs pandas.read_csv, pandas.read_table, pandas.read_clipboard """ _auto_consolidate = True @property def _constructor(self): return DataFrame _constructor_sliced = Series def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False): if data is None: data = {} if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._data if isinstance(data, BlockManager): mgr = self._init_mgr(data, axes=dict(index=index, columns=columns), dtype=dtype, copy=copy) elif isinstance(data, dict): mgr = self._init_dict(data, index, columns, dtype=dtype) elif isinstance(data, ma.MaskedArray): import numpy.ma.mrecords as mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): mgr = _masked_rec_array_to_mgr(data, index, columns, dtype, copy) # a masked array else: mask = ma.getmaskarray(data) if mask.any(): data, fill_value = _maybe_upcast(data, copy=True) data[mask] = fill_value else: data = data.copy() mgr = self._init_ndarray(data, index, columns, dtype=dtype, copy=copy) elif isinstance(data, (np.ndarray, Series, Index)): if data.dtype.names: data_columns = list(data.dtype.names) data = dict((k, data[k]) for k in data_columns) if columns is None: columns = data_columns mgr = self._init_dict(data, index, columns, dtype=dtype) elif getattr(data, 'name', None): mgr = self._init_dict({data.name: data}, index, columns, dtype=dtype) else: mgr = self._init_ndarray(data, index, columns, dtype=dtype, copy=copy) elif isinstance(data, (list, types.GeneratorType)): if isinstance(data, types.GeneratorType): data = list(data) if len(data) > 0: if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1: arrays, columns = _to_arrays(data, columns, dtype=dtype) columns = _ensure_index(columns) # set the index if index is None: if isinstance(data[0], Series): index = _get_names_from_index(data) elif isinstance(data[0], Categorical): index = _default_index(len(data[0])) else: index = _default_index(len(data)) mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype) else: mgr = self._init_ndarray(data, index, columns, dtype=dtype, copy=copy) else: mgr = self._init_ndarray(data, index, columns, dtype=dtype, copy=copy) elif isinstance(data, collections.Iterator): raise TypeError("data argument can't be an iterator") else: try: arr = np.array(data, dtype=dtype, copy=copy) except (ValueError, TypeError) as e: exc = TypeError('DataFrame constructor called with ' 'incompatible data and dtype: %s' % e) raise_with_traceback(exc) if arr.ndim == 0 and index is not None and columns is not None: if isinstance(data, compat.string_types) and dtype is None: dtype = np.object_ if dtype is None: dtype, data = _infer_dtype_from_scalar(data) values = np.empty((len(index), len(columns)), dtype=dtype) values.fill(data) mgr = self._init_ndarray(values, index, columns, dtype=dtype, copy=False) else: raise PandasError('DataFrame constructor not properly called!') NDFrame.__init__(self, mgr, fastpath=True) def _init_dict(self, data, index, columns, dtype=None): """ Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases. 
""" if columns is not None: columns = _ensure_index(columns) # prefilter if columns passed data = dict((k, v) for k, v in compat.iteritems(data) if k in columns) if index is None: index = extract_index(list(data.values())) else: index = _ensure_index(index) arrays = [] data_names = [] for k in columns: if k not in data: # no obvious "empty" int column if dtype is not None and issubclass(dtype.type, np.integer): continue if dtype is None: # 1783 v = np.empty(len(index), dtype=object) else: v = np.empty(len(index), dtype=dtype) v.fill(NA) else: v = data[k] data_names.append(k) arrays.append(v) else: keys = list(data.keys()) if not isinstance(data, OrderedDict): keys = _try_sort(keys) columns = data_names = Index(keys) arrays = [data[k] for k in keys] return _arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype) def _init_ndarray(self, values, index, columns, dtype=None, copy=False): # input must be a ndarray, list, Series, index if isinstance(values, Series): if columns is None: if values.name is not None: columns = [values.name] if index is None: index = values.index else: values = values.reindex(index) # zero len case (GH #2234) if not len(values) and columns is not None and len(columns): values = np.empty((0, 1), dtype=object) # helper to create the axes as indexes def _get_axes(N, K, index=index, columns=columns): # return axes or defaults if index is None: index = _default_index(N) else: index = _ensure_index(index) if columns is None: columns = _default_index(K) else: columns = _ensure_index(columns) return index, columns # we could have a categorical type passed or coerced to 'category' # recast this to an _arrays_to_mgr if is_categorical_dtype(getattr(values,'dtype',None)) or is_categorical_dtype(dtype): if not hasattr(values,'dtype'): values = _prep_ndarray(values, copy=copy) values = values.ravel() elif copy: values = values.copy() index, columns = _get_axes(len(values),1) return _arrays_to_mgr([ values ], columns, index, columns, dtype=dtype) # by definition an array here # the dtypes will be coerced to a single dtype values = _prep_ndarray(values, copy=copy) if dtype is not None: if values.dtype != dtype: try: values = values.astype(dtype) except Exception as orig: e = ValueError("failed to cast to '%s' (Exception was: %s)" % (dtype, orig)) raise_with_traceback(e) index, columns = _get_axes(*values.shape) values = values.T # if we don't have a dtype specified, then try to convert objects # on the entire block; this is to convert if we have datetimelike's # embedded in an object type if dtype is None and is_object_dtype(values): values = _possibly_infer_to_datetimelike(values) return create_block_manager_from_blocks([values], [columns, index]) @property def axes(self): return [self.index, self.columns] @property def shape(self): return (len(self.index), len(self.columns)) def _repr_fits_vertical_(self): """ Check length against max_rows. """ max_rows = get_option("display.max_rows") return len(self) <= max_rows def _repr_fits_horizontal_(self, ignore_width=False): """ Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. In case off non-interactive session, no boundaries apply. ignore_width is here so ipnb+HTML output can behave the way users expect. display.max_columns remains in effect. 
GH3541, GH3573 """ width, height = fmt.get_console_size() max_columns = get_option("display.max_columns") nb_columns = len(self.columns) # exceed max columns if ((max_columns and nb_columns > max_columns) or ((not ignore_width) and width and nb_columns > (width // 2))): return False if (ignore_width # used by repr_html under IPython notebook # scripts ignore terminal dims or not com.in_interactive_session()): return True if (get_option('display.width') is not None or com.in_ipython_frontend()): # check at least the column row for excessive width max_rows = 1 else: max_rows = get_option("display.max_rows") # when auto-detecting, so width=None and not in ipython front end # check whether repr fits horizontal by actualy checking # the width of the rendered repr buf = StringIO() # only care about the stuff we'll actually print out # and to_string on entire frame may be expensive d = self if not (max_rows is None): # unlimited rows # min of two, where one may be None d = d.iloc[:min(max_rows, len(d))] else: return True d.to_string(buf=buf) value = buf.getvalue() repr_width = max([len(l) for l in value.split('\n')]) return repr_width < width def _info_repr(self): """True if the repr should show the info view.""" info_repr_option = (get_option("display.large_repr") == "info") return info_repr_option and not ( self._repr_fits_horizontal_() and self._repr_fits_vertical_() ) def __unicode__(self): """ Return a string representation for a particular DataFrame Invoked by unicode(df) in py2 only. Yields a Unicode String in both py2/py3. """ buf = StringIO(u("")) if self._info_repr(): self.info(buf=buf) return buf.getvalue() max_rows = get_option("display.max_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") if get_option("display.expand_frame_repr"): width, _ = fmt.get_console_size() else: width = None self.to_string(buf=buf, max_rows=max_rows, max_cols=max_cols, line_width=width, show_dimensions=show_dimensions) return buf.getvalue() def _repr_html_(self): """ Return a html representation for a particular DataFrame. Mainly for IPython notebook. """ # qtconsole doesn't report its line width, and also # behaves badly when outputting an HTML table # that doesn't fit the window, so disable it. # XXX: In IPython 3.x and above, the Qt console will not attempt to # display HTML, so this check can be removed when support for IPython 2.x # is no longer needed. if com.in_qtconsole(): # 'HTML output is disabled in QtConsole' return None if self._info_repr(): buf = StringIO(u("")) self.info(buf=buf) # need to escape the <class>, should be the first line. val = buf.getvalue().replace('<', r'&lt;', 1).replace('>', r'&gt;', 1) return '<pre>' + val + '</pre>' if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") return ('<div style="max-height:1000px;' 'max-width:1500px;overflow:auto;">\n' + self.to_html(max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions) + '\n</div>') else: return None def iteritems(self): """Iterator over (column, series) pairs""" if self.columns.is_unique and hasattr(self, '_item_cache'): for k in self.columns: yield k, self._get_item_cache(k) else: for i, k in enumerate(self.columns): yield k, self.icol(i) def iterrows(self): """ Iterate over rows of DataFrame as (index, Series) pairs. 
Notes ----- * ``iterrows`` does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = DataFrame([[1, 1.0]], columns=['x', 'y']) >>> row = next(df.iterrows())[1] >>> print(row['x'].dtype) float64 >>> print(df['x'].dtype) int64 Returns ------- it : generator A generator that iterates over the rows of the frame. """ columns = self.columns for k, v in zip(self.index, self.values): s = Series(v, index=columns, name=k) yield k, s def itertuples(self, index=True): """ Iterate over rows of DataFrame as tuples, with index value as first element of the tuple """ arrays = [] if index: arrays.append(self.index) # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) return zip(*arrays) if compat.PY3: # pragma: no cover items = iteritems def __len__(self): """Returns length of info axis, but here we use the index """ return len(self.index) def dot(self, other): """ Matrix multiplication with DataFrame or Series objects Parameters ---------- other : DataFrame or Series Returns ------- dot_product : DataFrame or Series """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if (len(common) > len(self.columns) or len(common) > len(other.index)): raise ValueError('matrices are not aligned') left = self.reindex(columns=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError('Dot product shape mismatch, %s vs %s' % (lvals.shape, rvals.shape)) if isinstance(other, DataFrame): return self._constructor(np.dot(lvals, rvals), index=left.index, columns=other.columns) elif isinstance(other, Series): return Series(np.dot(lvals, rvals), index=left.index) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index) else: return Series(result, index=left.index) else: # pragma: no cover raise TypeError('unsupported type: %s' % type(other)) #---------------------------------------------------------------------- # IO methods (to / from other formats) @classmethod def from_dict(cls, data, orient='columns', dtype=None): """ Construct DataFrame from dict of array-like or dicts Parameters ---------- data : dict {field : array-like} or {field : dict} orient : {'columns', 'index'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. Returns ------- DataFrame """ index, columns = None, None orient = orient.lower() if orient == 'index': if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: data, index = list(data.values()), list(data.keys()) elif orient != 'columns': # pragma: no cover raise ValueError('only recognize index or columns for orient') return cls(data, index=index, columns=columns, dtype=dtype) @deprecate_kwarg(old_arg_name='outtype', new_arg_name='orient') def to_dict(self, orient='dict'): """Convert DataFrame to dictionary. Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'records'} Determines the type of the values of the dictionary. 
- dict (default) : dict like {column -> {index -> value}} - list : dict like {column -> [values]} - series : dict like {column -> Series(values)} - split : dict like {index -> [index], columns -> [columns], data -> [values]} - records : list like [{column -> value}, ... , {column -> value}] Abbreviations are allowed. `s` indicates `series` and `sp` indicates `split`. Returns ------- result : dict like {column -> {index -> value}} """ if not self.columns.is_unique: warnings.warn("DataFrame columns are not unique, some " "columns will be omitted.", UserWarning) if orient.lower().startswith('d'): return dict((k, v.to_dict()) for k, v in compat.iteritems(self)) elif orient.lower().startswith('l'): return dict((k, v.tolist()) for k, v in compat.iteritems(self)) elif orient.lower().startswith('sp'): return {'index': self.index.tolist(), 'columns': self.columns.tolist(), 'data': self.values.tolist()} elif orient.lower().startswith('s'): return dict((k, v) for k, v in compat.iteritems(self)) elif orient.lower().startswith('r'): return [dict((k, v) for k, v in zip(self.columns, row)) for row in self.values] else: raise ValueError("orient '%s' not understood" % orient) def to_gbq(self, destination_table, project_id=None, chunksize=10000, verbose=True, reauth=False): """Write a DataFrame to a Google BigQuery table. THIS IS AN EXPERIMENTAL LIBRARY If the table exists, the dataframe will be written to the table using the defined table schema and column types. For simplicity, this method uses the Google BigQuery streaming API. The to_gbq method chunks data into a default chunk size of 10,000. Failures return the complete error response which can be quite long depending on the size of the insert. There are several important limitations of the Google streaming API which are detailed at: https://developers.google.com/bigquery/streaming-data-into-bigquery. Parameters ---------- dataframe : DataFrame DataFrame to be written destination_table : string Name of table to be written, in the form 'dataset.tablename' project_id : str Google BigQuery Account project ID. chunksize : int (default 10000) Number of rows to be inserted in each chunk from the dataframe. verbose : boolean (default True) Show percentage complete reauth : boolean (default False) Force Google BigQuery to reauthenticate the user. This is useful if multiple accounts are used. """ from pandas.io import gbq return gbq.to_gbq(self, destination_table, project_id=project_id, chunksize=chunksize, verbose=verbose, reauth=reauth) @classmethod def from_records(cls, data, index=None, exclude=None, columns=None, coerce_float=False, nrows=None): """ Convert structured or record ndarray to DataFrame Parameters ---------- data : ndarray (structured dtype), list of tuples, dict, or DataFrame index : string, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use exclude : sequence, default None Columns or fields to exclude columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. 
Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns) coerce_float : boolean, default False Attempt to convert values to non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets Returns ------- df : DataFrame """ # Make a copy of the input columns so we can modify it if columns is not None: columns = _ensure_index(columns) if com.is_iterator(data): if nrows == 0: return cls() try: if compat.PY3: first_row = next(data) else: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, 'dtype') and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = _ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns = [] for k, v in compat.iteritems(data): if k in columns: arr_columns.append(k) arrays.append(v) arrays, arr_columns = _reorder_arrays(arrays, arr_columns, columns) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = _to_arrays(data, columns) if columns is not None: columns = _ensure_index(columns) arr_columns = columns else: arrays, arr_columns = _to_arrays(data, columns, coerce_float=coerce_float) arr_columns = _ensure_index(arr_columns) if columns is not None: columns = _ensure_index(columns) else: columns = arr_columns if exclude is None: exclude = set() else: exclude = set(exclude) result_index = None if index is not None: if (isinstance(index, compat.string_types) or not hasattr(index, "__iter__")): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: to_remove = [arr_columns.get_loc(field) for field in index] result_index = MultiIndex.from_arrays( [arrays[i] for i in to_remove], names=index) exclude.update(index) except Exception: result_index = index if any(exclude): arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in enumerate(arrays) if i not in to_remove] arr_columns = arr_columns.drop(arr_exclude) columns = columns.drop(exclude) mgr = _arrays_to_mgr(arrays, arr_columns, result_index, columns) return cls(mgr) def to_records(self, index=True, convert_datetime64=True): """ Convert DataFrame to record array. Index will be put in the 'index' field of the record array if requested Parameters ---------- index : boolean, default True Include index in resulting record array, stored in 'index' field convert_datetime64 : boolean, default True Whether to convert the index to datetime.datetime if it is a DatetimeIndex Returns ------- y : recarray """ if index: if com.is_datetime64_dtype(self.index) and convert_datetime64: ix_vals = [self.index.to_pydatetime()] else: if isinstance(self.index, MultiIndex): # array of tuples to numpy cols. 
copy copy copy ix_vals = lmap(np.array, zip(*self.index.values)) else: ix_vals = [self.index.values] arrays = ix_vals + [self[c].get_values() for c in self.columns] count = 0 index_names = list(self.index.names) if isinstance(self.index, MultiIndex): for i, n in enumerate(index_names): if n is None: index_names[i] = 'level_%d' % count count += 1 elif index_names[0] is None: index_names = ['index'] names = index_names + lmap(str, self.columns) else: arrays = [self[c].get_values() for c in self.columns] names = lmap(str, self.columns) dtype = np.dtype([(x, v.dtype) for x, v in zip(names, arrays)]) return np.rec.fromarrays(arrays, dtype=dtype, names=names) @classmethod def from_items(cls, items, columns=None, orient='columns'): """ Convert (key, value) pairs to DataFrame. The keys will be the axis index (usually the columns, but depends on the specified orientation). The values should be arrays or Series. Parameters ---------- items : sequence of (key, value) pairs Values should be arrays or Series. columns : sequence of column labels, optional Must be passed if orient='index'. orient : {'columns', 'index'}, default 'columns' The "orientation" of the data. If the keys of the input correspond to column labels, pass 'columns' (default). Otherwise if the keys correspond to the index, pass 'index'. Returns ------- frame : DataFrame """ keys, values = lzip(*items) if orient == 'columns': if columns is not None: columns = _ensure_index(columns) idict = dict(items) if len(idict) < len(items): if not columns.equals(_ensure_index(keys)): raise ValueError('With non-unique item names, passed ' 'columns must be identical') arrays = values else: arrays = [idict[k] for k in columns if k in idict] else: columns = _ensure_index(keys) arrays = values return cls._from_arrays(arrays, columns, None) elif orient == 'index': if columns is None: raise TypeError("Must pass columns with orient='index'") keys = _ensure_index(keys) arr = np.array(values, dtype=object).T data = [lib.maybe_convert_objects(v) for v in arr] return cls._from_arrays(data, columns, keys) else: # pragma: no cover raise ValueError("'orient' must be either 'columns' or 'index'") @classmethod def _from_arrays(cls, arrays, columns, index, dtype=None): mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype) return cls(mgr) @classmethod def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True, encoding=None, tupleize_cols=False, infer_datetime_format=False): """ Read delimited file into DataFrame Parameters ---------- path : string file path or file handle / StringIO header : int, default 0 Row to use at header (skip prior rows) sep : string, default ',' Field delimiter index_col : int or sequence, default 0 Column to use for index. If a sequence is given, a MultiIndex is used. Different default from read_table parse_dates : boolean, default True Parse dates. Different default from read_table tupleize_cols : boolean, default False write multi_index columns as a list of tuples (if True) or new (expanded format) if False) infer_datetime_format: boolean, default False If True and `parse_dates` is True for a column, try to infer the datetime format based on the first datetime string. If the format can be inferred, there often will be a large parsing speed-up. 
Notes ----- Preferable to use read_table for most general purposes but from_csv makes for an easy roundtrip to and from file, especially with a DataFrame of time series data Returns ------- y : DataFrame """ from pandas.io.parsers import read_table return read_table(path, header=header, sep=sep, parse_dates=parse_dates, index_col=index_col, encoding=encoding, tupleize_cols=tupleize_cols, infer_datetime_format=infer_datetime_format) def to_sparse(self, fill_value=None, kind='block'): """ Convert to SparseDataFrame Parameters ---------- fill_value : float, default NaN kind : {'block', 'integer'} Returns ------- y : SparseDataFrame """ from pandas.core.sparse import SparseDataFrame return SparseDataFrame(self._series, index=self.index, default_kind=kind, default_fill_value=fill_value) def to_panel(self): """ Transform long (stacked) format (DataFrame) into wide (3D, Panel) format. Currently the index of the DataFrame must be a 2-level MultiIndex. This may be generalized later Returns ------- panel : Panel """ from pandas.core.panel import Panel # only support this kind for now if (not isinstance(self.index, MultiIndex) or # pragma: no cover len(self.index.levels) != 2): raise NotImplementedError('Only 2-level MultiIndex are supported.') if not self.index.is_unique: raise ValueError("Can't convert non-uniquely indexed " "DataFrame to Panel") self._consolidate_inplace() # minor axis must be sorted if self.index.lexsort_depth < 2: selfsorted = self.sortlevel(0) else: selfsorted = self major_axis, minor_axis = selfsorted.index.levels major_labels, minor_labels = selfsorted.index.labels shape = len(major_axis), len(minor_axis) # preserve names, if any major_axis = major_axis.copy() major_axis.name = self.index.names[0] minor_axis = minor_axis.copy() minor_axis.name = self.index.names[1] # create new axes new_axes = [selfsorted.columns, major_axis, minor_axis] # create new manager new_mgr = selfsorted._data.reshape_nd(axes=new_axes, labels=[major_labels, minor_labels], shape=shape, ref_items=selfsorted.columns) return Panel(new_mgr) to_wide = deprecate('to_wide', to_panel) def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, columns=None, header=True, index=True, index_label=None, mode='w', encoding=None, quoting=None, quotechar='"', line_terminator='\n', chunksize=None, tupleize_cols=False, date_format=None, doublequote=True, escapechar=None, decimal='.', **kwds): r"""Write DataFrame to a comma-separated values (csv) file Parameters ---------- path_or_buf : string or file handle, default None File path or object, if None is provided the result is returned as a string. sep : character, default "," Field delimiter for the output file. na_rep : string, default '' Missing data representation float_format : string, default None Format string for floating point numbers columns : sequence, optional Columns to write header : boolean or list of string, default True Write out column names. If a list of string is given it is assumed to be aliases for the column names index : boolean, default True Write row names (index) index_label : string or sequence, or False, default None Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. If False do not print fields for index names. 
Use index_label=False for easier importing in R nanRep : None deprecated, use na_rep mode : str Python write mode, default 'w' encoding : string, optional A string representing the encoding to use in the output file, defaults to 'ascii' on Python 2 and 'utf-8' on Python 3. line_terminator : string, default '\\n' The newline character or character sequence to use in the output file quoting : optional constant from csv module defaults to csv.QUOTE_MINIMAL quotechar : string (length 1), default '"' character used to quote fields doublequote : boolean, default True Control quoting of `quotechar` inside a field escapechar : string (length 1), default None character used to escape `sep` and `quotechar` when appropriate chunksize : int or None rows to write at a time tupleize_cols : boolean, default False write multi_index columns as a list of tuples (if True) or new (expanded format) if False) date_format : string, default None Format string for datetime objects decimal: string, default '.' Character recognized as decimal separator. E.g. use ',' for European data """ formatter = fmt.CSVFormatter(self, path_or_buf, line_terminator=line_terminator, sep=sep, encoding=encoding, quoting=quoting, na_rep=na_rep, float_format=float_format, cols=columns, header=header, index=index, index_label=index_label, mode=mode, chunksize=chunksize, quotechar=quotechar, engine=kwds.get("engine"), tupleize_cols=tupleize_cols, date_format=date_format, doublequote=doublequote, escapechar=escapechar, decimal=decimal) formatter.save() if path_or_buf is None: return formatter.path_or_buf.getvalue() def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='', float_format=None, columns=None, header=True, index=True, index_label=None, startrow=0, startcol=0, engine=None, merge_cells=True, encoding=None, inf_rep='inf'): """ Write DataFrame to a excel sheet Parameters ---------- excel_writer : string or ExcelWriter object File path or existing ExcelWriter sheet_name : string, default 'Sheet1' Name of sheet which will contain DataFrame na_rep : string, default '' Missing data representation float_format : string, default None Format string for floating point numbers columns : sequence, optional Columns to write header : boolean or list of string, default True Write out column names. If a list of string is given it is assumed to be aliases for the column names index : boolean, default True Write row names (index) index_label : string or sequence, default None Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. startrow : upper left cell row to dump data frame startcol : upper left cell column to dump data frame engine : string, default None write engine to use - you can also set this via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and ``io.excel.xlsm.writer``. merge_cells : boolean, default True Write MultiIndex and Hierarchical Rows as merged cells. encoding: string, default None encoding of the resulting excel file. Only necessary for xlwt, other writers support unicode natively. inf_rep : string, default 'inf' Representation for infinity (there is no native representation for infinity in Excel) Notes ----- If passing an existing ExcelWriter object, then the sheet will be added to the existing workbook. 
This can be used to save different DataFrames to one workbook: >>> writer = ExcelWriter('output.xlsx') >>> df1.to_excel(writer,'Sheet1') >>> df2.to_excel(writer,'Sheet2') >>> writer.save() """ from pandas.io.excel import ExcelWriter need_save = False if encoding == None: encoding = 'ascii' if isinstance(excel_writer, compat.string_types): excel_writer = ExcelWriter(excel_writer, engine=engine) need_save = True formatter = fmt.ExcelFormatter(self, na_rep=na_rep, cols=columns, header=header, float_format=float_format, index=index, index_label=index_label, merge_cells=merge_cells, inf_rep=inf_rep) formatted_cells = formatter.get_formatted_cells() excel_writer.write_cells(formatted_cells, sheet_name, startrow=startrow, startcol=startcol) if need_save: excel_writer.save() def to_stata( self, fname, convert_dates=None, write_index=True, encoding="latin-1", byteorder=None, time_stamp=None, data_label=None): """ A class for writing Stata binary dta files from array-like objects Parameters ---------- fname : file path or buffer Where to save the dta file. convert_dates : dict Dictionary mapping column of datetime types to the stata internal format that you want to use for the dates. Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either a number or a name. encoding : str Default is latin-1. Note that Stata does not support unicode. byteorder : str Can be ">", "<", "little", or "big". The default is None which uses `sys.byteorder` Examples -------- >>> writer = StataWriter('./data_file.dta', data) >>> writer.write_file() Or with dates >>> writer = StataWriter('./date_data_file.dta', data, {2 : 'tw'}) >>> writer.write_file() """ from pandas.io.stata import StataWriter writer = StataWriter(fname, self, convert_dates=convert_dates, encoding=encoding, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index) writer.write_file() @Appender(fmt.docstring_to_string, indents=1) def to_string(self, buf=None, columns=None, col_space=None, colSpace=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, line_width=None, max_rows=None, max_cols=None, show_dimensions=False): """ Render a DataFrame to a console-friendly tabular output. """ if colSpace is not None: # pragma: no cover warnings.warn("colSpace is deprecated, use col_space", FutureWarning) col_space = colSpace formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, line_width=line_width, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions) formatter.to_string() if buf is None: result = formatter.buf.getvalue() return result @Appender(fmt.docstring_to_string, indents=1) def to_html(self, buf=None, columns=None, col_space=None, colSpace=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, bold_rows=True, classes=None, escape=True, max_rows=None, max_cols=None, show_dimensions=False): """ Render a DataFrame as an HTML table. 
`to_html`-specific options: bold_rows : boolean, default True Make the row labels bold in the output classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table escape : boolean, default True Convert the characters <, >, and & to HTML-safe sequences.= max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. max_cols : int, optional Maximum number of columns to show before truncating. If None, show all. """ if colSpace is not None: # pragma: no cover warnings.warn("colSpace is deprecated, use col_space", FutureWarning) col_space = colSpace formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, bold_rows=bold_rows, escape=escape, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions) formatter.to_html(classes=classes) if buf is None: return formatter.buf.getvalue() @Appender(fmt.docstring_to_string, indents=1) def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, bold_rows=True, longtable=False, escape=True): """ Render a DataFrame to a tabular environment table. You can splice this into a LaTeX document. Requires \\usepackage{booktabs}. `to_latex`-specific options: bold_rows : boolean, default True Make the row labels bold in the output longtable : boolean, default False Use a longtable environment instead of tabular. Requires adding a \\usepackage{longtable} to your LaTeX preamble. escape : boolean, default True When set to False prevents from escaping latex special characters in column names. """ if colSpace is not None: # pragma: no cover warnings.warn("colSpace is deprecated, use col_space", FutureWarning) col_space = colSpace formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns, col_space=col_space, na_rep=na_rep, header=header, index=index, formatters=formatters, float_format=float_format, bold_rows=bold_rows, sparsify=sparsify, index_names=index_names, escape=escape) formatter.to_latex(longtable=longtable) if buf is None: return formatter.buf.getvalue() def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None): """ Concise summary of a DataFrame. Parameters ---------- verbose : {None, True, False}, optional Whether to print the full summary. None follows the `display.max_info_columns` setting. True or False overrides the `display.max_info_columns` setting. buf : writable buffer, defaults to sys.stdout max_cols : int, default None Determines whether full summary or short summary is printed. None follows the `display.max_info_columns` setting. memory_usage : boolean, default None Specifies whether total memory usage of the DataFrame elements (including index) should be displayed. None follows the `display.memory_usage` setting. True or False overrides the `display.memory_usage` setting. Memory usage is shown in human-readable units (base-2 representation). null_counts : boolean, default None Whether to show the non-null counts If None, then only show if the frame is smaller than max_info_rows and max_info_columns. If True, always show counts. If False, never show counts. 
""" from pandas.core.format import _put_lines if buf is None: # pragma: no cover buf = sys.stdout lines = [] lines.append(str(type(self))) lines.append(self.index.summary()) if len(self.columns) == 0: lines.append('Empty %s' % type(self).__name__) _put_lines(buf, lines) return cols = self.columns # hack if max_cols is None: max_cols = get_option( 'display.max_info_columns', len(self.columns) + 1) max_rows = get_option('display.max_info_rows', len(self) + 1) if null_counts is None: show_counts = ((len(self.columns) <= max_cols) and (len(self) < max_rows)) else: show_counts = null_counts exceeds_info_cols = len(self.columns) > max_cols def _verbose_repr(): lines.append('Data columns (total %d columns):' % len(self.columns)) space = max([len(com.pprint_thing(k)) for k in self.columns]) + 4 counts = None tmpl = "%s%s" if show_counts: counts = self.count() if len(cols) != len(counts): # pragma: no cover raise AssertionError('Columns must equal counts (%d != %d)' % (len(cols), len(counts))) tmpl = "%s non-null %s" dtypes = self.dtypes for i, col in enumerate(self.columns): dtype = dtypes[col] col = com.pprint_thing(col) count = "" if show_counts: count = counts.iloc[i] lines.append(_put_str(col, space) + tmpl % (count, dtype)) def _non_verbose_repr(): lines.append(self.columns.summary(name='Columns')) def _sizeof_fmt(num, size_qualifier): # returns size in human readable format for x in ['bytes', 'KB', 'MB', 'GB', 'TB']: if num < 1024.0: return "%3.1f%s %s" % (num, size_qualifier, x) num /= 1024.0 return "%3.1f%s %s" % (num, size_qualifier, 'PB') if verbose: _verbose_repr() elif verbose is False: # specifically set to False, not nesc None _non_verbose_repr() else: if exceeds_info_cols: _non_verbose_repr() else: _verbose_repr() counts = self.get_dtype_counts() dtypes = ['%s(%d)' % k for k in sorted(compat.iteritems(counts))] lines.append('dtypes: %s' % ', '.join(dtypes)) if memory_usage is None: memory_usage = get_option('display.memory_usage') if memory_usage: # append memory usage of df to display # size_qualifier is just a best effort; not guaranteed to catch all # cases (e.g., it misses categorical data even with object # categories) size_qualifier = ('+' if 'object' in counts or is_object_dtype(self.index) else '') mem_usage = self.memory_usage(index=True).sum() lines.append("memory usage: %s\n" % _sizeof_fmt(mem_usage, size_qualifier)) _put_lines(buf, lines) def memory_usage(self, index=False): """Memory usage of DataFrame columns. Parameters ---------- index : bool Specifies whether to include memory usage of DataFrame's index in returned Series. If `index=True` (default is False) the first index of the Series is `Index`. Returns ------- sizes : Series A series with column names as index and memory usage of columns with units of bytes. Notes ----- Memory usage does not include memory consumed by elements that are not components of the array. 
See Also -------- numpy.ndarray.nbytes """ result = Series([ c.values.nbytes for col, c in self.iteritems() ], index=self.columns) if index: result = Series(self.index.nbytes, index=['Index']).append(result) return result def transpose(self): """Transpose index and columns""" return super(DataFrame, self).transpose(1, 0) T = property(transpose) #---------------------------------------------------------------------- # Picklability # legacy pickle formats def _unpickle_frame_compat(self, state): # pragma: no cover from pandas.core.common import _unpickle_array if len(state) == 2: # pragma: no cover series, idx = state columns = sorted(series) else: series, cols, idx = state columns = _unpickle_array(cols) index = _unpickle_array(idx) self._data = self._init_dict(series, index, columns, None) def _unpickle_matrix_compat(self, state): # pragma: no cover from pandas.core.common import _unpickle_array # old unpickling (vals, idx, cols), object_state = state index = _unpickle_array(idx) dm = DataFrame(vals, index=index, columns=_unpickle_array(cols), copy=False) if object_state is not None: ovals, _, ocols = object_state objects = DataFrame(ovals, index=index, columns=_unpickle_array(ocols), copy=False) dm = dm.join(objects) self._data = dm._data #---------------------------------------------------------------------- #---------------------------------------------------------------------- # Getting and setting elements def get_value(self, index, col, takeable=False): """ Quickly retrieve single value at passed column and index Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- value : scalar value """ if takeable: series = self._iget_item_cache(col) return _maybe_box_datetimelike(series.values[index]) series = self._get_item_cache(col) engine = self.index._engine return engine.get_value(series.get_values(), index) def set_value(self, index, col, value, takeable=False): """ Put single value at passed column and index Parameters ---------- index : row label col : column label value : scalar value takeable : interpret the index/col as indexers, default False Returns ------- frame : DataFrame If label pair is contained, will be reference to calling DataFrame, otherwise a new object """ try: if takeable is True: series = self._iget_item_cache(col) return series.set_value(index, value, takeable=True) series = self._get_item_cache(col) engine = self.index._engine engine.set_value(series.values, index, value) return self except (KeyError, TypeError): # set using a non-recursive method & reset the cache self.loc[index, col] = value self._item_cache.pop(col, None) return self def irow(self, i, copy=False): return self._ixs(i, axis=0) def icol(self, i): return self._ixs(i, axis=1) def _ixs(self, i, axis=0): """ i : int, slice, or sequence of integers axis : int """ # irow if axis == 0: """ Notes ----- If slice passed, the resulting data will be a view """ if isinstance(i, slice): return self[i] else: label = self.index[i] if isinstance(label, Index): # a location index by definition result = self.take(i, axis=axis) copy=True else: new_values = self._data.fast_xs(i) # if we are a copy, mark as such copy = isinstance(new_values,np.ndarray) and new_values.base is None result = Series(new_values, index=self.columns, name=self.index[i], dtype=new_values.dtype) result._set_is_copy(self, copy=copy) return result # icol else: """ Notes ----- If slice passed, the resulting data will be a view """ label = self.columns[i] if 
isinstance(i, slice): # need to return view lab_slice = slice(label[0], label[-1]) return self.ix[:, lab_slice] else: label = self.columns[i] if isinstance(label, Index): return self.take(i, axis=1, convert=True) # if the values returned are not the same length # as the index (iow a not found value), iget returns # a 0-len ndarray. This is effectively catching # a numpy error (as numpy should really raise) values = self._data.iget(i) if not len(values): values = np.array([np.nan] * len(self.index), dtype=object) result = self._constructor_sliced.from_array( values, index=self.index, name=label, fastpath=True) # this is a cached value, mark it so result._set_as_cached(label, self) return result def iget_value(self, i, j): return self.iat[i, j] def __getitem__(self, key): # shortcut if we are an actual column is_mi_columns = isinstance(self.columns, MultiIndex) try: if key in self.columns and not is_mi_columns: return self._getitem_column(key) except: pass # see if we can slice the rows indexer = convert_to_index_sliceable(self, key) if indexer is not None: return self._getitem_slice(indexer) if isinstance(key, (Series, np.ndarray, Index, list)): # either boolean or fancy integer index return self._getitem_array(key) elif isinstance(key, DataFrame): return self._getitem_frame(key) elif is_mi_columns: return self._getitem_multilevel(key) else: return self._getitem_column(key) def _getitem_column(self, key): """ return the actual column """ # get column if self.columns.is_unique: return self._get_item_cache(key) # duplicate columns & possible reduce dimensionaility result = self._constructor(self._data.get(key)) if result.columns.is_unique: result = result[key] return result def _getitem_slice(self, key): return self._slice(key, axis=0) def _getitem_array(self, key): # also raises Exception if object array with NA values if com.is_bool_indexer(key): # warning here just in case -- previously __setitem__ was # reindexing but __getitem__ was not; it seems more reasonable to # go with the __setitem__ behavior since that is more consistent # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn("Boolean Series key will be reindexed to match " "DataFrame index.", UserWarning) elif len(key) != len(self.index): raise ValueError('Item wrong length %d instead of %d.' 
% (len(key), len(self.index))) # check_bool_indexer will throw exception if Series key cannot # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] return self.take(indexer, axis=0, convert=False) else: indexer = self.ix._convert_to_indexer(key, axis=1) return self.take(indexer, axis=1, convert=True) def _getitem_multilevel(self, key): loc = self.columns.get_loc(key) if isinstance(loc, (slice, Series, np.ndarray, Index)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) if self._is_mixed_type: result = self.reindex(columns=new_columns) result.columns = result_columns else: new_values = self.values[:, loc] result = DataFrame(new_values, index=self.index, columns=result_columns).__finalize__(self) if len(result.columns) == 1: top = result.columns[0] if ((type(top) == str and top == '') or (type(top) == tuple and top[0] == '')): result = result[''] if isinstance(result, Series): result = Series(result, index=self.index, name=key) result._set_is_copy(self) return result else: return self._get_item_cache(key) def _getitem_frame(self, key): if key.values.dtype != np.bool_: raise ValueError('Must pass DataFrame with boolean values only') return self.where(key) def query(self, expr, **kwargs): """Query the columns of a frame with a boolean expression. .. versionadded:: 0.13 Parameters ---------- expr : string The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. kwargs : dict See the documentation for :func:`pandas.eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. Returns ------- q : DataFrame Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`pandas.eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. 
See Also -------- pandas.eval DataFrame.eval Examples -------- >>> from numpy.random import randn >>> from pandas import DataFrame >>> df = DataFrame(randn(10, 2), columns=list('ab')) >>> df.query('a > b') >>> df[df.a > df.b] # same result as the previous expression """ kwargs['level'] = kwargs.pop('level', 0) + 1 res = self.eval(expr, **kwargs) try: return self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query return self[res] def eval(self, expr, **kwargs): """Evaluate an expression in the context of the calling DataFrame instance. Parameters ---------- expr : string The expression string to evaluate. kwargs : dict See the documentation for :func:`~pandas.eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ret : ndarray, scalar, or pandas object See Also -------- pandas.DataFrame.query pandas.eval Notes ----- For more details see the API documentation for :func:`~pandas.eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> from numpy.random import randn >>> from pandas import DataFrame >>> df = DataFrame(randn(10, 2), columns=list('ab')) >>> df.eval('a + b') >>> df.eval('c = a + b') """ resolvers = kwargs.pop('resolvers', None) kwargs['level'] = kwargs.pop('level', 0) + 1 if resolvers is None: index_resolvers = self._get_index_resolvers() resolvers = dict(self.iteritems()), index_resolvers kwargs['target'] = self kwargs['resolvers'] = kwargs.get('resolvers', ()) + resolvers return _eval(expr, **kwargs) def select_dtypes(self, include=None, exclude=None): """Return a subset of a DataFrame including/excluding columns based on their ``dtype``. Parameters ---------- include, exclude : list-like A list of dtypes or strings to be included/excluded. You must pass in a non-empty sequence for at least one of these. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. TypeError * If either of ``include`` or ``exclude`` is not a sequence Returns ------- subset : DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Notes ----- * To select all *numeric* types use the numpy dtype ``numpy.number`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__ * To select Pandas categorical dtypes, use 'category' Examples -------- >>> df = pd.DataFrame({'a': np.random.randn(6).astype('f4'), ... 'b': [True, False] * 3, ... 
'c': [1.0, 2.0] * 3}) >>> df a b c 0 0.3962 True 1 1 0.1459 False 2 2 0.2623 True 1 3 0.0764 False 2 4 -0.9703 True 1 5 -1.2094 False 2 >>> df.select_dtypes(include=['float64']) c 0 1 1 2 2 1 3 2 4 1 5 2 >>> df.select_dtypes(exclude=['floating']) b 0 True 1 False 2 True 3 False 4 True 5 False """ include, exclude = include or (), exclude or () if not (com.is_list_like(include) and com.is_list_like(exclude)): raise TypeError('include and exclude must both be non-string' ' sequences') selection = tuple(map(frozenset, (include, exclude))) if not any(selection): raise ValueError('at least one of include or exclude must be ' 'nonempty') # convert the myriad valid dtypes object to a single representation include, exclude = map(lambda x: frozenset(map(com._get_dtype_from_object, x)), selection) for dtypes in (include, exclude): com._invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError('include and exclude overlap on %s' % (include & exclude)) # empty include/exclude -> defaults to True # three cases (we've already raised if both are empty) # case 1: empty include, nonempty exclude # we have True, True, ... True for include, same for exclude # in the loop below we get the excluded # and when we call '&' below we get only the excluded # case 2: nonempty include, empty exclude # same as case 1, but with include # case 3: both nonempty # the "union" of the logic of case 1 and case 2: # we get the included and excluded, and return their logical and include_these = Series(not bool(include), index=self.columns) exclude_these = Series(not bool(exclude), index=self.columns) def is_dtype_instance_mapper(column, dtype): return column, functools.partial(issubclass, dtype.type) for column, f in itertools.starmap(is_dtype_instance_mapper, self.dtypes.iteritems()): if include: # checks for the case of empty include or exclude include_these[column] = any(map(f, include)) if exclude: exclude_these[column] = not any(map(f, exclude)) dtype_indexer = include_these & exclude_these return self.loc[com._get_info_slice(self, dtype_indexer)] def _box_item_values(self, key, values): items = self.columns[self.columns.get_loc(key)] if values.ndim == 2: return self._constructor(values.T, columns=items, index=self.index) else: return self._box_col_values(values, items) def _box_col_values(self, values, items): """ provide boxed values for a column """ return self._constructor_sliced.from_array(values, index=self.index, name=items, fastpath=True) def __setitem__(self, key, value): # see if we can slice the rows indexer = convert_to_index_sliceable(self, key) if indexer is not None: return self._setitem_slice(indexer, value) if isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) elif isinstance(key, DataFrame): self._setitem_frame(key, value) else: # set column self._set_item(key, value) def _setitem_slice(self, key, value): self._check_setitem_copy() self.ix._setitem_with_indexer(key, value) def _setitem_array(self, key, value): # also raises Exception if object array with NA values if com.is_bool_indexer(key): if len(key) != len(self.index): raise ValueError('Item wrong length %d instead of %d!' 
% (len(key), len(self.index))) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] self._check_setitem_copy() self.ix._setitem_with_indexer(indexer, value) else: if isinstance(value, DataFrame): if len(value.columns) != len(key): raise ValueError('Columns must be same length as key') for k1, k2 in zip(key, value.columns): self[k1] = value[k2] else: indexer = self.ix._convert_to_indexer(key, axis=1) self._check_setitem_copy() self.ix._setitem_with_indexer((slice(None), indexer), value) def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. # df[df > df2] = 0 if key.values.dtype != np.bool_: raise TypeError('Must pass DataFrame with boolean values only') self._check_inplace_setting(value) self._check_setitem_copy() self.where(-key, value, inplace=True) def _ensure_valid_index(self, value): """ ensure that if we don't have an index, that we can create one from the passed value """ if not len(self.index): # GH5632, make sure that we are a Series convertible if is_list_like(value): try: value = Series(value) except: pass if not isinstance(value, Series): raise ValueError('Cannot set a frame with no defined index ' 'and a value that cannot be converted to a ' 'Series') self._data = self._data.reindex_axis(value.index.copy(), axis=1, fill_value=np.nan) # we are a scalar # noop else: pass def _set_item(self, key, value): """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. """ self._ensure_valid_index(value) value = self._sanitize_column(key, value) NDFrame._set_item(self, key, value) # check if we are modifying a copy # try to set first as we want an invalid # value exeption to occur first if len(self): self._check_setitem_copy() def insert(self, loc, column, value, allow_duplicates=False): """ Insert column into DataFrame at specified location. If `allow_duplicates` is False, raises Exception if column is already contained in the DataFrame. Parameters ---------- loc : int Must have 0 <= loc <= len(columns) column : object value : int, Series, or array-like """ self._ensure_valid_index(value) value = self._sanitize_column(column, value) self._data.insert( loc, column, value, allow_duplicates=allow_duplicates) def assign(self, **kwargs): """ Assign new columns to a DataFrame, returning a new object (a copy) with all the original columns in addition to the new ones. .. versionadded:: 0.16.0 Parameters ---------- kwargs : keyword, value pairs keywords are the column names. If the values are callable, they are computed on the DataFrame and assigned to the new columns. If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- df : DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Since ``kwargs`` is a dictionary, the order of your arguments may not be preserved, and so the order of the new columns is not well defined. Assigning multiple columns within the same ``assign`` is possible, but you cannot reference other columns created within the same ``assign`` call. 
Examples -------- >>> df = DataFrame({'A': range(1, 11), 'B': np.random.randn(10)}) Where the value is a callable, evaluated on `df`: >>> df.assign(ln_A = lambda x: np.log(x.A)) A B ln_A 0 1 0.426905 0.000000 1 2 -0.780949 0.693147 2 3 -0.418711 1.098612 3 4 -0.269708 1.386294 4 5 -0.274002 1.609438 5 6 -0.500792 1.791759 6 7 1.649697 1.945910 7 8 -1.495604 2.079442 8 9 0.549296 2.197225 9 10 -0.758542 2.302585 Where the value already exists and is inserted: >>> newcol = np.log(df['A']) >>> df.assign(ln_A=newcol) A B ln_A 0 1 0.426905 0.000000 1 2 -0.780949 0.693147 2 3 -0.418711 1.098612 3 4 -0.269708 1.386294 4 5 -0.274002 1.609438 5 6 -0.500792 1.791759 6 7 1.649697 1.945910 7 8 -1.495604 2.079442 8 9 0.549296 2.197225 9 10 -0.758542 2.302585 """ data = self.copy() # do all calculations first... results = {} for k, v in kwargs.items(): if callable(v): results[k] = v(data) else: results[k] = v # ... and then assign for k, v in results.items(): data[k] = v return data def _sanitize_column(self, key, value): # Need to make sure new columns (which go into the BlockManager as new # blocks) are always copied def reindexer(value): # reindex if necessary if value.index.equals(self.index) or not len(self.index): value = value.values.copy() else: # GH 4107 try: value = value.reindex(self.index).values except Exception as e: # duplicate axis if not value.index.is_unique: raise e # other raise TypeError('incompatible index of inserted column ' 'with frame index') return value if isinstance(value, Series): value = reindexer(value) elif isinstance(value, DataFrame): # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame if isinstance(self.columns, MultiIndex) and key in self.columns: loc = self.columns.get_loc(key) if isinstance(loc, (slice, Series, np.ndarray, Index)): cols = maybe_droplevels(self.columns[loc], key) if len(cols) and not cols.equals(value.columns): value = value.reindex_axis(cols, axis=1) # now align rows value = reindexer(value).T elif isinstance(value, Categorical): value = value.copy() elif (isinstance(value, Index) or is_sequence(value)): from pandas.core.series import _sanitize_index # turn me into an ndarray value = _sanitize_index(value, self.index, copy=False) if not isinstance(value, (np.ndarray, Index)): if isinstance(value, list) and len(value) > 0: value = com._possibly_convert_platform(value) else: value = com._asarray_tuplesafe(value) elif value.ndim == 2: value = value.copy().T else: value = value.copy() # possibly infer to datetimelike if is_object_dtype(value.dtype): value = _possibly_infer_to_datetimelike(value.ravel()).reshape(value.shape) else: # upcast the scalar dtype, value = _infer_dtype_from_scalar(value) value = np.repeat(value, len(self.index)).astype(dtype) value = com._possibly_cast_to_datetime(value, dtype) # return unconsolidatables directly if isinstance(value, (Categorical, SparseArray)): return value # broadcast across multiple columns if necessary if key in self.columns and value.ndim == 1: if not self.columns.is_unique or isinstance(self.columns, MultiIndex): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)) return np.atleast_2d(np.asarray(value)) @property def _series(self): result = {} for idx, item in enumerate(self.columns): result[item] = Series(self._data.iget(idx), index=self.index, name=item) return result def lookup(self, row_labels, col_labels): """Label-based "fancy indexing" function for DataFrame. 
Given equal-length arrays of row and column labels, return an array of the values corresponding to each (row, col) pair. Parameters ---------- row_labels : sequence The row labels to use for lookup col_labels : sequence The column labels to use for lookup Notes ----- Akin to:: result = [] for row, col in zip(row_labels, col_labels): result.append(df.get_value(row, col)) Examples -------- values : ndarray The found values """ n = len(row_labels) if n != len(col_labels): raise ValueError('Row labels must have same size as column labels') thresh = 1000 if not self._is_mixed_type or n > thresh: values = self.values ridx = self.index.get_indexer(row_labels) cidx = self.columns.get_indexer(col_labels) if (ridx == -1).any(): raise KeyError('One or more row labels was not found') if (cidx == -1).any(): raise KeyError('One or more column labels was not found') flat_index = ridx * len(self.columns) + cidx result = values.flat[flat_index] else: result = np.empty(n, dtype='O') for i, (r, c) in enumerate(zip(row_labels, col_labels)): result[i] = self.get_value(r, c) if is_object_dtype(result): result = lib.maybe_convert_objects(result) return result #---------------------------------------------------------------------- # Reindexing and alignment def _reindex_axes(self, axes, level, limit, method, fill_value, copy): frame = self columns = axes['columns'] if columns is not None: frame = frame._reindex_columns(columns, copy, level, fill_value, limit) index = axes['index'] if index is not None: frame = frame._reindex_index(index, method, copy, level, fill_value, limit) return frame def _reindex_index(self, new_index, method, copy, level, fill_value=NA, limit=None): new_index, indexer = self.index.reindex(new_index, method, level, limit=limit) return self._reindex_with_indexers({0: [new_index, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False) def _reindex_columns(self, new_columns, copy, level, fill_value=NA, limit=None): new_columns, indexer = self.columns.reindex(new_columns, level=level, limit=limit) return self._reindex_with_indexers({1: [new_columns, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False) def _reindex_multi(self, axes, copy, fill_value): """ we are guaranteed non-Nones in the axes! 
""" new_index, row_indexer = self.index.reindex(axes['index']) new_columns, col_indexer = self.columns.reindex(axes['columns']) if row_indexer is not None and col_indexer is not None: indexer = row_indexer, col_indexer new_values = com.take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor(new_values, index=new_index, columns=new_columns) else: return self._reindex_with_indexers({0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value) @Appender(_shared_docs['reindex'] % _shared_doc_kwargs) def reindex(self, index=None, columns=None, **kwargs): return super(DataFrame, self).reindex(index=index, columns=columns, **kwargs) @Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs) def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True, limit=None, fill_value=np.nan): return super(DataFrame, self).reindex_axis(labels=labels, axis=axis, method=method, level=level, copy=copy, limit=limit, fill_value=fill_value) @Appender(_shared_docs['rename'] % _shared_doc_kwargs) def rename(self, index=None, columns=None, **kwargs): return super(DataFrame, self).rename(index=index, columns=columns, **kwargs) def set_index(self, keys, drop=True, append=False, inplace=False, verify_integrity=False): """ Set the DataFrame index (row labels) using one or more existing columns. By default yields a new object. Parameters ---------- keys : column label or list of column labels / arrays drop : boolean, default True Delete columns to be used as the new index append : boolean, default False Whether to append columns to existing index inplace : boolean, default False Modify the DataFrame in place (do not create a new object) verify_integrity : boolean, default False Check the new index for duplicates. Otherwise defer the check until necessary. 
Setting to False will improve the performance of this method Examples -------- >>> indexed_df = df.set_index(['A', 'B']) >>> indexed_df2 = df.set_index(['A', [0, 1, 2, 0, 1, 2]]) >>> indexed_df3 = df.set_index([[0, 1, 2, 0, 1, 2]]) Returns ------- dataframe : DataFrame """ if not isinstance(keys, list): keys = [keys] if inplace: frame = self else: frame = self.copy() arrays = [] names = [] if append: names = [x for x in self.index.names] if isinstance(self.index, MultiIndex): for i in range(self.index.nlevels): arrays.append(self.index.get_level_values(i)) else: arrays.append(self.index) to_remove = [] for col in keys: if isinstance(col, MultiIndex): # append all but the last column so we don't have to modify # the end of this loop for n in range(col.nlevels - 1): arrays.append(col.get_level_values(n)) level = col.get_level_values(col.nlevels - 1) names.extend(col.names) elif isinstance(col, Series): level = col.values names.append(col.name) elif isinstance(col, Index): level = col names.append(col.name) elif isinstance(col, (list, np.ndarray, Index)): level = col names.append(None) else: level = frame[col].values names.append(col) if drop: to_remove.append(col) arrays.append(level) index = MultiIndex.from_arrays(arrays, names=names) if verify_integrity and not index.is_unique: duplicates = index.get_duplicates() raise ValueError('Index has duplicate keys: %s' % duplicates) for c in to_remove: del frame[c] # clear up memory usage index._cleanup() frame.index = index if not inplace: return frame def reset_index(self, level=None, drop=False, inplace=False, col_level=0, col_fill=''): """ For DataFrame with multi-level index, return new DataFrame with labeling information in the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None. For a standard index, the index name will be used (if set), otherwise a default 'index' or 'level_0' (if 'index' is already taken) will be used. Parameters ---------- level : int, str, tuple, or list, default None Only remove the given levels from the index. Removes all levels by default drop : boolean, default False Do not try to insert index into dataframe columns. This resets the index to the default integer index. inplace : boolean, default False Modify the DataFrame in place (do not create a new object) col_level : int or str, default 0 If the columns have multiple levels, determines which level the labels are inserted into. By default it is inserted into the first level. col_fill : object, default '' If the columns have multiple levels, determines how the other levels are named. If None then the index name is repeated. 
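Examples
--------
A small sketch; the index name ``idx`` and column ``a`` are arbitrary:

>>> import pandas as pd
>>> df = pd.DataFrame({'a': [1, 2, 3]},
...                   index=pd.Index([10, 20, 30], name='idx'))
>>> df.reset_index()
   idx  a
0   10  1
1   20  2
2   30  3
>>> df.reset_index(drop=True)
   a
0  1
1  2
2  3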
Returns ------- resetted : DataFrame """ if inplace: new_obj = self else: new_obj = self.copy() def _maybe_casted_values(index, labels=None): if isinstance(index, PeriodIndex): values = index.asobject.values elif (isinstance(index, DatetimeIndex) and index.tz is not None): values = index.asobject else: values = index.values if values.dtype == np.object_: values = lib.maybe_convert_objects(values) # if we have the labels, extract the values with a mask if labels is not None: mask = labels == -1 values = values.take(labels) if mask.any(): values, changed = com._maybe_upcast_putmask(values, mask, np.nan) return values new_index = np.arange(len(new_obj),dtype='int64') if isinstance(self.index, MultiIndex): if level is not None: if not isinstance(level, (tuple, list)): level = [level] level = [self.index._get_level_number(lev) for lev in level] if len(level) < len(self.index.levels): new_index = self.index.droplevel(level) if not drop: names = self.index.names zipped = lzip(self.index.levels, self.index.labels) multi_col = isinstance(self.columns, MultiIndex) for i, (lev, lab) in reversed(list(enumerate(zipped))): col_name = names[i] if col_name is None: col_name = 'level_%d' % i if multi_col: if col_fill is None: col_name = tuple([col_name] * self.columns.nlevels) else: name_lst = [col_fill] * self.columns.nlevels lev_num = self.columns._get_level_number(col_level) name_lst[lev_num] = col_name col_name = tuple(name_lst) # to ndarray and maybe infer different dtype level_values = _maybe_casted_values(lev, lab) if level is None or i in level: new_obj.insert(0, col_name, level_values) elif not drop: name = self.index.name if name is None or name == 'index': name = 'index' if 'index' not in self else 'level_0' if isinstance(self.columns, MultiIndex): if col_fill is None: name = tuple([name] * self.columns.nlevels) else: name_lst = [col_fill] * self.columns.nlevels lev_num = self.columns._get_level_number(col_level) name_lst[lev_num] = name name = tuple(name_lst) values = _maybe_casted_values(self.index) new_obj.insert(0, name, values) new_obj.index = new_index if not inplace: return new_obj #---------------------------------------------------------------------- # Reindex-based selection methods def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False): """ Return object with labels on given axis omitted where alternately any or all of the data are missing Parameters ---------- axis : {0, 1}, or tuple/list thereof Pass tuple or list to drop on multiple axes how : {'any', 'all'} * any : if any NA values are present, drop that label * all : if all values are NA, drop that label thresh : int, default None int value : require that many non-NA values subset : array-like Labels along other axis to consider, e.g. if you are dropping rows these would be a list of columns to include inplace : boolean, defalt False If True, do operation inplace and return None. 
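Examples
--------
A minimal sketch; the column names and NaN placement are chosen only to
show the options, and float display formatting may differ slightly
between pandas versions:

>>> import numpy as np
>>> import pandas as pd
>>> df = pd.DataFrame({'a': [1.0, np.nan, 3.0],
...                    'b': [np.nan, np.nan, 6.0]})
>>> df.dropna()                  # drop rows containing any NA
     a    b
2  3.0  6.0
>>> df.dropna(how='all')         # drop rows that are entirely NA
     a    b
0  1.0  NaN
2  3.0  6.0
>>> df.dropna(subset=['a'])      # only consider column 'a'
     a    b
0  1.0  NaN
2  3.0  6.0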
Returns ------- dropped : DataFrame """ if isinstance(axis, (tuple, list)): result = self for ax in axis: result = result.dropna(how=how, thresh=thresh, subset=subset, axis=ax) else: axis = self._get_axis_number(axis) agg_axis = 1 - axis agg_obj = self if subset is not None: ax = self._get_axis(agg_axis) indices = ax.get_indexer_for(subset) check = indices == -1 if check.any(): raise KeyError(list(np.compress(check,subset))) agg_obj = self.take(indices,axis=agg_axis) count = agg_obj.count(axis=agg_axis) if thresh is not None: mask = count >= thresh elif how == 'any': mask = count == len(agg_obj._get_axis(agg_axis)) elif how == 'all': mask = count > 0 else: if how is not None: raise ValueError('invalid how option: %s' % how) else: raise TypeError('must specify how or thresh') result = self.take(mask.nonzero()[0], axis=axis, convert=False) if inplace: self._update_inplace(result) else: return result @deprecate_kwarg(old_arg_name='cols', new_arg_name='subset') def drop_duplicates(self, subset=None, take_last=False, inplace=False): """ Return DataFrame with duplicate rows removed, optionally only considering certain columns Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns take_last : boolean, default False Take the last observed row in a row. Defaults to the first row inplace : boolean, default False Whether to drop duplicates in place or to return a copy cols : kwargs only argument of subset [deprecated] Returns ------- deduplicated : DataFrame """ duplicated = self.duplicated(subset, take_last=take_last) if inplace: inds, = (-duplicated).nonzero() new_data = self._data.take(inds) self._update_inplace(new_data) else: return self[-duplicated] @deprecate_kwarg(old_arg_name='cols', new_arg_name='subset') def duplicated(self, subset=None, take_last=False): """ Return boolean Series denoting duplicate rows, optionally only considering certain columns Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns take_last : boolean, default False For a set of distinct duplicate rows, flag all but the last row as duplicated. Default is for all but the first row to be flagged cols : kwargs only argument of subset [deprecated] Returns ------- duplicated : Series """ from pandas.core.groupby import get_group_index from pandas.core.algorithms import factorize from pandas.hashtable import duplicated_int64, _SIZE_HINT_LIMIT def f(vals): labels, shape = factorize(vals, size_hint=min(len(self), _SIZE_HINT_LIMIT)) return labels.astype('i8',copy=False), len(shape) if subset is None: subset = self.columns elif not np.iterable(subset) or \ isinstance(subset, compat.string_types) or \ isinstance(subset, tuple) and subset in self.columns: subset = subset, vals = (self[col].values for col in subset) labels, shape = map(list, zip( * map(f, vals))) ids = get_group_index(labels, shape, sort=False, xnull=False) return Series(duplicated_int64(ids, take_last), index=self.index) #---------------------------------------------------------------------- # Sorting def sort(self, columns=None, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last'): """ Sort DataFrame either by labels (along either axis) or by the values in column(s) Parameters ---------- columns : object Column name(s) in frame. Accepts a column name or a list for a nested sort. 
A tuple will be interpreted as the levels of a multi-index. ascending : boolean or list, default True Sort ascending vs. descending. Specify list for multiple sort orders axis : {0, 1} Sort index/rows versus columns inplace : boolean, default False Sort the DataFrame without creating a new instance kind : {'quicksort', 'mergesort', 'heapsort'}, optional This option is only applied when sorting on a single column or label. na_position : {'first', 'last'} (optional, default='last') 'first' puts NaNs at the beginning 'last' puts NaNs at the end Examples -------- >>> result = df.sort(['A', 'B'], ascending=[1, 0]) Returns ------- sorted : DataFrame """ return self.sort_index(by=columns, axis=axis, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position) def sort_index(self, axis=0, by=None, ascending=True, inplace=False, kind='quicksort', na_position='last'): """ Sort DataFrame either by labels (along either axis) or by the values in a column Parameters ---------- axis : {0, 1} Sort index/rows versus columns by : object Column name(s) in frame. Accepts a column name or a list for a nested sort. A tuple will be interpreted as the levels of a multi-index. ascending : boolean or list, default True Sort ascending vs. descending. Specify list for multiple sort orders inplace : boolean, default False Sort the DataFrame without creating a new instance na_position : {'first', 'last'} (optional, default='last') 'first' puts NaNs at the beginning 'last' puts NaNs at the end kind : {'quicksort', 'mergesort', 'heapsort'}, optional This option is only applied when sorting on a single column or label. Examples -------- >>> result = df.sort_index(by=['A', 'B'], ascending=[True, False]) Returns ------- sorted : DataFrame """ from pandas.core.groupby import _lexsort_indexer, _nargsort axis = self._get_axis_number(axis) if axis not in [0, 1]: # pragma: no cover raise AssertionError('Axis must be 0 or 1, got %s' % str(axis)) labels = self._get_axis(axis) if by is not None: if axis != 0: raise ValueError('When sorting by column, axis must be 0 ' '(rows)') if not isinstance(by, list): by = [by] if com.is_sequence(ascending) and len(by) != len(ascending): raise ValueError('Length of ascending (%d) != length of by' ' (%d)' % (len(ascending), len(by))) if len(by) > 1: def trans(v): if com.needs_i8_conversion(v): return v.view('i8') return v keys = [] for x in by: k = self[x].values if k.ndim == 2: raise ValueError('Cannot sort by duplicate column %s' % str(x)) keys.append(trans(k)) indexer = _lexsort_indexer(keys, orders=ascending, na_position=na_position) indexer = com._ensure_platform_int(indexer) else: by = by[0] k = self[by].values if k.ndim == 2: # try to be helpful if isinstance(self.columns, MultiIndex): raise ValueError('Cannot sort by column %s in a multi-index' ' you need to explicity provide all the levels' % str(by)) raise ValueError('Cannot sort by duplicate column %s' % str(by)) if isinstance(ascending, (tuple, list)): ascending = ascending[0] indexer = _nargsort(k, kind=kind, ascending=ascending, na_position=na_position) elif isinstance(labels, MultiIndex): # make sure that the axis is lexsorted to start # if not we need to reconstruct to get the correct indexer if not labels.is_lexsorted(): labels = MultiIndex.from_tuples(labels.values) indexer = _lexsort_indexer(labels.labels, orders=ascending, na_position=na_position) indexer = com._ensure_platform_int(indexer) else: indexer = _nargsort(labels, kind=kind, ascending=ascending, na_position=na_position) bm_axis = 
self._get_block_manager_axis(axis) new_data = self._data.take(indexer, axis=bm_axis, convert=False, verify=False) if inplace: return self._update_inplace(new_data) else: return self._constructor(new_data).__finalize__(self) def sortlevel(self, level=0, axis=0, ascending=True, inplace=False, sort_remaining=True): """ Sort multilevel index by chosen axis and primary level. Data will be lexicographically sorted by the chosen level followed by the other levels (in order) Parameters ---------- level : int axis : {0, 1} ascending : boolean, default True inplace : boolean, default False Sort the DataFrame without creating a new instance sort_remaining : boolean, default True Sort by the other levels too. Returns ------- sorted : DataFrame """ axis = self._get_axis_number(axis) the_axis = self._get_axis(axis) if not isinstance(the_axis, MultiIndex): raise TypeError('can only sort by level with a hierarchical index') new_axis, indexer = the_axis.sortlevel(level, ascending=ascending, sort_remaining=sort_remaining) if self._is_mixed_type and not inplace: ax = 'index' if axis == 0 else 'columns' if new_axis.is_unique: return self.reindex(**{ax: new_axis}) else: return self.take(indexer, axis=axis, convert=False) bm_axis = self._get_block_manager_axis(axis) new_data = self._data.take(indexer, axis=bm_axis, convert=False, verify=False) if inplace: return self._update_inplace(new_data) else: return self._constructor(new_data).__finalize__(self) def swaplevel(self, i, j, axis=0): """ Swap levels i and j in a MultiIndex on a particular axis Parameters ---------- i, j : int, string (can be mixed) Level of index to be swapped. Can pass level name as string. Returns ------- swapped : type of caller (new object) """ result = self.copy() axis = self._get_axis_number(axis) if axis == 0: result.index = result.index.swaplevel(i, j) else: result.columns = result.columns.swaplevel(i, j) return result def reorder_levels(self, order, axis=0): """ Rearrange index levels using input order. May not drop or duplicate levels Parameters ---------- order : list of int or list of str List representing new level order. Reference level by number (position) or by key (label). axis : int Where to reorder levels. 
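Examples
--------
A sketch with a hypothetical two-level index; the level names
``first``/``second`` and column ``x`` are illustrative:

>>> import pandas as pd
>>> idx = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b')],
...                                 names=['first', 'second'])
>>> df = pd.DataFrame({'x': [1, 2]}, index=idx)
>>> df.reorder_levels(['second', 'first']).index.names
FrozenList(['second', 'first'])
>>> df.reorder_levels([1, 0]).index.names      # same thing, by position
FrozenList(['second', 'first'])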
Returns ------- type of caller (new object) """ axis = self._get_axis_number(axis) if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover raise TypeError('Can only reorder levels on a hierarchical axis.') result = self.copy() if axis == 0: result.index = result.index.reorder_levels(order) else: result.columns = result.columns.reorder_levels(order) return result #---------------------------------------------------------------------- # Arithmetic / combination related def _combine_frame(self, other, func, fill_value=None, level=None): this, other = self.align(other, join='outer', level=level, copy=False) new_index, new_columns = this.index, this.columns def _arith_op(left, right): if fill_value is not None: left_mask = isnull(left) right_mask = isnull(right) left = left.copy() right = right.copy() # one but not both mask = left_mask ^ right_mask left[left_mask & mask] = fill_value right[right_mask & mask] = fill_value return func(left, right) if this._is_mixed_type or other._is_mixed_type: # unique if this.columns.is_unique: def f(col): r = _arith_op(this[col].values, other[col].values) return self._constructor_sliced(r, index=new_index, dtype=r.dtype) result = dict([(col, f(col)) for col in this]) # non-unique else: def f(i): r = _arith_op(this.iloc[:, i].values, other.iloc[:, i].values) return self._constructor_sliced(r, index=new_index, dtype=r.dtype) result = dict([ (i, f(i)) for i, col in enumerate(this.columns) ]) result = self._constructor(result, index=new_index, copy=False) result.columns = new_columns return result else: result = _arith_op(this.values, other.values) return self._constructor(result, index=new_index, columns=new_columns, copy=False) def _combine_series(self, other, func, fill_value=None, axis=None, level=None): if axis is not None: axis = self._get_axis_name(axis) if axis == 'index': return self._combine_match_index(other, func, level=level, fill_value=fill_value) else: return self._combine_match_columns(other, func, level=level, fill_value=fill_value) return self._combine_series_infer(other, func, level=level, fill_value=fill_value) def _combine_series_infer(self, other, func, level=None, fill_value=None): if len(other) == 0: return self * NA if len(self) == 0: # Ambiguous case, use _series so works with DataFrame return self._constructor(data=self._series, index=self.index, columns=self.columns) # teeny hack because one does DataFrame + TimeSeries all the time if self.index.is_all_dates and other.index.is_all_dates: warnings.warn(("TimeSeries broadcasting along DataFrame index " "by default is deprecated. Please use " "DataFrame.<op> to explicitly broadcast arithmetic " "operations along the index"), FutureWarning) return self._combine_match_index(other, func, level=level, fill_value=fill_value) else: return self._combine_match_columns(other, func, level=level, fill_value=fill_value) def _combine_match_index(self, other, func, level=None, fill_value=None): left, right = self.align(other, join='outer', axis=0, level=level, copy=False) if fill_value is not None: raise NotImplementedError("fill_value %r not supported." 
% fill_value) return self._constructor(func(left.values.T, right.values).T, index=left.index, columns=self.columns, copy=False) def _combine_match_columns(self, other, func, level=None, fill_value=None): left, right = self.align(other, join='outer', axis=1, level=level, copy=False) if fill_value is not None: raise NotImplementedError("fill_value %r not supported" % fill_value) new_data = left._data.eval( func=func, other=right, axes=[left.columns, self.index]) return self._constructor(new_data) def _combine_const(self, other, func, raise_on_error=True): if self.empty: return self new_data = self._data.eval(func=func, other=other, raise_on_error=raise_on_error) return self._constructor(new_data) def _compare_frame_evaluate(self, other, func, str_rep): # unique if self.columns.is_unique: def _compare(a, b): return dict([(col, func(a[col], b[col])) for col in a.columns]) new_data = expressions.evaluate(_compare, str_rep, self, other) return self._constructor(data=new_data, index=self.index, columns=self.columns, copy=False) # non-unique else: def _compare(a, b): return dict([(i, func(a.iloc[:, i], b.iloc[:, i])) for i, col in enumerate(a.columns)]) new_data = expressions.evaluate(_compare, str_rep, self, other) result = self._constructor(data=new_data, index=self.index, copy=False) result.columns = self.columns return result def _compare_frame(self, other, func, str_rep): if not self._indexed_same(other): raise ValueError('Can only compare identically-labeled ' 'DataFrame objects') return self._compare_frame_evaluate(other, func, str_rep) def _flex_compare_frame(self, other, func, str_rep, level): if not self._indexed_same(other): self, other = self.align(other, 'outer', level=level, copy=False) return self._compare_frame_evaluate(other, func, str_rep) def combine(self, other, func, fill_value=None, overwrite=True): """ Add two DataFrame objects and do not propagate NaN values, so if for a (column, time) one frame is missing a value, it will default to the other frame's value (which might be NaN as well) Parameters ---------- other : DataFrame func : function fill_value : scalar value overwrite : boolean, default True If True then overwrite values for common keys in the calling frame Returns ------- result : DataFrame """ other_idxlen = len(other.index) # save for compare this, other = self.align(other, copy=False) new_index = this.index if other.empty and len(new_index) == len(self.index): return self.copy() if self.empty and len(other) == other_idxlen: return other.copy() # sorts if possible new_columns = this.columns.union(other.columns) do_fill = fill_value is not None result = {} for col in new_columns: series = this[col] otherSeries = other[col] this_dtype = series.dtype other_dtype = otherSeries.dtype this_mask = isnull(series) other_mask = isnull(otherSeries) # don't overwrite columns unecessarily # DO propogate if this column is not in the intersection if not overwrite and other_mask.all(): result[col] = this[col].copy() continue if do_fill: series = series.copy() otherSeries = otherSeries.copy() series[this_mask] = fill_value otherSeries[other_mask] = fill_value # if we have different dtypes, possibily promote new_dtype = this_dtype if this_dtype != other_dtype: new_dtype = com._lcd_dtypes(this_dtype, other_dtype) series = series.astype(new_dtype) otherSeries = otherSeries.astype(new_dtype) # see if we need to be represented as i8 (datetimelike) # try to keep us at this dtype needs_i8_conversion = com.needs_i8_conversion(new_dtype) if needs_i8_conversion: this_dtype = new_dtype arr 
= func(series, otherSeries, True) else: arr = func(series, otherSeries) if do_fill: arr = com.ensure_float(arr) arr[this_mask & other_mask] = NA # try to downcast back to the original dtype if needs_i8_conversion: arr = com._possibly_cast_to_datetime(arr, this_dtype) else: arr = com._possibly_downcast_to_dtype(arr, this_dtype) result[col] = arr # convert_objects just in case return self._constructor(result, index=new_index, columns=new_columns).convert_objects( convert_dates=True, copy=False) def combine_first(self, other): """ Combine two DataFrame objects and default to non-null values in frame calling the method. Result index columns will be the union of the respective indexes and columns Parameters ---------- other : DataFrame Examples -------- a's values prioritized, use values from b to fill holes: >>> a.combine_first(b) Returns ------- combined : DataFrame """ def combiner(x, y, needs_i8_conversion=False): x_values = x.values if hasattr(x, 'values') else x y_values = y.values if hasattr(y, 'values') else y if needs_i8_conversion: mask = isnull(x) x_values = x_values.view('i8') y_values = y_values.view('i8') else: mask = isnull(x_values) return expressions.where(mask, y_values, x_values, raise_on_error=True) return self.combine(other, combiner, overwrite=False) def update(self, other, join='left', overwrite=True, filter_func=None, raise_conflict=False): """ Modify DataFrame in place using non-NA values from passed DataFrame. Aligns on indices Parameters ---------- other : DataFrame, or object coercible into a DataFrame join : {'left'}, default 'left' overwrite : boolean, default True If True then overwrite values for common keys in the calling frame filter_func : callable(1d-array) -> 1d-array<boolean>, default None Can choose to replace values other than NA. Return True for values that should be updated raise_conflict : boolean If True, will raise an error if the DataFrame and other both contain data in the same place. """ # TODO: Support other joins if join != 'left': # pragma: no cover raise NotImplementedError("Only left join is supported") if not isinstance(other, DataFrame): other = DataFrame(other) other = other.reindex_like(self) for col in self.columns: this = self[col].values that = other[col].values if filter_func is not None: mask = ~filter_func(this) | isnull(that) else: if raise_conflict: mask_this = notnull(that) mask_that = notnull(this) if any(mask_this & mask_that): raise ValueError("Data overlaps.") if overwrite: mask = isnull(that) # don't overwrite columns unecessarily if mask.all(): continue else: mask = notnull(this) self[col] = expressions.where( mask, this, that, raise_on_error=True) #---------------------------------------------------------------------- # Misc methods def first_valid_index(self): """ Return label for first non-NA/null value """ return self.index[self.count(1) > 0][0] def last_valid_index(self): """ Return label for last non-NA/null value """ return self.index[self.count(1) > 0][-1] #---------------------------------------------------------------------- # Data reshaping def pivot(self, index=None, columns=None, values=None): """ Reshape data (produce a "pivot" table) based on column values. 
Uses unique values from index / columns to form axes and return either DataFrame or Panel, depending on whether you request a single value column (DataFrame) or all columns (Panel) Parameters ---------- index : string or object Column name to use to make new frame's index columns : string or object Column name to use to make new frame's columns values : string or object, optional Column name to use for populating new frame's values Notes ----- For finer-tuned control, see hierarchical indexing documentation along with the related stack/unstack methods Examples -------- >>> df foo bar baz 0 one A 1. 1 one B 2. 2 one C 3. 3 two A 4. 4 two B 5. 5 two C 6. >>> df.pivot('foo', 'bar', 'baz') A B C one 1 2 3 two 4 5 6 >>> df.pivot('foo', 'bar')['baz'] A B C one 1 2 3 two 4 5 6 Returns ------- pivoted : DataFrame If no values column specified, will have hierarchically indexed columns """ from pandas.core.reshape import pivot return pivot(self, index=index, columns=columns, values=values) def stack(self, level=-1, dropna=True): """ Pivot a level of the (possibly hierarchical) column labels, returning a DataFrame (or Series in the case of an object with a single level of column labels) having a hierarchical index with a new inner-most level of row labels. The level involved will automatically get sorted. Parameters ---------- level : int, string, or list of these, default last level Level(s) to stack, can pass level name dropna : boolean, default True Whether to drop rows in the resulting Frame/Series with no valid values Examples ---------- >>> s a b one 1. 2. two 3. 4. >>> s.stack() one a 1 b 2 two a 3 b 4 Returns ------- stacked : DataFrame or Series """ from pandas.core.reshape import stack, stack_multiple if isinstance(level, (tuple, list)): return stack_multiple(self, level, dropna=dropna) else: return stack(self, level, dropna=dropna) def unstack(self, level=-1): """ Pivot a level of the (necessarily hierarchical) index labels, returning a DataFrame having a new level of column labels whose inner-most level consists of the pivoted index labels. If the index is not a MultiIndex, the output will be a Series (the analogue of stack when the columns are not a MultiIndex). The level involved will automatically get sorted. Parameters ---------- level : int, string, or list of these, default -1 (last level) Level(s) of index to unstack, can pass level name See also -------- DataFrame.pivot : Pivot a table based on column values. DataFrame.stack : Pivot a level of the column labels (inverse operation from `unstack`). Examples -------- >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), ... ('two', 'a'), ('two', 'b')]) >>> s = pd.Series(np.arange(1.0, 5.0), index=index) >>> s one a 1 b 2 two a 3 b 4 dtype: float64 >>> s.unstack(level=-1) a b one 1 2 two 3 4 >>> s.unstack(level=0) one two a 1 3 b 2 4 >>> df = s.unstack(level=0) >>> df.unstack() one a 1. b 3. two a 2. b 4. 
Returns ------- unstacked : DataFrame or Series """ from pandas.core.reshape import unstack return unstack(self, level) #---------------------------------------------------------------------- # Time series-related def diff(self, periods=1): """ 1st discrete difference of object Parameters ---------- periods : int, default 1 Periods to shift for forming difference Returns ------- diffed : DataFrame """ new_data = self._data.diff(n=periods) return self._constructor(new_data) #---------------------------------------------------------------------- # Function application def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None, args=(), **kwds): """ Applies function along input axis of DataFrame. Objects passed to functions are Series objects having index either the DataFrame's index (axis=0) or the columns (axis=1). Return type depends on whether passed function aggregates, or the reduce argument if the DataFrame is empty. Parameters ---------- func : function Function to apply to each column/row axis : {0, 1} * 0 : apply function to each column * 1 : apply function to each row broadcast : boolean, default False For aggregation functions, return object of same size with values propagated reduce : boolean or None, default None Try to apply reduction procedures. If the DataFrame is empty, apply will use reduce to determine whether the result should be a Series or a DataFrame. If reduce is None (the default), apply's return value will be guessed by calling func an empty Series (note: while guessing, exceptions raised by func will be ignored). If reduce is True a Series will always be returned, and if False a DataFrame will always be returned. raw : boolean, default False If False, convert each row or column into a Series. If raw=True the passed function will receive ndarray objects instead. If you are just applying a NumPy reduction function this will achieve much better performance args : tuple Positional arguments to pass to function in addition to the array/series Additional keyword arguments will be passed as keywords to the function Notes ----- In the current implementation apply calls func twice on the first column/row to decide whether it can take a fast or slow code path. This can lead to unexpected behavior if func has side-effects, as they will take effect twice for the first column/row. 
Examples -------- >>> df.apply(numpy.sqrt) # returns DataFrame >>> df.apply(numpy.sum, axis=0) # equiv to df.sum(0) >>> df.apply(numpy.sum, axis=1) # equiv to df.sum(1) See also -------- DataFrame.applymap: For elementwise operations Returns ------- applied : Series or DataFrame """ axis = self._get_axis_number(axis) if kwds or args and not isinstance(func, np.ufunc): f = lambda x: func(x, *args, **kwds) else: f = func if len(self.columns) == 0 and len(self.index) == 0: return self._apply_empty_result(func, axis, reduce, *args, **kwds) if isinstance(f, np.ufunc): results = f(self.values) return self._constructor(data=results, index=self.index, columns=self.columns, copy=False) else: if not broadcast: if not all(self.shape): return self._apply_empty_result(func, axis, reduce, *args, **kwds) if raw and not self._is_mixed_type: return self._apply_raw(f, axis) else: if reduce is None: reduce = True return self._apply_standard(f, axis, reduce=reduce) else: return self._apply_broadcast(f, axis) def _apply_empty_result(self, func, axis, reduce, *args, **kwds): if reduce is None: reduce = False try: reduce = not isinstance(func(_EMPTY_SERIES, *args, **kwds), Series) except Exception: pass if reduce: return Series(NA, index=self._get_agg_axis(axis)) else: return self.copy() def _apply_raw(self, func, axis): try: result = lib.reduce(self.values, func, axis=axis) except Exception: result = np.apply_along_axis(func, axis, self.values) # TODO: mixed type case if result.ndim == 2: return DataFrame(result, index=self.index, columns=self.columns) else: return Series(result, index=self._get_agg_axis(axis)) def _apply_standard(self, func, axis, ignore_failures=False, reduce=True): # skip if we are mixed datelike and trying reduce across axes # GH6125 if reduce and axis==1 and self._is_mixed_type and self._is_datelike_mixed_type: reduce=False # try to reduce first (by default) # this only matters if the reduction in values is of different dtype # e.g. 
if we want to apply to a SparseFrame, then can't directly reduce if reduce: try: # the is the fast-path values = self.values dummy = Series(NA, index=self._get_axis(axis), dtype=values.dtype) labels = self._get_agg_axis(axis) result = lib.reduce(values, func, axis=axis, dummy=dummy, labels=labels) return Series(result, index=labels) except Exception: pass dtype = object if self._is_mixed_type else None if axis == 0: series_gen = (self.icol(i) for i in range(len(self.columns))) res_index = self.columns res_columns = self.index elif axis == 1: res_index = self.index res_columns = self.columns values = self.values series_gen = (Series.from_array(arr, index=res_columns, name=name, dtype=dtype) for i, (arr, name) in enumerate(zip(values, res_index))) else: # pragma : no cover raise AssertionError('Axis must be 0 or 1, got %s' % str(axis)) i = None keys = [] results = {} if ignore_failures: successes = [] for i, v in enumerate(series_gen): try: results[i] = func(v) keys.append(v.name) successes.append(i) except Exception: pass # so will work with MultiIndex if len(successes) < len(res_index): res_index = res_index.take(successes) else: try: for i, v in enumerate(series_gen): results[i] = func(v) keys.append(v.name) except Exception as e: if hasattr(e, 'args'): # make sure i is defined if i is not None: k = res_index[i] e.args = e.args + ('occurred at index %s' % com.pprint_thing(k),) raise if len(results) > 0 and is_sequence(results[0]): if not isinstance(results[0], Series): index = res_columns else: index = None result = self._constructor(data=results, index=index) result.columns = res_index if axis == 1: result = result.T result = result.convert_objects(copy=False) else: result = Series(results) result.index = res_index return result def _apply_broadcast(self, func, axis): if axis == 0: target = self elif axis == 1: target = self.T else: # pragma: no cover raise AssertionError('Axis must be 0 or 1, got %s' % axis) result_values = np.empty_like(target.values) columns = target.columns for i, col in enumerate(columns): result_values[:, i] = func(target[col]) result = self._constructor(result_values, index=target.index, columns=target.columns) if axis == 1: result = result.T return result def applymap(self, func): """ Apply a function to a DataFrame that is intended to operate elementwise, i.e. like doing map(func, series) for each series in the DataFrame Parameters ---------- func : function Python function, returns a single value from a single value Returns ------- applied : DataFrame See also -------- DataFrame.apply : For operations on rows/columns """ # if we have a dtype == 'M8[ns]', provide boxed values def infer(x): if com.needs_i8_conversion(x): f = com.i8_boxer(x) x = lib.map_infer(_values_from_object(x), f) return lib.map_infer(_values_from_object(x), func) return self.apply(infer) #---------------------------------------------------------------------- # Merging / joining methods def append(self, other, ignore_index=False, verify_integrity=False): """ Append rows of `other` to the end of this frame, returning a new object. Columns not in this frame are added as new columns. Parameters ---------- other : DataFrame or Series/dict-like object, or list of these The data to append. ignore_index : boolean, default False If True, do not use the index labels. verify_integrity : boolean, default False If True, raise ValueError on creating index with duplicates. 
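As an illustrative note (editor's addition, not in the original text): with
``verify_integrity=True``, appending a frame whose index labels already
appear in the caller is expected to raise ``ValueError``, e.g.

>>> df.append(df, verify_integrity=True)   # duplicate labels, raises ValueError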
Returns ------- appended : DataFrame Notes ----- If a list of dict/series is passed and the keys are all contained in the DataFrame's index, the order of the columns in the resulting DataFrame will be unchanged. See also -------- pandas.concat : General function to concatenate DataFrame, Series or Panel objects Examples -------- >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB')) >>> df A B 0 1 2 1 3 4 >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB')) >>> df.append(df2) A B 0 1 2 1 3 4 0 5 6 1 7 8 With `ignore_index` set to True: >>> df.append(df2, ignore_index=True) A B 0 1 2 1 3 4 2 5 6 3 7 8 """ if isinstance(other, (Series, dict)): if isinstance(other, dict): other = Series(other) if other.name is None and not ignore_index: raise TypeError('Can only append a Series if ignore_index=True' ' or if the Series has a name') index = None if other.name is None else [other.name] combined_columns = self.columns.tolist() + self.columns.union(other.index).difference(self.columns).tolist() other = other.reindex(combined_columns, copy=False) other = DataFrame(other.values.reshape((1, len(other))), index=index, columns=combined_columns).convert_objects() if not self.columns.equals(combined_columns): self = self.reindex(columns=combined_columns) elif isinstance(other, list) and not isinstance(other[0], DataFrame): other = DataFrame(other) if (self.columns.get_indexer(other.columns) >= 0).all(): other = other.ix[:, self.columns] from pandas.tools.merge import concat if isinstance(other, (list, tuple)): to_concat = [self] + other else: to_concat = [self, other] return concat(to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity) def join(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False): """ Join columns with other DataFrame either on index or on a key column. Efficiently Join multiple DataFrame objects by index at once by passing a list. Parameters ---------- other : DataFrame, Series with name field set, or list of DataFrame Index should be similar to one of the columns in this one. If a Series is passed, its name attribute must be set, and that will be used as the column name in the resulting joined DataFrame on : column name, tuple/list of column names, or array-like Column(s) to use for joining, otherwise join on index. If multiples columns given, the passed DataFrame must have a MultiIndex. Can pass an array as the join key if not already contained in the calling DataFrame. Like an Excel VLOOKUP operation how : {'left', 'right', 'outer', 'inner'} How to handle indexes of the two objects. Default: 'left' for joining on index, None otherwise * left: use calling frame's index * right: use input frame's index * outer: form union of indexes * inner: use intersection of indexes lsuffix : string Suffix to use from left frame's overlapping columns rsuffix : string Suffix to use from right frame's overlapping columns sort : boolean, default False Order result DataFrame lexicographically by the join key. 
If False, preserves the index order of the calling (left) DataFrame Notes ----- on, lsuffix, and rsuffix options are not supported when passing a list of DataFrame objects Returns ------- joined : DataFrame """ # For SparseDataFrame's benefit return self._join_compat(other, on=on, how=how, lsuffix=lsuffix, rsuffix=rsuffix, sort=sort) def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False): from pandas.tools.merge import merge, concat if isinstance(other, Series): if other.name is None: raise ValueError('Other Series must have a name') other = DataFrame({other.name: other}) if isinstance(other, DataFrame): return merge(self, other, left_on=on, how=how, left_index=on is None, right_index=True, suffixes=(lsuffix, rsuffix), sort=sort) else: if on is not None: raise ValueError('Joining multiple DataFrames only supported' ' for joining on index') # join indexes only using concat if how == 'left': how = 'outer' join_axes = [self.index] else: join_axes = None frames = [self] + list(other) can_concat = all(df.index.is_unique for df in frames) if can_concat: return concat(frames, axis=1, join=how, join_axes=join_axes, verify_integrity=True) joined = frames[0] for frame in frames[1:]: joined = merge(joined, frame, how=how, left_index=True, right_index=True) return joined @Substitution('') @Appender(_merge_doc, indents=2) def merge(self, right, how='inner', on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=False, suffixes=('_x', '_y'), copy=True): from pandas.tools.merge import merge return merge(self, right, how=how, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, sort=sort, suffixes=suffixes, copy=copy) #---------------------------------------------------------------------- # Statistical methods, etc. def corr(self, method='pearson', min_periods=1): """ Compute pairwise correlation of columns, excluding NA/null values Parameters ---------- method : {'pearson', 'kendall', 'spearman'} * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient * spearman : Spearman rank correlation min_periods : int, optional Minimum number of observations required per pair of columns to have a valid result. Currently only available for pearson and spearman correlation Returns ------- y : DataFrame """ numeric_df = self._get_numeric_data() cols = numeric_df.columns mat = numeric_df.values if method == 'pearson': correl = _algos.nancorr(com._ensure_float64(mat), minp=min_periods) elif method == 'spearman': correl = _algos.nancorr_spearman(com._ensure_float64(mat), minp=min_periods) else: if min_periods is None: min_periods = 1 mat = mat.T corrf = nanops.get_corr_func(method) K = len(cols) correl = np.empty((K, K), dtype=float) mask = np.isfinite(mat) for i, ac in enumerate(mat): for j, bc in enumerate(mat): valid = mask[i] & mask[j] if valid.sum() < min_periods: c = NA elif not valid.all(): c = corrf(ac[valid], bc[valid]) else: c = corrf(ac, bc) correl[i, j] = c correl[j, i] = c return self._constructor(correl, index=cols, columns=cols) def cov(self, min_periods=None): """ Compute pairwise covariance of columns, excluding NA/null values Parameters ---------- min_periods : int, optional Minimum number of observations required per pair of columns to have a valid result. Returns ------- y : DataFrame Notes ----- `y` contains the covariance matrix of the DataFrame's time series. The covariance is normalized by N-1 (unbiased estimator). 
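As a quick illustrative sketch (editor's addition; assumes a hypothetical
numeric frame ``df`` with columns 'a' and 'b'): the diagonal of the result
matches each column's sample variance, and the matrix is symmetric.

>>> c = df.cov()
>>> c.loc['a', 'a']                      # same N-1 normalization as df['a'].var()
>>> c.loc['a', 'b'] == c.loc['b', 'a']   # covariance matrix is symmetric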
""" numeric_df = self._get_numeric_data() cols = numeric_df.columns mat = numeric_df.values if notnull(mat).all(): if min_periods is not None and min_periods > len(mat): baseCov = np.empty((mat.shape[1], mat.shape[1])) baseCov.fill(np.nan) else: baseCov = np.cov(mat.T) baseCov = baseCov.reshape((len(cols), len(cols))) else: baseCov = _algos.nancorr(com._ensure_float64(mat), cov=True, minp=min_periods) return self._constructor(baseCov, index=cols, columns=cols) def corrwith(self, other, axis=0, drop=False): """ Compute pairwise correlation between rows or columns of two DataFrame objects. Parameters ---------- other : DataFrame axis : {0, 1} 0 to compute column-wise, 1 for row-wise drop : boolean, default False Drop missing indices from result, default returns union of all Returns ------- correls : Series """ axis = self._get_axis_number(axis) if isinstance(other, Series): return self.apply(other.corr, axis=axis) this = self._get_numeric_data() other = other._get_numeric_data() left, right = this.align(other, join='inner', copy=False) # mask missing values left = left + right * 0 right = right + left * 0 if axis == 1: left = left.T right = right.T # demeaned data ldem = left - left.mean() rdem = right - right.mean() num = (ldem * rdem).sum() dom = (left.count() - 1) * left.std() * right.std() correl = num / dom if not drop: raxis = 1 if axis == 0 else 0 result_index = this._get_axis(raxis).union(other._get_axis(raxis)) correl = correl.reindex(result_index) return correl #---------------------------------------------------------------------- # ndarray-like stats methods def count(self, axis=0, level=None, numeric_only=False): """ Return Series with number of non-NA/null observations over requested axis. Works with non-floating point data as well (detects NaN and None) Parameters ---------- axis : {0, 1} 0 for row-wise, 1 for column-wise level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a DataFrame numeric_only : boolean, default False Include only float, int, boolean data Returns ------- count : Series (or DataFrame if level specified) """ axis = self._get_axis_number(axis) if level is not None: return self._count_level(level, axis=axis, numeric_only=numeric_only) if numeric_only: frame = self._get_numeric_data() else: frame = self # GH #423 if len(frame._get_axis(axis)) == 0: result = Series(0, index=frame._get_agg_axis(axis)) else: if frame._is_mixed_type: result = notnull(frame).sum(axis=axis) else: counts = notnull(frame.values).sum(axis=axis) result = Series(counts, index=frame._get_agg_axis(axis)) return result.astype('int64') def _count_level(self, level, axis=0, numeric_only=False): if numeric_only: frame = self._get_numeric_data() else: frame = self count_axis = frame._get_axis(axis) agg_axis = frame._get_agg_axis(axis) if not isinstance(count_axis, MultiIndex): raise TypeError("Can only count levels on hierarchical %s." 
% self._get_axis_name(axis)) if frame._is_mixed_type: # Since we have mixed types, calling notnull(frame.values) might # upcast everything to object mask = notnull(frame).values else: # But use the speedup when we have homogeneous dtypes mask = notnull(frame.values) if axis == 1: # We're transposing the mask rather than frame to avoid potential # upcasts to object, which induces a ~20x slowdown mask = mask.T if isinstance(level, compat.string_types): level = count_axis._get_level_number(level) level_index = count_axis.levels[level] labels = com._ensure_int64(count_axis.labels[level]) counts = lib.count_level_2d(mask, labels, len(level_index)) result = DataFrame(counts, index=level_index, columns=agg_axis) if axis == 1: # Undo our earlier transpose return result.T else: return result def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds): axis = self._get_axis_number(axis) f = lambda x: op(x, axis=axis, skipna=skipna, **kwds) labels = self._get_agg_axis(axis) # exclude timedelta/datetime unless we are uniform types if axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type: numeric_only = True if numeric_only is None: try: values = self.values result = f(values) except Exception as e: # try by-column first if filter_type is None and axis == 0: try: # this can end up with a non-reduction # but not always. if the types are mixed # with datelike then need to make sure a series result = self.apply(f,reduce=False) if result.ndim == self.ndim: result = result.iloc[0] return result except: pass if filter_type is None or filter_type == 'numeric': data = self._get_numeric_data() elif filter_type == 'bool': data = self._get_bool_data() else: # pragma: no cover e = NotImplementedError("Handling exception with filter_" "type %s not implemented." % filter_type) raise_with_traceback(e) result = f(data.values) labels = data._get_agg_axis(axis) else: if numeric_only: if filter_type is None or filter_type == 'numeric': data = self._get_numeric_data() elif filter_type == 'bool': data = self._get_bool_data() else: # pragma: no cover msg = ("Generating numeric_only data with filter_type %s" "not supported." % filter_type) raise NotImplementedError(msg) values = data.values labels = data._get_agg_axis(axis) else: values = self.values result = f(values) if is_object_dtype(result.dtype): try: if filter_type is None or filter_type == 'numeric': result = result.astype(np.float64) elif filter_type == 'bool' and notnull(result).all(): result = result.astype(np.bool_) except (ValueError, TypeError): # try to coerce to the original dtypes item by item if we can if axis == 0: result = com._coerce_to_dtypes(result, self.dtypes) return Series(result, index=labels) def idxmin(self, axis=0, skipna=True): """ Return index of first occurrence of minimum over requested axis. NA/null values are excluded. Parameters ---------- axis : {0, 1} 0 for row-wise, 1 for column-wise skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA Returns ------- idxmin : Series Notes ----- This method is the DataFrame version of ``ndarray.argmin``. See Also -------- Series.idxmin """ axis = self._get_axis_number(axis) indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna) index = self._get_axis(axis) result = [index[i] if i >= 0 else NA for i in indices] return Series(result, index=self._get_agg_axis(axis)) def idxmax(self, axis=0, skipna=True): """ Return index of first occurrence of maximum over requested axis. 
NA/null values are excluded. Parameters ---------- axis : {0, 1} 0 for row-wise, 1 for column-wise skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be first index. Returns ------- idxmax : Series Notes ----- This method is the DataFrame version of ``ndarray.argmax``. See Also -------- Series.idxmax """ axis = self._get_axis_number(axis) indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna) index = self._get_axis(axis) result = [index[i] if i >= 0 else NA for i in indices] return Series(result, index=self._get_agg_axis(axis)) def _get_agg_axis(self, axis_num): """ let's be explict about this """ if axis_num == 0: return self.columns elif axis_num == 1: return self.index else: raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num) def mode(self, axis=0, numeric_only=False): """ Gets the mode(s) of each element along the axis selected. Empty if nothing has 2+ occurrences. Adds a row for each mode per label, fills in gaps with nan. Note that there could be multiple values returned for the selected axis (when more than one item share the maximum frequency), which is the reason why a dataframe is returned. If you want to impute missing values with the mode in a dataframe ``df``, you can just do this: ``df.fillna(df.mode().iloc[0])`` Parameters ---------- axis : {0, 1, 'index', 'columns'} (default 0) * 0/'index' : get mode of each column * 1/'columns' : get mode of each row numeric_only : boolean, default False if True, only apply to numeric columns Returns ------- modes : DataFrame (sorted) Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 1, 2, 1, 2, 3]}) >>> df.mode() A 0 1 1 2 """ data = self if not numeric_only else self._get_numeric_data() f = lambda s: s.mode() return data.apply(f, axis=axis) def quantile(self, q=0.5, axis=0, numeric_only=True): """ Return values at the given quantile over requested axis, a la numpy.percentile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) 0 <= q <= 1, the quantile(s) to compute axis : {0, 1} 0 for row-wise, 1 for column-wise Returns ------- quantiles : Series or DataFrame If ``q`` is an array, a DataFrame will be returned where the index is ``q``, the columns are the columns of self, and the values are the quantiles. If ``q`` is a float, a Series will be returned where the index is the columns of self and the values are the quantiles. Examples -------- >>> df = DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]), columns=['a', 'b']) >>> df.quantile(.1) a 1.3 b 3.7 dtype: float64 >>> df.quantile([.1, .5]) a b 0.1 1.3 3.7 0.5 2.5 55.0 """ per = np.asarray(q) * 100 if not com.is_list_like(per): per = [per] q = [q] squeeze = True else: squeeze = False def f(arr, per): if arr._is_datelike_mixed_type: values = _values_from_object(arr).view('i8') else: values = arr.astype(float) values = values[notnull(values)] if len(values) == 0: return NA else: return _quantile(values, per) data = self._get_numeric_data() if numeric_only else self if axis == 1: data = data.T # need to know which cols are timestamp going in so that we can # map timestamp over them after getting the quantile. 
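        # (Editor's clarifying note: datetime64 columns were reduced on their
        # int64 view inside ``f`` above, so their quantiles come back as plain
        # numbers; the ``applymap(lib.Timestamp)`` step below converts those
        # numbers back into Timestamp objects for exactly the datetime columns
        # identified here.)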
is_dt_col = data.dtypes.map(com.is_datetime64_dtype) is_dt_col = is_dt_col[is_dt_col].index quantiles = [[f(vals, x) for x in per] for (_, vals) in data.iteritems()] result = DataFrame(quantiles, index=data._info_axis, columns=q).T if len(is_dt_col) > 0: result[is_dt_col] = result[is_dt_col].applymap(lib.Timestamp) if squeeze: if result.shape == (1, 1): result = result.T.iloc[:, 0] # don't want scalar else: result = result.T.squeeze() result.name = None # For groupby, so it can set an index name return result def rank(self, axis=0, numeric_only=None, method='average', na_option='keep', ascending=True, pct=False): """ Compute numerical data ranks (1 through n) along axis. Equal values are assigned a rank that is the average of the ranks of those values Parameters ---------- axis : {0, 1}, default 0 Ranks over columns (0) or rows (1) numeric_only : boolean, default None Include only float, int, boolean data method : {'average', 'min', 'max', 'first', 'dense'} * average: average rank of group * min: lowest rank in group * max: highest rank in group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups na_option : {'keep', 'top', 'bottom'} * keep: leave NA values where they are * top: smallest rank if ascending * bottom: smallest rank if descending ascending : boolean, default True False for ranks by high (1) to low (N) pct : boolean, default False Computes percentage rank of data Returns ------- ranks : DataFrame """ axis = self._get_axis_number(axis) if numeric_only is None: try: ranks = algos.rank(self.values, axis=axis, method=method, ascending=ascending, na_option=na_option, pct=pct) return self._constructor(ranks, index=self.index, columns=self.columns) except TypeError: numeric_only = True if numeric_only: data = self._get_numeric_data() else: data = self ranks = algos.rank(data.values, axis=axis, method=method, ascending=ascending, na_option=na_option, pct=pct) return self._constructor(ranks, index=data.index, columns=data.columns) def to_timestamp(self, freq=None, how='start', axis=0, copy=True): """ Cast to DatetimeIndex of timestamps, at *beginning* of period Parameters ---------- freq : string, default frequency of PeriodIndex Desired frequency how : {'s', 'e', 'start', 'end'} Convention for converting period to timestamp; start of period vs. end axis : {0, 1} default 0 The axis to convert (the index by default) copy : boolean, default True If false then underlying input data is not copied Returns ------- df : DataFrame with DatetimeIndex """ new_data = self._data if copy: new_data = new_data.copy() axis = self._get_axis_number(axis) if axis == 0: new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how)) elif axis == 1: new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how)) else: # pragma: no cover raise AssertionError('Axis must be 0 or 1. 
Got %s' % str(axis)) return self._constructor(new_data) def to_period(self, freq=None, axis=0, copy=True): """ Convert DataFrame from DatetimeIndex to PeriodIndex with desired frequency (inferred from index if not passed) Parameters ---------- freq : string, default axis : {0, 1}, default 0 The axis to convert (the index by default) copy : boolean, default True If False then underlying input data is not copied Returns ------- ts : TimeSeries with PeriodIndex """ new_data = self._data if copy: new_data = new_data.copy() axis = self._get_axis_number(axis) if axis == 0: new_data.set_axis(1, self.index.to_period(freq=freq)) elif axis == 1: new_data.set_axis(0, self.columns.to_period(freq=freq)) else: # pragma: no cover raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis)) return self._constructor(new_data) def isin(self, values): """ Return boolean DataFrame showing whether each element in the DataFrame is contained in values. Parameters ---------- values : iterable, Series, DataFrame or dictionary The result will only be true at a location if all the labels match. If `values` is a Series, that's the index. If `values` is a dictionary, the keys must be the column names, which must match. If `values` is a DataFrame, then both the index and column labels must match. Returns ------- DataFrame of booleans Examples -------- When ``values`` is a list: >>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']}) >>> df.isin([1, 3, 12, 'a']) A B 0 True True 1 False False 2 True False When ``values`` is a dict: >>> df = DataFrame({'A': [1, 2, 3], 'B': [1, 4, 7]}) >>> df.isin({'A': [1, 3], 'B': [4, 7, 12]}) A B 0 True False # Note that B didn't match the 1 here. 1 False True 2 True True When ``values`` is a Series or DataFrame: >>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']}) >>> other = DataFrame({'A': [1, 3, 3, 2], 'B': ['e', 'f', 'f', 'e']}) >>> df.isin(other) A B 0 True False 1 False False # Column A in `other` has a 3, but not at index 1. 2 True True """ if isinstance(values, dict): from collections import defaultdict from pandas.tools.merge import concat values = defaultdict(list, values) return concat((self.iloc[:, [i]].isin(values[col]) for i, col in enumerate(self.columns)), axis=1) elif isinstance(values, Series): if not values.index.is_unique: raise ValueError("ValueError: cannot compute isin with" " a duplicate axis.") return self.eq(values.reindex_like(self), axis='index') elif isinstance(values, DataFrame): if not (values.columns.is_unique and values.index.is_unique): raise ValueError("ValueError: cannot compute isin with" " a duplicate axis.") return self.eq(values.reindex_like(self)) else: if not is_list_like(values): raise TypeError("only list-like or dict-like objects are" " allowed to be passed to DataFrame.isin(), " "you passed a " "{0!r}".format(type(values).__name__)) return DataFrame(lib.ismember(self.values.ravel(), set(values)).reshape(self.shape), self.index, self.columns) #---------------------------------------------------------------------- # Deprecated stuff def combineAdd(self, other): """ Add two DataFrame objects and do not propagate NaN values, so if for a (column, time) one frame is missing a value, it will default to the other frame's value (which might be NaN as well) Parameters ---------- other : DataFrame Returns ------- DataFrame """ return self.add(other, fill_value=0.) 
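    # Editor's illustrative sketch (not part of the original source): with
    # fill_value=0. a missing cell simply takes the other frame's value.
    #
    #   >>> a = DataFrame({'x': [1.0, np.nan]})
    #   >>> b = DataFrame({'x': [10.0, 20.0]})
    #   >>> a.combineAdd(b)['x'].tolist()
    #   [11.0, 20.0]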
def combineMult(self, other): """ Multiply two DataFrame objects and do not propagate NaN values, so if for a (column, time) one frame is missing a value, it will default to the other frame's value (which might be NaN as well) Parameters ---------- other : DataFrame Returns ------- DataFrame """ return self.mul(other, fill_value=1.) DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0, axes_are_reversed=True, aliases={'rows': 0}) DataFrame._add_numeric_operations() _EMPTY_SERIES = Series([]) def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None): """ Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases. """ # figure out the index, if necessary if index is None: index = extract_index(arrays) else: index = _ensure_index(index) # don't force copy because getting jammed in an ndarray anyway arrays = _homogenize(arrays, index, dtype) # from BlockManager perspective axes = [_ensure_index(columns), _ensure_index(index)] return create_block_manager_from_arrays(arrays, arr_names, axes) def extract_index(data): from pandas.core.index import _union_indexes index = None if len(data) == 0: index = Index([]) elif len(data) > 0: raw_lengths = [] indexes = [] have_raw_arrays = False have_series = False have_dicts = False for v in data: if isinstance(v, Series): have_series = True indexes.append(v.index) elif isinstance(v, dict): have_dicts = True indexes.append(list(v.keys())) elif is_list_like(v) and getattr(v, 'ndim', 1) == 1: have_raw_arrays = True raw_lengths.append(len(v)) if not indexes and not raw_lengths: raise ValueError('If using all scalar values, you must pass' ' an index') if have_series or have_dicts: index = _union_indexes(indexes) if have_raw_arrays: lengths = list(set(raw_lengths)) if len(lengths) > 1: raise ValueError('arrays must all be same length') if have_dicts: raise ValueError('Mixing dicts with non-Series may lead to ' 'ambiguous ordering.') if have_series: if lengths[0] != len(index): msg = ('array length %d does not match index length %d' % (lengths[0], len(index))) raise ValueError(msg) else: index = Index(np.arange(lengths[0])) return _ensure_index(index) def _prep_ndarray(values, copy=True): if not isinstance(values, (np.ndarray, Series, Index)): if len(values) == 0: return np.empty((0, 0), dtype=object) def convert(v): return com._possibly_convert_platform(v) # we could have a 1-dim or 2-dim list here # this is equiv of np.asarray, but does object conversion # and platform dtype preservation try: if com.is_list_like(values[0]) or hasattr(values[0], 'len'): values = np.array([convert(v) for v in values]) else: values = convert(values) except: values = convert(values) else: # drop subclass info, do not copy data values = np.asarray(values) if copy: values = values.copy() if values.ndim == 1: values = values.reshape((values.shape[0], 1)) elif values.ndim != 2: raise ValueError('Must pass 2-d input') return values def _to_arrays(data, columns, coerce_float=False, dtype=None): """ Return list of arrays, columns """ if isinstance(data, DataFrame): if columns is not None: arrays = [data.icol(i).values for i, col in enumerate(data.columns) if col in columns] else: columns = data.columns arrays = [data.icol(i).values for i in range(len(columns))] return arrays, columns if not len(data): if isinstance(data, np.ndarray): columns = data.dtype.names if columns is not None: return [[]] * len(columns), columns return [], [] # columns if columns is not None else [] if isinstance(data[0], (list, tuple)): return 
_list_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) elif isinstance(data[0], collections.Mapping): return _list_of_dict_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) elif isinstance(data[0], Series): return _list_of_series_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) elif isinstance(data[0], Categorical): if columns is None: columns = _default_index(len(data)) return data, columns elif (isinstance(data, (np.ndarray, Series, Index)) and data.dtype.names is not None): columns = list(data.dtype.names) arrays = [data[k] for k in columns] return arrays, columns else: # last ditch effort data = lmap(tuple, data) return _list_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) def _masked_rec_array_to_mgr(data, index, columns, dtype, copy): """ extract from a masked rec array and create the manager """ # essentially process a record array then fill it fill_value = data.fill_value fdata = ma.getdata(data) if index is None: index = _get_names_from_index(fdata) if index is None: index = _default_index(len(data)) index = _ensure_index(index) if columns is not None: columns = _ensure_index(columns) arrays, arr_columns = _to_arrays(fdata, columns) # fill if needed new_arrays = [] for fv, arr, col in zip(fill_value, arrays, arr_columns): mask = ma.getmaskarray(data[col]) if mask.any(): arr, fv = _maybe_upcast(arr, fill_value=fv, copy=True) arr[mask] = fv new_arrays.append(arr) # create the manager arrays, arr_columns = _reorder_arrays(new_arrays, arr_columns, columns) if columns is None: columns = arr_columns mgr = _arrays_to_mgr(arrays, arr_columns, index, columns) if copy: mgr = mgr.copy() return mgr def _reorder_arrays(arrays, arr_columns, columns): # reorder according to the columns if (columns is not None and len(columns) and arr_columns is not None and len(arr_columns)): indexer = _ensure_index( arr_columns).get_indexer(columns) arr_columns = _ensure_index( [arr_columns[i] for i in indexer]) arrays = [arrays[i] for i in indexer] return arrays, arr_columns def _list_to_arrays(data, columns, coerce_float=False, dtype=None): if len(data) > 0 and isinstance(data[0], tuple): content = list(lib.to_object_array_tuples(data).T) else: # list of lists content = list(lib.to_object_array(data).T) return _convert_object_array(content, columns, dtype=dtype, coerce_float=coerce_float) def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None): from pandas.core.index import _get_combined_index if columns is None: columns = _get_combined_index([ s.index for s in data if getattr(s, 'index', None) is not None ]) indexer_cache = {} aligned_values = [] for s in data: index = getattr(s, 'index', None) if index is None: index = _default_index(len(s)) if id(index) in indexer_cache: indexer = indexer_cache[id(index)] else: indexer = indexer_cache[id(index)] = index.get_indexer(columns) values = _values_from_object(s) aligned_values.append(com.take_1d(values, indexer)) values = np.vstack(aligned_values) if values.dtype == np.object_: content = list(values.T) return _convert_object_array(content, columns, dtype=dtype, coerce_float=coerce_float) else: return values.T, columns def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None): if columns is None: gen = (list(x.keys()) for x in data) columns = lib.fast_unique_multiple_list_gen(gen) # assure that they are of the base dict class and not of derived # classes data = [(type(d) is dict) and d or dict(d) for d in data] content = list(lib.dicts_to_array(data, 
list(columns)).T) return _convert_object_array(content, columns, dtype=dtype, coerce_float=coerce_float) def _convert_object_array(content, columns, coerce_float=False, dtype=None): if columns is None: columns = _default_index(len(content)) else: if len(columns) != len(content): # pragma: no cover # caller's responsibility to check for this... raise AssertionError('%d columns passed, passed data had %s ' 'columns' % (len(columns), len(content))) # provide soft conversion of object dtypes def convert(arr): if dtype != object and dtype != np.object: arr = lib.maybe_convert_objects(arr, try_float=coerce_float) arr = com._possibly_cast_to_datetime(arr, dtype) return arr arrays = [ convert(arr) for arr in content ] return arrays, columns def _get_names_from_index(data): index = lrange(len(data)) has_some_name = any([getattr(s, 'name', None) is not None for s in data]) if not has_some_name: return index count = 0 for i, s in enumerate(data): n = getattr(s, 'name', None) if n is not None: index[i] = n else: index[i] = 'Unnamed %d' % count count += 1 return index def _homogenize(data, index, dtype=None): from pandas.core.series import _sanitize_array oindex = None homogenized = [] for v in data: if isinstance(v, Series): if dtype is not None: v = v.astype(dtype) if v.index is not index: # Forces alignment. No need to copy data since we # are putting it into an ndarray later v = v.reindex(index, copy=False) else: if isinstance(v, dict): if oindex is None: oindex = index.astype('O') if type(v) == dict: # fast cython method v = lib.fast_multiget(v, oindex.values, default=NA) else: v = lib.map_infer(oindex.values, v.get) v = _sanitize_array(v, index, dtype=dtype, copy=False, raise_cast_failure=False) homogenized.append(v) return homogenized def _from_nested_dict(data): # TODO: this should be seriously cythonized new_data = OrderedDict() for index, s in compat.iteritems(data): for col, v in compat.iteritems(s): new_data[col] = new_data.get(col, OrderedDict()) new_data[col][index] = v return new_data def _put_str(s, space): return ('%s' % s)[:space].ljust(space) #---------------------------------------------------------------------- # Add plotting methods to DataFrame import pandas.tools.plotting as gfx DataFrame.plot = gfx.plot_frame DataFrame.hist = gfx.hist_frame @Appender(_shared_docs['boxplot'] % _shared_doc_kwargs) def boxplot(self, column=None, by=None, ax=None, fontsize=None, rot=0, grid=True, figsize=None, layout=None, return_type=None, **kwds): import pandas.tools.plotting as plots import matplotlib.pyplot as plt ax = plots.boxplot(self, column=column, by=by, ax=ax, fontsize=fontsize, grid=grid, rot=rot, figsize=figsize, layout=layout, return_type=return_type, **kwds) plt.draw_if_interactive() return ax DataFrame.boxplot = boxplot ops.add_flex_arithmetic_methods(DataFrame, **ops.frame_flex_funcs) ops.add_special_arithmetic_methods(DataFrame, **ops.frame_special_funcs) if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
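# (Editor's note, illustrative only: the ``ops.add_flex_arithmetic_methods``
# and ``ops.add_special_arithmetic_methods`` calls near the end of this module
# are what attach both the flex API (``df.add(other, fill_value=...)``,
# ``df.sub``, ``df.mul``, ...) and the operator special methods (``__add__``
# and friends) to DataFrame at import time.)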
avg_line_length: 37.066978
max_line_length: 120
alphanum_fraction: 0.544134
from __future__ import division import functools import collections import itertools import sys import types import warnings from numpy import nan as NA import numpy as np import numpy.ma as ma from pandas.core.common import (isnull, notnull, PandasError, _try_sort, _default_index, _maybe_upcast, is_sequence, _infer_dtype_from_scalar, _values_from_object, is_list_like, _get_dtype, _maybe_box_datetimelike, is_categorical_dtype, is_object_dtype, _possibly_infer_to_datetimelike) from pandas.core.generic import NDFrame, _shared_docs from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable, check_bool_indexer) from pandas.core.internals import (BlockManager, create_block_manager_from_arrays, create_block_manager_from_blocks) from pandas.core.series import Series from pandas.core.categorical import Categorical import pandas.computation.expressions as expressions from pandas.computation.eval import eval as _eval from numpy import percentile as _quantile from pandas.compat import(range, zip, lrange, lmap, lzip, StringIO, u, OrderedDict, raise_with_traceback) from pandas import compat from pandas.sparse.array import SparseArray from pandas.util.decorators import deprecate, Appender, Substitution, \ deprecate_kwarg from pandas.tseries.period import PeriodIndex from pandas.tseries.index import DatetimeIndex import pandas.core.algorithms as algos import pandas.core.common as com import pandas.core.format as fmt import pandas.core.nanops as nanops import pandas.core.ops as ops import pandas.lib as lib import pandas.algos as _algos from pandas.core.config import get_option _shared_doc_kwargs = dict(axes='index, columns', klass='DataFrame', axes_single_arg="{0,1,'index','columns'}") _numeric_only_doc = """numeric_only : boolean, default None Include only float, int, boolean data. If None, will attempt to use everything, then use only numeric data """ _merge_doc = """ Merge DataFrame objects by performing a database-style join operation by columns or indexes. If joining columns on columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes on indexes or indexes on a column or columns, the index will be passed on. Parameters ----------%s right : DataFrame how : {'left', 'right', 'outer', 'inner'}, default 'inner' * left: use only keys from left frame (SQL: left outer join) * right: use only keys from right frame (SQL: right outer join) * outer: use union of keys from both frames (SQL: full outer join) * inner: use intersection of keys from both frames (SQL: inner join) on : label or list Field names to join on. Must be found in both DataFrames. If on is None and not merging on indexes, then it merges on the intersection of the columns by default. left_on : label or list, or array-like Field names to join on in left DataFrame. Can be a vector or list of vectors of the length of the DataFrame to use a particular vector as the join key instead of columns right_on : label or list, or array-like Field names to join on in right DataFrame or vector/list of vectors per left_on docs left_index : boolean, default False Use the index from the left DataFrame as the join key(s). If it is a MultiIndex, the number of keys in the other DataFrame (either the index or a number of columns) must match the number of levels right_index : boolean, default False Use the index from the right DataFrame as the join key. 
Same caveats as left_index sort : boolean, default False Sort the join keys lexicographically in the result DataFrame suffixes : 2-length sequence (tuple, list, ...) Suffix to apply to overlapping column names in the left and right side, respectively copy : boolean, default True If False, do not copy data unnecessarily Examples -------- >>> A >>> B lkey value rkey value 0 foo 1 0 foo 5 1 bar 2 1 bar 6 2 baz 3 2 qux 7 3 foo 4 3 bar 8 >>> merge(A, B, left_on='lkey', right_on='rkey', how='outer') lkey value_x rkey value_y 0 foo 1 foo 5 1 foo 4 foo 5 2 bar 2 bar 6 3 bar 2 bar 8 4 baz 3 NaN NaN 5 NaN NaN qux 7 Returns ------- merged : DataFrame The output type will the be same as 'left', if it is a subclass of DataFrame. """ class DataFrame(NDFrame): _auto_consolidate = True @property def _constructor(self): return DataFrame _constructor_sliced = Series def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False): if data is None: data = {} if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._data if isinstance(data, BlockManager): mgr = self._init_mgr(data, axes=dict(index=index, columns=columns), dtype=dtype, copy=copy) elif isinstance(data, dict): mgr = self._init_dict(data, index, columns, dtype=dtype) elif isinstance(data, ma.MaskedArray): import numpy.ma.mrecords as mrecords if isinstance(data, mrecords.MaskedRecords): mgr = _masked_rec_array_to_mgr(data, index, columns, dtype, copy) else: mask = ma.getmaskarray(data) if mask.any(): data, fill_value = _maybe_upcast(data, copy=True) data[mask] = fill_value else: data = data.copy() mgr = self._init_ndarray(data, index, columns, dtype=dtype, copy=copy) elif isinstance(data, (np.ndarray, Series, Index)): if data.dtype.names: data_columns = list(data.dtype.names) data = dict((k, data[k]) for k in data_columns) if columns is None: columns = data_columns mgr = self._init_dict(data, index, columns, dtype=dtype) elif getattr(data, 'name', None): mgr = self._init_dict({data.name: data}, index, columns, dtype=dtype) else: mgr = self._init_ndarray(data, index, columns, dtype=dtype, copy=copy) elif isinstance(data, (list, types.GeneratorType)): if isinstance(data, types.GeneratorType): data = list(data) if len(data) > 0: if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1: arrays, columns = _to_arrays(data, columns, dtype=dtype) columns = _ensure_index(columns) if index is None: if isinstance(data[0], Series): index = _get_names_from_index(data) elif isinstance(data[0], Categorical): index = _default_index(len(data[0])) else: index = _default_index(len(data)) mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype) else: mgr = self._init_ndarray(data, index, columns, dtype=dtype, copy=copy) else: mgr = self._init_ndarray(data, index, columns, dtype=dtype, copy=copy) elif isinstance(data, collections.Iterator): raise TypeError("data argument can't be an iterator") else: try: arr = np.array(data, dtype=dtype, copy=copy) except (ValueError, TypeError) as e: exc = TypeError('DataFrame constructor called with ' 'incompatible data and dtype: %s' % e) raise_with_traceback(exc) if arr.ndim == 0 and index is not None and columns is not None: if isinstance(data, compat.string_types) and dtype is None: dtype = np.object_ if dtype is None: dtype, data = _infer_dtype_from_scalar(data) values = np.empty((len(index), len(columns)), dtype=dtype) values.fill(data) mgr = self._init_ndarray(values, index, columns, dtype=dtype, copy=False) else: raise PandasError('DataFrame 
constructor not properly called!') NDFrame.__init__(self, mgr, fastpath=True) def _init_dict(self, data, index, columns, dtype=None): if columns is not None: columns = _ensure_index(columns) # prefilter if columns passed data = dict((k, v) for k, v in compat.iteritems(data) if k in columns) if index is None: index = extract_index(list(data.values())) else: index = _ensure_index(index) arrays = [] data_names = [] for k in columns: if k not in data: # no obvious "empty" int column if dtype is not None and issubclass(dtype.type, np.integer): continue if dtype is None: # 1783 v = np.empty(len(index), dtype=object) else: v = np.empty(len(index), dtype=dtype) v.fill(NA) else: v = data[k] data_names.append(k) arrays.append(v) else: keys = list(data.keys()) if not isinstance(data, OrderedDict): keys = _try_sort(keys) columns = data_names = Index(keys) arrays = [data[k] for k in keys] return _arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype) def _init_ndarray(self, values, index, columns, dtype=None, copy=False): # input must be a ndarray, list, Series, index if isinstance(values, Series): if columns is None: if values.name is not None: columns = [values.name] if index is None: index = values.index else: values = values.reindex(index) # zero len case (GH #2234) if not len(values) and columns is not None and len(columns): values = np.empty((0, 1), dtype=object) # helper to create the axes as indexes def _get_axes(N, K, index=index, columns=columns): # return axes or defaults if index is None: index = _default_index(N) else: index = _ensure_index(index) if columns is None: columns = _default_index(K) else: columns = _ensure_index(columns) return index, columns # we could have a categorical type passed or coerced to 'category' # recast this to an _arrays_to_mgr if is_categorical_dtype(getattr(values,'dtype',None)) or is_categorical_dtype(dtype): if not hasattr(values,'dtype'): values = _prep_ndarray(values, copy=copy) values = values.ravel() elif copy: values = values.copy() index, columns = _get_axes(len(values),1) return _arrays_to_mgr([ values ], columns, index, columns, dtype=dtype) # by definition an array here # the dtypes will be coerced to a single dtype values = _prep_ndarray(values, copy=copy) if dtype is not None: if values.dtype != dtype: try: values = values.astype(dtype) except Exception as orig: e = ValueError("failed to cast to '%s' (Exception was: %s)" % (dtype, orig)) raise_with_traceback(e) index, columns = _get_axes(*values.shape) values = values.T # if we don't have a dtype specified, then try to convert objects # embedded in an object type if dtype is None and is_object_dtype(values): values = _possibly_infer_to_datetimelike(values) return create_block_manager_from_blocks([values], [columns, index]) @property def axes(self): return [self.index, self.columns] @property def shape(self): return (len(self.index), len(self.columns)) def _repr_fits_vertical_(self): max_rows = get_option("display.max_rows") return len(self) <= max_rows def _repr_fits_horizontal_(self, ignore_width=False): width, height = fmt.get_console_size() max_columns = get_option("display.max_columns") nb_columns = len(self.columns) # exceed max columns if ((max_columns and nb_columns > max_columns) or ((not ignore_width) and width and nb_columns > (width // 2))): return False if (ignore_width # used by repr_html under IPython notebook # scripts ignore terminal dims or not com.in_interactive_session()): return True if (get_option('display.width') is not None or com.in_ipython_frontend()): # check at 
least the column row for excessive width max_rows = 1 else: max_rows = get_option("display.max_rows") # when auto-detecting, so width=None and not in ipython front end # check whether repr fits horizontal by actualy checking # the width of the rendered repr buf = StringIO() # only care about the stuff we'll actually print out d = self if not (max_rows is None): d = d.iloc[:min(max_rows, len(d))] else: return True d.to_string(buf=buf) value = buf.getvalue() repr_width = max([len(l) for l in value.split('\n')]) return repr_width < width def _info_repr(self): info_repr_option = (get_option("display.large_repr") == "info") return info_repr_option and not ( self._repr_fits_horizontal_() and self._repr_fits_vertical_() ) def __unicode__(self): buf = StringIO(u("")) if self._info_repr(): self.info(buf=buf) return buf.getvalue() max_rows = get_option("display.max_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") if get_option("display.expand_frame_repr"): width, _ = fmt.get_console_size() else: width = None self.to_string(buf=buf, max_rows=max_rows, max_cols=max_cols, line_width=width, show_dimensions=show_dimensions) return buf.getvalue() def _repr_html_(self): # behaves badly when outputting an HTML table # that doesn't fit the window, so disable it. if com.in_qtconsole(): return None if self._info_repr(): buf = StringIO(u("")) self.info(buf=buf) val = buf.getvalue().replace('<', r'&lt;', 1).replace('>', r'&gt;', 1) return '<pre>' + val + '</pre>' if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") return ('<div style="max-height:1000px;' 'max-width:1500px;overflow:auto;">\n' + self.to_html(max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions) + '\n</div>') else: return None def iteritems(self): if self.columns.is_unique and hasattr(self, '_item_cache'): for k in self.columns: yield k, self._get_item_cache(k) else: for i, k in enumerate(self.columns): yield k, self.icol(i) def iterrows(self): columns = self.columns for k, v in zip(self.index, self.values): s = Series(v, index=columns, name=k) yield k, s def itertuples(self, index=True): arrays = [] if index: arrays.append(self.index) arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) return zip(*arrays) if compat.PY3: items = iteritems def __len__(self): return len(self.index) def dot(self, other): if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if (len(common) > len(self.columns) or len(common) > len(other.index)): raise ValueError('matrices are not aligned') left = self.reindex(columns=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError('Dot product shape mismatch, %s vs %s' % (lvals.shape, rvals.shape)) if isinstance(other, DataFrame): return self._constructor(np.dot(lvals, rvals), index=left.index, columns=other.columns) elif isinstance(other, Series): return Series(np.dot(lvals, rvals), index=left.index) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index) else: return Series(result, index=left.index) else: raise TypeError('unsupported type: %s' % type(other)) @classmethod def from_dict(cls, data, orient='columns', 
dtype=None): index, columns = None, None orient = orient.lower() if orient == 'index': if len(data) > 0: if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: data, index = list(data.values()), list(data.keys()) elif orient != 'columns': raise ValueError('only recognize index or columns for orient') return cls(data, index=index, columns=columns, dtype=dtype) @deprecate_kwarg(old_arg_name='outtype', new_arg_name='orient') def to_dict(self, orient='dict'): if not self.columns.is_unique: warnings.warn("DataFrame columns are not unique, some " "columns will be omitted.", UserWarning) if orient.lower().startswith('d'): return dict((k, v.to_dict()) for k, v in compat.iteritems(self)) elif orient.lower().startswith('l'): return dict((k, v.tolist()) for k, v in compat.iteritems(self)) elif orient.lower().startswith('sp'): return {'index': self.index.tolist(), 'columns': self.columns.tolist(), 'data': self.values.tolist()} elif orient.lower().startswith('s'): return dict((k, v) for k, v in compat.iteritems(self)) elif orient.lower().startswith('r'): return [dict((k, v) for k, v in zip(self.columns, row)) for row in self.values] else: raise ValueError("orient '%s' not understood" % orient) def to_gbq(self, destination_table, project_id=None, chunksize=10000, verbose=True, reauth=False): from pandas.io import gbq return gbq.to_gbq(self, destination_table, project_id=project_id, chunksize=chunksize, verbose=verbose, reauth=reauth) @classmethod def from_records(cls, data, index=None, exclude=None, columns=None, coerce_float=False, nrows=None): if columns is not None: columns = _ensure_index(columns) if com.is_iterator(data): if nrows == 0: return cls() try: if compat.PY3: first_row = next(data) else: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, 'dtype') and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = _ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns = [] for k, v in compat.iteritems(data): if k in columns: arr_columns.append(k) arrays.append(v) arrays, arr_columns = _reorder_arrays(arrays, arr_columns, columns) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = _to_arrays(data, columns) if columns is not None: columns = _ensure_index(columns) arr_columns = columns else: arrays, arr_columns = _to_arrays(data, columns, coerce_float=coerce_float) arr_columns = _ensure_index(arr_columns) if columns is not None: columns = _ensure_index(columns) else: columns = arr_columns if exclude is None: exclude = set() else: exclude = set(exclude) result_index = None if index is not None: if (isinstance(index, compat.string_types) or not hasattr(index, "__iter__")): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: to_remove = [arr_columns.get_loc(field) for field in index] result_index = MultiIndex.from_arrays( [arrays[i] for i in to_remove], names=index) exclude.update(index) except Exception: result_index = index if any(exclude): arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in 
enumerate(arrays) if i not in to_remove] arr_columns = arr_columns.drop(arr_exclude) columns = columns.drop(exclude) mgr = _arrays_to_mgr(arrays, arr_columns, result_index, columns) return cls(mgr) def to_records(self, index=True, convert_datetime64=True): if index: if com.is_datetime64_dtype(self.index) and convert_datetime64: ix_vals = [self.index.to_pydatetime()] else: if isinstance(self.index, MultiIndex): ix_vals = lmap(np.array, zip(*self.index.values)) else: ix_vals = [self.index.values] arrays = ix_vals + [self[c].get_values() for c in self.columns] count = 0 index_names = list(self.index.names) if isinstance(self.index, MultiIndex): for i, n in enumerate(index_names): if n is None: index_names[i] = 'level_%d' % count count += 1 elif index_names[0] is None: index_names = ['index'] names = index_names + lmap(str, self.columns) else: arrays = [self[c].get_values() for c in self.columns] names = lmap(str, self.columns) dtype = np.dtype([(x, v.dtype) for x, v in zip(names, arrays)]) return np.rec.fromarrays(arrays, dtype=dtype, names=names) @classmethod def from_items(cls, items, columns=None, orient='columns'): keys, values = lzip(*items) if orient == 'columns': if columns is not None: columns = _ensure_index(columns) idict = dict(items) if len(idict) < len(items): if not columns.equals(_ensure_index(keys)): raise ValueError('With non-unique item names, passed ' 'columns must be identical') arrays = values else: arrays = [idict[k] for k in columns if k in idict] else: columns = _ensure_index(keys) arrays = values return cls._from_arrays(arrays, columns, None) elif orient == 'index': if columns is None: raise TypeError("Must pass columns with orient='index'") keys = _ensure_index(keys) arr = np.array(values, dtype=object).T data = [lib.maybe_convert_objects(v) for v in arr] return cls._from_arrays(data, columns, keys) else: raise ValueError("'orient' must be either 'columns' or 'index'") @classmethod def _from_arrays(cls, arrays, columns, index, dtype=None): mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype) return cls(mgr) @classmethod def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True, encoding=None, tupleize_cols=False, infer_datetime_format=False): from pandas.io.parsers import read_table return read_table(path, header=header, sep=sep, parse_dates=parse_dates, index_col=index_col, encoding=encoding, tupleize_cols=tupleize_cols, infer_datetime_format=infer_datetime_format) def to_sparse(self, fill_value=None, kind='block'): from pandas.core.sparse import SparseDataFrame return SparseDataFrame(self._series, index=self.index, default_kind=kind, default_fill_value=fill_value) def to_panel(self): from pandas.core.panel import Panel if (not isinstance(self.index, MultiIndex) or len(self.index.levels) != 2): raise NotImplementedError('Only 2-level MultiIndex are supported.') if not self.index.is_unique: raise ValueError("Can't convert non-uniquely indexed " "DataFrame to Panel") self._consolidate_inplace() # minor axis must be sorted if self.index.lexsort_depth < 2: selfsorted = self.sortlevel(0) else: selfsorted = self major_axis, minor_axis = selfsorted.index.levels major_labels, minor_labels = selfsorted.index.labels shape = len(major_axis), len(minor_axis) # preserve names, if any major_axis = major_axis.copy() major_axis.name = self.index.names[0] minor_axis = minor_axis.copy() minor_axis.name = self.index.names[1] # create new axes new_axes = [selfsorted.columns, major_axis, minor_axis] # create new manager new_mgr = 
selfsorted._data.reshape_nd(axes=new_axes, labels=[major_labels, minor_labels], shape=shape, ref_items=selfsorted.columns) return Panel(new_mgr) to_wide = deprecate('to_wide', to_panel) def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, columns=None, header=True, index=True, index_label=None, mode='w', encoding=None, quoting=None, quotechar='"', line_terminator='\n', chunksize=None, tupleize_cols=False, date_format=None, doublequote=True, escapechar=None, decimal='.', **kwds): formatter = fmt.CSVFormatter(self, path_or_buf, line_terminator=line_terminator, sep=sep, encoding=encoding, quoting=quoting, na_rep=na_rep, float_format=float_format, cols=columns, header=header, index=index, index_label=index_label, mode=mode, chunksize=chunksize, quotechar=quotechar, engine=kwds.get("engine"), tupleize_cols=tupleize_cols, date_format=date_format, doublequote=doublequote, escapechar=escapechar, decimal=decimal) formatter.save() if path_or_buf is None: return formatter.path_or_buf.getvalue() def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='', float_format=None, columns=None, header=True, index=True, index_label=None, startrow=0, startcol=0, engine=None, merge_cells=True, encoding=None, inf_rep='inf'): from pandas.io.excel import ExcelWriter need_save = False if encoding == None: encoding = 'ascii' if isinstance(excel_writer, compat.string_types): excel_writer = ExcelWriter(excel_writer, engine=engine) need_save = True formatter = fmt.ExcelFormatter(self, na_rep=na_rep, cols=columns, header=header, float_format=float_format, index=index, index_label=index_label, merge_cells=merge_cells, inf_rep=inf_rep) formatted_cells = formatter.get_formatted_cells() excel_writer.write_cells(formatted_cells, sheet_name, startrow=startrow, startcol=startcol) if need_save: excel_writer.save() def to_stata( self, fname, convert_dates=None, write_index=True, encoding="latin-1", byteorder=None, time_stamp=None, data_label=None): from pandas.io.stata import StataWriter writer = StataWriter(fname, self, convert_dates=convert_dates, encoding=encoding, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index) writer.write_file() @Appender(fmt.docstring_to_string, indents=1) def to_string(self, buf=None, columns=None, col_space=None, colSpace=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, line_width=None, max_rows=None, max_cols=None, show_dimensions=False): if colSpace is not None: # pragma: no cover warnings.warn("colSpace is deprecated, use col_space", FutureWarning) col_space = colSpace formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, line_width=line_width, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions) formatter.to_string() if buf is None: result = formatter.buf.getvalue() return result @Appender(fmt.docstring_to_string, indents=1) def to_html(self, buf=None, columns=None, col_space=None, colSpace=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, bold_rows=True, classes=None, escape=True, max_rows=None, max_cols=None, show_dimensions=False): if colSpace is not None: # pragma: no cover warnings.warn("colSpace is deprecated, use col_space", FutureWarning) col_space = colSpace 
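        # Illustrative sketch only (toy frame assumed, not part of the original
        # module): like ``to_string``/``to_csv`` above, ``to_html`` returns the
        # rendered text when ``buf`` is None.
        #   >>> df = DataFrame({'a': [1, 2], 'b': [3.5, None]})
        #   >>> html = df.to_html(na_rep='-')
        #   >>> html.startswith('<table')
        #   True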
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, bold_rows=bold_rows, escape=escape, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions) formatter.to_html(classes=classes) if buf is None: return formatter.buf.getvalue() @Appender(fmt.docstring_to_string, indents=1) def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, bold_rows=True, longtable=False, escape=True): if colSpace is not None: # pragma: no cover warnings.warn("colSpace is deprecated, use col_space", FutureWarning) col_space = colSpace formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns, col_space=col_space, na_rep=na_rep, header=header, index=index, formatters=formatters, float_format=float_format, bold_rows=bold_rows, sparsify=sparsify, index_names=index_names, escape=escape) formatter.to_latex(longtable=longtable) if buf is None: return formatter.buf.getvalue() def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None): from pandas.core.format import _put_lines if buf is None: # pragma: no cover buf = sys.stdout lines = [] lines.append(str(type(self))) lines.append(self.index.summary()) if len(self.columns) == 0: lines.append('Empty %s' % type(self).__name__) _put_lines(buf, lines) return cols = self.columns # hack if max_cols is None: max_cols = get_option( 'display.max_info_columns', len(self.columns) + 1) max_rows = get_option('display.max_info_rows', len(self) + 1) if null_counts is None: show_counts = ((len(self.columns) <= max_cols) and (len(self) < max_rows)) else: show_counts = null_counts exceeds_info_cols = len(self.columns) > max_cols def _verbose_repr(): lines.append('Data columns (total %d columns):' % len(self.columns)) space = max([len(com.pprint_thing(k)) for k in self.columns]) + 4 counts = None tmpl = "%s%s" if show_counts: counts = self.count() if len(cols) != len(counts): # pragma: no cover raise AssertionError('Columns must equal counts (%d != %d)' % (len(cols), len(counts))) tmpl = "%s non-null %s" dtypes = self.dtypes for i, col in enumerate(self.columns): dtype = dtypes[col] col = com.pprint_thing(col) count = "" if show_counts: count = counts.iloc[i] lines.append(_put_str(col, space) + tmpl % (count, dtype)) def _non_verbose_repr(): lines.append(self.columns.summary(name='Columns')) def _sizeof_fmt(num, size_qualifier): # returns size in human readable format for x in ['bytes', 'KB', 'MB', 'GB', 'TB']: if num < 1024.0: return "%3.1f%s %s" % (num, size_qualifier, x) num /= 1024.0 return "%3.1f%s %s" % (num, size_qualifier, 'PB') if verbose: _verbose_repr() elif verbose is False: # specifically set to False, not nesc None _non_verbose_repr() else: if exceeds_info_cols: _non_verbose_repr() else: _verbose_repr() counts = self.get_dtype_counts() dtypes = ['%s(%d)' % k for k in sorted(compat.iteritems(counts))] lines.append('dtypes: %s' % ', '.join(dtypes)) if memory_usage is None: memory_usage = get_option('display.memory_usage') if memory_usage: # append memory usage of df to display # size_qualifier is just a best effort; not guaranteed to catch all # cases (e.g., it misses categorical data even with object # categories) size_qualifier = ('+' if 'object' in counts or is_object_dtype(self.index) else '') mem_usage = 
self.memory_usage(index=True).sum() lines.append("memory usage: %s\n" % _sizeof_fmt(mem_usage, size_qualifier)) _put_lines(buf, lines) def memory_usage(self, index=False): result = Series([ c.values.nbytes for col, c in self.iteritems() ], index=self.columns) if index: result = Series(self.index.nbytes, index=['Index']).append(result) return result def transpose(self): return super(DataFrame, self).transpose(1, 0) T = property(transpose) #---------------------------------------------------------------------- # Picklability # legacy pickle formats def _unpickle_frame_compat(self, state): # pragma: no cover from pandas.core.common import _unpickle_array if len(state) == 2: # pragma: no cover series, idx = state columns = sorted(series) else: series, cols, idx = state columns = _unpickle_array(cols) index = _unpickle_array(idx) self._data = self._init_dict(series, index, columns, None) def _unpickle_matrix_compat(self, state): # pragma: no cover from pandas.core.common import _unpickle_array # old unpickling (vals, idx, cols), object_state = state index = _unpickle_array(idx) dm = DataFrame(vals, index=index, columns=_unpickle_array(cols), copy=False) if object_state is not None: ovals, _, ocols = object_state objects = DataFrame(ovals, index=index, columns=_unpickle_array(ocols), copy=False) dm = dm.join(objects) self._data = dm._data #---------------------------------------------------------------------- #---------------------------------------------------------------------- # Getting and setting elements def get_value(self, index, col, takeable=False): if takeable: series = self._iget_item_cache(col) return _maybe_box_datetimelike(series.values[index]) series = self._get_item_cache(col) engine = self.index._engine return engine.get_value(series.get_values(), index) def set_value(self, index, col, value, takeable=False): try: if takeable is True: series = self._iget_item_cache(col) return series.set_value(index, value, takeable=True) series = self._get_item_cache(col) engine = self.index._engine engine.set_value(series.values, index, value) return self except (KeyError, TypeError): # set using a non-recursive method & reset the cache self.loc[index, col] = value self._item_cache.pop(col, None) return self def irow(self, i, copy=False): return self._ixs(i, axis=0) def icol(self, i): return self._ixs(i, axis=1) def _ixs(self, i, axis=0): # irow if axis == 0: if isinstance(i, slice): return self[i] else: label = self.index[i] if isinstance(label, Index): # a location index by definition result = self.take(i, axis=axis) copy=True else: new_values = self._data.fast_xs(i) # if we are a copy, mark as such copy = isinstance(new_values,np.ndarray) and new_values.base is None result = Series(new_values, index=self.columns, name=self.index[i], dtype=new_values.dtype) result._set_is_copy(self, copy=copy) return result # icol else: """ Notes ----- If slice passed, the resulting data will be a view """ label = self.columns[i] if isinstance(i, slice): # need to return view lab_slice = slice(label[0], label[-1]) return self.ix[:, lab_slice] else: label = self.columns[i] if isinstance(label, Index): return self.take(i, axis=1, convert=True) # if the values returned are not the same length # as the index (iow a not found value), iget returns # a 0-len ndarray. 
This is effectively catching # a numpy error (as numpy should really raise) values = self._data.iget(i) if not len(values): values = np.array([np.nan] * len(self.index), dtype=object) result = self._constructor_sliced.from_array( values, index=self.index, name=label, fastpath=True) # this is a cached value, mark it so result._set_as_cached(label, self) return result def iget_value(self, i, j): return self.iat[i, j] def __getitem__(self, key): # shortcut if we are an actual column is_mi_columns = isinstance(self.columns, MultiIndex) try: if key in self.columns and not is_mi_columns: return self._getitem_column(key) except: pass # see if we can slice the rows indexer = convert_to_index_sliceable(self, key) if indexer is not None: return self._getitem_slice(indexer) if isinstance(key, (Series, np.ndarray, Index, list)): # either boolean or fancy integer index return self._getitem_array(key) elif isinstance(key, DataFrame): return self._getitem_frame(key) elif is_mi_columns: return self._getitem_multilevel(key) else: return self._getitem_column(key) def _getitem_column(self, key): # get column if self.columns.is_unique: return self._get_item_cache(key) # duplicate columns & possible reduce dimensionaility result = self._constructor(self._data.get(key)) if result.columns.is_unique: result = result[key] return result def _getitem_slice(self, key): return self._slice(key, axis=0) def _getitem_array(self, key): # also raises Exception if object array with NA values if com.is_bool_indexer(key): # warning here just in case -- previously __setitem__ was # reindexing but __getitem__ was not; it seems more reasonable to # go with the __setitem__ behavior since that is more consistent # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn("Boolean Series key will be reindexed to match " "DataFrame index.", UserWarning) elif len(key) != len(self.index): raise ValueError('Item wrong length %d instead of %d.' 
% (len(key), len(self.index))) # check_bool_indexer will throw exception if Series key cannot # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] return self.take(indexer, axis=0, convert=False) else: indexer = self.ix._convert_to_indexer(key, axis=1) return self.take(indexer, axis=1, convert=True) def _getitem_multilevel(self, key): loc = self.columns.get_loc(key) if isinstance(loc, (slice, Series, np.ndarray, Index)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) if self._is_mixed_type: result = self.reindex(columns=new_columns) result.columns = result_columns else: new_values = self.values[:, loc] result = DataFrame(new_values, index=self.index, columns=result_columns).__finalize__(self) if len(result.columns) == 1: top = result.columns[0] if ((type(top) == str and top == '') or (type(top) == tuple and top[0] == '')): result = result[''] if isinstance(result, Series): result = Series(result, index=self.index, name=key) result._set_is_copy(self) return result else: return self._get_item_cache(key) def _getitem_frame(self, key): if key.values.dtype != np.bool_: raise ValueError('Must pass DataFrame with boolean values only') return self.where(key) def query(self, expr, **kwargs): kwargs['level'] = kwargs.pop('level', 0) + 1 res = self.eval(expr, **kwargs) try: return self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query return self[res] def eval(self, expr, **kwargs): resolvers = kwargs.pop('resolvers', None) kwargs['level'] = kwargs.pop('level', 0) + 1 if resolvers is None: index_resolvers = self._get_index_resolvers() resolvers = dict(self.iteritems()), index_resolvers kwargs['target'] = self kwargs['resolvers'] = kwargs.get('resolvers', ()) + resolvers return _eval(expr, **kwargs) def select_dtypes(self, include=None, exclude=None): include, exclude = include or (), exclude or () if not (com.is_list_like(include) and com.is_list_like(exclude)): raise TypeError('include and exclude must both be non-string' ' sequences') selection = tuple(map(frozenset, (include, exclude))) if not any(selection): raise ValueError('at least one of include or exclude must be ' 'nonempty') # convert the myriad valid dtypes object to a single representation include, exclude = map(lambda x: frozenset(map(com._get_dtype_from_object, x)), selection) for dtypes in (include, exclude): com._invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError('include and exclude overlap on %s' % (include & exclude)) # empty include/exclude -> defaults to True # three cases (we've already raised if both are empty) # case 1: empty include, nonempty exclude # we have True, True, ... 
True for include, same for exclude # in the loop below we get the excluded # and when we call '&' below we get only the excluded # case 2: nonempty include, empty exclude # same as case 1, but with include # case 3: both nonempty # the "union" of the logic of case 1 and case 2: # we get the included and excluded, and return their logical and include_these = Series(not bool(include), index=self.columns) exclude_these = Series(not bool(exclude), index=self.columns) def is_dtype_instance_mapper(column, dtype): return column, functools.partial(issubclass, dtype.type) for column, f in itertools.starmap(is_dtype_instance_mapper, self.dtypes.iteritems()): if include: # checks for the case of empty include or exclude include_these[column] = any(map(f, include)) if exclude: exclude_these[column] = not any(map(f, exclude)) dtype_indexer = include_these & exclude_these return self.loc[com._get_info_slice(self, dtype_indexer)] def _box_item_values(self, key, values): items = self.columns[self.columns.get_loc(key)] if values.ndim == 2: return self._constructor(values.T, columns=items, index=self.index) else: return self._box_col_values(values, items) def _box_col_values(self, values, items): return self._constructor_sliced.from_array(values, index=self.index, name=items, fastpath=True) def __setitem__(self, key, value): # see if we can slice the rows indexer = convert_to_index_sliceable(self, key) if indexer is not None: return self._setitem_slice(indexer, value) if isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) elif isinstance(key, DataFrame): self._setitem_frame(key, value) else: # set column self._set_item(key, value) def _setitem_slice(self, key, value): self._check_setitem_copy() self.ix._setitem_with_indexer(key, value) def _setitem_array(self, key, value): # also raises Exception if object array with NA values if com.is_bool_indexer(key): if len(key) != len(self.index): raise ValueError('Item wrong length %d instead of %d!' % (len(key), len(self.index))) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] self._check_setitem_copy() self.ix._setitem_with_indexer(indexer, value) else: if isinstance(value, DataFrame): if len(value.columns) != len(key): raise ValueError('Columns must be same length as key') for k1, k2 in zip(key, value.columns): self[k1] = value[k2] else: indexer = self.ix._convert_to_indexer(key, axis=1) self._check_setitem_copy() self.ix._setitem_with_indexer((slice(None), indexer), value) def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. 
# df[df > df2] = 0 if key.values.dtype != np.bool_: raise TypeError('Must pass DataFrame with boolean values only') self._check_inplace_setting(value) self._check_setitem_copy() self.where(-key, value, inplace=True) def _ensure_valid_index(self, value): if not len(self.index): # GH5632, make sure that we are a Series convertible if is_list_like(value): try: value = Series(value) except: pass if not isinstance(value, Series): raise ValueError('Cannot set a frame with no defined index ' 'and a value that cannot be converted to a ' 'Series') self._data = self._data.reindex_axis(value.index.copy(), axis=1, fill_value=np.nan) # we are a scalar # noop else: pass def _set_item(self, key, value): self._ensure_valid_index(value) value = self._sanitize_column(key, value) NDFrame._set_item(self, key, value) # check if we are modifying a copy # try to set first as we want an invalid # value exeption to occur first if len(self): self._check_setitem_copy() def insert(self, loc, column, value, allow_duplicates=False): self._ensure_valid_index(value) value = self._sanitize_column(column, value) self._data.insert( loc, column, value, allow_duplicates=allow_duplicates) def assign(self, **kwargs): data = self.copy() # do all calculations first... results = {} for k, v in kwargs.items(): if callable(v): results[k] = v(data) else: results[k] = v # ... and then assign for k, v in results.items(): data[k] = v return data def _sanitize_column(self, key, value): # Need to make sure new columns (which go into the BlockManager as new # blocks) are always copied def reindexer(value): # reindex if necessary if value.index.equals(self.index) or not len(self.index): value = value.values.copy() else: # GH 4107 try: value = value.reindex(self.index).values except Exception as e: # duplicate axis if not value.index.is_unique: raise e # other raise TypeError('incompatible index of inserted column ' 'with frame index') return value if isinstance(value, Series): value = reindexer(value) elif isinstance(value, DataFrame): # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame if isinstance(self.columns, MultiIndex) and key in self.columns: loc = self.columns.get_loc(key) if isinstance(loc, (slice, Series, np.ndarray, Index)): cols = maybe_droplevels(self.columns[loc], key) if len(cols) and not cols.equals(value.columns): value = value.reindex_axis(cols, axis=1) # now align rows value = reindexer(value).T elif isinstance(value, Categorical): value = value.copy() elif (isinstance(value, Index) or is_sequence(value)): from pandas.core.series import _sanitize_index # turn me into an ndarray value = _sanitize_index(value, self.index, copy=False) if not isinstance(value, (np.ndarray, Index)): if isinstance(value, list) and len(value) > 0: value = com._possibly_convert_platform(value) else: value = com._asarray_tuplesafe(value) elif value.ndim == 2: value = value.copy().T else: value = value.copy() # possibly infer to datetimelike if is_object_dtype(value.dtype): value = _possibly_infer_to_datetimelike(value.ravel()).reshape(value.shape) else: # upcast the scalar dtype, value = _infer_dtype_from_scalar(value) value = np.repeat(value, len(self.index)).astype(dtype) value = com._possibly_cast_to_datetime(value, dtype) # return unconsolidatables directly if isinstance(value, (Categorical, SparseArray)): return value # broadcast across multiple columns if necessary if key in self.columns and value.ndim == 1: if not self.columns.is_unique or isinstance(self.columns, MultiIndex): existing_piece = 
self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)) return np.atleast_2d(np.asarray(value)) @property def _series(self): result = {} for idx, item in enumerate(self.columns): result[item] = Series(self._data.iget(idx), index=self.index, name=item) return result def lookup(self, row_labels, col_labels): n = len(row_labels) if n != len(col_labels): raise ValueError('Row labels must have same size as column labels') thresh = 1000 if not self._is_mixed_type or n > thresh: values = self.values ridx = self.index.get_indexer(row_labels) cidx = self.columns.get_indexer(col_labels) if (ridx == -1).any(): raise KeyError('One or more row labels was not found') if (cidx == -1).any(): raise KeyError('One or more column labels was not found') flat_index = ridx * len(self.columns) + cidx result = values.flat[flat_index] else: result = np.empty(n, dtype='O') for i, (r, c) in enumerate(zip(row_labels, col_labels)): result[i] = self.get_value(r, c) if is_object_dtype(result): result = lib.maybe_convert_objects(result) return result #---------------------------------------------------------------------- # Reindexing and alignment def _reindex_axes(self, axes, level, limit, method, fill_value, copy): frame = self columns = axes['columns'] if columns is not None: frame = frame._reindex_columns(columns, copy, level, fill_value, limit) index = axes['index'] if index is not None: frame = frame._reindex_index(index, method, copy, level, fill_value, limit) return frame def _reindex_index(self, new_index, method, copy, level, fill_value=NA, limit=None): new_index, indexer = self.index.reindex(new_index, method, level, limit=limit) return self._reindex_with_indexers({0: [new_index, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False) def _reindex_columns(self, new_columns, copy, level, fill_value=NA, limit=None): new_columns, indexer = self.columns.reindex(new_columns, level=level, limit=limit) return self._reindex_with_indexers({1: [new_columns, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False) def _reindex_multi(self, axes, copy, fill_value): new_index, row_indexer = self.index.reindex(axes['index']) new_columns, col_indexer = self.columns.reindex(axes['columns']) if row_indexer is not None and col_indexer is not None: indexer = row_indexer, col_indexer new_values = com.take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor(new_values, index=new_index, columns=new_columns) else: return self._reindex_with_indexers({0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value) @Appender(_shared_docs['reindex'] % _shared_doc_kwargs) def reindex(self, index=None, columns=None, **kwargs): return super(DataFrame, self).reindex(index=index, columns=columns, **kwargs) @Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs) def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True, limit=None, fill_value=np.nan): return super(DataFrame, self).reindex_axis(labels=labels, axis=axis, method=method, level=level, copy=copy, limit=limit, fill_value=fill_value) @Appender(_shared_docs['rename'] % _shared_doc_kwargs) def rename(self, index=None, columns=None, **kwargs): return super(DataFrame, self).rename(index=index, columns=columns, **kwargs) def set_index(self, keys, drop=True, append=False, inplace=False, verify_integrity=False): if not isinstance(keys, list): keys = [keys] if inplace: frame = self else: frame = self.copy() arrays = [] names = [] if append: names = [x 
for x in self.index.names] if isinstance(self.index, MultiIndex): for i in range(self.index.nlevels): arrays.append(self.index.get_level_values(i)) else: arrays.append(self.index) to_remove = [] for col in keys: if isinstance(col, MultiIndex): # append all but the last column so we don't have to modify # the end of this loop for n in range(col.nlevels - 1): arrays.append(col.get_level_values(n)) level = col.get_level_values(col.nlevels - 1) names.extend(col.names) elif isinstance(col, Series): level = col.values names.append(col.name) elif isinstance(col, Index): level = col names.append(col.name) elif isinstance(col, (list, np.ndarray, Index)): level = col names.append(None) else: level = frame[col].values names.append(col) if drop: to_remove.append(col) arrays.append(level) index = MultiIndex.from_arrays(arrays, names=names) if verify_integrity and not index.is_unique: duplicates = index.get_duplicates() raise ValueError('Index has duplicate keys: %s' % duplicates) for c in to_remove: del frame[c] # clear up memory usage index._cleanup() frame.index = index if not inplace: return frame def reset_index(self, level=None, drop=False, inplace=False, col_level=0, col_fill=''): if inplace: new_obj = self else: new_obj = self.copy() def _maybe_casted_values(index, labels=None): if isinstance(index, PeriodIndex): values = index.asobject.values elif (isinstance(index, DatetimeIndex) and index.tz is not None): values = index.asobject else: values = index.values if values.dtype == np.object_: values = lib.maybe_convert_objects(values) # if we have the labels, extract the values with a mask if labels is not None: mask = labels == -1 values = values.take(labels) if mask.any(): values, changed = com._maybe_upcast_putmask(values, mask, np.nan) return values new_index = np.arange(len(new_obj),dtype='int64') if isinstance(self.index, MultiIndex): if level is not None: if not isinstance(level, (tuple, list)): level = [level] level = [self.index._get_level_number(lev) for lev in level] if len(level) < len(self.index.levels): new_index = self.index.droplevel(level) if not drop: names = self.index.names zipped = lzip(self.index.levels, self.index.labels) multi_col = isinstance(self.columns, MultiIndex) for i, (lev, lab) in reversed(list(enumerate(zipped))): col_name = names[i] if col_name is None: col_name = 'level_%d' % i if multi_col: if col_fill is None: col_name = tuple([col_name] * self.columns.nlevels) else: name_lst = [col_fill] * self.columns.nlevels lev_num = self.columns._get_level_number(col_level) name_lst[lev_num] = col_name col_name = tuple(name_lst) # to ndarray and maybe infer different dtype level_values = _maybe_casted_values(lev, lab) if level is None or i in level: new_obj.insert(0, col_name, level_values) elif not drop: name = self.index.name if name is None or name == 'index': name = 'index' if 'index' not in self else 'level_0' if isinstance(self.columns, MultiIndex): if col_fill is None: name = tuple([name] * self.columns.nlevels) else: name_lst = [col_fill] * self.columns.nlevels lev_num = self.columns._get_level_number(col_level) name_lst[lev_num] = name name = tuple(name_lst) values = _maybe_casted_values(self.index) new_obj.insert(0, name, values) new_obj.index = new_index if not inplace: return new_obj #---------------------------------------------------------------------- # Reindex-based selection methods def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False): if isinstance(axis, (tuple, list)): result = self for ax in axis: result = result.dropna(how=how, 
thresh=thresh, subset=subset, axis=ax) else: axis = self._get_axis_number(axis) agg_axis = 1 - axis agg_obj = self if subset is not None: ax = self._get_axis(agg_axis) indices = ax.get_indexer_for(subset) check = indices == -1 if check.any(): raise KeyError(list(np.compress(check,subset))) agg_obj = self.take(indices,axis=agg_axis) count = agg_obj.count(axis=agg_axis) if thresh is not None: mask = count >= thresh elif how == 'any': mask = count == len(agg_obj._get_axis(agg_axis)) elif how == 'all': mask = count > 0 else: if how is not None: raise ValueError('invalid how option: %s' % how) else: raise TypeError('must specify how or thresh') result = self.take(mask.nonzero()[0], axis=axis, convert=False) if inplace: self._update_inplace(result) else: return result @deprecate_kwarg(old_arg_name='cols', new_arg_name='subset') def drop_duplicates(self, subset=None, take_last=False, inplace=False): duplicated = self.duplicated(subset, take_last=take_last) if inplace: inds, = (-duplicated).nonzero() new_data = self._data.take(inds) self._update_inplace(new_data) else: return self[-duplicated] @deprecate_kwarg(old_arg_name='cols', new_arg_name='subset') def duplicated(self, subset=None, take_last=False): from pandas.core.groupby import get_group_index from pandas.core.algorithms import factorize from pandas.hashtable import duplicated_int64, _SIZE_HINT_LIMIT def f(vals): labels, shape = factorize(vals, size_hint=min(len(self), _SIZE_HINT_LIMIT)) return labels.astype('i8',copy=False), len(shape) if subset is None: subset = self.columns elif not np.iterable(subset) or \ isinstance(subset, compat.string_types) or \ isinstance(subset, tuple) and subset in self.columns: subset = subset, vals = (self[col].values for col in subset) labels, shape = map(list, zip( * map(f, vals))) ids = get_group_index(labels, shape, sort=False, xnull=False) return Series(duplicated_int64(ids, take_last), index=self.index) #---------------------------------------------------------------------- # Sorting def sort(self, columns=None, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last'): return self.sort_index(by=columns, axis=axis, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position) def sort_index(self, axis=0, by=None, ascending=True, inplace=False, kind='quicksort', na_position='last'): from pandas.core.groupby import _lexsort_indexer, _nargsort axis = self._get_axis_number(axis) if axis not in [0, 1]: # pragma: no cover raise AssertionError('Axis must be 0 or 1, got %s' % str(axis)) labels = self._get_axis(axis) if by is not None: if axis != 0: raise ValueError('When sorting by column, axis must be 0 ' '(rows)') if not isinstance(by, list): by = [by] if com.is_sequence(ascending) and len(by) != len(ascending): raise ValueError('Length of ascending (%d) != length of by' ' (%d)' % (len(ascending), len(by))) if len(by) > 1: def trans(v): if com.needs_i8_conversion(v): return v.view('i8') return v keys = [] for x in by: k = self[x].values if k.ndim == 2: raise ValueError('Cannot sort by duplicate column %s' % str(x)) keys.append(trans(k)) indexer = _lexsort_indexer(keys, orders=ascending, na_position=na_position) indexer = com._ensure_platform_int(indexer) else: by = by[0] k = self[by].values if k.ndim == 2: # try to be helpful if isinstance(self.columns, MultiIndex): raise ValueError('Cannot sort by column %s in a multi-index' ' you need to explicity provide all the levels' % str(by)) raise ValueError('Cannot sort by duplicate column %s' % str(by)) if isinstance(ascending, 
(tuple, list)): ascending = ascending[0] indexer = _nargsort(k, kind=kind, ascending=ascending, na_position=na_position) elif isinstance(labels, MultiIndex): # make sure that the axis is lexsorted to start # if not we need to reconstruct to get the correct indexer if not labels.is_lexsorted(): labels = MultiIndex.from_tuples(labels.values) indexer = _lexsort_indexer(labels.labels, orders=ascending, na_position=na_position) indexer = com._ensure_platform_int(indexer) else: indexer = _nargsort(labels, kind=kind, ascending=ascending, na_position=na_position) bm_axis = self._get_block_manager_axis(axis) new_data = self._data.take(indexer, axis=bm_axis, convert=False, verify=False) if inplace: return self._update_inplace(new_data) else: return self._constructor(new_data).__finalize__(self) def sortlevel(self, level=0, axis=0, ascending=True, inplace=False, sort_remaining=True): axis = self._get_axis_number(axis) the_axis = self._get_axis(axis) if not isinstance(the_axis, MultiIndex): raise TypeError('can only sort by level with a hierarchical index') new_axis, indexer = the_axis.sortlevel(level, ascending=ascending, sort_remaining=sort_remaining) if self._is_mixed_type and not inplace: ax = 'index' if axis == 0 else 'columns' if new_axis.is_unique: return self.reindex(**{ax: new_axis}) else: return self.take(indexer, axis=axis, convert=False) bm_axis = self._get_block_manager_axis(axis) new_data = self._data.take(indexer, axis=bm_axis, convert=False, verify=False) if inplace: return self._update_inplace(new_data) else: return self._constructor(new_data).__finalize__(self) def swaplevel(self, i, j, axis=0): result = self.copy() axis = self._get_axis_number(axis) if axis == 0: result.index = result.index.swaplevel(i, j) else: result.columns = result.columns.swaplevel(i, j) return result def reorder_levels(self, order, axis=0): axis = self._get_axis_number(axis) if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover raise TypeError('Can only reorder levels on a hierarchical axis.') result = self.copy() if axis == 0: result.index = result.index.reorder_levels(order) else: result.columns = result.columns.reorder_levels(order) return result #---------------------------------------------------------------------- # Arithmetic / combination related def _combine_frame(self, other, func, fill_value=None, level=None): this, other = self.align(other, join='outer', level=level, copy=False) new_index, new_columns = this.index, this.columns def _arith_op(left, right): if fill_value is not None: left_mask = isnull(left) right_mask = isnull(right) left = left.copy() right = right.copy() # one but not both mask = left_mask ^ right_mask left[left_mask & mask] = fill_value right[right_mask & mask] = fill_value return func(left, right) if this._is_mixed_type or other._is_mixed_type: # unique if this.columns.is_unique: def f(col): r = _arith_op(this[col].values, other[col].values) return self._constructor_sliced(r, index=new_index, dtype=r.dtype) result = dict([(col, f(col)) for col in this]) # non-unique else: def f(i): r = _arith_op(this.iloc[:, i].values, other.iloc[:, i].values) return self._constructor_sliced(r, index=new_index, dtype=r.dtype) result = dict([ (i, f(i)) for i, col in enumerate(this.columns) ]) result = self._constructor(result, index=new_index, copy=False) result.columns = new_columns return result else: result = _arith_op(this.values, other.values) return self._constructor(result, index=new_index, columns=new_columns, copy=False) def _combine_series(self, other, func, 
fill_value=None, axis=None, level=None): if axis is not None: axis = self._get_axis_name(axis) if axis == 'index': return self._combine_match_index(other, func, level=level, fill_value=fill_value) else: return self._combine_match_columns(other, func, level=level, fill_value=fill_value) return self._combine_series_infer(other, func, level=level, fill_value=fill_value) def _combine_series_infer(self, other, func, level=None, fill_value=None): if len(other) == 0: return self * NA if len(self) == 0: # Ambiguous case, use _series so works with DataFrame return self._constructor(data=self._series, index=self.index, columns=self.columns) # teeny hack because one does DataFrame + TimeSeries all the time if self.index.is_all_dates and other.index.is_all_dates: warnings.warn(("TimeSeries broadcasting along DataFrame index " "by default is deprecated. Please use " "DataFrame.<op> to explicitly broadcast arithmetic " "operations along the index"), FutureWarning) return self._combine_match_index(other, func, level=level, fill_value=fill_value) else: return self._combine_match_columns(other, func, level=level, fill_value=fill_value) def _combine_match_index(self, other, func, level=None, fill_value=None): left, right = self.align(other, join='outer', axis=0, level=level, copy=False) if fill_value is not None: raise NotImplementedError("fill_value %r not supported." % fill_value) return self._constructor(func(left.values.T, right.values).T, index=left.index, columns=self.columns, copy=False) def _combine_match_columns(self, other, func, level=None, fill_value=None): left, right = self.align(other, join='outer', axis=1, level=level, copy=False) if fill_value is not None: raise NotImplementedError("fill_value %r not supported" % fill_value) new_data = left._data.eval( func=func, other=right, axes=[left.columns, self.index]) return self._constructor(new_data) def _combine_const(self, other, func, raise_on_error=True): if self.empty: return self new_data = self._data.eval(func=func, other=other, raise_on_error=raise_on_error) return self._constructor(new_data) def _compare_frame_evaluate(self, other, func, str_rep): # unique if self.columns.is_unique: def _compare(a, b): return dict([(col, func(a[col], b[col])) for col in a.columns]) new_data = expressions.evaluate(_compare, str_rep, self, other) return self._constructor(data=new_data, index=self.index, columns=self.columns, copy=False) # non-unique else: def _compare(a, b): return dict([(i, func(a.iloc[:, i], b.iloc[:, i])) for i, col in enumerate(a.columns)]) new_data = expressions.evaluate(_compare, str_rep, self, other) result = self._constructor(data=new_data, index=self.index, copy=False) result.columns = self.columns return result def _compare_frame(self, other, func, str_rep): if not self._indexed_same(other): raise ValueError('Can only compare identically-labeled ' 'DataFrame objects') return self._compare_frame_evaluate(other, func, str_rep) def _flex_compare_frame(self, other, func, str_rep, level): if not self._indexed_same(other): self, other = self.align(other, 'outer', level=level, copy=False) return self._compare_frame_evaluate(other, func, str_rep) def combine(self, other, func, fill_value=None, overwrite=True): other_idxlen = len(other.index) # save for compare this, other = self.align(other, copy=False) new_index = this.index if other.empty and len(new_index) == len(self.index): return self.copy() if self.empty and len(other) == other_idxlen: return other.copy() # sorts if possible new_columns = this.columns.union(other.columns) do_fill = 
fill_value is not None result = {} for col in new_columns: series = this[col] otherSeries = other[col] this_dtype = series.dtype other_dtype = otherSeries.dtype this_mask = isnull(series) other_mask = isnull(otherSeries) # don't overwrite columns unecessarily # DO propogate if this column is not in the intersection if not overwrite and other_mask.all(): result[col] = this[col].copy() continue if do_fill: series = series.copy() otherSeries = otherSeries.copy() series[this_mask] = fill_value otherSeries[other_mask] = fill_value # if we have different dtypes, possibily promote new_dtype = this_dtype if this_dtype != other_dtype: new_dtype = com._lcd_dtypes(this_dtype, other_dtype) series = series.astype(new_dtype) otherSeries = otherSeries.astype(new_dtype) # see if we need to be represented as i8 (datetimelike) # try to keep us at this dtype needs_i8_conversion = com.needs_i8_conversion(new_dtype) if needs_i8_conversion: this_dtype = new_dtype arr = func(series, otherSeries, True) else: arr = func(series, otherSeries) if do_fill: arr = com.ensure_float(arr) arr[this_mask & other_mask] = NA # try to downcast back to the original dtype if needs_i8_conversion: arr = com._possibly_cast_to_datetime(arr, this_dtype) else: arr = com._possibly_downcast_to_dtype(arr, this_dtype) result[col] = arr # convert_objects just in case return self._constructor(result, index=new_index, columns=new_columns).convert_objects( convert_dates=True, copy=False) def combine_first(self, other): def combiner(x, y, needs_i8_conversion=False): x_values = x.values if hasattr(x, 'values') else x y_values = y.values if hasattr(y, 'values') else y if needs_i8_conversion: mask = isnull(x) x_values = x_values.view('i8') y_values = y_values.view('i8') else: mask = isnull(x_values) return expressions.where(mask, y_values, x_values, raise_on_error=True) return self.combine(other, combiner, overwrite=False) def update(self, other, join='left', overwrite=True, filter_func=None, raise_conflict=False): # TODO: Support other joins if join != 'left': # pragma: no cover raise NotImplementedError("Only left join is supported") if not isinstance(other, DataFrame): other = DataFrame(other) other = other.reindex_like(self) for col in self.columns: this = self[col].values that = other[col].values if filter_func is not None: mask = ~filter_func(this) | isnull(that) else: if raise_conflict: mask_this = notnull(that) mask_that = notnull(this) if any(mask_this & mask_that): raise ValueError("Data overlaps.") if overwrite: mask = isnull(that) # don't overwrite columns unecessarily if mask.all(): continue else: mask = notnull(this) self[col] = expressions.where( mask, this, that, raise_on_error=True) #---------------------------------------------------------------------- # Misc methods def first_valid_index(self): return self.index[self.count(1) > 0][0] def last_valid_index(self): return self.index[self.count(1) > 0][-1] #---------------------------------------------------------------------- # Data reshaping def pivot(self, index=None, columns=None, values=None): from pandas.core.reshape import pivot return pivot(self, index=index, columns=columns, values=values) def stack(self, level=-1, dropna=True): from pandas.core.reshape import stack, stack_multiple if isinstance(level, (tuple, list)): return stack_multiple(self, level, dropna=dropna) else: return stack(self, level, dropna=dropna) def unstack(self, level=-1): from pandas.core.reshape import unstack return unstack(self, level) 
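    # Illustrative round-trip sketch for the reshaping helpers above (toy
    # frame assumed, not part of the original module): ``stack`` moves the
    # columns into the innermost index level, and ``unstack`` reverses it.
    #   >>> df = DataFrame({'a': [1, 2], 'b': [3, 4]}, index=['x', 'y'])
    #   >>> stacked = df.stack()          # Series with a 2-level MultiIndex
    #   >>> stacked.unstack().equals(df)
    #   True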
#---------------------------------------------------------------------- # Time series-related def diff(self, periods=1): new_data = self._data.diff(n=periods) return self._constructor(new_data) #---------------------------------------------------------------------- # Function application def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None, args=(), **kwds): axis = self._get_axis_number(axis) if kwds or args and not isinstance(func, np.ufunc): f = lambda x: func(x, *args, **kwds) else: f = func if len(self.columns) == 0 and len(self.index) == 0: return self._apply_empty_result(func, axis, reduce, *args, **kwds) if isinstance(f, np.ufunc): results = f(self.values) return self._constructor(data=results, index=self.index, columns=self.columns, copy=False) else: if not broadcast: if not all(self.shape): return self._apply_empty_result(func, axis, reduce, *args, **kwds) if raw and not self._is_mixed_type: return self._apply_raw(f, axis) else: if reduce is None: reduce = True return self._apply_standard(f, axis, reduce=reduce) else: return self._apply_broadcast(f, axis) def _apply_empty_result(self, func, axis, reduce, *args, **kwds): if reduce is None: reduce = False try: reduce = not isinstance(func(_EMPTY_SERIES, *args, **kwds), Series) except Exception: pass if reduce: return Series(NA, index=self._get_agg_axis(axis)) else: return self.copy() def _apply_raw(self, func, axis): try: result = lib.reduce(self.values, func, axis=axis) except Exception: result = np.apply_along_axis(func, axis, self.values) # TODO: mixed type case if result.ndim == 2: return DataFrame(result, index=self.index, columns=self.columns) else: return Series(result, index=self._get_agg_axis(axis)) def _apply_standard(self, func, axis, ignore_failures=False, reduce=True): # skip if we are mixed datelike and trying reduce across axes # GH6125 if reduce and axis==1 and self._is_mixed_type and self._is_datelike_mixed_type: reduce=False # try to reduce first (by default) # this only matters if the reduction in values is of different dtype # e.g. 
if we want to apply to a SparseFrame, then can't directly reduce if reduce: try: # the is the fast-path values = self.values dummy = Series(NA, index=self._get_axis(axis), dtype=values.dtype) labels = self._get_agg_axis(axis) result = lib.reduce(values, func, axis=axis, dummy=dummy, labels=labels) return Series(result, index=labels) except Exception: pass dtype = object if self._is_mixed_type else None if axis == 0: series_gen = (self.icol(i) for i in range(len(self.columns))) res_index = self.columns res_columns = self.index elif axis == 1: res_index = self.index res_columns = self.columns values = self.values series_gen = (Series.from_array(arr, index=res_columns, name=name, dtype=dtype) for i, (arr, name) in enumerate(zip(values, res_index))) else: # pragma : no cover raise AssertionError('Axis must be 0 or 1, got %s' % str(axis)) i = None keys = [] results = {} if ignore_failures: successes = [] for i, v in enumerate(series_gen): try: results[i] = func(v) keys.append(v.name) successes.append(i) except Exception: pass # so will work with MultiIndex if len(successes) < len(res_index): res_index = res_index.take(successes) else: try: for i, v in enumerate(series_gen): results[i] = func(v) keys.append(v.name) except Exception as e: if hasattr(e, 'args'): # make sure i is defined if i is not None: k = res_index[i] e.args = e.args + ('occurred at index %s' % com.pprint_thing(k),) raise if len(results) > 0 and is_sequence(results[0]): if not isinstance(results[0], Series): index = res_columns else: index = None result = self._constructor(data=results, index=index) result.columns = res_index if axis == 1: result = result.T result = result.convert_objects(copy=False) else: result = Series(results) result.index = res_index return result def _apply_broadcast(self, func, axis): if axis == 0: target = self elif axis == 1: target = self.T else: # pragma: no cover raise AssertionError('Axis must be 0 or 1, got %s' % axis) result_values = np.empty_like(target.values) columns = target.columns for i, col in enumerate(columns): result_values[:, i] = func(target[col]) result = self._constructor(result_values, index=target.index, columns=target.columns) if axis == 1: result = result.T return result def applymap(self, func): # if we have a dtype == 'M8[ns]', provide boxed values def infer(x): if com.needs_i8_conversion(x): f = com.i8_boxer(x) x = lib.map_infer(_values_from_object(x), f) return lib.map_infer(_values_from_object(x), func) return self.apply(infer) #---------------------------------------------------------------------- # Merging / joining methods def append(self, other, ignore_index=False, verify_integrity=False): if isinstance(other, (Series, dict)): if isinstance(other, dict): other = Series(other) if other.name is None and not ignore_index: raise TypeError('Can only append a Series if ignore_index=True' ' or if the Series has a name') index = None if other.name is None else [other.name] combined_columns = self.columns.tolist() + self.columns.union(other.index).difference(self.columns).tolist() other = other.reindex(combined_columns, copy=False) other = DataFrame(other.values.reshape((1, len(other))), index=index, columns=combined_columns).convert_objects() if not self.columns.equals(combined_columns): self = self.reindex(columns=combined_columns) elif isinstance(other, list) and not isinstance(other[0], DataFrame): other = DataFrame(other) if (self.columns.get_indexer(other.columns) >= 0).all(): other = other.ix[:, self.columns] from pandas.tools.merge import concat if isinstance(other, 
(list, tuple)): to_concat = [self] + other else: to_concat = [self, other] return concat(to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity) def join(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False): # For SparseDataFrame's benefit return self._join_compat(other, on=on, how=how, lsuffix=lsuffix, rsuffix=rsuffix, sort=sort) def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False): from pandas.tools.merge import merge, concat if isinstance(other, Series): if other.name is None: raise ValueError('Other Series must have a name') other = DataFrame({other.name: other}) if isinstance(other, DataFrame): return merge(self, other, left_on=on, how=how, left_index=on is None, right_index=True, suffixes=(lsuffix, rsuffix), sort=sort) else: if on is not None: raise ValueError('Joining multiple DataFrames only supported' ' for joining on index') # join indexes only using concat if how == 'left': how = 'outer' join_axes = [self.index] else: join_axes = None frames = [self] + list(other) can_concat = all(df.index.is_unique for df in frames) if can_concat: return concat(frames, axis=1, join=how, join_axes=join_axes, verify_integrity=True) joined = frames[0] for frame in frames[1:]: joined = merge(joined, frame, how=how, left_index=True, right_index=True) return joined @Substitution('') @Appender(_merge_doc, indents=2) def merge(self, right, how='inner', on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=False, suffixes=('_x', '_y'), copy=True): from pandas.tools.merge import merge return merge(self, right, how=how, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, sort=sort, suffixes=suffixes, copy=copy) #---------------------------------------------------------------------- # Statistical methods, etc. 
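    # Hedged usage sketch for the pairwise statistics below (toy frame
    # assumed, not part of the original module): ``corr`` and ``cov`` both
    # drop non-numeric columns and return a square column-by-column matrix.
    #   >>> df = DataFrame({'x': [1.0, 2.0, 3.0], 'y': [2.0, 4.0, 6.0]})
    #   >>> df.corr().loc['x', 'y']       # perfectly linear -> 1.0
    #   1.0
    #   >>> df.cov().shape
    #   (2, 2)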
def corr(self, method='pearson', min_periods=1): numeric_df = self._get_numeric_data() cols = numeric_df.columns mat = numeric_df.values if method == 'pearson': correl = _algos.nancorr(com._ensure_float64(mat), minp=min_periods) elif method == 'spearman': correl = _algos.nancorr_spearman(com._ensure_float64(mat), minp=min_periods) else: if min_periods is None: min_periods = 1 mat = mat.T corrf = nanops.get_corr_func(method) K = len(cols) correl = np.empty((K, K), dtype=float) mask = np.isfinite(mat) for i, ac in enumerate(mat): for j, bc in enumerate(mat): valid = mask[i] & mask[j] if valid.sum() < min_periods: c = NA elif not valid.all(): c = corrf(ac[valid], bc[valid]) else: c = corrf(ac, bc) correl[i, j] = c correl[j, i] = c return self._constructor(correl, index=cols, columns=cols) def cov(self, min_periods=None): numeric_df = self._get_numeric_data() cols = numeric_df.columns mat = numeric_df.values if notnull(mat).all(): if min_periods is not None and min_periods > len(mat): baseCov = np.empty((mat.shape[1], mat.shape[1])) baseCov.fill(np.nan) else: baseCov = np.cov(mat.T) baseCov = baseCov.reshape((len(cols), len(cols))) else: baseCov = _algos.nancorr(com._ensure_float64(mat), cov=True, minp=min_periods) return self._constructor(baseCov, index=cols, columns=cols) def corrwith(self, other, axis=0, drop=False): axis = self._get_axis_number(axis) if isinstance(other, Series): return self.apply(other.corr, axis=axis) this = self._get_numeric_data() other = other._get_numeric_data() left, right = this.align(other, join='inner', copy=False) # mask missing values left = left + right * 0 right = right + left * 0 if axis == 1: left = left.T right = right.T # demeaned data ldem = left - left.mean() rdem = right - right.mean() num = (ldem * rdem).sum() dom = (left.count() - 1) * left.std() * right.std() correl = num / dom if not drop: raxis = 1 if axis == 0 else 0 result_index = this._get_axis(raxis).union(other._get_axis(raxis)) correl = correl.reindex(result_index) return correl #---------------------------------------------------------------------- # ndarray-like stats methods def count(self, axis=0, level=None, numeric_only=False): axis = self._get_axis_number(axis) if level is not None: return self._count_level(level, axis=axis, numeric_only=numeric_only) if numeric_only: frame = self._get_numeric_data() else: frame = self # GH #423 if len(frame._get_axis(axis)) == 0: result = Series(0, index=frame._get_agg_axis(axis)) else: if frame._is_mixed_type: result = notnull(frame).sum(axis=axis) else: counts = notnull(frame.values).sum(axis=axis) result = Series(counts, index=frame._get_agg_axis(axis)) return result.astype('int64') def _count_level(self, level, axis=0, numeric_only=False): if numeric_only: frame = self._get_numeric_data() else: frame = self count_axis = frame._get_axis(axis) agg_axis = frame._get_agg_axis(axis) if not isinstance(count_axis, MultiIndex): raise TypeError("Can only count levels on hierarchical %s." 
% self._get_axis_name(axis)) if frame._is_mixed_type: # Since we have mixed types, calling notnull(frame.values) might # upcast everything to object mask = notnull(frame).values else: # But use the speedup when we have homogeneous dtypes mask = notnull(frame.values) if axis == 1: # We're transposing the mask rather than frame to avoid potential # upcasts to object, which induces a ~20x slowdown mask = mask.T if isinstance(level, compat.string_types): level = count_axis._get_level_number(level) level_index = count_axis.levels[level] labels = com._ensure_int64(count_axis.labels[level]) counts = lib.count_level_2d(mask, labels, len(level_index)) result = DataFrame(counts, index=level_index, columns=agg_axis) if axis == 1: # Undo our earlier transpose return result.T else: return result def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds): axis = self._get_axis_number(axis) f = lambda x: op(x, axis=axis, skipna=skipna, **kwds) labels = self._get_agg_axis(axis) # exclude timedelta/datetime unless we are uniform types if axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type: numeric_only = True if numeric_only is None: try: values = self.values result = f(values) except Exception as e: # try by-column first if filter_type is None and axis == 0: try: # this can end up with a non-reduction # but not always. if the types are mixed # with datelike then need to make sure a series result = self.apply(f,reduce=False) if result.ndim == self.ndim: result = result.iloc[0] return result except: pass if filter_type is None or filter_type == 'numeric': data = self._get_numeric_data() elif filter_type == 'bool': data = self._get_bool_data() else: # pragma: no cover e = NotImplementedError("Handling exception with filter_" "type %s not implemented." % filter_type) raise_with_traceback(e) result = f(data.values) labels = data._get_agg_axis(axis) else: if numeric_only: if filter_type is None or filter_type == 'numeric': data = self._get_numeric_data() elif filter_type == 'bool': data = self._get_bool_data() else: # pragma: no cover msg = ("Generating numeric_only data with filter_type %s" "not supported." 
% filter_type) raise NotImplementedError(msg) values = data.values labels = data._get_agg_axis(axis) else: values = self.values result = f(values) if is_object_dtype(result.dtype): try: if filter_type is None or filter_type == 'numeric': result = result.astype(np.float64) elif filter_type == 'bool' and notnull(result).all(): result = result.astype(np.bool_) except (ValueError, TypeError): # try to coerce to the original dtypes item by item if we can if axis == 0: result = com._coerce_to_dtypes(result, self.dtypes) return Series(result, index=labels) def idxmin(self, axis=0, skipna=True): axis = self._get_axis_number(axis) indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna) index = self._get_axis(axis) result = [index[i] if i >= 0 else NA for i in indices] return Series(result, index=self._get_agg_axis(axis)) def idxmax(self, axis=0, skipna=True): axis = self._get_axis_number(axis) indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna) index = self._get_axis(axis) result = [index[i] if i >= 0 else NA for i in indices] return Series(result, index=self._get_agg_axis(axis)) def _get_agg_axis(self, axis_num): if axis_num == 0: return self.columns elif axis_num == 1: return self.index else: raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num) def mode(self, axis=0, numeric_only=False): data = self if not numeric_only else self._get_numeric_data() f = lambda s: s.mode() return data.apply(f, axis=axis) def quantile(self, q=0.5, axis=0, numeric_only=True): per = np.asarray(q) * 100 if not com.is_list_like(per): per = [per] q = [q] squeeze = True else: squeeze = False def f(arr, per): if arr._is_datelike_mixed_type: values = _values_from_object(arr).view('i8') else: values = arr.astype(float) values = values[notnull(values)] if len(values) == 0: return NA else: return _quantile(values, per) data = self._get_numeric_data() if numeric_only else self if axis == 1: data = data.T # need to know which cols are timestamp going in so that we can # map timestamp over them after getting the quantile. 
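        # Hedged example of the behaviour implemented below (toy frame
        # assumed, not part of the original module): numeric columns are
        # interpolated as floats, while datetime64 columns are quantiled on
        # their i8 view and boxed back to Timestamp afterwards.
        #   >>> df = DataFrame({'a': [1, 2, 3, 4]})
        #   >>> df.quantile(0.5)['a']     # interpolated median
        #   2.5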
is_dt_col = data.dtypes.map(com.is_datetime64_dtype) is_dt_col = is_dt_col[is_dt_col].index quantiles = [[f(vals, x) for x in per] for (_, vals) in data.iteritems()] result = DataFrame(quantiles, index=data._info_axis, columns=q).T if len(is_dt_col) > 0: result[is_dt_col] = result[is_dt_col].applymap(lib.Timestamp) if squeeze: if result.shape == (1, 1): result = result.T.iloc[:, 0] # don't want scalar else: result = result.T.squeeze() result.name = None # For groupby, so it can set an index name return result def rank(self, axis=0, numeric_only=None, method='average', na_option='keep', ascending=True, pct=False): axis = self._get_axis_number(axis) if numeric_only is None: try: ranks = algos.rank(self.values, axis=axis, method=method, ascending=ascending, na_option=na_option, pct=pct) return self._constructor(ranks, index=self.index, columns=self.columns) except TypeError: numeric_only = True if numeric_only: data = self._get_numeric_data() else: data = self ranks = algos.rank(data.values, axis=axis, method=method, ascending=ascending, na_option=na_option, pct=pct) return self._constructor(ranks, index=data.index, columns=data.columns) def to_timestamp(self, freq=None, how='start', axis=0, copy=True): new_data = self._data if copy: new_data = new_data.copy() axis = self._get_axis_number(axis) if axis == 0: new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how)) elif axis == 1: new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how)) else: # pragma: no cover raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis)) return self._constructor(new_data) def to_period(self, freq=None, axis=0, copy=True): new_data = self._data if copy: new_data = new_data.copy() axis = self._get_axis_number(axis) if axis == 0: new_data.set_axis(1, self.index.to_period(freq=freq)) elif axis == 1: new_data.set_axis(0, self.columns.to_period(freq=freq)) else: # pragma: no cover raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis)) return self._constructor(new_data) def isin(self, values): if isinstance(values, dict): from collections import defaultdict from pandas.tools.merge import concat values = defaultdict(list, values) return concat((self.iloc[:, [i]].isin(values[col]) for i, col in enumerate(self.columns)), axis=1) elif isinstance(values, Series): if not values.index.is_unique: raise ValueError("ValueError: cannot compute isin with" " a duplicate axis.") return self.eq(values.reindex_like(self), axis='index') elif isinstance(values, DataFrame): if not (values.columns.is_unique and values.index.is_unique): raise ValueError("ValueError: cannot compute isin with" " a duplicate axis.") return self.eq(values.reindex_like(self)) else: if not is_list_like(values): raise TypeError("only list-like or dict-like objects are" " allowed to be passed to DataFrame.isin(), " "you passed a " "{0!r}".format(type(values).__name__)) return DataFrame(lib.ismember(self.values.ravel(), set(values)).reshape(self.shape), self.index, self.columns) #---------------------------------------------------------------------- # Deprecated stuff def combineAdd(self, other): return self.add(other, fill_value=0.) def combineMult(self, other): return self.mul(other, fill_value=1.) 
DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0, axes_are_reversed=True, aliases={'rows': 0}) DataFrame._add_numeric_operations() _EMPTY_SERIES = Series([]) def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None): # figure out the index, if necessary if index is None: index = extract_index(arrays) else: index = _ensure_index(index) # don't force copy because getting jammed in an ndarray anyway arrays = _homogenize(arrays, index, dtype) # from BlockManager perspective axes = [_ensure_index(columns), _ensure_index(index)] return create_block_manager_from_arrays(arrays, arr_names, axes) def extract_index(data): from pandas.core.index import _union_indexes index = None if len(data) == 0: index = Index([]) elif len(data) > 0: raw_lengths = [] indexes = [] have_raw_arrays = False have_series = False have_dicts = False for v in data: if isinstance(v, Series): have_series = True indexes.append(v.index) elif isinstance(v, dict): have_dicts = True indexes.append(list(v.keys())) elif is_list_like(v) and getattr(v, 'ndim', 1) == 1: have_raw_arrays = True raw_lengths.append(len(v)) if not indexes and not raw_lengths: raise ValueError('If using all scalar values, you must pass' ' an index') if have_series or have_dicts: index = _union_indexes(indexes) if have_raw_arrays: lengths = list(set(raw_lengths)) if len(lengths) > 1: raise ValueError('arrays must all be same length') if have_dicts: raise ValueError('Mixing dicts with non-Series may lead to ' 'ambiguous ordering.') if have_series: if lengths[0] != len(index): msg = ('array length %d does not match index length %d' % (lengths[0], len(index))) raise ValueError(msg) else: index = Index(np.arange(lengths[0])) return _ensure_index(index) def _prep_ndarray(values, copy=True): if not isinstance(values, (np.ndarray, Series, Index)): if len(values) == 0: return np.empty((0, 0), dtype=object) def convert(v): return com._possibly_convert_platform(v) # we could have a 1-dim or 2-dim list here # this is equiv of np.asarray, but does object conversion # and platform dtype preservation try: if com.is_list_like(values[0]) or hasattr(values[0], 'len'): values = np.array([convert(v) for v in values]) else: values = convert(values) except: values = convert(values) else: # drop subclass info, do not copy data values = np.asarray(values) if copy: values = values.copy() if values.ndim == 1: values = values.reshape((values.shape[0], 1)) elif values.ndim != 2: raise ValueError('Must pass 2-d input') return values def _to_arrays(data, columns, coerce_float=False, dtype=None): if isinstance(data, DataFrame): if columns is not None: arrays = [data.icol(i).values for i, col in enumerate(data.columns) if col in columns] else: columns = data.columns arrays = [data.icol(i).values for i in range(len(columns))] return arrays, columns if not len(data): if isinstance(data, np.ndarray): columns = data.dtype.names if columns is not None: return [[]] * len(columns), columns return [], [] # columns if columns is not None else [] if isinstance(data[0], (list, tuple)): return _list_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) elif isinstance(data[0], collections.Mapping): return _list_of_dict_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) elif isinstance(data[0], Series): return _list_of_series_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) elif isinstance(data[0], Categorical): if columns is None: columns = _default_index(len(data)) return data, columns elif (isinstance(data, (np.ndarray, Series, 
Index)) and data.dtype.names is not None): columns = list(data.dtype.names) arrays = [data[k] for k in columns] return arrays, columns else: # last ditch effort data = lmap(tuple, data) return _list_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) def _masked_rec_array_to_mgr(data, index, columns, dtype, copy): # essentially process a record array then fill it fill_value = data.fill_value fdata = ma.getdata(data) if index is None: index = _get_names_from_index(fdata) if index is None: index = _default_index(len(data)) index = _ensure_index(index) if columns is not None: columns = _ensure_index(columns) arrays, arr_columns = _to_arrays(fdata, columns) # fill if needed new_arrays = [] for fv, arr, col in zip(fill_value, arrays, arr_columns): mask = ma.getmaskarray(data[col]) if mask.any(): arr, fv = _maybe_upcast(arr, fill_value=fv, copy=True) arr[mask] = fv new_arrays.append(arr) # create the manager arrays, arr_columns = _reorder_arrays(new_arrays, arr_columns, columns) if columns is None: columns = arr_columns mgr = _arrays_to_mgr(arrays, arr_columns, index, columns) if copy: mgr = mgr.copy() return mgr def _reorder_arrays(arrays, arr_columns, columns): # reorder according to the columns if (columns is not None and len(columns) and arr_columns is not None and len(arr_columns)): indexer = _ensure_index( arr_columns).get_indexer(columns) arr_columns = _ensure_index( [arr_columns[i] for i in indexer]) arrays = [arrays[i] for i in indexer] return arrays, arr_columns def _list_to_arrays(data, columns, coerce_float=False, dtype=None): if len(data) > 0 and isinstance(data[0], tuple): content = list(lib.to_object_array_tuples(data).T) else: # list of lists content = list(lib.to_object_array(data).T) return _convert_object_array(content, columns, dtype=dtype, coerce_float=coerce_float) def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None): from pandas.core.index import _get_combined_index if columns is None: columns = _get_combined_index([ s.index for s in data if getattr(s, 'index', None) is not None ]) indexer_cache = {} aligned_values = [] for s in data: index = getattr(s, 'index', None) if index is None: index = _default_index(len(s)) if id(index) in indexer_cache: indexer = indexer_cache[id(index)] else: indexer = indexer_cache[id(index)] = index.get_indexer(columns) values = _values_from_object(s) aligned_values.append(com.take_1d(values, indexer)) values = np.vstack(aligned_values) if values.dtype == np.object_: content = list(values.T) return _convert_object_array(content, columns, dtype=dtype, coerce_float=coerce_float) else: return values.T, columns def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None): if columns is None: gen = (list(x.keys()) for x in data) columns = lib.fast_unique_multiple_list_gen(gen) # assure that they are of the base dict class and not of derived # classes data = [(type(d) is dict) and d or dict(d) for d in data] content = list(lib.dicts_to_array(data, list(columns)).T) return _convert_object_array(content, columns, dtype=dtype, coerce_float=coerce_float) def _convert_object_array(content, columns, coerce_float=False, dtype=None): if columns is None: columns = _default_index(len(content)) else: if len(columns) != len(content): # pragma: no cover # caller's responsibility to check for this... 
raise AssertionError('%d columns passed, passed data had %s ' 'columns' % (len(columns), len(content))) # provide soft conversion of object dtypes def convert(arr): if dtype != object and dtype != np.object: arr = lib.maybe_convert_objects(arr, try_float=coerce_float) arr = com._possibly_cast_to_datetime(arr, dtype) return arr arrays = [ convert(arr) for arr in content ] return arrays, columns def _get_names_from_index(data): index = lrange(len(data)) has_some_name = any([getattr(s, 'name', None) is not None for s in data]) if not has_some_name: return index count = 0 for i, s in enumerate(data): n = getattr(s, 'name', None) if n is not None: index[i] = n else: index[i] = 'Unnamed %d' % count count += 1 return index def _homogenize(data, index, dtype=None): from pandas.core.series import _sanitize_array oindex = None homogenized = [] for v in data: if isinstance(v, Series): if dtype is not None: v = v.astype(dtype) if v.index is not index: # Forces alignment. No need to copy data since we # are putting it into an ndarray later v = v.reindex(index, copy=False) else: if isinstance(v, dict): if oindex is None: oindex = index.astype('O') if type(v) == dict: # fast cython method v = lib.fast_multiget(v, oindex.values, default=NA) else: v = lib.map_infer(oindex.values, v.get) v = _sanitize_array(v, index, dtype=dtype, copy=False, raise_cast_failure=False) homogenized.append(v) return homogenized def _from_nested_dict(data): # TODO: this should be seriously cythonized new_data = OrderedDict() for index, s in compat.iteritems(data): for col, v in compat.iteritems(s): new_data[col] = new_data.get(col, OrderedDict()) new_data[col][index] = v return new_data def _put_str(s, space): return ('%s' % s)[:space].ljust(space) #---------------------------------------------------------------------- # Add plotting methods to DataFrame import pandas.tools.plotting as gfx DataFrame.plot = gfx.plot_frame DataFrame.hist = gfx.hist_frame @Appender(_shared_docs['boxplot'] % _shared_doc_kwargs) def boxplot(self, column=None, by=None, ax=None, fontsize=None, rot=0, grid=True, figsize=None, layout=None, return_type=None, **kwds): import pandas.tools.plotting as plots import matplotlib.pyplot as plt ax = plots.boxplot(self, column=column, by=by, ax=ax, fontsize=fontsize, grid=grid, rot=rot, figsize=figsize, layout=layout, return_type=return_type, **kwds) plt.draw_if_interactive() return ax DataFrame.boxplot = boxplot ops.add_flex_arithmetic_methods(DataFrame, **ops.frame_flex_funcs) ops.add_special_arithmetic_methods(DataFrame, **ops.frame_special_funcs) if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
true
true
f700d457ceccdb1435f157493066afbfc7b6f6f1
256
py
Python
geo_agent/test/overwrite_geojson.py
kevjp/openstreetmap-carto
be30cfe8d73f78cb4b5ba9acaaf42a942c70270d
[ "CC0-1.0" ]
null
null
null
geo_agent/test/overwrite_geojson.py
kevjp/openstreetmap-carto
be30cfe8d73f78cb4b5ba9acaaf42a942c70270d
[ "CC0-1.0" ]
null
null
null
geo_agent/test/overwrite_geojson.py
kevjp/openstreetmap-carto
be30cfe8d73f78cb4b5ba9acaaf42a942c70270d
[ "CC0-1.0" ]
null
null
null
import geopandas
import shapely.geometry

gdf = geopandas.GeoDataFrame(geometry=[shapely.geometry.Point(x, x) for x in [5,4,3,2]])
gdf.index.name = 'id'
gdf.to_file("test.geojson", index=True, driver='GeoJSON')
gdf.to_file("test.geojson1", driver='GeoJSON')
42.666667
88
0.75
import geopandas
import shapely.geometry

gdf = geopandas.GeoDataFrame(geometry=[shapely.geometry.Point(x, x) for x in [5,4,3,2]])
gdf.index.name = 'id'
gdf.to_file("test.geojson", index=True, driver='GeoJSON')
gdf.to_file("test.geojson1", driver='GeoJSON')
true
true
f700d475269b885063676b9344c7da5676553cc3
18,143
py
Python
src/sage/groups/semimonomial_transformations/semimonomial_transformation_group.py
sensen1/sage
d6c5cd9be78cc448ee4c54bac93385b1244a234c
[ "BSL-1.0" ]
1,742
2015-01-04T07:06:13.000Z
2022-03-30T11:32:52.000Z
src/sage/groups/semimonomial_transformations/semimonomial_transformation_group.py
sensen1/sage
d6c5cd9be78cc448ee4c54bac93385b1244a234c
[ "BSL-1.0" ]
66
2015-03-19T19:17:24.000Z
2022-03-16T11:59:30.000Z
src/sage/groups/semimonomial_transformations/semimonomial_transformation_group.py
sensen1/sage
d6c5cd9be78cc448ee4c54bac93385b1244a234c
[ "BSL-1.0" ]
495
2015-01-10T10:23:18.000Z
2022-03-24T22:06:11.000Z
r""" Semimonomial transformation group The semimonomial transformation group of degree `n` over a ring `R` is the semidirect product of the monomial transformation group of degree `n` (also known as the complete monomial group over the group of units `R^{\times}` of `R`) and the group of ring automorphisms. The multiplication of two elements `(\phi, \pi, \alpha)(\psi, \sigma, \beta)` with - `\phi, \psi \in {R^{\times}}^n` - `\pi, \sigma \in S_n` (with the multiplication `\pi\sigma` done from left to right (like in GAP) -- that is, `(\pi\sigma)(i) = \sigma(\pi(i))` for all `i`.) - `\alpha, \beta \in Aut(R)` is defined by .. MATH:: (\phi, \pi, \alpha)(\psi, \sigma, \beta) = (\phi \cdot \psi^{\pi, \alpha}, \pi\sigma, \alpha \circ \beta) where `\psi^{\pi, \alpha} = (\alpha(\psi_{\pi(1)-1}), \ldots, \alpha(\psi_{\pi(n)-1}))` and the multiplication of vectors is defined elementwisely. (The indexing of vectors is `0`-based here, so `\psi = (\psi_0, \psi_1, \ldots, \psi_{n-1})`.) .. TODO:: Up to now, this group is only implemented for finite fields because of the limited support of automorphisms for arbitrary rings. AUTHORS: - Thomas Feulner (2012-11-15): initial version EXAMPLES:: sage: S = SemimonomialTransformationGroup(GF(4, 'a'), 4) sage: G = S.gens() sage: G[0]*G[1] ((a, 1, 1, 1); (1,2,3,4), Ring endomorphism of Finite Field in a of size 2^2 Defn: a |--> a) TESTS:: sage: TestSuite(S).run() sage: TestSuite(S.an_element()).run() """ from sage.rings.integer import Integer from sage.groups.group import FiniteGroup from sage.structure.unique_representation import UniqueRepresentation from sage.categories.action import Action from sage.combinat.permutation import Permutation from sage.groups.semimonomial_transformations.semimonomial_transformation import SemimonomialTransformation class SemimonomialTransformationGroup(FiniteGroup, UniqueRepresentation): r""" A semimonomial transformation group over a ring. The semimonomial transformation group of degree `n` over a ring `R` is the semidirect product of the monomial transformation group of degree `n` (also known as the complete monomial group over the group of units `R^{\times}` of `R`) and the group of ring automorphisms. The multiplication of two elements `(\phi, \pi, \alpha)(\psi, \sigma, \beta)` with - `\phi, \psi \in {R^{\times}}^n` - `\pi, \sigma \in S_n` (with the multiplication `\pi\sigma` done from left to right (like in GAP) -- that is, `(\pi\sigma)(i) = \sigma(\pi(i))` for all `i`.) - `\alpha, \beta \in Aut(R)` is defined by .. MATH:: (\phi, \pi, \alpha)(\psi, \sigma, \beta) = (\phi \cdot \psi^{\pi, \alpha}, \pi\sigma, \alpha \circ \beta) where `\psi^{\pi, \alpha} = (\alpha(\psi_{\pi(1)-1}), \ldots, \alpha(\psi_{\pi(n)-1}))` and the multiplication of vectors is defined elementwisely. (The indexing of vectors is `0`-based here, so `\psi = (\psi_0, \psi_1, \ldots, \psi_{n-1})`.) .. TODO:: Up to now, this group is only implemented for finite fields because of the limited support of automorphisms for arbitrary rings. 
EXAMPLES:: sage: F.<a> = GF(9) sage: S = SemimonomialTransformationGroup(F, 4) sage: g = S(v = [2, a, 1, 2]) sage: h = S(perm = Permutation('(1,2,3,4)'), autom=F.hom([a**3])) sage: g*h ((2, a, 1, 2); (1,2,3,4), Ring endomorphism of Finite Field in a of size 3^2 Defn: a |--> 2*a + 1) sage: h*g ((2*a + 1, 1, 2, 2); (1,2,3,4), Ring endomorphism of Finite Field in a of size 3^2 Defn: a |--> 2*a + 1) sage: S(g) ((2, a, 1, 2); (), Ring endomorphism of Finite Field in a of size 3^2 Defn: a |--> a) sage: S(1) ((1, 1, 1, 1); (), Ring endomorphism of Finite Field in a of size 3^2 Defn: a |--> a) """ Element = SemimonomialTransformation def __init__(self, R, len): r""" Initialization. INPUT: - ``R`` -- a ring - ``len`` -- the degree of the monomial group OUTPUT: - the complete semimonomial group EXAMPLES:: sage: F.<a> = GF(9) sage: S = SemimonomialTransformationGroup(F, 4) """ if not R.is_field(): raise NotImplementedError('the ring must be a field') self._R = R self._len = len from sage.categories.finite_groups import FiniteGroups super(SemimonomialTransformationGroup, self).__init__(category=FiniteGroups()) def _element_constructor_(self, arg1, v=None, perm=None, autom=None, check=True): r""" Coerce ``arg1`` into this permutation group, if ``arg1`` is 0, then we will try to coerce ``(v, perm, autom)``. INPUT: - ``arg1`` (optional) -- either the integers 0, 1 or an element of ``self`` - ``v`` (optional) -- a vector of length ``self.degree()`` - ``perm`` (optional) -- a permutation of degree ``self.degree()`` - ``autom`` (optional) -- an automorphism of the ring EXAMPLES:: sage: F.<a> = GF(9) sage: S = SemimonomialTransformationGroup(F, 4) sage: S(1) ((1, 1, 1, 1); (), Ring endomorphism of Finite Field in a of size 3^2 Defn: a |--> a) sage: g = S(v=[1,1,1,a]) sage: S(g) ((1, 1, 1, a); (), Ring endomorphism of Finite Field in a of size 3^2 Defn: a |--> a) sage: S(perm=Permutation('(1,2)(3,4)')) ((1, 1, 1, 1); (1,2)(3,4), Ring endomorphism of Finite Field in a of size 3^2 Defn: a |--> a) sage: S(autom=F.hom([a**3])) ((1, 1, 1, 1); (), Ring endomorphism of Finite Field in a of size 3^2 Defn: a |--> 2*a + 1) """ from sage.categories.homset import End R = self.base_ring() if arg1 == 0: if v is None: v = [R.one()] * self.degree() if perm is None: perm = Permutation(range(1, self.degree() + 1)) if autom is None: autom = R.hom(R.gens()) if check: try: v = [R(x) for x in v] except TypeError: raise TypeError('the vector attribute %s ' % v + 'should be iterable') if len(v) != self.degree(): raise ValueError('the length of the vector is %s,' % len(v) + ' should be %s' % self.degree()) if not all(x.parent() is R and x.is_unit() for x in v): raise ValueError('there is at least one element in the ' + 'list %s not lying in %s ' % (v, R) + 'or which is not invertible') try: perm = Permutation(perm) except TypeError: raise TypeError('the permutation attribute %s ' % perm + 'could not be converted to a permutation') if len(perm) != self.degree(): txt = 'the permutation length is {}, should be {}' raise ValueError(txt.format(len(perm), self.degree())) try: if autom.parent() != End(R): autom = End(R)(autom) except TypeError: raise TypeError('%s of type %s' % (autom, type(autom)) + ' is not coerceable to an automorphism') return self.Element(self, v, perm, autom) else: try: if arg1.parent() is self: return arg1 except AttributeError: pass try: from sage.rings.integer import Integer if Integer(arg1) == 1: return self() except TypeError: pass raise TypeError('the first argument must be an integer' + ' or an element of this 
group') def base_ring(self): r""" Return the underlying ring of ``self``. EXAMPLES:: sage: F.<a> = GF(4) sage: SemimonomialTransformationGroup(F, 3).base_ring() is F True """ return self._R def degree(self) -> Integer: r""" Return the degree of ``self``. EXAMPLES:: sage: F.<a> = GF(4) sage: SemimonomialTransformationGroup(F, 3).degree() 3 """ return self._len def _an_element_(self): r""" Return an element of ``self``. EXAMPLES:: sage: F.<a> = GF(4) sage: SemimonomialTransformationGroup(F, 3).an_element() # indirect doctest ((a, 1, 1); (1,3,2), Ring endomorphism of Finite Field in a of size 2^2 Defn: a |--> a + 1) """ R = self.base_ring() v = [R.primitive_element()] + [R.one()] * (self.degree() - 1) p = Permutation([self.degree()] + [i for i in range(1, self.degree())]) if not R.is_prime_field(): f = R.hom([R.gen()**R.characteristic()]) else: f = R.Hom(R).identity() return self(0, v, p, f) def __contains__(self, item) -> bool: r""" EXAMPLES:: sage: F.<a> = GF(4) sage: S = SemimonomialTransformationGroup(F, 3) sage: 1 in S # indirect doctest True sage: a in S # indirect doctest False """ try: self(item, check=True) except TypeError: return False return True def gens(self): r""" Return a tuple of generators of ``self``. EXAMPLES:: sage: F.<a> = GF(4) sage: SemimonomialTransformationGroup(F, 3).gens() [((a, 1, 1); (), Ring endomorphism of Finite Field in a of size 2^2 Defn: a |--> a), ((1, 1, 1); (1,2,3), Ring endomorphism of Finite Field in a of size 2^2 Defn: a |--> a), ((1, 1, 1); (1,2), Ring endomorphism of Finite Field in a of size 2^2 Defn: a |--> a), ((1, 1, 1); (), Ring endomorphism of Finite Field in a of size 2^2 Defn: a |--> a + 1)] """ from sage.groups.perm_gps.permgroup_named import SymmetricGroup R = self.base_ring() l = [self(v=([R.primitive_element()] + [R.one()] * (self.degree() - 1)))] for g in SymmetricGroup(self.degree()).gens(): l.append(self(perm=Permutation(g))) if R.is_field() and not R.is_prime_field(): l.append(self(autom=R.hom([R.primitive_element()**R.characteristic()]))) return l def order(self) -> Integer: r""" Return the number of elements of ``self``. EXAMPLES:: sage: F.<a> = GF(4) sage: SemimonomialTransformationGroup(F, 5).order() == (4-1)**5 * factorial(5) * 2 True """ from sage.functions.other import factorial from sage.categories.homset import End n = self.degree() R = self.base_ring() if R.is_field(): multgroup_size = len(R) - 1 autgroup_size = R.degree() else: multgroup_size = R.unit_group_order() autgroup_size = len([x for x in End(R) if x.is_injective()]) return multgroup_size**n * factorial(n) * autgroup_size def _get_action_(self, X, op, self_on_left): r""" If ``self`` is the semimonomial group of degree `n` over `R`, then there is the natural action on `R^n` and on matrices `R^{m \times n}` for arbitrary integers `m` from the left. See also: :class:`~sage.groups.semimonomial_transformations.semimonomial_transformation_group.SemimonomialActionVec` and :class:`~sage.groups.semimonomial_transformations.semimonomial_transformation_group.SemimonomialActionMat` EXAMPLES:: sage: F.<a> = GF(4) sage: s = SemimonomialTransformationGroup(F, 3).an_element() sage: v = (F**3).0 sage: s*v # indirect doctest (0, 1, 0) sage: M = MatrixSpace(F, 3).one() sage: s*M # indirect doctest [ 0 1 0] [ 0 0 1] [a + 1 0 0] """ if self_on_left: try: A = SemimonomialActionVec(self, X) return A except ValueError: pass try: A = SemimonomialActionMat(self, X) return A except ValueError: pass return None def _repr_(self) -> str: r""" Return a string describing ``self``. 
EXAMPLES:: sage: F.<a> = GF(4) sage: SemimonomialTransformationGroup(F, 3) # indirect doctest Semimonomial transformation group over Finite Field in a of size 2^2 of degree 3 """ return ('Semimonomial transformation group over %s' % self.base_ring() + ' of degree %s' % self.degree()) def _latex_(self) -> str: r""" Method for describing ``self`` in LaTeX. EXAMPLES:: sage: F.<a> = GF(4) sage: latex(SemimonomialTransformationGroup(F, 3)) # indirect doctest \left(\Bold{F}_{2^{2}}^3\wr\langle (1,2,3), (1,2) \rangle \right) \rtimes \operatorname{Aut}(\Bold{F}_{2^{2}}) """ from sage.groups.perm_gps.permgroup_named import SymmetricGroup ring_latex = self.base_ring()._latex_() return ('\\left(' + ring_latex + '^' + str(self.degree()) + '\\wr' + SymmetricGroup(self.degree())._latex_() + ' \\right) \\rtimes \\operatorname{Aut}(' + ring_latex + ')') class SemimonomialActionVec(Action): r""" The natural left action of the semimonomial group on vectors. The action is defined by: `(\phi, \pi, \alpha)*(v_0, \ldots, v_{n-1}) := (\alpha(v_{\pi(1)-1}) \cdot \phi_0^{-1}, \ldots, \alpha(v_{\pi(n)-1}) \cdot \phi_{n-1}^{-1})`. (The indexing of vectors is `0`-based here, so `\psi = (\psi_0, \psi_1, \ldots, \psi_{n-1})`.) """ def __init__(self, G, V, check=True): r""" Initialization. EXAMPLES:: sage: F.<a> = GF(4) sage: s = SemimonomialTransformationGroup(F, 3).an_element() sage: v = (F**3).1 sage: s*v # indirect doctest (0, 0, 1) """ if check: from sage.modules.free_module import FreeModule_generic if not isinstance(G, SemimonomialTransformationGroup): raise ValueError('%s is not a semimonomial group' % G) if not isinstance(V, FreeModule_generic): raise ValueError('%s is not a free module' % V) if V.ambient_module() != V: raise ValueError('%s is not equal to its ambient module' % V) if V.dimension() != G.degree(): raise ValueError('%s has a dimension different to the degree of %s' % (V, G)) if V.base_ring() != G.base_ring(): raise ValueError('%s and %s have different base rings' % (V, G)) Action.__init__(self, G, V.dense_module()) def _act_(self, a, b): r""" Apply the semimonomial group element `a` to the vector `b`. EXAMPLES:: sage: F.<a> = GF(4) sage: s = SemimonomialTransformationGroup(F, 3).an_element() sage: v = (F**3).1 sage: s*v # indirect doctest (0, 0, 1) """ b = b.apply_map(a.get_autom()) b = self.codomain()(a.get_perm().action(b)) return b.pairwise_product(self.codomain()(a.get_v_inverse())) class SemimonomialActionMat(Action): r""" The left action of :class:`~sage.groups.semimonomial_transformations.semimonomial_transformation_group.SemimonomialTransformationGroup` on matrices over the same ring whose number of columns is equal to the degree. See :class:`~sage.groups.semimonomial_transformations.semimonomial_transformation_group.SemimonomialActionVec` for the definition of the action on the row vectors of such a matrix. """ def __init__(self, G, M, check=True): r""" Initialization. 
EXAMPLES:: sage: F.<a> = GF(4) sage: s = SemimonomialTransformationGroup(F, 3).an_element() sage: M = MatrixSpace(F, 3).one() sage: s*M # indirect doctest [ 0 1 0] [ 0 0 1] [a + 1 0 0] """ if check: from sage.matrix.matrix_space import MatrixSpace if not isinstance(G, SemimonomialTransformationGroup): raise ValueError('%s is not a semimonomial group' % G) if not isinstance(M, MatrixSpace): raise ValueError('%s is not a matrix space' % M) if M.ncols() != G.degree(): raise ValueError('the number of columns of %s' % M + ' and the degree of %s are different' % G) if M.base_ring() != G.base_ring(): raise ValueError('%s and %s have different base rings' % (M, G)) Action.__init__(self, G, M) def _act_(self, a, b): r""" Apply the semimonomial group element `a` to the matrix `b`. EXAMPLES:: sage: F.<a> = GF(4) sage: s = SemimonomialTransformationGroup(F, 3).an_element() sage: M = MatrixSpace(F, 3).one() sage: s*M # indirect doctest [ 0 1 0] [ 0 0 1] [a + 1 0 0] """ return self.codomain()([a * x for x in b.rows()])
35.926733
122
0.532933
from sage.rings.integer import Integer from sage.groups.group import FiniteGroup from sage.structure.unique_representation import UniqueRepresentation from sage.categories.action import Action from sage.combinat.permutation import Permutation from sage.groups.semimonomial_transformations.semimonomial_transformation import SemimonomialTransformation class SemimonomialTransformationGroup(FiniteGroup, UniqueRepresentation): Element = SemimonomialTransformation def __init__(self, R, len): if not R.is_field(): raise NotImplementedError('the ring must be a field') self._R = R self._len = len from sage.categories.finite_groups import FiniteGroups super(SemimonomialTransformationGroup, self).__init__(category=FiniteGroups()) def _element_constructor_(self, arg1, v=None, perm=None, autom=None, check=True): from sage.categories.homset import End R = self.base_ring() if arg1 == 0: if v is None: v = [R.one()] * self.degree() if perm is None: perm = Permutation(range(1, self.degree() + 1)) if autom is None: autom = R.hom(R.gens()) if check: try: v = [R(x) for x in v] except TypeError: raise TypeError('the vector attribute %s ' % v + 'should be iterable') if len(v) != self.degree(): raise ValueError('the length of the vector is %s,' % len(v) + ' should be %s' % self.degree()) if not all(x.parent() is R and x.is_unit() for x in v): raise ValueError('there is at least one element in the ' + 'list %s not lying in %s ' % (v, R) + 'or which is not invertible') try: perm = Permutation(perm) except TypeError: raise TypeError('the permutation attribute %s ' % perm + 'could not be converted to a permutation') if len(perm) != self.degree(): txt = 'the permutation length is {}, should be {}' raise ValueError(txt.format(len(perm), self.degree())) try: if autom.parent() != End(R): autom = End(R)(autom) except TypeError: raise TypeError('%s of type %s' % (autom, type(autom)) + ' is not coerceable to an automorphism') return self.Element(self, v, perm, autom) else: try: if arg1.parent() is self: return arg1 except AttributeError: pass try: from sage.rings.integer import Integer if Integer(arg1) == 1: return self() except TypeError: pass raise TypeError('the first argument must be an integer' + ' or an element of this group') def base_ring(self): return self._R def degree(self) -> Integer: return self._len def _an_element_(self): R = self.base_ring() v = [R.primitive_element()] + [R.one()] * (self.degree() - 1) p = Permutation([self.degree()] + [i for i in range(1, self.degree())]) if not R.is_prime_field(): f = R.hom([R.gen()**R.characteristic()]) else: f = R.Hom(R).identity() return self(0, v, p, f) def __contains__(self, item) -> bool: try: self(item, check=True) except TypeError: return False return True def gens(self): from sage.groups.perm_gps.permgroup_named import SymmetricGroup R = self.base_ring() l = [self(v=([R.primitive_element()] + [R.one()] * (self.degree() - 1)))] for g in SymmetricGroup(self.degree()).gens(): l.append(self(perm=Permutation(g))) if R.is_field() and not R.is_prime_field(): l.append(self(autom=R.hom([R.primitive_element()**R.characteristic()]))) return l def order(self) -> Integer: from sage.functions.other import factorial from sage.categories.homset import End n = self.degree() R = self.base_ring() if R.is_field(): multgroup_size = len(R) - 1 autgroup_size = R.degree() else: multgroup_size = R.unit_group_order() autgroup_size = len([x for x in End(R) if x.is_injective()]) return multgroup_size**n * factorial(n) * autgroup_size def _get_action_(self, X, op, self_on_left): if 
self_on_left: try: A = SemimonomialActionVec(self, X) return A except ValueError: pass try: A = SemimonomialActionMat(self, X) return A except ValueError: pass return None def _repr_(self) -> str: return ('Semimonomial transformation group over %s' % self.base_ring() + ' of degree %s' % self.degree()) def _latex_(self) -> str: from sage.groups.perm_gps.permgroup_named import SymmetricGroup ring_latex = self.base_ring()._latex_() return ('\\left(' + ring_latex + '^' + str(self.degree()) + '\\wr' + SymmetricGroup(self.degree())._latex_() + ' \\right) \\rtimes \\operatorname{Aut}(' + ring_latex + ')') class SemimonomialActionVec(Action): def __init__(self, G, V, check=True): if check: from sage.modules.free_module import FreeModule_generic if not isinstance(G, SemimonomialTransformationGroup): raise ValueError('%s is not a semimonomial group' % G) if not isinstance(V, FreeModule_generic): raise ValueError('%s is not a free module' % V) if V.ambient_module() != V: raise ValueError('%s is not equal to its ambient module' % V) if V.dimension() != G.degree(): raise ValueError('%s has a dimension different to the degree of %s' % (V, G)) if V.base_ring() != G.base_ring(): raise ValueError('%s and %s have different base rings' % (V, G)) Action.__init__(self, G, V.dense_module()) def _act_(self, a, b): b = b.apply_map(a.get_autom()) b = self.codomain()(a.get_perm().action(b)) return b.pairwise_product(self.codomain()(a.get_v_inverse())) class SemimonomialActionMat(Action): def __init__(self, G, M, check=True): if check: from sage.matrix.matrix_space import MatrixSpace if not isinstance(G, SemimonomialTransformationGroup): raise ValueError('%s is not a semimonomial group' % G) if not isinstance(M, MatrixSpace): raise ValueError('%s is not a matrix space' % M) if M.ncols() != G.degree(): raise ValueError('the number of columns of %s' % M + ' and the degree of %s are different' % G) if M.base_ring() != G.base_ring(): raise ValueError('%s and %s have different base rings' % (M, G)) Action.__init__(self, G, M) def _act_(self, a, b): return self.codomain()([a * x for x in b.rows()])
true
true
f700d48ef6a50160f2fb6218e975fdfbb19d16bb
199
py
Python
mbpo/static/reacher.py
AIDefender/MyMBPO
d75699b65af8eea14acffc1b5738900d1079ad46
[ "MIT" ]
null
null
null
mbpo/static/reacher.py
AIDefender/MyMBPO
d75699b65af8eea14acffc1b5738900d1079ad46
[ "MIT" ]
null
null
null
mbpo/static/reacher.py
AIDefender/MyMBPO
d75699b65af8eea14acffc1b5738900d1079ad46
[ "MIT" ]
null
null
null
import numpy as np


class StaticFns:

    @staticmethod
    def termination_fn(obs, act, next_obs):
        done = np.array([False]).repeat(len(obs))

        done = done[:,None]
        return done
18.090909
49
0.613065
import numpy as np


class StaticFns:

    @staticmethod
    def termination_fn(obs, act, next_obs):
        done = np.array([False]).repeat(len(obs))

        done = done[:,None]
        return done
true
true
f700d53f312546235262c209dd86926fb0027889
44,327
py
Python
networkx_mod/classes/digraph.py
movingpictures83/MATria
d3dbd0d15e00dbc26db39ace0663868180fdc471
[ "BSD-3-Clause", "MIT" ]
null
null
null
networkx_mod/classes/digraph.py
movingpictures83/MATria
d3dbd0d15e00dbc26db39ace0663868180fdc471
[ "BSD-3-Clause", "MIT" ]
null
null
null
networkx_mod/classes/digraph.py
movingpictures83/MATria
d3dbd0d15e00dbc26db39ace0663868180fdc471
[ "BSD-3-Clause", "MIT" ]
null
null
null
"""Base class for directed graphs.""" # Copyright (C) 2004-2015 by # Aric Hagberg <[email protected]> # Dan Schult <[email protected]> # Pieter Swart <[email protected]> # All rights reserved. # BSD license. from copy import deepcopy import networkx_mod as nx from networkx_mod.classes.graph import Graph from networkx_mod.exception import NetworkXError import networkx_mod.convert as convert __author__ = """\n""".join(['Aric Hagberg ([email protected])', 'Pieter Swart ([email protected])', 'Dan Schult([email protected])']) class DiGraph(Graph): """ Base class for directed graphs. A DiGraph stores nodes and edges with optional data, or attributes. DiGraphs hold directed edges. Self loops are allowed but multiple (parallel) edges are not. Nodes can be arbitrary (hashable) Python objects with optional key/value attributes. Edges are represented as links between nodes with optional key/value attributes. Parameters ---------- data : input graph Data to initialize graph. If data=None (default) an empty graph is created. The data can be an edge list, or any NetworkX graph object. If the corresponding optional Python packages are installed the data can also be a NumPy matrix or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph. attr : keyword arguments, optional (default= no attributes) Attributes to add to graph as key=value pairs. See Also -------- Graph MultiGraph MultiDiGraph Examples -------- Create an empty graph structure (a "null graph") with no nodes and no edges. >>> G = nx.DiGraph() G can be grown in several ways. **Nodes:** Add one node at a time: >>> G.add_node(1) Add the nodes from any container (a list, dict, set or even the lines from a file or the nodes from another graph). >>> G.add_nodes_from([2,3]) >>> G.add_nodes_from(range(100,110)) >>> H=nx.Graph() >>> H.add_path([0,1,2,3,4,5,6,7,8,9]) >>> G.add_nodes_from(H) In addition to strings and integers any hashable Python object (except None) can represent a node, e.g. a customized node object, or even another Graph. >>> G.add_node(H) **Edges:** G can also be grown by adding edges. Add one edge, >>> G.add_edge(1, 2) a list of edges, >>> G.add_edges_from([(1,2),(1,3)]) or a collection of edges, >>> G.add_edges_from(H.edges()) If some edges connect nodes not yet in the graph, the nodes are added automatically. There are no errors when adding nodes or edges that already exist. **Attributes:** Each graph, node, and edge can hold key/value attribute pairs in an associated attribute dictionary (the keys must be hashable). By default these are empty, but can be added or changed using add_edge, add_node or direct manipulation of the attribute dictionaries named graph, node and edge respectively. >>> G = nx.DiGraph(day="Friday") >>> G.graph {'day': 'Friday'} Add node attributes using add_node(), add_nodes_from() or G.node >>> G.add_node(1, time='5pm') >>> G.add_nodes_from([3], time='2pm') >>> G.node[1] {'time': '5pm'} >>> G.node[1]['room'] = 714 >>> del G.node[1]['room'] # remove attribute >>> G.nodes(data=True) [(1, {'time': '5pm'}), (3, {'time': '2pm'})] Warning: adding a node to G.node does not add it to the graph. Add edge attributes using add_edge(), add_edges_from(), subscript notation, or G.edge. >>> G.add_edge(1, 2, weight=4.7 ) >>> G.add_edges_from([(3,4),(4,5)], color='red') >>> G.add_edges_from([(1,2,{'color':'blue'}), (2,3,{'weight':8})]) >>> G[1][2]['weight'] = 4.7 >>> G.edge[1][2]['weight'] = 4 **Shortcuts:** Many common graph features allow python syntax to speed reporting. 
>>> 1 in G # check if node in graph True >>> [n for n in G if n<3] # iterate through nodes [1, 2] >>> len(G) # number of nodes in graph 5 The fastest way to traverse all edges of a graph is via adjacency_iter(), but the edges() method is often more convenient. >>> for n,nbrsdict in G.adjacency_iter(): ... for nbr,eattr in nbrsdict.items(): ... if 'weight' in eattr: ... (n,nbr,eattr['weight']) (1, 2, 4) (2, 3, 8) >>> G.edges(data='weight') [(1, 2, 4), (2, 3, 8), (3, 4, None), (4, 5, None)] **Reporting:** Simple graph information is obtained using methods. Iterator versions of many reporting methods exist for efficiency. Methods exist for reporting nodes(), edges(), neighbors() and degree() as well as the number of nodes and edges. For details on these and other miscellaneous methods, see below. **Subclasses (Advanced):** The Graph class uses a dict-of-dict-of-dict data structure. The outer dict (node_dict) holds adjacency lists keyed by node. The next dict (adjlist) represents the adjacency list and holds edge data keyed by neighbor. The inner dict (edge_attr) represents the edge data and holds edge attribute values keyed by attribute names. Each of these three dicts can be replaced by a user defined dict-like object. In general, the dict-like features should be maintained but extra features can be added. To replace one of the dicts create a new graph class by changing the class(!) variable holding the factory for that dict-like structure. The variable names are node_dict_factory, adjlist_dict_factory and edge_attr_dict_factory. node_dict_factory : function, optional (default: dict) Factory function to be used to create the outer-most dict in the data structure that holds adjacency lists keyed by node. It should require no arguments and return a dict-like object. adjlist_dict_factory : function, optional (default: dict) Factory function to be used to create the adjacency list dict which holds edge data keyed by neighbor. It should require no arguments and return a dict-like object edge_attr_dict_factory : function, optional (default: dict) Factory function to be used to create the edge attribute dict which holds attrbute values keyed by attribute name. It should require no arguments and return a dict-like object. Examples -------- Create a graph object that tracks the order nodes are added. >>> from collections import OrderedDict >>> class OrderedNodeGraph(nx.Graph): ... node_dict_factory=OrderedDict >>> G=OrderedNodeGraph() >>> G.add_nodes_from( (2,1) ) >>> G.nodes() [2, 1] >>> G.add_edges_from( ((2,2), (2,1), (1,1)) ) >>> G.edges() [(2, 1), (2, 2), (1, 1)] Create a graph object that tracks the order nodes are added and for each node track the order that neighbors are added. >>> class OrderedGraph(nx.Graph): ... node_dict_factory = OrderedDict ... adjlist_dict_factory = OrderedDict >>> G = OrderedGraph() >>> G.add_nodes_from( (2,1) ) >>> G.nodes() [2, 1] >>> G.add_edges_from( ((2,2), (2,1), (1,1)) ) >>> G.edges() [(2, 2), (2, 1), (1, 1)] Create a low memory graph class that effectively disallows edge attributes by using a single attribute dict for all edges. This reduces the memory used, but you lose edge attributes. >>> class ThinGraph(nx.Graph): ... all_edge_dict = {'weight': 1} ... def single_edge_dict(self): ... return self.all_edge_dict ... 
edge_attr_dict_factory = single_edge_dict >>> G = ThinGraph() >>> G.add_edge(2,1) >>> G.edges(data= True) [(1, 2, {'weight': 1})] >>> G.add_edge(2,2) >>> G[2][1] is G[2][2] True """ def __init__(self, data=None, **attr): """Initialize a graph with edges, name, graph attributes. Parameters ---------- data : input graph Data to initialize graph. If data=None (default) an empty graph is created. The data can be an edge list, or any NetworkX graph object. If the corresponding optional Python packages are installed the data can also be a NumPy matrix or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph. name : string, optional (default='') An optional name for the graph. attr : keyword arguments, optional (default= no attributes) Attributes to add to graph as key=value pairs. See Also -------- convert Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G = nx.Graph(name='my graph') >>> e = [(1,2),(2,3),(3,4)] # list of edges >>> G = nx.Graph(e) Arbitrary graph attribute pairs (key=value) may be assigned >>> G=nx.Graph(e, day="Friday") >>> G.graph {'day': 'Friday'} """ self.node_dict_factory = ndf = self.node_dict_factory self.adjlist_dict_factory = self.adjlist_dict_factory self.edge_attr_dict_factory = self.edge_attr_dict_factory self.graph = {} # dictionary for graph attributes self.node = ndf() # dictionary for node attributes # We store two adjacency lists: # the predecessors of node n are stored in the dict self.pred # the successors of node n are stored in the dict self.succ=self.adj self.adj = ndf() # empty adjacency dictionary self.pred = ndf() # predecessor self.succ = self.adj # successor # attempt to load graph with data if data is not None: convert.to_networkx_mod_graph(data,create_using=self) # load graph attributes (must be after convert) self.graph.update(attr) self.edge=self.adj def add_node(self, n, attr_dict=None, **attr): """Add a single node n and update node attributes. Parameters ---------- n : node A node can be any hashable Python object except None. attr_dict : dictionary, optional (default= no attributes) Dictionary of node attributes. Key/value pairs will update existing data associated with the node. attr : keyword arguments, optional Set or change attributes using key=value. See Also -------- add_nodes_from Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_node(1) >>> G.add_node('Hello') >>> K3 = nx.Graph([(0,1),(1,2),(2,0)]) >>> G.add_node(K3) >>> G.number_of_nodes() 3 Use keywords set/change node attributes: >>> G.add_node(1,size=10) >>> G.add_node(3,weight=0.4,UTM=('13S',382871,3972649)) Notes ----- A hashable object is one that can be used as a key in a Python dictionary. This includes strings, numbers, tuples of strings and numbers, etc. On many platforms hashable items also include mutables such as NetworkX Graphs, though one should be careful that the hash doesn't change on mutables. """ # set up attribute dict if attr_dict is None: attr_dict=attr else: try: attr_dict.update(attr) except AttributeError: raise NetworkXError(\ "The attr_dict argument must be a dictionary.") if n not in self.succ: self.succ[n] = self.adjlist_dict_factory() self.pred[n] = self.adjlist_dict_factory() self.node[n] = attr_dict else: # update attr even if node already exists self.node[n].update(attr_dict) def add_nodes_from(self, nodes, **attr): """Add multiple nodes. Parameters ---------- nodes : iterable container A container of nodes (list, dict, set, etc.). 
OR A container of (node, attribute dict) tuples. Node attributes are updated using the attribute dict. attr : keyword arguments, optional (default= no attributes) Update attributes for all nodes in nodes. Node attributes specified in nodes as a tuple take precedence over attributes specified generally. See Also -------- add_node Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_nodes_from('Hello') >>> K3 = nx.Graph([(0,1),(1,2),(2,0)]) >>> G.add_nodes_from(K3) >>> sorted(G.nodes(),key=str) [0, 1, 2, 'H', 'e', 'l', 'o'] Use keywords to update specific node attributes for every node. >>> G.add_nodes_from([1,2], size=10) >>> G.add_nodes_from([3,4], weight=0.4) Use (node, attrdict) tuples to update attributes for specific nodes. >>> G.add_nodes_from([(1,dict(size=11)), (2,{'color':'blue'})]) >>> G.node[1]['size'] 11 >>> H = nx.Graph() >>> H.add_nodes_from(G.nodes(data=True)) >>> H.node[1]['size'] 11 """ for n in nodes: # keep all this inside try/except because # CPython throws TypeError on n not in self.succ, # while pre-2.7.5 ironpython throws on self.succ[n] try: if n not in self.succ: self.succ[n] = self.adjlist_dict_factory() self.pred[n] = self.adjlist_dict_factory() self.node[n] = attr.copy() else: self.node[n].update(attr) except TypeError: nn,ndict = n if nn not in self.succ: self.succ[nn] = self.adjlist_dict_factory() self.pred[nn] = self.adjlist_dict_factory() newdict = attr.copy() newdict.update(ndict) self.node[nn] = newdict else: olddict = self.node[nn] olddict.update(attr) olddict.update(ndict) def remove_node(self, n): """Remove node n. Removes the node n and all adjacent edges. Attempting to remove a non-existent node will raise an exception. Parameters ---------- n : node A node in the graph Raises ------- NetworkXError If n is not in the graph. See Also -------- remove_nodes_from Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2]) >>> G.edges() [(0, 1), (1, 2)] >>> G.remove_node(1) >>> G.edges() [] """ try: nbrs=self.succ[n] del self.node[n] except KeyError: # NetworkXError if n not in self raise NetworkXError("The node %s is not in the digraph."%(n,)) for u in nbrs: del self.pred[u][n] # remove all edges n-u in digraph del self.succ[n] # remove node from succ for u in self.pred[n]: del self.succ[u][n] # remove all edges n-u in digraph del self.pred[n] # remove node from pred def remove_nodes_from(self, nbunch): """Remove multiple nodes. Parameters ---------- nodes : iterable container A container of nodes (list, dict, set, etc.). If a node in the container is not in the graph it is silently ignored. See Also -------- remove_node Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2]) >>> e = G.nodes() >>> e [0, 1, 2] >>> G.remove_nodes_from(e) >>> G.nodes() [] """ for n in nbunch: try: succs=self.succ[n] del self.node[n] for u in succs: del self.pred[u][n] # remove all edges n-u in digraph del self.succ[n] # now remove node for u in self.pred[n]: del self.succ[u][n] # remove all edges n-u in digraph del self.pred[n] # now remove node except KeyError: pass # silent failure on remove def add_edge(self, u, v, attr_dict=None, **attr): """Add an edge between u and v. The nodes u and v will be automatically added if they are not already in the graph. Edge attributes can be specified with keywords or by providing a dictionary with key/value pairs. See examples below. 
Parameters ---------- u,v : nodes Nodes can be, for example, strings or numbers. Nodes must be hashable (and not None) Python objects. attr_dict : dictionary, optional (default= no attributes) Dictionary of edge attributes. Key/value pairs will update existing data associated with the edge. attr : keyword arguments, optional Edge data (or labels or objects) can be assigned using keyword arguments. See Also -------- add_edges_from : add a collection of edges Notes ----- Adding an edge that already exists updates the edge data. Many NetworkX algorithms designed for weighted graphs use as the edge weight a numerical value assigned to a keyword which by default is 'weight'. Examples -------- The following all add the edge e=(1,2) to graph G: >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> e = (1,2) >>> G.add_edge(1, 2) # explicit two-node form >>> G.add_edge(*e) # single edge as tuple of two nodes >>> G.add_edges_from( [(1,2)] ) # add edges from iterable container Associate data to edges using keywords: >>> G.add_edge(1, 2, weight=3) >>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7) """ # set up attribute dict if attr_dict is None: attr_dict=attr else: try: attr_dict.update(attr) except AttributeError: raise NetworkXError(\ "The attr_dict argument must be a dictionary.") # add nodes if u not in self.succ: self.succ[u]= self.adjlist_dict_factory() self.pred[u]= self.adjlist_dict_factory() self.node[u] = {} if v not in self.succ: self.succ[v]= self.adjlist_dict_factory() self.pred[v]= self.adjlist_dict_factory() self.node[v] = {} # add the edge datadict=self.adj[u].get(v,self.edge_attr_dict_factory()) datadict.update(attr_dict) self.succ[u][v]=datadict self.pred[v][u]=datadict def add_edges_from(self, ebunch, attr_dict=None, **attr): """Add all the edges in ebunch. Parameters ---------- ebunch : container of edges Each edge given in the container will be added to the graph. The edges must be given as as 2-tuples (u,v) or 3-tuples (u,v,d) where d is a dictionary containing edge data. attr_dict : dictionary, optional (default= no attributes) Dictionary of edge attributes. Key/value pairs will update existing data associated with each edge. attr : keyword arguments, optional Edge data (or labels or objects) can be assigned using keyword arguments. See Also -------- add_edge : add a single edge add_weighted_edges_from : convenient way to add weighted edges Notes ----- Adding the same edge twice has no effect but any edge data will be updated when each duplicate edge is added. Edge attributes specified in edges take precedence over attributes specified generally. 
Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_edges_from([(0,1),(1,2)]) # using a list of edge tuples >>> e = zip(range(0,3),range(1,4)) >>> G.add_edges_from(e) # Add the path graph 0-1-2-3 Associate data to edges >>> G.add_edges_from([(1,2),(2,3)], weight=3) >>> G.add_edges_from([(3,4),(1,4)], label='WN2898') """ # set up attribute dict if attr_dict is None: attr_dict=attr else: try: attr_dict.update(attr) except AttributeError: raise NetworkXError(\ "The attr_dict argument must be a dict.") # process ebunch for e in ebunch: ne = len(e) if ne==3: u,v,dd = e assert hasattr(dd,"update") elif ne==2: u,v = e dd = {} else: raise NetworkXError(\ "Edge tuple %s must be a 2-tuple or 3-tuple."%(e,)) if u not in self.succ: self.succ[u] = self.adjlist_dict_factory() self.pred[u] = self.adjlist_dict_factory() self.node[u] = {} if v not in self.succ: self.succ[v] = self.adjlist_dict_factory() self.pred[v] = self.adjlist_dict_factory() self.node[v] = {} datadict=self.adj[u].get(v,self.edge_attr_dict_factory()) datadict.update(attr_dict) datadict.update(dd) self.succ[u][v] = datadict self.pred[v][u] = datadict def remove_edge(self, u, v): """Remove the edge between u and v. Parameters ---------- u,v: nodes Remove the edge between nodes u and v. Raises ------ NetworkXError If there is not an edge between u and v. See Also -------- remove_edges_from : remove a collection of edges Examples -------- >>> G = nx.Graph() # or DiGraph, etc >>> G.add_path([0,1,2,3]) >>> G.remove_edge(0,1) >>> e = (1,2) >>> G.remove_edge(*e) # unpacks e from an edge tuple >>> e = (2,3,{'weight':7}) # an edge with attribute data >>> G.remove_edge(*e[:2]) # select first part of edge tuple """ try: del self.succ[u][v] del self.pred[v][u] except KeyError: raise NetworkXError("The edge %s-%s not in graph."%(u,v)) def remove_edges_from(self, ebunch): """Remove all edges specified in ebunch. Parameters ---------- ebunch: list or container of edge tuples Each edge given in the list or container will be removed from the graph. The edges can be: - 2-tuples (u,v) edge between u and v. - 3-tuples (u,v,k) where k is ignored. See Also -------- remove_edge : remove a single edge Notes ----- Will fail silently if an edge in ebunch is not in the graph. Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2,3]) >>> ebunch=[(1,2),(2,3)] >>> G.remove_edges_from(ebunch) """ for e in ebunch: (u,v)=e[:2] # ignore edge data if u in self.succ and v in self.succ[u]: del self.succ[u][v] del self.pred[v][u] def has_successor(self, u, v): """Return True if node u has successor v. This is true if graph has the edge u->v. """ return (u in self.succ and v in self.succ[u]) def has_predecessor(self, u, v): """Return True if node u has predecessor v. This is true if graph has the edge u<-v. """ return (u in self.pred and v in self.pred[u]) def successors_iter(self,n): """Return an iterator over successor nodes of n. neighbors_iter() and successors_iter() are the same. """ try: return iter(self.succ[n]) except KeyError: raise NetworkXError("The node %s is not in the digraph."%(n,)) def predecessors_iter(self,n): """Return an iterator over predecessor nodes of n.""" try: return iter(self.pred[n]) except KeyError: raise NetworkXError("The node %s is not in the digraph."%(n,)) def successors(self, n): """Return a list of successor nodes of n. neighbors() and successors() are the same function. 
""" return list(self.successors_iter(n)) def predecessors(self, n): """Return a list of predecessor nodes of n.""" return list(self.predecessors_iter(n)) # digraph definitions neighbors = successors neighbors_iter = successors_iter def edges_iter(self, nbunch=None, data=False, default=None): """Return an iterator over the edges. Edges are returned as tuples with optional data in the order (node, neighbor, data). Parameters ---------- nbunch : iterable container, optional (default= all nodes) A container of nodes. The container will be iterated through once. data : string or bool, optional (default=False) The edge attribute returned in 3-tuple (u,v,ddict[data]). If True, return edge attribute dict in 3-tuple (u,v,ddict). If False, return 2-tuple (u,v). default : value, optional (default=None) Value used for edges that dont have the requested attribute. Only relevant if data is not True or False. Returns ------- edge_iter : iterator An iterator of (u,v) or (u,v,d) tuples of edges. See Also -------- edges : return a list of edges Notes ----- Nodes in nbunch that are not in the graph will be (quietly) ignored. For directed graphs this returns the out-edges. Examples -------- >>> G = nx.DiGraph() # or MultiDiGraph, etc >>> G.add_path([0,1,2]) >>> G.add_edge(2,3,weight=5) >>> [e for e in G.edges_iter()] [(0, 1), (1, 2), (2, 3)] >>> list(G.edges_iter(data=True)) # default data is {} (empty dict) [(0, 1, {}), (1, 2, {}), (2, 3, {'weight': 5})] >>> list(G.edges_iter(data='weight', default=1)) [(0, 1, 1), (1, 2, 1), (2, 3, 5)] >>> list(G.edges_iter([0,2])) [(0, 1), (2, 3)] >>> list(G.edges_iter(0)) [(0, 1)] """ if nbunch is None: nodes_nbrs=self.adj.items() else: nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch)) if data is True: for n,nbrs in nodes_nbrs: for nbr,ddict in nbrs.items(): yield (n,nbr,ddict) elif data is not False: for n,nbrs in nodes_nbrs: for nbr,ddict in nbrs.items(): d=ddict[data] if data in ddict else default yield (n,nbr,d) else: for n,nbrs in nodes_nbrs: for nbr in nbrs: yield (n,nbr) # alias out_edges to edges out_edges_iter=edges_iter out_edges=Graph.edges def in_edges_iter(self, nbunch=None, data=False): """Return an iterator over the incoming edges. Parameters ---------- nbunch : iterable container, optional (default= all nodes) A container of nodes. The container will be iterated through once. data : bool, optional (default=False) If True, return edge attribute dict in 3-tuple (u,v,data). Returns ------- in_edge_iter : iterator An iterator of (u,v) or (u,v,d) tuples of incoming edges. See Also -------- edges_iter : return an iterator of edges """ if nbunch is None: nodes_nbrs=self.pred.items() else: nodes_nbrs=((n,self.pred[n]) for n in self.nbunch_iter(nbunch)) if data: for n,nbrs in nodes_nbrs: for nbr,data in nbrs.items(): yield (nbr,n,data) else: for n,nbrs in nodes_nbrs: for nbr in nbrs: yield (nbr,n) def in_edges(self, nbunch=None, data=False): """Return a list of the incoming edges. See Also -------- edges : return a list of edges """ return list(self.in_edges_iter(nbunch, data)) def degree_iter(self, nbunch=None, weight=None): """Return an iterator for (node, degree). The node degree is the number of edges adjacent to the node. Parameters ---------- nbunch : iterable container, optional (default=all nodes) A container of nodes. The container will be iterated through once. weight : string or None, optional (default=None) The edge attribute that holds the numerical value used as a weight. If None, then each edge has weight 1. 
The degree is the sum of the edge weights adjacent to the node. Returns ------- nd_iter : an iterator The iterator returns two-tuples of (node, degree). See Also -------- degree, in_degree, out_degree, in_degree_iter, out_degree_iter Examples -------- >>> G = nx.DiGraph() # or MultiDiGraph >>> G.add_path([0,1,2,3]) >>> list(G.degree_iter(0)) # node 0 with degree 1 [(0, 1)] >>> list(G.degree_iter([0,1])) [(0, 1), (1, 2)] """ if nbunch is None: nodes_nbrs=zip(iter(self.succ.items()),iter(self.pred.items())) else: nodes_nbrs=zip( ((n,self.succ[n]) for n in self.nbunch_iter(nbunch)), ((n,self.pred[n]) for n in self.nbunch_iter(nbunch))) if weight is None: for (n,succ),(n2,pred) in nodes_nbrs: yield (n,len(succ)+len(pred)) else: # edge weighted graph - degree is sum of edge weights for (n,succ),(n2,pred) in nodes_nbrs: yield (n, sum((succ[nbr].get(weight,1) for nbr in succ))+ sum((pred[nbr].get(weight,1) for nbr in pred))) def in_degree_iter(self, nbunch=None, weight=None): """Return an iterator for (node, in-degree). The node in-degree is the number of edges pointing in to the node. Parameters ---------- nbunch : iterable container, optional (default=all nodes) A container of nodes. The container will be iterated through once. weight : string or None, optional (default=None) The edge attribute that holds the numerical value used as a weight. If None, then each edge has weight 1. The degree is the sum of the edge weights adjacent to the node. Returns ------- nd_iter : an iterator The iterator returns two-tuples of (node, in-degree). See Also -------- degree, in_degree, out_degree, out_degree_iter Examples -------- >>> G = nx.DiGraph() >>> G.add_path([0,1,2,3]) >>> list(G.in_degree_iter(0)) # node 0 with degree 0 [(0, 0)] >>> list(G.in_degree_iter([0,1])) [(0, 0), (1, 1)] """ if nbunch is None: nodes_nbrs=self.pred.items() else: nodes_nbrs=((n,self.pred[n]) for n in self.nbunch_iter(nbunch)) if weight is None: for n,nbrs in nodes_nbrs: yield (n,len(nbrs)) else: # edge weighted graph - degree is sum of edge weights for n,nbrs in nodes_nbrs: yield (n, sum(data.get(weight,1) for data in nbrs.values())) def out_degree_iter(self, nbunch=None, weight=None): """Return an iterator for (node, out-degree). The node out-degree is the number of edges pointing out of the node. Parameters ---------- nbunch : iterable container, optional (default=all nodes) A container of nodes. The container will be iterated through once. weight : string or None, optional (default=None) The edge attribute that holds the numerical value used as a weight. If None, then each edge has weight 1. The degree is the sum of the edge weights adjacent to the node. Returns ------- nd_iter : an iterator The iterator returns two-tuples of (node, out-degree). See Also -------- degree, in_degree, out_degree, in_degree_iter Examples -------- >>> G = nx.DiGraph() >>> G.add_path([0,1,2,3]) >>> list(G.out_degree_iter(0)) # node 0 with degree 1 [(0, 1)] >>> list(G.out_degree_iter([0,1])) [(0, 1), (1, 1)] """ if nbunch is None: nodes_nbrs=self.succ.items() else: nodes_nbrs=((n,self.succ[n]) for n in self.nbunch_iter(nbunch)) if weight is None: for n,nbrs in nodes_nbrs: yield (n,len(nbrs)) else: # edge weighted graph - degree is sum of edge weights for n,nbrs in nodes_nbrs: yield (n, sum(data.get(weight,1) for data in nbrs.values())) def in_degree(self, nbunch=None, weight=None): """Return the in-degree of a node or nodes. The node in-degree is the number of edges pointing in to the node. 
Parameters ---------- nbunch : iterable container, optional (default=all nodes) A container of nodes. The container will be iterated through once. weight : string or None, optional (default=None) The edge attribute that holds the numerical value used as a weight. If None, then each edge has weight 1. The degree is the sum of the edge weights adjacent to the node. Returns ------- nd : dictionary, or number A dictionary with nodes as keys and in-degree as values or a number if a single node is specified. See Also -------- degree, out_degree, in_degree_iter Examples -------- >>> G = nx.DiGraph() # or MultiDiGraph >>> G.add_path([0,1,2,3]) >>> G.in_degree(0) 0 >>> G.in_degree([0,1]) {0: 0, 1: 1} >>> list(G.in_degree([0,1]).values()) [0, 1] """ if nbunch in self: # return a single node return next(self.in_degree_iter(nbunch,weight))[1] else: # return a dict return dict(self.in_degree_iter(nbunch,weight)) def out_degree(self, nbunch=None, weight=None): """Return the out-degree of a node or nodes. The node out-degree is the number of edges pointing out of the node. Parameters ---------- nbunch : iterable container, optional (default=all nodes) A container of nodes. The container will be iterated through once. weight : string or None, optional (default=None) The edge attribute that holds the numerical value used as a weight. If None, then each edge has weight 1. The degree is the sum of the edge weights adjacent to the node. Returns ------- nd : dictionary, or number A dictionary with nodes as keys and out-degree as values or a number if a single node is specified. Examples -------- >>> G = nx.DiGraph() # or MultiDiGraph >>> G.add_path([0,1,2,3]) >>> G.out_degree(0) 1 >>> G.out_degree([0,1]) {0: 1, 1: 1} >>> list(G.out_degree([0,1]).values()) [1, 1] """ if nbunch in self: # return a single node return next(self.out_degree_iter(nbunch,weight))[1] else: # return a dict return dict(self.out_degree_iter(nbunch,weight)) def clear(self): """Remove all nodes and edges from the graph. This also removes the name, and all graph, node, and edge attributes. Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2,3]) >>> G.clear() >>> G.nodes() [] >>> G.edges() [] """ self.succ.clear() self.pred.clear() self.node.clear() self.graph.clear() def is_multigraph(self): """Return True if graph is a multigraph, False otherwise.""" return False def is_directed(self): """Return True if graph is directed, False otherwise.""" return True def to_directed(self): """Return a directed copy of the graph. Returns ------- G : DiGraph A deepcopy of the graph. Notes ----- This returns a "deepcopy" of the edge, node, and graph attributes which attempts to completely copy all of the data and references. This is in contrast to the similar D=DiGraph(G) which returns a shallow copy of the data. See the Python copy module for more information on shallow and deep copies, http://docs.python.org/library/copy.html. Examples -------- >>> G = nx.Graph() # or MultiGraph, etc >>> G.add_path([0,1]) >>> H = G.to_directed() >>> H.edges() [(0, 1), (1, 0)] If already directed, return a (deep) copy >>> G = nx.DiGraph() # or MultiDiGraph, etc >>> G.add_path([0,1]) >>> H = G.to_directed() >>> H.edges() [(0, 1)] """ return deepcopy(self) def to_undirected(self, reciprocal=False): """Return an undirected representation of the digraph. Parameters ---------- reciprocal : bool (optional) If True only keep edges that appear in both directions in the original digraph. 
Returns ------- G : Graph An undirected graph with the same name and nodes and with edge (u,v,data) if either (u,v,data) or (v,u,data) is in the digraph. If both edges exist in digraph and their edge data is different, only one edge is created with an arbitrary choice of which edge data to use. You must check and correct for this manually if desired. Notes ----- If edges in both directions (u,v) and (v,u) exist in the graph, attributes for the new undirected edge will be a combination of the attributes of the directed edges. The edge data is updated in the (arbitrary) order that the edges are encountered. For more customized control of the edge attributes use add_edge(). This returns a "deepcopy" of the edge, node, and graph attributes which attempts to completely copy all of the data and references. This is in contrast to the similar G=DiGraph(D) which returns a shallow copy of the data. See the Python copy module for more information on shallow and deep copies, http://docs.python.org/library/copy.html. Warning ------- If you have subclassed DiGraph to use dict-like objects in the data structure, those changes do not transfer to the Graph created by this method. """ H=Graph() H.name=self.name H.add_nodes_from(self) if reciprocal is True: H.add_edges_from( (u,v,deepcopy(d)) for u,nbrs in self.adjacency_iter() for v,d in nbrs.items() if v in self.pred[u]) else: H.add_edges_from( (u,v,deepcopy(d)) for u,nbrs in self.adjacency_iter() for v,d in nbrs.items() ) H.graph=deepcopy(self.graph) H.node=deepcopy(self.node) return H def reverse(self, copy=True): """Return the reverse of the graph. The reverse is a graph with the same nodes and edges but with the directions of the edges reversed. Parameters ---------- copy : bool optional (default=True) If True, return a new DiGraph holding the reversed edges. If False, reverse the reverse graph is created using the original graph (this changes the original graph). """ if copy: H = self.__class__(name="Reverse of (%s)"%self.name) H.add_nodes_from(self) H.add_edges_from( (v,u,deepcopy(d)) for u,v,d in self.edges(data=True) ) H.graph=deepcopy(self.graph) H.node=deepcopy(self.node) else: self.pred,self.succ=self.succ,self.pred self.adj=self.succ H=self return H def subgraph(self, nbunch): """Return the subgraph induced on nodes in nbunch. The induced subgraph of the graph contains the nodes in nbunch and the edges between those nodes. Parameters ---------- nbunch : list, iterable A container of nodes which will be iterated through once. Returns ------- G : Graph A subgraph of the graph with the same edge attributes. Notes ----- The graph, edge or node attributes just point to the original graph. So changes to the node or edge structure will not be reflected in the original graph while changes to the attributes will. 
To create a subgraph with its own copy of the edge/node attributes use: nx.Graph(G.subgraph(nbunch)) If edge attributes are containers, a deep copy can be obtained using: G.subgraph(nbunch).copy() For an inplace reduction of a graph to a subgraph you can remove nodes: G.remove_nodes_from([ n in G if n not in set(nbunch)]) Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2,3]) >>> H = G.subgraph([0,1,2]) >>> H.edges() [(0, 1), (1, 2)] """ bunch = self.nbunch_iter(nbunch) # create new graph and copy subgraph into it H = self.__class__() # copy node and attribute dictionaries for n in bunch: H.node[n]=self.node[n] # namespace shortcuts for speed H_succ=H.succ H_pred=H.pred self_succ=self.succ # add nodes for n in H: H_succ[n]=H.adjlist_dict_factory() H_pred[n]=H.adjlist_dict_factory() # add edges for u in H_succ: Hnbrs=H_succ[u] for v,datadict in self_succ[u].items(): if v in H_succ: # add both representations of edge: u-v and v-u Hnbrs[v]=datadict H_pred[v][u]=datadict H.graph=self.graph return H
33.055183
79
0.553816
from copy import deepcopy import networkx_mod as nx from networkx_mod.classes.graph import Graph from networkx_mod.exception import NetworkXError import networkx_mod.convert as convert __author__ = """\n""".join(['Aric Hagberg ([email protected])', 'Pieter Swart ([email protected])', 'Dan Schult([email protected])']) class DiGraph(Graph): def __init__(self, data=None, **attr): self.node_dict_factory = ndf = self.node_dict_factory self.adjlist_dict_factory = self.adjlist_dict_factory self.edge_attr_dict_factory = self.edge_attr_dict_factory self.graph = {} self.node = ndf() self.adj = ndf() self.pred = ndf() self.succ = self.adj if data is not None: convert.to_networkx_mod_graph(data,create_using=self) self.graph.update(attr) self.edge=self.adj def add_node(self, n, attr_dict=None, **attr): if attr_dict is None: attr_dict=attr else: try: attr_dict.update(attr) except AttributeError: raise NetworkXError(\ "The attr_dict argument must be a dictionary.") if n not in self.succ: self.succ[n] = self.adjlist_dict_factory() self.pred[n] = self.adjlist_dict_factory() self.node[n] = attr_dict else: self.node[n].update(attr_dict) def add_nodes_from(self, nodes, **attr): for n in nodes: try: if n not in self.succ: self.succ[n] = self.adjlist_dict_factory() self.pred[n] = self.adjlist_dict_factory() self.node[n] = attr.copy() else: self.node[n].update(attr) except TypeError: nn,ndict = n if nn not in self.succ: self.succ[nn] = self.adjlist_dict_factory() self.pred[nn] = self.adjlist_dict_factory() newdict = attr.copy() newdict.update(ndict) self.node[nn] = newdict else: olddict = self.node[nn] olddict.update(attr) olddict.update(ndict) def remove_node(self, n): try: nbrs=self.succ[n] del self.node[n] except KeyError: raise NetworkXError("The node %s is not in the digraph."%(n,)) for u in nbrs: del self.pred[u][n] del self.succ[n] for u in self.pred[n]: del self.succ[u][n] del self.pred[n] def remove_nodes_from(self, nbunch): for n in nbunch: try: succs=self.succ[n] del self.node[n] for u in succs: del self.pred[u][n] del self.succ[n] for u in self.pred[n]: del self.succ[u][n] del self.pred[n] except KeyError: pass def add_edge(self, u, v, attr_dict=None, **attr): if attr_dict is None: attr_dict=attr else: try: attr_dict.update(attr) except AttributeError: raise NetworkXError(\ "The attr_dict argument must be a dictionary.") if u not in self.succ: self.succ[u]= self.adjlist_dict_factory() self.pred[u]= self.adjlist_dict_factory() self.node[u] = {} if v not in self.succ: self.succ[v]= self.adjlist_dict_factory() self.pred[v]= self.adjlist_dict_factory() self.node[v] = {} datadict=self.adj[u].get(v,self.edge_attr_dict_factory()) datadict.update(attr_dict) self.succ[u][v]=datadict self.pred[v][u]=datadict def add_edges_from(self, ebunch, attr_dict=None, **attr): if attr_dict is None: attr_dict=attr else: try: attr_dict.update(attr) except AttributeError: raise NetworkXError(\ "The attr_dict argument must be a dict.") for e in ebunch: ne = len(e) if ne==3: u,v,dd = e assert hasattr(dd,"update") elif ne==2: u,v = e dd = {} else: raise NetworkXError(\ "Edge tuple %s must be a 2-tuple or 3-tuple."%(e,)) if u not in self.succ: self.succ[u] = self.adjlist_dict_factory() self.pred[u] = self.adjlist_dict_factory() self.node[u] = {} if v not in self.succ: self.succ[v] = self.adjlist_dict_factory() self.pred[v] = self.adjlist_dict_factory() self.node[v] = {} datadict=self.adj[u].get(v,self.edge_attr_dict_factory()) datadict.update(attr_dict) datadict.update(dd) self.succ[u][v] = datadict self.pred[v][u] = datadict 
def remove_edge(self, u, v): try: del self.succ[u][v] del self.pred[v][u] except KeyError: raise NetworkXError("The edge %s-%s not in graph."%(u,v)) def remove_edges_from(self, ebunch): for e in ebunch: (u,v)=e[:2] if u in self.succ and v in self.succ[u]: del self.succ[u][v] del self.pred[v][u] def has_successor(self, u, v): return (u in self.succ and v in self.succ[u]) def has_predecessor(self, u, v): return (u in self.pred and v in self.pred[u]) def successors_iter(self,n): try: return iter(self.succ[n]) except KeyError: raise NetworkXError("The node %s is not in the digraph."%(n,)) def predecessors_iter(self,n): try: return iter(self.pred[n]) except KeyError: raise NetworkXError("The node %s is not in the digraph."%(n,)) def successors(self, n): return list(self.successors_iter(n)) def predecessors(self, n): return list(self.predecessors_iter(n)) neighbors = successors neighbors_iter = successors_iter def edges_iter(self, nbunch=None, data=False, default=None): if nbunch is None: nodes_nbrs=self.adj.items() else: nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch)) if data is True: for n,nbrs in nodes_nbrs: for nbr,ddict in nbrs.items(): yield (n,nbr,ddict) elif data is not False: for n,nbrs in nodes_nbrs: for nbr,ddict in nbrs.items(): d=ddict[data] if data in ddict else default yield (n,nbr,d) else: for n,nbrs in nodes_nbrs: for nbr in nbrs: yield (n,nbr) out_edges_iter=edges_iter out_edges=Graph.edges def in_edges_iter(self, nbunch=None, data=False): if nbunch is None: nodes_nbrs=self.pred.items() else: nodes_nbrs=((n,self.pred[n]) for n in self.nbunch_iter(nbunch)) if data: for n,nbrs in nodes_nbrs: for nbr,data in nbrs.items(): yield (nbr,n,data) else: for n,nbrs in nodes_nbrs: for nbr in nbrs: yield (nbr,n) def in_edges(self, nbunch=None, data=False): return list(self.in_edges_iter(nbunch, data)) def degree_iter(self, nbunch=None, weight=None): if nbunch is None: nodes_nbrs=zip(iter(self.succ.items()),iter(self.pred.items())) else: nodes_nbrs=zip( ((n,self.succ[n]) for n in self.nbunch_iter(nbunch)), ((n,self.pred[n]) for n in self.nbunch_iter(nbunch))) if weight is None: for (n,succ),(n2,pred) in nodes_nbrs: yield (n,len(succ)+len(pred)) else: for (n,succ),(n2,pred) in nodes_nbrs: yield (n, sum((succ[nbr].get(weight,1) for nbr in succ))+ sum((pred[nbr].get(weight,1) for nbr in pred))) def in_degree_iter(self, nbunch=None, weight=None): if nbunch is None: nodes_nbrs=self.pred.items() else: nodes_nbrs=((n,self.pred[n]) for n in self.nbunch_iter(nbunch)) if weight is None: for n,nbrs in nodes_nbrs: yield (n,len(nbrs)) else: for n,nbrs in nodes_nbrs: yield (n, sum(data.get(weight,1) for data in nbrs.values())) def out_degree_iter(self, nbunch=None, weight=None): if nbunch is None: nodes_nbrs=self.succ.items() else: nodes_nbrs=((n,self.succ[n]) for n in self.nbunch_iter(nbunch)) if weight is None: for n,nbrs in nodes_nbrs: yield (n,len(nbrs)) else: for n,nbrs in nodes_nbrs: yield (n, sum(data.get(weight,1) for data in nbrs.values())) def in_degree(self, nbunch=None, weight=None): if nbunch in self: return next(self.in_degree_iter(nbunch,weight))[1] else: return dict(self.in_degree_iter(nbunch,weight)) def out_degree(self, nbunch=None, weight=None): if nbunch in self: return next(self.out_degree_iter(nbunch,weight))[1] else: return dict(self.out_degree_iter(nbunch,weight)) def clear(self): self.succ.clear() self.pred.clear() self.node.clear() self.graph.clear() def is_multigraph(self): return False def is_directed(self): return True def to_directed(self): return 
deepcopy(self) def to_undirected(self, reciprocal=False): H=Graph() H.name=self.name H.add_nodes_from(self) if reciprocal is True: H.add_edges_from( (u,v,deepcopy(d)) for u,nbrs in self.adjacency_iter() for v,d in nbrs.items() if v in self.pred[u]) else: H.add_edges_from( (u,v,deepcopy(d)) for u,nbrs in self.adjacency_iter() for v,d in nbrs.items() ) H.graph=deepcopy(self.graph) H.node=deepcopy(self.node) return H def reverse(self, copy=True): if copy: H = self.__class__(name="Reverse of (%s)"%self.name) H.add_nodes_from(self) H.add_edges_from( (v,u,deepcopy(d)) for u,v,d in self.edges(data=True) ) H.graph=deepcopy(self.graph) H.node=deepcopy(self.node) else: self.pred,self.succ=self.succ,self.pred self.adj=self.succ H=self return H def subgraph(self, nbunch): bunch = self.nbunch_iter(nbunch) H = self.__class__() for n in bunch: H.node[n]=self.node[n] H_succ=H.succ H_pred=H.pred self_succ=self.succ for n in H: H_succ[n]=H.adjlist_dict_factory() H_pred[n]=H.adjlist_dict_factory() for u in H_succ: Hnbrs=H_succ[u] for v,datadict in self_succ[u].items(): if v in H_succ: Hnbrs[v]=datadict H_pred[v][u]=datadict H.graph=self.graph return H
true
true
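The DiGraph record above keeps two mirrored adjacency dicts, succ and pred, and registers every edge in both so that successors, predecessors, in-degree and out-degree are all O(1) lookups. A minimal sketch of that bookkeeping on plain dicts, independent of the vendored networkx_mod package (all names below are illustrative, not taken from the record):

succ = {}  # node -> {neighbor: edge-data dict} for outgoing edges
pred = {}  # node -> {neighbor: edge-data dict} for incoming edges

def add_edge(u, v, **data):
    # Mirror of DiGraph.add_edge: create missing endpoints, then store the
    # same data dict under succ[u][v] and pred[v][u].
    for n in (u, v):
        succ.setdefault(n, {})
        pred.setdefault(n, {})
    d = succ[u].get(v, {})
    d.update(data)
    succ[u][v] = d
    pred[v][u] = d

add_edge(0, 1, weight=3)
add_edge(1, 2)
print(list(succ[1]))   # successors of 1 -> [2]
print(list(pred[1]))   # predecessors of 1 -> [0]
print(sum(len(nbrs) for nbrs in succ.values()))  # number of directed edges -> 2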
f700d5697d622d86217bf9023751cd36a04be139
1,937
py
Python
app/__init__.py
NewbieTechTeam/USSD-Python-Demo
1bd839f7908c0cdafb06a4418ec3f2f42a4898ed
[ "MIT" ]
51
2017-09-05T10:31:16.000Z
2022-02-07T10:35:22.000Z
app/__init__.py
MalingasCUMBANE/USSD-Python-Demo
695c5e5e1b73e4203f45d29e58b2730f3cafd6ff
[ "MIT" ]
3
2019-07-08T09:28:18.000Z
2021-06-01T23:59:11.000Z
app/__init__.py
MalingasCUMBANE/USSD-Python-Demo
695c5e5e1b73e4203f45d29e58b2730f3cafd6ff
[ "MIT" ]
56
2017-04-19T10:02:41.000Z
2022-03-04T12:15:07.000Z
import logging import logging.config import os from celery.utils.log import get_task_logger from dotenv import load_dotenv from flask import Flask from flask_login import LoginManager from config import config, Config from .AfricasTalkingGateway import gateway from .database import db, redis dotenv_path = os.path.join(os.path.join(os.path.dirname(__file__), ".."), ".env") load_dotenv(dotenv_path) __version__ = "0.2.0" __author__ = "[email protected]" __description__ = "Nerds Microfinance application" __email__ = "[email protected]" __copyright__ = "MIT LICENCE" login_manager = LoginManager() celery_logger = get_task_logger(__name__) def create_celery(): from celery import Celery celery = Celery( __name__, backend=Config.CELERY_RESULT_BACKEND, broker=Config.CELERY_BROKER_URL ) return celery celery = create_celery() def create_app(config_name): app = Flask(__name__) # configure application app.config.from_object(config[config_name]) config[config_name].init_app(app) # setup login manager login_manager.init_app(app) # setup database redis.init_app(app) db.init_app(app) # initialize africastalking gateway gateway.init_app(app=app) # setup celery celery.conf.update(app.config) class ContextTask(celery.Task): def __call__(self, *args, **kwargs): with app.app_context(): return self.run(*args, **kwargs) celery.Task = ContextTask # register blueprints from app.ussd import ussd as ussd_bp app.register_blueprint(ussd_bp) # setup logging from app.util import setup_logging from config import basedir if app.debug: logging_level = logging.DEBUG else: logging_level = logging.INFO path = os.path.join(basedir, "app_logger.yaml") setup_logging(default_level=logging_level, logger_file_path=path) return app
23.059524
81
0.716056
import logging import logging.config import os from celery.utils.log import get_task_logger from dotenv import load_dotenv from flask import Flask from flask_login import LoginManager from config import config, Config from .AfricasTalkingGateway import gateway from .database import db, redis dotenv_path = os.path.join(os.path.join(os.path.dirname(__file__), ".."), ".env") load_dotenv(dotenv_path) __version__ = "0.2.0" __author__ = "[email protected]" __description__ = "Nerds Microfinance application" __email__ = "[email protected]" __copyright__ = "MIT LICENCE" login_manager = LoginManager() celery_logger = get_task_logger(__name__) def create_celery(): from celery import Celery celery = Celery( __name__, backend=Config.CELERY_RESULT_BACKEND, broker=Config.CELERY_BROKER_URL ) return celery celery = create_celery() def create_app(config_name): app = Flask(__name__) app.config.from_object(config[config_name]) config[config_name].init_app(app) login_manager.init_app(app) redis.init_app(app) db.init_app(app) gateway.init_app(app=app) celery.conf.update(app.config) class ContextTask(celery.Task): def __call__(self, *args, **kwargs): with app.app_context(): return self.run(*args, **kwargs) celery.Task = ContextTask from app.ussd import ussd as ussd_bp app.register_blueprint(ussd_bp) from app.util import setup_logging from config import basedir if app.debug: logging_level = logging.DEBUG else: logging_level = logging.INFO path = os.path.join(basedir, "app_logger.yaml") setup_logging(default_level=logging_level, logger_file_path=path) return app
true
true
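The app/__init__.py record above binds Celery tasks to the Flask application context by swapping in a ContextTask subclass inside the app factory. A minimal, self-contained sketch of that pattern, assuming Flask and Celery are installed; the broker URL and the ping task are placeholders, not taken from the record:

from celery import Celery
from flask import Flask

celery = Celery(__name__, broker="redis://localhost:6379/0")  # placeholder broker

def create_app():
    app = Flask(__name__)

    class ContextTask(celery.Task):
        def __call__(self, *args, **kwargs):
            # Run every task body inside the Flask app context, so tasks can
            # use extensions (db, config, ...) the same way views do.
            with app.app_context():
                return self.run(*args, **kwargs)

    celery.Task = ContextTask
    return app

app = create_app()

@celery.task
def ping():
    return "pong"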
f700d854e2b87c83dfa59314f01e5237a60e914f
4,393
py
Python
tests/unit/retries/test_bucket.py
kellertk/botocore
3b8eb4ab832ed1ca1833a6cfce8277ef6d54dc9f
[ "Apache-2.0" ]
null
null
null
tests/unit/retries/test_bucket.py
kellertk/botocore
3b8eb4ab832ed1ca1833a6cfce8277ef6d54dc9f
[ "Apache-2.0" ]
null
null
null
tests/unit/retries/test_bucket.py
kellertk/botocore
3b8eb4ab832ed1ca1833a6cfce8277ef6d54dc9f
[ "Apache-2.0" ]
null
null
null
from botocore.exceptions import CapacityNotAvailableError from botocore.retries import bucket from tests import unittest class FakeClock(bucket.Clock): def __init__(self, timestamp_sequences): self.timestamp_sequences = timestamp_sequences self.sleep_call_amounts = [] def sleep(self, amount): self.sleep_call_amounts.append(amount) def current_time(self): return self.timestamp_sequences.pop(0) class TestTokenBucket(unittest.TestCase): def setUp(self): self.timestamp_sequences = [0] self.clock = FakeClock(self.timestamp_sequences) def create_token_bucket(self, max_rate=10, min_rate=0.1): return bucket.TokenBucket(max_rate=max_rate, clock=self.clock, min_rate=min_rate) def test_can_acquire_amount(self): self.timestamp_sequences.extend([ # Requests tokens every second, which is well below our # 10 TPS fill rate. 1, 2, 3, 4, 5, ]) token_bucket = self.create_token_bucket(max_rate=10) for _ in range(5): self.assertTrue(token_bucket.acquire(1, block=False)) def test_can_change_max_capacity_lower(self): # Requests at 1 TPS. self.timestamp_sequences.extend([1, 2, 3, 4, 5]) token_bucket = self.create_token_bucket(max_rate=10) # Request the first 5 tokens with max_rate=10 for _ in range(5): self.assertTrue(token_bucket.acquire(1, block=False)) # Now scale the max_rate down to 1 on the 5th second. self.timestamp_sequences.append(5) token_bucket.max_rate = 1 # And then from seconds 6-10 we request at one per second. self.timestamp_sequences.extend([6, 7, 8, 9, 10]) for _ in range(5): self.assertTrue(token_bucket.acquire(1, block=False)) def test_max_capacity_is_at_least_one(self): token_bucket = self.create_token_bucket() self.timestamp_sequences.append(1) token_bucket.max_rate = 0.5 self.assertEqual(token_bucket.max_rate, 0.5) self.assertEqual(token_bucket.max_capacity, 1) def test_acquire_fails_on_non_block_mode_returns_false(self): self.timestamp_sequences.extend([ # Initial creation time. 0, # Requests a token 1 second later. 1 ]) token_bucket = self.create_token_bucket(max_rate=10) with self.assertRaises(CapacityNotAvailableError): token_bucket.acquire(100, block=False) def test_can_retrieve_at_max_send_rate(self): self.timestamp_sequences.extend([ # Request a new token every 100ms (10 TPS) for 2 seconds. 1 + 0.1 * i for i in range(20) ]) token_bucket = self.create_token_bucket(max_rate=10) for _ in range(20): self.assertTrue(token_bucket.acquire(1, block=False)) def test_acquiring_blocks_when_capacity_reached(self): # This is 1 token every 0.1 seconds. token_bucket = self.create_token_bucket(max_rate=10) self.timestamp_sequences.extend([ # The first acquire() happens after .1 seconds. 0.1, # The second acquire() will fail because we get tokens at # 1 per 0.1 seconds. We will then sleep for 0.05 seconds until we # get a new token. 0.15, # And at 0.2 seconds we get our token. 0.2, # And at 0.3 seconds we have no issues getting a token. # Because we're using such small units (to avoid bloating the # test run time), we have to go slightly over 0.3 seconds here. 0.300001, ]) self.assertTrue(token_bucket.acquire(1, block=False)) self.assertEqual(token_bucket.available_capacity, 0) self.assertTrue(token_bucket.acquire(1, block=True)) self.assertEqual(token_bucket.available_capacity, 0) self.assertTrue(token_bucket.acquire(1, block=False)) def test_rate_cant_go_below_min(self): token_bucket = self.create_token_bucket(max_rate=1, min_rate=0.2) self.timestamp_sequences.append(1) token_bucket.max_rate = 0.1 self.assertEqual(token_bucket.max_rate, 0.2) self.assertEqual(token_bucket.max_capacity, 1)
39.223214
78
0.644434
from botocore.exceptions import CapacityNotAvailableError from botocore.retries import bucket from tests import unittest class FakeClock(bucket.Clock): def __init__(self, timestamp_sequences): self.timestamp_sequences = timestamp_sequences self.sleep_call_amounts = [] def sleep(self, amount): self.sleep_call_amounts.append(amount) def current_time(self): return self.timestamp_sequences.pop(0) class TestTokenBucket(unittest.TestCase): def setUp(self): self.timestamp_sequences = [0] self.clock = FakeClock(self.timestamp_sequences) def create_token_bucket(self, max_rate=10, min_rate=0.1): return bucket.TokenBucket(max_rate=max_rate, clock=self.clock, min_rate=min_rate) def test_can_acquire_amount(self): self.timestamp_sequences.extend([ 1, 2, 3, 4, 5, ]) token_bucket = self.create_token_bucket(max_rate=10) for _ in range(5): self.assertTrue(token_bucket.acquire(1, block=False)) def test_can_change_max_capacity_lower(self): self.timestamp_sequences.extend([1, 2, 3, 4, 5]) token_bucket = self.create_token_bucket(max_rate=10) for _ in range(5): self.assertTrue(token_bucket.acquire(1, block=False)) self.timestamp_sequences.append(5) token_bucket.max_rate = 1 self.timestamp_sequences.extend([6, 7, 8, 9, 10]) for _ in range(5): self.assertTrue(token_bucket.acquire(1, block=False)) def test_max_capacity_is_at_least_one(self): token_bucket = self.create_token_bucket() self.timestamp_sequences.append(1) token_bucket.max_rate = 0.5 self.assertEqual(token_bucket.max_rate, 0.5) self.assertEqual(token_bucket.max_capacity, 1) def test_acquire_fails_on_non_block_mode_returns_false(self): self.timestamp_sequences.extend([ 0, 1 ]) token_bucket = self.create_token_bucket(max_rate=10) with self.assertRaises(CapacityNotAvailableError): token_bucket.acquire(100, block=False) def test_can_retrieve_at_max_send_rate(self): self.timestamp_sequences.extend([ 1 + 0.1 * i for i in range(20) ]) token_bucket = self.create_token_bucket(max_rate=10) for _ in range(20): self.assertTrue(token_bucket.acquire(1, block=False)) def test_acquiring_blocks_when_capacity_reached(self): token_bucket = self.create_token_bucket(max_rate=10) self.timestamp_sequences.extend([ 0.1, 0.15, 0.2, # test run time), we have to go slightly over 0.3 seconds here. 0.300001, ]) self.assertTrue(token_bucket.acquire(1, block=False)) self.assertEqual(token_bucket.available_capacity, 0) self.assertTrue(token_bucket.acquire(1, block=True)) self.assertEqual(token_bucket.available_capacity, 0) self.assertTrue(token_bucket.acquire(1, block=False)) def test_rate_cant_go_below_min(self): token_bucket = self.create_token_bucket(max_rate=1, min_rate=0.2) self.timestamp_sequences.append(1) token_bucket.max_rate = 0.1 self.assertEqual(token_bucket.max_rate, 0.2) self.assertEqual(token_bucket.max_capacity, 1)
true
true
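The tests above make the token bucket deterministic by injecting a clock whose current_time() pops values from a prepared list: one timestamp is consumed when the bucket is built and one per acquire(). A small usage sketch of the same idea, assuming botocore is installed; ListClock and its timestamps are illustrative:

from botocore.retries import bucket

class ListClock(bucket.Clock):
    def __init__(self, times):
        self.times = times
    def sleep(self, amount):
        pass  # never expected to sleep in this sketch
    def current_time(self):
        return self.times.pop(0)

clock = ListClock([0, 1])  # t=0 at construction, t=1 at the acquire below
tb = bucket.TokenBucket(max_rate=10, clock=clock, min_rate=0.1)
print(tb.acquire(1, block=False))  # True: ~10 tokens have refilled by t=1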
f700d93a84f72e1c0853d7a82796bc4f20c06470
10,498
py
Python
google/ads/googleads/v7/services/services/ad_group_simulation_service/transports/grpc.py
wxxlouisa/google-ads-python
f24137966f6bfcb765a9b1fae79f2d23041825fe
[ "Apache-2.0" ]
285
2018-10-05T16:47:58.000Z
2022-03-31T00:58:39.000Z
google/ads/googleads/v7/services/services/ad_group_simulation_service/transports/grpc.py
wxxlouisa/google-ads-python
f24137966f6bfcb765a9b1fae79f2d23041825fe
[ "Apache-2.0" ]
425
2018-09-10T13:32:41.000Z
2022-03-31T14:50:05.000Z
google/ads/googleads/v7/services/services/ad_group_simulation_service/transports/grpc.py
wxxlouisa/google-ads-python
f24137966f6bfcb765a9b1fae79f2d23041825fe
[ "Apache-2.0" ]
369
2018-11-28T07:01:00.000Z
2022-03-28T09:53:22.000Z
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import warnings from typing import Callable, Dict, Optional, Sequence, Tuple from google.api_core import grpc_helpers # type: ignore from google.api_core import gapic_v1 # type: ignore from google import auth # type: ignore from google.auth import credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore import grpc # type: ignore from google.ads.googleads.v7.resources.types import ad_group_simulation from google.ads.googleads.v7.services.types import ad_group_simulation_service from .base import AdGroupSimulationServiceTransport, DEFAULT_CLIENT_INFO class AdGroupSimulationServiceGrpcTransport(AdGroupSimulationServiceTransport): """gRPC backend transport for AdGroupSimulationService. Service to fetch ad group simulations. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation and call it. It sends protocol buffers over the wire using gRPC (which is built on top of HTTP/2); the ``grpcio`` package must be installed. """ def __init__( self, *, host: str = "googleads.googleapis.com", credentials: credentials.Credentials = None, credentials_file: str = None, scopes: Sequence[str] = None, channel: grpc.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiate the transport. Args: host (Optional[str]): The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if ``channel`` is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. channel (Optional[grpc.Channel]): A ``Channel`` instance through which to make calls. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from ``client_cert_source`` or applicatin default SSL credentials. client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. 
quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ self._ssl_channel_credentials = ssl_channel_credentials if channel: # Sanity check: Ensure that channel and credentials are not both # provided. credentials = False # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None elif api_mtls_endpoint: warnings.warn( "api_mtls_endpoint and client_cert_source are deprecated", DeprecationWarning, ) host = ( api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443" ) if credentials is None: credentials, _ = auth.default( scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id ) # Create SSL credentials with client_cert_source or application # default SSL credentials. if client_cert_source: cert, key = client_cert_source() ssl_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: ssl_credentials = SslCredentials().ssl_credentials # create a new channel. The provided one is ignored. self._grpc_channel = type(self).create_channel( host, credentials=credentials, credentials_file=credentials_file, ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) self._ssl_channel_credentials = ssl_credentials else: host = host if ":" in host else host + ":443" if credentials is None: credentials, _ = auth.default(scopes=self.AUTH_SCOPES) # create a new channel. The provided one is ignored. self._grpc_channel = type(self).create_channel( host, credentials=credentials, ssl_credentials=ssl_channel_credentials, scopes=self.AUTH_SCOPES, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) self._stubs = {} # type: Dict[str, Callable] # Run the base constructor. super().__init__( host=host, credentials=credentials, client_info=client_info, ) @classmethod def create_channel( cls, host: str = "googleads.googleapis.com", credentials: credentials.Credentials = None, scopes: Optional[Sequence[str]] = None, **kwargs, ) -> grpc.Channel: """Create and return a gRPC channel object. Args: address (Optionsl[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. kwargs (Optional[dict]): Keyword arguments, which are passed to the channel creation. Returns: grpc.Channel: A gRPC channel object. """ return grpc_helpers.create_channel( host, credentials=credentials, scopes=scopes or cls.AUTH_SCOPES, **kwargs, ) @property def grpc_channel(self) -> grpc.Channel: """Return the channel designed to connect to this service. 
""" return self._grpc_channel @property def get_ad_group_simulation( self, ) -> Callable[ [ad_group_simulation_service.GetAdGroupSimulationRequest], ad_group_simulation.AdGroupSimulation, ]: r"""Return a callable for the get ad group simulation method over gRPC. Returns the requested ad group simulation in full detail. List of thrown errors: `AuthenticationError <>`__ `AuthorizationError <>`__ `HeaderError <>`__ `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__ Returns: Callable[[~.GetAdGroupSimulationRequest], ~.AdGroupSimulation]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_ad_group_simulation" not in self._stubs: self._stubs[ "get_ad_group_simulation" ] = self.grpc_channel.unary_unary( "/google.ads.googleads.v7.services.AdGroupSimulationService/GetAdGroupSimulation", request_serializer=ad_group_simulation_service.GetAdGroupSimulationRequest.serialize, response_deserializer=ad_group_simulation.AdGroupSimulation.deserialize, ) return self._stubs["get_ad_group_simulation"] __all__ = ("AdGroupSimulationServiceGrpcTransport",)
41.824701
101
0.624119
import warnings from typing import Callable, Dict, Optional, Sequence, Tuple from google.api_core import grpc_helpers from google.api_core import gapic_v1 from google import auth from google.auth import credentials from google.auth.transport.grpc import SslCredentials import grpc from google.ads.googleads.v7.resources.types import ad_group_simulation from google.ads.googleads.v7.services.types import ad_group_simulation_service from .base import AdGroupSimulationServiceTransport, DEFAULT_CLIENT_INFO class AdGroupSimulationServiceGrpcTransport(AdGroupSimulationServiceTransport): def __init__( self, *, host: str = "googleads.googleapis.com", credentials: credentials.Credentials = None, credentials_file: str = None, scopes: Sequence[str] = None, channel: grpc.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: self._ssl_channel_credentials = ssl_channel_credentials if channel: credentials = False self._grpc_channel = channel self._ssl_channel_credentials = None elif api_mtls_endpoint: warnings.warn( "api_mtls_endpoint and client_cert_source are deprecated", DeprecationWarning, ) host = ( api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443" ) if credentials is None: credentials, _ = auth.default( scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id ) if client_cert_source: cert, key = client_cert_source() ssl_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: ssl_credentials = SslCredentials().ssl_credentials self._grpc_channel = type(self).create_channel( host, credentials=credentials, credentials_file=credentials_file, ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) self._ssl_channel_credentials = ssl_credentials else: host = host if ":" in host else host + ":443" if credentials is None: credentials, _ = auth.default(scopes=self.AUTH_SCOPES) self._grpc_channel = type(self).create_channel( host, credentials=credentials, ssl_credentials=ssl_channel_credentials, scopes=self.AUTH_SCOPES, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) self._stubs = {} super().__init__( host=host, credentials=credentials, client_info=client_info, ) @classmethod def create_channel( cls, host: str = "googleads.googleapis.com", credentials: credentials.Credentials = None, scopes: Optional[Sequence[str]] = None, **kwargs, ) -> grpc.Channel: return grpc_helpers.create_channel( host, credentials=credentials, scopes=scopes or cls.AUTH_SCOPES, **kwargs, ) @property def grpc_channel(self) -> grpc.Channel: return self._grpc_channel @property def get_ad_group_simulation( self, ) -> Callable[ [ad_group_simulation_service.GetAdGroupSimulationRequest], ad_group_simulation.AdGroupSimulation, ]: if "get_ad_group_simulation" not in self._stubs: self._stubs[ "get_ad_group_simulation" ] = self.grpc_channel.unary_unary( "/google.ads.googleads.v7.services.AdGroupSimulationService/GetAdGroupSimulation", request_serializer=ad_group_simulation_service.GetAdGroupSimulationRequest.serialize, response_deserializer=ad_group_simulation.AdGroupSimulation.deserialize, ) return self._stubs["get_ad_group_simulation"] __all__ = 
("AdGroupSimulationServiceGrpcTransport",)
true
true
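The transport record above accepts a ready-made gRPC channel, in which case it skips credential handling entirely. A hedged sketch of constructing it that way, assuming the google-ads package providing this module is installed; the localhost address is a placeholder, and application code would normally go through the generated service client rather than the transport directly:

import grpc
from google.ads.googleads.v7.services.services.ad_group_simulation_service.transports.grpc import (
    AdGroupSimulationServiceGrpcTransport,
)

channel = grpc.insecure_channel("localhost:50051")  # placeholder channel
transport = AdGroupSimulationServiceGrpcTransport(channel=channel)
rpc = transport.get_ad_group_simulation  # builds the unary-unary stub lazily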
f700d9479c9728354a66dc7a550274f53cde032e
6,401
py
Python
experiment/Behav_Consistency.py
colizoli/letter_color_mri
f4c4d8a91aa17664bdeb16b0436fc8f8fdac2710
[ "MIT" ]
null
null
null
experiment/Behav_Consistency.py
colizoli/letter_color_mri
f4c4d8a91aa17664bdeb16b0436fc8f8fdac2710
[ "MIT" ]
null
null
null
experiment/Behav_Consistency.py
colizoli/letter_color_mri
f4c4d8a91aa17664bdeb16b0436fc8f8fdac2710
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Letter-color Consistency test O.Colizoli 2020 Each letter of the alphabet in random order x 2 Color wheel opens at a randomized color on each trial (but does not turn) Python 2..7 """ # data saved in ~/LogFiles/sub-XXX # Import necessary modules import random import numpy as np import pandas as pd import os, time # for paths and data from IPython import embed as shell try: import Tkinter as tk # py27 from tkColorChooser import askcolor except: import tkinter as tk from tkinter.colorchooser import askcolor # Get subject number via tkinter (command line doesn't work in PsychoPy) subject_ID = [] session = [] ## INPUT WINDOW class GetInput(): def __init__(self): self.root2 = tk.Tk() self.root2.title("Subject and Session") # always put in same location w = 400 # width for the Tk root h = 200 # height for the Tk root # get screen width and height ws = self.root2.winfo_screenwidth() # width of the screen hs = self.root2.winfo_screenheight() # height of the screen # calculate x and y coordinates for the Tk root window x = (ws/6) - (w/6) y = (hs/6) - (h/6) self.root2.geometry('%dx%d+%d+%d' % (w, h, x, y)) # Subject self.e = tk.Entry(self.root2) self.e.insert(0, 'Subject Number') self.e.pack() self.e.focus_set() # Session self.e2 = tk.Entry(self.root2) self.e2.insert(0, 'Session') self.e2.pack() self.e2.focus_set() txt='If each letter of the alphabet\ \nwere to have a unique color,\ \nwhat color would it have?\ \n\nThere are no right or wrong answers.' # instructions self.instr = tk.Label(self.root2, bg='white', text=txt, font=("Helvetica", 14)) self.instr.pack() b = tk.Button(self.root2,text='OK',command=self.get_input) b.pack(side='bottom') self.root2.mainloop() def get_input(self): subj_str = self.e.get() sess_str = self.e2.get() subject_ID.append(subj_str) session.append(sess_str) self.root2.destroy() ## ASK INPUT app = GetInput() # subject and session subject_ID = int(subject_ID[0]) session = int(session[0]) ## Create LogFile folder cwd/LogFiles cwd = os.getcwd() logfile_dir = os.path.join(cwd,'LogFiles','sub-{}'.format(subject_ID),'sess-{}'.format(session),'behav') if not os.path.isdir(logfile_dir): os.makedirs(logfile_dir) timestr = time.strftime("%Y%m%d-%H%M%S") output_alphabet = os.path.join(logfile_dir,'sub-{}_sess-{}_task-consistency_events_{}.tsv'.format(subject_ID,session,timestr)) ### CONSISTENCY TASK ### alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z'] #alphabet = ['a','b','c'] REPS = 2 # number of times to repeat whole alphabet RGBS = [] # save output L = '2' # place holder class Test(): def __init__(self): self.counter = 1 self.root = tk.Tk() self.root.title("Subject {} Session {}".format(subject_ID, session)) # always put in same location # get screen width and height ws = self.root.winfo_screenwidth() # width of the screen hs = self.root.winfo_screenheight() # height of the screen # open in full screen self.root.geometry('%dx%d+%d+%d' % (ws, hs, 0, 0)) self.open1 = tk.Button(self.root, text='Pick a color:', command=self.pick_a_color, font=('Helvetica', '36'),padx=5, pady=5) self.open1.pack(fill=tk.X, expand=False) self.letter = tk.Label(self.root, bg='white', text=L, font=("Helvetica", 90)) self.letter.pack() self.root.mainloop() def quit(self): RGBS.append( [L ,self.RGB, self.HEX, abc] ) self.root.destroy() def pick_a_color(self,): # GET COLOR CHOOSER NOT OPEN ON TOP OF ROOT self.RGB,self.HEX = askcolor((random.randint(0,255), random.randint(0,255), 
random.randint(0,255)), parent=None, title='Pick a color: {}'.format(L) ) self.letter.configure(fg = self.HEX) if self.counter: exit_button = tk.Button(self.root, text='FINISHED', command=self.quit, font=('Helvetica', '28')) exit_button.pack() self.counter = 0 self.root.mainloop() # MAIN LOOP abc = 1 # round for R in np.arange(REPS): random.shuffle(alphabet) # Open a new GUI per letter for L in alphabet: app = Test() # save colors on each trial to prevent losing data DFS = pd.DataFrame(RGBS) print(RGBS) try: DFS.columns = ["letter","rgb","hex","choice"] DFS['subject'] = np.repeat(subject_ID,len(DFS)) DFS['r'] = [c[0] for c in DFS['rgb']] DFS['g'] = [c[1] for c in DFS['rgb']] DFS['b'] = [c[2] for c in DFS['rgb']] except: # clicked window away pass DFS.to_csv(output_alphabet, sep='\t') # save all alphabet/preferences for both groups (also in case it goes wrong) abc+=1 #################################### ## SAVE OUTPUT & determine conditions print(RGBS) print('consistency test - success!') ##### OUTPUT FIGURE WITH COLORS ##### # Sort and show letters x 2 side by side del tk # py27 del askcolor import matplotlib.pyplot as plt # doesn't work together with tkinter import seaborn as sns fig = plt.figure(figsize=(10,5)) # Sort so the same letters go side by side for each choice try: DFS.sort_values(by=['choice', 'letter'],inplace=True) except: DFS = DFS.sort(['choice', 'letter']) DFS.reset_index(inplace=True) for i,A in enumerate(alphabet): ax = fig.add_subplot(6,5,i+1) ax.text(0.5, 0.5, DFS['letter'][i], color=DFS['hex'][i],fontsize=18) ax.text(0.25, 0.5, DFS['letter'][i+len(alphabet)], color=DFS['hex'][i+len(alphabet)],fontsize=18) ax.set_axis_off() sns.despine(offset=10, trim=True) plt.tight_layout() fig.savefig(os.path.join(cwd,'LogFiles','sub-{}'.format(subject_ID),'sess-{}'.format(session),'behav','sub-{}_sess-{}_colors.pdf'.format(subject_ID,session))) print('success: sub-{}_sess-{}_colors.pdf'.format(subject_ID,session))
34.229947
158
0.600375
import random import numpy as np import pandas as pd import os, time from IPython import embed as shell try: import Tkinter as tk from tkColorChooser import askcolor except: import tkinter as tk from tkinter.colorchooser import askcolor subject_ID = [] session = [] ## INPUT WINDOW class GetInput(): def __init__(self): self.root2 = tk.Tk() self.root2.title("Subject and Session") # always put in same location w = 400 # width for the Tk root h = 200 # height for the Tk root # get screen width and height ws = self.root2.winfo_screenwidth() # width of the screen hs = self.root2.winfo_screenheight() # height of the screen # calculate x and y coordinates for the Tk root window x = (ws/6) - (w/6) y = (hs/6) - (h/6) self.root2.geometry('%dx%d+%d+%d' % (w, h, x, y)) # Subject self.e = tk.Entry(self.root2) self.e.insert(0, 'Subject Number') self.e.pack() self.e.focus_set() # Session self.e2 = tk.Entry(self.root2) self.e2.insert(0, 'Session') self.e2.pack() self.e2.focus_set() txt='If each letter of the alphabet\ \nwere to have a unique color,\ \nwhat color would it have?\ \n\nThere are no right or wrong answers.' # instructions self.instr = tk.Label(self.root2, bg='white', text=txt, font=("Helvetica", 14)) self.instr.pack() b = tk.Button(self.root2,text='OK',command=self.get_input) b.pack(side='bottom') self.root2.mainloop() def get_input(self): subj_str = self.e.get() sess_str = self.e2.get() subject_ID.append(subj_str) session.append(sess_str) self.root2.destroy() ## ASK INPUT app = GetInput() # subject and session subject_ID = int(subject_ID[0]) session = int(session[0]) ## Create LogFile folder cwd/LogFiles cwd = os.getcwd() logfile_dir = os.path.join(cwd,'LogFiles','sub-{}'.format(subject_ID),'sess-{}'.format(session),'behav') if not os.path.isdir(logfile_dir): os.makedirs(logfile_dir) timestr = time.strftime("%Y%m%d-%H%M%S") output_alphabet = os.path.join(logfile_dir,'sub-{}_sess-{}_task-consistency_events_{}.tsv'.format(subject_ID,session,timestr)) ### CONSISTENCY TASK ### alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z'] #alphabet = ['a','b','c'] REPS = 2 # number of times to repeat whole alphabet RGBS = [] # save output L = '2' # place holder class Test(): def __init__(self): self.counter = 1 self.root = tk.Tk() self.root.title("Subject {} Session {}".format(subject_ID, session)) # always put in same location # get screen width and height ws = self.root.winfo_screenwidth() # width of the screen hs = self.root.winfo_screenheight() # height of the screen # open in full screen self.root.geometry('%dx%d+%d+%d' % (ws, hs, 0, 0)) self.open1 = tk.Button(self.root, text='Pick a color:', command=self.pick_a_color, font=('Helvetica', '36'),padx=5, pady=5) self.open1.pack(fill=tk.X, expand=False) self.letter = tk.Label(self.root, bg='white', text=L, font=("Helvetica", 90)) self.letter.pack() self.root.mainloop() def quit(self): RGBS.append( [L ,self.RGB, self.HEX, abc] ) self.root.destroy() def pick_a_color(self,): # GET COLOR CHOOSER NOT OPEN ON TOP OF ROOT self.RGB,self.HEX = askcolor((random.randint(0,255), random.randint(0,255), random.randint(0,255)), parent=None, title='Pick a color: {}'.format(L) ) self.letter.configure(fg = self.HEX) if self.counter: exit_button = tk.Button(self.root, text='FINISHED', command=self.quit, font=('Helvetica', '28')) exit_button.pack() self.counter = 0 self.root.mainloop() # MAIN LOOP abc = 1 # round for R in np.arange(REPS): random.shuffle(alphabet) # Open a new GUI per letter for L in alphabet: app 
= Test() # save colors on each trial to prevent losing data DFS = pd.DataFrame(RGBS) print(RGBS) try: DFS.columns = ["letter","rgb","hex","choice"] DFS['subject'] = np.repeat(subject_ID,len(DFS)) DFS['r'] = [c[0] for c in DFS['rgb']] DFS['g'] = [c[1] for c in DFS['rgb']] DFS['b'] = [c[2] for c in DFS['rgb']] except: # clicked window away pass DFS.to_csv(output_alphabet, sep='\t') # save all alphabet/preferences for both groups (also in case it goes wrong) abc+=1 #################################### ## SAVE OUTPUT & determine conditions print(RGBS) print('consistency test - success!') ##### OUTPUT FIGURE WITH COLORS ##### # Sort and show letters x 2 side by side del tk # py27 del askcolor import matplotlib.pyplot as plt # doesn't work together with tkinter import seaborn as sns fig = plt.figure(figsize=(10,5)) try: DFS.sort_values(by=['choice', 'letter'],inplace=True) except: DFS = DFS.sort(['choice', 'letter']) DFS.reset_index(inplace=True) for i,A in enumerate(alphabet): ax = fig.add_subplot(6,5,i+1) ax.text(0.5, 0.5, DFS['letter'][i], color=DFS['hex'][i],fontsize=18) ax.text(0.25, 0.5, DFS['letter'][i+len(alphabet)], color=DFS['hex'][i+len(alphabet)],fontsize=18) ax.set_axis_off() sns.despine(offset=10, trim=True) plt.tight_layout() fig.savefig(os.path.join(cwd,'LogFiles','sub-{}'.format(subject_ID),'sess-{}'.format(session),'behav','sub-{}_sess-{}_colors.pdf'.format(subject_ID,session))) print('success: sub-{}_sess-{}_colors.pdf'.format(subject_ID,session))
true
true
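The consistency task above centers on one interaction: opening the Tk color chooser at a random starting color and reading back both the RGB tuple and the hex string. A minimal Python 3 sketch of just that step, assuming a display is available; the letter in the dialog title is arbitrary:

import random
from tkinter import Tk
from tkinter.colorchooser import askcolor

root = Tk()
root.withdraw()  # the dialog does not need a visible main window
start = tuple(random.randint(0, 255) for _ in range(3))
rgb, hex_code = askcolor(start, title="Pick a color: a")
print(rgb, hex_code)  # an (r, g, b) tuple and a '#rrggbb' string, or (None, None) if cancelled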
f700d97441878947dbaf1f055b12c487191f49a6
4,471
py
Python
list_utils_test.py
KeepCoding/Connecta
e11c2974795cf325c194e107d1749c7e6431219c
[ "MIT" ]
null
null
null
list_utils_test.py
KeepCoding/Connecta
e11c2974795cf325c194e107d1749c7e6431219c
[ "MIT" ]
null
null
null
list_utils_test.py
KeepCoding/Connecta
e11c2974795cf325c194e107d1749c7e6431219c
[ "MIT" ]
null
null
null
import pytest

from list_utils import *
from oracle import ColumnRecommendation, ColumnClassification


def test_find_one():
    needle = 1
    none = [0, 0, 5, 's']
    beginning = [1, None, 9, 6, 0, 0]
    end = ['x', '0', 1]
    several = [0, 0, 3, 4, 1, 3, 2, 1, 3, 4]

    assert find_one(none, needle) == False
    assert find_one(beginning, needle)
    assert find_one(end, needle)
    assert find_one(several, needle)


def test_find_n():
    assert find_n([2, 3, 4, 5, 6], 2, -1) == False
    assert find_n([1, 2, 3, 4, 5], 42, 2) == False
    assert find_n([1, 2, 3, 4, 5], 1, 2) == False
    assert find_n([1, 2, 3, 2, 4, 5], 2, 2)
    assert find_n([1, 2, 3, 4, 5, 4, 6, 4, 7, 4, 6], 4, 2)
    assert find_n([1, 2, 3, 4], 'x', 0) == True


def test_find_streak():
    assert find_streak([1, 2, 3, 4, 5], 4, -1) == False
    assert find_streak([1, 2, 3, 4, 5], 42, 2) == False
    assert find_streak([1, 2, 3, 4], 4, 1)
    assert find_streak([1, 2, 3, 1, 2], 2, 2) == False
    assert find_streak([1, 2, 3, 4, 5, 5, 5], 5, 3)
    assert find_streak([5, 5, 5, 1, 2, 3, 4], 5, 3)
    assert find_streak([1, 2, 5, 5, 5, 3, 4], 5, 3)
    assert find_streak([1, 2, 3, 4, 5, 5, 5], 5, 4) == False


def test_first_elements():
    original = [[0, 7, 3], [4, 0, 1]]

    assert first_elements(original) == [0, 4]


def test_transpose():
    original = [[0, 7, 3], [4, 0, 1]]
    transposed = [[0, 4], [7, 0], [3, 1]]

    assert transpose(original) == transposed
    assert transpose(transpose(original)) == original


def test_zero_distance_displace():
    l1 = [1, 2, 3, 4, 5, 6]
    l2 = [1]
    l3 = [[4, 5], ['x', 'o', 'c']]

    assert displace([], 0) == []
    assert displace(l1, 0) == l1
    assert displace(l2, 0) == l2
    assert displace(l3, 0) == l3


def test_positive_distance_displace():
    l1 = [1, 2, 3, 4, 5, 6]
    l2 = [1]
    l3 = [[4, 5], ['x', 'o', 'c']]
    l4 = [9, 6, 5]

    assert displace([], 2) == []
    assert displace(l1, 2) == [None, None, 1, 2, 3, 4]
    assert displace(l2, 3, '-') == ['-']
    assert displace(l3, 1, '#') == ['#', [4, 5]]
    assert displace(l4, 3, 0) == [0, 0, 0]


def test_negative_distance_displace():
    l1 = [1, 2, 3, 4, 5, 6]
    l2 = [1]
    l3 = [[4, 5], ['x', 'o', 'c']]
    l4 = [9, 6, 5]

    assert displace([], -2) == []
    assert displace(l1, -2) == [3, 4, 5, 6, None, None]
    assert displace(l2, -3, '-') == ['-']
    assert displace(l3, -1, '#') == [['x', 'o', 'c'], '#']
    assert displace(l4, -3, 0) == [0, 0, 0]


def test_reverse_list():
    assert reverse_list([]) == []
    assert reverse_list([1, 2, 3, 4, 5, 6]) == [6, 5, 4, 3, 2, 1]


def test_reverse_matrix():
    assert reverse_matrix([]) == []
    assert reverse_matrix([[0, 1, 2, 3], [0, 1, 2, 3]]) == [
        [3, 2, 1, 0], [3, 2, 1, 0]]


def test_all_same():
    assert all_same([9, 1, 2, 3, 4]) == False
    assert all_same([[], [], []])
    assert all_same([])
    assert all_same([ColumnRecommendation(0, ColumnClassification.WIN),
                     ColumnRecommendation(2, ColumnClassification.WIN)])
    assert all_same([ColumnRecommendation(0, ColumnClassification.MAYBE),
                     ColumnRecommendation(0, ColumnClassification.WIN)]) == False


def test_collapse_list():
    assert collapse_list([]) == ''
    assert collapse_list(['o', 'x', 'x', 'o']) == 'oxxo'
    assert collapse_list(['x', 'x', None, None, None]) == 'xx...'


def test_collapse_matrix():
    assert collapse_matrix([]) == ''
    assert collapse_matrix([['x', 'x', None], ['o', 'x', 'x'], ['o', None, None]]) == 'xx.|oxx|o..'


def test_replace_all_in_list():
    assert replace_all_in_list([None, 3, '546', 33, None], None, '#') == [
        '#', 3, '546', 33, '#']
    assert replace_all_in_list([1, 2, 3, 4, 5], 'e', 42) == [1, 2, 3, 4, 5]
    assert replace_all_in_list([], 34, 43) == []


def test_replace_all_in_matrix():
    # caso normal: tiene lo viejo
    assert replace_all_in_matrix([[1, 2, 3, 'n', 'n', None], [4, 5, 'n']],
                                 'n', '#') == [[1, 2, 3, '#', '#', None], [4, 5, '#']]

    # caso raro: no tiene lo viejo
    assert replace_all_in_matrix([[None, None, 2, True], [4, 5, '#']], 'k', 42) == [[
        None, None, 2, True], [4, 5, '#']]

    # caso más raro: lista de listas vacías
    assert replace_all_in_matrix([], None, 7) == []
    assert replace_all_in_matrix([[], []], None, 7) == [[], []]
30.834483
101
0.518676
import pytest

from list_utils import *
from oracle import ColumnRecommendation, ColumnClassification


def test_find_one():
    needle = 1
    none = [0, 0, 5, 's']
    beginning = [1, None, 9, 6, 0, 0]
    end = ['x', '0', 1]
    several = [0, 0, 3, 4, 1, 3, 2, 1, 3, 4]

    assert find_one(none, needle) == False
    assert find_one(beginning, needle)
    assert find_one(end, needle)
    assert find_one(several, needle)


def test_find_n():
    assert find_n([2, 3, 4, 5, 6], 2, -1) == False
    assert find_n([1, 2, 3, 4, 5], 42, 2) == False
    assert find_n([1, 2, 3, 4, 5], 1, 2) == False
    assert find_n([1, 2, 3, 2, 4, 5], 2, 2)
    assert find_n([1, 2, 3, 4, 5, 4, 6, 4, 7, 4, 6], 4, 2)
    assert find_n([1, 2, 3, 4], 'x', 0) == True


def test_find_streak():
    assert find_streak([1, 2, 3, 4, 5], 4, -1) == False
    assert find_streak([1, 2, 3, 4, 5], 42, 2) == False
    assert find_streak([1, 2, 3, 4], 4, 1)
    assert find_streak([1, 2, 3, 1, 2], 2, 2) == False
    assert find_streak([1, 2, 3, 4, 5, 5, 5], 5, 3)
    assert find_streak([5, 5, 5, 1, 2, 3, 4], 5, 3)
    assert find_streak([1, 2, 5, 5, 5, 3, 4], 5, 3)
    assert find_streak([1, 2, 3, 4, 5, 5, 5], 5, 4) == False


def test_first_elements():
    original = [[0, 7, 3], [4, 0, 1]]

    assert first_elements(original) == [0, 4]


def test_transpose():
    original = [[0, 7, 3], [4, 0, 1]]
    transposed = [[0, 4], [7, 0], [3, 1]]

    assert transpose(original) == transposed
    assert transpose(transpose(original)) == original


def test_zero_distance_displace():
    l1 = [1, 2, 3, 4, 5, 6]
    l2 = [1]
    l3 = [[4, 5], ['x', 'o', 'c']]

    assert displace([], 0) == []
    assert displace(l1, 0) == l1
    assert displace(l2, 0) == l2
    assert displace(l3, 0) == l3


def test_positive_distance_displace():
    l1 = [1, 2, 3, 4, 5, 6]
    l2 = [1]
    l3 = [[4, 5], ['x', 'o', 'c']]
    l4 = [9, 6, 5]

    assert displace([], 2) == []
    assert displace(l1, 2) == [None, None, 1, 2, 3, 4]
    assert displace(l2, 3, '-') == ['-']
    assert displace(l3, 1, '#') == ['#', [4, 5]]
    assert displace(l4, 3, 0) == [0, 0, 0]


def test_negative_distance_displace():
    l1 = [1, 2, 3, 4, 5, 6]
    l2 = [1]
    l3 = [[4, 5], ['x', 'o', 'c']]
    l4 = [9, 6, 5]

    assert displace([], -2) == []
    assert displace(l1, -2) == [3, 4, 5, 6, None, None]
    assert displace(l2, -3, '-') == ['-']
    assert displace(l3, -1, '#') == [['x', 'o', 'c'], '#']
    assert displace(l4, -3, 0) == [0, 0, 0]


def test_reverse_list():
    assert reverse_list([]) == []
    assert reverse_list([1, 2, 3, 4, 5, 6]) == [6, 5, 4, 3, 2, 1]


def test_reverse_matrix():
    assert reverse_matrix([]) == []
    assert reverse_matrix([[0, 1, 2, 3], [0, 1, 2, 3]]) == [
        [3, 2, 1, 0], [3, 2, 1, 0]]


def test_all_same():
    assert all_same([9, 1, 2, 3, 4]) == False
    assert all_same([[], [], []])
    assert all_same([])
    assert all_same([ColumnRecommendation(0, ColumnClassification.WIN),
                     ColumnRecommendation(2, ColumnClassification.WIN)])
    assert all_same([ColumnRecommendation(0, ColumnClassification.MAYBE),
                     ColumnRecommendation(0, ColumnClassification.WIN)]) == False


def test_collapse_list():
    assert collapse_list([]) == ''
    assert collapse_list(['o', 'x', 'x', 'o']) == 'oxxo'
    assert collapse_list(['x', 'x', None, None, None]) == 'xx...'


def test_collapse_matrix():
    assert collapse_matrix([]) == ''
    assert collapse_matrix([['x', 'x', None], ['o', 'x', 'x'], ['o', None, None]]) == 'xx.|oxx|o..'


def test_replace_all_in_list():
    assert replace_all_in_list([None, 3, '546', 33, None], None, '#') == [
        '#', 3, '546', 33, '#']
    assert replace_all_in_list([1, 2, 3, 4, 5], 'e', 42) == [1, 2, 3, 4, 5]
    assert replace_all_in_list([], 34, 43) == []


def test_replace_all_in_matrix():
    assert replace_all_in_matrix([[1, 2, 3, 'n', 'n', None], [4, 5, 'n']],
                                 'n', '#') == [[1, 2, 3, '#', '#', None], [4, 5, '#']]

    assert replace_all_in_matrix([[None, None, 2, True], [4, 5, '#']], 'k', 42) == [[
        None, None, 2, True], [4, 5, '#']]

    assert replace_all_in_matrix([], None, 7) == []
    assert replace_all_in_matrix([[], []], None, 7) == [[], []]
true
true
f700d97d04825866396519cbea6a506aeba198bd
1,169
py
Python
GBB.ConversationalKM.Python/SelectIntent/__init__.py
microsoft/Customer-Service-Conversational-Insights
a5275d8d465df000f3ffb8fedc9a99b5e095dcb8
[ "MIT" ]
8
2020-07-02T08:07:30.000Z
2021-12-02T17:50:06.000Z
GBB.ConversationalKM.Python/SelectIntent/__init__.py
microsoft/Customer-Service-Conversational-Insights
a5275d8d465df000f3ffb8fedc9a99b5e095dcb8
[ "MIT" ]
1
2021-06-08T17:56:54.000Z
2021-06-09T12:28:01.000Z
GBB.ConversationalKM.Python/SelectIntent/__init__.py
microsoft/Customer-Service-Conversational-Insights
a5275d8d465df000f3ffb8fedc9a99b5e095dcb8
[ "MIT" ]
8
2020-07-02T07:50:30.000Z
2021-11-10T08:37:18.000Z
import logging

import azure.functions as func
import json
import os

from azure.cosmosdb.table.tableservice import TableService
from azure.cosmosdb.table.models import Entity


def main(req: func.HttpRequest) -> func.HttpResponse:
    logging.info('Python HTTP trigger function processed a request.')

    # Connect to Azure Table Storage
    table_service = TableService(connection_string= os.environ['AzureWebJobsStorage'])
    table_service.create_table('intents') if not table_service.exists('intents') else None

    req_body = req.get_json()

    if req_body:
        # Create row to be saved on Azure Table Storage
        print(req_body.get('ConversationId'))
        data = req_body
        data["PartitionKey"] = req_body.get('ConversationId')
        data["RowKey"] = req_body.get('MessageId')

        # Save row on Azure Table Storage
        table_service.insert_or_replace_entity('intents', data)

        return func.HttpResponse(f"Row {req_body.get('MessageId')} for {req_body.get('ConversationId')} added")
    else:
        return func.HttpResponse(
            "Please pass valid request body",
            status_code=400
        )
38.966667
111
0.691189
import logging

import azure.functions as func
import json
import os

from azure.cosmosdb.table.tableservice import TableService
from azure.cosmosdb.table.models import Entity


def main(req: func.HttpRequest) -> func.HttpResponse:
    logging.info('Python HTTP trigger function processed a request.')

    table_service = TableService(connection_string= os.environ['AzureWebJobsStorage'])
    table_service.create_table('intents') if not table_service.exists('intents') else None

    req_body = req.get_json()

    if req_body:
        print(req_body.get('ConversationId'))
        data = req_body
        data["PartitionKey"] = req_body.get('ConversationId')
        data["RowKey"] = req_body.get('MessageId')

        table_service.insert_or_replace_entity('intents', data)

        return func.HttpResponse(f"Row {req_body.get('MessageId')} for {req_body.get('ConversationId')} added")
    else:
        return func.HttpResponse(
            "Please pass valid request body",
            status_code=400
        )
true
true
f700d9cab0b2649998886d89963bee454b666b82
269
py
Python
TimeLoopLib/__init__.py
jvollhueter/pyMANGA-1
414204a394d44405225b4b8224b19464c1006f1d
[ "MIT" ]
null
null
null
TimeLoopLib/__init__.py
jvollhueter/pyMANGA-1
414204a394d44405225b4b8224b19464c1006f1d
[ "MIT" ]
null
null
null
TimeLoopLib/__init__.py
jvollhueter/pyMANGA-1
414204a394d44405225b4b8224b19464c1006f1d
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 8 15:25:03 2018

@author: bathmann
"""
from .TreeDynamicTimeStepping import TreeDynamicTimeStepping
from .TreeDynamicTimeLoop import TreeDynamicTimeLoop
from .SimpleTimeLoop.SimpleLoop import Loop
22.416667
60
0.773234
from .TreeDynamicTimeStepping import TreeDynamicTimeStepping
from .TreeDynamicTimeLoop import TreeDynamicTimeLoop
from .SimpleTimeLoop.SimpleLoop import Loop
true
true
f700dc10cf350473ea1bc07b4576a52b3cab050a
5,684
py
Python
src/bin/create_esta_layers.py
terraPulse/boreal-tcc-analysis
e8a7b4bae727811d03bb57c5738945af7fe2920d
[ "MIT" ]
null
null
null
src/bin/create_esta_layers.py
terraPulse/boreal-tcc-analysis
e8a7b4bae727811d03bb57c5738945af7fe2920d
[ "MIT" ]
null
null
null
src/bin/create_esta_layers.py
terraPulse/boreal-tcc-analysis
e8a7b4bae727811d03bb57c5738945af7fe2920d
[ "MIT" ]
null
null
null
''' File: detect_forest_change.py Author: Min Feng Version: 0.1 Create: 2018-04-20 15:42:37 Description: detect forest changes from foest probility layers and tree cover layers ''' import logging def _load_tcc(f_tcc, msk): from gio import geo_raster_ex as gx from gio import config import numpy as np _bnd = gx.read_block(f_tcc, msk) if _bnd is None: return None _dat = np.zeros(msk.data.shape, dtype=np.uint8) _m_tcc = config.getfloat('conf', 'min_tcc') _idx = _bnd.data >= _m_tcc _dat[_idx] = 100 _idx = _bnd.data > 100 _dat[_idx] = _bnd.data[_idx] return msk.from_grid(_dat, nodata=255) def _task(tile, d_out, d_ref, opts): from gio import file_unzip from gio import config from gio import file_mag from gio import metadata from gio import geo_raster as ge from gio import mod_filter import numpy as np import os import re _tag = tile.tag _ttt = config.get('conf', 'test_tile') if _ttt and _tag not in _ttt.replace(' ', '').split(','): return _m = re.match(r'(h\d+)(v\d+)', _tag) _h = _m.group(1) _v = _m.group(2) _d_out = os.path.join(d_out, _h, _v, _tag) _d_ref = os.path.join(d_ref, _h, _v, _tag) _f_met = os.path.join(_d_out, '%s_met.txt' % _tag) _fname = lambda t: os.path.join(_d_out, '%s_%s.tif' % (_tag, t)) _fname_ref = lambda t: os.path.join(_d_ref, '%s_%s.tif' % (_tag, t)) _fname_m1 = lambda t, a='_m1': _fname('%s_n0%s' % (t, a)) # if not file_mag.get(_f_met).exists(): # logging.info('skip non-existing result for %s' % _tag) # return if not file_mag.get(_fname_m1('loss_year')).exists(): logging.info('skip non-existing result for %s' % _tag) return if (not _ttt) and file_mag.get(_fname_m1('esta_year')).exists() and \ (not config.getboolean('conf', 'over_write', False)): logging.info('skip processed esta result for %s' % _tag) return _b_loss_year = ge.open(_fname_m1('loss_year')).get_band().cache() _b_gain_year = ge.open(_fname_m1('gain_year')).get_band().cache() _b_loss_prob = ge.open(_fname_m1('loss_prob')).get_band().cache() _b_gain_prob = ge.open(_fname_m1('gain_prob')).get_band().cache() _f_tcc = config.get('conf', 'latest_tcc') _b_prob = _load_tcc(_f_tcc, _b_loss_year) if _f_tcc else ge.open(_fname_ref('age_prob')).get_band().cache() if _b_prob is None: logging.info('forced to use age_prob layer %s' % _fname_ref('age_prob')) _b_prob = ge.open(_fname_ref('age_prob')).get_band().cache() _d_forest_prob = _b_prob.data _d_loss = _b_loss_year.data _d_gain = _b_gain_year.data _d_esta = np.zeros(_d_forest_prob.shape, dtype=np.uint8) _d_prob = np.empty(_d_forest_prob.shape, dtype=np.float32) _d_prob.fill(100) _d_prob[_b_prob.data == _b_prob.nodata] = -9999 _b_esta = _b_loss_year.from_grid(_d_esta, nodata=255) _b_esta.color_table = ge.load_colortable(config.get('conf', 'color')) _d_esta[_d_forest_prob > 100] = _d_forest_prob[_d_forest_prob > 100] for _y in range(1970, 2021): _y = _y - 1970 _idx = _d_loss == _y _d_esta[_idx] = 100 _d_prob[_idx] = _b_loss_prob.data[_idx] _idx = _d_gain == _y _d_esta[_idx] = _y _d_prob[_idx] = _b_gain_prob.data[_idx] _d_esta[_d_forest_prob < 50] = 100 _d_test = (_d_esta < 100).astype(np.uint8) _d_test[(_d_esta < 100) & (_d_esta > 0)] = 1 _b_test = _b_esta.from_grid(_d_test, nodata=255) mod_filter.filter_band_mmu(_b_test, area=config.getfloat('conf', 'min_patch')) _d_esta[(_d_esta == 100) & (_b_test.data == 1)] = 0 _d_test = ((_d_esta > 0) & (_d_esta <= 100)).astype(np.uint8) _d_test[(_d_esta < 100) & (_d_esta > 0)] = 1 _b_test = _b_esta.from_grid(_d_test, nodata=255) mod_filter.filter_band_mmu(_b_test, area=config.getfloat('conf', 'min_patch')) _d_esta[(_d_esta == 0) & 
(_b_test.data == 1)] = 100 with file_unzip.file_unzip() as _zip: _zip.save(_b_esta, _fname_m1('esta_year')) _zip.save(_b_esta.from_grid(_d_prob, nodata=-9999), _fname_m1('esta_prob')) return True def main(opts): import logging from gio import config from gio import file_mag from gio import global_task import os _d_inp = config.get('conf', 'input') _d_ref = config.get('conf', 'refer', _d_inp) _f_mak = file_mag.get(os.path.join(_d_inp, 'tasks.txt')) _ts = global_task.load(_f_mak) from gio import multi_task _rs = multi_task.run(_task, [(_t, os.path.join(_d_inp, 'data'), os.path.join(_d_ref, 'data'), opts) for _t in multi_task.load(_ts, opts)], opts) print('processed', len([_r for _r in _rs if _r]), 'tiles') def usage(): _p = environ_mag.usage(True) _p.add_argument('-i', '--input', dest='input') _p.add_argument('-r', '--refer', dest='refer') _p.add_argument('--latest-tcc', dest='latest_tcc') _p.add_argument('-w', '--over-write', dest='over_write', type='bool') _p.add_argument('--min-tcc', dest='min_tcc', type=int, default=30) _p.add_argument('-m', '--min-patch', dest='min_patch', type=float, default=100 * 100) _p.add_argument('--test-tile', dest='test_tile') return _p if __name__ == '__main__': from gio import environ_mag environ_mag.init_path() environ_mag.run(main, [environ_mag.config(usage())])
34.448485
149
0.619458
import logging def _load_tcc(f_tcc, msk): from gio import geo_raster_ex as gx from gio import config import numpy as np _bnd = gx.read_block(f_tcc, msk) if _bnd is None: return None _dat = np.zeros(msk.data.shape, dtype=np.uint8) _m_tcc = config.getfloat('conf', 'min_tcc') _idx = _bnd.data >= _m_tcc _dat[_idx] = 100 _idx = _bnd.data > 100 _dat[_idx] = _bnd.data[_idx] return msk.from_grid(_dat, nodata=255) def _task(tile, d_out, d_ref, opts): from gio import file_unzip from gio import config from gio import file_mag from gio import metadata from gio import geo_raster as ge from gio import mod_filter import numpy as np import os import re _tag = tile.tag _ttt = config.get('conf', 'test_tile') if _ttt and _tag not in _ttt.replace(' ', '').split(','): return _m = re.match(r'(h\d+)(v\d+)', _tag) _h = _m.group(1) _v = _m.group(2) _d_out = os.path.join(d_out, _h, _v, _tag) _d_ref = os.path.join(d_ref, _h, _v, _tag) _f_met = os.path.join(_d_out, '%s_met.txt' % _tag) _fname = lambda t: os.path.join(_d_out, '%s_%s.tif' % (_tag, t)) _fname_ref = lambda t: os.path.join(_d_ref, '%s_%s.tif' % (_tag, t)) _fname_m1 = lambda t, a='_m1': _fname('%s_n0%s' % (t, a)) if not file_mag.get(_fname_m1('loss_year')).exists(): logging.info('skip non-existing result for %s' % _tag) return if (not _ttt) and file_mag.get(_fname_m1('esta_year')).exists() and \ (not config.getboolean('conf', 'over_write', False)): logging.info('skip processed esta result for %s' % _tag) return _b_loss_year = ge.open(_fname_m1('loss_year')).get_band().cache() _b_gain_year = ge.open(_fname_m1('gain_year')).get_band().cache() _b_loss_prob = ge.open(_fname_m1('loss_prob')).get_band().cache() _b_gain_prob = ge.open(_fname_m1('gain_prob')).get_band().cache() _f_tcc = config.get('conf', 'latest_tcc') _b_prob = _load_tcc(_f_tcc, _b_loss_year) if _f_tcc else ge.open(_fname_ref('age_prob')).get_band().cache() if _b_prob is None: logging.info('forced to use age_prob layer %s' % _fname_ref('age_prob')) _b_prob = ge.open(_fname_ref('age_prob')).get_band().cache() _d_forest_prob = _b_prob.data _d_loss = _b_loss_year.data _d_gain = _b_gain_year.data _d_esta = np.zeros(_d_forest_prob.shape, dtype=np.uint8) _d_prob = np.empty(_d_forest_prob.shape, dtype=np.float32) _d_prob.fill(100) _d_prob[_b_prob.data == _b_prob.nodata] = -9999 _b_esta = _b_loss_year.from_grid(_d_esta, nodata=255) _b_esta.color_table = ge.load_colortable(config.get('conf', 'color')) _d_esta[_d_forest_prob > 100] = _d_forest_prob[_d_forest_prob > 100] for _y in range(1970, 2021): _y = _y - 1970 _idx = _d_loss == _y _d_esta[_idx] = 100 _d_prob[_idx] = _b_loss_prob.data[_idx] _idx = _d_gain == _y _d_esta[_idx] = _y _d_prob[_idx] = _b_gain_prob.data[_idx] _d_esta[_d_forest_prob < 50] = 100 _d_test = (_d_esta < 100).astype(np.uint8) _d_test[(_d_esta < 100) & (_d_esta > 0)] = 1 _b_test = _b_esta.from_grid(_d_test, nodata=255) mod_filter.filter_band_mmu(_b_test, area=config.getfloat('conf', 'min_patch')) _d_esta[(_d_esta == 100) & (_b_test.data == 1)] = 0 _d_test = ((_d_esta > 0) & (_d_esta <= 100)).astype(np.uint8) _d_test[(_d_esta < 100) & (_d_esta > 0)] = 1 _b_test = _b_esta.from_grid(_d_test, nodata=255) mod_filter.filter_band_mmu(_b_test, area=config.getfloat('conf', 'min_patch')) _d_esta[(_d_esta == 0) & (_b_test.data == 1)] = 100 with file_unzip.file_unzip() as _zip: _zip.save(_b_esta, _fname_m1('esta_year')) _zip.save(_b_esta.from_grid(_d_prob, nodata=-9999), _fname_m1('esta_prob')) return True def main(opts): import logging from gio import config from gio import file_mag from gio 
import global_task import os _d_inp = config.get('conf', 'input') _d_ref = config.get('conf', 'refer', _d_inp) _f_mak = file_mag.get(os.path.join(_d_inp, 'tasks.txt')) _ts = global_task.load(_f_mak) from gio import multi_task _rs = multi_task.run(_task, [(_t, os.path.join(_d_inp, 'data'), os.path.join(_d_ref, 'data'), opts) for _t in multi_task.load(_ts, opts)], opts) print('processed', len([_r for _r in _rs if _r]), 'tiles') def usage(): _p = environ_mag.usage(True) _p.add_argument('-i', '--input', dest='input') _p.add_argument('-r', '--refer', dest='refer') _p.add_argument('--latest-tcc', dest='latest_tcc') _p.add_argument('-w', '--over-write', dest='over_write', type='bool') _p.add_argument('--min-tcc', dest='min_tcc', type=int, default=30) _p.add_argument('-m', '--min-patch', dest='min_patch', type=float, default=100 * 100) _p.add_argument('--test-tile', dest='test_tile') return _p if __name__ == '__main__': from gio import environ_mag environ_mag.init_path() environ_mag.run(main, [environ_mag.config(usage())])
true
true
f700dcc22a5c304484efeb7bededd6dfe427c31c
1,251
py
Python
ElevatorBot/misc/helperFunctions.py
LukasSchmid97/destinyBloodoakStats
1420802ce01c3435ad5c283f44eb4531d9b22c38
[ "MIT" ]
3
2019-10-19T11:24:50.000Z
2021-01-29T12:02:17.000Z
ElevatorBot/misc/helperFunctions.py
LukasSchmid97/destinyBloodoakStats
1420802ce01c3435ad5c283f44eb4531d9b22c38
[ "MIT" ]
29
2019-10-14T12:26:10.000Z
2021-07-28T20:50:29.000Z
ElevatorBot/misc/helperFunctions.py
LukasSchmid97/destinyBloodoakStats
1420802ce01c3435ad5c283f44eb4531d9b22c38
[ "MIT" ]
2
2019-10-13T17:11:09.000Z
2020-05-13T15:29:04.000Z
import datetime
import logging
import traceback

from dis_snek.models import ComponentContext
from dis_snek.models import InteractionContext

from ElevatorBot.misc.formating import embed_message


def get_now_with_tz() -> datetime.datetime:
    """Returns the current datetime (timezone aware)"""

    return datetime.datetime.now(tz=datetime.timezone.utc)


def localize_datetime(obj: datetime.datetime) -> datetime.datetime:
    """Returns a timezone aware object, localized to the system timezone"""

    return obj.astimezone()


async def log_error(
    ctx: InteractionContext | ComponentContext,
    error: Exception,
    situation: str,
) -> None:
    """Respond to the context and log error"""

    if not ctx.responded:
        await ctx.send(
            embeds=embed_message(
                "Error",
                f"Sorry, something went wrong\nThe Error has been logged and will be worked on",
                str(error),
            )
        )

    # log the error
    logger = logging.getLogger(situation)
    logger.exception(
        f"InteractionID '{ctx.interaction_id}' - Error {error} - Traceback: \n{''.join(traceback.format_tb(error.__traceback__))}"
    )

    # raising error again to making deving easier
    raise error
26.617021
130
0.680256
import datetime
import logging
import traceback

from dis_snek.models import ComponentContext
from dis_snek.models import InteractionContext

from ElevatorBot.misc.formating import embed_message


def get_now_with_tz() -> datetime.datetime:

    return datetime.datetime.now(tz=datetime.timezone.utc)


def localize_datetime(obj: datetime.datetime) -> datetime.datetime:

    return obj.astimezone()


async def log_error(
    ctx: InteractionContext | ComponentContext,
    error: Exception,
    situation: str,
) -> None:

    if not ctx.responded:
        await ctx.send(
            embeds=embed_message(
                "Error",
                f"Sorry, something went wrong\nThe Error has been logged and will be worked on",
                str(error),
            )
        )

    logger = logging.getLogger(situation)
    logger.exception(
        f"InteractionID '{ctx.interaction_id}' - Error {error} - Traceback: \n{''.join(traceback.format_tb(error.__traceback__))}"
    )

    raise error
true
true
f700dcc9cfb02e8ae619011acd798deb80b74bec
5,264
py
Python
sdk/python/pulumi_azure_native/edgeorder/v20201201preview/get_order_collection_by_name.py
pulumi-bot/pulumi-azure-native
f7b9490b5211544318e455e5cceafe47b628e12c
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/edgeorder/v20201201preview/get_order_collection_by_name.py
pulumi-bot/pulumi-azure-native
f7b9490b5211544318e455e5cceafe47b628e12c
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/edgeorder/v20201201preview/get_order_collection_by_name.py
pulumi-bot/pulumi-azure-native
f7b9490b5211544318e455e5cceafe47b628e12c
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables from . import outputs __all__ = [ 'GetOrderCollectionByNameResult', 'AwaitableGetOrderCollectionByNameResult', 'get_order_collection_by_name', ] @pulumi.output_type class GetOrderCollectionByNameResult: """ Specifies the properties or parameters for an order collection. Order collection is a grouping of one or more orders. """ def __init__(__self__, id=None, location=None, name=None, order_ids=None, system_data=None, tags=None, type=None): if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if location and not isinstance(location, str): raise TypeError("Expected argument 'location' to be a str") pulumi.set(__self__, "location", location) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if order_ids and not isinstance(order_ids, list): raise TypeError("Expected argument 'order_ids' to be a list") pulumi.set(__self__, "order_ids", order_ids) if system_data and not isinstance(system_data, dict): raise TypeError("Expected argument 'system_data' to be a dict") pulumi.set(__self__, "system_data", system_data) if tags and not isinstance(tags, dict): raise TypeError("Expected argument 'tags' to be a dict") pulumi.set(__self__, "tags", tags) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) @property @pulumi.getter def id(self) -> str: """ Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} """ return pulumi.get(self, "id") @property @pulumi.getter def location(self) -> str: """ The geo-location where the resource lives """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> str: """ The name of the resource """ return pulumi.get(self, "name") @property @pulumi.getter(name="orderIds") def order_ids(self) -> Sequence[str]: """ List of order ARM Ids which are part of an order collection. """ return pulumi.get(self, "order_ids") @property @pulumi.getter(name="systemData") def system_data(self) -> 'outputs.SystemDataResponse': """ Represents resource creation and update time """ return pulumi.get(self, "system_data") @property @pulumi.getter def tags(self) -> Optional[Mapping[str, str]]: """ Resource tags. """ return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> str: """ The type of the resource. E.g. 
"Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" """ return pulumi.get(self, "type") class AwaitableGetOrderCollectionByNameResult(GetOrderCollectionByNameResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetOrderCollectionByNameResult( id=self.id, location=self.location, name=self.name, order_ids=self.order_ids, system_data=self.system_data, tags=self.tags, type=self.type) def get_order_collection_by_name(order_collection_name: Optional[str] = None, resource_group_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetOrderCollectionByNameResult: """ Specifies the properties or parameters for an order collection. Order collection is a grouping of one or more orders. :param str order_collection_name: The name of the order collection :param str resource_group_name: The name of the resource group. The name is case insensitive. """ __args__ = dict() __args__['orderCollectionName'] = order_collection_name __args__['resourceGroupName'] = resource_group_name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-native:edgeorder/v20201201preview:getOrderCollectionByName', __args__, opts=opts, typ=GetOrderCollectionByNameResult).value return AwaitableGetOrderCollectionByNameResult( id=__ret__.id, location=__ret__.location, name=__ret__.name, order_ids=__ret__.order_ids, system_data=__ret__.system_data, tags=__ret__.tags, type=__ret__.type)
36.303448
193
0.652356
import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables from . import outputs __all__ = [ 'GetOrderCollectionByNameResult', 'AwaitableGetOrderCollectionByNameResult', 'get_order_collection_by_name', ] @pulumi.output_type class GetOrderCollectionByNameResult: def __init__(__self__, id=None, location=None, name=None, order_ids=None, system_data=None, tags=None, type=None): if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if location and not isinstance(location, str): raise TypeError("Expected argument 'location' to be a str") pulumi.set(__self__, "location", location) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if order_ids and not isinstance(order_ids, list): raise TypeError("Expected argument 'order_ids' to be a list") pulumi.set(__self__, "order_ids", order_ids) if system_data and not isinstance(system_data, dict): raise TypeError("Expected argument 'system_data' to be a dict") pulumi.set(__self__, "system_data", system_data) if tags and not isinstance(tags, dict): raise TypeError("Expected argument 'tags' to be a dict") pulumi.set(__self__, "tags", tags) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) @property @pulumi.getter def id(self) -> str: return pulumi.get(self, "id") @property @pulumi.getter def location(self) -> str: return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> str: return pulumi.get(self, "name") @property @pulumi.getter(name="orderIds") def order_ids(self) -> Sequence[str]: return pulumi.get(self, "order_ids") @property @pulumi.getter(name="systemData") def system_data(self) -> 'outputs.SystemDataResponse': return pulumi.get(self, "system_data") @property @pulumi.getter def tags(self) -> Optional[Mapping[str, str]]: return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> str: return pulumi.get(self, "type") class AwaitableGetOrderCollectionByNameResult(GetOrderCollectionByNameResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetOrderCollectionByNameResult( id=self.id, location=self.location, name=self.name, order_ids=self.order_ids, system_data=self.system_data, tags=self.tags, type=self.type) def get_order_collection_by_name(order_collection_name: Optional[str] = None, resource_group_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetOrderCollectionByNameResult: __args__ = dict() __args__['orderCollectionName'] = order_collection_name __args__['resourceGroupName'] = resource_group_name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-native:edgeorder/v20201201preview:getOrderCollectionByName', __args__, opts=opts, typ=GetOrderCollectionByNameResult).value return AwaitableGetOrderCollectionByNameResult( id=__ret__.id, location=__ret__.location, name=__ret__.name, order_ids=__ret__.order_ids, system_data=__ret__.system_data, tags=__ret__.tags, type=__ret__.type)
true
true
f700dcf909f39203bc9f428ef86290ba47cf8386
12,984
py
Python
plasTeX/Base/LaTeX/Index.py
perfectbark/LaTex2Docx
e32f9dcc59cce7bea4e7b114687b2300c623d8c0
[ "MIT" ]
23
2019-06-16T06:00:39.000Z
2022-03-29T14:44:32.000Z
plasTeX/Base/LaTeX/Index.py
hao-han/LaTex2Docx
e32f9dcc59cce7bea4e7b114687b2300c623d8c0
[ "MIT" ]
null
null
null
plasTeX/Base/LaTeX/Index.py
hao-han/LaTex2Docx
e32f9dcc59cce7bea4e7b114687b2300c623d8c0
[ "MIT" ]
12
2019-05-27T06:32:06.000Z
2022-03-15T10:22:07.000Z
#!/usr/bin/env python """ C.11.5 Index and Glossary (p211) """ import string, os from plasTeX.Tokenizer import Token, EscapeSequence from plasTeX import Command, Environment from plasTeX.Logging import getLogger from Sectioning import SectionUtils try: from pyuca import Collator collator = Collator(os.path.join(os.path.dirname(__file__), 'allkeys.txt')).sort_key except ImportError: collator = lambda x: x.lower() class IndexUtils(object): """ Helper functions for generating indexes """ linkType = 'index' level = Command.CHAPTER_LEVEL class Index(Command): """ Utility class used to surface the index entries to the renderer """ def __init__(self, *args, **kwargs): Command.__init__(self, *args, **kwargs) self.pages = [] self.key = [] self.sortkey = '' @property def totallen(self): """ Return the total number of entries generated by this entry """ total = 1 for item in self: total += item.totallen return total def __repr__(self): return '%s%s --> %s' % (''.join([x.source for x in self.key]), ', '.join([str(x) for x in self.pages]), Command.__repr__(self)) class IndexGroup(list): title = None def invoke(self, tex): if isinstance(self, Environment): Environment.invoke(self, tex) else: Command.invoke(self, tex) self.attributes['title'] = self.ownerDocument.createElement('indexname').expand(tex) @property def groups(self): """ Group index entries into batches according to the first letter """ batches = [] current = '' for item in self: try: label = title = item.sortkey[0].upper() if title in string.letters: pass elif title == '_': title = '_ (Underscore)' else: label = title = 'Symbols' except IndexError: label = title = 'Symbols' if current != title: newgroup = self.IndexGroup() newgroup.title = title newgroup.id = label batches.append(newgroup) current = title batches[-1].append(item) for item in batches: item[:] = self.splitColumns(item, self.ownerDocument.config['document']['index-columns']) return batches def splitColumns(self, items, cols): """ Divide the index entries into the specified number of columns Required Arguments: items -- list of column entries cols -- number of columns to create Returns: list of length `cols' containing groups of column entries """ entries = [(0,0)] # Find the total number of entries grandtotal = 0 for item in items: entries.append((item.totallen, item)) grandtotal += entries[-1][0] entries.pop(0) entries.reverse() # Get total number of entries per column coltotal = int(grandtotal / cols) # Group entries into columns current = 0 output = [[]] for num, item in entries: current += num if len(output) >= cols: output[-1].append(item) elif current > coltotal: output.append([item]) current = num elif current == coltotal: output[-1].append(item) output.append([]) current = 0 else: output[-1].append(item) output.reverse() for item in output: item.reverse() # Get rid of empty columns output = [x for x in output if x] # Pad to the correct number of columns for i in range(cols-len(output)): output.append([]) return output def digest(self, tokens): """ Sort and group index entries """ if isinstance(self, Environment): Environment.digest(self, tokens) if self.macroMode == self.MODE_END: return # Throw it all away, we don't need it. We'll be generating # our own index entries below. 
while self.childNodes: self.pop() else: Command.digest(self, tokens) doc = self.ownerDocument current = self entries = sorted(self.ownerDocument.userdata.get('index', [])) prev = IndexEntry([], None) for item in entries: # See how many levels we need to add/subtract between this one # and the previous common = 0 for prevkey, itemkey in zip(zip(prev.sortkey, prev.key), zip(item.sortkey, item.key)): if prevkey == itemkey: common += 1 continue break # print # print item # print (prev.key, prev.sortkey), (item.key, item.sortkey), common # Pop out to the common level i = common while i < len(prev.key): # print 'POP' current = current.parentNode i += 1 # Add the appropriate number of levels i = common while i < len(item.key): # print 'ADD', item.sortkey[i] newidx = self.Index() newidx.key = item.key[i] newidx.sortkey = item.sortkey[i] newidx.parentNode = current current.append(newidx) current = newidx i += 1 # Add the current page and format it current.pages.append(IndexDestination(item.type, item.node)) if item.format is not None: text = doc.createTextNode(str(len(current.pages))) ipn = item.format.getElementsByTagName('index-page-number') if ipn: ipn = ipn[0] ipn.parentNode.replaceChild(text, ipn) item.node.append(item.format) else: text = doc.createTextNode(str(len(current.pages))) item.node.append(text) prev = item class IndexDestination(object): def __init__(self, type, node): self._cr_type = type self._cr_node = node @property def see(self): return self._cr_type == IndexEntry.TYPE_SEE @property def seealso(self): return self._cr_type == IndexEntry.TYPE_SEEALSO @property def normal(self): return not(self.see) and not(self.seealso) def __getattribute__(self, name): if name.startswith('_cr_') or name in ['see', 'seealso', 'normal']: return object.__getattribute__(self, name) if self._cr_type and name in ['url']: return None return getattr(self._cr_node, name) class theindex(IndexUtils, Environment, SectionUtils): blockType = True level = Environment.CHAPTER_LEVEL counter = 'chapter' class printindex(IndexUtils, Command, SectionUtils): blockType = True level = Command.CHAPTER_LEVEL counter = 'chapter' class makeindex(Command): pass class makeglossary(Command): pass class glossary(Command): args = 'entry:nox' class index(Command): args = 'entry:nox' @property def textContent(self): return '' def invoke(self, tex): result = Command.invoke(self, tex) sortkey, key, format = [], [], [] entry = iter(self.attributes['entry']) current = [] alphanumeric = [Token.CC_OTHER, Token.CC_LETTER, Token.CC_SPACE] # Parse the index tokens for tok in entry: if tok.catcode in alphanumeric: # Escape character if tok == '"': for tok in entry: current.append(tok) break # Entry separator elif tok == '!': key.append(current) if len(sortkey) < len(key): sortkey.append(current) current = [] # Sort key separator elif tok == '@': sortkey.append(current) current = [] # Format separator elif tok == '|': key.append(current) if len(sortkey) < len(key): sortkey.append(current) current = format else: current.append(tok) continue # Everything else current.append(tok) # Make sure to get the stuff at the end if not format: key.append(current) if len(sortkey) < len(key): sortkey.append(current) # Convert the sort keys to strings for i, item in enumerate(sortkey): sortkey[i] = tex.expandTokens(item).textContent # Expand the key tokens for i, item in enumerate(key): key[i] = tex.expandTokens(item) # Get the format element type = IndexEntry.TYPE_NORMAL if not format: format = None else: macro = [] while format and format[0].catcode == 
Token.CC_LETTER: macro.append(format.pop(0)) if macro: macro = ''.join(macro) format.insert(0, EscapeSequence(macro)) if macro == 'see': type = IndexEntry.TYPE_SEE elif macro == 'seealso': type = IndexEntry.TYPE_SEEALSO format.append(EscapeSequence('index-page-number')) format = tex.expandTokens(format) # Store the index information in the document userdata = self.ownerDocument.userdata if 'index' not in userdata: userdata['index'] = [] userdata['index'].append(IndexEntry(key, self, sortkey, format, type)) return result class IndexEntry(object): """ Utility class used to assist in the sorting of index entries """ TYPE_NORMAL = 0 TYPE_SEE = 1 TYPE_SEEALSO = 2 def __init__(self, key, node, sortkey=None, format=None, type=0): """ Required Arguments: key -- a list of keys for the index entry node -- the node of the document that the index entry is associated with sortkey -- a list of sort keys, one per key, to be used for sorting instead of the key values format -- formatting that should be used to format the destination of the index entry type -- the type of entry that this is: TYPE_NORMAL, TYPE_SEE, or TYPE_SEEALSO """ self.key = key if not sortkey: self.sortkey = key else: self.sortkey = [] for i, sk in enumerate(sortkey): if sk is None: self.sortkey.append(key[i].textContent) else: self.sortkey.append(sk) self.format = format self.node = node self.type = type @property def see(self): return self.type == type(self).TYPE_SEE @property def seealso(self): return self.type == type(self).TYPE_SEEALSO @property def normal(self): return not(self.see) and not(self.seealso) def __cmp__(self, other): result = cmp(zip([collator(x) for x in self.sortkey if isinstance(x, basestring)], [collator(x.textContent) for x in self.key], self.key), zip([collator(x) for x in other.sortkey if isinstance(x, basestring)], [collator(x.textContent) for x in other.key], other.key)) if result == 0 and len(self.key) != len(other.key): return cmp(len(self.key), len(other.key)) return result def __repr__(self): if self.format is None: return ' '.join(['@'.join(self.sortkey), '!'.join([x.source for x in self.key])]) else: return ' '.join(['@'.join(self.sortkey), '!'.join([x.source for x in self.key]), ' '.join([x.source for x in self.format])]) def __str__(self): return repr(self) class IndexPageNumber(Command): macroName = 'index-page-number'
31.211538
92
0.519871
import string, os from plasTeX.Tokenizer import Token, EscapeSequence from plasTeX import Command, Environment from plasTeX.Logging import getLogger from Sectioning import SectionUtils try: from pyuca import Collator collator = Collator(os.path.join(os.path.dirname(__file__), 'allkeys.txt')).sort_key except ImportError: collator = lambda x: x.lower() class IndexUtils(object): linkType = 'index' level = Command.CHAPTER_LEVEL class Index(Command): def __init__(self, *args, **kwargs): Command.__init__(self, *args, **kwargs) self.pages = [] self.key = [] self.sortkey = '' @property def totallen(self): total = 1 for item in self: total += item.totallen return total def __repr__(self): return '%s%s --> %s' % (''.join([x.source for x in self.key]), ', '.join([str(x) for x in self.pages]), Command.__repr__(self)) class IndexGroup(list): title = None def invoke(self, tex): if isinstance(self, Environment): Environment.invoke(self, tex) else: Command.invoke(self, tex) self.attributes['title'] = self.ownerDocument.createElement('indexname').expand(tex) @property def groups(self): batches = [] current = '' for item in self: try: label = title = item.sortkey[0].upper() if title in string.letters: pass elif title == '_': title = '_ (Underscore)' else: label = title = 'Symbols' except IndexError: label = title = 'Symbols' if current != title: newgroup = self.IndexGroup() newgroup.title = title newgroup.id = label batches.append(newgroup) current = title batches[-1].append(item) for item in batches: item[:] = self.splitColumns(item, self.ownerDocument.config['document']['index-columns']) return batches def splitColumns(self, items, cols): entries = [(0,0)] grandtotal = 0 for item in items: entries.append((item.totallen, item)) grandtotal += entries[-1][0] entries.pop(0) entries.reverse() coltotal = int(grandtotal / cols) current = 0 output = [[]] for num, item in entries: current += num if len(output) >= cols: output[-1].append(item) elif current > coltotal: output.append([item]) current = num elif current == coltotal: output[-1].append(item) output.append([]) current = 0 else: output[-1].append(item) output.reverse() for item in output: item.reverse() output = [x for x in output if x] for i in range(cols-len(output)): output.append([]) return output def digest(self, tokens): if isinstance(self, Environment): Environment.digest(self, tokens) if self.macroMode == self.MODE_END: return while self.childNodes: self.pop() else: Command.digest(self, tokens) doc = self.ownerDocument current = self entries = sorted(self.ownerDocument.userdata.get('index', [])) prev = IndexEntry([], None) for item in entries: common = 0 for prevkey, itemkey in zip(zip(prev.sortkey, prev.key), zip(item.sortkey, item.key)): if prevkey == itemkey: common += 1 continue break i = common while i < len(prev.key): current = current.parentNode i += 1 i = common while i < len(item.key): newidx = self.Index() newidx.key = item.key[i] newidx.sortkey = item.sortkey[i] newidx.parentNode = current current.append(newidx) current = newidx i += 1 current.pages.append(IndexDestination(item.type, item.node)) if item.format is not None: text = doc.createTextNode(str(len(current.pages))) ipn = item.format.getElementsByTagName('index-page-number') if ipn: ipn = ipn[0] ipn.parentNode.replaceChild(text, ipn) item.node.append(item.format) else: text = doc.createTextNode(str(len(current.pages))) item.node.append(text) prev = item class IndexDestination(object): def __init__(self, type, node): self._cr_type = type self._cr_node = node @property def 
see(self): return self._cr_type == IndexEntry.TYPE_SEE @property def seealso(self): return self._cr_type == IndexEntry.TYPE_SEEALSO @property def normal(self): return not(self.see) and not(self.seealso) def __getattribute__(self, name): if name.startswith('_cr_') or name in ['see', 'seealso', 'normal']: return object.__getattribute__(self, name) if self._cr_type and name in ['url']: return None return getattr(self._cr_node, name) class theindex(IndexUtils, Environment, SectionUtils): blockType = True level = Environment.CHAPTER_LEVEL counter = 'chapter' class printindex(IndexUtils, Command, SectionUtils): blockType = True level = Command.CHAPTER_LEVEL counter = 'chapter' class makeindex(Command): pass class makeglossary(Command): pass class glossary(Command): args = 'entry:nox' class index(Command): args = 'entry:nox' @property def textContent(self): return '' def invoke(self, tex): result = Command.invoke(self, tex) sortkey, key, format = [], [], [] entry = iter(self.attributes['entry']) current = [] alphanumeric = [Token.CC_OTHER, Token.CC_LETTER, Token.CC_SPACE] for tok in entry: if tok.catcode in alphanumeric: if tok == '"': for tok in entry: current.append(tok) break # Entry separator elif tok == '!': key.append(current) if len(sortkey) < len(key): sortkey.append(current) current = [] # Sort key separator elif tok == '@': sortkey.append(current) current = [] # Format separator elif tok == '|': key.append(current) if len(sortkey) < len(key): sortkey.append(current) current = format else: current.append(tok) continue # Everything else current.append(tok) # Make sure to get the stuff at the end if not format: key.append(current) if len(sortkey) < len(key): sortkey.append(current) # Convert the sort keys to strings for i, item in enumerate(sortkey): sortkey[i] = tex.expandTokens(item).textContent # Expand the key tokens for i, item in enumerate(key): key[i] = tex.expandTokens(item) # Get the format element type = IndexEntry.TYPE_NORMAL if not format: format = None else: macro = [] while format and format[0].catcode == Token.CC_LETTER: macro.append(format.pop(0)) if macro: macro = ''.join(macro) format.insert(0, EscapeSequence(macro)) if macro == 'see': type = IndexEntry.TYPE_SEE elif macro == 'seealso': type = IndexEntry.TYPE_SEEALSO format.append(EscapeSequence('index-page-number')) format = tex.expandTokens(format) # Store the index information in the document userdata = self.ownerDocument.userdata if 'index' not in userdata: userdata['index'] = [] userdata['index'].append(IndexEntry(key, self, sortkey, format, type)) return result class IndexEntry(object): TYPE_NORMAL = 0 TYPE_SEE = 1 TYPE_SEEALSO = 2 def __init__(self, key, node, sortkey=None, format=None, type=0): self.key = key if not sortkey: self.sortkey = key else: self.sortkey = [] for i, sk in enumerate(sortkey): if sk is None: self.sortkey.append(key[i].textContent) else: self.sortkey.append(sk) self.format = format self.node = node self.type = type @property def see(self): return self.type == type(self).TYPE_SEE @property def seealso(self): return self.type == type(self).TYPE_SEEALSO @property def normal(self): return not(self.see) and not(self.seealso) def __cmp__(self, other): result = cmp(zip([collator(x) for x in self.sortkey if isinstance(x, basestring)], [collator(x.textContent) for x in self.key], self.key), zip([collator(x) for x in other.sortkey if isinstance(x, basestring)], [collator(x.textContent) for x in other.key], other.key)) if result == 0 and len(self.key) != len(other.key): return cmp(len(self.key), 
len(other.key)) return result def __repr__(self): if self.format is None: return ' '.join(['@'.join(self.sortkey), '!'.join([x.source for x in self.key])]) else: return ' '.join(['@'.join(self.sortkey), '!'.join([x.source for x in self.key]), ' '.join([x.source for x in self.format])]) def __str__(self): return repr(self) class IndexPageNumber(Command): macroName = 'index-page-number'
true
true
f700dd2b339ed6f9e5015297b286bb64a6e6ede6
695
py
Python
coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_disconnected_nodes.py
VadimLevin/coremltools
66c17b0fa040a0d8088d33590ab5c355478a9e5c
[ "BSD-3-Clause" ]
2,740
2017-10-03T23:19:01.000Z
2022-03-30T15:16:39.000Z
coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_disconnected_nodes.py
holzschu/coremltools
5ece9069a1487d5083f00f56afe07832d88e3dfa
[ "BSD-3-Clause" ]
1,057
2017-10-05T22:47:01.000Z
2022-03-31T23:51:15.000Z
coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_disconnected_nodes.py
holzschu/coremltools
5ece9069a1487d5083f00f56afe07832d88e3dfa
[ "BSD-3-Clause" ]
510
2017-10-04T19:22:28.000Z
2022-03-31T12:16:52.000Z
# -*- coding: utf-8 -*-

# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause


def delete_disconnected_nodes(gd):
    # delete all nodes with no inputs and outputs
    empty_nodes = []
    for k, v in gd.items():
        if (
            len(gd[k].inputs) == 0
            and len(gd[k].outputs) == 0
            and len(gd[k].control_inputs) == 0
            and len(gd[k].control_outputs) == 0
            and gd[k].op != "Placeholder"
        ):
            empty_nodes.append(k)

    for k in empty_nodes:
        del gd[k]
27.8
83
0.579856
def delete_disconnected_nodes(gd):
    empty_nodes = []
    for k, v in gd.items():
        if (
            len(gd[k].inputs) == 0
            and len(gd[k].outputs) == 0
            and len(gd[k].control_inputs) == 0
            and len(gd[k].control_outputs) == 0
            and gd[k].op != "Placeholder"
        ):
            empty_nodes.append(k)

    for k in empty_nodes:
        del gd[k]
true
true
f700ddd360b85347609f21c1d6489abdde7537cf
725
py
Python
code/rolling_tests.py
DonaldWhyte/high-performance-data-processing-in-python
5f0d4155c951155bc6885a4f283eb4044879d0ad
[ "MIT" ]
16
2018-07-26T16:37:19.000Z
2021-08-18T18:34:22.000Z
code/rolling_tests.py
DonaldWhyte/high-performance-data-processing-in-python-v2
f7f8076ff67d53be09e1d2f9988976e31b92f8e9
[ "MIT" ]
1
2019-12-13T01:18:00.000Z
2019-12-14T16:34:26.000Z
code/rolling_tests.py
DonaldWhyte/high-performance-data-processing-in-python-v2
f7f8076ff67d53be09e1d2f9988976e31b92f8e9
[ "MIT" ]
5
2019-02-14T14:04:49.000Z
2021-04-20T17:00:37.000Z
import numpy as np


def _main():
    # Inputs
    n = 3
    x = np.arange(20, dtype=np.float64)

    # Slow average/std
    avg = np.zeros(len(x) - n + 1)
    std = np.zeros(len(x) - n + 1)
    for i in range(len(avg)):
        avg[i] = np.mean(x[i:i+n])
        std[i] = np.std(x[i:i+n])
    print('AVG')
    print('\n'.join(str(x) for x in avg))
    print('STD:')
    print('\n'.join(str(x) for x in std))

    # Fast std
    squares = np.square(x)
    sum_of_squares = np.convolve(squares, np.ones(n, dtype=int), 'valid')
    var_fast = (sum_of_squares / n) - np.square(avg)
    std_fast = np.sqrt(var_fast)
    print('STD FAST:')
    print('\n'.join(str(x) for x in std_fast))


if __name__ == '__main__':
    _main()
21.969697
73
0.553103
import numpy as np


def _main():
    n = 3
    x = np.arange(20, dtype=np.float64)

    avg = np.zeros(len(x) - n + 1)
    std = np.zeros(len(x) - n + 1)
    for i in range(len(avg)):
        avg[i] = np.mean(x[i:i+n])
        std[i] = np.std(x[i:i+n])
    print('AVG')
    print('\n'.join(str(x) for x in avg))
    print('STD:')
    print('\n'.join(str(x) for x in std))

    squares = np.square(x)
    sum_of_squares = np.convolve(squares, np.ones(n, dtype=int), 'valid')
    var_fast = (sum_of_squares / n) - np.square(avg)
    std_fast = np.sqrt(var_fast)
    print('STD FAST:')
    print('\n'.join(str(x) for x in std_fast))


if __name__ == '__main__':
    _main()
true
true
f700de8154178c1411a9769d01d40870fe625d67
18,052
py
Python
methods/transformers/examples/deebert/src/modeling_highway_bert.py
INK-USC/RiddleSense
a3d57eaf084da9cf6b77692c608e2cd2870fbd97
[ "MIT" ]
3
2021-07-06T20:02:31.000Z
2022-03-27T13:13:01.000Z
methods/transformers/examples/deebert/src/modeling_highway_bert.py
INK-USC/RiddleSense
a3d57eaf084da9cf6b77692c608e2cd2870fbd97
[ "MIT" ]
null
null
null
methods/transformers/examples/deebert/src/modeling_highway_bert.py
INK-USC/RiddleSense
a3d57eaf084da9cf6b77692c608e2cd2870fbd97
[ "MIT" ]
null
null
null
import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, BertLayer, BertPooler, BertPreTrainedModel, ) def entropy(x): """Calculate entropy of a pre-softmax logit Tensor""" exp_x = torch.exp(x) A = torch.sum(exp_x, dim=1) # sum of exp(x_i) B = torch.sum(x * exp_x, dim=1) # sum of x_i * exp(x_i) return torch.log(A) - B / A class DeeBertEncoder(nn.Module): def __init__(self, config): super().__init__() self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)]) self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)]) self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)] def set_early_exit_entropy(self, x): if (type(x) is float) or (type(x) is int): for i in range(len(self.early_exit_entropy)): self.early_exit_entropy[i] = x else: self.early_exit_entropy = x def init_highway_pooler(self, pooler): loaded_model = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name]) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, ): all_hidden_states = () all_attentions = () all_highway_exits = () for i, layer_module in enumerate(self.layer): if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask ) hidden_states = layer_outputs[0] if self.output_attentions: all_attentions = all_attentions + (layer_outputs[1],) current_outputs = (hidden_states,) if self.output_hidden_states: current_outputs = current_outputs + (all_hidden_states,) if self.output_attentions: current_outputs = current_outputs + (all_attentions,) highway_exit = self.highway[i](current_outputs) # logits, pooled_output if not self.training: highway_logits = highway_exit[0] highway_entropy = entropy(highway_logits) highway_exit = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy all_highway_exits = all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(new_output, i + 1) else: all_highway_exits = all_highway_exits + (highway_exit,) # Add last layer if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states,) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: outputs = outputs + (all_attentions,) outputs = outputs + (all_highway_exits,) return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits @add_start_docstrings( "The Bert Model transformer with early exiting (DeeBERT). 
", BERT_START_DOCSTRING, ) class DeeBertModel(BertPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = BertEmbeddings(config) self.encoder = DeeBertEncoder(config) self.pooler = BertPooler(config) self.init_weights() def init_highway_pooler(self): self.encoder.init_highway_pooler(self.pooler) def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, ): r""" Return: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pre-training. This output is usually *not* a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. highway_exits (:obj:`tuple(tuple(torch.Tensor))`: Tuple of each early exit's results (total length: number of layers) Each tuple is again, a tuple of length 2 - the first entry is logits and the second entry is hidden states. 
""" if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_attention_mask.dim() == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] encoder_extended_attention_mask = encoder_extended_attention_mask.to( dtype=next(self.parameters()).dtype ) # fp16 compatibility encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) outputs = (sequence_output, pooled_output,) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits class HighwayException(Exception): def __init__(self, message, exit_layer): self.message = message self.exit_layer = exit_layer # start from 1! 
class BertHighway(nn.Module): """A module to provide a shortcut from (the output of one non-final BertLayer in BertEncoder) to (cross-entropy computation in BertForSequenceClassification) """ def __init__(self, config): super().__init__() self.pooler = BertPooler(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) def forward(self, encoder_outputs): # Pooler pooler_input = encoder_outputs[0] pooler_output = self.pooler(pooler_input) # "return" pooler_output # BertModel bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:] # "return" bmodel_output # Dropout and classification pooled_output = bmodel_output[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) return logits, pooled_output @add_start_docstrings( """Bert Model (with early exiting - DeeBERT) with a classifier on top, also takes care of multi-layer training. """, BERT_START_DOCSTRING, ) class DeeBertForSequenceClassification(BertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.num_layers = config.num_hidden_layers self.bert = DeeBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) self.init_weights() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided): Classification (or regression if config.num_labels==1) loss. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. highway_exits (:obj:`tuple(tuple(torch.Tensor))`: Tuple of each early exit's results (total length: number of layers) Each tuple is again, a tuple of length 2 - the first entry is logits and the second entry is hidden states. 
""" exit_layer = self.num_layers try: outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, ) # sequence_output, pooled_output, (hidden_states), (attentions), highway exits pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: outputs = e.message exit_layer = e.exit_layer logits = outputs[0] if not self.training: original_entropy = entropy(logits) highway_entropy = [] highway_logits_all = [] if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) # work with highway exits highway_losses = [] for highway_exit in outputs[-1]: highway_logits = highway_exit[0] if not self.training: highway_logits_all.append(highway_logits) highway_entropy.append(highway_exit[2]) if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1)) highway_losses.append(highway_loss) if train_highway: outputs = (sum(highway_losses[:-1]),) + outputs # exclude the final highway, of course else: outputs = (loss,) + outputs if not self.training: outputs = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: outputs = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
45.471033
173
0.628241
import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, BertLayer, BertPooler, BertPreTrainedModel, ) def entropy(x): exp_x = torch.exp(x) A = torch.sum(exp_x, dim=1) B = torch.sum(x * exp_x, dim=1) return torch.log(A) - B / A class DeeBertEncoder(nn.Module): def __init__(self, config): super().__init__() self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)]) self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)]) self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)] def set_early_exit_entropy(self, x): if (type(x) is float) or (type(x) is int): for i in range(len(self.early_exit_entropy)): self.early_exit_entropy[i] = x else: self.early_exit_entropy = x def init_highway_pooler(self, pooler): loaded_model = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name]) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, ): all_hidden_states = () all_attentions = () all_highway_exits = () for i, layer_module in enumerate(self.layer): if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask ) hidden_states = layer_outputs[0] if self.output_attentions: all_attentions = all_attentions + (layer_outputs[1],) current_outputs = (hidden_states,) if self.output_hidden_states: current_outputs = current_outputs + (all_hidden_states,) if self.output_attentions: current_outputs = current_outputs + (all_attentions,) highway_exit = self.highway[i](current_outputs) if not self.training: highway_logits = highway_exit[0] highway_entropy = entropy(highway_logits) highway_exit = highway_exit + (highway_entropy,) all_highway_exits = all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(new_output, i + 1) else: all_highway_exits = all_highway_exits + (highway_exit,) if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states,) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: outputs = outputs + (all_attentions,) outputs = outputs + (all_highway_exits,) return outputs @add_start_docstrings( "The Bert Model transformer with early exiting (DeeBERT). 
", BERT_START_DOCSTRING, ) class DeeBertModel(BertPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = BertEmbeddings(config) self.encoder = DeeBertEncoder(config) self.pooler = BertPooler(config) self.init_weights() def init_highway_pooler(self): self.encoder.init_highway_pooler(self.pooler) def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, ): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device) if encoder_attention_mask.dim() == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] encoder_extended_attention_mask = encoder_extended_attention_mask.to( dtype=next(self.parameters()).dtype ) encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) outputs = (sequence_output, pooled_output,) + encoder_outputs[ 1: ] return outputs class HighwayException(Exception): def __init__(self, message, exit_layer): self.message = message self.exit_layer = exit_layer class BertHighway(nn.Module): def __init__(self, config): super().__init__() self.pooler = BertPooler(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) def forward(self, encoder_outputs): pooler_input = encoder_outputs[0] pooler_output = self.pooler(pooler_input) bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:] pooled_output = bmodel_output[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) return logits, pooled_output @add_start_docstrings( """Bert Model (with early exiting - DeeBERT) with a classifier on top, also takes care of multi-layer training. 
""", BERT_START_DOCSTRING, ) class DeeBertForSequenceClassification(BertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.num_layers = config.num_hidden_layers self.bert = DeeBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) self.init_weights() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False, ): exit_layer = self.num_layers try: outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) outputs = (logits,) + outputs[2:] except HighwayException as e: outputs = e.message exit_layer = e.exit_layer logits = outputs[0] if not self.training: original_entropy = entropy(logits) highway_entropy = [] highway_logits_all = [] if labels is not None: if self.num_labels == 1: loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) highway_losses = [] for highway_exit in outputs[-1]: highway_logits = highway_exit[0] if not self.training: highway_logits_all.append(highway_logits) highway_entropy.append(highway_exit[2]) if self.num_labels == 1: loss_fct = MSELoss() highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1)) highway_losses.append(highway_loss) if train_highway: outputs = (sum(highway_losses[:-1]),) + outputs else: outputs = (loss,) + outputs if not self.training: outputs = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: outputs = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) return outputs
true
true
f700df51a301f8e08e05d3af92b79a2c373f8c3a
2,122
py
Python
sdk/python/pulumi_kubernetes/rbac/v1beta1/ClusterRoleBindingList.py
kulado/kulado-kubernetes
ecb72f9b25f6dbbae41f00c82388b1ca32329cc7
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_kubernetes/rbac/v1beta1/ClusterRoleBindingList.py
kulado/kulado-kubernetes
ecb72f9b25f6dbbae41f00c82388b1ca32329cc7
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_kubernetes/rbac/v1beta1/ClusterRoleBindingList.py
kulado/kulado-kubernetes
ecb72f9b25f6dbbae41f00c82388b1ca32329cc7
[ "Apache-2.0" ]
1
2019-08-20T22:51:57.000Z
2019-08-20T22:51:57.000Z
# *** WARNING: this file was generated by the Kulado Kubernetes codegen tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import kulado
import kulado.runtime
import warnings

from ... import tables, version


class ClusterRoleBindingList(kulado.CustomResource):
    """
    ClusterRoleBindingList is a collection of ClusterRoleBindings
    """
    def __init__(self, resource_name, opts=None, items=None, metadata=None, __name__=None, __opts__=None):
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if not resource_name:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(resource_name, str):
            raise TypeError('Expected resource name to be a string')
        if opts and not isinstance(opts, kulado.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')

        __props__ = dict()

        __props__['apiVersion'] = 'rbac.authorization.k8s.io/v1beta1'
        __props__['kind'] = 'ClusterRoleBindingList'
        if items is None:
            raise TypeError('Missing required property items')
        __props__['items'] = items
        __props__['metadata'] = metadata

        if opts is None:
            opts = kulado.ResourceOptions()
        if opts.version is None:
            opts.version = version.get_version()

        super(ClusterRoleBindingList, self).__init__(
            "kubernetes:rbac.authorization.k8s.io/v1beta1:ClusterRoleBindingList",
            resource_name,
            __props__,
            opts)

    def translate_output_property(self, prop: str) -> str:
        return tables._CASING_FORWARD_TABLE.get(prop) or prop

    def translate_input_property(self, prop: str) -> str:
        return tables._CASING_BACKWARD_TABLE.get(prop) or prop
39.296296
107
0.66918
import kulado
import kulado.runtime
import warnings

from ... import tables, version


class ClusterRoleBindingList(kulado.CustomResource):
    def __init__(self, resource_name, opts=None, items=None, metadata=None, __name__=None, __opts__=None):
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if not resource_name:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(resource_name, str):
            raise TypeError('Expected resource name to be a string')
        if opts and not isinstance(opts, kulado.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')

        __props__ = dict()

        __props__['apiVersion'] = 'rbac.authorization.k8s.io/v1beta1'
        __props__['kind'] = 'ClusterRoleBindingList'
        if items is None:
            raise TypeError('Missing required property items')
        __props__['items'] = items
        __props__['metadata'] = metadata

        if opts is None:
            opts = kulado.ResourceOptions()
        if opts.version is None:
            opts.version = version.get_version()

        super(ClusterRoleBindingList, self).__init__(
            "kubernetes:rbac.authorization.k8s.io/v1beta1:ClusterRoleBindingList",
            resource_name,
            __props__,
            opts)

    def translate_output_property(self, prop: str) -> str:
        return tables._CASING_FORWARD_TABLE.get(prop) or prop

    def translate_input_property(self, prop: str) -> str:
        return tables._CASING_BACKWARD_TABLE.get(prop) or prop
true
true
f700dfd336a74ceccacb99c9ee142a135cdb9899
3,286
py
Python
packages/tg/appwrappers/identity.py
rjcuevas/Email-Frontend-AngularJS-
753dbd190582ed953058c9e15c2be920716c7985
[ "MIT" ]
null
null
null
packages/tg/appwrappers/identity.py
rjcuevas/Email-Frontend-AngularJS-
753dbd190582ed953058c9e15c2be920716c7985
[ "MIT" ]
null
null
null
packages/tg/appwrappers/identity.py
rjcuevas/Email-Frontend-AngularJS-
753dbd190582ed953058c9e15c2be920716c7985
[ "MIT" ]
null
null
null
import logging from .base import ApplicationWrapper from ..configuration.utils import coerce_config from ..support.converters import asbool log = logging.getLogger(__name__) class IdentityApplicationWrapper(ApplicationWrapper): """Provides user identity when authentication is enabled. The repoze.who provided identity takes precedence over the identity provided by IdentityApplicationWrapper if available. Supported options which can be provided by config are: - ``sa_auth.authmetadata``: The TGAuthMetadata object that should be used to retrieve identity metadata. - ``identity.enabled``: Enable the Identity Application Wrapper. By default enabled if authmetadata available. - ``identity.allow_missing_user``: Whenever the identity should be discarded or not when the authmetadata is unable to find an user. """ def __init__(self, handler, config): super(IdentityApplicationWrapper, self).__init__(handler, config) options = { 'enabled': True, 'allow_missing_user': True, 'authmetadata': config.get('sa_auth', {}).get('authmetadata'), } options.update(coerce_config(config, 'identity.', { 'enabled': asbool, 'allow_missing_user': asbool })) self.enabled = options['enabled'] and options['authmetadata'] is not None self.options = options self.tgmdprovider = options['authmetadata'] log.debug('Identity enabled: %s -> %s', self.enabled, self.options) @property def injected(self): return self.enabled def __call__(self, controller, environ, context): identity = environ.get('repoze.who.identity') if identity is None: context.request.identity = None return self.next_handler(controller, environ, context) req_identity = {} # Get the userid retrieved by repoze.who Authenticator userid = identity['repoze.who.userid'] if userid is not None: # Finding the user, groups and permissions: identity['user'] = identity_user = self.tgmdprovider.get_user(identity, userid) if identity_user: identity['groups'] = self.tgmdprovider.get_groups(identity, userid) identity['permissions'] = self.tgmdprovider.get_permissions(identity, userid) else: identity['groups'] = identity['permissions'] = [] req_identity = Identity() req_identity.update(identity) req_identity['repoze.what.userid'] = userid if req_identity.get('user') is None and not self.options['allow_missing_user']: req_identity = {} # Add identity to request with repoze.who/what compatibility context.request.identity = req_identity environ['repoze.who.identity'] = req_identity environ['repoze.what.credentials'] = req_identity return self.next_handler(controller, environ, context) class Identity(dict): """dict subclass: prevent members from being rendered during print. Took as is from repoze.who. """ def __repr__(self): return '<TurboGears Identity (hidden, dict-like) at %s>' % id(self) __str__ = __repr__
38.658824
140
0.660986
import logging from .base import ApplicationWrapper from ..configuration.utils import coerce_config from ..support.converters import asbool log = logging.getLogger(__name__) class IdentityApplicationWrapper(ApplicationWrapper): def __init__(self, handler, config): super(IdentityApplicationWrapper, self).__init__(handler, config) options = { 'enabled': True, 'allow_missing_user': True, 'authmetadata': config.get('sa_auth', {}).get('authmetadata'), } options.update(coerce_config(config, 'identity.', { 'enabled': asbool, 'allow_missing_user': asbool })) self.enabled = options['enabled'] and options['authmetadata'] is not None self.options = options self.tgmdprovider = options['authmetadata'] log.debug('Identity enabled: %s -> %s', self.enabled, self.options) @property def injected(self): return self.enabled def __call__(self, controller, environ, context): identity = environ.get('repoze.who.identity') if identity is None: context.request.identity = None return self.next_handler(controller, environ, context) req_identity = {} userid = identity['repoze.who.userid'] if userid is not None: identity['user'] = identity_user = self.tgmdprovider.get_user(identity, userid) if identity_user: identity['groups'] = self.tgmdprovider.get_groups(identity, userid) identity['permissions'] = self.tgmdprovider.get_permissions(identity, userid) else: identity['groups'] = identity['permissions'] = [] req_identity = Identity() req_identity.update(identity) req_identity['repoze.what.userid'] = userid if req_identity.get('user') is None and not self.options['allow_missing_user']: req_identity = {} context.request.identity = req_identity environ['repoze.who.identity'] = req_identity environ['repoze.what.credentials'] = req_identity return self.next_handler(controller, environ, context) class Identity(dict): def __repr__(self): return '<TurboGears Identity (hidden, dict-like) at %s>' % id(self) __str__ = __repr__
true
true
f700e2424796afd0180d4de78c337de6b2e51cd4
7,792
py
Python
litex_things/deps/litex_boards/litex_boards/partner/targets/nereid.py
bjonnh/fomu-playground
9f95ed7b28d15ce219d09c16c2c8d6b5594adceb
[ "0BSD" ]
null
null
null
litex_things/deps/litex_boards/litex_boards/partner/targets/nereid.py
bjonnh/fomu-playground
9f95ed7b28d15ce219d09c16c2c8d6b5594adceb
[ "0BSD" ]
null
null
null
litex_things/deps/litex_boards/litex_boards/partner/targets/nereid.py
bjonnh/fomu-playground
9f95ed7b28d15ce219d09c16c2c8d6b5594adceb
[ "0BSD" ]
null
null
null
#!/usr/bin/env python3 # This file is Copyright (c) 2018-2019 Rohit Singh <[email protected]> # This file is Copyright (c) 2019 Florent Kermarrec <[email protected]> # License: BSD import sys from migen import * from litex.build.generic_platform import * from litex.soc.integration.soc_core import * from litex.soc.integration.soc_sdram import * from litex.soc.integration.builder import * from litex.soc.cores.clock import * from litex.soc.cores import dna, xadc from litex.soc.cores.uart import * from litex.soc.integration.cpu_interface import get_csr_header from litedram.modules import MT8KTF51264 from litedram.modules import _TechnologyTimings, _SpeedgradeTimings from litedram.phy import s7ddrphy from litepcie.phy.s7pciephy import S7PCIEPHY from litepcie.core import LitePCIeEndpoint, LitePCIeMSI from litepcie.frontend.dma import LitePCIeDMA from litepcie.frontend.wishbone import LitePCIeWishboneBridge from litex_boards.platforms import nereid # CRG ---------------------------------------------------------------------------------------------- class CRG(Module): def __init__(self, platform, sys_clk_freq): self.clock_domains.cd_sys = ClockDomain() self.clock_domains.cd_sys4x = ClockDomain(reset_less=True) self.clock_domains.cd_clk200 = ClockDomain() clk100 = platform.request("clk100") self.submodules.pll = pll = S7PLL() pll.register_clkin(clk100, 100e6) pll.create_clkout(self.cd_sys, sys_clk_freq) pll.create_clkout(self.cd_sys4x, 4*sys_clk_freq) pll.create_clkout(self.cd_clk200, 200e6) self.comb += pll.reset.eq(platform.request("cpu_reset")) self.submodules.idelayctrl = S7IDELAYCTRL(self.cd_clk200) # NereidSoC ---------------------------------------------------------------------------------------- class NereidSoC(SoCSDRAM): SoCSDRAM.mem_map["csr"] = 0x00000000 SoCSDRAM.mem_map["rom"] = 0x20000000 def __init__(self, platform, with_pcie_uart=True): sys_clk_freq = int(100e6) SoCSDRAM.__init__(self, platform, sys_clk_freq, csr_data_width=32, integrated_rom_size=0x10000, integrated_sram_size=0x10000, integrated_main_ram_size=0x10000, # FIXME: keep this for initial PCIe tests ident="Nereid LiteX Test SoC", ident_version=True, with_uart=not with_pcie_uart) # CRG -------------------------------------------------------------------------------------- self.submodules.crg = CRG(platform, sys_clk_freq) self.add_csr("crg") # DNA -------------------------------------------------------------------------------------- self.submodules.dna = dna.DNA() self.add_csr("dna") # XADC ------------------------------------------------------------------------------------- self.submodules.xadc = xadc.XADC() self.add_csr("xadc") # SDRAM ------------------------------------------------------------------------------------ if not self.integrated_main_ram_size: self.submodules.ddrphy = s7ddrphy.K7DDRPHY( platform.request("ddram"), sys_clk_freq=sys_clk_freq, iodelay_clk_freq=200e6) sdram_module = MT8KTF51264(sys_clk_freq, "1:4", speedgrade="800") self.register_sdram(self.ddrphy, sdram_module.geom_settings, sdram_module.timing_settings) self.add_csr("ddrphy") # PCIe ------------------------------------------------------------------------------------- # pcie phy self.submodules.pcie_phy = S7PCIEPHY(platform, platform.request("pcie_x1"), bar0_size=0x20000) self.pcie_phy.cd_pcie.clk.attr.add("keep") platform.add_platform_command("create_clock -name pcie_clk -period 8 [get_nets pcie_clk]") platform.add_false_path_constraints( self.crg.cd_sys.clk, self.pcie_phy.cd_pcie.clk) self.add_csr("pcie_phy") # pcie endpoint 
self.submodules.pcie_endpoint = LitePCIeEndpoint(self.pcie_phy) # pcie wishbone bridge self.submodules.pcie_wishbone = LitePCIeWishboneBridge(self.pcie_endpoint, lambda a: 1, shadow_base=self.shadow_base) self.add_wb_master(self.pcie_wishbone.wishbone) # pcie dma self.submodules.pcie_dma = LitePCIeDMA(self.pcie_phy, self.pcie_endpoint, with_buffering=True, buffering_depth=1024, with_loopback=True) self.add_csr("pcie_dma") # pcie msi self.submodules.pcie_msi = LitePCIeMSI() self.add_csr("pcie_msi") self.comb += self.pcie_msi.source.connect(self.pcie_phy.msi) self.msis = { "DMA_WRITER": self.pcie_dma.writer.irq, "DMA_READER": self.pcie_dma.reader.irq } for i, (k, v) in enumerate(sorted(self.msis.items())): self.comb += self.pcie_msi.irqs[i].eq(v) self.add_constant(k + "_INTERRUPT", i) # pcie uart if with_pcie_uart: class PCIeUART(Module, AutoCSR): def __init__(self, uart): self.rx_valid = CSRStatus() self.rx_ready = CSR() self.rx_data = CSRStatus(8) self.tx_valid = CSR() self.tx_ready = CSRStatus() self.tx_data = CSRStorage(8) # # # # cpu to pcie self.comb += [ self.rx_valid.status.eq(uart.sink.valid), uart.sink.ready.eq(self.rx_ready.re), self.rx_data.status.eq(uart.sink.data), ] # pcie to cpu self.sync += [ If(self.tx_valid.re, uart.source.valid.eq(1) ).Elif(uart.source.ready, uart.source.valid.eq(0) ) ] self.comb += [ self.tx_ready.status.eq(~uart.source.valid), uart.source.data.eq(self.tx_data.storage) ] uart_interface = RS232PHYInterface() self.submodules.uart = UART(uart_interface) self.add_csr("uart") self.add_interrupt("uart") self.submodules.pcie_uart = PCIeUART(uart_interface) self.add_csr("pcie_uart") # Leds ------------------------------------------------------------------------------------- # led blinking (sys) sys_counter = Signal(32) self.sync.sys += sys_counter.eq(sys_counter + 1) rgb = platform.request("rgb_led") self.comb += [ rgb.r.eq(1), rgb.g.eq(sys_counter[26]), rgb.b.eq(1), ] def generate_software_header(self, filename): csr_header = get_csr_header(self.get_csr_regions(), self.get_constants(), with_access_functions=False, with_shadow_base=False) tools.write_to_file(filename, csr_header) # Build -------------------------------------------------------------------------------------------- def main(): platform = nereid.Platform() soc = NereidSoC(platform) builder = Builder(soc, output_dir="../build/nereid", csr_csv="../build/nereid/csr.csv", compile_gateware=not "no-compile" in sys.argv[1:]) vns = builder.build(build_name="nereid") soc.generate_software_header("../software/kernel/csr.h") if __name__ == "__main__": main()
39.353535
102
0.538758
import sys from migen import * from litex.build.generic_platform import * from litex.soc.integration.soc_core import * from litex.soc.integration.soc_sdram import * from litex.soc.integration.builder import * from litex.soc.cores.clock import * from litex.soc.cores import dna, xadc from litex.soc.cores.uart import * from litex.soc.integration.cpu_interface import get_csr_header from litedram.modules import MT8KTF51264 from litedram.modules import _TechnologyTimings, _SpeedgradeTimings from litedram.phy import s7ddrphy from litepcie.phy.s7pciephy import S7PCIEPHY from litepcie.core import LitePCIeEndpoint, LitePCIeMSI from litepcie.frontend.dma import LitePCIeDMA from litepcie.frontend.wishbone import LitePCIeWishboneBridge from litex_boards.platforms import nereid class CRG(Module): def __init__(self, platform, sys_clk_freq): self.clock_domains.cd_sys = ClockDomain() self.clock_domains.cd_sys4x = ClockDomain(reset_less=True) self.clock_domains.cd_clk200 = ClockDomain() clk100 = platform.request("clk100") self.submodules.pll = pll = S7PLL() pll.register_clkin(clk100, 100e6) pll.create_clkout(self.cd_sys, sys_clk_freq) pll.create_clkout(self.cd_sys4x, 4*sys_clk_freq) pll.create_clkout(self.cd_clk200, 200e6) self.comb += pll.reset.eq(platform.request("cpu_reset")) self.submodules.idelayctrl = S7IDELAYCTRL(self.cd_clk200) class NereidSoC(SoCSDRAM): SoCSDRAM.mem_map["csr"] = 0x00000000 SoCSDRAM.mem_map["rom"] = 0x20000000 def __init__(self, platform, with_pcie_uart=True): sys_clk_freq = int(100e6) SoCSDRAM.__init__(self, platform, sys_clk_freq, csr_data_width=32, integrated_rom_size=0x10000, integrated_sram_size=0x10000, integrated_main_ram_size=0x10000, ident="Nereid LiteX Test SoC", ident_version=True, with_uart=not with_pcie_uart) self.submodules.crg = CRG(platform, sys_clk_freq) self.add_csr("crg") self.submodules.dna = dna.DNA() self.add_csr("dna") self.submodules.xadc = xadc.XADC() self.add_csr("xadc") if not self.integrated_main_ram_size: self.submodules.ddrphy = s7ddrphy.K7DDRPHY( platform.request("ddram"), sys_clk_freq=sys_clk_freq, iodelay_clk_freq=200e6) sdram_module = MT8KTF51264(sys_clk_freq, "1:4", speedgrade="800") self.register_sdram(self.ddrphy, sdram_module.geom_settings, sdram_module.timing_settings) self.add_csr("ddrphy") self.submodules.pcie_phy = S7PCIEPHY(platform, platform.request("pcie_x1"), bar0_size=0x20000) self.pcie_phy.cd_pcie.clk.attr.add("keep") platform.add_platform_command("create_clock -name pcie_clk -period 8 [get_nets pcie_clk]") platform.add_false_path_constraints( self.crg.cd_sys.clk, self.pcie_phy.cd_pcie.clk) self.add_csr("pcie_phy") self.submodules.pcie_endpoint = LitePCIeEndpoint(self.pcie_phy) self.submodules.pcie_wishbone = LitePCIeWishboneBridge(self.pcie_endpoint, lambda a: 1, shadow_base=self.shadow_base) self.add_wb_master(self.pcie_wishbone.wishbone) self.submodules.pcie_dma = LitePCIeDMA(self.pcie_phy, self.pcie_endpoint, with_buffering=True, buffering_depth=1024, with_loopback=True) self.add_csr("pcie_dma") self.submodules.pcie_msi = LitePCIeMSI() self.add_csr("pcie_msi") self.comb += self.pcie_msi.source.connect(self.pcie_phy.msi) self.msis = { "DMA_WRITER": self.pcie_dma.writer.irq, "DMA_READER": self.pcie_dma.reader.irq } for i, (k, v) in enumerate(sorted(self.msis.items())): self.comb += self.pcie_msi.irqs[i].eq(v) self.add_constant(k + "_INTERRUPT", i) if with_pcie_uart: class PCIeUART(Module, AutoCSR): def __init__(self, uart): self.rx_valid = CSRStatus() self.rx_ready = CSR() self.rx_data = CSRStatus(8) self.tx_valid = CSR() 
self.tx_ready = CSRStatus() self.tx_data = CSRStorage(8) self.comb += [ self.rx_valid.status.eq(uart.sink.valid), uart.sink.ready.eq(self.rx_ready.re), self.rx_data.status.eq(uart.sink.data), ] self.sync += [ If(self.tx_valid.re, uart.source.valid.eq(1) ).Elif(uart.source.ready, uart.source.valid.eq(0) ) ] self.comb += [ self.tx_ready.status.eq(~uart.source.valid), uart.source.data.eq(self.tx_data.storage) ] uart_interface = RS232PHYInterface() self.submodules.uart = UART(uart_interface) self.add_csr("uart") self.add_interrupt("uart") self.submodules.pcie_uart = PCIeUART(uart_interface) self.add_csr("pcie_uart") sys_counter = Signal(32) self.sync.sys += sys_counter.eq(sys_counter + 1) rgb = platform.request("rgb_led") self.comb += [ rgb.r.eq(1), rgb.g.eq(sys_counter[26]), rgb.b.eq(1), ] def generate_software_header(self, filename): csr_header = get_csr_header(self.get_csr_regions(), self.get_constants(), with_access_functions=False, with_shadow_base=False) tools.write_to_file(filename, csr_header) def main(): platform = nereid.Platform() soc = NereidSoC(platform) builder = Builder(soc, output_dir="../build/nereid", csr_csv="../build/nereid/csr.csv", compile_gateware=not "no-compile" in sys.argv[1:]) vns = builder.build(build_name="nereid") soc.generate_software_header("../software/kernel/csr.h") if __name__ == "__main__": main()
true
true
f700e260a7d6b3f4dc9cdfd4df281f246d308a20
2,504
py
Python
tests/test_validators.py
fakeezz/edipy
00c125621201e7290add135240c131c22feb3a72
[ "MIT" ]
1
2018-05-15T18:27:31.000Z
2018-05-15T18:27:31.000Z
tests/test_validators.py
fakeezz/edipy
00c125621201e7290add135240c131c22feb3a72
[ "MIT" ]
null
null
null
tests/test_validators.py
fakeezz/edipy
00c125621201e7290add135240c131c22feb3a72
[ "MIT" ]
2
2020-12-25T16:37:56.000Z
2021-06-22T13:13:18.000Z
# coding: utf-8

import pytest

from edipy import fields, validators, exceptions


@pytest.mark.parametrize('fixed_type, data', [
    (fields.Integer(1, validators=[validators.Range(1, 5)]), '1'),
    (fields.Integer(1, validators=[validators.MaxValue(3)]), '2'),
    (fields.Integer(1, validators=[validators.MinValue(1)]), '5'),
    (fields.String(5, validators=[validators.Regex(r"[0-9]+")]), '12345'),
    (fields.String(12, validators=[validators.Email()]), '[email protected]'),
])
def test_using_validators(fixed_type, data):
    try:
        fixed_type.encode(data)
    except exceptions.ValidationError:
        pytest.fail(u"ValidationError should not be thrown")


@pytest.mark.parametrize('fixed_type, data', [
    (fields.Integer(1, validators=[validators.Range(1, 5)]), '0'),
    (fields.Integer(1, validators=[validators.Range(1, 5)]), '6'),
])
def test_validate_range(fixed_type, data):
    with pytest.raises(exceptions.ValidationError):
        fixed_type.encode(data)


@pytest.mark.parametrize('fixed_type, data', [
    (fields.Integer(1, validators=[validators.MaxValue(1)]), '2'),
    (fields.Integer(1, validators=[validators.MaxValue(5)]), '6'),
])
def test_validate_max_value(fixed_type, data):
    with pytest.raises(exceptions.ValidationError):
        fixed_type.encode(data)


@pytest.mark.parametrize('fixed_type, data', [
    (fields.Integer(1, validators=[validators.MinValue(1)]), '0'),
    (fields.Integer(1, validators=[validators.MinValue(5)]), '4'),
])
def test_validate_min_value(fixed_type, data):
    with pytest.raises(exceptions.ValidationError):
        fixed_type.encode(data)


@pytest.mark.parametrize('fixed_type, data', [
    (fields.String(5, validators=[validators.Regex(r"[0-9]+")]), 'a123f'),
    (fields.String(5, validators=[validators.Regex(r"\d")]), 'abcde'),
    (fields.String(5, validators=[validators.Regex(r"[A-Z]{6}")]), 'ABCDE'),
])
def test_validate_regex(fixed_type, data):
    with pytest.raises(exceptions.ValidationError):
        fixed_type.encode(data)


def test_throws_exception_when_regex_is_invalid():
    with pytest.raises(ValueError):
        field = fields.String(5, validators=[validators.Regex(")")])


@pytest.mark.parametrize('fixed_type, data', [
    (fields.String(11, validators=[validators.Email()]), 'edimail.com'),
    (fields.String(11, validators=[validators.Email()]), 'edi@mailcom'),
])
def test_validate_email(fixed_type, data):
    with pytest.raises(exceptions.ValidationError):
        fixed_type.encode(data)
33.837838
76
0.69369
import pytest

from edipy import fields, validators, exceptions


@pytest.mark.parametrize('fixed_type, data', [
    (fields.Integer(1, validators=[validators.Range(1, 5)]), '1'),
    (fields.Integer(1, validators=[validators.MaxValue(3)]), '2'),
    (fields.Integer(1, validators=[validators.MinValue(1)]), '5'),
    (fields.String(5, validators=[validators.Regex(r"[0-9]+")]), '12345'),
    (fields.String(12, validators=[validators.Email()]), '[email protected]'),
])
def test_using_validators(fixed_type, data):
    try:
        fixed_type.encode(data)
    except exceptions.ValidationError:
        pytest.fail(u"ValidationError should not be thrown")


@pytest.mark.parametrize('fixed_type, data', [
    (fields.Integer(1, validators=[validators.Range(1, 5)]), '0'),
    (fields.Integer(1, validators=[validators.Range(1, 5)]), '6'),
])
def test_validate_range(fixed_type, data):
    with pytest.raises(exceptions.ValidationError):
        fixed_type.encode(data)


@pytest.mark.parametrize('fixed_type, data', [
    (fields.Integer(1, validators=[validators.MaxValue(1)]), '2'),
    (fields.Integer(1, validators=[validators.MaxValue(5)]), '6'),
])
def test_validate_max_value(fixed_type, data):
    with pytest.raises(exceptions.ValidationError):
        fixed_type.encode(data)


@pytest.mark.parametrize('fixed_type, data', [
    (fields.Integer(1, validators=[validators.MinValue(1)]), '0'),
    (fields.Integer(1, validators=[validators.MinValue(5)]), '4'),
])
def test_validate_min_value(fixed_type, data):
    with pytest.raises(exceptions.ValidationError):
        fixed_type.encode(data)


@pytest.mark.parametrize('fixed_type, data', [
    (fields.String(5, validators=[validators.Regex(r"[0-9]+")]), 'a123f'),
    (fields.String(5, validators=[validators.Regex(r"\d")]), 'abcde'),
    (fields.String(5, validators=[validators.Regex(r"[A-Z]{6}")]), 'ABCDE'),
])
def test_validate_regex(fixed_type, data):
    with pytest.raises(exceptions.ValidationError):
        fixed_type.encode(data)


def test_throws_exception_when_regex_is_invalid():
    with pytest.raises(ValueError):
        field = fields.String(5, validators=[validators.Regex(")")])


@pytest.mark.parametrize('fixed_type, data', [
    (fields.String(11, validators=[validators.Email()]), 'edimail.com'),
    (fields.String(11, validators=[validators.Email()]), 'edi@mailcom'),
])
def test_validate_email(fixed_type, data):
    with pytest.raises(exceptions.ValidationError):
        fixed_type.encode(data)
true
true
f700e28af26ab19a72197442b3b55696a4239890
10,237
py
Python
tulflow/harvest.py
tulibraries/tulflow
652957d079c481a84b3602932ed86f3b2b21e3e9
[ "Apache-2.0" ]
1
2022-03-04T18:27:06.000Z
2022-03-04T18:27:06.000Z
tulflow/harvest.py
tulibraries/tulflow
652957d079c481a84b3602932ed86f3b2b21e3e9
[ "Apache-2.0" ]
117
2019-08-29T21:34:53.000Z
2022-03-31T22:11:58.000Z
tulflow/harvest.py
tulibraries/tulflow
652957d079c481a84b3602932ed86f3b2b21e3e9
[ "Apache-2.0" ]
1
2021-09-22T20:40:12.000Z
2021-09-22T20:40:12.000Z
""" tulflow.harvest ~~~~~~~~~~~~~~~ This module contains objects to harvest data from one given location to another. """ import hashlib import io import logging import pandas import sickle from lxml import etree from sickle import Sickle from sickle.models import xml_to_dict from sickle.oaiexceptions import NoRecordsMatch from tulflow import process NS = { "marc21": "http://www.loc.gov/MARC21/slim", "oai": "http://www.openarchives.org/OAI/2.0/" } def oai_to_s3(**kwargs): """Wrapper function for using OAI Harvest, Default Processor, and S3 Writer.""" kwargs["harvest_params"] = { "metadataPrefix": kwargs.get("metadata_prefix"), "from": kwargs.get("harvest_from_date"), "until": kwargs.get("harvest_until_date") } dag_id = kwargs["dag"].dag_id dag_start_date = kwargs["timestamp"] oai_sets = generate_oai_sets(**kwargs) all_processed = [] sets_with_no_records = [] if oai_sets: for oai_set in oai_sets: kwargs["harvest_params"]["set"] = oai_set data = harvest_oai(**kwargs) if data == []: sets_with_no_records.append(oai_set) logging.info("Skipping processing % set because it has no data.", oai_set) continue outdir = dag_s3_prefix(dag_id, dag_start_date) processed = process_xml(data, dag_write_string_to_s3, outdir, **kwargs) all_processed.append(processed) else: data = harvest_oai(**kwargs) if data == []: sets_with_no_records.append(oai_set) outdir = dag_s3_prefix(dag_id, dag_start_date) processed = process_xml(data, dag_write_string_to_s3, outdir, **kwargs) all_processed.append(processed) all_updated = sum([set['updated'] for set in all_processed]) all_deleted = sum([set['deleted'] for set in all_processed]) logging.info("Total OAI Records Harvested & Processed: %s", all_updated) logging.info("Total OAI Records Harvest & Marked for Deletion: %s", all_deleted) logging.info("Total sets with no records: %s", len(sets_with_no_records)) logging.info("Sets with no records %s", sets_with_no_records) return {"updated": all_updated, "deleted": all_deleted, "sets_with_no_records": sets_with_no_records} def generate_oai_sets(**kwargs): """Generate the oai sets we want to harvest.""" all_sets = bool(kwargs.get("all_sets")) included_sets = kwargs.get("included_sets") excluded_sets = kwargs.get("excluded_sets") oai_endpoint = kwargs.get("oai_endpoint") if all_sets: logging.info("Seeing All Sets Needed.") return [] elif included_sets: logging.info("Seeing SetSpec List.") if not isinstance(included_sets, list): return [included_sets] return included_sets elif excluded_sets: logging.info("Seeing Excluded SetSpec List.") if not isinstance(excluded_sets, list): excluded_sets = [excluded_sets] list_sets = Sickle(oai_endpoint).ListSets() all_sets = [oai_set.xml.find("oai:setSpec", namespaces=NS).text for oai_set in list_sets] remaining_sets = list(set(all_sets) - set(excluded_sets)) logging.info(remaining_sets) return remaining_sets return [] class HarvestIterator(sickle.iterator.OAIItemIterator): def next(self): """Return the next record/header/set.""" while True: for item in self._items: mapped = self.mapper(item) if self.ignore_deleted and mapped.deleted: continue if hasattr(mapped, 'metadata') and mapped.metadata == None: logging.info("Skipping record with no metadata: %s", mapped.header.identifier) continue return mapped if self.resumption_token and self.resumption_token.token: self._next_response() else: raise StopIteration pass # TODO: Remove if https://github.com/mloesch/sickle/pull/47 gets merged. 
class HarvestRecord(sickle.models.Record): def get_metadata(self): # We want to get record/metadata/<container>/* # <container> would be the element ``dc`` # in the ``oai_dc`` case. meta_data = self.xml.find('.//' + self._oai_namespace + 'metadata') if meta_data != None: return xml_to_dict(meta_data.getchildren()[0], strip_ns=self._strip_ns) pass def harvest_oai(**kwargs): """Create OAI ListRecords Iterator for Harvesting Data.""" oai_endpoint = kwargs.get("oai_endpoint") harvest_params = kwargs.get("harvest_params") logging.info("Harvesting from %s", oai_endpoint) logging.info("Harvesting %s", harvest_params) sickle = Sickle(oai_endpoint, retry_status_codes=[500,503], max_retries=3) class_mapping = harvest_params.get("class_mapping", { "ListRecords": HarvestRecord, }) iterator = harvest_params.get("iterator", HarvestIterator) for key in class_mapping: sickle.class_mapping[key] = class_mapping[key] sickle.iterator = iterator try: return sickle.ListRecords(**harvest_params) except NoRecordsMatch: logging.info("No records found.") return [] class OaiXml: """oai-pmh xml etree wrapper""" def __init__(self, dag_id, timestamp): etree.register_namespace("oai", "http://www.openarchives.org/OAI/2.0/") etree.register_namespace("marc21", "http://www.loc.gov/MARC21/slim") self.root = etree.Element("{http://www.openarchives.org/OAI/2.0/}collection") self.root.attrib["dag-id"] = dag_id self.root.attrib["dag-timestamp"] = timestamp def append(self, record): self.root.append(record) def tostring(self): return etree.tostring(self.root, encoding="utf-8").decode("utf-8") def process_xml(data, writer, outdir, **kwargs): """Process & Write XML data to S3.""" parser = kwargs.get("parser") records_per_file = kwargs.get("records_per_file") if kwargs.get("dag"): run_id = kwargs.get("dag").dag_id else: run_id = "no-dag-provided" if kwargs.get("timestamp"): timestamp = kwargs.get("timestamp") else: timestamp = "no-timestamp-provided" if not records_per_file: records_per_file = 1000 count = deleted_count = 0 oai_updates = OaiXml(run_id, timestamp) oai_deletes = OaiXml(run_id, timestamp) logging.info("Processing XML") for record in data: record_id = record.header.identifier record = record.xml record.attrib["airflow-record-id"] = record_id if parser: record = parser(record, **kwargs) if record.xpath(".//oai:header[@status='deleted']", namespaces=NS): logging.info("Added record %s to deleted xml file(s)", record_id) deleted_count += 1 oai_deletes.append(record) if deleted_count % int(records_per_file) == 0: writer(oai_deletes.tostring(), outdir + "/deleted", **kwargs) oai_deletes = OaiXml(run_id, timestamp) else: logging.info("Added record %s to new-updated xml file", record_id) count += 1 oai_updates.append(record) if count % int(records_per_file) == 0: writer(oai_updates.tostring(), outdir + "/new-updated", **kwargs) oai_updates = OaiXml(run_id, timestamp) writer(oai_updates.tostring(), outdir + "/new-updated", **kwargs) writer(oai_deletes.tostring(), outdir + "/deleted", **kwargs) logging.info("OAI Records Harvested & Processed: %s", count) logging.info("OAI Records Harvest & Marked for Deletion: %s", deleted_count) return {"updated": count, "deleted": deleted_count} def perform_xml_lookup_with_cache(): cache = {} def perform_xml_lookup(oai_record, **kwargs): """Parse additions/updates & add boundwiths.""" if len(cache) == 0: logging.info("*** Fetching CSV lookup file from s3 ***") access_id = kwargs.get("access_id") access_secret = kwargs.get("access_secret") bucket = kwargs.get("bucket_name") lookup_key = 
kwargs.get("lookup_key") csv_data = process.get_s3_content(bucket, lookup_key, access_id, access_secret) cache["value"] = pandas.read_csv(io.BytesIO(csv_data), header=0) lookup_csv = cache["value"] for record in oai_record.xpath(".//marc21:record", namespaces=NS): record_id = process.get_record_001(record) logging.info("Reading in Record %s", record_id) parent_txt = lookup_csv.loc[lookup_csv.child_id == int(record_id), "parent_xml"].values if len(set(parent_txt)) >= 1: logging.info("Child XML record found %s", record_id) for parent_node in parent_txt[0].split("||"): try: record.append(etree.fromstring(parent_node)) except etree.XMLSyntaxError as error: logging.error("Problem with string syntax:") logging.error(error) logging.error(parent_node) return oai_record return perform_xml_lookup def dag_write_string_to_s3(string, prefix, **kwargs): """Push a string in memory to s3 with a defined prefix""" access_id = kwargs.get("access_id") access_secret = kwargs.get("access_secret") bucket_name = kwargs.get("bucket_name") logging.info("Writing to S3 Bucket %s", bucket_name) our_hash = hashlib.md5(string.encode("utf-8")).hexdigest() filename = "{}/{}".format(prefix, our_hash) process.generate_s3_object(string, bucket_name, filename, access_id, access_secret) def write_log(string, prefix, **kwargs): """Write the data to logging info.""" prefix = prefix logging.info(prefix) string = string logging.info(string) def dag_s3_prefix(dag_id, timestamp): """Define the prefix that will be prepended to all files created by this dag run""" return "{}/{}".format(dag_id, timestamp)
38.484962
105
0.639152
import hashlib import io import logging import pandas import sickle from lxml import etree from sickle import Sickle from sickle.models import xml_to_dict from sickle.oaiexceptions import NoRecordsMatch from tulflow import process NS = { "marc21": "http://www.loc.gov/MARC21/slim", "oai": "http://www.openarchives.org/OAI/2.0/" } def oai_to_s3(**kwargs): kwargs["harvest_params"] = { "metadataPrefix": kwargs.get("metadata_prefix"), "from": kwargs.get("harvest_from_date"), "until": kwargs.get("harvest_until_date") } dag_id = kwargs["dag"].dag_id dag_start_date = kwargs["timestamp"] oai_sets = generate_oai_sets(**kwargs) all_processed = [] sets_with_no_records = [] if oai_sets: for oai_set in oai_sets: kwargs["harvest_params"]["set"] = oai_set data = harvest_oai(**kwargs) if data == []: sets_with_no_records.append(oai_set) logging.info("Skipping processing % set because it has no data.", oai_set) continue outdir = dag_s3_prefix(dag_id, dag_start_date) processed = process_xml(data, dag_write_string_to_s3, outdir, **kwargs) all_processed.append(processed) else: data = harvest_oai(**kwargs) if data == []: sets_with_no_records.append(oai_set) outdir = dag_s3_prefix(dag_id, dag_start_date) processed = process_xml(data, dag_write_string_to_s3, outdir, **kwargs) all_processed.append(processed) all_updated = sum([set['updated'] for set in all_processed]) all_deleted = sum([set['deleted'] for set in all_processed]) logging.info("Total OAI Records Harvested & Processed: %s", all_updated) logging.info("Total OAI Records Harvest & Marked for Deletion: %s", all_deleted) logging.info("Total sets with no records: %s", len(sets_with_no_records)) logging.info("Sets with no records %s", sets_with_no_records) return {"updated": all_updated, "deleted": all_deleted, "sets_with_no_records": sets_with_no_records} def generate_oai_sets(**kwargs): all_sets = bool(kwargs.get("all_sets")) included_sets = kwargs.get("included_sets") excluded_sets = kwargs.get("excluded_sets") oai_endpoint = kwargs.get("oai_endpoint") if all_sets: logging.info("Seeing All Sets Needed.") return [] elif included_sets: logging.info("Seeing SetSpec List.") if not isinstance(included_sets, list): return [included_sets] return included_sets elif excluded_sets: logging.info("Seeing Excluded SetSpec List.") if not isinstance(excluded_sets, list): excluded_sets = [excluded_sets] list_sets = Sickle(oai_endpoint).ListSets() all_sets = [oai_set.xml.find("oai:setSpec", namespaces=NS).text for oai_set in list_sets] remaining_sets = list(set(all_sets) - set(excluded_sets)) logging.info(remaining_sets) return remaining_sets return [] class HarvestIterator(sickle.iterator.OAIItemIterator): def next(self): while True: for item in self._items: mapped = self.mapper(item) if self.ignore_deleted and mapped.deleted: continue if hasattr(mapped, 'metadata') and mapped.metadata == None: logging.info("Skipping record with no metadata: %s", mapped.header.identifier) continue return mapped if self.resumption_token and self.resumption_token.token: self._next_response() else: raise StopIteration pass class HarvestRecord(sickle.models.Record): def get_metadata(self): meta_data = self.xml.find('.//' + self._oai_namespace + 'metadata') if meta_data != None: return xml_to_dict(meta_data.getchildren()[0], strip_ns=self._strip_ns) pass def harvest_oai(**kwargs): oai_endpoint = kwargs.get("oai_endpoint") harvest_params = kwargs.get("harvest_params") logging.info("Harvesting from %s", oai_endpoint) logging.info("Harvesting %s", harvest_params) sickle = Sickle(oai_endpoint, 
retry_status_codes=[500,503], max_retries=3) class_mapping = harvest_params.get("class_mapping", { "ListRecords": HarvestRecord, }) iterator = harvest_params.get("iterator", HarvestIterator) for key in class_mapping: sickle.class_mapping[key] = class_mapping[key] sickle.iterator = iterator try: return sickle.ListRecords(**harvest_params) except NoRecordsMatch: logging.info("No records found.") return [] class OaiXml: def __init__(self, dag_id, timestamp): etree.register_namespace("oai", "http://www.openarchives.org/OAI/2.0/") etree.register_namespace("marc21", "http://www.loc.gov/MARC21/slim") self.root = etree.Element("{http://www.openarchives.org/OAI/2.0/}collection") self.root.attrib["dag-id"] = dag_id self.root.attrib["dag-timestamp"] = timestamp def append(self, record): self.root.append(record) def tostring(self): return etree.tostring(self.root, encoding="utf-8").decode("utf-8") def process_xml(data, writer, outdir, **kwargs): parser = kwargs.get("parser") records_per_file = kwargs.get("records_per_file") if kwargs.get("dag"): run_id = kwargs.get("dag").dag_id else: run_id = "no-dag-provided" if kwargs.get("timestamp"): timestamp = kwargs.get("timestamp") else: timestamp = "no-timestamp-provided" if not records_per_file: records_per_file = 1000 count = deleted_count = 0 oai_updates = OaiXml(run_id, timestamp) oai_deletes = OaiXml(run_id, timestamp) logging.info("Processing XML") for record in data: record_id = record.header.identifier record = record.xml record.attrib["airflow-record-id"] = record_id if parser: record = parser(record, **kwargs) if record.xpath(".//oai:header[@status='deleted']", namespaces=NS): logging.info("Added record %s to deleted xml file(s)", record_id) deleted_count += 1 oai_deletes.append(record) if deleted_count % int(records_per_file) == 0: writer(oai_deletes.tostring(), outdir + "/deleted", **kwargs) oai_deletes = OaiXml(run_id, timestamp) else: logging.info("Added record %s to new-updated xml file", record_id) count += 1 oai_updates.append(record) if count % int(records_per_file) == 0: writer(oai_updates.tostring(), outdir + "/new-updated", **kwargs) oai_updates = OaiXml(run_id, timestamp) writer(oai_updates.tostring(), outdir + "/new-updated", **kwargs) writer(oai_deletes.tostring(), outdir + "/deleted", **kwargs) logging.info("OAI Records Harvested & Processed: %s", count) logging.info("OAI Records Harvest & Marked for Deletion: %s", deleted_count) return {"updated": count, "deleted": deleted_count} def perform_xml_lookup_with_cache(): cache = {} def perform_xml_lookup(oai_record, **kwargs): if len(cache) == 0: logging.info("*** Fetching CSV lookup file from s3 ***") access_id = kwargs.get("access_id") access_secret = kwargs.get("access_secret") bucket = kwargs.get("bucket_name") lookup_key = kwargs.get("lookup_key") csv_data = process.get_s3_content(bucket, lookup_key, access_id, access_secret) cache["value"] = pandas.read_csv(io.BytesIO(csv_data), header=0) lookup_csv = cache["value"] for record in oai_record.xpath(".//marc21:record", namespaces=NS): record_id = process.get_record_001(record) logging.info("Reading in Record %s", record_id) parent_txt = lookup_csv.loc[lookup_csv.child_id == int(record_id), "parent_xml"].values if len(set(parent_txt)) >= 1: logging.info("Child XML record found %s", record_id) for parent_node in parent_txt[0].split("||"): try: record.append(etree.fromstring(parent_node)) except etree.XMLSyntaxError as error: logging.error("Problem with string syntax:") logging.error(error) logging.error(parent_node) return oai_record 
return perform_xml_lookup def dag_write_string_to_s3(string, prefix, **kwargs): access_id = kwargs.get("access_id") access_secret = kwargs.get("access_secret") bucket_name = kwargs.get("bucket_name") logging.info("Writing to S3 Bucket %s", bucket_name) our_hash = hashlib.md5(string.encode("utf-8")).hexdigest() filename = "{}/{}".format(prefix, our_hash) process.generate_s3_object(string, bucket_name, filename, access_id, access_secret) def write_log(string, prefix, **kwargs): prefix = prefix logging.info(prefix) string = string logging.info(string) def dag_s3_prefix(dag_id, timestamp): return "{}/{}".format(dag_id, timestamp)
true
true
f700e496d81a63919f3b2a6c817388f8a32a78ec
1,789
py
Python
snake.py
joshua-bilbrey/snake-game
58b4daaee0ced74a40d103319d7efc515274826d
[ "CC0-1.0" ]
null
null
null
snake.py
joshua-bilbrey/snake-game
58b4daaee0ced74a40d103319d7efc515274826d
[ "CC0-1.0" ]
null
null
null
snake.py
joshua-bilbrey/snake-game
58b4daaee0ced74a40d103319d7efc515274826d
[ "CC0-1.0" ]
null
null
null
import turtle STARTING_POSITIONS = [(0, 0), (-20, 0), (-40, 0)] MOVE_DISTANCE = 20 UP = 90 DOWN = 270 RIGHT = 0 LEFT = 180 class Snake: """Initializes length and segments of snake.""" def __init__(self): self.length = 3 self.segments = [] self.create_snake() self.head = self.segments[0] def create_snake(self): """Creates snake and sets starting position of snake.""" for position in STARTING_POSITIONS: self.add_segment(position) def add_segment(self, position): new_segment = turtle.Turtle(shape="square") new_segment.color("white") new_segment.penup() new_segment.setpos(position) self.segments.append(new_segment) def reset_snake(self): for segment in self.segments: segment.hideturtle() self.segments.clear() self.create_snake() self.head = self.segments[0] def extend(self, loot): for time in range(loot): self.add_segment(self.segments[-1].position()) def move(self): """Moves snake forward with segments following index zero segment.""" for seg_num in range(len(self.segments) - 1, 0, -1): self.segments[seg_num].goto(self.segments[seg_num - 1].pos()) self.head.forward(MOVE_DISTANCE) def up(self): if self.head.heading() != DOWN: self.head.setheading(UP) def down(self): if self.head.heading() != UP: self.head.setheading(DOWN) def right(self): if self.head.heading() != LEFT: self.head.setheading(RIGHT) def left(self): if self.head.heading() != RIGHT: self.head.setheading(LEFT)
28.396825
78
0.577977
import turtle STARTING_POSITIONS = [(0, 0), (-20, 0), (-40, 0)] MOVE_DISTANCE = 20 UP = 90 DOWN = 270 RIGHT = 0 LEFT = 180 class Snake: def __init__(self): self.length = 3 self.segments = [] self.create_snake() self.head = self.segments[0] def create_snake(self): for position in STARTING_POSITIONS: self.add_segment(position) def add_segment(self, position): new_segment = turtle.Turtle(shape="square") new_segment.color("white") new_segment.penup() new_segment.setpos(position) self.segments.append(new_segment) def reset_snake(self): for segment in self.segments: segment.hideturtle() self.segments.clear() self.create_snake() self.head = self.segments[0] def extend(self, loot): for time in range(loot): self.add_segment(self.segments[-1].position()) def move(self): for seg_num in range(len(self.segments) - 1, 0, -1): self.segments[seg_num].goto(self.segments[seg_num - 1].pos()) self.head.forward(MOVE_DISTANCE) def up(self): if self.head.heading() != DOWN: self.head.setheading(UP) def down(self): if self.head.heading() != UP: self.head.setheading(DOWN) def right(self): if self.head.heading() != LEFT: self.head.setheading(RIGHT) def left(self): if self.head.heading() != RIGHT: self.head.setheading(LEFT)
true
true
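A short, hedged driver for the Snake class in the file above, assuming the standard turtle module and that snake.py is importable; the screen setup, key bindings, and demo loop length are illustrative assumptions.
# Hypothetical driver for the Snake class above (assumes snake.py is importable).
import time
import turtle
from snake import Snake

screen = turtle.Screen()
screen.bgcolor("black")
screen.tracer(0)                  # turn off auto-refresh; update() is called manually

snake = Snake()
screen.listen()
screen.onkey(snake.up, "Up")      # heading guards in Snake prevent reversing into itself
screen.onkey(snake.down, "Down")
screen.onkey(snake.left, "Left")
screen.onkey(snake.right, "Right")

for _ in range(60):               # short demo loop instead of a full game loop
    screen.update()
    time.sleep(0.1)
    snake.move()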
f700e563255eb9b5b51024e029510165ba3ade1b
22,420
py
Python
pailab/tools/tree.py
pailabteam/pailab
3995b25f105827ae631e6120f380748d7d284c9f
[ "Apache-2.0" ]
26
2019-03-15T12:40:11.000Z
2021-05-26T05:23:02.000Z
pailab/tools/tree.py
pailabteam/pailab
3995b25f105827ae631e6120f380748d7d284c9f
[ "Apache-2.0" ]
89
2019-03-15T12:39:07.000Z
2022-02-10T00:14:24.000Z
pailab/tools/tree.py
pailabteam/pailab
3995b25f105827ae631e6120f380748d7d284c9f
[ "Apache-2.0" ]
2
2019-05-10T09:00:36.000Z
2020-03-05T11:32:34.000Z
# -*- coding: utf-8 -*- """This module contains all functions and classes for the MLTree. The MLTree builds a tree-like structure of the objects in a given repository. This allows the user to access objects in a comfortable way allowing for autocompletion (i.e. in Jupyter notebooks). To use it one can simply call the :py:meth:`pailab.tools.tree.MLTree.add_tree` method to add such a tree to the current repository:: >>from pailab.tools.tree import MLTree >>MLTree.add_tree(ml_repo) After the tree has been added, one can simply use the tree. Here, using autocompletion makes the basic work with repo objects quite simple. Each tree node provides useful functions that can be applied: - ``load`` loads the object of the given tree node or the child tree nodes of the current node. After calling load the respective nodes have a new attribute ``obj`` that contains the respective loaded object. To load all objects belonging to the models subtree like parameters, evaluations or measures one can call:: >> ml_repo.tree.models.load() - ``history`` lists the history of all objects of the respective subtree, where history accepts certain parameters such as a range of versions or which repo object information to include. To list the history of all training data just use:: >> ml_repo.tree.training_data.history() - ``modifications`` lists all objects of the respective subtree that have been modified and not yet been committed. There are also node dependent functions (depending on what object the node represents). """ import logging from numpy import load from deepdiff import DeepDiff from pailab.ml_repo.repo import MLObjectType, MLRepo from pailab.ml_repo.repo_objects import RepoInfoKey, DataSet # pylint: disable=E0401 from pailab.ml_repo.repo_store import RepoStore # pylint: disable=E0401 import pailab.ml_repo.repo_store as repo_store import pailab.ml_repo.repo_objects as repo_objects logger = logging.getLogger(__name__) #region collections and items class _RepoObjectItem: def __init__(self, name, ml_repo, repo_obj = None): self._name = name self._repo = ml_repo if repo_obj is not None: self.obj = repo_obj def _set(self, path, items): if len(path) > 0: if len(path) == 1: setattr(self, path[0], items[0]) return if hasattr(self, path[0]): getattr(self, path[0])._set(path[1:], items[1:]) else: setattr(self, path[0], items[0]) items[0]._set(path[1:], items[1:]) def load(self, version=repo_store.LAST_VERSION, full_object=False, modifier_versions=None, containing_str=None): """Loads the object into the tree and stores it in obj member. Args: version (str, optional): The version of the object to be loaded. Defaults to repo_store.LAST_VERSION. full_object (bool, optional): If True, also the bigobject-members of the object will be loaded and stored. Defaults to False. modifier_versions (dict of str to str, optional): The version of the object that has been created with the objects and their respective versions defined in the dict will be loaded. Defaults to None. containing_str (str, optional): The object will only be loaded if the given string is contained in the object's name (intended for internal use). Defaults to None.
""" if containing_str is None or containing_str in self._name: if self._repo is not None: self.obj = self._repo.get(self._name, version, full_object, modifier_versions, throw_error_not_exist = False) for v in self.__dict__.values(): if hasattr(v,'load'): v.load(version, full_object, modifier_versions, containing_str) def modifications(self, commit=False, commit_message=''): result = {} if self._name is not None: try: if self._repo is not None: obj_orig = self._repo.get( self.obj.repo_info[RepoInfoKey.NAME], version=self.obj.repo_info[RepoInfoKey.VERSION]) diff = DeepDiff(obj_orig, self.obj, ignore_order=True) except AttributeError: return None if len(diff) == 0: return None else: if commit and (self._repo is not None): version = self._repo.add( self.obj, message=commit_message) self.obj = self._repo.get(self._name, version=version) result = {self._name: diff} for v in self.__dict__.values(): if hasattr(v, 'modifications'): tmp = v.modifications(commit, commit_message) if tmp is not None: result.update(tmp) return result def history(self, version = (repo_store.FIRST_VERSION,repo_store.LAST_VERSION), repo_info = [RepoInfoKey.NAME, RepoInfoKey.AUTHOR, RepoInfoKey.COMMIT_DATE, RepoInfoKey.COMMIT_MESSAGE], obj_data = []): history = [] if self._repo is not None: history = self._repo.get(self._name, version = version, throw_error_not_exist=False) if not isinstance(history, list): history = [history] result = {} tmp = [] for h in history: r = {} for r_info in repo_info: r[str(r_info)] = h.repo_info[r_info] for o_info in obj_data: r[o_info] = obj_data.__dict__[o_info] tmp.append(r) result[self._name] = tmp for v in self.__dict__.values(): if isinstance(v, _RepoObjectItem): tmp2 = v.history(version, repo_info, obj_data) if tmp2 is not None: result.update(tmp2) if len(result) > 0: return result def __call__(self, containing_str=None): # if len(self.__dict__) == 1: if containing_str is not None: result = [] if containing_str in self._name: result.append(self._name) for v in self.__dict__.values(): if isinstance(v, _RepoObjectItem): d = v(containing_str) if isinstance(d, str): result.append(d) else: result.extend(d) return [x for x in result if containing_str in x] else: return self._name return result class _RawDataItem(_RepoObjectItem): def __init__(self, name, ml_repo, repo_obj = None): super(_RawDataItem,self).__init__(name, ml_repo, repo_obj) def append(self, x_data, y_data = None): """Append data to a RawData object It appends data to the given RawData object and updates all training and test DataSets which implicitely changed by this update. Args: name (string): name of RawData object x_data (numpy matrix): the x_data to append y_data (numpy matrix, optional): Defaults to None. The y_data to append Raises: Exception: If the data is not consistent to the RawData (e.g. different number of x-coordinates) it throws an exception. 
""" logger.info('Start appending ' + str(x_data.shape[0]) + ' datapoints to RawData' + self._name) raw_data = self._repo.get(self._name) if len(raw_data.x_coord_names) != x_data.shape[1]: raise Exception('Number of columns of x_data of RawData object is not equal to number of columns of additional x_data.') if raw_data.y_coord_names is None and y_data is not None: raise Exception('RawData object does not contain y_data but y_data is given') if raw_data.y_coord_names is not None: if y_data is None: raise Exception('RawData object has y_data but no y_data is given') if y_data.shape[1] != len(raw_data.y_coord_names ): raise Exception('Number of columns of y_data of RawData object is not equal to number of columns of additional y_data.') numpy_dict = {'x_data' : x_data} if raw_data.y_coord_names is not None: numpy_dict['y_data'] = y_data raw_data.n_data += x_data.shape[0] old_version = raw_data.repo_info[RepoInfoKey.VERSION] new_version = self._repo.add(raw_data) self._repo._numpy_repo.append(self._name, old_version, new_version, numpy_dict) # now find all datasets which are affected by the updated data changed_data_sets = [] training_data = self._repo.get_training_data(full_object = False) if isinstance(training_data, DataSet): if training_data.raw_data == self._name and training_data.raw_data_version == repo_store.RepoStore.LAST_VERSION: if training_data.end_index is None or training_data.end_index < 0: training_data.raw_data_version = new_version changed_data_sets.append(training_data) test_data = self._repo.get_names(MLObjectType.TEST_DATA) for d in test_data: data = self._repo.get(d) if isinstance(data, DataSet): if data.raw_data == self._name and data.raw_data_version == repo_store.RepoStore.LAST_VERSION: if data.end_index is None or data.end_index < 0: data.raw_data_version = new_version changed_data_sets.append(data) self._repo.add(changed_data_sets, 'RawData ' + self._name + ' updated, add DataSets depending om the updated RawData.') if hasattr(self, 'obj'):#update current object self.obj = self._repo.get(self._name, version=new_version) logger.info('Finished appending data to RawData' + self._name) class _RawDataCollection(_RepoObjectItem): @staticmethod def __get_name_from_path(path): return path.split('/')[-1] def __init__(self, repo): super(_RawDataCollection, self).__init__('raw_data', repo) names = repo.get_names(MLObjectType.RAW_DATA) for n in names: setattr(self, _RawDataCollection.__get_name_from_path(n), _RawDataItem(n, repo)) def add(self, name, data, input_variables = None, target_variables = None): """Add raw data to the repository Arguments: data_name {name of data} -- the name of the data added data {pandas DataFrame} -- the data as pandas datatable input_variables {str or iterable of str} -- column name or iterable of column names defining the input variables of the given data target_variables {str or iterable of str} -- column name or iterable of column names defining the target variables of the given data Keyword Arguments: input_variables {list of strings} -- list of column names defining the input variables for the machine learning (default: {None}). If None, all variables are used as input target_variables {list of strings} -- list of column names defining the target variables for the machine learning (default: {None}). If None, no target data is added from the table. 
""" path = 'raw_data/' + name if input_variables is None: input_variables = list(data) if not target_variables is None: [input_variables.remove(x) for x in target_variables] else: if isinstance(input_variables, str): input_variables = [input_variables] # check whether the input_variables are included in the data if not [item for item in input_variables if item in list(data)] == list(input_variables): raise Exception('RawData does not include at least one column included in input_variables') if target_variables is not None: if isinstance(target_variables, str): target_variables = [target_variables] # check if target variables are in list if not [item for item in target_variables if item in list(data)] == list(target_variables): raise Exception('RawData does not include at least one column included in target_variables') raw_data = repo_objects.RawData(data.loc[:, input_variables].values, input_variables, data.loc[:, target_variables].values, target_variables, repo_info = {RepoInfoKey.NAME: path}) else: raw_data = repo_objects.RawData(data.loc[:, input_variables].values, input_variables, repo_info = {RepoInfoKey.NAME: path}) v = self._repo.add(raw_data, 'data ' + path + ' added to repository' , category = MLObjectType.RAW_DATA) obj = self._repo.get(path, version=v, full_object = False) setattr(self, name, _RawDataItem(path, self._repo, obj)) def add_from_numpy_file(self, name, filename_X, x_names, filename_Y=None, y_names = None): path = name X = load(filename_X) Y = None if filename_Y is not None: Y = load(filename_Y) raw_data = repo_objects.RawData(X, x_names, Y, y_names, repo_info = {RepoInfoKey.NAME: path}) v = self._repo.add(raw_data, 'data ' + path + ' added to repository' , category = MLObjectType.RAW_DATA) obj = self._repo.get(path, version=v, full_object = False) setattr(self, name, _RawDataItem(path, self._repo, obj)) class _TrainingDataCollection(_RepoObjectItem): @staticmethod def __get_name_from_path(path): return path.split('/')[-1] def __init__(self, repo): super(_TrainingDataCollection, self).__init__('training_data', None) self.__repo = repo # we store ml_repo in __repo to circumvent that obj is loaded from eneric base class names = repo.get_names(MLObjectType.TRAINING_DATA) for n in names: setattr(self, _TrainingDataCollection.__get_name_from_path(n), _RepoObjectItem(n, repo)) def add(self, name, raw_data, start_index=0, end_index=None, raw_data_version='last'): #path = 'training_data/' + name data_set = repo_objects.DataSet(raw_data, start_index, end_index, raw_data_version, repo_info = {RepoInfoKey.NAME: name, RepoInfoKey.CATEGORY: MLObjectType.TRAINING_DATA}) v = self.__repo.add(data_set) tmp = self.__repo.get(name, version=v) item = _RepoObjectItem(name, self.__repo, tmp) setattr(self, name, item) class _TestDataCollection(_RepoObjectItem): @staticmethod def __get_name_from_path(path): return path.split('/')[-1] def __init__(self, repo): super(_TestDataCollection, self).__init__('test_data', None) self.__repo = repo # we store ml_repo in __repo to circumvent that obj is loaded from eneric base class names = repo.get_names(MLObjectType.TEST_DATA) for n in names: setattr(self, _TestDataCollection.__get_name_from_path(n), _RepoObjectItem(n,repo)) def add(self, name, raw_data, start_index=0, end_index=None, raw_data_version='last'): data_set = repo_objects.DataSet(raw_data, start_index, end_index, raw_data_version, repo_info = {RepoInfoKey.NAME: name, RepoInfoKey.CATEGORY: MLObjectType.TEST_DATA}) v = self.__repo.add(data_set) tmp = self.__repo.get(name, version=v) item = 
_RepoObjectItem(name, self.__repo, tmp) setattr(self, name, item) class _MeasureItem(_RepoObjectItem): def __init__(self, name, ml_repo, repo_obj = None): super(_MeasureItem, self).__init__(name, ml_repo, repo_obj) class _JobItem(_RepoObjectItem): def __init__(self, name, ml_repo, repo_obj = None): super(_JobItem, self).__init__(name, ml_repo, repo_obj) class _MeasureCollection(_RepoObjectItem): def __init__(self, name, ml_repo): super(_MeasureCollection, self).__init__('measures', None) names = ml_repo.get_names(MLObjectType.MEASURE) for n in names: path = n.split('/')[2:] items = [None] * len(path) for i in range(len(items)-1): items[i] = _RepoObjectItem(path[i], None) items[-1] = _MeasureItem(n, ml_repo) self._set(path, items) #items[-2] = MeasuresOnDataItem class _EvalCollection(_RepoObjectItem): def __init__(self, name, ml_repo): super(_EvalCollection, self).__init__('eval', None) names = ml_repo.get_names(MLObjectType.EVAL_DATA) for n in names: path = n.split('/')[2:] items = [None] * len(path) for i in range(len(items)-1): items[i] = _RepoObjectItem(path[i], None) items[-1] = _MeasureItem(n, ml_repo) self._set(path, items) class _TestCollection(_RepoObjectItem): def __init__(self, name, ml_repo): super(_TestCollection, self).__init__('tests', None) names = ml_repo.get_names(MLObjectType.TEST) for n in names: path = n.split('/')[2:] items = [None] * len(path) for i in range(len(items)-1): items[i] = _RepoObjectItem(path[i], None) items[-1] = _RepoObjectItem(n, ml_repo) self._set(path, items) class _JobCollection(_RepoObjectItem): def __init__(self, name, ml_repo, model_name): super(_JobCollection, self).__init__('jobs', None) names = ml_repo.get_names(MLObjectType.JOB) for n in names: if model_name in n: path = n.split('/') path = path[path.index('jobs')+1:] items = [None] * len(path) for i in range(len(items)-1): items[i] = _RepoObjectItem(path[i], None) items[-1] = _JobItem(n, ml_repo) self._set(path, items) class _ModelItem(_RepoObjectItem): def __init__(self, name, ml_repo, repo_obj = None): super(_ModelItem,self).__init__(name, ml_repo, repo_obj) self.model = _RepoObjectItem(name + '/model', ml_repo) self.eval = _EvalCollection(name + '/eval', ml_repo) self.model_param = _RepoObjectItem(name + '/model_param', ml_repo) self.tests = _TestCollection(name + '/tests', ml_repo) self.measures = _MeasureCollection(name+ '/measure', ml_repo) self.jobs = _JobCollection(name+'/jobs', ml_repo, name) if ml_repo._object_exists(name+'/training_stat'): self.training_statistic = _RepoObjectItem(name+'/training_stat', ml_repo) if ml_repo._object_exists(name+'/training_param'): self.training_param = _RepoObjectItem(name + '/training_param', ml_repo) def set_label(self, label_name, version = repo_store.RepoStore.LAST_VERSION, message=''): self._repo.set_label(label_name, self._name+ '/model', version, message) class _LabelCollection(_RepoObjectItem): def __init__(self, repo): super(_LabelCollection,self).__init__('labels', None) names = repo.get_names(MLObjectType.LABEL) for n in names: #label = ml_repo.get() setattr(self, n, _RepoObjectItem(n, repo)) class _ModelCollection(_RepoObjectItem): @staticmethod def __get_name_from_path(name): return name def __init__(self, repo): super(_ModelCollection,self).__init__('models', None) names = repo.get_names(MLObjectType.MODEL) for n in names: setattr(self, _ModelCollection.__get_name_from_path(n), _ModelItem(n, repo)) self.labels = _LabelCollection(repo) def add(self, name): setattr(self, name, _ModelItem(name,self._repo)) class 
_CacheDataCollection(_RepoObjectItem): @staticmethod def __get_name_from_path(path): return path.split('/')[-1] def __init__(self, repo): super(_CacheDataCollection, self).__init__('cache', None) self.__repo = repo # we store ml_repo in __repo to circumvent that obj is loaded from generic base class names = repo.get_names(MLObjectType.CACHED_VALUE) for n in names: setattr(self, _CacheDataCollection.__get_name_from_path(n), _RepoObjectItem(n, repo)) #endregion class MLTree: @staticmethod def add_tree(ml_repo): """Adds an MLTree to a repository. Args: ml_repo (MLRepo): the repository the tree is added to """ setattr(ml_repo, 'tree', MLTree(ml_repo)) ml_repo._add_triggers.append(ml_repo.tree.reload) def __create(self): self.raw_data = _RawDataCollection(self.__ml_repo) self.training_data = _TrainingDataCollection(self.__ml_repo) self.test_data = _TestDataCollection(self.__ml_repo) self.models = _ModelCollection(self.__ml_repo) self.cache = _CacheDataCollection(self.__ml_repo) def __init__(self, ml_repo): self.__ml_repo = ml_repo self.__create() def reload(self, **kwargs): """Method to reload the tree after objects have been added or deleted from the repository. """ self.__create() # todo make this more efficient by just updating collections and items which are affected by this def modifications(self): """Return a dictionary of all objects that were modified but not yet committed to the repository. Returns: dict: dictionary mapping object ids to dictionary of the modified attributes """ result = {} tmp = self.raw_data.modifications() if tmp is not None: result.update(tmp) tmp = self.training_data.modifications() if tmp is not None: result.update(tmp) tmp = self.test_data.modifications() if tmp is not None: result.update(tmp) tmp = self.models.modifications() if tmp is not None: result.update(tmp) if len(result) == 0: return None return result
46.036961
193
0.63479
import logging from numpy import load from deepdiff import DeepDiff from pailab.ml_repo.repo import MLObjectType, MLRepo from pailab.ml_repo.repo_objects import RepoInfoKey, DataSet from pailab.ml_repo.repo_store import RepoStore import pailab.ml_repo.repo_store as repo_store import pailab.ml_repo.repo_objects as repo_objects logger = logging.getLogger(__name__) class _RepoObjectItem: def __init__(self, name, ml_repo, repo_obj = None): self._name = name self._repo = ml_repo if repo_obj is not None: self.obj = repo_obj def _set(self, path, items): if len(path) > 0: if len(path) == 1: setattr(self, path[0], items[0]) return if hasattr(self, path[0]): getattr(self, path[0])._set(path[1:], items[1:]) else: setattr(self, path[0], items[0]) items[0]._set(path[1:], items[1:]) def load(self, version=repo_store.LAST_VERSION, full_object=False, modifier_versions=None, containing_str=None): if containing_str is None or containing_str in self._name: if self._repo is not None: self.obj = self._repo.get(self._name, version, full_object, modifier_versions, throw_error_not_exist = False) for v in self.__dict__.values(): if hasattr(v,'load'): v.load(version, full_object, modifier_versions, containing_str) def modifications(self, commit=False, commit_message=''): result = {} if self._name is not None: try: if self._repo is not None: obj_orig = self._repo.get( self.obj.repo_info[RepoInfoKey.NAME], version=self.obj.repo_info[RepoInfoKey.VERSION]) diff = DeepDiff(obj_orig, self.obj, ignore_order=True) except AttributeError: return None if len(diff) == 0: return None else: if commit and (self._repo is not None): version = self._repo.add( self.obj, message=commit_message) self.obj = self._repo.get(self._name, version=version) result = {self._name: diff} for v in self.__dict__.values(): if hasattr(v, 'modifications'): tmp = v.modifications(commit, commit_message) if tmp is not None: result.update(tmp) return result def history(self, version = (repo_store.FIRST_VERSION,repo_store.LAST_VERSION), repo_info = [RepoInfoKey.NAME, RepoInfoKey.AUTHOR, RepoInfoKey.COMMIT_DATE, RepoInfoKey.COMMIT_MESSAGE], obj_data = []): history = [] if self._repo is not None: history = self._repo.get(self._name, version = version, throw_error_not_exist=False) if not isinstance(history, list): history = [history] result = {} tmp = [] for h in history: r = {} for r_info in repo_info: r[str(r_info)] = h.repo_info[r_info] for o_info in obj_data: r[o_info] = obj_data.__dict__[o_info] tmp.append(r) result[self._name] = tmp for v in self.__dict__.values(): if isinstance(v, _RepoObjectItem): tmp2 = v.history(version, repo_info, obj_data) if tmp2 is not None: result.update(tmp2) if len(result) > 0: return result def __call__(self, containing_str=None): if containing_str is not None: result = [] if containing_str in self._name: result.append(self._name) for v in self.__dict__.values(): if isinstance(v, _RepoObjectItem): d = v(containing_str) if isinstance(d, str): result.append(d) else: result.extend(d) return [x for x in result if containing_str in x] else: return self._name return result class _RawDataItem(_RepoObjectItem): def __init__(self, name, ml_repo, repo_obj = None): super(_RawDataItem,self).__init__(name, ml_repo, repo_obj) def append(self, x_data, y_data = None): logger.info('Start appending ' + str(x_data.shape[0]) + ' datapoints to RawData' + self._name) raw_data = self._repo.get(self._name) if len(raw_data.x_coord_names) != x_data.shape[1]: raise Exception('Number of columns of x_data of RawData object is not equal to number of 
columns of additional x_data.') if raw_data.y_coord_names is None and y_data is not None: raise Exception('RawData object does not contain y_data but y_data is given') if raw_data.y_coord_names is not None: if y_data is None: raise Exception('RawData object has y_data but no y_data is given') if y_data.shape[1] != len(raw_data.y_coord_names ): raise Exception('Number of columns of y_data of RawData object is not equal to number of columns of additional y_data.') numpy_dict = {'x_data' : x_data} if raw_data.y_coord_names is not None: numpy_dict['y_data'] = y_data raw_data.n_data += x_data.shape[0] old_version = raw_data.repo_info[RepoInfoKey.VERSION] new_version = self._repo.add(raw_data) self._repo._numpy_repo.append(self._name, old_version, new_version, numpy_dict) changed_data_sets = [] training_data = self._repo.get_training_data(full_object = False) if isinstance(training_data, DataSet): if training_data.raw_data == self._name and training_data.raw_data_version == repo_store.RepoStore.LAST_VERSION: if training_data.end_index is None or training_data.end_index < 0: training_data.raw_data_version = new_version changed_data_sets.append(training_data) test_data = self._repo.get_names(MLObjectType.TEST_DATA) for d in test_data: data = self._repo.get(d) if isinstance(data, DataSet): if data.raw_data == self._name and data.raw_data_version == repo_store.RepoStore.LAST_VERSION: if data.end_index is None or data.end_index < 0: data.raw_data_version = new_version changed_data_sets.append(data) self._repo.add(changed_data_sets, 'RawData ' + self._name + ' updated, add DataSets depending om the updated RawData.') if hasattr(self, 'obj'): self.obj = self._repo.get(self._name, version=new_version) logger.info('Finished appending data to RawData' + self._name) class _RawDataCollection(_RepoObjectItem): @staticmethod def __get_name_from_path(path): return path.split('/')[-1] def __init__(self, repo): super(_RawDataCollection, self).__init__('raw_data', repo) names = repo.get_names(MLObjectType.RAW_DATA) for n in names: setattr(self, _RawDataCollection.__get_name_from_path(n), _RawDataItem(n, repo)) def add(self, name, data, input_variables = None, target_variables = None): path = 'raw_data/' + name if input_variables is None: input_variables = list(data) if not target_variables is None: [input_variables.remove(x) for x in target_variables] else: if isinstance(input_variables, str): input_variables = [input_variables] if not [item for item in input_variables if item in list(data)] == list(input_variables): raise Exception('RawData does not include at least one column included in input_variables') if target_variables is not None: if isinstance(target_variables, str): target_variables = [target_variables] if not [item for item in target_variables if item in list(data)] == list(target_variables): raise Exception('RawData does not include at least one column included in target_variables') raw_data = repo_objects.RawData(data.loc[:, input_variables].values, input_variables, data.loc[:, target_variables].values, target_variables, repo_info = {RepoInfoKey.NAME: path}) else: raw_data = repo_objects.RawData(data.loc[:, input_variables].values, input_variables, repo_info = {RepoInfoKey.NAME: path}) v = self._repo.add(raw_data, 'data ' + path + ' added to repository' , category = MLObjectType.RAW_DATA) obj = self._repo.get(path, version=v, full_object = False) setattr(self, name, _RawDataItem(path, self._repo, obj)) def add_from_numpy_file(self, name, filename_X, x_names, filename_Y=None, y_names = None): path = 
name X = load(filename_X) Y = None if filename_Y is not None: Y = load(filename_Y) raw_data = repo_objects.RawData(X, x_names, Y, y_names, repo_info = {RepoInfoKey.NAME: path}) v = self._repo.add(raw_data, 'data ' + path + ' added to repository' , category = MLObjectType.RAW_DATA) obj = self._repo.get(path, version=v, full_object = False) setattr(self, name, _RawDataItem(path, self._repo, obj)) class _TrainingDataCollection(_RepoObjectItem): @staticmethod def __get_name_from_path(path): return path.split('/')[-1] def __init__(self, repo): super(_TrainingDataCollection, self).__init__('training_data', None) self.__repo = repo names = repo.get_names(MLObjectType.TRAINING_DATA) for n in names: setattr(self, _TrainingDataCollection.__get_name_from_path(n), _RepoObjectItem(n, repo)) def add(self, name, raw_data, start_index=0, end_index=None, raw_data_version='last'): data_set = repo_objects.DataSet(raw_data, start_index, end_index, raw_data_version, repo_info = {RepoInfoKey.NAME: name, RepoInfoKey.CATEGORY: MLObjectType.TRAINING_DATA}) v = self.__repo.add(data_set) tmp = self.__repo.get(name, version=v) item = _RepoObjectItem(name, self.__repo, tmp) setattr(self, name, item) class _TestDataCollection(_RepoObjectItem): @staticmethod def __get_name_from_path(path): return path.split('/')[-1] def __init__(self, repo): super(_TestDataCollection, self).__init__('test_data', None) self.__repo = repo names = repo.get_names(MLObjectType.TEST_DATA) for n in names: setattr(self, _TestDataCollection.__get_name_from_path(n), _RepoObjectItem(n,repo)) def add(self, name, raw_data, start_index=0, end_index=None, raw_data_version='last'): data_set = repo_objects.DataSet(raw_data, start_index, end_index, raw_data_version, repo_info = {RepoInfoKey.NAME: name, RepoInfoKey.CATEGORY: MLObjectType.TEST_DATA}) v = self.__repo.add(data_set) tmp = self.__repo.get(name, version=v) item = _RepoObjectItem(name, self.__repo, tmp) setattr(self, name, item) class _MeasureItem(_RepoObjectItem): def __init__(self, name, ml_repo, repo_obj = None): super(_MeasureItem, self).__init__(name, ml_repo, repo_obj) class _JobItem(_RepoObjectItem): def __init__(self, name, ml_repo, repo_obj = None): super(_JobItem, self).__init__(name, ml_repo, repo_obj) class _MeasureCollection(_RepoObjectItem): def __init__(self, name, ml_repo): super(_MeasureCollection, self).__init__('measures', None) names = ml_repo.get_names(MLObjectType.MEASURE) for n in names: path = n.split('/')[2:] items = [None] * len(path) for i in range(len(items)-1): items[i] = _RepoObjectItem(path[i], None) items[-1] = _MeasureItem(n, ml_repo) self._set(path, items) class _EvalCollection(_RepoObjectItem): def __init__(self, name, ml_repo): super(_EvalCollection, self).__init__('eval', None) names = ml_repo.get_names(MLObjectType.EVAL_DATA) for n in names: path = n.split('/')[2:] items = [None] * len(path) for i in range(len(items)-1): items[i] = _RepoObjectItem(path[i], None) items[-1] = _MeasureItem(n, ml_repo) self._set(path, items) class _TestCollection(_RepoObjectItem): def __init__(self, name, ml_repo): super(_TestCollection, self).__init__('tests', None) names = ml_repo.get_names(MLObjectType.TEST) for n in names: path = n.split('/')[2:] items = [None] * len(path) for i in range(len(items)-1): items[i] = _RepoObjectItem(path[i], None) items[-1] = _RepoObjectItem(n, ml_repo) self._set(path, items) class _JobCollection(_RepoObjectItem): def __init__(self, name, ml_repo, model_name): super(_JobCollection, self).__init__('jobs', None) names = 
ml_repo.get_names(MLObjectType.JOB) for n in names: if model_name in n: path = n.split('/') path = path[path.index('jobs')+1:] items = [None] * len(path) for i in range(len(items)-1): items[i] = _RepoObjectItem(path[i], None) items[-1] = _JobItem(n, ml_repo) self._set(path, items) class _ModelItem(_RepoObjectItem): def __init__(self, name, ml_repo, repo_obj = None): super(_ModelItem,self).__init__(name, ml_repo, repo_obj) self.model = _RepoObjectItem(name + '/model', ml_repo) self.eval = _EvalCollection(name + '/eval', ml_repo) self.model_param = _RepoObjectItem(name + '/model_param', ml_repo) self.tests = _TestCollection(name + '/tests', ml_repo) self.measures = _MeasureCollection(name+ '/measure', ml_repo) self.jobs = _JobCollection(name+'/jobs', ml_repo, name) if ml_repo._object_exists(name+'/training_stat'): self.training_statistic = _RepoObjectItem(name+'/training_stat', ml_repo) if ml_repo._object_exists(name+'/training_param'): self.training_param = _RepoObjectItem(name + '/training_param', ml_repo) def set_label(self, label_name, version = repo_store.RepoStore.LAST_VERSION, message=''): self._repo.set_label(label_name, self._name+ '/model', version, message) class _LabelCollection(_RepoObjectItem): def __init__(self, repo): super(_LabelCollection,self).__init__('labels', None) names = repo.get_names(MLObjectType.LABEL) for n in names: setattr(self, n, _RepoObjectItem(n, repo)) class _ModelCollection(_RepoObjectItem): @staticmethod def __get_name_from_path(name): return name def __init__(self, repo): super(_ModelCollection,self).__init__('models', None) names = repo.get_names(MLObjectType.MODEL) for n in names: setattr(self, _ModelCollection.__get_name_from_path(n), _ModelItem(n, repo)) self.labels = _LabelCollection(repo) def add(self, name): setattr(self, name, _ModelItem(name,self._repo)) class _CacheDataCollection(_RepoObjectItem): @staticmethod def __get_name_from_path(path): return path.split('/')[-1] def __init__(self, repo): super(_CacheDataCollection, self).__init__('cache', None) self.__repo = repo names = repo.get_names(MLObjectType.CACHED_VALUE) for n in names: setattr(self, _CacheDataCollection.__get_name_from_path(n), _RepoObjectItem(n, repo)) class MLTree: @staticmethod def add_tree(ml_repo): setattr(ml_repo, 'tree', MLTree(ml_repo)) ml_repo._add_triggers.append(ml_repo.tree.reload) def __create(self): self.raw_data = _RawDataCollection(self.__ml_repo) self.training_data = _TrainingDataCollection(self.__ml_repo) self.test_data = _TestDataCollection(self.__ml_repo) self.models = _ModelCollection(self.__ml_repo) self.cache = _CacheDataCollection(self.__ml_repo) def __init__(self, ml_repo): self.__ml_repo = ml_repo self.__create() def reload(self, **kwargs): self.__create() def modifications(self): result = {} tmp = self.raw_data.modifications() if tmp is not None: result.update(tmp) tmp = self.training_data.modifications() if tmp is not None: result.update(tmp) tmp = self.test_data.modifications() if tmp is not None: result.update(tmp) tmp = self.models.modifications() if tmp is not None: result.update(tmp) if len(result) == 0: return None return result
true
true
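A brief usage sketch for the MLTree module above, following its own docstring; ml_repo is assumed to be an already configured pailab MLRepo instance and is not constructed here.
# Sketch based on the module docstring above; ml_repo is an assumed, existing MLRepo.
from pailab.tools.tree import MLTree

MLTree.add_tree(ml_repo)                        # attaches the tree as ml_repo.tree
ml_repo.tree.training_data.load()               # loads objects into .obj attributes
history = ml_repo.tree.training_data.history()  # per-object version history
pending = ml_repo.tree.modifications()          # uncommitted changes, or None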
f700e65c2ea8c83d9343839a41d81b9217b71539
184
py
Python
crash_course/ch07/exec/restaurant_seating.py
dantin/python-by-example
5769c7a332ebd60fd54e477b6813f2f2a0f3f37f
[ "BSD-3-Clause" ]
null
null
null
crash_course/ch07/exec/restaurant_seating.py
dantin/python-by-example
5769c7a332ebd60fd54e477b6813f2f2a0f3f37f
[ "BSD-3-Clause" ]
null
null
null
crash_course/ch07/exec/restaurant_seating.py
dantin/python-by-example
5769c7a332ebd60fd54e477b6813f2f2a0f3f37f
[ "BSD-3-Clause" ]
null
null
null
count = input('How many people will be in the dinner group? ') count = int(count) if count > 8: print('You\'ll have to wait for a table.') else: print('The table is ready.')
20.444444
62
0.646739
count = input('How many people will be in the dinner group? ') count = int(count) if count > 8: print('You\'ll have to wait for a table.') else: print('The table is ready.')
true
true
f700e672cd17275a041dea32beccb6a84ec37569
392
py
Python
setup.py
Damaen/Travis-Hello-world
6c88895142e708638000c9bd9550c3bc61045689
[ "MIT" ]
null
null
null
setup.py
Damaen/Travis-Hello-world
6c88895142e708638000c9bd9550c3bc61045689
[ "MIT" ]
null
null
null
setup.py
Damaen/Travis-Hello-world
6c88895142e708638000c9bd9550c3bc61045689
[ "MIT" ]
null
null
null
#!/usr/bin/env python from distutils.core import setup from glob import glob from os.path import basename, splitext from setuptools import find_packages setup(name='Fibonacci', version='1.0', description='Python Distribution Utilities', author='Kevin Chen', packages=find_packages('src'), package_dir={'': 'src'}, py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')], )
24.5
76
0.665816
from distutils.core import setup from glob import glob from os.path import basename, splitext from setuptools import find_packages setup(name='Fibonacci', version='1.0', description='Python Distribution Utilities', author='Kevin Chen', packages=find_packages('src'), package_dir={'': 'src'}, py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')], )
true
true
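The setup.py above derives py_modules from the files under src/ with glob, splitext, and basename; a standalone sketch of that discovery pattern follows, where the helper name discover_modules is an illustrative assumption.
# Standalone sketch of the src-layout module discovery used in the setup.py above.
from glob import glob
from os.path import basename, splitext

def discover_modules(src_dir="src"):
    # 'src/foo.py' -> 'foo'; only top-level .py files match the glob pattern
    return [splitext(basename(path))[0] for path in glob(src_dir + "/*.py")]

if __name__ == "__main__":
    print(discover_modules())  # e.g. ['fibonacci'] if src/fibonacci.py exists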
f700e68836d56c80b1eb23849bcf903eda4dfa6c
5,105
py
Python
nova/virt/hyperv/imagecache.py
ebalduf/nova-backports
6bf97ec73467de522d34ab7a17ca0e0874baa7f9
[ "Apache-2.0" ]
null
null
null
nova/virt/hyperv/imagecache.py
ebalduf/nova-backports
6bf97ec73467de522d34ab7a17ca0e0874baa7f9
[ "Apache-2.0" ]
null
null
null
nova/virt/hyperv/imagecache.py
ebalduf/nova-backports
6bf97ec73467de522d34ab7a17ca0e0874baa7f9
[ "Apache-2.0" ]
null
null
null
# Copyright 2013 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Image caching and management. """ import os from os_win import utilsfactory from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units import nova.conf from nova import exception from nova import utils from nova.virt.hyperv import pathutils from nova.virt import images LOG = logging.getLogger(__name__) CONF = nova.conf.CONF class ImageCache(object): def __init__(self): self._pathutils = pathutils.PathUtils() self._vhdutils = utilsfactory.get_vhdutils() def _get_root_vhd_size_gb(self, instance): if instance.old_flavor: return instance.old_flavor.root_gb else: return instance.root_gb def _resize_and_cache_vhd(self, instance, vhd_path): vhd_size = self._vhdutils.get_vhd_size(vhd_path)['VirtualSize'] root_vhd_size_gb = self._get_root_vhd_size_gb(instance) root_vhd_size = root_vhd_size_gb * units.Gi root_vhd_internal_size = ( self._vhdutils.get_internal_vhd_size_by_file_size( vhd_path, root_vhd_size)) if root_vhd_internal_size < vhd_size: raise exception.FlavorDiskSmallerThanImage( flavor_size=root_vhd_size, image_size=vhd_size) if root_vhd_internal_size > vhd_size: path_parts = os.path.splitext(vhd_path) resized_vhd_path = '%s_%s%s' % (path_parts[0], root_vhd_size_gb, path_parts[1]) @utils.synchronized(resized_vhd_path) def copy_and_resize_vhd(): if not self._pathutils.exists(resized_vhd_path): try: LOG.debug("Copying VHD %(vhd_path)s to " "%(resized_vhd_path)s", {'vhd_path': vhd_path, 'resized_vhd_path': resized_vhd_path}) self._pathutils.copyfile(vhd_path, resized_vhd_path) LOG.debug("Resizing VHD %(resized_vhd_path)s to new " "size %(root_vhd_size)s", {'resized_vhd_path': resized_vhd_path, 'root_vhd_size': root_vhd_size}) self._vhdutils.resize_vhd(resized_vhd_path, root_vhd_internal_size, is_file_max_size=False) except Exception: with excutils.save_and_reraise_exception(): if self._pathutils.exists(resized_vhd_path): self._pathutils.remove(resized_vhd_path) copy_and_resize_vhd() return resized_vhd_path def get_cached_image(self, context, instance): image_id = instance.image_ref base_vhd_dir = self._pathutils.get_base_vhd_dir() base_vhd_path = os.path.join(base_vhd_dir, image_id) @utils.synchronized(base_vhd_path) def fetch_image_if_not_existing(): vhd_path = None for format_ext in ['vhd', 'vhdx']: test_path = base_vhd_path + '.' + format_ext if self._pathutils.exists(test_path): vhd_path = test_path break if not vhd_path: try: images.fetch(context, image_id, base_vhd_path) format_ext = self._vhdutils.get_vhd_format(base_vhd_path) vhd_path = base_vhd_path + '.' 
+ format_ext.lower() self._pathutils.rename(base_vhd_path, vhd_path) except Exception: with excutils.save_and_reraise_exception(): if self._pathutils.exists(base_vhd_path): self._pathutils.remove(base_vhd_path) return vhd_path vhd_path = fetch_image_if_not_existing() if CONF.use_cow_images and vhd_path.split('.')[-1].lower() == 'vhd': # Resize the base VHD image as it's not possible to resize a # differencing VHD. This does not apply to VHDX images. resized_vhd_path = self._resize_and_cache_vhd(instance, vhd_path) if resized_vhd_path: return resized_vhd_path return vhd_path
39.573643
78
0.591773
import os from os_win import utilsfactory from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units import nova.conf from nova import exception from nova import utils from nova.virt.hyperv import pathutils from nova.virt import images LOG = logging.getLogger(__name__) CONF = nova.conf.CONF class ImageCache(object): def __init__(self): self._pathutils = pathutils.PathUtils() self._vhdutils = utilsfactory.get_vhdutils() def _get_root_vhd_size_gb(self, instance): if instance.old_flavor: return instance.old_flavor.root_gb else: return instance.root_gb def _resize_and_cache_vhd(self, instance, vhd_path): vhd_size = self._vhdutils.get_vhd_size(vhd_path)['VirtualSize'] root_vhd_size_gb = self._get_root_vhd_size_gb(instance) root_vhd_size = root_vhd_size_gb * units.Gi root_vhd_internal_size = ( self._vhdutils.get_internal_vhd_size_by_file_size( vhd_path, root_vhd_size)) if root_vhd_internal_size < vhd_size: raise exception.FlavorDiskSmallerThanImage( flavor_size=root_vhd_size, image_size=vhd_size) if root_vhd_internal_size > vhd_size: path_parts = os.path.splitext(vhd_path) resized_vhd_path = '%s_%s%s' % (path_parts[0], root_vhd_size_gb, path_parts[1]) @utils.synchronized(resized_vhd_path) def copy_and_resize_vhd(): if not self._pathutils.exists(resized_vhd_path): try: LOG.debug("Copying VHD %(vhd_path)s to " "%(resized_vhd_path)s", {'vhd_path': vhd_path, 'resized_vhd_path': resized_vhd_path}) self._pathutils.copyfile(vhd_path, resized_vhd_path) LOG.debug("Resizing VHD %(resized_vhd_path)s to new " "size %(root_vhd_size)s", {'resized_vhd_path': resized_vhd_path, 'root_vhd_size': root_vhd_size}) self._vhdutils.resize_vhd(resized_vhd_path, root_vhd_internal_size, is_file_max_size=False) except Exception: with excutils.save_and_reraise_exception(): if self._pathutils.exists(resized_vhd_path): self._pathutils.remove(resized_vhd_path) copy_and_resize_vhd() return resized_vhd_path def get_cached_image(self, context, instance): image_id = instance.image_ref base_vhd_dir = self._pathutils.get_base_vhd_dir() base_vhd_path = os.path.join(base_vhd_dir, image_id) @utils.synchronized(base_vhd_path) def fetch_image_if_not_existing(): vhd_path = None for format_ext in ['vhd', 'vhdx']: test_path = base_vhd_path + '.' + format_ext if self._pathutils.exists(test_path): vhd_path = test_path break if not vhd_path: try: images.fetch(context, image_id, base_vhd_path) format_ext = self._vhdutils.get_vhd_format(base_vhd_path) vhd_path = base_vhd_path + '.' + format_ext.lower() self._pathutils.rename(base_vhd_path, vhd_path) except Exception: with excutils.save_and_reraise_exception(): if self._pathutils.exists(base_vhd_path): self._pathutils.remove(base_vhd_path) return vhd_path vhd_path = fetch_image_if_not_existing() if CONF.use_cow_images and vhd_path.split('.')[-1].lower() == 'vhd': # differencing VHD. This does not apply to VHDX images. resized_vhd_path = self._resize_and_cache_vhd(instance, vhd_path) if resized_vhd_path: return resized_vhd_path return vhd_path
true
true
f700e6f165ef83040ba85bd247ff66f7e13fa19c
143,489
py
Python
Bottle.py
pmaillefert/Mywebsite
7ffed0cd5d17a522fa3313ce9183d965a439a611
[ "MIT" ]
null
null
null
Bottle.py
pmaillefert/Mywebsite
7ffed0cd5d17a522fa3313ce9183d965a439a611
[ "MIT" ]
null
null
null
Bottle.py
pmaillefert/Mywebsite
7ffed0cd5d17a522fa3313ce9183d965a439a611
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Bottle is a fast and simple micro-framework for small web applications. It offers request dispatching (Routes) with url parameter support, templates, a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and template engines - all in a single file and with no dependencies other than the Python Standard Library. Homepage and documentation: http://bottlepy.org/ Copyright (c) 2014, Marcel Hellkamp. License: MIT (see LICENSE for details) """ from __future__ import with_statement __author__ = 'Marcel Hellkamp' __version__ = '0.13-dev' __license__ = 'MIT' # The gevent and eventlet server adapters need to patch some modules before # they are imported. This is why we parse the commandline parameters here but # handle them later if __name__ == '__main__': from optparse import OptionParser _cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app") _opt = _cmd_parser.add_option _opt("--version", action="store_true", help="show version number.") _opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.") _opt("-s", "--server", default='wsgiref', help="use SERVER as backend.") _opt("-p", "--plugin", action="append", help="install additional plugin/s.") _opt("--debug", action="store_true", help="start server in debug mode.") _opt("--reload", action="store_true", help="auto-reload on file changes.") _cmd_options, _cmd_args = _cmd_parser.parse_args() if _cmd_options.server: if _cmd_options.server.startswith('gevent'): import gevent.monkey; gevent.monkey.patch_all() elif _cmd_options.server.startswith('eventlet'): import eventlet; eventlet.monkey_patch() import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\ os, re, subprocess, sys, tempfile, threading, time, warnings from datetime import date as datedate, datetime, timedelta from tempfile import TemporaryFile from traceback import format_exc, print_exc from inspect import getargspec from unicodedata import normalize try: from simplejson import dumps as json_dumps, loads as json_lds except ImportError: # pragma: no cover try: from json import dumps as json_dumps, loads as json_lds except ImportError: try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds except ImportError: def json_dumps(data): raise ImportError("JSON support requires Python 2.6 or simplejson.") json_lds = json_dumps # We now try to fix 2.5/2.6/3.1/3.2 incompatibilities. # It ain't pretty but it works... Sorry for the mess. py = sys.version_info py3k = py >= (3, 0, 0) py25 = py < (2, 6, 0) py31 = (3, 1, 0) <= py < (3, 2, 0) # Workaround for the missing "as" keyword in py3k. def _e(): return sys.exc_info()[1] # Workaround for the "print is a keyword/function" Python 2/3 dilemma # and a fallback for mod_wsgi (resticts stdout/err attribute access) try: _stdout, _stderr = sys.stdout.write, sys.stderr.write except IOError: _stdout = lambda x: sys.stdout.write(x) _stderr = lambda x: sys.stderr.write(x) # Lots of stdlib and builtin differences. 
if py3k: import http.client as httplib import _thread as thread from urllib.parse import urljoin, SplitResult as UrlSplitResult from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote urlunquote = functools.partial(urlunquote, encoding='latin1') from http.cookies import SimpleCookie from collections import MutableMapping as DictMixin import pickle from io import BytesIO from configparser import ConfigParser basestring = str unicode = str json_loads = lambda s: json_lds(touni(s)) callable = lambda x: hasattr(x, '__call__') imap = map def _raise(*a): raise a[0](a[1]).with_traceback(a[2]) else: # 2.x import httplib import thread from urlparse import urljoin, SplitResult as UrlSplitResult from urllib import urlencode, quote as urlquote, unquote as urlunquote from Cookie import SimpleCookie from itertools import imap import cPickle as pickle from StringIO import StringIO as BytesIO from ConfigParser import SafeConfigParser as ConfigParser if py25: msg = "Python 2.5 support may be dropped in future versions of Bottle." warnings.warn(msg, DeprecationWarning) from UserDict import DictMixin def next(it): return it.next() bytes = str else: # 2.6, 2.7 from collections import MutableMapping as DictMixin unicode = unicode json_loads = json_lds eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec')) # Some helpers for string/byte handling def tob(s, enc='utf8'): return s.encode(enc) if isinstance(s, unicode) else bytes(s) def touni(s, enc='utf8', err='strict'): if isinstance(s, bytes): return s.decode(enc, err) else: return unicode(s or ("" if s is None else s)) tonat = touni if py3k else tob # 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense). # 3.1 needs a workaround. if py31: from io import TextIOWrapper class NCTextIOWrapper(TextIOWrapper): def close(self): pass # Keep wrapped buffer open. # A bug in functools causes it to break if the wrapper is an instance method def update_wrapper(wrapper, wrapped, *a, **ka): try: functools.update_wrapper(wrapper, wrapped, *a, **ka) except AttributeError: pass # These helpers are used at module level and need to be defined first. # And yes, I know PEP-8, but sometimes a lower-case classname makes more sense. def depr(message, strict=False): warnings.warn(message, DeprecationWarning, stacklevel=3) def makelist(data): # This is just too handy if isinstance(data, (tuple, list, set, dict)): return list(data) elif data: return [data] else: return [] class DictProperty(object): """ Property that maps to a key in a local dict-like attribute. """ def __init__(self, attr, key=None, read_only=False): self.attr, self.key, self.read_only = attr, key, read_only def __call__(self, func): functools.update_wrapper(self, func, updated=[]) self.getter, self.key = func, self.key or func.__name__ return self def __get__(self, obj, cls): if obj is None: return self key, storage = self.key, getattr(obj, self.attr) if key not in storage: storage[key] = self.getter(obj) return storage[key] def __set__(self, obj, value): if self.read_only: raise AttributeError("Read-Only property.") getattr(obj, self.attr)[self.key] = value def __delete__(self, obj): if self.read_only: raise AttributeError("Read-Only property.") del getattr(obj, self.attr)[self.key] class cached_property(object): """ A property that is only computed once per instance and then replaces itself with an ordinary attribute. Deleting the attribute resets the property. 
""" def __init__(self, func): self.__doc__ = getattr(func, '__doc__') self.func = func def __get__(self, obj, cls): if obj is None: return self value = obj.__dict__[self.func.__name__] = self.func(obj) return value class lazy_attribute(object): """ A property that caches itself to the class object. """ def __init__(self, func): functools.update_wrapper(self, func, updated=[]) self.getter = func def __get__(self, obj, cls): value = self.getter(cls) setattr(cls, self.__name__, value) return value ############################################################################### # Exceptions and Events ######################################################## ############################################################################### class BottleException(Exception): """ A base class for exceptions used by bottle. """ pass ############################################################################### # Routing ###################################################################### ############################################################################### class RouteError(BottleException): """ This is a base class for all routing related exceptions """ class RouteReset(BottleException): """ If raised by a plugin or request handler, the route is reset and all plugins are re-applied. """ class RouterUnknownModeError(RouteError): pass class RouteSyntaxError(RouteError): """ The route parser found something not supported by this router. """ class RouteBuildError(RouteError): """ The route could not be built. """ def _re_flatten(p): """ Turn all capturing groups in a regular expression pattern into non-capturing groups. """ if '(' not in p: return p return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))', lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p) class Router(object): """ A Router is an ordered collection of route->target pairs. It is used to efficiently match WSGI requests against a number of routes and return the first target that satisfies the request. The target may be anything, usually a string, ID or callable object. A route consists of a path-rule and a HTTP method. The path-rule is either a static path (e.g. `/contact`) or a dynamic path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax and details on the matching order are described in docs:`routing`. """ default_pattern = '[^/]+' default_filter = 're' #: The current CPython regexp implementation does not allow more #: than 99 matching groups per regular expression. _MAX_GROUPS_PER_PATTERN = 99 def __init__(self, strict=False): self.rules = [] # All rules in order self._groups = {} # index of regexes to find them in dyna_routes self.builder = {} # Data structure for the url builder self.static = {} # Search structure for static routes self.dyna_routes = {} self.dyna_regexes = {} # Search structure for dynamic routes #: If true, static routes are no longer checked first. self.strict_order = strict self.filters = { 're': lambda conf: (_re_flatten(conf or self.default_pattern), None, None), 'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))), 'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))), 'path': lambda conf: (r'.+?', None, None)} def add_filter(self, name, func): """ Add a filter. The provided function is called with the configuration string as parameter and must return a (regexp, to_python, to_url) tuple. The first element is a string, the last two are callables or None. 
""" self.filters[name] = func rule_syntax = re.compile('(\\\\*)' '(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)' '|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)' '(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))') def _itertokens(self, rule): offset, prefix = 0, '' for match in self.rule_syntax.finditer(rule): prefix += rule[offset:match.start()] g = match.groups() if len(g[0])%2: # Escaped wildcard prefix += match.group(0)[len(g[0]):] offset = match.end() continue if prefix: yield prefix, None, None name, filtr, conf = g[4:7] if g[2] is None else g[1:4] yield name, filtr or 'default', conf or None offset, prefix = match.end(), '' if offset <= len(rule) or prefix: yield prefix+rule[offset:], None, None def add(self, rule, method, target, name=None): """ Add a new rule or replace the target for an existing rule. """ anons = 0 # Number of anonymous wildcards found keys = [] # Names of keys pattern = '' # Regular expression pattern with named groups filters = [] # Lists of wildcard input filters builder = [] # Data structure for the URL builder is_static = True for key, mode, conf in self._itertokens(rule): if mode: is_static = False if mode == 'default': mode = self.default_filter mask, in_filter, out_filter = self.filters[mode](conf) if not key: pattern += '(?:%s)' % mask key = 'anon%d' % anons anons += 1 else: pattern += '(?P<%s>%s)' % (key, mask) keys.append(key) if in_filter: filters.append((key, in_filter)) builder.append((key, out_filter or str)) elif key: pattern += re.escape(key) builder.append((None, key)) self.builder[rule] = builder if name: self.builder[name] = builder if is_static and not self.strict_order: self.static.setdefault(method, {}) self.static[method][self.build(rule)] = (target, None) return try: re_pattern = re.compile('^(%s)$' % pattern) re_match = re_pattern.match except re.error: raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e())) if filters: def getargs(path): url_args = re_match(path).groupdict() for name, wildcard_filter in filters: try: url_args[name] = wildcard_filter(url_args[name]) except ValueError: raise HTTPError(400, 'Path has wrong format.') return url_args elif re_pattern.groupindex: def getargs(path): return re_match(path).groupdict() else: getargs = None flatpat = _re_flatten(pattern) whole_rule = (rule, flatpat, target, getargs) if (flatpat, method) in self._groups: if DEBUG: msg = 'Route <%s %s> overwrites a previously defined route' warnings.warn(msg % (method, rule), RuntimeWarning) self.dyna_routes[method][self._groups[flatpat, method]] = whole_rule else: self.dyna_routes.setdefault(method, []).append(whole_rule) self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1 self._compile(method) def _compile(self, method): all_rules = self.dyna_routes[method] comborules = self.dyna_regexes[method] = [] maxgroups = self._MAX_GROUPS_PER_PATTERN for x in range(0, len(all_rules), maxgroups): some = all_rules[x:x+maxgroups] combined = (flatpat for (_, flatpat, _, _) in some) combined = '|'.join('(^%s$)' % flatpat for flatpat in combined) combined = re.compile(combined).match rules = [(target, getargs) for (_, _, target, getargs) in some] comborules.append((combined, rules)) def build(self, _name, *anons, **query): """ Build an URL by filling the wildcards in a rule. 
""" builder = self.builder.get(_name) if not builder: raise RouteBuildError("No route with that name.", _name) try: for i, value in enumerate(anons): query['anon%d'%i] = value url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder]) return url if not query else url+'?'+urlencode(query) except KeyError: raise RouteBuildError('Missing URL argument: %r' % _e().args[0]) def match(self, environ): """ Return a (target, url_args) tuple or raise HTTPError(400/404/405). """ verb = environ['REQUEST_METHOD'].upper() path = environ['PATH_INFO'] or '/' if verb == 'HEAD': methods = ['PROXY', verb, 'GET', 'ANY'] else: methods = ['PROXY', verb, 'ANY'] for method in methods: if method in self.static and path in self.static[method]: target, getargs = self.static[method][path] return target, getargs(path) if getargs else {} elif method in self.dyna_regexes: for combined, rules in self.dyna_regexes[method]: match = combined(path) if match: target, getargs = rules[match.lastindex - 1] return target, getargs(path) if getargs else {} # No matching route found. Collect alternative methods for 405 response allowed = set([]) nocheck = set(methods) for method in set(self.static) - nocheck: if path in self.static[method]: allowed.add(verb) for method in set(self.dyna_regexes) - allowed - nocheck: for combined, rules in self.dyna_regexes[method]: match = combined(path) if match: allowed.add(method) if allowed: allow_header = ",".join(sorted(allowed)) raise HTTPError(405, "Method not allowed.", Allow=allow_header) # No matching route and no alternative method found. We give up raise HTTPError(404, "Not found: " + repr(path)) class Route(object): """ This class wraps a route callback along with route specific metadata and configuration and applies Plugins on demand. It is also responsible for turing an URL path rule into a regular expression usable by the Router. """ def __init__(self, app, rule, method, callback, name=None, plugins=None, skiplist=None, **config): #: The application this route is installed to. self.app = app #: The path-rule string (e.g. ``/wiki/<page>``). self.rule = rule #: The HTTP method as a string (e.g. ``GET``). self.method = method #: The original callback with no plugins applied. Useful for introspection. self.callback = callback #: The name of the route (if specified) or ``None``. self.name = name or None #: A list of route-specific plugins (see :meth:`Bottle.route`). self.plugins = plugins or [] #: A list of plugins to not apply to this route (see :meth:`Bottle.route`). self.skiplist = skiplist or [] #: Additional keyword arguments passed to the :meth:`Bottle.route` #: decorator are stored in this dictionary. Used for route-specific #: plugin configuration and meta-data. self.config = ConfigDict().load_dict(config) @cached_property def call(self): """ The route callback with all plugins applied. This property is created on demand and then cached to speed up subsequent requests.""" return self._make_callback() def reset(self): """ Forget any cached values. The next time :attr:`call` is accessed, all plugins are re-applied. """ self.__dict__.pop('call', None) def prepare(self): """ Do all on-demand work immediately (useful for debugging).""" self.call def all_plugins(self): """ Yield all Plugins affecting this route. 
""" unique = set() for p in reversed(self.app.plugins + self.plugins): if True in self.skiplist: break name = getattr(p, 'name', False) if name and (name in self.skiplist or name in unique): continue if p in self.skiplist or type(p) in self.skiplist: continue if name: unique.add(name) yield p def _make_callback(self): callback = self.callback for plugin in self.all_plugins(): try: if hasattr(plugin, 'apply'): callback = plugin.apply(callback, self) else: callback = plugin(callback) except RouteReset: # Try again with changed configuration. return self._make_callback() if not callback is self.callback: update_wrapper(callback, self.callback) return callback def get_undecorated_callback(self): """ Return the callback. If the callback is a decorated function, try to recover the original function. """ func = self.callback func = getattr(func, '__func__' if py3k else 'im_func', func) closure_attr = '__closure__' if py3k else 'func_closure' while hasattr(func, closure_attr) and getattr(func, closure_attr): func = getattr(func, closure_attr)[0].cell_contents return func def get_callback_args(self): """ Return a list of argument names the callback (most likely) accepts as keyword arguments. If the callback is a decorated function, try to recover the original function before inspection. """ return getargspec(self.get_undecorated_callback())[0] def get_config(self, key, default=None): """ Lookup a config field and return its value, first checking the route.config, then route.app.config.""" for conf in (self.config, self.app.conifg): if key in conf: return conf[key] return default def __repr__(self): cb = self.get_undecorated_callback() return '<%s %r %r>' % (self.method, self.rule, cb) ############################################################################### # Application Object ########################################################### ############################################################################### class Bottle(object): """ Each Bottle object represents a single, distinct web application and consists of routes, callbacks, plugins, resources and configuration. Instances are callable WSGI applications. :param catchall: If true (default), handle all exceptions. Turn off to let debugging middleware handle exceptions. """ def __init__(self, catchall=True, autojson=True): #: A :class:`ConfigDict` for app specific configuration. self.config = ConfigDict() self.config._on_change = functools.partial(self.trigger_hook, 'config') self.config.meta_set('autojson', 'validate', bool) self.config.meta_set('catchall', 'validate', bool) self.config['catchall'] = catchall self.config['autojson'] = autojson #: A :class:`ResourceManager` for application files self.resources = ResourceManager() self.routes = [] # List of installed :class:`Route` instances. self.router = Router() # Maps requests to :class:`Route` instances. self.error_handler = {} # Core plugins self.plugins = [] # List of installed plugins. if self.config['autojson']: self.install(JSONPlugin()) self.install(TemplatePlugin()) #: If true, most exceptions are caught and returned as :exc:`HTTPError` catchall = DictProperty('config', 'catchall') __hook_names = 'before_request', 'after_request', 'app_reset', 'config' __hook_reversed = 'after_request' @cached_property def _hooks(self): return dict((name, []) for name in self.__hook_names) def add_hook(self, name, func): """ Attach a callback to a hook. Three hooks are currently implemented: before_request Executed once before each request. 
The request context is available, but no routing has happened yet. after_request Executed once after each request regardless of its outcome. app_reset Called whenever :meth:`Bottle.reset` is called. """ if name in self.__hook_reversed: self._hooks[name].insert(0, func) else: self._hooks[name].append(func) def remove_hook(self, name, func): """ Remove a callback from a hook. """ if name in self._hooks and func in self._hooks[name]: self._hooks[name].remove(func) return True def trigger_hook(self, __name, *args, **kwargs): """ Trigger a hook and return a list of results. """ return [hook(*args, **kwargs) for hook in self._hooks[__name][:]] def hook(self, name): """ Return a decorator that attaches a callback to a hook. See :meth:`add_hook` for details.""" def decorator(func): self.add_hook(name, func) return func return decorator def mount(self, prefix, app, **options): """ Mount an application (:class:`Bottle` or plain WSGI) to a specific URL prefix. Example:: root_app.mount('/admin/', admin_app) :param prefix: path prefix or `mount-point`. If it ends in a slash, that slash is mandatory. :param app: an instance of :class:`Bottle` or a WSGI application. All other parameters are passed to the underlying :meth:`route` call. """ segments = [p for p in prefix.split('/') if p] if not segments: raise ValueError('Empty path prefix.') path_depth = len(segments) def mountpoint_wrapper(): try: request.path_shift(path_depth) rs = HTTPResponse([]) def start_response(status, headerlist, exc_info=None): if exc_info: _raise(*exc_info) rs.status = status for name, value in headerlist: rs.add_header(name, value) return rs.body.append body = app(request.environ, start_response) if body and rs.body: body = itertools.chain(rs.body, body) rs.body = body or rs.body return rs finally: request.path_shift(-path_depth) options.setdefault('skip', True) options.setdefault('method', 'PROXY') options.setdefault('mountpoint', {'prefix': prefix, 'target': app}) options['callback'] = mountpoint_wrapper self.route('/%s/<:re:.*>' % '/'.join(segments), **options) if not prefix.endswith('/'): self.route('/' + '/'.join(segments), **options) def merge(self, routes): """ Merge the routes of another :class:`Bottle` application or a list of :class:`Route` objects into this application. The routes keep their 'owner', meaning that the :data:`Route.app` attribute is not changed. """ if isinstance(routes, Bottle): routes = routes.routes for route in routes: self.add_route(route) def install(self, plugin): """ Add a plugin to the list of plugins and prepare it for being applied to all routes of this application. A plugin may be a simple decorator or an object that implements the :class:`Plugin` API. """ if hasattr(plugin, 'setup'): plugin.setup(self) if not callable(plugin) and not hasattr(plugin, 'apply'): raise TypeError("Plugins must be callable or implement .apply()") self.plugins.append(plugin) self.reset() return plugin def uninstall(self, plugin): """ Uninstall plugins. Pass an instance to remove a specific plugin, a type object to remove all plugins that match that type, a string to remove all plugins with a matching ``name`` attribute or ``True`` to remove all plugins. Return the list of removed plugins. 
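            A short sketch of the accepted argument types; ``plugin`` and
            ``SQLitePlugin`` are placeholders for an installed plugin::

                app.uninstall(plugin)        # remove this exact instance
                app.uninstall(SQLitePlugin)  # remove all plugins of that type
                app.uninstall('json')        # remove all plugins named 'json'
                app.uninstall(True)          # remove all plugins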
""" removed, remove = [], plugin for i, plugin in list(enumerate(self.plugins))[::-1]: if remove is True or remove is plugin or remove is type(plugin) \ or getattr(plugin, 'name', True) == remove: removed.append(plugin) del self.plugins[i] if hasattr(plugin, 'close'): plugin.close() if removed: self.reset() return removed def reset(self, route=None): """ Reset all routes (force plugins to be re-applied) and clear all caches. If an ID or route object is given, only that specific route is affected. """ if route is None: routes = self.routes elif isinstance(route, Route): routes = [route] else: routes = [self.routes[route]] for route in routes: route.reset() if DEBUG: for route in routes: route.prepare() self.trigger_hook('app_reset') def close(self): """ Close the application and all installed plugins. """ for plugin in self.plugins: if hasattr(plugin, 'close'): plugin.close() def run(self, **kwargs): """ Calls :func:`run` with the same parameters. """ run(self, **kwargs) def match(self, environ): """ Search for a matching route and return a (:class:`Route` , urlargs) tuple. The second value is a dictionary with parameters extracted from the URL. Raise :exc:`HTTPError` (404/405) on a non-match.""" return self.router.match(environ) def get_url(self, routename, **kargs): """ Return a string that matches a named route """ scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/' location = self.router.build(routename, **kargs).lstrip('/') return urljoin(urljoin('/', scriptname), location) def add_route(self, route): """ Add a route object, but do not change the :data:`Route.app` attribute.""" self.routes.append(route) self.router.add(route.rule, route.method, route, name=route.name) if DEBUG: route.prepare() def route(self, path=None, method='GET', callback=None, name=None, apply=None, skip=None, **config): """ A decorator to bind a function to a request URL. Example:: @app.route('/hello/<name>') def hello(name): return 'Hello %s' % name The ``:name`` part is a wildcard. See :class:`Router` for syntax details. :param path: Request path or a list of paths to listen to. If no path is specified, it is automatically generated from the signature of the function. :param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of methods to listen to. (default: `GET`) :param callback: An optional shortcut to avoid the decorator syntax. ``route(..., callback=func)`` equals ``route(...)(func)`` :param name: The name for this route. (default: None) :param apply: A decorator or plugin or a list of plugins. These are applied to the route callback in addition to installed plugins. :param skip: A list of plugins, plugin classes or names. Matching plugins are not installed to this route. ``True`` skips all. Any additional keyword arguments are stored as route-specific configuration and passed to plugins (see :meth:`Plugin.apply`). """ if callable(path): path, callback = None, path plugins = makelist(apply) skiplist = makelist(skip) def decorator(callback): if isinstance(callback, basestring): callback = load(callback) for rule in makelist(path) or yieldroutes(callback): for verb in makelist(method): verb = verb.upper() route = Route(self, rule, verb, callback, name=name, plugins=plugins, skiplist=skiplist, **config) self.add_route(route) return callback return decorator(callback) if callback else decorator def get(self, path=None, method='GET', **options): """ Equals :meth:`route`. 
""" return self.route(path, method, **options) def post(self, path=None, method='POST', **options): """ Equals :meth:`route` with a ``POST`` method parameter. """ return self.route(path, method, **options) def put(self, path=None, method='PUT', **options): """ Equals :meth:`route` with a ``PUT`` method parameter. """ return self.route(path, method, **options) def delete(self, path=None, method='DELETE', **options): """ Equals :meth:`route` with a ``DELETE`` method parameter. """ return self.route(path, method, **options) def patch(self, path=None, method='PATCH', **options): """ Equals :meth:`route` with a ``PATCH`` method parameter. """ return self.route(path, method, **options) def error(self, code=500): """ Decorator: Register an output handler for a HTTP error code""" def wrapper(handler): self.error_handler[int(code)] = handler return handler return wrapper def default_error_handler(self, res): return tob(template(ERROR_PAGE_TEMPLATE, e=res)) def _handle(self, environ): path = environ['bottle.raw_path'] = environ['PATH_INFO'] if py3k: try: environ['PATH_INFO'] = path.encode('latin1').decode('utf8') except UnicodeError: return HTTPError(400, 'Invalid path string. Expected UTF-8') try: environ['bottle.app'] = self request.bind(environ) response.bind() try: self.trigger_hook('before_request') route, args = self.router.match(environ) environ['route.handle'] = route environ['bottle.route'] = route environ['route.url_args'] = args return route.call(**args) finally: self.trigger_hook('after_request') except HTTPResponse: return _e() except RouteReset: route.reset() return self._handle(environ) except (KeyboardInterrupt, SystemExit, MemoryError): raise except Exception: if not self.catchall: raise stacktrace = format_exc() environ['wsgi.errors'].write(stacktrace) return HTTPError(500, "Internal Server Error", _e(), stacktrace) def _cast(self, out, peek=None): """ Try to convert the parameter into something WSGI compatible and set correct HTTP headers when possible. Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like, iterable of strings and iterable of unicodes """ # Empty output is done here if not out: if 'Content-Length' not in response: response['Content-Length'] = 0 return [] # Join lists of byte or unicode strings. Mixed lists are NOT supported if isinstance(out, (tuple, list))\ and isinstance(out[0], (bytes, unicode)): out = out[0][0:0].join(out) # b'abc'[0:0] -> b'' # Encode unicode strings if isinstance(out, unicode): out = out.encode(response.charset) # Byte Strings are just returned if isinstance(out, bytes): if 'Content-Length' not in response: response['Content-Length'] = len(out) return [out] # HTTPError or HTTPException (recursive, because they may wrap anything) # TODO: Handle these explicitly in handle() or make them iterable. if isinstance(out, HTTPError): out.apply(response) out = self.error_handler.get(out.status_code, self.default_error_handler)(out) return self._cast(out) if isinstance(out, HTTPResponse): out.apply(response) return self._cast(out.body) # File-like objects. if hasattr(out, 'read'): if 'wsgi.file_wrapper' in request.environ: return request.environ['wsgi.file_wrapper'](out) elif hasattr(out, 'close') or not hasattr(out, '__iter__'): return WSGIFileWrapper(out) # Handle Iterables. We peek into them to detect their inner type. 
try: iout = iter(out) first = next(iout) while not first: first = next(iout) except StopIteration: return self._cast('') except HTTPResponse: first = _e() except (KeyboardInterrupt, SystemExit, MemoryError): raise except: if not self.catchall: raise first = HTTPError(500, 'Unhandled exception', _e(), format_exc()) # These are the inner types allowed in iterator or generator objects. if isinstance(first, HTTPResponse): return self._cast(first) elif isinstance(first, bytes): new_iter = itertools.chain([first], iout) elif isinstance(first, unicode): encoder = lambda x: x.encode(response.charset) new_iter = imap(encoder, itertools.chain([first], iout)) else: msg = 'Unsupported response type: %s' % type(first) return self._cast(HTTPError(500, msg)) if hasattr(out, 'close'): new_iter = _closeiter(new_iter, out.close) return new_iter def wsgi(self, environ, start_response): """ The bottle WSGI-interface. """ try: out = self._cast(self._handle(environ)) # rfc2616 section 4.3 if response._status_code in (100, 101, 204, 304)\ or environ['REQUEST_METHOD'] == 'HEAD': if hasattr(out, 'close'): out.close() out = [] start_response(response._status_line, response.headerlist) return out except (KeyboardInterrupt, SystemExit, MemoryError): raise except: if not self.catchall: raise err = '<h1>Critical error while processing request: %s</h1>' \ % html_escape(environ.get('PATH_INFO', '/')) if DEBUG: err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \ '<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \ % (html_escape(repr(_e())), html_escape(format_exc())) environ['wsgi.errors'].write(err) headers = [('Content-Type', 'text/html; charset=UTF-8')] start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info()) return [tob(err)] def __call__(self, environ, start_response): """ Each instance of :class:'Bottle' is a WSGI application. """ return self.wsgi(environ, start_response) def __enter__(self): """ Use this application as default for all module-level shortcuts. """ default_app.push(self) return self def __exit__(self, exc_type, exc_value, traceback): default_app.pop() ############################################################################### # HTTP and WSGI Tools ########################################################## ############################################################################### class BaseRequest(object): """ A wrapper for WSGI environment dictionaries that adds a lot of convenient access methods and properties. Most of them are read-only. Adding new attributes to a request actually adds them to the environ dictionary (as 'bottle.request.ext.<name>'). This is the recommended way to store and access request-specific data. """ __slots__ = ('environ', ) #: Maximum size of memory buffer for :attr:`body` in bytes. MEMFILE_MAX = 102400 def __init__(self, environ=None): """ Wrap a WSGI environ dictionary. """ #: The wrapped WSGI environ dictionary. This is the only real attribute. #: All other attributes actually are read-only properties. self.environ = {} if environ is None else environ self.environ['bottle.request'] = self @DictProperty('environ', 'bottle.app', read_only=True) def app(self): """ Bottle application handling this request. """ raise RuntimeError('This request is not connected to an application.') @DictProperty('environ', 'bottle.route', read_only=True) def route(self): """ The bottle :class:`Route` object that matches this request. 
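            For example, an 'after_request' hook could tag responses with the
            matched rule; this is only a sketch and the header name is made
            up::

                @app.hook('after_request')
                def annotate():
                    if 'bottle.route' in request.environ:
                        response.set_header('X-Route-Rule', request.route.rule)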
""" raise RuntimeError('This request is not connected to a route.') @DictProperty('environ', 'route.url_args', read_only=True) def url_args(self): """ The arguments extracted from the URL. """ raise RuntimeError('This request is not connected to a route.') @property def path(self): """ The value of ``PATH_INFO`` with exactly one prefixed slash (to fix broken clients and avoid the "empty path" edge case). """ return '/' + self.environ.get('PATH_INFO','').lstrip('/') @property def method(self): """ The ``REQUEST_METHOD`` value as an uppercase string. """ return self.environ.get('REQUEST_METHOD', 'GET').upper() @DictProperty('environ', 'bottle.request.headers', read_only=True) def headers(self): """ A :class:`WSGIHeaderDict` that provides case-insensitive access to HTTP request headers. """ return WSGIHeaderDict(self.environ) def get_header(self, name, default=None): """ Return the value of a request header, or a given default value. """ return self.headers.get(name, default) @DictProperty('environ', 'bottle.request.cookies', read_only=True) def cookies(self): """ Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT decoded. Use :meth:`get_cookie` if you expect signed cookies. """ cookies = SimpleCookie(self.environ.get('HTTP_COOKIE','')).values() return FormsDict((c.key, c.value) for c in cookies) def get_cookie(self, key, default=None, secret=None): """ Return the content of a cookie. To read a `Signed Cookie`, the `secret` must match the one used to create the cookie (see :meth:`BaseResponse.set_cookie`). If anything goes wrong (missing cookie or wrong signature), return a default value. """ value = self.cookies.get(key) if secret and value: dec = cookie_decode(value, secret) # (key, value) tuple or None return dec[1] if dec and dec[0] == key else default return value or default @DictProperty('environ', 'bottle.request.query', read_only=True) def query(self): """ The :attr:`query_string` parsed into a :class:`FormsDict`. These values are sometimes called "URL arguments" or "GET parameters", but not to be confused with "URL wildcards" as they are provided by the :class:`Router`. """ get = self.environ['bottle.get'] = FormsDict() pairs = _parse_qsl(self.environ.get('QUERY_STRING', '')) for key, value in pairs: get[key] = value return get @DictProperty('environ', 'bottle.request.forms', read_only=True) def forms(self): """ Form values parsed from an `url-encoded` or `multipart/form-data` encoded POST or PUT request body. The result is returned as a :class:`FormsDict`. All keys and values are strings. File uploads are stored separately in :attr:`files`. """ forms = FormsDict() for name, item in self.POST.allitems(): if not isinstance(item, FileUpload): forms[name] = item return forms @DictProperty('environ', 'bottle.request.params', read_only=True) def params(self): """ A :class:`FormsDict` with the combined values of :attr:`query` and :attr:`forms`. File uploads are stored in :attr:`files`. """ params = FormsDict() for key, value in self.query.allitems(): params[key] = value for key, value in self.forms.allitems(): params[key] = value return params @DictProperty('environ', 'bottle.request.files', read_only=True) def files(self): """ File uploads parsed from `multipart/form-data` encoded POST or PUT request body. The values are instances of :class:`FileUpload`. 
""" files = FormsDict() for name, item in self.POST.allitems(): if isinstance(item, FileUpload): files[name] = item return files @DictProperty('environ', 'bottle.request.json', read_only=True) def json(self): """ If the ``Content-Type`` header is ``application/json``, this property holds the parsed content of the request body. Only requests smaller than :attr:`MEMFILE_MAX` are processed to avoid memory exhaustion. """ ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0] if ctype == 'application/json': b = self._get_body_string() if not b: return None return json_loads(b) return None def _iter_body(self, read, bufsize): maxread = max(0, self.content_length) while maxread: part = read(min(maxread, bufsize)) if not part: break yield part maxread -= len(part) @staticmethod def _iter_chunked(read, bufsize): err = HTTPError(400, 'Error while parsing chunked transfer body.') rn, sem, bs = tob('\r\n'), tob(';'), tob('') while True: header = read(1) while header[-2:] != rn: c = read(1) header += c if not c: raise err if len(header) > bufsize: raise err size, _, _ = header.partition(sem) try: maxread = int(tonat(size.strip()), 16) except ValueError: raise err if maxread == 0: break buff = bs while maxread > 0: if not buff: buff = read(min(maxread, bufsize)) part, buff = buff[:maxread], buff[maxread:] if not part: raise err yield part maxread -= len(part) if read(2) != rn: raise err @DictProperty('environ', 'bottle.request.body', read_only=True) def _body(self): body_iter = self._iter_chunked if self.chunked else self._iter_body read_func = self.environ['wsgi.input'].read body, body_size, is_temp_file = BytesIO(), 0, False for part in body_iter(read_func, self.MEMFILE_MAX): body.write(part) body_size += len(part) if not is_temp_file and body_size > self.MEMFILE_MAX: body, tmp = TemporaryFile(mode='w+b'), body body.write(tmp.getvalue()) del tmp is_temp_file = True self.environ['wsgi.input'] = body body.seek(0) return body def _get_body_string(self): """ read body until content-length or MEMFILE_MAX into a string. Raise HTTPError(413) on requests that are to large. """ clen = self.content_length if clen > self.MEMFILE_MAX: raise HTTPError(413, 'Request too large') if clen < 0: clen = self.MEMFILE_MAX + 1 data = self.body.read(clen) if len(data) > self.MEMFILE_MAX: # Fail fast raise HTTPError(413, 'Request too large') return data @property def body(self): """ The HTTP request body as a seek-able file-like object. Depending on :attr:`MEMFILE_MAX`, this is either a temporary file or a :class:`io.BytesIO` instance. Accessing this property for the first time reads and replaces the ``wsgi.input`` environ variable. Subsequent accesses just do a `seek(0)` on the file object. """ self._body.seek(0) return self._body @property def chunked(self): """ True if Chunked transfer encoding was. """ return 'chunked' in self.environ.get('HTTP_TRANSFER_ENCODING', '').lower() #: An alias for :attr:`query`. GET = query @DictProperty('environ', 'bottle.request.post', read_only=True) def POST(self): """ The values of :attr:`forms` and :attr:`files` combined into a single :class:`FormsDict`. Values are either strings (form values) or instances of :class:`cgi.FieldStorage` (file uploads). 
""" post = FormsDict() # We default to application/x-www-form-urlencoded for everything that # is not multipart and take the fast path (also: 3.1 workaround) if not self.content_type.startswith('multipart/'): pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1')) for key, value in pairs: post[key] = value return post safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'): if key in self.environ: safe_env[key] = self.environ[key] args = dict(fp=self.body, environ=safe_env, keep_blank_values=True) if py31: args['fp'] = NCTextIOWrapper(args['fp'], encoding='utf8', newline='\n') elif py3k: args['encoding'] = 'utf8' data = cgi.FieldStorage(**args) self['_cgi.FieldStorage'] = data #http://bugs.python.org/issue18394#msg207958 data = data.list or [] for item in data: if item.filename: post[item.name] = FileUpload(item.file, item.name, item.filename, item.headers) else: post[item.name] = item.value return post @property def url(self): """ The full request URI including hostname and scheme. If your app lives behind a reverse proxy or load balancer and you get confusing results, make sure that the ``X-Forwarded-Host`` header is set correctly. """ return self.urlparts.geturl() @DictProperty('environ', 'bottle.request.urlparts', read_only=True) def urlparts(self): """ The :attr:`url` string as an :class:`urlparse.SplitResult` tuple. The tuple contains (scheme, host, path, query_string and fragment), but the fragment is always empty because it is not visible to the server. """ env = self.environ http = env.get('HTTP_X_FORWARDED_PROTO') or env.get('wsgi.url_scheme', 'http') host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST') if not host: # HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients. host = env.get('SERVER_NAME', '127.0.0.1') port = env.get('SERVER_PORT') if port and port != ('80' if http == 'http' else '443'): host += ':' + port path = urlquote(self.fullpath) return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '') @property def fullpath(self): """ Request path including :attr:`script_name` (if present). """ return urljoin(self.script_name, self.path.lstrip('/')) @property def query_string(self): """ The raw :attr:`query` part of the URL (everything in between ``?`` and ``#``) as a string. """ return self.environ.get('QUERY_STRING', '') @property def script_name(self): """ The initial portion of the URL's `path` that was removed by a higher level (server or routing middleware) before the application was called. This script path is returned with leading and tailing slashes. """ script_name = self.environ.get('SCRIPT_NAME', '').strip('/') return '/' + script_name + '/' if script_name else '/' def path_shift(self, shift=1): """ Shift path segments from :attr:`path` to :attr:`script_name` and vice versa. :param shift: The number of path segments to shift. May be negative to change the shift direction. (default: 1) """ script = self.environ.get('SCRIPT_NAME','/') self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift) @property def content_length(self): """ The request body length as an integer. The client is responsible to set this header. Otherwise, the real length of the body is unknown and -1 is returned. In this case, :attr:`body` will be empty. """ return int(self.environ.get('CONTENT_LENGTH') or -1) @property def content_type(self): """ The Content-Type header as a lowercase-string (default: empty). 
""" return self.environ.get('CONTENT_TYPE', '').lower() @property def is_xhr(self): """ True if the request was triggered by a XMLHttpRequest. This only works with JavaScript libraries that support the `X-Requested-With` header (most of the popular libraries do). """ requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','') return requested_with.lower() == 'xmlhttprequest' @property def is_ajax(self): """ Alias for :attr:`is_xhr`. "Ajax" is not the right term. """ return self.is_xhr @property def auth(self): """ HTTP authentication data as a (user, password) tuple. This implementation currently supports basic (not digest) authentication only. If the authentication happened at a higher level (e.g. in the front web-server or a middleware), the password field is None, but the user field is looked up from the ``REMOTE_USER`` environ variable. On any errors, None is returned. """ basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION','')) if basic: return basic ruser = self.environ.get('REMOTE_USER') if ruser: return (ruser, None) return None @property def remote_route(self): """ A list of all IPs that were involved in this request, starting with the client IP and followed by zero or more proxies. This does only work if all proxies support the ```X-Forwarded-For`` header. Note that this information can be forged by malicious clients. """ proxy = self.environ.get('HTTP_X_FORWARDED_FOR') if proxy: return [ip.strip() for ip in proxy.split(',')] remote = self.environ.get('REMOTE_ADDR') return [remote] if remote else [] @property def remote_addr(self): """ The client IP as a string. Note that this information can be forged by malicious clients. """ route = self.remote_route return route[0] if route else None def copy(self): """ Return a new :class:`Request` with a shallow :attr:`environ` copy. """ return Request(self.environ.copy()) def get(self, value, default=None): return self.environ.get(value, default) def __getitem__(self, key): return self.environ[key] def __delitem__(self, key): self[key] = ""; del(self.environ[key]) def __iter__(self): return iter(self.environ) def __len__(self): return len(self.environ) def keys(self): return self.environ.keys() def __setitem__(self, key, value): """ Change an environ value and clear all caches that depend on it. """ if self.environ.get('bottle.request.readonly'): raise KeyError('The environ dictionary is read-only.') self.environ[key] = value todelete = () if key == 'wsgi.input': todelete = ('body', 'forms', 'files', 'params', 'post', 'json') elif key == 'QUERY_STRING': todelete = ('query', 'params') elif key.startswith('HTTP_'): todelete = ('headers', 'cookies') for key in todelete: self.environ.pop('bottle.request.'+key, None) def __repr__(self): return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url) def __getattr__(self, name): """ Search in self.environ for additional user defined attributes. """ try: var = self.environ['bottle.request.ext.%s'%name] return var.__get__(self) if hasattr(var, '__get__') else var except KeyError: raise AttributeError('Attribute %r not defined.' % name) def __setattr__(self, name, value): if name == 'environ': return object.__setattr__(self, name, value) self.environ['bottle.request.ext.%s'%name] = value def _hkey(s): return s.title().replace('_','-') class HeaderProperty(object): def __init__(self, name, reader=None, writer=str, default=''): self.name, self.default = name, default self.reader, self.writer = reader, writer self.__doc__ = 'Current value of the %r header.' 
% name.title() def __get__(self, obj, _): if obj is None: return self value = obj.headers.get(self.name, self.default) return self.reader(value) if self.reader else value def __set__(self, obj, value): obj.headers[self.name] = self.writer(value) def __delete__(self, obj): del obj.headers[self.name] class BaseResponse(object): """ Storage class for a response body as well as headers and cookies. This class does support dict-like case-insensitive item-access to headers, but is NOT a dict. Most notably, iterating over a response yields parts of the body and not the headers. :param body: The response body as one of the supported types. :param status: Either an HTTP status code (e.g. 200) or a status line including the reason phrase (e.g. '200 OK'). :param headers: A dictionary or a list of name-value pairs. Additional keyword arguments are added to the list of headers. Underscores in the header name are replaced with dashes. """ default_status = 200 default_content_type = 'text/html; charset=UTF-8' # Header blacklist for specific response codes # (rfc2616 section 10.2.3 and 10.3.5) bad_headers = { 204: set(('Content-Type',)), 304: set(('Allow', 'Content-Encoding', 'Content-Language', 'Content-Length', 'Content-Range', 'Content-Type', 'Content-Md5', 'Last-Modified'))} def __init__(self, body='', status=None, headers=None, **more_headers): self._cookies = None self._headers = {} self.body = body self.status = status or self.default_status if headers: if isinstance(headers, dict): headers = headers.items() for name, value in headers: self.add_header(name, value) if more_headers: for name, value in more_headers.items(): self.add_header(name, value) def copy(self, cls=None): """ Returns a copy of self. """ cls = cls or BaseResponse assert issubclass(cls, BaseResponse) copy = cls() copy.status = self.status copy._headers = dict((k, v[:]) for (k, v) in self._headers.items()) if self._cookies: copy._cookies = SimpleCookie() copy._cookies.load(self._cookies.output()) return copy def __iter__(self): return iter(self.body) def close(self): if hasattr(self.body, 'close'): self.body.close() @property def status_line(self): """ The HTTP status line as a string (e.g. ``404 Not Found``).""" return self._status_line @property def status_code(self): """ The HTTP status code as an integer (e.g. 404).""" return self._status_code def _set_status(self, status): if isinstance(status, int): code, status = status, _HTTP_STATUS_LINES.get(status) elif ' ' in status: status = status.strip() code = int(status.split()[0]) else: raise ValueError('String status line without a reason phrase.') if not 100 <= code <= 999: raise ValueError('Status code out of range.') self._status_code = code self._status_line = str(status or ('%d Unknown' % code)) def _get_status(self): return self._status_line status = property(_get_status, _set_status, None, ''' A writeable property to change the HTTP response status. It accepts either a numeric code (100-999) or a string with a custom reason phrase (e.g. "404 Brain not found"). Both :data:`status_line` and :data:`status_code` are updated accordingly. The return value is always a status string. ''') del _get_status, _set_status @property def headers(self): """ An instance of :class:`HeaderDict`, a case-insensitive dict-like view on the response headers. 
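            The view supports the usual dict operations, alongside
            :meth:`set_header` and :meth:`add_header` on the response itself.
            A small sketch::

                response.headers['Cache-Control'] = 'no-cache'
                response.set_header('X-Frame-Options', 'DENY')
                response.add_header('Vary', 'Accept-Encoding')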
""" hdict = HeaderDict() hdict.dict = self._headers return hdict def __contains__(self, name): return _hkey(name) in self._headers def __delitem__(self, name): del self._headers[_hkey(name)] def __getitem__(self, name): return self._headers[_hkey(name)][-1] def __setitem__(self, name, value): self._headers[_hkey(name)] = [str(value)] def get_header(self, name, default=None): """ Return the value of a previously defined header. If there is no header with that name, return a default value. """ return self._headers.get(_hkey(name), [default])[-1] def set_header(self, name, value): """ Create a new response header, replacing any previously defined headers with the same name. """ self._headers[_hkey(name)] = [value if isinstance(value, unicode) else str(value)] def add_header(self, name, value): """ Add an additional response header, not removing duplicates. """ self._headers.setdefault(_hkey(name), []).append(str(value)) def iter_headers(self): """ Yield (header, value) tuples, skipping headers that are not allowed with the current response status code. """ return self.headerlist @property def headerlist(self): """ WSGI conform list of (header, value) tuples. """ out = [] headers = list(self._headers.items()) if 'Content-Type' not in self._headers: headers.append(('Content-Type', [self.default_content_type])) if self._status_code in self.bad_headers: bad_headers = self.bad_headers[self._status_code] headers = [h for h in headers if h[0] not in bad_headers] out += [(name, val) for (name, vals) in headers for val in vals] if self._cookies: for c in self._cookies.values(): out.append(('Set-Cookie', c.OutputString())) if py3k: out = [ (k, v.encode('utf8').decode('latin1') if isinstance(v, unicode) else v) for (k, v) in out] return out content_type = HeaderProperty('Content-Type') content_length = HeaderProperty('Content-Length', reader=int) expires = HeaderProperty('Expires', reader=lambda x: datetime.utcfromtimestamp(parse_date(x)), writer=lambda x: http_date(x)) @property def charset(self, default='UTF-8'): """ Return the charset specified in the content-type header (default: utf8). """ if 'charset=' in self.content_type: return self.content_type.split('charset=')[-1].split(';')[0].strip() return default def set_cookie(self, name, value, secret=None, **options): """ Create a new cookie or replace an old one. If the `secret` parameter is set, create a `Signed Cookie` (described below). :param name: the name of the cookie. :param value: the value of the cookie. :param secret: a signature key required for signed cookies. Additionally, this method accepts all RFC 2109 attributes that are supported by :class:`cookie.Morsel`, including: :param max_age: maximum age in seconds. (default: None) :param expires: a datetime object or UNIX timestamp. (default: None) :param domain: the domain that is allowed to read the cookie. (default: current domain) :param path: limits the cookie to a given path (default: current path) :param secure: limit the cookie to HTTPS connections (default: off). :param httponly: prevents client-side javascript to read this cookie (default: off, requires Python 2.6 or newer). If neither `expires` nor `max_age` is set (default), the cookie will expire at the end of the browser session (as soon as the browser window is closed). Signed cookies may store any pickle-able object and are cryptographically signed to prevent manipulation. Keep in mind that cookies are limited to 4kb in most browsers. 
Warning: Signed cookies are not encrypted (the client can still see the content) and not copy-protected (the client can restore an old cookie). The main intention is to make pickling and unpickling save, not to store secret information at client side. """ if not self._cookies: self._cookies = SimpleCookie() if secret: value = touni(cookie_encode((name, value), secret)) elif not isinstance(value, basestring): raise TypeError('Secret key missing for non-string Cookie.') if len(value) > 4096: raise ValueError('Cookie value to long.') self._cookies[name] = value for key, value in options.items(): if key == 'max_age': if isinstance(value, timedelta): value = value.seconds + value.days * 24 * 3600 if key == 'expires': if isinstance(value, (datedate, datetime)): value = value.timetuple() elif isinstance(value, (int, float)): value = time.gmtime(value) value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value) self._cookies[name][key.replace('_', '-')] = value def delete_cookie(self, key, **kwargs): """ Delete a cookie. Be sure to use the same `domain` and `path` settings as used to create the cookie. """ kwargs['max_age'] = -1 kwargs['expires'] = 0 self.set_cookie(key, '', **kwargs) def __repr__(self): out = '' for name, value in self.headerlist: out += '%s: %s\n' % (name.title(), value.strip()) return out def _local_property(): ls = threading.local() def fget(_): try: return ls.var except AttributeError: raise RuntimeError("Request context not initialized.") def fset(_, value): ls.var = value def fdel(_): del ls.var return property(fget, fset, fdel, 'Thread-local property') class LocalRequest(BaseRequest): """ A thread-local subclass of :class:`BaseRequest` with a different set of attributes for each thread. There is usually only one global instance of this class (:data:`request`). If accessed during a request/response cycle, this instance always refers to the *current* request (even on a multithreaded server). """ bind = BaseRequest.__init__ environ = _local_property() class LocalResponse(BaseResponse): """ A thread-local subclass of :class:`BaseResponse` with a different set of attributes for each thread. There is usually only one global instance of this class (:data:`response`). Its attributes are used to build the HTTP response at the end of the request/response cycle. 
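        A typical use inside a request handler, via the module-level
        ``response`` instance (a sketch; the header value is illustrative)::

            @app.route('/plain')
            def plain():
                response.content_type = 'text/plain'
                response.set_header('X-Powered-By', 'bottle')
                return 'plain text'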
""" bind = BaseResponse.__init__ _status_line = _local_property() _status_code = _local_property() _cookies = _local_property() _headers = _local_property() body = _local_property() Request = BaseRequest Response = BaseResponse class HTTPResponse(Response, BottleException): def __init__(self, body='', status=None, headers=None, **more_headers): super(HTTPResponse, self).__init__(body, status, headers, **more_headers) def apply(self, other): other._status_code = self._status_code other._status_line = self._status_line other._headers = self._headers other._cookies = self._cookies other.body = self.body class HTTPError(HTTPResponse): default_status = 500 def __init__(self, status=None, body=None, exception=None, traceback=None, **options): self.exception = exception self.traceback = traceback super(HTTPError, self).__init__(body, status, **options) ############################################################################### # Plugins ###################################################################### ############################################################################### class PluginError(BottleException): pass class JSONPlugin(object): name = 'json' api = 2 def __init__(self, json_dumps=json_dumps): self.json_dumps = json_dumps def apply(self, callback, _): dumps = self.json_dumps if not dumps: return callback def wrapper(*a, **ka): try: rv = callback(*a, **ka) except HTTPError: rv = _e() if isinstance(rv, dict): #Attempt to serialize, raises exception on failure json_response = dumps(rv) #Set content type only if serialization successful response.content_type = 'application/json' return json_response elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict): rv.body = dumps(rv.body) rv.content_type = 'application/json' return rv return wrapper class TemplatePlugin(object): """ This plugin applies the :func:`view` decorator to all routes with a `template` config parameter. If the parameter is a tuple, the second element must be a dict with additional options (e.g. `template_engine`) or default variables for the template. """ name = 'template' api = 2 def apply(self, callback, route): conf = route.config.get('template') if isinstance(conf, (tuple, list)) and len(conf) == 2: return view(conf[0], **conf[1])(callback) elif isinstance(conf, str): return view(conf)(callback) else: return callback #: Not a plugin, but part of the plugin API. TODO: Find a better place. class _ImportRedirect(object): def __init__(self, name, impmask): """ Create a virtual package that redirects imports (see PEP 302). """ self.name = name self.impmask = impmask self.module = sys.modules.setdefault(name, imp.new_module(name)) self.module.__dict__.update({'__file__': __file__, '__path__': [], '__all__': [], '__loader__': self}) sys.meta_path.append(self) def find_module(self, fullname, path=None): if '.' 
not in fullname: return packname = fullname.rsplit('.', 1)[0] if packname != self.name: return return self def load_module(self, fullname): if fullname in sys.modules: return sys.modules[fullname] modname = fullname.rsplit('.', 1)[1] realname = self.impmask % modname __import__(realname) module = sys.modules[fullname] = sys.modules[realname] setattr(self.module, modname, module) module.__loader__ = self return module ############################################################################### # Common Utilities ############################################################# ############################################################################### class MultiDict(DictMixin): """ This dict stores multiple values per key, but behaves exactly like a normal dict in that it returns only the newest value for any given key. There are special methods available to access the full list of values. """ def __init__(self, *a, **k): self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items()) def __len__(self): return len(self.dict) def __iter__(self): return iter(self.dict) def __contains__(self, key): return key in self.dict def __delitem__(self, key): del self.dict[key] def __getitem__(self, key): return self.dict[key][-1] def __setitem__(self, key, value): self.append(key, value) def keys(self): return self.dict.keys() if py3k: def values(self): return (v[-1] for v in self.dict.values()) def items(self): return ((k, v[-1]) for k, v in self.dict.items()) def allitems(self): return ((k, v) for k, vl in self.dict.items() for v in vl) iterkeys = keys itervalues = values iteritems = items iterallitems = allitems else: def values(self): return [v[-1] for v in self.dict.values()] def items(self): return [(k, v[-1]) for k, v in self.dict.items()] def iterkeys(self): return self.dict.iterkeys() def itervalues(self): return (v[-1] for v in self.dict.itervalues()) def iteritems(self): return ((k, v[-1]) for k, v in self.dict.iteritems()) def iterallitems(self): return ((k, v) for k, vl in self.dict.iteritems() for v in vl) def allitems(self): return [(k, v) for k, vl in self.dict.iteritems() for v in vl] def get(self, key, default=None, index=-1, type=None): """ Return the most recent value for a key. :param default: The default value to be returned if the key is not present or the type conversion fails. :param index: An index for the list of available values. :param type: If defined, this callable is used to cast the value into a specific type. Exception are suppressed and result in the default value to be returned. """ try: val = self.dict[key][index] return type(val) if type else val except Exception: pass return default def append(self, key, value): """ Add a new value to the list of values for this key. """ self.dict.setdefault(key, []).append(value) def replace(self, key, value): """ Replace the list of values with a single value. """ self.dict[key] = [value] def getall(self, key): """ Return a (possibly empty) list of values for a key. """ return self.dict.get(key) or [] #: Aliases for WTForms to mimic other multi-dict APIs (Django) getone = get getlist = getall class FormsDict(MultiDict): """ This :class:`MultiDict` subclass is used to store request form data. Additionally to the normal dict-like item access methods (which return unmodified data as native strings), this container also supports attribute-like access to its values. Attributes are automatically de- or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing attributes default to an empty string. 
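        A small sketch of the different access styles (field names are
        illustrative)::

            name = request.forms.name                 # unicode, '' if missing
            name = request.forms.getunicode('name', default='anonymous')
            raw  = request.forms.get('name')          # native string, not re-coded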
""" #: Encoding used for attribute values. input_encoding = 'utf8' #: If true (default), unicode strings are first encoded with `latin1` #: and then decoded to match :attr:`input_encoding`. recode_unicode = True def _fix(self, s, encoding=None): if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI return s.encode('latin1').decode(encoding or self.input_encoding) elif isinstance(s, bytes): # Python 2 WSGI return s.decode(encoding or self.input_encoding) else: return s def decode(self, encoding=None): """ Returns a copy with all keys and values de- or recoded to match :attr:`input_encoding`. Some libraries (e.g. WTForms) want a unicode dictionary. """ copy = FormsDict() enc = copy.input_encoding = encoding or self.input_encoding copy.recode_unicode = False for key, value in self.allitems(): copy.append(self._fix(key, enc), self._fix(value, enc)) return copy def getunicode(self, name, default=None, encoding=None): """ Return the value as a unicode string, or the default. """ try: return self._fix(self[name], encoding) except (UnicodeError, KeyError): return default def __getattr__(self, name, default=unicode()): # Without this guard, pickle generates a cryptic TypeError: if name.startswith('__') and name.endswith('__'): return super(FormsDict, self).__getattr__(name) return self.getunicode(name, default=default) class HeaderDict(MultiDict): """ A case-insensitive version of :class:`MultiDict` that defaults to replace the old value instead of appending it. """ def __init__(self, *a, **ka): self.dict = {} if a or ka: self.update(*a, **ka) def __contains__(self, key): return _hkey(key) in self.dict def __delitem__(self, key): del self.dict[_hkey(key)] def __getitem__(self, key): return self.dict[_hkey(key)][-1] def __setitem__(self, key, value): self.dict[_hkey(key)] = [str(value)] def append(self, key, value): self.dict.setdefault(_hkey(key), []).append(str(value)) def replace(self, key, value): self.dict[_hkey(key)] = [str(value)] def getall(self, key): return self.dict.get(_hkey(key)) or [] def get(self, key, default=None, index=-1): return MultiDict.get(self, _hkey(key), default, index) def filter(self, names): for name in [_hkey(n) for n in names]: if name in self.dict: del self.dict[name] class WSGIHeaderDict(DictMixin): """ This dict-like class wraps a WSGI environ dict and provides convenient access to HTTP_* fields. Keys and values are native strings (2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI environment contains non-native string values, these are de- or encoded using a lossless 'latin1' character set. The API will remain stable even on changes to the relevant PEPs. Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one that uses non-native strings.) """ #: List of keys that do not have a ``HTTP_`` prefix. cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH') def __init__(self, environ): self.environ = environ def _ekey(self, key): """ Translate header field name to CGI/WSGI environ key. """ key = key.replace('-','_').upper() if key in self.cgikeys: return key return 'HTTP_' + key def raw(self, key, default=None): """ Return the header value as is (may be bytes or unicode). """ return self.environ.get(self._ekey(key), default) def __getitem__(self, key): val = self.environ[self._ekey(key)] if py3k: if isinstance(val, unicode): val = val.encode('latin1').decode('utf8') else: val = val.decode('utf8') return val def __setitem__(self, key, value): raise TypeError("%s is read-only." 
% self.__class__) def __delitem__(self, key): raise TypeError("%s is read-only." % self.__class__) def __iter__(self): for key in self.environ: if key[:5] == 'HTTP_': yield _hkey(key[5:]) elif key in self.cgikeys: yield _hkey(key) def keys(self): return [x for x in self] def __len__(self): return len(self.keys()) def __contains__(self, key): return self._ekey(key) in self.environ class ConfigDict(dict): """ A dict-like configuration storage with additional support for namespaces, validators, meta-data, on_change listeners and more. """ __slots__ = ('_meta', '_on_change') def __init__(self): self._meta = {} self._on_change = lambda name, value: None def load_config(self, filename): """ Load values from an ``*.ini`` style config file. If the config file contains sections, their names are used as namespaces for the values within. The two special sections ``DEFAULT`` and ``bottle`` refer to the root namespace (no prefix). """ conf = ConfigParser() conf.read(filename) for section in conf.sections(): for key, value in conf.items(section): if section not in ('DEFAULT', 'bottle'): key = section + '.' + key self[key] = value return self def load_dict(self, source, namespace=''): """ Load values from a dictionary structure. Nesting can be used to represent namespaces. >>> c = ConfigDict() >>> c.load_dict({'some': {'namespace': {'key': 'value'} } }) {'some.namespace.key': 'value'} """ for key, value in source.items(): if isinstance(key, str): nskey = (namespace + '.' + key).strip('.') if isinstance(value, dict): self.load_dict(value, namespace=nskey) else: self[nskey] = value else: raise TypeError('Key has type %r (not a string)' % type(key)) return self def update(self, *a, **ka): """ If the first parameter is a string, all keys are prefixed with this namespace. Apart from that it works just as the usual dict.update(). Example: ``update('some.namespace', key='value')`` """ prefix = '' if a and isinstance(a[0], str): prefix = a[0].strip('.') + '.' a = a[1:] for key, value in dict(*a, **ka).items(): self[prefix+key] = value def setdefault(self, key, value): if key not in self: self[key] = value return self[key] def __setitem__(self, key, value): if not isinstance(key, str): raise TypeError('Key has type %r (not a string)' % type(key)) value = self.meta_get(key, 'filter', lambda x: x)(value) if key in self and self[key] is value: return self._on_change(key, value) dict.__setitem__(self, key, value) def __delitem__(self, key): self._on_change(key, None) dict.__delitem__(self, key) def meta_get(self, key, metafield, default=None): """ Return the value of a meta field for a key. """ return self._meta.get(key, {}).get(metafield, default) def meta_set(self, key, metafield, value): """ Set the meta field for a key to a new value. This triggers the on-change handler for existing keys. """ self._meta.setdefault(key, {})[metafield] = value if key in self: self[key] = self[key] def meta_list(self, key): """ Return an iterable of meta field names defined for a key. """ return self._meta.get(key, {}).keys() class AppStack(list): """ A stack-like list. Calling it returns the head of the stack. """ def __call__(self): """ Return the current default application. 
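            A sketch of how the stack is typically used::

                app = default_app()            # app used by module-level shortcuts
                new_app = default_app.push()   # make a fresh Bottle the new default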
""" return self[-1] def push(self, value=None): """ Add a new :class:`Bottle` instance to the stack """ if not isinstance(value, Bottle): value = Bottle() self.append(value) return value class WSGIFileWrapper(object): def __init__(self, fp, buffer_size=1024*64): self.fp, self.buffer_size = fp, buffer_size for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'): if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr)) def __iter__(self): buff, read = self.buffer_size, self.read while True: part = read(buff) if not part: return yield part class _closeiter(object): """ This only exists to be able to attach a .close method to iterators that do not support attribute assignment (most of itertools). """ def __init__(self, iterator, close=None): self.iterator = iterator self.close_callbacks = makelist(close) def __iter__(self): return iter(self.iterator) def close(self): for func in self.close_callbacks: func() class ResourceManager(object): """ This class manages a list of search paths and helps to find and open application-bound resources (files). :param base: default value for :meth:`add_path` calls. :param opener: callable used to open resources. :param cachemode: controls which lookups are cached. One of 'all', 'found' or 'none'. """ def __init__(self, base='./', opener=open, cachemode='all'): self.opener = opener self.base = base self.cachemode = cachemode #: A list of search paths. See :meth:`add_path` for details. self.path = [] #: A cache for resolved paths. ``res.cache.clear()`` clears the cache. self.cache = {} def add_path(self, path, base=None, index=None, create=False): """ Add a new path to the list of search paths. Return False if the path does not exist. :param path: The new search path. Relative paths are turned into an absolute and normalized form. If the path looks like a file (not ending in `/`), the filename is stripped off. :param base: Path used to absolutize relative search paths. Defaults to :attr:`base` which defaults to ``os.getcwd()``. :param index: Position within the list of search paths. Defaults to last index (appends to the list). The `base` parameter makes it easy to reference files installed along with a python module or package:: res.add_path('./resources/', __file__) """ base = os.path.abspath(os.path.dirname(base or self.base)) path = os.path.abspath(os.path.join(base, os.path.dirname(path))) path += os.sep if path in self.path: self.path.remove(path) if create and not os.path.isdir(path): os.makedirs(path) if index is None: self.path.append(path) else: self.path.insert(index, path) self.cache.clear() return os.path.exists(path) def __iter__(self): """ Iterate over all existing files in all registered paths. """ search = self.path[:] while search: path = search.pop() if not os.path.isdir(path): continue for name in os.listdir(path): full = os.path.join(path, name) if os.path.isdir(full): search.append(full) else: yield full def lookup(self, name): """ Search for a resource and return an absolute file path, or `None`. The :attr:`path` list is searched in order. The first match is returend. Symlinks are followed. The result is cached to speed up future lookups. """ if name not in self.cache or DEBUG: for path in self.path: fpath = os.path.join(path, name) if os.path.isfile(fpath): if self.cachemode in ('all', 'found'): self.cache[name] = fpath return fpath if self.cachemode == 'all': self.cache[name] = None return self.cache[name] def open(self, name, mode='r', *args, **kwargs): """ Find a resource and return a file object, or raise IOError. 
""" fname = self.lookup(name) if not fname: raise IOError("Resource %r not found." % name) return self.opener(fname, mode=mode, *args, **kwargs) class FileUpload(object): def __init__(self, fileobj, name, filename, headers=None): """ Wrapper for file uploads. """ #: Open file(-like) object (BytesIO buffer or temporary file) self.file = fileobj #: Name of the upload form field self.name = name #: Raw filename as sent by the client (may contain unsafe characters) self.raw_filename = filename #: A :class:`HeaderDict` with additional headers (e.g. content-type) self.headers = HeaderDict(headers) if headers else HeaderDict() content_type = HeaderProperty('Content-Type') content_length = HeaderProperty('Content-Length', reader=int, default=-1) @cached_property def filename(self): """ Name of the file on the client file system, but normalized to ensure file system compatibility. An empty filename is returned as 'empty'. Only ASCII letters, digits, dashes, underscores and dots are allowed in the final filename. Accents are removed, if possible. Whitespace is replaced by a single dash. Leading or tailing dots or dashes are removed. The filename is limited to 255 characters. """ fname = self.raw_filename if not isinstance(fname, unicode): fname = fname.decode('utf8', 'ignore') fname = normalize('NFKD', fname).encode('ASCII', 'ignore').decode('ASCII') fname = os.path.basename(fname.replace('\\', os.path.sep)) fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip() fname = re.sub(r'[-\s]+', '-', fname).strip('.-') return fname[:255] or 'empty' def _copy_file(self, fp, chunk_size=2**16): read, write, offset = self.file.read, fp.write, self.file.tell() while 1: buf = read(chunk_size) if not buf: break write(buf) self.file.seek(offset) def save(self, destination, overwrite=False, chunk_size=2**16): """ Save file to disk or copy its content to an open file(-like) object. If *destination* is a directory, :attr:`filename` is added to the path. Existing files are not overwritten by default (IOError). :param destination: File path, directory or file(-like) object. :param overwrite: If True, replace existing files. (default: False) :param chunk_size: Bytes to read at a time. (default: 64kb) """ if isinstance(destination, basestring): # Except file-likes here if os.path.isdir(destination): destination = os.path.join(destination, self.filename) if not overwrite and os.path.exists(destination): raise IOError('File exists.') with open(destination, 'wb') as fp: self._copy_file(fp, chunk_size) else: self._copy_file(destination, chunk_size) ############################################################################### # Application Helper ########################################################### ############################################################################### def abort(code=500, text='Unknown Error.'): """ Aborts execution and causes a HTTP error. """ raise HTTPError(code, text) def redirect(url, code=None): """ Aborts execution and causes a 303 or 302 redirect, depending on the HTTP protocol version. """ if not code: code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302 res = response.copy(cls=HTTPResponse) res.status = code res.body = "" res.set_header('Location', urljoin(request.url, url)) raise res def _file_iter_range(fp, offset, bytes, maxread=1024*1024): """ Yield chunks from a range in a file. 
No chunk is bigger than maxread.""" fp.seek(offset) while bytes > 0: part = fp.read(min(bytes, maxread)) if not part: break bytes -= len(part) yield part def static_file(filename, root, mimetype='auto', download=False, charset='UTF-8'): """ Open a file in a safe way and return :exc:`HTTPResponse` with status code 200, 305, 403 or 404. The ``Content-Type``, ``Content-Encoding``, ``Content-Length`` and ``Last-Modified`` headers are set if possible. Special support for ``If-Modified-Since``, ``Range`` and ``HEAD`` requests. :param filename: Name or path of the file to send. :param root: Root path for file lookups. Should be an absolute directory path. :param mimetype: Defines the content-type header (default: guess from file extension) :param download: If True, ask the browser to open a `Save as...` dialog instead of opening the file with the associated program. You can specify a custom filename as a string. If not specified, the original filename is used (default: False). :param charset: The charset to use for files with a ``text/*`` mime-type. (default: UTF-8) """ root = os.path.abspath(root) + os.sep filename = os.path.abspath(os.path.join(root, filename.strip('/\\'))) headers = dict() if not filename.startswith(root): return HTTPError(403, "Access denied.") if not os.path.exists(filename) or not os.path.isfile(filename): return HTTPError(404, "File does not exist.") if not os.access(filename, os.R_OK): return HTTPError(403, "You do not have permission to access this file.") if mimetype == 'auto': mimetype, encoding = mimetypes.guess_type(filename) if encoding: headers['Content-Encoding'] = encoding if mimetype: if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype: mimetype += '; charset=%s' % charset headers['Content-Type'] = mimetype if download: download = os.path.basename(filename if download == True else download) headers['Content-Disposition'] = 'attachment; filename="%s"' % download stats = os.stat(filename) headers['Content-Length'] = clen = stats.st_size lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime)) headers['Last-Modified'] = lm ims = request.environ.get('HTTP_IF_MODIFIED_SINCE') if ims: ims = parse_date(ims.split(";")[0].strip()) if ims is not None and ims >= int(stats.st_mtime): headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) return HTTPResponse(status=304, **headers) body = '' if request.method == 'HEAD' else open(filename, 'rb') headers["Accept-Ranges"] = "bytes" ranges = request.environ.get('HTTP_RANGE') if 'HTTP_RANGE' in request.environ: ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen)) if not ranges: return HTTPError(416, "Requested Range Not Satisfiable") offset, end = ranges[0] headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end-1, clen) headers["Content-Length"] = str(end-offset) if body: body = _file_iter_range(body, offset, end-offset) return HTTPResponse(body, status=206, **headers) return HTTPResponse(body, **headers) ############################################################################### # HTTP Utilities and MISC (TODO) ############################################### ############################################################################### def debug(mode=True): """ Change the debug level. 
There is only one debug level supported at the moment.""" global DEBUG if mode: warnings.simplefilter('default') DEBUG = bool(mode) def http_date(value): if isinstance(value, (datedate, datetime)): value = value.utctimetuple() elif isinstance(value, (int, float)): value = time.gmtime(value) if not isinstance(value, basestring): value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value) return value def parse_date(ims): """ Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """ try: ts = email.utils.parsedate_tz(ims) return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone except (TypeError, ValueError, IndexError, OverflowError): return None def parse_auth(header): """ Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None""" try: method, data = header.split(None, 1) if method.lower() == 'basic': user, pwd = touni(base64.b64decode(tob(data))).split(':',1) return user, pwd except (KeyError, ValueError): return None def parse_range_header(header, maxlen=0): """ Yield (start, end) ranges parsed from a HTTP Range header. Skip unsatisfiable ranges. The end index is non-inclusive.""" if not header or header[:6] != 'bytes=': return ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r] for start, end in ranges: try: if not start: # bytes=-100 -> last 100 bytes start, end = max(0, maxlen-int(end)), maxlen elif not end: # bytes=100- -> all but the first 99 bytes start, end = int(start), maxlen else: # bytes=100-200 -> bytes 100-200 (inclusive) start, end = int(start), min(int(end)+1, maxlen) if 0 <= start < end <= maxlen: yield start, end except ValueError: pass def _parse_qsl(qs): r = [] for pair in qs.replace(';','&').split('&'): if not pair: continue nv = pair.split('=', 1) if len(nv) != 2: nv.append('') key = urlunquote(nv[0].replace('+', ' ')) value = urlunquote(nv[1].replace('+', ' ')) r.append((key, value)) return r def _lscmp(a, b): """ Compares two strings in a cryptographically safe way: Runtime is not affected by length of common prefix. """ return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b) def cookie_encode(data, key): """ Encode and sign a pickle-able object. Return a (byte) string """ msg = base64.b64encode(pickle.dumps(data, -1)) sig = base64.b64encode(hmac.new(tob(key), msg).digest()) return tob('!') + sig + tob('?') + msg def cookie_decode(data, key): """ Verify and decode an encoded string. Return an object or None.""" data = tob(data) if cookie_is_encoded(data): sig, msg = data.split(tob('?'), 1) if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg).digest())): return pickle.loads(base64.b64decode(msg)) return None def cookie_is_encoded(data): """ Return True if the argument looks like a encoded cookie.""" return bool(data.startswith(tob('!')) and tob('?') in data) def html_escape(string): """ Escape HTML special characters ``&<>`` and quotes ``'"``. """ return string.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;')\ .replace('"','&quot;').replace("'",'&#039;') def html_quote(string): """ Escape and quote a string to be used as an HTTP attribute.""" return '"%s"' % html_escape(string).replace('\n','&#10;')\ .replace('\r','&#13;').replace('\t','&#9;') def yieldroutes(func): """ Return a generator for routes that match the signature (name, args) of the func parameter. This may yield more than one route if the function takes optional keyword arguments. 
The output is best described by example:: a() -> '/a' b(x, y) -> '/b/<x>/<y>' c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>' d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>' """ path = '/' + func.__name__.replace('__','/').lstrip('/') spec = getargspec(func) argc = len(spec[0]) - len(spec[3] or []) path += ('/<%s>' * argc) % tuple(spec[0][:argc]) yield path for arg in spec[0][argc:]: path += '/<%s>' % arg yield path def path_shift(script_name, path_info, shift=1): """ Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa. :return: The modified paths. :param script_name: The SCRIPT_NAME path. :param script_name: The PATH_INFO path. :param shift: The number of path fragments to shift. May be negative to change the shift direction. (default: 1) """ if shift == 0: return script_name, path_info pathlist = path_info.strip('/').split('/') scriptlist = script_name.strip('/').split('/') if pathlist and pathlist[0] == '': pathlist = [] if scriptlist and scriptlist[0] == '': scriptlist = [] if 0 < shift <= len(pathlist): moved = pathlist[:shift] scriptlist = scriptlist + moved pathlist = pathlist[shift:] elif 0 > shift >= -len(scriptlist): moved = scriptlist[shift:] pathlist = moved + pathlist scriptlist = scriptlist[:shift] else: empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO' raise AssertionError("Cannot shift. Nothing left from %s" % empty) new_script_name = '/' + '/'.join(scriptlist) new_path_info = '/' + '/'.join(pathlist) if path_info.endswith('/') and pathlist: new_path_info += '/' return new_script_name, new_path_info def auth_basic(check, realm="private", text="Access denied"): """ Callback decorator to require HTTP auth (basic). TODO: Add route(check_auth=...) parameter. """ def decorator(func): @functools.wraps(func) def wrapper(*a, **ka): user, password = request.auth or (None, None) if user is None or not check(user, password): err = HTTPError(401, text) err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm) return err return func(*a, **ka) return wrapper return decorator # Shortcuts for common Bottle methods. # They all refer to the current default application. def make_default_app_wrapper(name): """ Return a callable that relays calls to the current default app. 
""" @functools.wraps(getattr(Bottle, name)) def wrapper(*a, **ka): return getattr(app(), name)(*a, **ka) return wrapper route = make_default_app_wrapper('route') get = make_default_app_wrapper('get') post = make_default_app_wrapper('post') put = make_default_app_wrapper('put') delete = make_default_app_wrapper('delete') patch = make_default_app_wrapper('patch') error = make_default_app_wrapper('error') mount = make_default_app_wrapper('mount') hook = make_default_app_wrapper('hook') install = make_default_app_wrapper('install') uninstall = make_default_app_wrapper('uninstall') url = make_default_app_wrapper('get_url') ############################################################################### # Server Adapter ############################################################### ############################################################################### class ServerAdapter(object): quiet = False def __init__(self, host='127.0.0.1', port=8080, **options): self.options = options self.host = host self.port = int(port) def run(self, handler): # pragma: no cover pass def __repr__(self): args = ', '.join(['%s=%s'%(k,repr(v)) for k, v in self.options.items()]) return "%s(%s)" % (self.__class__.__name__, args) class CGIServer(ServerAdapter): quiet = True def run(self, handler): # pragma: no cover from wsgiref.handlers import CGIHandler def fixed_environ(environ, start_response): environ.setdefault('PATH_INFO', '') return handler(environ, start_response) CGIHandler().run(fixed_environ) class FlupFCGIServer(ServerAdapter): def run(self, handler): # pragma: no cover import flup.server.fcgi self.options.setdefault('bindAddress', (self.host, self.port)) flup.server.fcgi.WSGIServer(handler, **self.options).run() class WSGIRefServer(ServerAdapter): def run(self, app): # pragma: no cover from wsgiref.simple_server import make_server from wsgiref.simple_server import WSGIRequestHandler, WSGIServer import socket class FixedHandler(WSGIRequestHandler): def address_string(self): # Prevent reverse DNS lookups please. return self.client_address[0] def log_request(*args, **kw): if not self.quiet: return WSGIRequestHandler.log_request(*args, **kw) handler_cls = self.options.get('handler_class', FixedHandler) server_cls = self.options.get('server_class', WSGIServer) if ':' in self.host: # Fix wsgiref for IPv6 addresses. 
if getattr(server_cls, 'address_family') == socket.AF_INET: class server_cls(server_cls): address_family = socket.AF_INET6 self.srv = make_server(self.host, self.port, app, server_cls, handler_cls) self.port = self.srv.server_port # update port actual port (0 means random) try: self.srv.serve_forever() except KeyboardInterrupt: self.srv.server_close() # Prevent ResourceWarning: unclosed socket raise class CherryPyServer(ServerAdapter): def run(self, handler): # pragma: no cover from cherrypy import wsgiserver self.options['bind_addr'] = (self.host, self.port) self.options['wsgi_app'] = handler certfile = self.options.get('certfile') if certfile: del self.options['certfile'] keyfile = self.options.get('keyfile') if keyfile: del self.options['keyfile'] server = wsgiserver.CherryPyWSGIServer(**self.options) if certfile: server.ssl_certificate = certfile if keyfile: server.ssl_private_key = keyfile try: server.start() finally: server.stop() class WaitressServer(ServerAdapter): def run(self, handler): from waitress import serve serve(handler, host=self.host, port=self.port) class PasteServer(ServerAdapter): def run(self, handler): # pragma: no cover from paste import httpserver from paste.translogger import TransLogger handler = TransLogger(handler, setup_console_handler=(not self.quiet)) httpserver.serve(handler, host=self.host, port=str(self.port), **self.options) class MeinheldServer(ServerAdapter): def run(self, handler): from meinheld import server server.listen((self.host, self.port)) server.run(handler) class FapwsServer(ServerAdapter): """ Extremely fast webserver using libev. See http://www.fapws.org/ """ def run(self, handler): # pragma: no cover import fapws._evwsgi as evwsgi from fapws import base, config port = self.port if float(config.SERVER_IDENT[-2:]) > 0.4: # fapws3 silently changed its API in 0.5 port = str(port) evwsgi.start(self.host, port) # fapws3 never releases the GIL. Complain upstream. I tried. No luck. if 'BOTTLE_CHILD' in os.environ and not self.quiet: _stderr("WARNING: Auto-reloading does not work with Fapws3.\n") _stderr(" (Fapws3 breaks python thread support)\n") evwsgi.set_base_module(base) def app(environ, start_response): environ['wsgi.multiprocess'] = False return handler(environ, start_response) evwsgi.wsgi_cb(('', app)) evwsgi.run() class TornadoServer(ServerAdapter): """ The super hyped asynchronous server by facebook. Untested. """ def run(self, handler): # pragma: no cover import tornado.wsgi, tornado.httpserver, tornado.ioloop container = tornado.wsgi.WSGIContainer(handler) server = tornado.httpserver.HTTPServer(container) server.listen(port=self.port,address=self.host) tornado.ioloop.IOLoop.instance().start() class AppEngineServer(ServerAdapter): """ Adapter for Google App Engine. """ quiet = True def run(self, handler): from google.appengine.ext.webapp import util # A main() function in the handler script enables 'App Caching'. # Lets makes sure it is there. This _really_ improves performance. module = sys.modules.get('__main__') if module and not hasattr(module, 'main'): module.main = lambda: util.run_wsgi_app(handler) util.run_wsgi_app(handler) class TwistedServer(ServerAdapter): """ Untested. 
""" def run(self, handler): from twisted.web import server, wsgi from twisted.python.threadpool import ThreadPool from twisted.internet import reactor thread_pool = ThreadPool() thread_pool.start() reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop) factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler)) reactor.listenTCP(self.port, factory, interface=self.host) if not reactor.running: reactor.run() class DieselServer(ServerAdapter): """ Untested. """ def run(self, handler): from diesel.protocols.wsgi import WSGIApplication app = WSGIApplication(handler, port=self.port) app.run() class GeventServer(ServerAdapter): """ Untested. Options: * `fast` (default: False) uses libevent's http server, but has some issues: No streaming, no pipelining, no SSL. * See gevent.wsgi.WSGIServer() documentation for more options. """ def run(self, handler): from gevent import wsgi, pywsgi, local if not isinstance(threading.local(), local.local): msg = "Bottle requires gevent.monkey.patch_all() (before import)" raise RuntimeError(msg) if not self.options.pop('fast', None): wsgi = pywsgi self.options['log'] = None if self.quiet else 'default' address = (self.host, self.port) server = wsgi.WSGIServer(address, handler, **self.options) if 'BOTTLE_CHILD' in os.environ: import signal signal.signal(signal.SIGINT, lambda s, f: server.stop()) server.serve_forever() class GeventSocketIOServer(ServerAdapter): def run(self,handler): from socketio import server address = (self.host, self.port) server.SocketIOServer(address, handler, **self.options).serve_forever() class GunicornServer(ServerAdapter): """ Untested. See http://gunicorn.org/configure.html for options. """ def run(self, handler): from gunicorn.app.base import Application config = {'bind': "%s:%d" % (self.host, int(self.port))} config.update(self.options) class GunicornApplication(Application): def init(self, parser, opts, args): return config def load(self): return handler GunicornApplication().run() class EventletServer(ServerAdapter): """ Untested. Options: * `backlog` adjust the eventlet backlog parameter which is the maximum number of queued connections. Should be at least 1; the maximum value is system-dependent. * `family`: (default is 2) socket family, optional. See socket documentation for available families. """ def run(self, handler): from eventlet import wsgi, listen, patcher if not patcher.is_monkey_patched(os): msg = "Bottle requires eventlet.monkey_patch() (before import)" raise RuntimeError(msg) socket_args = {} for arg in ('backlog', 'family'): try: socket_args[arg] = self.options.pop(arg) except KeyError: pass address = (self.host, self.port) try: wsgi.server(listen(address, **socket_args), handler, log_output=(not self.quiet)) except TypeError: # Fallback, if we have old version of eventlet wsgi.server(listen(address), handler) class RocketServer(ServerAdapter): """ Untested. """ def run(self, handler): from rocket import Rocket server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler }) server.start() class BjoernServer(ServerAdapter): """ Fast server written in C: https://github.com/jonashaag/bjoern """ def run(self, handler): from bjoern import run run(handler, self.host, self.port) class AutoServer(ServerAdapter): """ Untested. 
""" adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer, WSGIRefServer] def run(self, handler): for sa in self.adapters: try: return sa(self.host, self.port, **self.options).run(handler) except ImportError: pass server_names = { 'cgi': CGIServer, 'flup': FlupFCGIServer, 'wsgiref': WSGIRefServer, 'waitress': WaitressServer, 'cherrypy': CherryPyServer, 'paste': PasteServer, 'fapws3': FapwsServer, 'tornado': TornadoServer, 'gae': AppEngineServer, 'twisted': TwistedServer, 'diesel': DieselServer, 'meinheld': MeinheldServer, 'gunicorn': GunicornServer, 'eventlet': EventletServer, 'gevent': GeventServer, 'geventSocketIO':GeventSocketIOServer, 'rocket': RocketServer, 'bjoern' : BjoernServer, 'auto': AutoServer, } ############################################################################### # Application Control ########################################################## ############################################################################### def load(target, **namespace): """ Import a module or fetch an object from a module. * ``package.module`` returns `module` as a module object. * ``pack.mod:name`` returns the module variable `name` from `pack.mod`. * ``pack.mod:func()`` calls `pack.mod.func()` and returns the result. The last form accepts not only function calls, but any type of expression. Keyword arguments passed to this function are available as local variables. Example: ``import_string('re:compile(x)', x='[a-z]')`` """ module, target = target.split(":", 1) if ':' in target else (target, None) if module not in sys.modules: __import__(module) if not target: return sys.modules[module] if target.isalnum(): return getattr(sys.modules[module], target) package_name = module.split('.')[0] namespace[package_name] = sys.modules[package_name] return eval('%s.%s' % (module, target), namespace) def load_app(target): """ Load a bottle application from a module and make sure that the import does not affect the current default application, but returns a separate application object. See :func:`load` for the target parameter. """ global NORUN; NORUN, nr_old = True, NORUN tmp = default_app.push() # Create a new "default application" try: rv = load(target) # Import the target module return rv if callable(rv) else tmp finally: default_app.remove(tmp) # Remove the temporary added default application NORUN = nr_old _debug = debug def run(app=None, server='wsgiref', host='127.0.0.1', port=8080, interval=1, reloader=False, quiet=False, plugins=None, debug=None, **kargs): """ Start a server instance. This method blocks until the server terminates. :param app: WSGI application or target string supported by :func:`load_app`. (default: :func:`default_app`) :param server: Server adapter to use. See :data:`server_names` keys for valid names or pass a :class:`ServerAdapter` subclass. (default: `wsgiref`) :param host: Server address to bind to. Pass ``0.0.0.0`` to listens on all interfaces including the external one. (default: 127.0.0.1) :param port: Server port to bind to. Values below 1024 require root privileges. (default: 8080) :param reloader: Start auto-reloading server? (default: False) :param interval: Auto-reloader interval in seconds (default: 1) :param quiet: Suppress output to stdout and stderr? (default: False) :param options: Options passed to the server adapter. """ if NORUN: return if reloader and not os.environ.get('BOTTLE_CHILD'): lockfile = None try: fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock') os.close(fd) # We only need this file to exist. 
We never write to it while os.path.exists(lockfile): args = [sys.executable] + sys.argv environ = os.environ.copy() environ['BOTTLE_CHILD'] = 'true' environ['BOTTLE_LOCKFILE'] = lockfile p = subprocess.Popen(args, env=environ) while p.poll() is None: # Busy wait... os.utime(lockfile, None) # I am alive! time.sleep(interval) if p.poll() != 3: if os.path.exists(lockfile): os.unlink(lockfile) sys.exit(p.poll()) except KeyboardInterrupt: pass finally: if os.path.exists(lockfile): os.unlink(lockfile) return try: if debug is not None: _debug(debug) app = app or default_app() if isinstance(app, basestring): app = load_app(app) if not callable(app): raise ValueError("Application is not callable: %r" % app) for plugin in plugins or []: if isinstance(plugin, basestring): plugin = load(plugin) app.install(plugin) if server in server_names: server = server_names.get(server) if isinstance(server, basestring): server = load(server) if isinstance(server, type): server = server(host=host, port=port, **kargs) if not isinstance(server, ServerAdapter): raise ValueError("Unknown or unsupported server: %r" % server) server.quiet = server.quiet or quiet if not server.quiet: _stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server))) _stderr("Listening on http://%s:%d/\n" % (server.host, server.port)) _stderr("Hit Ctrl-C to quit.\n\n") if reloader: lockfile = os.environ.get('BOTTLE_LOCKFILE') bgcheck = FileCheckerThread(lockfile, interval) with bgcheck: server.run(app) if bgcheck.status == 'reload': sys.exit(3) else: server.run(app) except KeyboardInterrupt: pass except (SystemExit, MemoryError): raise except: if not reloader: raise if not getattr(server, 'quiet', quiet): print_exc() time.sleep(interval) sys.exit(3) class FileCheckerThread(threading.Thread): """ Interrupt main-thread as soon as a changed module file is detected, the lockfile gets deleted or gets to old. 
""" def __init__(self, lockfile, interval): threading.Thread.__init__(self) self.daemon = True self.lockfile, self.interval = lockfile, interval #: Is one of 'reload', 'error' or 'exit' self.status = None def run(self): exists = os.path.exists mtime = lambda p: os.stat(p).st_mtime files = dict() for module in list(sys.modules.values()): path = getattr(module, '__file__', '') if path[-4:] in ('.pyo', '.pyc'): path = path[:-1] if path and exists(path): files[path] = mtime(path) while not self.status: if not exists(self.lockfile)\ or mtime(self.lockfile) < time.time() - self.interval - 5: self.status = 'error' thread.interrupt_main() for path, lmtime in list(files.items()): if not exists(path) or mtime(path) > lmtime: self.status = 'reload' thread.interrupt_main() break time.sleep(self.interval) def __enter__(self): self.start() def __exit__(self, exc_type, *_): if not self.status: self.status = 'exit' # silent exit self.join() return exc_type is not None and issubclass(exc_type, KeyboardInterrupt) ############################################################################### # Template Adapters ############################################################ ############################################################################### class TemplateError(HTTPError): def __init__(self, message): HTTPError.__init__(self, 500, message) class BaseTemplate(object): """ Base class and minimal API for template adapters """ extensions = ['tpl','html','thtml','stpl'] settings = {} #used in prepare() defaults = {} #used in render() def __init__(self, source=None, name=None, lookup=None, encoding='utf8', **settings): """ Create a new template. If the source parameter (str or buffer) is missing, the name argument is used to guess a template filename. Subclasses can assume that self.source and/or self.filename are set. Both are strings. The lookup, encoding and settings parameters are stored as instance variables. The lookup parameter stores a list containing directory paths. The encoding parameter should be used to decode byte strings or files. The settings parameter contains a dict for engine-specific settings. """ self.name = name self.source = source.read() if hasattr(source, 'read') else source self.filename = source.filename if hasattr(source, 'filename') else None self.lookup = [os.path.abspath(x) for x in lookup] if lookup else [] self.encoding = encoding self.settings = self.settings.copy() # Copy from class variable self.settings.update(settings) # Apply if not self.source and self.name: self.filename = self.search(self.name, self.lookup) if not self.filename: raise TemplateError('Template %s not found.' % repr(name)) if not self.source and not self.filename: raise TemplateError('No template specified.') self.prepare(**self.settings) @classmethod def search(cls, name, lookup=None): """ Search name in all directories specified in lookup. First without, then with common extensions. Return first hit. 
""" if not lookup: depr('The template lookup path list should not be empty.', True) #0.12 lookup = ['.'] if os.path.isabs(name) and os.path.isfile(name): depr('Absolute template path names are deprecated.', True) #0.12 return os.path.abspath(name) for spath in lookup: spath = os.path.abspath(spath) + os.sep fname = os.path.abspath(os.path.join(spath, name)) if not fname.startswith(spath): continue if os.path.isfile(fname): return fname for ext in cls.extensions: if os.path.isfile('%s.%s' % (fname, ext)): return '%s.%s' % (fname, ext) @classmethod def global_config(cls, key, *args): """ This reads or sets the global settings stored in class.settings. """ if args: cls.settings = cls.settings.copy() # Make settings local to class cls.settings[key] = args[0] else: return cls.settings[key] def prepare(self, **options): """ Run preparations (parsing, caching, ...). It should be possible to call this again to refresh a template or to update settings. """ raise NotImplementedError def render(self, *args, **kwargs): """ Render the template with the specified local variables and return a single byte or unicode string. If it is a byte string, the encoding must match self.encoding. This method must be thread-safe! Local variables may be provided in dictionaries (args) or directly, as keywords (kwargs). """ raise NotImplementedError class MakoTemplate(BaseTemplate): def prepare(self, **options): from mako.template import Template from mako.lookup import TemplateLookup options.update({'input_encoding':self.encoding}) options.setdefault('format_exceptions', bool(DEBUG)) lookup = TemplateLookup(directories=self.lookup, **options) if self.source: self.tpl = Template(self.source, lookup=lookup, **options) else: self.tpl = Template(uri=self.name, filename=self.filename, lookup=lookup, **options) def render(self, *args, **kwargs): for dictarg in args: kwargs.update(dictarg) _defaults = self.defaults.copy() _defaults.update(kwargs) return self.tpl.render(**_defaults) class CheetahTemplate(BaseTemplate): def prepare(self, **options): from Cheetah.Template import Template self.context = threading.local() self.context.vars = {} options['searchList'] = [self.context.vars] if self.source: self.tpl = Template(source=self.source, **options) else: self.tpl = Template(file=self.filename, **options) def render(self, *args, **kwargs): for dictarg in args: kwargs.update(dictarg) self.context.vars.update(self.defaults) self.context.vars.update(kwargs) out = str(self.tpl) self.context.vars.clear() return out class Jinja2Template(BaseTemplate): def prepare(self, filters=None, tests=None, globals={}, **kwargs): from jinja2 import Environment, FunctionLoader self.env = Environment(loader=FunctionLoader(self.loader), **kwargs) if filters: self.env.filters.update(filters) if tests: self.env.tests.update(tests) if globals: self.env.globals.update(globals) if self.source: self.tpl = self.env.from_string(self.source) else: self.tpl = self.env.get_template(self.filename) def render(self, *args, **kwargs): for dictarg in args: kwargs.update(dictarg) _defaults = self.defaults.copy() _defaults.update(kwargs) return self.tpl.render(**_defaults) def loader(self, name): fname = self.search(name, self.lookup) if not fname: return with open(fname, "rb") as f: return f.read().decode(self.encoding) class SimpleTemplate(BaseTemplate): def prepare(self, escape_func=html_escape, noescape=False, syntax=None, **ka): self.cache = {} enc = self.encoding self._str = lambda x: touni(x, enc) self._escape = lambda x: escape_func(touni(x, enc)) 
self.syntax = syntax if noescape: self._str, self._escape = self._escape, self._str @cached_property def co(self): return compile(self.code, self.filename or '<string>', 'exec') @cached_property def code(self): source = self.source if not source: with open(self.filename, 'rb') as f: source = f.read() try: source, encoding = touni(source), 'utf8' except UnicodeError: depr('Template encodings other than utf8 are no longer supported.') #0.11 source, encoding = touni(source, 'latin1'), 'latin1' parser = StplParser(source, encoding=encoding, syntax=self.syntax) code = parser.translate() self.encoding = parser.encoding return code def _rebase(self, _env, _name=None, **kwargs): _env['_rebase'] = (_name, kwargs) def _include(self, _env, _name=None, **kwargs): env = _env.copy() env.update(kwargs) if _name not in self.cache: self.cache[_name] = self.__class__(name=_name, lookup=self.lookup) return self.cache[_name].execute(env['_stdout'], env) def execute(self, _stdout, kwargs): env = self.defaults.copy() env.update(kwargs) env.update({'_stdout': _stdout, '_printlist': _stdout.extend, 'include': functools.partial(self._include, env), 'rebase': functools.partial(self._rebase, env), '_rebase': None, '_str': self._str, '_escape': self._escape, 'get': env.get, 'setdefault': env.setdefault, 'defined': env.__contains__ }) eval(self.co, env) if env.get('_rebase'): subtpl, rargs = env.pop('_rebase') rargs['base'] = ''.join(_stdout) #copy stdout del _stdout[:] # clear stdout return self._include(env, subtpl, **rargs) return env def render(self, *args, **kwargs): """ Render the template using keyword arguments as local variables. """ env = {}; stdout = [] for dictarg in args: env.update(dictarg) env.update(kwargs) self.execute(stdout, env) return ''.join(stdout) class StplSyntaxError(TemplateError): pass class StplParser(object): """ Parser for stpl templates. """ _re_cache = {} #: Cache for compiled re patterns # This huge pile of voodoo magic splits python code into 8 different tokens. # 1: All kinds of python strings (trust me, it works) _re_tok = '((?m)[urbURB]?(?:\'\'(?!\')|""(?!")|\'{6}|"{6}' \ '|\'(?:[^\\\\\']|\\\\.)+?\'|"(?:[^\\\\"]|\\\\.)+?"' \ '|\'{3}(?:[^\\\\]|\\\\.|\\n)+?\'{3}' \ '|"{3}(?:[^\\\\]|\\\\.|\\n)+?"{3}))' _re_inl = _re_tok.replace('|\\n','') # We re-use this string pattern later # 2: Comments (until end of line, but not the newline itself) _re_tok += '|(#.*)' # 3,4: Keywords that start or continue a python block (only start of line) _re_tok += '|^([ \\t]*(?:if|for|while|with|try|def|class)\\b)' \ '|^([ \\t]*(?:elif|else|except|finally)\\b)' # 5: Our special 'end' keyword (but only if it stands alone) _re_tok += '|((?:^|;)[ \\t]*end[ \\t]*(?=(?:%(block_close)s[ \\t]*)?\\r?$|;|#))' # 6: A customizable end-of-code-block template token (only end of line) _re_tok += '|(%(block_close)s[ \\t]*(?=$))' # 7: And finally, a single newline. 
The 8th token is 'everything else' _re_tok += '|(\\r?\\n)' # Match the start tokens of code areas in a template _re_split = '(?m)^[ \t]*(\\\\?)((%(line_start)s)|(%(block_start)s))' # Match inline statements (may contain python strings) _re_inl = '%%(inline_start)s((?:%s|[^\'"\n]+?)*?)%%(inline_end)s' % _re_inl default_syntax = '<% %> % {{ }}' def __init__(self, source, syntax=None, encoding='utf8'): self.source, self.encoding = touni(source, encoding), encoding self.set_syntax(syntax or self.default_syntax) self.code_buffer, self.text_buffer = [], [] self.lineno, self.offset = 1, 0 self.indent, self.indent_mod = 0, 0 def get_syntax(self): """ Tokens as a space separated string (default: <% %> % {{ }}) """ return self._syntax def set_syntax(self, syntax): self._syntax = syntax self._tokens = syntax.split() if not syntax in self._re_cache: names = 'block_start block_close line_start inline_start inline_end' etokens = map(re.escape, self._tokens) pattern_vars = dict(zip(names.split(), etokens)) patterns = (self._re_split, self._re_tok, self._re_inl) patterns = [re.compile(p%pattern_vars) for p in patterns] self._re_cache[syntax] = patterns self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax] syntax = property(get_syntax, set_syntax) def translate(self): if self.offset: raise RuntimeError('Parser is a one time instance.') while True: m = self.re_split.search(self.source[self.offset:]) if m: text = self.source[self.offset:self.offset+m.start()] self.text_buffer.append(text) offs = self.offset self.offset += m.end() if m.group(1): # Escape syntax line, sep, _ = self.source[self.offset:].partition('\n') self.text_buffer.append(self.source[offs+m.start():offs+m.start(1)]+m.group(2)+line+sep) self.offset += len(line+sep) continue self.flush_text() self.read_code(multiline=bool(m.group(4))) else: break self.text_buffer.append(self.source[self.offset:]) self.flush_text() return ''.join(self.code_buffer) def read_code(self, multiline): code_line, comment = '', '' while True: m = self.re_tok.search(self.source[self.offset:]) if not m: code_line += self.source[self.offset:] self.offset = len(self.source) self.write_code(code_line.strip(), comment) return code_line += self.source[self.offset:self.offset+m.start()] self.offset += m.end() _str, _com, _blk1, _blk2, _end, _cend, _nl = m.groups() if code_line and (_blk1 or _blk2): # a if b else c code_line += _blk1 or _blk2 continue if _str: # Python string code_line += _str elif _com: # Python comment (up to EOL) comment = _com if multiline and _com.strip().endswith(self._tokens[1]): multiline = False # Allow end-of-block in comments elif _blk1: # Start-block keyword (if/for/while/def/try/...) code_line, self.indent_mod = _blk1, -1 self.indent += 1 elif _blk2: # Continue-block keyword (else/elif/except/...) 
code_line, self.indent_mod = _blk2, -1 elif _end: # The non-standard 'end'-keyword (ends a block) self.indent -= 1 elif _cend: # The end-code-block template token (usually '%>') if multiline: multiline = False else: code_line += _cend else: # \n self.write_code(code_line.strip(), comment) self.lineno += 1 code_line, comment, self.indent_mod = '', '', 0 if not multiline: break def flush_text(self): text = ''.join(self.text_buffer) del self.text_buffer[:] if not text: return parts, pos, nl = [], 0, '\\\n'+' '*self.indent for m in self.re_inl.finditer(text): prefix, pos = text[pos:m.start()], m.end() if prefix: parts.append(nl.join(map(repr, prefix.splitlines(True)))) if prefix.endswith('\n'): parts[-1] += nl parts.append(self.process_inline(m.group(1).strip())) if pos < len(text): prefix = text[pos:] lines = prefix.splitlines(True) if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3] elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4] parts.append(nl.join(map(repr, lines))) code = '_printlist((%s,))' % ', '.join(parts) self.lineno += code.count('\n')+1 self.write_code(code) @staticmethod def process_inline(chunk): if chunk[0] == '!': return '_str(%s)' % chunk[1:] return '_escape(%s)' % chunk def write_code(self, line, comment=''): code = ' ' * (self.indent+self.indent_mod) code += line.lstrip() + comment + '\n' self.code_buffer.append(code) def template(*args, **kwargs): """ Get a rendered template as a string iterator. You can use a name, a filename or a template string as first parameter. Template rendering arguments can be passed as dictionaries or directly (as keyword arguments). """ tpl = args[0] if args else None adapter = kwargs.pop('template_adapter', SimpleTemplate) lookup = kwargs.pop('template_lookup', TEMPLATE_PATH) tplid = (id(lookup), tpl) if tplid not in TEMPLATES or DEBUG: settings = kwargs.pop('template_settings', {}) if isinstance(tpl, adapter): TEMPLATES[tplid] = tpl if settings: TEMPLATES[tplid].prepare(**settings) elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl: TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings) else: TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings) if not TEMPLATES[tplid]: abort(500, 'Template (%s) not found' % tpl) for dictarg in args[1:]: kwargs.update(dictarg) return TEMPLATES[tplid].render(kwargs) mako_template = functools.partial(template, template_adapter=MakoTemplate) cheetah_template = functools.partial(template, template_adapter=CheetahTemplate) jinja2_template = functools.partial(template, template_adapter=Jinja2Template) def view(tpl_name, **defaults): """ Decorator: renders a template for a handler. The handler can control its behavior like that: - return a dict of template vars to fill out the template - return something other than a dict and the view decorator will not process the template, but return the handler result as is. This includes returning a HTTPResponse(dict) to get, for instance, JSON with autojson or other castfilters. 
""" def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): result = func(*args, **kwargs) if isinstance(result, (dict, DictMixin)): tplvars = defaults.copy() tplvars.update(result) return template(tpl_name, **tplvars) elif result is None: return template(tpl_name, defaults) return result return wrapper return decorator mako_view = functools.partial(view, template_adapter=MakoTemplate) cheetah_view = functools.partial(view, template_adapter=CheetahTemplate) jinja2_view = functools.partial(view, template_adapter=Jinja2Template) ############################################################################### # Constants and Globals ######################################################## ############################################################################### TEMPLATE_PATH = ['./', './views/'] TEMPLATES = {} DEBUG = False NORUN = False # If set, run() does nothing. Used by load_app() #: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found') HTTP_CODES = httplib.responses HTTP_CODES[418] = "I'm a teapot" # RFC 2324 HTTP_CODES[428] = "Precondition Required" HTTP_CODES[429] = "Too Many Requests" HTTP_CODES[431] = "Request Header Fields Too Large" HTTP_CODES[511] = "Network Authentication Required" _HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.items()) #: The default template used for error pages. Override with @error() ERROR_PAGE_TEMPLATE = """ %%try: %%from %s import DEBUG, request <!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN"> <html> <head> <title>Error: {{e.status}}</title> <style type="text/css"> html {background-color: #eee; font-family: sans-serif;} body {background-color: #fff; border: 1px solid #ddd; padding: 15px; margin: 15px;} pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;} </style> </head> <body> <h1>Error: {{e.status}}</h1> <p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt> caused an error:</p> <pre>{{e.body}}</pre> %%if DEBUG and e.exception: <h2>Exception:</h2> <pre>{{repr(e.exception)}}</pre> %%end %%if DEBUG and e.traceback: <h2>Traceback:</h2> <pre>{{e.traceback}}</pre> %%end </body> </html> %%except ImportError: <b>ImportError:</b> Could not generate the error page. Please add bottle to the import path. %%end """ % __name__ #: A thread-safe instance of :class:`LocalRequest`. If accessed from within a #: request callback, this instance always refers to the *current* request #: (even on a multithreaded server). request = LocalRequest() #: A thread-safe instance of :class:`LocalResponse`. It is used to change the #: HTTP response for the *current* request. response = LocalResponse() #: A thread-safe namespace. Not used by Bottle. local = threading.local() # Initialize app stack (create first empty Bottle app) # BC: 0.6.4 and needed for run() app = default_app = AppStack() app.push() #: A virtual package that redirects import statements. #: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`. 
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else __name__+".ext", 'bottle_%s').module if __name__ == '__main__': opt, args, parser = _cmd_options, _cmd_args, _cmd_parser if opt.version: _stdout('Bottle %s\n'%__version__) sys.exit(0) if not args: parser.print_help() _stderr('\nError: No application entry point specified.\n') sys.exit(1) sys.path.insert(0, '.') sys.modules.setdefault('bottle', sys.modules['__main__']) host, port = (opt.bind or 'localhost'), 8080 if ':' in host and host.rfind(']') < host.rfind(':'): host, port = host.rsplit(':', 1) host = host.strip('[]') run(args[0], host=host, port=int(port), server=opt.server, reloader=opt.reload, plugins=opt.plugin, debug=opt.debug) # THE END
39.95795
108
0.592457
from __future__ import with_statement __author__ = 'Marcel Hellkamp' __version__ = '0.13-dev' __license__ = 'MIT' if __name__ == '__main__': from optparse import OptionParser _cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app") _opt = _cmd_parser.add_option _opt("--version", action="store_true", help="show version number.") _opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.") _opt("-s", "--server", default='wsgiref', help="use SERVER as backend.") _opt("-p", "--plugin", action="append", help="install additional plugin/s.") _opt("--debug", action="store_true", help="start server in debug mode.") _opt("--reload", action="store_true", help="auto-reload on file changes.") _cmd_options, _cmd_args = _cmd_parser.parse_args() if _cmd_options.server: if _cmd_options.server.startswith('gevent'): import gevent.monkey; gevent.monkey.patch_all() elif _cmd_options.server.startswith('eventlet'): import eventlet; eventlet.monkey_patch() import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\ os, re, subprocess, sys, tempfile, threading, time, warnings from datetime import date as datedate, datetime, timedelta from tempfile import TemporaryFile from traceback import format_exc, print_exc from inspect import getargspec from unicodedata import normalize try: from simplejson import dumps as json_dumps, loads as json_lds except ImportError: try: from json import dumps as json_dumps, loads as json_lds except ImportError: try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds except ImportError: def json_dumps(data): raise ImportError("JSON support requires Python 2.6 or simplejson.") json_lds = json_dumps py = sys.version_info py3k = py >= (3, 0, 0) py25 = py < (2, 6, 0) py31 = (3, 1, 0) <= py < (3, 2, 0) # Workaround for the missing "as" keyword in py3k. def _e(): return sys.exc_info()[1] # Workaround for the "print is a keyword/function" Python 2/3 dilemma # and a fallback for mod_wsgi (resticts stdout/err attribute access) try: _stdout, _stderr = sys.stdout.write, sys.stderr.write except IOError: _stdout = lambda x: sys.stdout.write(x) _stderr = lambda x: sys.stderr.write(x) # Lots of stdlib and builtin differences. if py3k: import http.client as httplib import _thread as thread from urllib.parse import urljoin, SplitResult as UrlSplitResult from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote urlunquote = functools.partial(urlunquote, encoding='latin1') from http.cookies import SimpleCookie from collections import MutableMapping as DictMixin import pickle from io import BytesIO from configparser import ConfigParser basestring = str unicode = str json_loads = lambda s: json_lds(touni(s)) callable = lambda x: hasattr(x, '__call__') imap = map def _raise(*a): raise a[0](a[1]).with_traceback(a[2]) else: # 2.x import httplib import thread from urlparse import urljoin, SplitResult as UrlSplitResult from urllib import urlencode, quote as urlquote, unquote as urlunquote from Cookie import SimpleCookie from itertools import imap import cPickle as pickle from StringIO import StringIO as BytesIO from ConfigParser import SafeConfigParser as ConfigParser if py25: msg = "Python 2.5 support may be dropped in future versions of Bottle." 
warnings.warn(msg, DeprecationWarning) from UserDict import DictMixin def next(it): return it.next() bytes = str else: # 2.6, 2.7 from collections import MutableMapping as DictMixin unicode = unicode json_loads = json_lds eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec')) # Some helpers for string/byte handling def tob(s, enc='utf8'): return s.encode(enc) if isinstance(s, unicode) else bytes(s) def touni(s, enc='utf8', err='strict'): if isinstance(s, bytes): return s.decode(enc, err) else: return unicode(s or ("" if s is None else s)) tonat = touni if py3k else tob # 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense). # 3.1 needs a workaround. if py31: from io import TextIOWrapper class NCTextIOWrapper(TextIOWrapper): def close(self): pass # Keep wrapped buffer open. # A bug in functools causes it to break if the wrapper is an instance method def update_wrapper(wrapper, wrapped, *a, **ka): try: functools.update_wrapper(wrapper, wrapped, *a, **ka) except AttributeError: pass # These helpers are used at module level and need to be defined first. # And yes, I know PEP-8, but sometimes a lower-case classname makes more sense. def depr(message, strict=False): warnings.warn(message, DeprecationWarning, stacklevel=3) def makelist(data): # This is just too handy if isinstance(data, (tuple, list, set, dict)): return list(data) elif data: return [data] else: return [] class DictProperty(object): def __init__(self, attr, key=None, read_only=False): self.attr, self.key, self.read_only = attr, key, read_only def __call__(self, func): functools.update_wrapper(self, func, updated=[]) self.getter, self.key = func, self.key or func.__name__ return self def __get__(self, obj, cls): if obj is None: return self key, storage = self.key, getattr(obj, self.attr) if key not in storage: storage[key] = self.getter(obj) return storage[key] def __set__(self, obj, value): if self.read_only: raise AttributeError("Read-Only property.") getattr(obj, self.attr)[self.key] = value def __delete__(self, obj): if self.read_only: raise AttributeError("Read-Only property.") del getattr(obj, self.attr)[self.key] class cached_property(object): def __init__(self, func): self.__doc__ = getattr(func, '__doc__') self.func = func def __get__(self, obj, cls): if obj is None: return self value = obj.__dict__[self.func.__name__] = self.func(obj) return value class lazy_attribute(object): def __init__(self, func): functools.update_wrapper(self, func, updated=[]) self.getter = func def __get__(self, obj, cls): value = self.getter(cls) setattr(cls, self.__name__, value) return value ############################################################################### # Exceptions and Events ######################################################## ############################################################################### class BottleException(Exception): pass ############################################################################### # Routing ###################################################################### ############################################################################### class RouteError(BottleException): class RouteReset(BottleException): class RouterUnknownModeError(RouteError): pass class RouteSyntaxError(RouteError): class RouteBuildError(RouteError): def _re_flatten(p): if '(' not in p: return p return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))', lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p) class Router(object): default_pattern = '[^/]+' 
default_filter = 're' #: The current CPython regexp implementation does not allow more #: than 99 matching groups per regular expression. _MAX_GROUPS_PER_PATTERN = 99 def __init__(self, strict=False): self.rules = [] # All rules in order self._groups = {} # index of regexes to find them in dyna_routes self.builder = {} # Data structure for the url builder self.static = {} # Search structure for static routes self.dyna_routes = {} self.dyna_regexes = {} # Search structure for dynamic routes #: If true, static routes are no longer checked first. self.strict_order = strict self.filters = { 're': lambda conf: (_re_flatten(conf or self.default_pattern), None, None), 'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))), 'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))), 'path': lambda conf: (r'.+?', None, None)} def add_filter(self, name, func): self.filters[name] = func rule_syntax = re.compile('(\\\\*)' '(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?: '|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)' '(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))') def _itertokens(self, rule): offset, prefix = 0, '' for match in self.rule_syntax.finditer(rule): prefix += rule[offset:match.start()] g = match.groups() if len(g[0])%2: # Escaped wildcard prefix += match.group(0)[len(g[0]):] offset = match.end() continue if prefix: yield prefix, None, None name, filtr, conf = g[4:7] if g[2] is None else g[1:4] yield name, filtr or 'default', conf or None offset, prefix = match.end(), '' if offset <= len(rule) or prefix: yield prefix+rule[offset:], None, None def add(self, rule, method, target, name=None): anons = 0 # Number of anonymous wildcards found keys = [] # Names of keys pattern = '' # Regular expression pattern with named groups filters = [] # Lists of wildcard input filters builder = [] # Data structure for the URL builder is_static = True for key, mode, conf in self._itertokens(rule): if mode: is_static = False if mode == 'default': mode = self.default_filter mask, in_filter, out_filter = self.filters[mode](conf) if not key: pattern += '(?:%s)' % mask key = 'anon%d' % anons anons += 1 else: pattern += '(?P<%s>%s)' % (key, mask) keys.append(key) if in_filter: filters.append((key, in_filter)) builder.append((key, out_filter or str)) elif key: pattern += re.escape(key) builder.append((None, key)) self.builder[rule] = builder if name: self.builder[name] = builder if is_static and not self.strict_order: self.static.setdefault(method, {}) self.static[method][self.build(rule)] = (target, None) return try: re_pattern = re.compile('^(%s)$' % pattern) re_match = re_pattern.match except re.error: raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e())) if filters: def getargs(path): url_args = re_match(path).groupdict() for name, wildcard_filter in filters: try: url_args[name] = wildcard_filter(url_args[name]) except ValueError: raise HTTPError(400, 'Path has wrong format.') return url_args elif re_pattern.groupindex: def getargs(path): return re_match(path).groupdict() else: getargs = None flatpat = _re_flatten(pattern) whole_rule = (rule, flatpat, target, getargs) if (flatpat, method) in self._groups: if DEBUG: msg = 'Route <%s %s> overwrites a previously defined route' warnings.warn(msg % (method, rule), RuntimeWarning) self.dyna_routes[method][self._groups[flatpat, method]] = whole_rule else: self.dyna_routes.setdefault(method, []).append(whole_rule) self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1 self._compile(method) def _compile(self, method): all_rules = 
self.dyna_routes[method] comborules = self.dyna_regexes[method] = [] maxgroups = self._MAX_GROUPS_PER_PATTERN for x in range(0, len(all_rules), maxgroups): some = all_rules[x:x+maxgroups] combined = (flatpat for (_, flatpat, _, _) in some) combined = '|'.join('(^%s$)' % flatpat for flatpat in combined) combined = re.compile(combined).match rules = [(target, getargs) for (_, _, target, getargs) in some] comborules.append((combined, rules)) def build(self, _name, *anons, **query): builder = self.builder.get(_name) if not builder: raise RouteBuildError("No route with that name.", _name) try: for i, value in enumerate(anons): query['anon%d'%i] = value url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder]) return url if not query else url+'?'+urlencode(query) except KeyError: raise RouteBuildError('Missing URL argument: %r' % _e().args[0]) def match(self, environ): verb = environ['REQUEST_METHOD'].upper() path = environ['PATH_INFO'] or '/' if verb == 'HEAD': methods = ['PROXY', verb, 'GET', 'ANY'] else: methods = ['PROXY', verb, 'ANY'] for method in methods: if method in self.static and path in self.static[method]: target, getargs = self.static[method][path] return target, getargs(path) if getargs else {} elif method in self.dyna_regexes: for combined, rules in self.dyna_regexes[method]: match = combined(path) if match: target, getargs = rules[match.lastindex - 1] return target, getargs(path) if getargs else {} # No matching route found. Collect alternative methods for 405 response allowed = set([]) nocheck = set(methods) for method in set(self.static) - nocheck: if path in self.static[method]: allowed.add(verb) for method in set(self.dyna_regexes) - allowed - nocheck: for combined, rules in self.dyna_regexes[method]: match = combined(path) if match: allowed.add(method) if allowed: allow_header = ",".join(sorted(allowed)) raise HTTPError(405, "Method not allowed.", Allow=allow_header) # No matching route and no alternative method found. We give up raise HTTPError(404, "Not found: " + repr(path)) class Route(object): def __init__(self, app, rule, method, callback, name=None, plugins=None, skiplist=None, **config): #: The application this route is installed to. self.app = app #: The path-rule string (e.g. ``/wiki/<page>``). self.rule = rule #: The HTTP method as a string (e.g. ``GET``). self.method = method #: The original callback with no plugins applied. Useful for introspection. self.callback = callback #: The name of the route (if specified) or ``None``. self.name = name or None #: A list of route-specific plugins (see :meth:`Bottle.route`). self.plugins = plugins or [] #: A list of plugins to not apply to this route (see :meth:`Bottle.route`). self.skiplist = skiplist or [] #: Additional keyword arguments passed to the :meth:`Bottle.route` #: decorator are stored in this dictionary. Used for route-specific #: plugin configuration and meta-data. 
self.config = ConfigDict().load_dict(config) @cached_property def call(self): return self._make_callback() def reset(self): self.__dict__.pop('call', None) def prepare(self): self.call def all_plugins(self): unique = set() for p in reversed(self.app.plugins + self.plugins): if True in self.skiplist: break name = getattr(p, 'name', False) if name and (name in self.skiplist or name in unique): continue if p in self.skiplist or type(p) in self.skiplist: continue if name: unique.add(name) yield p def _make_callback(self): callback = self.callback for plugin in self.all_plugins(): try: if hasattr(plugin, 'apply'): callback = plugin.apply(callback, self) else: callback = plugin(callback) except RouteReset: # Try again with changed configuration. return self._make_callback() if not callback is self.callback: update_wrapper(callback, self.callback) return callback def get_undecorated_callback(self): func = self.callback func = getattr(func, '__func__' if py3k else 'im_func', func) closure_attr = '__closure__' if py3k else 'func_closure' while hasattr(func, closure_attr) and getattr(func, closure_attr): func = getattr(func, closure_attr)[0].cell_contents return func def get_callback_args(self): return getargspec(self.get_undecorated_callback())[0] def get_config(self, key, default=None): for conf in (self.config, self.app.conifg): if key in conf: return conf[key] return default def __repr__(self): cb = self.get_undecorated_callback() return '<%s %r %r>' % (self.method, self.rule, cb) ############################################################################### # Application Object ########################################################### ############################################################################### class Bottle(object): def __init__(self, catchall=True, autojson=True): #: A :class:`ConfigDict` for app specific configuration. self.config = ConfigDict() self.config._on_change = functools.partial(self.trigger_hook, 'config') self.config.meta_set('autojson', 'validate', bool) self.config.meta_set('catchall', 'validate', bool) self.config['catchall'] = catchall self.config['autojson'] = autojson #: A :class:`ResourceManager` for application files self.resources = ResourceManager() self.routes = [] # List of installed :class:`Route` instances. self.router = Router() # Maps requests to :class:`Route` instances. self.error_handler = {} # Core plugins self.plugins = [] # List of installed plugins. 
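        # The two core plugins installed just below are what turn a ``dict``
        # returned from a handler into a JSON response and what honour the
        # ``template=...`` route config key. Plugins added later via
        # ``app.install(...)`` land in the same list and are applied lazily the
        # next time each route is called.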
if self.config['autojson']: self.install(JSONPlugin()) self.install(TemplatePlugin()) #: If true, most exceptions are caught and returned as :exc:`HTTPError` catchall = DictProperty('config', 'catchall') __hook_names = 'before_request', 'after_request', 'app_reset', 'config' __hook_reversed = 'after_request' @cached_property def _hooks(self): return dict((name, []) for name in self.__hook_names) def add_hook(self, name, func): if name in self.__hook_reversed: self._hooks[name].insert(0, func) else: self._hooks[name].append(func) def remove_hook(self, name, func): if name in self._hooks and func in self._hooks[name]: self._hooks[name].remove(func) return True def trigger_hook(self, __name, *args, **kwargs): return [hook(*args, **kwargs) for hook in self._hooks[__name][:]] def hook(self, name): def decorator(func): self.add_hook(name, func) return func return decorator def mount(self, prefix, app, **options): segments = [p for p in prefix.split('/') if p] if not segments: raise ValueError('Empty path prefix.') path_depth = len(segments) def mountpoint_wrapper(): try: request.path_shift(path_depth) rs = HTTPResponse([]) def start_response(status, headerlist, exc_info=None): if exc_info: _raise(*exc_info) rs.status = status for name, value in headerlist: rs.add_header(name, value) return rs.body.append body = app(request.environ, start_response) if body and rs.body: body = itertools.chain(rs.body, body) rs.body = body or rs.body return rs finally: request.path_shift(-path_depth) options.setdefault('skip', True) options.setdefault('method', 'PROXY') options.setdefault('mountpoint', {'prefix': prefix, 'target': app}) options['callback'] = mountpoint_wrapper self.route('/%s/<:re:.*>' % '/'.join(segments), **options) if not prefix.endswith('/'): self.route('/' + '/'.join(segments), **options) def merge(self, routes): if isinstance(routes, Bottle): routes = routes.routes for route in routes: self.add_route(route) def install(self, plugin): if hasattr(plugin, 'setup'): plugin.setup(self) if not callable(plugin) and not hasattr(plugin, 'apply'): raise TypeError("Plugins must be callable or implement .apply()") self.plugins.append(plugin) self.reset() return plugin def uninstall(self, plugin): removed, remove = [], plugin for i, plugin in list(enumerate(self.plugins))[::-1]: if remove is True or remove is plugin or remove is type(plugin) \ or getattr(plugin, 'name', True) == remove: removed.append(plugin) del self.plugins[i] if hasattr(plugin, 'close'): plugin.close() if removed: self.reset() return removed def reset(self, route=None): if route is None: routes = self.routes elif isinstance(route, Route): routes = [route] else: routes = [self.routes[route]] for route in routes: route.reset() if DEBUG: for route in routes: route.prepare() self.trigger_hook('app_reset') def close(self): for plugin in self.plugins: if hasattr(plugin, 'close'): plugin.close() def run(self, **kwargs): run(self, **kwargs) def match(self, environ): return self.router.match(environ) def get_url(self, routename, **kargs): scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/' location = self.router.build(routename, **kargs).lstrip('/') return urljoin(urljoin('/', scriptname), location) def add_route(self, route): self.routes.append(route) self.router.add(route.rule, route.method, route, name=route.name) if DEBUG: route.prepare() def route(self, path=None, method='GET', callback=None, name=None, apply=None, skip=None, **config): if callable(path): path, callback = None, path plugins = makelist(apply) skiplist = 
makelist(skip) def decorator(callback): if isinstance(callback, basestring): callback = load(callback) for rule in makelist(path) or yieldroutes(callback): for verb in makelist(method): verb = verb.upper() route = Route(self, rule, verb, callback, name=name, plugins=plugins, skiplist=skiplist, **config) self.add_route(route) return callback return decorator(callback) if callback else decorator def get(self, path=None, method='GET', **options): return self.route(path, method, **options) def post(self, path=None, method='POST', **options): return self.route(path, method, **options) def put(self, path=None, method='PUT', **options): return self.route(path, method, **options) def delete(self, path=None, method='DELETE', **options): return self.route(path, method, **options) def patch(self, path=None, method='PATCH', **options): return self.route(path, method, **options) def error(self, code=500): def wrapper(handler): self.error_handler[int(code)] = handler return handler return wrapper def default_error_handler(self, res): return tob(template(ERROR_PAGE_TEMPLATE, e=res)) def _handle(self, environ): path = environ['bottle.raw_path'] = environ['PATH_INFO'] if py3k: try: environ['PATH_INFO'] = path.encode('latin1').decode('utf8') except UnicodeError: return HTTPError(400, 'Invalid path string. Expected UTF-8') try: environ['bottle.app'] = self request.bind(environ) response.bind() try: self.trigger_hook('before_request') route, args = self.router.match(environ) environ['route.handle'] = route environ['bottle.route'] = route environ['route.url_args'] = args return route.call(**args) finally: self.trigger_hook('after_request') except HTTPResponse: return _e() except RouteReset: route.reset() return self._handle(environ) except (KeyboardInterrupt, SystemExit, MemoryError): raise except Exception: if not self.catchall: raise stacktrace = format_exc() environ['wsgi.errors'].write(stacktrace) return HTTPError(500, "Internal Server Error", _e(), stacktrace) def _cast(self, out, peek=None): # Empty output is done here if not out: if 'Content-Length' not in response: response['Content-Length'] = 0 return [] # Join lists of byte or unicode strings. Mixed lists are NOT supported if isinstance(out, (tuple, list))\ and isinstance(out[0], (bytes, unicode)): out = out[0][0:0].join(out) # b'abc'[0:0] -> b'' # Encode unicode strings if isinstance(out, unicode): out = out.encode(response.charset) # Byte Strings are just returned if isinstance(out, bytes): if 'Content-Length' not in response: response['Content-Length'] = len(out) return [out] # HTTPError or HTTPException (recursive, because they may wrap anything) # TODO: Handle these explicitly in handle() or make them iterable. if isinstance(out, HTTPError): out.apply(response) out = self.error_handler.get(out.status_code, self.default_error_handler)(out) return self._cast(out) if isinstance(out, HTTPResponse): out.apply(response) return self._cast(out.body) # File-like objects. if hasattr(out, 'read'): if 'wsgi.file_wrapper' in request.environ: return request.environ['wsgi.file_wrapper'](out) elif hasattr(out, 'close') or not hasattr(out, '__iter__'): return WSGIFileWrapper(out) # Handle Iterables. We peek into them to detect their inner type. 
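            # The first non-empty element decides how the rest of the iterable is
            # handled: byte chunks are chained back together unchanged, unicode
            # chunks are encoded with the response charset on the fly, and an
            # HTTPResponse (yielded, or raised while peeking) is unwrapped and
            # cast recursively. A close() method on the original iterable is
            # preserved via _closeiter so the WSGI server can still call it.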
try: iout = iter(out) first = next(iout) while not first: first = next(iout) except StopIteration: return self._cast('') except HTTPResponse: first = _e() except (KeyboardInterrupt, SystemExit, MemoryError): raise except: if not self.catchall: raise first = HTTPError(500, 'Unhandled exception', _e(), format_exc()) # These are the inner types allowed in iterator or generator objects. if isinstance(first, HTTPResponse): return self._cast(first) elif isinstance(first, bytes): new_iter = itertools.chain([first], iout) elif isinstance(first, unicode): encoder = lambda x: x.encode(response.charset) new_iter = imap(encoder, itertools.chain([first], iout)) else: msg = 'Unsupported response type: %s' % type(first) return self._cast(HTTPError(500, msg)) if hasattr(out, 'close'): new_iter = _closeiter(new_iter, out.close) return new_iter def wsgi(self, environ, start_response): try: out = self._cast(self._handle(environ)) # rfc2616 section 4.3 if response._status_code in (100, 101, 204, 304)\ or environ['REQUEST_METHOD'] == 'HEAD': if hasattr(out, 'close'): out.close() out = [] start_response(response._status_line, response.headerlist) return out except (KeyboardInterrupt, SystemExit, MemoryError): raise except: if not self.catchall: raise err = '<h1>Critical error while processing request: %s</h1>' \ % html_escape(environ.get('PATH_INFO', '/')) if DEBUG: err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \ '<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \ % (html_escape(repr(_e())), html_escape(format_exc())) environ['wsgi.errors'].write(err) headers = [('Content-Type', 'text/html; charset=UTF-8')] start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info()) return [tob(err)] def __call__(self, environ, start_response): return self.wsgi(environ, start_response) def __enter__(self): default_app.push(self) return self def __exit__(self, exc_type, exc_value, traceback): default_app.pop() ############################################################################### # HTTP and WSGI Tools ########################################################## ############################################################################### class BaseRequest(object): __slots__ = ('environ', ) #: Maximum size of memory buffer for :attr:`body` in bytes. MEMFILE_MAX = 102400 def __init__(self, environ=None): #: The wrapped WSGI environ dictionary. This is the only real attribute. #: All other attributes actually are read-only properties. 
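        #: A minimal, hand-built environ is enough to exercise most of those
        #: properties (illustrative sketch only):
        #:
        #:     rq = BaseRequest({'REQUEST_METHOD': 'GET',
        #:                       'PATH_INFO': '/hello',
        #:                       'QUERY_STRING': 'name=World'})
        #:     rq.method           # 'GET'
        #:     rq.path             # '/hello'
        #:     rq.query['name']    # 'World'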
self.environ = {} if environ is None else environ self.environ['bottle.request'] = self @DictProperty('environ', 'bottle.app', read_only=True) def app(self): raise RuntimeError('This request is not connected to an application.') @DictProperty('environ', 'bottle.route', read_only=True) def route(self): raise RuntimeError('This request is not connected to a route.') @DictProperty('environ', 'route.url_args', read_only=True) def url_args(self): raise RuntimeError('This request is not connected to a route.') @property def path(self): return '/' + self.environ.get('PATH_INFO','').lstrip('/') @property def method(self): return self.environ.get('REQUEST_METHOD', 'GET').upper() @DictProperty('environ', 'bottle.request.headers', read_only=True) def headers(self): return WSGIHeaderDict(self.environ) def get_header(self, name, default=None): return self.headers.get(name, default) @DictProperty('environ', 'bottle.request.cookies', read_only=True) def cookies(self): cookies = SimpleCookie(self.environ.get('HTTP_COOKIE','')).values() return FormsDict((c.key, c.value) for c in cookies) def get_cookie(self, key, default=None, secret=None): value = self.cookies.get(key) if secret and value: dec = cookie_decode(value, secret) # (key, value) tuple or None return dec[1] if dec and dec[0] == key else default return value or default @DictProperty('environ', 'bottle.request.query', read_only=True) def query(self): get = self.environ['bottle.get'] = FormsDict() pairs = _parse_qsl(self.environ.get('QUERY_STRING', '')) for key, value in pairs: get[key] = value return get @DictProperty('environ', 'bottle.request.forms', read_only=True) def forms(self): forms = FormsDict() for name, item in self.POST.allitems(): if not isinstance(item, FileUpload): forms[name] = item return forms @DictProperty('environ', 'bottle.request.params', read_only=True) def params(self): params = FormsDict() for key, value in self.query.allitems(): params[key] = value for key, value in self.forms.allitems(): params[key] = value return params @DictProperty('environ', 'bottle.request.files', read_only=True) def files(self): files = FormsDict() for name, item in self.POST.allitems(): if isinstance(item, FileUpload): files[name] = item return files @DictProperty('environ', 'bottle.request.json', read_only=True) def json(self): ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0] if ctype == 'application/json': b = self._get_body_string() if not b: return None return json_loads(b) return None def _iter_body(self, read, bufsize): maxread = max(0, self.content_length) while maxread: part = read(min(maxread, bufsize)) if not part: break yield part maxread -= len(part) @staticmethod def _iter_chunked(read, bufsize): err = HTTPError(400, 'Error while parsing chunked transfer body.') rn, sem, bs = tob('\r\n'), tob(';'), tob('') while True: header = read(1) while header[-2:] != rn: c = read(1) header += c if not c: raise err if len(header) > bufsize: raise err size, _, _ = header.partition(sem) try: maxread = int(tonat(size.strip()), 16) except ValueError: raise err if maxread == 0: break buff = bs while maxread > 0: if not buff: buff = read(min(maxread, bufsize)) part, buff = buff[:maxread], buff[maxread:] if not part: raise err yield part maxread -= len(part) if read(2) != rn: raise err @DictProperty('environ', 'bottle.request.body', read_only=True) def _body(self): body_iter = self._iter_chunked if self.chunked else self._iter_body read_func = self.environ['wsgi.input'].read body, body_size, is_temp_file = BytesIO(), 0, False for part in 
body_iter(read_func, self.MEMFILE_MAX): body.write(part) body_size += len(part) if not is_temp_file and body_size > self.MEMFILE_MAX: body, tmp = TemporaryFile(mode='w+b'), body body.write(tmp.getvalue()) del tmp is_temp_file = True self.environ['wsgi.input'] = body body.seek(0) return body def _get_body_string(self): clen = self.content_length if clen > self.MEMFILE_MAX: raise HTTPError(413, 'Request too large') if clen < 0: clen = self.MEMFILE_MAX + 1 data = self.body.read(clen) if len(data) > self.MEMFILE_MAX: # Fail fast raise HTTPError(413, 'Request too large') return data @property def body(self): self._body.seek(0) return self._body @property def chunked(self): return 'chunked' in self.environ.get('HTTP_TRANSFER_ENCODING', '').lower() #: An alias for :attr:`query`. GET = query @DictProperty('environ', 'bottle.request.post', read_only=True) def POST(self): post = FormsDict() # We default to application/x-www-form-urlencoded for everything that # is not multipart and take the fast path (also: 3.1 workaround) if not self.content_type.startswith('multipart/'): pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1')) for key, value in pairs: post[key] = value return post safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'): if key in self.environ: safe_env[key] = self.environ[key] args = dict(fp=self.body, environ=safe_env, keep_blank_values=True) if py31: args['fp'] = NCTextIOWrapper(args['fp'], encoding='utf8', newline='\n') elif py3k: args['encoding'] = 'utf8' data = cgi.FieldStorage(**args) self['_cgi.FieldStorage'] = data #http://bugs.python.org/issue18394#msg207958 data = data.list or [] for item in data: if item.filename: post[item.name] = FileUpload(item.file, item.name, item.filename, item.headers) else: post[item.name] = item.value return post @property def url(self): return self.urlparts.geturl() @DictProperty('environ', 'bottle.request.urlparts', read_only=True) def urlparts(self): env = self.environ http = env.get('HTTP_X_FORWARDED_PROTO') or env.get('wsgi.url_scheme', 'http') host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST') if not host: # HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients. 
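            # Fall back to what the WSGI server reports; the default port for the
            # scheme (80 for http, 443 for https) is left out of the netloc. The
            # X-Forwarded-* branches above make the reconstructed URL correct
            # behind a reverse proxy that sets those headers.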
host = env.get('SERVER_NAME', '127.0.0.1') port = env.get('SERVER_PORT') if port and port != ('80' if http == 'http' else '443'): host += ':' + port path = urlquote(self.fullpath) return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '') @property def fullpath(self): return urljoin(self.script_name, self.path.lstrip('/')) @property def query_string(self): return self.environ.get('QUERY_STRING', '') @property def script_name(self): script_name = self.environ.get('SCRIPT_NAME', '').strip('/') return '/' + script_name + '/' if script_name else '/' def path_shift(self, shift=1): script = self.environ.get('SCRIPT_NAME','/') self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift) @property def content_length(self): return int(self.environ.get('CONTENT_LENGTH') or -1) @property def content_type(self): return self.environ.get('CONTENT_TYPE', '').lower() @property def is_xhr(self): requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','') return requested_with.lower() == 'xmlhttprequest' @property def is_ajax(self): return self.is_xhr @property def auth(self): basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION','')) if basic: return basic ruser = self.environ.get('REMOTE_USER') if ruser: return (ruser, None) return None @property def remote_route(self): proxy = self.environ.get('HTTP_X_FORWARDED_FOR') if proxy: return [ip.strip() for ip in proxy.split(',')] remote = self.environ.get('REMOTE_ADDR') return [remote] if remote else [] @property def remote_addr(self): route = self.remote_route return route[0] if route else None def copy(self): return Request(self.environ.copy()) def get(self, value, default=None): return self.environ.get(value, default) def __getitem__(self, key): return self.environ[key] def __delitem__(self, key): self[key] = ""; del(self.environ[key]) def __iter__(self): return iter(self.environ) def __len__(self): return len(self.environ) def keys(self): return self.environ.keys() def __setitem__(self, key, value): if self.environ.get('bottle.request.readonly'): raise KeyError('The environ dictionary is read-only.') self.environ[key] = value todelete = () if key == 'wsgi.input': todelete = ('body', 'forms', 'files', 'params', 'post', 'json') elif key == 'QUERY_STRING': todelete = ('query', 'params') elif key.startswith('HTTP_'): todelete = ('headers', 'cookies') for key in todelete: self.environ.pop('bottle.request.'+key, None) def __repr__(self): return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url) def __getattr__(self, name): try: var = self.environ['bottle.request.ext.%s'%name] return var.__get__(self) if hasattr(var, '__get__') else var except KeyError: raise AttributeError('Attribute %r not defined.' % name) def __setattr__(self, name, value): if name == 'environ': return object.__setattr__(self, name, value) self.environ['bottle.request.ext.%s'%name] = value def _hkey(s): return s.title().replace('_','-') class HeaderProperty(object): def __init__(self, name, reader=None, writer=str, default=''): self.name, self.default = name, default self.reader, self.writer = reader, writer self.__doc__ = 'Current value of the %r header.' 
% name.title() def __get__(self, obj, _): if obj is None: return self value = obj.headers.get(self.name, self.default) return self.reader(value) if self.reader else value def __set__(self, obj, value): obj.headers[self.name] = self.writer(value) def __delete__(self, obj): del obj.headers[self.name] class BaseResponse(object): default_status = 200 default_content_type = 'text/html; charset=UTF-8' # Header blacklist for specific response codes # (rfc2616 section 10.2.3 and 10.3.5) bad_headers = { 204: set(('Content-Type',)), 304: set(('Allow', 'Content-Encoding', 'Content-Language', 'Content-Length', 'Content-Range', 'Content-Type', 'Content-Md5', 'Last-Modified'))} def __init__(self, body='', status=None, headers=None, **more_headers): self._cookies = None self._headers = {} self.body = body self.status = status or self.default_status if headers: if isinstance(headers, dict): headers = headers.items() for name, value in headers: self.add_header(name, value) if more_headers: for name, value in more_headers.items(): self.add_header(name, value) def copy(self, cls=None): cls = cls or BaseResponse assert issubclass(cls, BaseResponse) copy = cls() copy.status = self.status copy._headers = dict((k, v[:]) for (k, v) in self._headers.items()) if self._cookies: copy._cookies = SimpleCookie() copy._cookies.load(self._cookies.output()) return copy def __iter__(self): return iter(self.body) def close(self): if hasattr(self.body, 'close'): self.body.close() @property def status_line(self): return self._status_line @property def status_code(self): return self._status_code def _set_status(self, status): if isinstance(status, int): code, status = status, _HTTP_STATUS_LINES.get(status) elif ' ' in status: status = status.strip() code = int(status.split()[0]) else: raise ValueError('String status line without a reason phrase.') if not 100 <= code <= 999: raise ValueError('Status code out of range.') self._status_code = code self._status_line = str(status or ('%d Unknown' % code)) def _get_status(self): return self._status_line status = property(_get_status, _set_status, None, ''' A writeable property to change the HTTP response status. It accepts either a numeric code (100-999) or a string with a custom reason phrase (e.g. "404 Brain not found"). Both :data:`status_line` and :data:`status_code` are updated accordingly. The return value is always a status string. 
''') del _get_status, _set_status @property def headers(self): hdict = HeaderDict() hdict.dict = self._headers return hdict def __contains__(self, name): return _hkey(name) in self._headers def __delitem__(self, name): del self._headers[_hkey(name)] def __getitem__(self, name): return self._headers[_hkey(name)][-1] def __setitem__(self, name, value): self._headers[_hkey(name)] = [str(value)] def get_header(self, name, default=None): return self._headers.get(_hkey(name), [default])[-1] def set_header(self, name, value): self._headers[_hkey(name)] = [value if isinstance(value, unicode) else str(value)] def add_header(self, name, value): self._headers.setdefault(_hkey(name), []).append(str(value)) def iter_headers(self): return self.headerlist @property def headerlist(self): out = [] headers = list(self._headers.items()) if 'Content-Type' not in self._headers: headers.append(('Content-Type', [self.default_content_type])) if self._status_code in self.bad_headers: bad_headers = self.bad_headers[self._status_code] headers = [h for h in headers if h[0] not in bad_headers] out += [(name, val) for (name, vals) in headers for val in vals] if self._cookies: for c in self._cookies.values(): out.append(('Set-Cookie', c.OutputString())) if py3k: out = [ (k, v.encode('utf8').decode('latin1') if isinstance(v, unicode) else v) for (k, v) in out] return out content_type = HeaderProperty('Content-Type') content_length = HeaderProperty('Content-Length', reader=int) expires = HeaderProperty('Expires', reader=lambda x: datetime.utcfromtimestamp(parse_date(x)), writer=lambda x: http_date(x)) @property def charset(self, default='UTF-8'): if 'charset=' in self.content_type: return self.content_type.split('charset=')[-1].split(';')[0].strip() return default def set_cookie(self, name, value, secret=None, **options): if not self._cookies: self._cookies = SimpleCookie() if secret: value = touni(cookie_encode((name, value), secret)) elif not isinstance(value, basestring): raise TypeError('Secret key missing for non-string Cookie.') if len(value) > 4096: raise ValueError('Cookie value to long.') self._cookies[name] = value for key, value in options.items(): if key == 'max_age': if isinstance(value, timedelta): value = value.seconds + value.days * 24 * 3600 if key == 'expires': if isinstance(value, (datedate, datetime)): value = value.timetuple() elif isinstance(value, (int, float)): value = time.gmtime(value) value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value) self._cookies[name][key.replace('_', '-')] = value def delete_cookie(self, key, **kwargs): kwargs['max_age'] = -1 kwargs['expires'] = 0 self.set_cookie(key, '', **kwargs) def __repr__(self): out = '' for name, value in self.headerlist: out += '%s: %s\n' % (name.title(), value.strip()) return out def _local_property(): ls = threading.local() def fget(_): try: return ls.var except AttributeError: raise RuntimeError("Request context not initialized.") def fset(_, value): ls.var = value def fdel(_): del ls.var return property(fget, fset, fdel, 'Thread-local property') class LocalRequest(BaseRequest): bind = BaseRequest.__init__ environ = _local_property() class LocalResponse(BaseResponse): bind = BaseResponse.__init__ _status_line = _local_property() _status_code = _local_property() _cookies = _local_property() _headers = _local_property() body = _local_property() Request = BaseRequest Response = BaseResponse class HTTPResponse(Response, BottleException): def __init__(self, body='', status=None, headers=None, **more_headers): super(HTTPResponse, 
self).__init__(body, status, headers, **more_headers) def apply(self, other): other._status_code = self._status_code other._status_line = self._status_line other._headers = self._headers other._cookies = self._cookies other.body = self.body class HTTPError(HTTPResponse): default_status = 500 def __init__(self, status=None, body=None, exception=None, traceback=None, **options): self.exception = exception self.traceback = traceback super(HTTPError, self).__init__(body, status, **options) ############################################################################### # Plugins ###################################################################### ############################################################################### class PluginError(BottleException): pass class JSONPlugin(object): name = 'json' api = 2 def __init__(self, json_dumps=json_dumps): self.json_dumps = json_dumps def apply(self, callback, _): dumps = self.json_dumps if not dumps: return callback def wrapper(*a, **ka): try: rv = callback(*a, **ka) except HTTPError: rv = _e() if isinstance(rv, dict): #Attempt to serialize, raises exception on failure json_response = dumps(rv) #Set content type only if serialization successful response.content_type = 'application/json' return json_response elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict): rv.body = dumps(rv.body) rv.content_type = 'application/json' return rv return wrapper class TemplatePlugin(object): name = 'template' api = 2 def apply(self, callback, route): conf = route.config.get('template') if isinstance(conf, (tuple, list)) and len(conf) == 2: return view(conf[0], **conf[1])(callback) elif isinstance(conf, str): return view(conf)(callback) else: return callback #: Not a plugin, but part of the plugin API. TODO: Find a better place. class _ImportRedirect(object): def __init__(self, name, impmask): self.name = name self.impmask = impmask self.module = sys.modules.setdefault(name, imp.new_module(name)) self.module.__dict__.update({'__file__': __file__, '__path__': [], '__all__': [], '__loader__': self}) sys.meta_path.append(self) def find_module(self, fullname, path=None): if '.' 
not in fullname: return packname = fullname.rsplit('.', 1)[0] if packname != self.name: return return self def load_module(self, fullname): if fullname in sys.modules: return sys.modules[fullname] modname = fullname.rsplit('.', 1)[1] realname = self.impmask % modname __import__(realname) module = sys.modules[fullname] = sys.modules[realname] setattr(self.module, modname, module) module.__loader__ = self return module ############################################################################### # Common Utilities ############################################################# ############################################################################### class MultiDict(DictMixin): def __init__(self, *a, **k): self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items()) def __len__(self): return len(self.dict) def __iter__(self): return iter(self.dict) def __contains__(self, key): return key in self.dict def __delitem__(self, key): del self.dict[key] def __getitem__(self, key): return self.dict[key][-1] def __setitem__(self, key, value): self.append(key, value) def keys(self): return self.dict.keys() if py3k: def values(self): return (v[-1] for v in self.dict.values()) def items(self): return ((k, v[-1]) for k, v in self.dict.items()) def allitems(self): return ((k, v) for k, vl in self.dict.items() for v in vl) iterkeys = keys itervalues = values iteritems = items iterallitems = allitems else: def values(self): return [v[-1] for v in self.dict.values()] def items(self): return [(k, v[-1]) for k, v in self.dict.items()] def iterkeys(self): return self.dict.iterkeys() def itervalues(self): return (v[-1] for v in self.dict.itervalues()) def iteritems(self): return ((k, v[-1]) for k, v in self.dict.iteritems()) def iterallitems(self): return ((k, v) for k, vl in self.dict.iteritems() for v in vl) def allitems(self): return [(k, v) for k, vl in self.dict.iteritems() for v in vl] def get(self, key, default=None, index=-1, type=None): try: val = self.dict[key][index] return type(val) if type else val except Exception: pass return default def append(self, key, value): self.dict.setdefault(key, []).append(value) def replace(self, key, value): self.dict[key] = [value] def getall(self, key): return self.dict.get(key) or [] #: Aliases for WTForms to mimic other multi-dict APIs (Django) getone = get getlist = getall class FormsDict(MultiDict): #: Encoding used for attribute values. input_encoding = 'utf8' #: If true (default), unicode strings are first encoded with `latin1` #: and then decoded to match :attr:`input_encoding`. 
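    #: (PEP 3333 has the server hand over native strings that were decoded as
    #: latin1, so this round-trip recovers the original bytes before decoding
    #: them with the charset the form data was actually sent in.)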
recode_unicode = True def _fix(self, s, encoding=None): if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI return s.encode('latin1').decode(encoding or self.input_encoding) elif isinstance(s, bytes): # Python 2 WSGI return s.decode(encoding or self.input_encoding) else: return s def decode(self, encoding=None): copy = FormsDict() enc = copy.input_encoding = encoding or self.input_encoding copy.recode_unicode = False for key, value in self.allitems(): copy.append(self._fix(key, enc), self._fix(value, enc)) return copy def getunicode(self, name, default=None, encoding=None): try: return self._fix(self[name], encoding) except (UnicodeError, KeyError): return default def __getattr__(self, name, default=unicode()): # Without this guard, pickle generates a cryptic TypeError: if name.startswith('__') and name.endswith('__'): return super(FormsDict, self).__getattr__(name) return self.getunicode(name, default=default) class HeaderDict(MultiDict): def __init__(self, *a, **ka): self.dict = {} if a or ka: self.update(*a, **ka) def __contains__(self, key): return _hkey(key) in self.dict def __delitem__(self, key): del self.dict[_hkey(key)] def __getitem__(self, key): return self.dict[_hkey(key)][-1] def __setitem__(self, key, value): self.dict[_hkey(key)] = [str(value)] def append(self, key, value): self.dict.setdefault(_hkey(key), []).append(str(value)) def replace(self, key, value): self.dict[_hkey(key)] = [str(value)] def getall(self, key): return self.dict.get(_hkey(key)) or [] def get(self, key, default=None, index=-1): return MultiDict.get(self, _hkey(key), default, index) def filter(self, names): for name in [_hkey(n) for n in names]: if name in self.dict: del self.dict[name] class WSGIHeaderDict(DictMixin): #: List of keys that do not have a ``HTTP_`` prefix. cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH') def __init__(self, environ): self.environ = environ def _ekey(self, key): key = key.replace('-','_').upper() if key in self.cgikeys: return key return 'HTTP_' + key def raw(self, key, default=None): return self.environ.get(self._ekey(key), default) def __getitem__(self, key): val = self.environ[self._ekey(key)] if py3k: if isinstance(val, unicode): val = val.encode('latin1').decode('utf8') else: val = val.decode('utf8') return val def __setitem__(self, key, value): raise TypeError("%s is read-only." % self.__class__) def __delitem__(self, key): raise TypeError("%s is read-only." % self.__class__) def __iter__(self): for key in self.environ: if key[:5] == 'HTTP_': yield _hkey(key[5:]) elif key in self.cgikeys: yield _hkey(key) def keys(self): return [x for x in self] def __len__(self): return len(self.keys()) def __contains__(self, key): return self._ekey(key) in self.environ class ConfigDict(dict): __slots__ = ('_meta', '_on_change') def __init__(self): self._meta = {} self._on_change = lambda name, value: None def load_config(self, filename): conf = ConfigParser() conf.read(filename) for section in conf.sections(): for key, value in conf.items(section): if section not in ('DEFAULT', 'bottle'): key = section + '.' + key self[key] = value return self def load_dict(self, source, namespace=''): for key, value in source.items(): if isinstance(key, str): nskey = (namespace + '.' + key).strip('.') if isinstance(value, dict): self.load_dict(value, namespace=nskey) else: self[nskey] = value else: raise TypeError('Key has type %r (not a string)' % type(key)) return self def update(self, *a, **ka): prefix = '' if a and isinstance(a[0], str): prefix = a[0].strip('.') + '.' 
a = a[1:] for key, value in dict(*a, **ka).items(): self[prefix+key] = value def setdefault(self, key, value): if key not in self: self[key] = value return self[key] def __setitem__(self, key, value): if not isinstance(key, str): raise TypeError('Key has type %r (not a string)' % type(key)) value = self.meta_get(key, 'filter', lambda x: x)(value) if key in self and self[key] is value: return self._on_change(key, value) dict.__setitem__(self, key, value) def __delitem__(self, key): self._on_change(key, None) dict.__delitem__(self, key) def meta_get(self, key, metafield, default=None): return self._meta.get(key, {}).get(metafield, default) def meta_set(self, key, metafield, value): self._meta.setdefault(key, {})[metafield] = value if key in self: self[key] = self[key] def meta_list(self, key): return self._meta.get(key, {}).keys() class AppStack(list): def __call__(self): return self[-1] def push(self, value=None): if not isinstance(value, Bottle): value = Bottle() self.append(value) return value class WSGIFileWrapper(object): def __init__(self, fp, buffer_size=1024*64): self.fp, self.buffer_size = fp, buffer_size for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'): if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr)) def __iter__(self): buff, read = self.buffer_size, self.read while True: part = read(buff) if not part: return yield part class _closeiter(object): def __init__(self, iterator, close=None): self.iterator = iterator self.close_callbacks = makelist(close) def __iter__(self): return iter(self.iterator) def close(self): for func in self.close_callbacks: func() class ResourceManager(object): def __init__(self, base='./', opener=open, cachemode='all'): self.opener = opener self.base = base self.cachemode = cachemode #: A list of search paths. See :meth:`add_path` for details. self.path = [] #: A cache for resolved paths. ``res.cache.clear()`` clears the cache. self.cache = {} def add_path(self, path, base=None, index=None, create=False): base = os.path.abspath(os.path.dirname(base or self.base)) path = os.path.abspath(os.path.join(base, os.path.dirname(path))) path += os.sep if path in self.path: self.path.remove(path) if create and not os.path.isdir(path): os.makedirs(path) if index is None: self.path.append(path) else: self.path.insert(index, path) self.cache.clear() return os.path.exists(path) def __iter__(self): search = self.path[:] while search: path = search.pop() if not os.path.isdir(path): continue for name in os.listdir(path): full = os.path.join(path, name) if os.path.isdir(full): search.append(full) else: yield full def lookup(self, name): if name not in self.cache or DEBUG: for path in self.path: fpath = os.path.join(path, name) if os.path.isfile(fpath): if self.cachemode in ('all', 'found'): self.cache[name] = fpath return fpath if self.cachemode == 'all': self.cache[name] = None return self.cache[name] def open(self, name, mode='r', *args, **kwargs): fname = self.lookup(name) if not fname: raise IOError("Resource %r not found." % name) return self.opener(fname, mode=mode, *args, **kwargs) class FileUpload(object): def __init__(self, fileobj, name, filename, headers=None): #: Open file(-like) object (BytesIO buffer or temporary file) self.file = fileobj #: Name of the upload form field self.name = name #: Raw filename as sent by the client (may contain unsafe characters) self.raw_filename = filename #: A :class:`HeaderDict` with additional headers (e.g. 
content-type) self.headers = HeaderDict(headers) if headers else HeaderDict() content_type = HeaderProperty('Content-Type') content_length = HeaderProperty('Content-Length', reader=int, default=-1) @cached_property def filename(self): fname = self.raw_filename if not isinstance(fname, unicode): fname = fname.decode('utf8', 'ignore') fname = normalize('NFKD', fname).encode('ASCII', 'ignore').decode('ASCII') fname = os.path.basename(fname.replace('\\', os.path.sep)) fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip() fname = re.sub(r'[-\s]+', '-', fname).strip('.-') return fname[:255] or 'empty' def _copy_file(self, fp, chunk_size=2**16): read, write, offset = self.file.read, fp.write, self.file.tell() while 1: buf = read(chunk_size) if not buf: break write(buf) self.file.seek(offset) def save(self, destination, overwrite=False, chunk_size=2**16): if isinstance(destination, basestring): # Except file-likes here if os.path.isdir(destination): destination = os.path.join(destination, self.filename) if not overwrite and os.path.exists(destination): raise IOError('File exists.') with open(destination, 'wb') as fp: self._copy_file(fp, chunk_size) else: self._copy_file(destination, chunk_size) ############################################################################### # Application Helper ########################################################### ############################################################################### def abort(code=500, text='Unknown Error.'): raise HTTPError(code, text) def redirect(url, code=None): if not code: code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302 res = response.copy(cls=HTTPResponse) res.status = code res.body = "" res.set_header('Location', urljoin(request.url, url)) raise res def _file_iter_range(fp, offset, bytes, maxread=1024*1024): fp.seek(offset) while bytes > 0: part = fp.read(min(bytes, maxread)) if not part: break bytes -= len(part) yield part def static_file(filename, root, mimetype='auto', download=False, charset='UTF-8'): root = os.path.abspath(root) + os.sep filename = os.path.abspath(os.path.join(root, filename.strip('/\\'))) headers = dict() if not filename.startswith(root): return HTTPError(403, "Access denied.") if not os.path.exists(filename) or not os.path.isfile(filename): return HTTPError(404, "File does not exist.") if not os.access(filename, os.R_OK): return HTTPError(403, "You do not have permission to access this file.") if mimetype == 'auto': mimetype, encoding = mimetypes.guess_type(filename) if encoding: headers['Content-Encoding'] = encoding if mimetype: if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype: mimetype += '; charset=%s' % charset headers['Content-Type'] = mimetype if download: download = os.path.basename(filename if download == True else download) headers['Content-Disposition'] = 'attachment; filename="%s"' % download stats = os.stat(filename) headers['Content-Length'] = clen = stats.st_size lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime)) headers['Last-Modified'] = lm ims = request.environ.get('HTTP_IF_MODIFIED_SINCE') if ims: ims = parse_date(ims.split(";")[0].strip()) if ims is not None and ims >= int(stats.st_mtime): headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) return HTTPResponse(status=304, **headers) body = '' if request.method == 'HEAD' else open(filename, 'rb') headers["Accept-Ranges"] = "bytes" ranges = request.environ.get('HTTP_RANGE') if 'HTTP_RANGE' in request.environ: ranges = 
list(parse_range_header(request.environ['HTTP_RANGE'], clen)) if not ranges: return HTTPError(416, "Requested Range Not Satisfiable") offset, end = ranges[0] headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end-1, clen) headers["Content-Length"] = str(end-offset) if body: body = _file_iter_range(body, offset, end-offset) return HTTPResponse(body, status=206, **headers) return HTTPResponse(body, **headers) ############################################################################### # HTTP Utilities and MISC (TODO) ############################################### ############################################################################### def debug(mode=True): global DEBUG if mode: warnings.simplefilter('default') DEBUG = bool(mode) def http_date(value): if isinstance(value, (datedate, datetime)): value = value.utctimetuple() elif isinstance(value, (int, float)): value = time.gmtime(value) if not isinstance(value, basestring): value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value) return value def parse_date(ims): try: ts = email.utils.parsedate_tz(ims) return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone except (TypeError, ValueError, IndexError, OverflowError): return None def parse_auth(header): try: method, data = header.split(None, 1) if method.lower() == 'basic': user, pwd = touni(base64.b64decode(tob(data))).split(':',1) return user, pwd except (KeyError, ValueError): return None def parse_range_header(header, maxlen=0): if not header or header[:6] != 'bytes=': return ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r] for start, end in ranges: try: if not start: # bytes=-100 -> last 100 bytes start, end = max(0, maxlen-int(end)), maxlen elif not end: # bytes=100- -> all but the first 99 bytes start, end = int(start), maxlen else: # bytes=100-200 -> bytes 100-200 (inclusive) start, end = int(start), min(int(end)+1, maxlen) if 0 <= start < end <= maxlen: yield start, end except ValueError: pass def _parse_qsl(qs): r = [] for pair in qs.replace(';','&').split('&'): if not pair: continue nv = pair.split('=', 1) if len(nv) != 2: nv.append('') key = urlunquote(nv[0].replace('+', ' ')) value = urlunquote(nv[1].replace('+', ' ')) r.append((key, value)) return r def _lscmp(a, b): return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b) def cookie_encode(data, key): msg = base64.b64encode(pickle.dumps(data, -1)) sig = base64.b64encode(hmac.new(tob(key), msg).digest()) return tob('!') + sig + tob('?') + msg def cookie_decode(data, key): data = tob(data) if cookie_is_encoded(data): sig, msg = data.split(tob('?'), 1) if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg).digest())): return pickle.loads(base64.b64decode(msg)) return None def cookie_is_encoded(data): return bool(data.startswith(tob('!')) and tob('?') in data) def html_escape(string): return string.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;')\ .replace('"','&quot;').replace("'",'&#039;') def html_quote(string): return '"%s"' % html_escape(string).replace('\n','&#10;')\ .replace('\r','&#13;').replace('\t','&#9;') def yieldroutes(func): path = '/' + func.__name__.replace('__','/').lstrip('/') spec = getargspec(func) argc = len(spec[0]) - len(spec[3] or []) path += ('/<%s>' * argc) % tuple(spec[0][:argc]) yield path for arg in spec[0][argc:]: path += '/<%s>' % arg yield path def path_shift(script_name, path_info, shift=1): if shift == 0: return script_name, path_info pathlist = path_info.strip('/').split('/') scriptlist = 
script_name.strip('/').split('/') if pathlist and pathlist[0] == '': pathlist = [] if scriptlist and scriptlist[0] == '': scriptlist = [] if 0 < shift <= len(pathlist): moved = pathlist[:shift] scriptlist = scriptlist + moved pathlist = pathlist[shift:] elif 0 > shift >= -len(scriptlist): moved = scriptlist[shift:] pathlist = moved + pathlist scriptlist = scriptlist[:shift] else: empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO' raise AssertionError("Cannot shift. Nothing left from %s" % empty) new_script_name = '/' + '/'.join(scriptlist) new_path_info = '/' + '/'.join(pathlist) if path_info.endswith('/') and pathlist: new_path_info += '/' return new_script_name, new_path_info def auth_basic(check, realm="private", text="Access denied"): def decorator(func): @functools.wraps(func) def wrapper(*a, **ka): user, password = request.auth or (None, None) if user is None or not check(user, password): err = HTTPError(401, text) err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm) return err return func(*a, **ka) return wrapper return decorator # Shortcuts for common Bottle methods. # They all refer to the current default application. def make_default_app_wrapper(name): @functools.wraps(getattr(Bottle, name)) def wrapper(*a, **ka): return getattr(app(), name)(*a, **ka) return wrapper route = make_default_app_wrapper('route') get = make_default_app_wrapper('get') post = make_default_app_wrapper('post') put = make_default_app_wrapper('put') delete = make_default_app_wrapper('delete') patch = make_default_app_wrapper('patch') error = make_default_app_wrapper('error') mount = make_default_app_wrapper('mount') hook = make_default_app_wrapper('hook') install = make_default_app_wrapper('install') uninstall = make_default_app_wrapper('uninstall') url = make_default_app_wrapper('get_url') ############################################################################### # Server Adapter ############################################################### ############################################################################### class ServerAdapter(object): quiet = False def __init__(self, host='127.0.0.1', port=8080, **options): self.options = options self.host = host self.port = int(port) def run(self, handler): # pragma: no cover pass def __repr__(self): args = ', '.join(['%s=%s'%(k,repr(v)) for k, v in self.options.items()]) return "%s(%s)" % (self.__class__.__name__, args) class CGIServer(ServerAdapter): quiet = True def run(self, handler): # pragma: no cover from wsgiref.handlers import CGIHandler def fixed_environ(environ, start_response): environ.setdefault('PATH_INFO', '') return handler(environ, start_response) CGIHandler().run(fixed_environ) class FlupFCGIServer(ServerAdapter): def run(self, handler): # pragma: no cover import flup.server.fcgi self.options.setdefault('bindAddress', (self.host, self.port)) flup.server.fcgi.WSGIServer(handler, **self.options).run() class WSGIRefServer(ServerAdapter): def run(self, app): # pragma: no cover from wsgiref.simple_server import make_server from wsgiref.simple_server import WSGIRequestHandler, WSGIServer import socket class FixedHandler(WSGIRequestHandler): def address_string(self): # Prevent reverse DNS lookups please. return self.client_address[0] def log_request(*args, **kw): if not self.quiet: return WSGIRequestHandler.log_request(*args, **kw) handler_cls = self.options.get('handler_class', FixedHandler) server_cls = self.options.get('server_class', WSGIServer) if ':' in self.host: # Fix wsgiref for IPv6 addresses. 
if getattr(server_cls, 'address_family') == socket.AF_INET: class server_cls(server_cls): address_family = socket.AF_INET6 self.srv = make_server(self.host, self.port, app, server_cls, handler_cls) self.port = self.srv.server_port # update port actual port (0 means random) try: self.srv.serve_forever() except KeyboardInterrupt: self.srv.server_close() # Prevent ResourceWarning: unclosed socket raise class CherryPyServer(ServerAdapter): def run(self, handler): # pragma: no cover from cherrypy import wsgiserver self.options['bind_addr'] = (self.host, self.port) self.options['wsgi_app'] = handler certfile = self.options.get('certfile') if certfile: del self.options['certfile'] keyfile = self.options.get('keyfile') if keyfile: del self.options['keyfile'] server = wsgiserver.CherryPyWSGIServer(**self.options) if certfile: server.ssl_certificate = certfile if keyfile: server.ssl_private_key = keyfile try: server.start() finally: server.stop() class WaitressServer(ServerAdapter): def run(self, handler): from waitress import serve serve(handler, host=self.host, port=self.port) class PasteServer(ServerAdapter): def run(self, handler): # pragma: no cover from paste import httpserver from paste.translogger import TransLogger handler = TransLogger(handler, setup_console_handler=(not self.quiet)) httpserver.serve(handler, host=self.host, port=str(self.port), **self.options) class MeinheldServer(ServerAdapter): def run(self, handler): from meinheld import server server.listen((self.host, self.port)) server.run(handler) class FapwsServer(ServerAdapter): def run(self, handler): # pragma: no cover import fapws._evwsgi as evwsgi from fapws import base, config port = self.port if float(config.SERVER_IDENT[-2:]) > 0.4: # fapws3 silently changed its API in 0.5 port = str(port) evwsgi.start(self.host, port) # fapws3 never releases the GIL. Complain upstream. I tried. No luck. if 'BOTTLE_CHILD' in os.environ and not self.quiet: _stderr("WARNING: Auto-reloading does not work with Fapws3.\n") _stderr(" (Fapws3 breaks python thread support)\n") evwsgi.set_base_module(base) def app(environ, start_response): environ['wsgi.multiprocess'] = False return handler(environ, start_response) evwsgi.wsgi_cb(('', app)) evwsgi.run() class TornadoServer(ServerAdapter): def run(self, handler): # pragma: no cover import tornado.wsgi, tornado.httpserver, tornado.ioloop container = tornado.wsgi.WSGIContainer(handler) server = tornado.httpserver.HTTPServer(container) server.listen(port=self.port,address=self.host) tornado.ioloop.IOLoop.instance().start() class AppEngineServer(ServerAdapter): quiet = True def run(self, handler): from google.appengine.ext.webapp import util # A main() function in the handler script enables 'App Caching'. # Lets makes sure it is there. This _really_ improves performance. 
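        # In other words: if __main__ does not define main(), inject one that
        # just calls util.run_wsgi_app(handler), so App Engine can cache the
        # module between requests instead of re-importing it every time.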
module = sys.modules.get('__main__') if module and not hasattr(module, 'main'): module.main = lambda: util.run_wsgi_app(handler) util.run_wsgi_app(handler) class TwistedServer(ServerAdapter): def run(self, handler): from twisted.web import server, wsgi from twisted.python.threadpool import ThreadPool from twisted.internet import reactor thread_pool = ThreadPool() thread_pool.start() reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop) factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler)) reactor.listenTCP(self.port, factory, interface=self.host) if not reactor.running: reactor.run() class DieselServer(ServerAdapter): def run(self, handler): from diesel.protocols.wsgi import WSGIApplication app = WSGIApplication(handler, port=self.port) app.run() class GeventServer(ServerAdapter): def run(self, handler): from gevent import wsgi, pywsgi, local if not isinstance(threading.local(), local.local): msg = "Bottle requires gevent.monkey.patch_all() (before import)" raise RuntimeError(msg) if not self.options.pop('fast', None): wsgi = pywsgi self.options['log'] = None if self.quiet else 'default' address = (self.host, self.port) server = wsgi.WSGIServer(address, handler, **self.options) if 'BOTTLE_CHILD' in os.environ: import signal signal.signal(signal.SIGINT, lambda s, f: server.stop()) server.serve_forever() class GeventSocketIOServer(ServerAdapter): def run(self,handler): from socketio import server address = (self.host, self.port) server.SocketIOServer(address, handler, **self.options).serve_forever() class GunicornServer(ServerAdapter): def run(self, handler): from gunicorn.app.base import Application config = {'bind': "%s:%d" % (self.host, int(self.port))} config.update(self.options) class GunicornApplication(Application): def init(self, parser, opts, args): return config def load(self): return handler GunicornApplication().run() class EventletServer(ServerAdapter): def run(self, handler): from eventlet import wsgi, listen, patcher if not patcher.is_monkey_patched(os): msg = "Bottle requires eventlet.monkey_patch() (before import)" raise RuntimeError(msg) socket_args = {} for arg in ('backlog', 'family'): try: socket_args[arg] = self.options.pop(arg) except KeyError: pass address = (self.host, self.port) try: wsgi.server(listen(address, **socket_args), handler, log_output=(not self.quiet)) except TypeError: # Fallback, if we have old version of eventlet wsgi.server(listen(address), handler) class RocketServer(ServerAdapter): def run(self, handler): from rocket import Rocket server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler }) server.start() class BjoernServer(ServerAdapter): def run(self, handler): from bjoern import run run(handler, self.host, self.port) class AutoServer(ServerAdapter): adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer, WSGIRefServer] def run(self, handler): for sa in self.adapters: try: return sa(self.host, self.port, **self.options).run(handler) except ImportError: pass server_names = { 'cgi': CGIServer, 'flup': FlupFCGIServer, 'wsgiref': WSGIRefServer, 'waitress': WaitressServer, 'cherrypy': CherryPyServer, 'paste': PasteServer, 'fapws3': FapwsServer, 'tornado': TornadoServer, 'gae': AppEngineServer, 'twisted': TwistedServer, 'diesel': DieselServer, 'meinheld': MeinheldServer, 'gunicorn': GunicornServer, 'eventlet': EventletServer, 'gevent': GeventServer, 'geventSocketIO':GeventSocketIOServer, 'rocket': RocketServer, 'bjoern' : BjoernServer, 'auto': AutoServer, } 
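# Illustrative sketch only (the helper name below is made up and never called by
# bottle itself): the string handed to run(server=...) is resolved against the
# server_names table above, so a string key and the corresponding adapter class
# select the same backend.
def _example_pick_server():
    app = Bottle()

    @app.route('/hello')
    def hello():
        return 'Hello World'

    # Equivalent ways to start the built-in wsgiref-based development server:
    run(app, server='wsgiref', host='127.0.0.1', port=8080)  # by name
    # run(app, server=WSGIRefServer, host='127.0.0.1', port=8080)  # by class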
############################################################################### # Application Control ########################################################## ############################################################################### def load(target, **namespace): module, target = target.split(":", 1) if ':' in target else (target, None) if module not in sys.modules: __import__(module) if not target: return sys.modules[module] if target.isalnum(): return getattr(sys.modules[module], target) package_name = module.split('.')[0] namespace[package_name] = sys.modules[package_name] return eval('%s.%s' % (module, target), namespace) def load_app(target): global NORUN; NORUN, nr_old = True, NORUN tmp = default_app.push() # Create a new "default application" try: rv = load(target) # Import the target module return rv if callable(rv) else tmp finally: default_app.remove(tmp) # Remove the temporary added default application NORUN = nr_old _debug = debug def run(app=None, server='wsgiref', host='127.0.0.1', port=8080, interval=1, reloader=False, quiet=False, plugins=None, debug=None, **kargs): if NORUN: return if reloader and not os.environ.get('BOTTLE_CHILD'): lockfile = None try: fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock') os.close(fd) # We only need this file to exist. We never write to it while os.path.exists(lockfile): args = [sys.executable] + sys.argv environ = os.environ.copy() environ['BOTTLE_CHILD'] = 'true' environ['BOTTLE_LOCKFILE'] = lockfile p = subprocess.Popen(args, env=environ) while p.poll() is None: # Busy wait... os.utime(lockfile, None) # I am alive! time.sleep(interval) if p.poll() != 3: if os.path.exists(lockfile): os.unlink(lockfile) sys.exit(p.poll()) except KeyboardInterrupt: pass finally: if os.path.exists(lockfile): os.unlink(lockfile) return try: if debug is not None: _debug(debug) app = app or default_app() if isinstance(app, basestring): app = load_app(app) if not callable(app): raise ValueError("Application is not callable: %r" % app) for plugin in plugins or []: if isinstance(plugin, basestring): plugin = load(plugin) app.install(plugin) if server in server_names: server = server_names.get(server) if isinstance(server, basestring): server = load(server) if isinstance(server, type): server = server(host=host, port=port, **kargs) if not isinstance(server, ServerAdapter): raise ValueError("Unknown or unsupported server: %r" % server) server.quiet = server.quiet or quiet if not server.quiet: _stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server))) _stderr("Listening on http://%s:%d/\n" % (server.host, server.port)) _stderr("Hit Ctrl-C to quit.\n\n") if reloader: lockfile = os.environ.get('BOTTLE_LOCKFILE') bgcheck = FileCheckerThread(lockfile, interval) with bgcheck: server.run(app) if bgcheck.status == 'reload': sys.exit(3) else: server.run(app) except KeyboardInterrupt: pass except (SystemExit, MemoryError): raise except: if not reloader: raise if not getattr(server, 'quiet', quiet): print_exc() time.sleep(interval) sys.exit(3) class FileCheckerThread(threading.Thread): def __init__(self, lockfile, interval): threading.Thread.__init__(self) self.daemon = True self.lockfile, self.interval = lockfile, interval #: Is one of 'reload', 'error' or 'exit' self.status = None def run(self): exists = os.path.exists mtime = lambda p: os.stat(p).st_mtime files = dict() for module in list(sys.modules.values()): path = getattr(module, '__file__', '') if path[-4:] in ('.pyo', '.pyc'): path = path[:-1] if path and exists(path): 
files[path] = mtime(path) while not self.status: if not exists(self.lockfile)\ or mtime(self.lockfile) < time.time() - self.interval - 5: self.status = 'error' thread.interrupt_main() for path, lmtime in list(files.items()): if not exists(path) or mtime(path) > lmtime: self.status = 'reload' thread.interrupt_main() break time.sleep(self.interval) def __enter__(self): self.start() def __exit__(self, exc_type, *_): if not self.status: self.status = 'exit' # silent exit self.join() return exc_type is not None and issubclass(exc_type, KeyboardInterrupt) ############################################################################### # Template Adapters ############################################################ ############################################################################### class TemplateError(HTTPError): def __init__(self, message): HTTPError.__init__(self, 500, message) class BaseTemplate(object): extensions = ['tpl','html','thtml','stpl'] settings = {} #used in prepare() defaults = {} #used in render() def __init__(self, source=None, name=None, lookup=None, encoding='utf8', **settings): self.name = name self.source = source.read() if hasattr(source, 'read') else source self.filename = source.filename if hasattr(source, 'filename') else None self.lookup = [os.path.abspath(x) for x in lookup] if lookup else [] self.encoding = encoding self.settings = self.settings.copy() # Copy from class variable self.settings.update(settings) # Apply if not self.source and self.name: self.filename = self.search(self.name, self.lookup) if not self.filename: raise TemplateError('Template %s not found.' % repr(name)) if not self.source and not self.filename: raise TemplateError('No template specified.') self.prepare(**self.settings) @classmethod def search(cls, name, lookup=None): if not lookup: depr('The template lookup path list should not be empty.', True) #0.12 lookup = ['.'] if os.path.isabs(name) and os.path.isfile(name): depr('Absolute template path names are deprecated.', True) #0.12 return os.path.abspath(name) for spath in lookup: spath = os.path.abspath(spath) + os.sep fname = os.path.abspath(os.path.join(spath, name)) if not fname.startswith(spath): continue if os.path.isfile(fname): return fname for ext in cls.extensions: if os.path.isfile('%s.%s' % (fname, ext)): return '%s.%s' % (fname, ext) @classmethod def global_config(cls, key, *args): if args: cls.settings = cls.settings.copy() # Make settings local to class cls.settings[key] = args[0] else: return cls.settings[key] def prepare(self, **options): raise NotImplementedError def render(self, *args, **kwargs): raise NotImplementedError class MakoTemplate(BaseTemplate): def prepare(self, **options): from mako.template import Template from mako.lookup import TemplateLookup options.update({'input_encoding':self.encoding}) options.setdefault('format_exceptions', bool(DEBUG)) lookup = TemplateLookup(directories=self.lookup, **options) if self.source: self.tpl = Template(self.source, lookup=lookup, **options) else: self.tpl = Template(uri=self.name, filename=self.filename, lookup=lookup, **options) def render(self, *args, **kwargs): for dictarg in args: kwargs.update(dictarg) _defaults = self.defaults.copy() _defaults.update(kwargs) return self.tpl.render(**_defaults) class CheetahTemplate(BaseTemplate): def prepare(self, **options): from Cheetah.Template import Template self.context = threading.local() self.context.vars = {} options['searchList'] = [self.context.vars] if self.source: self.tpl = Template(source=self.source, **options) 
else: self.tpl = Template(file=self.filename, **options) def render(self, *args, **kwargs): for dictarg in args: kwargs.update(dictarg) self.context.vars.update(self.defaults) self.context.vars.update(kwargs) out = str(self.tpl) self.context.vars.clear() return out class Jinja2Template(BaseTemplate): def prepare(self, filters=None, tests=None, globals={}, **kwargs): from jinja2 import Environment, FunctionLoader self.env = Environment(loader=FunctionLoader(self.loader), **kwargs) if filters: self.env.filters.update(filters) if tests: self.env.tests.update(tests) if globals: self.env.globals.update(globals) if self.source: self.tpl = self.env.from_string(self.source) else: self.tpl = self.env.get_template(self.filename) def render(self, *args, **kwargs): for dictarg in args: kwargs.update(dictarg) _defaults = self.defaults.copy() _defaults.update(kwargs) return self.tpl.render(**_defaults) def loader(self, name): fname = self.search(name, self.lookup) if not fname: return with open(fname, "rb") as f: return f.read().decode(self.encoding) class SimpleTemplate(BaseTemplate): def prepare(self, escape_func=html_escape, noescape=False, syntax=None, **ka): self.cache = {} enc = self.encoding self._str = lambda x: touni(x, enc) self._escape = lambda x: escape_func(touni(x, enc)) self.syntax = syntax if noescape: self._str, self._escape = self._escape, self._str @cached_property def co(self): return compile(self.code, self.filename or '<string>', 'exec') @cached_property def code(self): source = self.source if not source: with open(self.filename, 'rb') as f: source = f.read() try: source, encoding = touni(source), 'utf8' except UnicodeError: depr('Template encodings other than utf8 are no longer supported.') #0.11 source, encoding = touni(source, 'latin1'), 'latin1' parser = StplParser(source, encoding=encoding, syntax=self.syntax) code = parser.translate() self.encoding = parser.encoding return code def _rebase(self, _env, _name=None, **kwargs): _env['_rebase'] = (_name, kwargs) def _include(self, _env, _name=None, **kwargs): env = _env.copy() env.update(kwargs) if _name not in self.cache: self.cache[_name] = self.__class__(name=_name, lookup=self.lookup) return self.cache[_name].execute(env['_stdout'], env) def execute(self, _stdout, kwargs): env = self.defaults.copy() env.update(kwargs) env.update({'_stdout': _stdout, '_printlist': _stdout.extend, 'include': functools.partial(self._include, env), 'rebase': functools.partial(self._rebase, env), '_rebase': None, '_str': self._str, '_escape': self._escape, 'get': env.get, 'setdefault': env.setdefault, 'defined': env.__contains__ }) eval(self.co, env) if env.get('_rebase'): subtpl, rargs = env.pop('_rebase') rargs['base'] = ''.join(_stdout) #copy stdout del _stdout[:] # clear stdout return self._include(env, subtpl, **rargs) return env def render(self, *args, **kwargs): env = {}; stdout = [] for dictarg in args: env.update(dictarg) env.update(kwargs) self.execute(stdout, env) return ''.join(stdout) class StplSyntaxError(TemplateError): pass class StplParser(object): _re_cache = {} #: Cache for compiled re patterns # This huge pile of voodoo magic splits python code into 8 different tokens. 
# 1: All kinds of python strings (trust me, it works) _re_tok = '((?m)[urbURB]?(?:\'\'(?!\')|""(?!")|\'{6}|"{6}' \ '|\'(?:[^\\\\\']|\\\\.)+?\'|"(?:[^\\\\"]|\\\\.)+?"' \ '|\'{3}(?:[^\\\\]|\\\\.|\\n)+?\'{3}' \ '|"{3}(?:[^\\\\]|\\\\.|\\n)+?"{3}))' _re_inl = _re_tok.replace('|\\n','') # We re-use this string pattern later # 2: Comments (until end of line, but not the newline itself) _re_tok += '|( # 3,4: Keywords that start or continue a python block (only start of line) _re_tok += '|^([ \\t]*(?:if|for|while|with|try|def|class)\\b)' \ '|^([ \\t]*(?:elif|else|except|finally)\\b)' # 5: Our special 'end' keyword (but only if it stands alone) _re_tok += '|((?:^|;)[ \\t]*end[ \\t]*(?=(?:%(block_close)s[ \\t]*)?\\r?$|;| # 6: A customizable end-of-code-block template token (only end of line) _re_tok += '|(%(block_close)s[ \\t]*(?=$))' # 7: And finally, a single newline. The 8th token is 'everything else' _re_tok += '|(\\r?\\n)' # Match the start tokens of code areas in a template _re_split = '(?m)^[ \t]*(\\\\?)((%(line_start)s)|(%(block_start)s))' # Match inline statements (may contain python strings) _re_inl = '%%(inline_start)s((?:%s|[^\'"\n]+?)*?)%%(inline_end)s' % _re_inl default_syntax = '<% %> % {{ }}' def __init__(self, source, syntax=None, encoding='utf8'): self.source, self.encoding = touni(source, encoding), encoding self.set_syntax(syntax or self.default_syntax) self.code_buffer, self.text_buffer = [], [] self.lineno, self.offset = 1, 0 self.indent, self.indent_mod = 0, 0 def get_syntax(self): return self._syntax def set_syntax(self, syntax): self._syntax = syntax self._tokens = syntax.split() if not syntax in self._re_cache: names = 'block_start block_close line_start inline_start inline_end' etokens = map(re.escape, self._tokens) pattern_vars = dict(zip(names.split(), etokens)) patterns = (self._re_split, self._re_tok, self._re_inl) patterns = [re.compile(p%pattern_vars) for p in patterns] self._re_cache[syntax] = patterns self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax] syntax = property(get_syntax, set_syntax) def translate(self): if self.offset: raise RuntimeError('Parser is a one time instance.') while True: m = self.re_split.search(self.source[self.offset:]) if m: text = self.source[self.offset:self.offset+m.start()] self.text_buffer.append(text) offs = self.offset self.offset += m.end() if m.group(1): # Escape syntax line, sep, _ = self.source[self.offset:].partition('\n') self.text_buffer.append(self.source[offs+m.start():offs+m.start(1)]+m.group(2)+line+sep) self.offset += len(line+sep) continue self.flush_text() self.read_code(multiline=bool(m.group(4))) else: break self.text_buffer.append(self.source[self.offset:]) self.flush_text() return ''.join(self.code_buffer) def read_code(self, multiline): code_line, comment = '', '' while True: m = self.re_tok.search(self.source[self.offset:]) if not m: code_line += self.source[self.offset:] self.offset = len(self.source) self.write_code(code_line.strip(), comment) return code_line += self.source[self.offset:self.offset+m.start()] self.offset += m.end() _str, _com, _blk1, _blk2, _end, _cend, _nl = m.groups() if code_line and (_blk1 or _blk2): # a if b else c code_line += _blk1 or _blk2 continue if _str: # Python string code_line += _str elif _com: # Python comment (up to EOL) comment = _com if multiline and _com.strip().endswith(self._tokens[1]): multiline = False # Allow end-of-block in comments elif _blk1: # Start-block keyword (if/for/while/def/try/...) 
code_line, self.indent_mod = _blk1, -1 self.indent += 1 elif _blk2: # Continue-block keyword (else/elif/except/...) code_line, self.indent_mod = _blk2, -1 elif _end: # The non-standard 'end'-keyword (ends a block) self.indent -= 1 elif _cend: # The end-code-block template token (usually '%>') if multiline: multiline = False else: code_line += _cend else: # \n self.write_code(code_line.strip(), comment) self.lineno += 1 code_line, comment, self.indent_mod = '', '', 0 if not multiline: break def flush_text(self): text = ''.join(self.text_buffer) del self.text_buffer[:] if not text: return parts, pos, nl = [], 0, '\\\n'+' '*self.indent for m in self.re_inl.finditer(text): prefix, pos = text[pos:m.start()], m.end() if prefix: parts.append(nl.join(map(repr, prefix.splitlines(True)))) if prefix.endswith('\n'): parts[-1] += nl parts.append(self.process_inline(m.group(1).strip())) if pos < len(text): prefix = text[pos:] lines = prefix.splitlines(True) if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3] elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4] parts.append(nl.join(map(repr, lines))) code = '_printlist((%s,))' % ', '.join(parts) self.lineno += code.count('\n')+1 self.write_code(code) @staticmethod def process_inline(chunk): if chunk[0] == '!': return '_str(%s)' % chunk[1:] return '_escape(%s)' % chunk def write_code(self, line, comment=''): code = ' ' * (self.indent+self.indent_mod) code += line.lstrip() + comment + '\n' self.code_buffer.append(code) def template(*args, **kwargs): tpl = args[0] if args else None adapter = kwargs.pop('template_adapter', SimpleTemplate) lookup = kwargs.pop('template_lookup', TEMPLATE_PATH) tplid = (id(lookup), tpl) if tplid not in TEMPLATES or DEBUG: settings = kwargs.pop('template_settings', {}) if isinstance(tpl, adapter): TEMPLATES[tplid] = tpl if settings: TEMPLATES[tplid].prepare(**settings) elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl: TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings) else: TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings) if not TEMPLATES[tplid]: abort(500, 'Template (%s) not found' % tpl) for dictarg in args[1:]: kwargs.update(dictarg) return TEMPLATES[tplid].render(kwargs) mako_template = functools.partial(template, template_adapter=MakoTemplate) cheetah_template = functools.partial(template, template_adapter=CheetahTemplate) jinja2_template = functools.partial(template, template_adapter=Jinja2Template) def view(tpl_name, **defaults): def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): result = func(*args, **kwargs) if isinstance(result, (dict, DictMixin)): tplvars = defaults.copy() tplvars.update(result) return template(tpl_name, **tplvars) elif result is None: return template(tpl_name, defaults) return result return wrapper return decorator mako_view = functools.partial(view, template_adapter=MakoTemplate) cheetah_view = functools.partial(view, template_adapter=CheetahTemplate) jinja2_view = functools.partial(view, template_adapter=Jinja2Template) ############################################################################### # Constants and Globals ######################################################## ############################################################################### TEMPLATE_PATH = ['./', './views/'] TEMPLATES = {} DEBUG = False NORUN = False # If set, run() does nothing. Used by load_app() #: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 
'Not Found') HTTP_CODES = httplib.responses HTTP_CODES[418] = "I'm a teapot" # RFC 2324 HTTP_CODES[428] = "Precondition Required" HTTP_CODES[429] = "Too Many Requests" HTTP_CODES[431] = "Request Header Fields Too Large" HTTP_CODES[511] = "Network Authentication Required" _HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.items()) #: The default template used for error pages. Override with @error() ERROR_PAGE_TEMPLATE = """ %%try: %%from %s import DEBUG, request <!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN"> <html> <head> <title>Error: {{e.status}}</title> <style type="text/css"> html {background-color: #eee; font-family: sans-serif;} body {background-color: #fff; border: 1px solid #ddd; padding: 15px; margin: 15px;} pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;} </style> </head> <body> <h1>Error: {{e.status}}</h1> <p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt> caused an error:</p> <pre>{{e.body}}</pre> %%if DEBUG and e.exception: <h2>Exception:</h2> <pre>{{repr(e.exception)}}</pre> %%end %%if DEBUG and e.traceback: <h2>Traceback:</h2> <pre>{{e.traceback}}</pre> %%end </body> </html> %%except ImportError: <b>ImportError:</b> Could not generate the error page. Please add bottle to the import path. %%end """ % __name__ #: A thread-safe instance of :class:`LocalRequest`. If accessed from within a #: request callback, this instance always refers to the *current* request #: (even on a multithreaded server). request = LocalRequest() #: A thread-safe instance of :class:`LocalResponse`. It is used to change the #: HTTP response for the *current* request. response = LocalResponse() #: A thread-safe namespace. Not used by Bottle. local = threading.local() # Initialize app stack (create first empty Bottle app) # BC: 0.6.4 and needed for run() app = default_app = AppStack() app.push() #: A virtual package that redirects import statements. #: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`. ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else __name__+".ext", 'bottle_%s').module if __name__ == '__main__': opt, args, parser = _cmd_options, _cmd_args, _cmd_parser if opt.version: _stdout('Bottle %s\n'%__version__) sys.exit(0) if not args: parser.print_help() _stderr('\nError: No application entry point specified.\n') sys.exit(1) sys.path.insert(0, '.') sys.modules.setdefault('bottle', sys.modules['__main__']) host, port = (opt.bind or 'localhost'), 8080 if ':' in host and host.rfind(']') < host.rfind(':'): host, port = host.rsplit(':', 1) host = host.strip('[]') run(args[0], host=host, port=int(port), server=opt.server, reloader=opt.reload, plugins=opt.plugin, debug=opt.debug) # THE END
true
true
f700e9c400b9fe92b7fafce69a144676ee7564a2
976
py
Python
src/wechaty_plugin_contrib/ding_dong_plugin.py
huangaszaq/python-wechaty
7cb046662a6135425000ef7db7539408cdc57349
[ "Apache-2.0" ]
null
null
null
src/wechaty_plugin_contrib/ding_dong_plugin.py
huangaszaq/python-wechaty
7cb046662a6135425000ef7db7539408cdc57349
[ "Apache-2.0" ]
null
null
null
src/wechaty_plugin_contrib/ding_dong_plugin.py
huangaszaq/python-wechaty
7cb046662a6135425000ef7db7539408cdc57349
[ "Apache-2.0" ]
null
null
null
"""basic ding-dong bot for the wechaty plugin""" from typing import Union from wechaty import Message, Contact, Room, FileBox from wechaty.plugin import WechatyPlugin class DingDongPlugin(WechatyPlugin): """basic ding-dong plugin""" @property def name(self): """name of the plugin""" return 'ding-dong' async def on_message(self, msg: Message): """listen message event""" from_contact = msg.talker() text = msg.text() room = msg.room() if text == '#ding': conversation: Union[ Room, Contact] = from_contact if room is None else room await conversation.ready() await conversation.say('dong') file_box = FileBox.from_url( 'https://ss3.bdstatic.com/70cFv8Sh_Q1YnxGkpoWK1HF6hhy/it/' 'u=1116676390,2305043183&fm=26&gp=0.jpg', name='ding-dong.jpg') await conversation.say(file_box)
32.533333
74
0.601434
from typing import Union from wechaty import Message, Contact, Room, FileBox from wechaty.plugin import WechatyPlugin class DingDongPlugin(WechatyPlugin): @property def name(self): return 'ding-dong' async def on_message(self, msg: Message): from_contact = msg.talker() text = msg.text() room = msg.room() if text == '#ding': conversation: Union[ Room, Contact] = from_contact if room is None else room await conversation.ready() await conversation.say('dong') file_box = FileBox.from_url( 'https://ss3.bdstatic.com/70cFv8Sh_Q1YnxGkpoWK1HF6hhy/it/' 'u=1116676390,2305043183&fm=26&gp=0.jpg', name='ding-dong.jpg') await conversation.say(file_box)
true
true
f700ea04bf267c3ec13cb8c096e3f4f44d7fe2b7
5,088
py
Python
test/python/WMCore_t/Agent_t/Heartbeat_t.py
khurtado/WMCore
f74e252412e49189a92962945a94f93bec81cd1e
[ "Apache-2.0" ]
1
2015-02-05T13:43:46.000Z
2015-02-05T13:43:46.000Z
test/python/WMCore_t/Agent_t/Heartbeat_t.py
khurtado/WMCore
f74e252412e49189a92962945a94f93bec81cd1e
[ "Apache-2.0" ]
1
2016-10-13T14:57:35.000Z
2016-10-13T14:57:35.000Z
test/python/WMCore_t/Agent_t/Heartbeat_t.py
khurtado/WMCore
f74e252412e49189a92962945a94f93bec81cd1e
[ "Apache-2.0" ]
null
null
null
""" _WorkQueueTestCase_ Unit tests for the WMBS File class. """ from __future__ import print_function import time import unittest from WMCore.Agent.HeartbeatAPI import HeartbeatAPI from WMQuality.TestInit import TestInit class HeartbeatTest(unittest.TestCase): def setUp(self): """ _setUp_ Setup the database and logging connection. Try to create all of the Heartbeat tables. Also add some dummy locations. """ self.testInit = TestInit(__file__) self.testInit.setLogging() # logLevel = logging.SQLDEBUG self.testInit.setDatabaseConnection() self.testInit.setSchema(customModules=["WMCore.Agent.Database"], useDefault=False) def tearDown(self): """ _tearDown_ Drop all the Heartbeat tables. """ self.testInit.clearDatabase() def testAddComponent(self): """ _testAddComponent_ Test creation of components and worker threads as well as the get heartbeat DAOs """ comp1 = HeartbeatAPI("testComponent1", pollInterval=60, heartbeatTimeout=600) comp1.registerComponent() self.assertEqual(comp1.getHeartbeatInfo(), []) # no worker thread yet comp1.registerWorker("testWorker1") self.assertEqual(len(comp1.getHeartbeatInfo()), 1) comp1.registerWorker("testWorker2") self.assertEqual(len(comp1.getHeartbeatInfo()), 2) comp2 = HeartbeatAPI("testComponent2", pollInterval=30, heartbeatTimeout=300) comp2.registerComponent() self.assertEqual(comp2.getHeartbeatInfo(), []) # no worker thread yet self.assertEqual(len(comp2.getAllHeartbeatInfo()), 2) comp2.registerWorker("testWorker21") self.assertEqual(len(comp2.getHeartbeatInfo()), 1) self.assertEqual(len(comp2.getAllHeartbeatInfo()), 3) comp1.updateWorkerHeartbeat("testWorker1", "Running") comp1.updateWorkerHeartbeat("testWorker2", "Running") comp2.updateWorkerHeartbeat("testWorker21", "Running") self.assertEqual(len(comp1.getAllHeartbeatInfo()), 3) self.assertEqual(len(comp2.getAllHeartbeatInfo()), 3) comp1Res = comp1.getHeartbeatInfo() comp2Res = comp2.getHeartbeatInfo() self.assertEqual(len(comp1Res), 2) self.assertEqual(len(comp2Res), 1) self.assertItemsEqual([item["name"] for item in comp1Res], ["testComponent1", "testComponent1"]) self.assertItemsEqual([item["worker_name"] for item in comp1Res], ["testWorker1", "testWorker2"]) self.assertItemsEqual([item["state"] for item in comp1Res], ["Running", "Running"]) self.assertItemsEqual([item["poll_interval"] for item in comp1Res], [60, 60]) self.assertItemsEqual([item["update_threshold"] for item in comp1Res], [600, 600]) self.assertItemsEqual([item["name"] for item in comp2Res], ["testComponent2"]) self.assertItemsEqual([item["worker_name"] for item in comp2Res], ["testWorker21"]) self.assertItemsEqual([item["state"] for item in comp2Res], ["Running"]) self.assertItemsEqual([item["poll_interval"] for item in comp2Res], [30]) self.assertItemsEqual([item["update_threshold"] for item in comp2Res], [300]) def testUpdateWorkers(self): """ _testUpdateWorkers_ Create a couple of components and workers and test the update methods """ comp1 = HeartbeatAPI("testComponent1", pollInterval=60, heartbeatTimeout=600) comp1.registerComponent() comp1.registerWorker("testWorker1") comp1.registerWorker("testWorker2") comp2 = HeartbeatAPI("testComponent2", pollInterval=30, heartbeatTimeout=300) comp2.registerComponent() comp2.registerWorker("testWorker21") comp1.updateWorkerCycle("testWorker1", 1.001, None) comp2.updateWorkerCycle("testWorker21", 1234.1, 100) hb1 = comp1.getHeartbeatInfo() hb2 = comp2.getHeartbeatInfo() for worker in hb1: if worker['worker_name'] == 'testWorker1': self.assertTrue(worker["cycle_time"] > 1.0) else: 
self.assertEqual(worker["cycle_time"], 0) self.assertItemsEqual([item["outcome"] for item in hb1], [None, None]) self.assertItemsEqual([item["error_message"] for item in hb1], [None, None]) self.assertEqual(round(hb2[0]["cycle_time"], 1), 1234.1) self.assertEqual(hb2[0]["outcome"], '100') self.assertEqual(hb2[0]["error_message"], None) # time to update workers with an error comp1.updateWorkerError("testWorker2", "BAD JOB!!!") hb1 = comp1.getHeartbeatInfo() for worker in hb1: if worker['worker_name'] == 'testWorker2': self.assertTrue(worker["last_error"] > int(time.time() - 10)) self.assertEqual(worker["state"], "Error") self.assertEqual(worker["error_message"], "BAD JOB!!!") if __name__ == "__main__": unittest.main()
38.255639
105
0.651926
from __future__ import print_function import time import unittest from WMCore.Agent.HeartbeatAPI import HeartbeatAPI from WMQuality.TestInit import TestInit class HeartbeatTest(unittest.TestCase): def setUp(self): self.testInit = TestInit(__file__) self.testInit.setLogging() self.testInit.setDatabaseConnection() self.testInit.setSchema(customModules=["WMCore.Agent.Database"], useDefault=False) def tearDown(self): self.testInit.clearDatabase() def testAddComponent(self): comp1 = HeartbeatAPI("testComponent1", pollInterval=60, heartbeatTimeout=600) comp1.registerComponent() self.assertEqual(comp1.getHeartbeatInfo(), []) comp1.registerWorker("testWorker1") self.assertEqual(len(comp1.getHeartbeatInfo()), 1) comp1.registerWorker("testWorker2") self.assertEqual(len(comp1.getHeartbeatInfo()), 2) comp2 = HeartbeatAPI("testComponent2", pollInterval=30, heartbeatTimeout=300) comp2.registerComponent() self.assertEqual(comp2.getHeartbeatInfo(), []) self.assertEqual(len(comp2.getAllHeartbeatInfo()), 2) comp2.registerWorker("testWorker21") self.assertEqual(len(comp2.getHeartbeatInfo()), 1) self.assertEqual(len(comp2.getAllHeartbeatInfo()), 3) comp1.updateWorkerHeartbeat("testWorker1", "Running") comp1.updateWorkerHeartbeat("testWorker2", "Running") comp2.updateWorkerHeartbeat("testWorker21", "Running") self.assertEqual(len(comp1.getAllHeartbeatInfo()), 3) self.assertEqual(len(comp2.getAllHeartbeatInfo()), 3) comp1Res = comp1.getHeartbeatInfo() comp2Res = comp2.getHeartbeatInfo() self.assertEqual(len(comp1Res), 2) self.assertEqual(len(comp2Res), 1) self.assertItemsEqual([item["name"] for item in comp1Res], ["testComponent1", "testComponent1"]) self.assertItemsEqual([item["worker_name"] for item in comp1Res], ["testWorker1", "testWorker2"]) self.assertItemsEqual([item["state"] for item in comp1Res], ["Running", "Running"]) self.assertItemsEqual([item["poll_interval"] for item in comp1Res], [60, 60]) self.assertItemsEqual([item["update_threshold"] for item in comp1Res], [600, 600]) self.assertItemsEqual([item["name"] for item in comp2Res], ["testComponent2"]) self.assertItemsEqual([item["worker_name"] for item in comp2Res], ["testWorker21"]) self.assertItemsEqual([item["state"] for item in comp2Res], ["Running"]) self.assertItemsEqual([item["poll_interval"] for item in comp2Res], [30]) self.assertItemsEqual([item["update_threshold"] for item in comp2Res], [300]) def testUpdateWorkers(self): comp1 = HeartbeatAPI("testComponent1", pollInterval=60, heartbeatTimeout=600) comp1.registerComponent() comp1.registerWorker("testWorker1") comp1.registerWorker("testWorker2") comp2 = HeartbeatAPI("testComponent2", pollInterval=30, heartbeatTimeout=300) comp2.registerComponent() comp2.registerWorker("testWorker21") comp1.updateWorkerCycle("testWorker1", 1.001, None) comp2.updateWorkerCycle("testWorker21", 1234.1, 100) hb1 = comp1.getHeartbeatInfo() hb2 = comp2.getHeartbeatInfo() for worker in hb1: if worker['worker_name'] == 'testWorker1': self.assertTrue(worker["cycle_time"] > 1.0) else: self.assertEqual(worker["cycle_time"], 0) self.assertItemsEqual([item["outcome"] for item in hb1], [None, None]) self.assertItemsEqual([item["error_message"] for item in hb1], [None, None]) self.assertEqual(round(hb2[0]["cycle_time"], 1), 1234.1) self.assertEqual(hb2[0]["outcome"], '100') self.assertEqual(hb2[0]["error_message"], None) comp1.updateWorkerError("testWorker2", "BAD JOB!!!") hb1 = comp1.getHeartbeatInfo() for worker in hb1: if worker['worker_name'] == 'testWorker2': self.assertTrue(worker["last_error"] > int(time.time() - 
10)) self.assertEqual(worker["state"], "Error") self.assertEqual(worker["error_message"], "BAD JOB!!!") if __name__ == "__main__": unittest.main()
true
true
f700ea4c2d04bc46a69ef4752cb0e51640f0d092
1,549
py
Python
budgetportal/migrations/0054_custompage_body.py
Lunga001/datamanager
ebe9ad9db2ee7011855f1249c46d9d1bf6f4c4d1
[ "MIT" ]
3
2019-08-31T03:08:22.000Z
2020-04-03T13:09:20.000Z
budgetportal/migrations/0054_custompage_body.py
Lunga001/datamanager
ebe9ad9db2ee7011855f1249c46d9d1bf6f4c4d1
[ "MIT" ]
97
2019-04-16T07:54:38.000Z
2022-02-10T07:25:48.000Z
budgetportal/migrations/0054_custompage_body.py
OpenUpSA/budget-portal
879c5875b1d438b9287c38d6730c86be69051ac5
[ "MIT" ]
14
2019-04-23T09:48:17.000Z
2021-04-13T17:48:40.000Z
# Generated by Django 2.2.10 on 2020-03-20 13:00 import wagtail.core.blocks import wagtail.core.fields from django.db import migrations class Migration(migrations.Migration): dependencies = [ ("budgetportal", "0053_custompage"), ] operations = [ migrations.AddField( model_name="custompage", name="body", field=wagtail.core.fields.StreamField( [ ( "section", wagtail.core.blocks.StructBlock( [ ( "presentation_class", wagtail.core.blocks.ChoiceBlock( choices=[ ("is-default", "Default"), ("is-invisible", "No background/border"), ("is-bevel", "Bevel"), ] ), ), ("heading", wagtail.core.blocks.CharBlock()), ("content", wagtail.core.blocks.RichTextBlock()), ] ), ), ("html", wagtail.core.blocks.RawHTMLBlock()), ], default=None, ), preserve_default=False, ), ]
33.673913
85
0.355068
import wagtail.core.blocks import wagtail.core.fields from django.db import migrations class Migration(migrations.Migration): dependencies = [ ("budgetportal", "0053_custompage"), ] operations = [ migrations.AddField( model_name="custompage", name="body", field=wagtail.core.fields.StreamField( [ ( "section", wagtail.core.blocks.StructBlock( [ ( "presentation_class", wagtail.core.blocks.ChoiceBlock( choices=[ ("is-default", "Default"), ("is-invisible", "No background/border"), ("is-bevel", "Bevel"), ] ), ), ("heading", wagtail.core.blocks.CharBlock()), ("content", wagtail.core.blocks.RichTextBlock()), ] ), ), ("html", wagtail.core.blocks.RawHTMLBlock()), ], default=None, ), preserve_default=False, ), ]
true
true
f700ec8f97639e8cba334b547d045665bc55b7bc
2,445
py
Python
loss/loss_new.py
liwanjunit/ASRGAN
ac01e546939c435c246fbdce64606464f8fdfc00
[ "MIT" ]
null
null
null
loss/loss_new.py
liwanjunit/ASRGAN
ac01e546939c435c246fbdce64606464f8fdfc00
[ "MIT" ]
null
null
null
loss/loss_new.py
liwanjunit/ASRGAN
ac01e546939c435c246fbdce64606464f8fdfc00
[ "MIT" ]
null
null
null
import torch from torch import nn from torchvision.models.vgg import vgg16 class GeneratorLoss_NEW(nn.Module): def __init__(self): super(GeneratorLoss_NEW, self).__init__() vgg = vgg16(pretrained=True) # loss_network = nn.Sequential(*list(vgg.features)[:31]).eval() loss_network = nn.Sequential(*list(vgg.features)[:35]).eval() for param in loss_network.parameters(): param.requires_grad = False self.loss_network = loss_network self.mse_loss = nn.MSELoss() self.tv_loss = TVLoss() self.charbonnier_loss = L1_Charbonnier_loss() def forward(self, out_labels, out_images, target_images): # Adversarial Loss adversarial_loss = torch.mean(1 - out_labels) # Perception Loss # perception_loss = self.mse_loss(self.loss_network(out_images), self.loss_network(target_images)) perception_loss = self.charbonnier_loss(self.loss_network(out_images), self.loss_network(target_images)) # Image Loss # image_loss = self.mse_loss(out_images, target_images) image_loss = self.charbonnier_loss(out_images, target_images) # TV Loss tv_loss = self.tv_loss(out_images) return image_loss + 0.001 * adversarial_loss + 0.006 * perception_loss + 2e-8 * tv_loss class TVLoss(nn.Module): def __init__(self, tv_loss_weight=1): super(TVLoss, self).__init__() self.tv_loss_weight = tv_loss_weight def forward(self, x): batch_size = x.size()[0] h_x = x.size()[2] w_x = x.size()[3] count_h = self.tensor_size(x[:, :, 1:, :]) count_w = self.tensor_size(x[:, :, :, 1:]) h_tv = torch.pow((x[:, :, 1:, :] - x[:, :, :h_x - 1, :]), 2).sum() w_tv = torch.pow((x[:, :, :, 1:] - x[:, :, :, :w_x - 1]), 2).sum() return self.tv_loss_weight * 2 * (h_tv / count_h + w_tv / count_w) / batch_size @staticmethod def tensor_size(t): return t.size()[1] * t.size()[2] * t.size()[3] class L1_Charbonnier_loss(torch.nn.Module): """L1 Charbonnierloss.""" def __init__(self): super(L1_Charbonnier_loss, self).__init__() self.eps = 1e-6 def forward(self, X, Y): diff = torch.add(X, -Y) error = torch.sqrt(diff * diff + self.eps) loss = torch.mean(error) return loss if __name__ == "__main__": g_loss = GeneratorLoss_NEW() print(g_loss)
35.434783
112
0.618814
import torch from torch import nn from torchvision.models.vgg import vgg16 class GeneratorLoss_NEW(nn.Module): def __init__(self): super(GeneratorLoss_NEW, self).__init__() vgg = vgg16(pretrained=True) loss_network = nn.Sequential(*list(vgg.features)[:35]).eval() for param in loss_network.parameters(): param.requires_grad = False self.loss_network = loss_network self.mse_loss = nn.MSELoss() self.tv_loss = TVLoss() self.charbonnier_loss = L1_Charbonnier_loss() def forward(self, out_labels, out_images, target_images): adversarial_loss = torch.mean(1 - out_labels) perception_loss = self.charbonnier_loss(self.loss_network(out_images), self.loss_network(target_images)) image_loss = self.charbonnier_loss(out_images, target_images) tv_loss = self.tv_loss(out_images) return image_loss + 0.001 * adversarial_loss + 0.006 * perception_loss + 2e-8 * tv_loss class TVLoss(nn.Module): def __init__(self, tv_loss_weight=1): super(TVLoss, self).__init__() self.tv_loss_weight = tv_loss_weight def forward(self, x): batch_size = x.size()[0] h_x = x.size()[2] w_x = x.size()[3] count_h = self.tensor_size(x[:, :, 1:, :]) count_w = self.tensor_size(x[:, :, :, 1:]) h_tv = torch.pow((x[:, :, 1:, :] - x[:, :, :h_x - 1, :]), 2).sum() w_tv = torch.pow((x[:, :, :, 1:] - x[:, :, :, :w_x - 1]), 2).sum() return self.tv_loss_weight * 2 * (h_tv / count_h + w_tv / count_w) / batch_size @staticmethod def tensor_size(t): return t.size()[1] * t.size()[2] * t.size()[3] class L1_Charbonnier_loss(torch.nn.Module): def __init__(self): super(L1_Charbonnier_loss, self).__init__() self.eps = 1e-6 def forward(self, X, Y): diff = torch.add(X, -Y) error = torch.sqrt(diff * diff + self.eps) loss = torch.mean(error) return loss if __name__ == "__main__": g_loss = GeneratorLoss_NEW() print(g_loss)
true
true
f700ed4220ce4098b1a75241602cf0b9cb224983
3,572
py
Python
lib/surface/datapipelines/pipeline/create.py
google-cloud-sdk-unofficial/google-cloud-sdk
2a48a04df14be46c8745050f98768e30474a1aac
[ "Apache-2.0" ]
2
2019-11-10T09:17:07.000Z
2019-12-18T13:44:08.000Z
lib/surface/datapipelines/pipeline/create.py
google-cloud-sdk-unofficial/google-cloud-sdk
2a48a04df14be46c8745050f98768e30474a1aac
[ "Apache-2.0" ]
null
null
null
lib/surface/datapipelines/pipeline/create.py
google-cloud-sdk-unofficial/google-cloud-sdk
2a48a04df14be46c8745050f98768e30474a1aac
[ "Apache-2.0" ]
1
2020-07-25T01:40:19.000Z
2020-07-25T01:40:19.000Z
# -*- coding: utf-8 -*- # # Copyright 2021 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Command to create a Pipeline for the Data Pipelines API.""" from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals from googlecloudsdk.api_lib.datapipelines import util from googlecloudsdk.calliope import base from googlecloudsdk.command_lib.datapipelines import flags _DETAILED_HELP = { 'DESCRIPTION': '{description}', 'EXAMPLES': """ \ To create a BATCH Data Pipeline ``PIPELINE_NAME'' in project ``example'' in region ``us-central1'', run: $ {command} PIPELINE_NAME --project=example --region=us-central1 --pipeline-type=BATCH --template-file-gcs-location='gs://path_to_template_file' --parameters=inputFile="gs://path_to_input_file",output="gs://path_to_output_file" --schedule="0 * * * *" --temp-location="gs://path_to_temp_location" """, } @base.ReleaseTracks(base.ReleaseTrack.BETA) class Create(base.CreateCommand): """Creates Data Pipelines Pipeline.""" detailed_help = _DETAILED_HELP @staticmethod def Args(parser): flags.AddCreatePipelineFlags(parser) flags.GetDisplayNameArg('Data Pipelines pipeline').AddToParser(parser) flags.GetPipelineTypeArg(required=True).AddToParser(parser) flags.GetTemplateTypeArg(required=False).AddToParser(parser) flags.GetScheduleArg(required=False).AddToParser(parser) flags.GetTimeZoneArg(required=False).AddToParser(parser) flags.GetTemplateFileGcsLocationArg(required=False).AddToParser(parser) flags.GetParametersArg(required=False).AddToParser(parser) flags.GetMaxWorkersArg(required=False).AddToParser(parser) flags.GetNumWorkersArg(required=False).AddToParser(parser) flags.GetNetworkArg(required=False).AddToParser(parser) flags.GetSubnetworkArg(required=False).AddToParser(parser) flags.GetWorkerMachineTypeArg(required=False).AddToParser(parser) flags.GetTempLocationArg(required=False).AddToParser(parser) flags.GetDataflowKmsKeyArg(required=False).AddToParser(parser) flags.GetDisablePublicIpsArg(required=False).AddToParser(parser) flags.GetDataflowServiceAccountEmailArg(required=False).AddToParser(parser) flags.GetEnableStreamingEngineArg(required=False).AddToParser(parser) flags.GetAdditionalExperimentsArg(required=False).AddToParser(parser) flags.GetAdditionalUserLabelsArg(required=False).AddToParser(parser) flags.GetWorkerRegionArgs(required=False).AddToParser(parser) flags.GetFlexRsGoalArg(required=False).AddToParser(parser) flags.GetStreamingUpdateArgs(required=False).AddToParser(parser) def Run(self, args): """Run the create command.""" client = util.PipelinesClient() pipelines_ref = args.CONCEPTS.pipeline.Parse() region_ref = pipelines_ref.Parent() return client.Create( pipeline=pipelines_ref.RelativeName(), parent=region_ref.RelativeName(), args=args)
43.560976
112
0.755039
from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals from googlecloudsdk.api_lib.datapipelines import util from googlecloudsdk.calliope import base from googlecloudsdk.command_lib.datapipelines import flags _DETAILED_HELP = { 'DESCRIPTION': '{description}', 'EXAMPLES': """ \ To create a BATCH Data Pipeline ``PIPELINE_NAME'' in project ``example'' in region ``us-central1'', run: $ {command} PIPELINE_NAME --project=example --region=us-central1 --pipeline-type=BATCH --template-file-gcs-location='gs://path_to_template_file' --parameters=inputFile="gs://path_to_input_file",output="gs://path_to_output_file" --schedule="0 * * * *" --temp-location="gs://path_to_temp_location" """, } @base.ReleaseTracks(base.ReleaseTrack.BETA) class Create(base.CreateCommand): detailed_help = _DETAILED_HELP @staticmethod def Args(parser): flags.AddCreatePipelineFlags(parser) flags.GetDisplayNameArg('Data Pipelines pipeline').AddToParser(parser) flags.GetPipelineTypeArg(required=True).AddToParser(parser) flags.GetTemplateTypeArg(required=False).AddToParser(parser) flags.GetScheduleArg(required=False).AddToParser(parser) flags.GetTimeZoneArg(required=False).AddToParser(parser) flags.GetTemplateFileGcsLocationArg(required=False).AddToParser(parser) flags.GetParametersArg(required=False).AddToParser(parser) flags.GetMaxWorkersArg(required=False).AddToParser(parser) flags.GetNumWorkersArg(required=False).AddToParser(parser) flags.GetNetworkArg(required=False).AddToParser(parser) flags.GetSubnetworkArg(required=False).AddToParser(parser) flags.GetWorkerMachineTypeArg(required=False).AddToParser(parser) flags.GetTempLocationArg(required=False).AddToParser(parser) flags.GetDataflowKmsKeyArg(required=False).AddToParser(parser) flags.GetDisablePublicIpsArg(required=False).AddToParser(parser) flags.GetDataflowServiceAccountEmailArg(required=False).AddToParser(parser) flags.GetEnableStreamingEngineArg(required=False).AddToParser(parser) flags.GetAdditionalExperimentsArg(required=False).AddToParser(parser) flags.GetAdditionalUserLabelsArg(required=False).AddToParser(parser) flags.GetWorkerRegionArgs(required=False).AddToParser(parser) flags.GetFlexRsGoalArg(required=False).AddToParser(parser) flags.GetStreamingUpdateArgs(required=False).AddToParser(parser) def Run(self, args): client = util.PipelinesClient() pipelines_ref = args.CONCEPTS.pipeline.Parse() region_ref = pipelines_ref.Parent() return client.Create( pipeline=pipelines_ref.RelativeName(), parent=region_ref.RelativeName(), args=args)
true
true
f700ed7add06c36d5f90865df0e28dcda3ca0828
1,129
py
Python
core/__init__.py
mensch272/sudoku
a8ae68e27a2ac98e9087a332a7093f903f0bb1a3
[ "Apache-2.0" ]
5
2020-01-18T00:55:21.000Z
2020-01-21T11:14:53.000Z
core/__init__.py
mHaisham/sudoku
a8ae68e27a2ac98e9087a332a7093f903f0bb1a3
[ "Apache-2.0" ]
null
null
null
core/__init__.py
mHaisham/sudoku
a8ae68e27a2ac98e9087a332a7093f903f0bb1a3
[ "Apache-2.0" ]
4
2020-01-18T04:52:57.000Z
2020-01-21T12:09:18.000Z
from kivy.graphics import Color from .navigation import Navigation class Colors: WHITE = Color(1, 1, 1, 1) BLACK = Color(0, 0, 0, 1) GREY = Color(.8, .8, .8, 1) RED = Color(1, 0, 0, 1) GREEN = Color(0, 1, 0, 1) BLUE = Color(0, 0, 1, 1) @staticmethod def lerp(value, *args): if value <= 0: return args[0] elif value >= 1: return args[-1] a = None b = None pos = 2 neg = -2 slice = 1 / (len(args) - 1) for i in range(len(args)): v = i * slice diff = value - v if diff == 0: return args[i] elif diff > 0: if diff < pos: b = args[i] pos = diff else: if diff > neg: a = args[i] neg = diff pvalue = pos / slice nvalue = 1 - pvalue return Color( a.r * pvalue + b.r * nvalue, a.g * pvalue + b.g * nvalue, a.b * pvalue + b.b * nvalue, 1 )
21.301887
40
0.394154
from kivy.graphics import Color from .navigation import Navigation class Colors: WHITE = Color(1, 1, 1, 1) BLACK = Color(0, 0, 0, 1) GREY = Color(.8, .8, .8, 1) RED = Color(1, 0, 0, 1) GREEN = Color(0, 1, 0, 1) BLUE = Color(0, 0, 1, 1) @staticmethod def lerp(value, *args): if value <= 0: return args[0] elif value >= 1: return args[-1] a = None b = None pos = 2 neg = -2 slice = 1 / (len(args) - 1) for i in range(len(args)): v = i * slice diff = value - v if diff == 0: return args[i] elif diff > 0: if diff < pos: b = args[i] pos = diff else: if diff > neg: a = args[i] neg = diff pvalue = pos / slice nvalue = 1 - pvalue return Color( a.r * pvalue + b.r * nvalue, a.g * pvalue + b.g * nvalue, a.b * pvalue + b.b * nvalue, 1 )
true
true
f700ed8c3574776fd1fef8fec5339212509ec5ff
2,812
py
Python
plugins/openstack/pyparts/service_features.py
aserdean/hotsos
a0f17a7ee2f08a4da0a269d478dec7ebb8f12493
[ "Apache-2.0" ]
null
null
null
plugins/openstack/pyparts/service_features.py
aserdean/hotsos
a0f17a7ee2f08a4da0a269d478dec7ebb8f12493
[ "Apache-2.0" ]
null
null
null
plugins/openstack/pyparts/service_features.py
aserdean/hotsos
a0f17a7ee2f08a4da0a269d478dec7ebb8f12493
[ "Apache-2.0" ]
null
null
null
from core.plugins.openstack import ( OpenstackChecksBase, ) FEATURES = {'neutron': {'main': [ 'availability_zone'], 'openvswitch-agent': [ 'l2_population', 'firewall_driver'], 'l3-agent': [ 'agent_mode', 'ovs_use_veth'], 'dhcp-agent': [ 'enable_metadata_network', 'enable_isolated_metadata', 'ovs_use_veth']}, 'nova': {'main': [ 'vcpu_pin_set', 'cpu_shared_set', 'cpu_dedicated_set', 'live_migration_permit_auto_converge', 'live_migration_permit_post_copy', ]}} # checked against neutron DEFAULTS = {'neutron': {'dhcp-agent': { 'enable_metadata_network': False, 'enable_isolated_metadata': False}}, 'nova': {'main': {'live_migration_permit_auto_converge': False, 'live_migration_permit_post_copy': False}}} YAML_PRIORITY = 5 class ServiceFeatureChecks(OpenstackChecksBase): @property def output(self): if self._output: return {"features": self._output} def get_service_features(self): """ This is used to display whether or not specific features are enabled. """ for service in FEATURES: for module in FEATURES[service]: module_features = {} cfg = self.ost_projects.all[service].config[module] if not cfg.exists: continue for key in FEATURES[service][module]: val = cfg.get(key) if val is not None: module_features[key] = val if key not in module_features: if key in DEFAULTS.get(service, {}).get(module, {}): default = DEFAULTS[service][module][key] module_features[key] = default # TODO: only include modules for which there is an actual agent # installed since otherwise their config is irrelevant. if module_features: if service not in self._output: self._output[service] = {} self._output[service][module] = module_features def __call__(self): # Only run if we think Openstack is installed. if not self.openstack_installed: return self.get_service_features()
37
79
0.47973
from core.plugins.openstack import ( OpenstackChecksBase, ) FEATURES = {'neutron': {'main': [ 'availability_zone'], 'openvswitch-agent': [ 'l2_population', 'firewall_driver'], 'l3-agent': [ 'agent_mode', 'ovs_use_veth'], 'dhcp-agent': [ 'enable_metadata_network', 'enable_isolated_metadata', 'ovs_use_veth']}, 'nova': {'main': [ 'vcpu_pin_set', 'cpu_shared_set', 'cpu_dedicated_set', 'live_migration_permit_auto_converge', 'live_migration_permit_post_copy', ]}} DEFAULTS = {'neutron': {'dhcp-agent': { 'enable_metadata_network': False, 'enable_isolated_metadata': False}}, 'nova': {'main': {'live_migration_permit_auto_converge': False, 'live_migration_permit_post_copy': False}}} YAML_PRIORITY = 5 class ServiceFeatureChecks(OpenstackChecksBase): @property def output(self): if self._output: return {"features": self._output} def get_service_features(self): for service in FEATURES: for module in FEATURES[service]: module_features = {} cfg = self.ost_projects.all[service].config[module] if not cfg.exists: continue for key in FEATURES[service][module]: val = cfg.get(key) if val is not None: module_features[key] = val if key not in module_features: if key in DEFAULTS.get(service, {}).get(module, {}): default = DEFAULTS[service][module][key] module_features[key] = default if module_features: if service not in self._output: self._output[service] = {} self._output[service][module] = module_features def __call__(self): if not self.openstack_installed: return self.get_service_features()
true
true
f700ee68ebec172b781ca8d0ef2916ffdef7a1fb
502
py
Python
backend/wallet/api/v1/serializers.py
crowdbotics-apps/zaka-28999
5c9fff173a11033fc15f3930c2665b077756d738
[ "FTL", "AML", "RSA-MD" ]
null
null
null
backend/wallet/api/v1/serializers.py
crowdbotics-apps/zaka-28999
5c9fff173a11033fc15f3930c2665b077756d738
[ "FTL", "AML", "RSA-MD" ]
20
2021-08-16T02:16:21.000Z
2021-10-04T18:41:43.000Z
backend/wallet/api/v1/serializers.py
crowdbotics-apps/zaka-28999
5c9fff173a11033fc15f3930c2665b077756d738
[ "FTL", "AML", "RSA-MD" ]
null
null
null
from rest_framework import serializers from wallet.models import UserWallet, PaymentMethod, DriverWallet class UserWalletSerializer(serializers.ModelSerializer): class Meta: model = UserWallet fields = "__all__" class DriverWalletSerializer(serializers.ModelSerializer): class Meta: model = DriverWallet fields = "__all__" class PaymentMethodSerializer(serializers.ModelSerializer): class Meta: model = PaymentMethod fields = "__all__"
23.904762
65
0.729084
from rest_framework import serializers from wallet.models import UserWallet, PaymentMethod, DriverWallet class UserWalletSerializer(serializers.ModelSerializer): class Meta: model = UserWallet fields = "__all__" class DriverWalletSerializer(serializers.ModelSerializer): class Meta: model = DriverWallet fields = "__all__" class PaymentMethodSerializer(serializers.ModelSerializer): class Meta: model = PaymentMethod fields = "__all__"
true
true
f700eefc840a44bd548faf5f09b0af77618a2b03
1,311
py
Python
apps/covid_19/preprocess/importation.py
malanchak/AuTuMN
0cbd006d1f15da414d02eed44e48bb5c06f0802e
[ "BSD-2-Clause-FreeBSD" ]
null
null
null
apps/covid_19/preprocess/importation.py
malanchak/AuTuMN
0cbd006d1f15da414d02eed44e48bb5c06f0802e
[ "BSD-2-Clause-FreeBSD" ]
null
null
null
apps/covid_19/preprocess/importation.py
malanchak/AuTuMN
0cbd006d1f15da414d02eed44e48bb5c06f0802e
[ "BSD-2-Clause-FreeBSD" ]
null
null
null
from typing import List, Callable from autumn.curve import scale_up_function def get_importation_rate_func_as_birth_rates( importation_times: List[float], importation_n_cases: List[float], detect_prop_func, starting_pops: list, ): """ When imported cases are explicitly simulated as part of the modelled population. They enter the late_infectious compartment through a birth process """ # inflate importation numbers to account for undetected cases (assumed to be asymptomatic or sympt non hospital) for i, time in enumerate(importation_times): importation_n_cases[i] /= detect_prop_func(time) # scale-up curve for importation numbers importation_numbers_scale_up = scale_up_function( importation_times, importation_n_cases, method=4, smoothness=5.0, bound_low=0.0 ) def recruitment_rate(t): return importation_numbers_scale_up(t) / sum(starting_pops) return recruitment_rate # dummy proportions for now: # FIXME: These are parameters! IMPORTATION_PROPS_BY_AGE = { "0": 0.04, "5": 0.04, "10": 0.04, "15": 0.04, "20": 0.08, "25": 0.09, "30": 0.09, "35": 0.09, "40": 0.09, "45": 0.08, "50": 0.08, "55": 0.08, "60": 0.04, "65": 0.04, "70": 0.04, "75": 0.04, }
26.755102
116
0.667429
from typing import List, Callable from autumn.curve import scale_up_function def get_importation_rate_func_as_birth_rates( importation_times: List[float], importation_n_cases: List[float], detect_prop_func, starting_pops: list, ): for i, time in enumerate(importation_times): importation_n_cases[i] /= detect_prop_func(time) importation_numbers_scale_up = scale_up_function( importation_times, importation_n_cases, method=4, smoothness=5.0, bound_low=0.0 ) def recruitment_rate(t): return importation_numbers_scale_up(t) / sum(starting_pops) return recruitment_rate IMPORTATION_PROPS_BY_AGE = { "0": 0.04, "5": 0.04, "10": 0.04, "15": 0.04, "20": 0.08, "25": 0.09, "30": 0.09, "35": 0.09, "40": 0.09, "45": 0.08, "50": 0.08, "55": 0.08, "60": 0.04, "65": 0.04, "70": 0.04, "75": 0.04, }
true
true
f700ef09d193fce405fd3b6bc62d564d19383e0d
316
py
Python
PBO_19188/Latihan_7.2.class2.py
Fazlur9/PBO
357b739c0c20ed2aa0c3cc58d48bbae843e9e946
[ "MIT" ]
null
null
null
PBO_19188/Latihan_7.2.class2.py
Fazlur9/PBO
357b739c0c20ed2aa0c3cc58d48bbae843e9e946
[ "MIT" ]
null
null
null
PBO_19188/Latihan_7.2.class2.py
Fazlur9/PBO
357b739c0c20ed2aa0c3cc58d48bbae843e9e946
[ "MIT" ]
null
null
null
class Mahasiswa: def __init__(self, nama, nilai): self.nama = nama self.nilai = nilai def hitung_nilai(self): return sum(self.nilai)/len(self.nilai) mahasiswa = Mahasiswa("Fazlur", (90,70,70,70)) print("Nama :", mahasiswa.nama) print("Total Nilai :", mahasiswa.hitung_nilai())
35.111111
48
0.642405
class Mahasiswa: def __init__(self, nama, nilai): self.nama = nama self.nilai = nilai def hitung_nilai(self): return sum(self.nilai)/len(self.nilai) mahasiswa = Mahasiswa("Fazlur", (90,70,70,70)) print("Nama :", mahasiswa.nama) print("Total Nilai :", mahasiswa.hitung_nilai())
true
true
f700f07327077fff3b3292329ec22f08ef94b3ab
22,882
py
Python
electrum_mona/plugin.py
david4neblio/electrum-mona
2d13b066be2d6205aeaa7ca859884c3ec1b92e83
[ "MIT" ]
2
2019-12-27T09:13:48.000Z
2020-09-18T14:10:48.000Z
electrum_mona/plugin.py
david4neblio/electrum-mona
2d13b066be2d6205aeaa7ca859884c3ec1b92e83
[ "MIT" ]
2
2020-07-31T20:14:43.000Z
2021-10-17T02:33:41.000Z
electrum_mona/plugin.py
david4neblio/electrum-mona
2d13b066be2d6205aeaa7ca859884c3ec1b92e83
[ "MIT" ]
3
2020-03-08T19:40:17.000Z
2021-11-10T21:41:11.000Z
#!/usr/bin/env python # # Electrum - lightweight Bitcoin client # Copyright (C) 2015 Thomas Voegtlin # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import os import pkgutil import importlib.util import time import threading import sys from typing import NamedTuple, Any, Union, TYPE_CHECKING, Optional from .i18n import _ from .util import (profiler, DaemonThread, UserCancelled, ThreadJob, UserFacingException) from . import bip32 from . import plugins from .simple_config import SimpleConfig from .logging import get_logger, Logger if TYPE_CHECKING: from .plugins.hw_wallet import HW_PluginBase _logger = get_logger(__name__) plugin_loaders = {} hook_names = set() hooks = {} class Plugins(DaemonThread): LOGGING_SHORTCUT = 'p' @profiler def __init__(self, config: SimpleConfig, gui_name): DaemonThread.__init__(self) self.setName('Plugins') self.pkgpath = os.path.dirname(plugins.__file__) self.config = config self.hw_wallets = {} self.plugins = {} self.gui_name = gui_name self.descriptions = {} self.device_manager = DeviceMgr(config) self.load_plugins() self.add_jobs(self.device_manager.thread_jobs()) self.start() def load_plugins(self): for loader, name, ispkg in pkgutil.iter_modules([self.pkgpath]): full_name = f'electrum_mona.plugins.{name}' spec = importlib.util.find_spec(full_name) if spec is None: # pkgutil found it but importlib can't ?! 
raise Exception(f"Error pre-loading {full_name}: no spec") try: module = importlib.util.module_from_spec(spec) # sys.modules needs to be modified for relative imports to work # see https://stackoverflow.com/a/50395128 sys.modules[spec.name] = module spec.loader.exec_module(module) except Exception as e: raise Exception(f"Error pre-loading {full_name}: {repr(e)}") from e d = module.__dict__ gui_good = self.gui_name in d.get('available_for', []) if not gui_good: continue details = d.get('registers_wallet_type') if details: self.register_wallet_type(name, gui_good, details) details = d.get('registers_keystore') if details: self.register_keystore(name, gui_good, details) self.descriptions[name] = d if not d.get('requires_wallet_type') and self.config.get('use_' + name): try: self.load_plugin(name) except BaseException as e: self.logger.exception(f"cannot initialize plugin {name}: {e}") def get(self, name): return self.plugins.get(name) def count(self): return len(self.plugins) def load_plugin(self, name): if name in self.plugins: return self.plugins[name] full_name = f'electrum_mona.plugins.{name}.{self.gui_name}' spec = importlib.util.find_spec(full_name) if spec is None: raise RuntimeError("%s implementation for %s plugin not found" % (self.gui_name, name)) try: module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) plugin = module.Plugin(self, self.config, name) except Exception as e: raise Exception(f"Error loading {name} plugin: {repr(e)}") from e self.add_jobs(plugin.thread_jobs()) self.plugins[name] = plugin self.logger.info(f"loaded {name}") return plugin def close_plugin(self, plugin): self.remove_jobs(plugin.thread_jobs()) def enable(self, name): self.config.set_key('use_' + name, True, True) p = self.get(name) if p: return p return self.load_plugin(name) def disable(self, name): self.config.set_key('use_' + name, False, True) p = self.get(name) if not p: return self.plugins.pop(name) p.close() self.logger.info(f"closed {name}") def toggle(self, name): p = self.get(name) return self.disable(name) if p else self.enable(name) def is_available(self, name, w): d = self.descriptions.get(name) if not d: return False deps = d.get('requires', []) for dep, s in deps: try: __import__(dep) except ImportError as e: self.logger.warning(f'Plugin {name} unavailable: {repr(e)}') return False requires = d.get('requires_wallet_type', []) return not requires or w.wallet_type in requires def get_hardware_support(self): out = [] for name, (gui_good, details) in self.hw_wallets.items(): if gui_good: try: p = self.get_plugin(name) if p.is_enabled(): out.append(HardwarePluginToScan(name=name, description=details[2], plugin=p, exception=None)) except Exception as e: self.logger.exception(f"cannot load plugin for: {name}") out.append(HardwarePluginToScan(name=name, description=details[2], plugin=None, exception=e)) return out def register_wallet_type(self, name, gui_good, wallet_type): from .wallet import register_wallet_type, register_constructor self.logger.info(f"registering wallet type {(wallet_type, name)}") def loader(): plugin = self.get_plugin(name) register_constructor(wallet_type, plugin.wallet_class) register_wallet_type(wallet_type) plugin_loaders[wallet_type] = loader def register_keystore(self, name, gui_good, details): from .keystore import register_keystore def dynamic_constructor(d): return self.get_plugin(name).keystore_class(d) if details[0] == 'hardware': self.hw_wallets[name] = (gui_good, details) self.logger.info(f"registering hardware {name}: {details}") 
register_keystore(details[1], dynamic_constructor) def get_plugin(self, name): if not name in self.plugins: self.load_plugin(name) return self.plugins[name] def run(self): while self.is_running(): time.sleep(0.1) self.run_jobs() self.on_stop() def hook(func): hook_names.add(func.__name__) return func def run_hook(name, *args): results = [] f_list = hooks.get(name, []) for p, f in f_list: if p.is_enabled(): try: r = f(*args) except Exception: _logger.exception(f"Plugin error. plugin: {p}, hook: {name}") r = False if r: results.append(r) if results: assert len(results) == 1, results return results[0] class BasePlugin(Logger): def __init__(self, parent, config, name): self.parent = parent # The plugins object self.name = name self.config = config self.wallet = None Logger.__init__(self) # add self to hooks for k in dir(self): if k in hook_names: l = hooks.get(k, []) l.append((self, getattr(self, k))) hooks[k] = l def __str__(self): return self.name def close(self): # remove self from hooks for attr_name in dir(self): if attr_name in hook_names: # found attribute in self that is also the name of a hook l = hooks.get(attr_name, []) try: l.remove((self, getattr(self, attr_name))) except ValueError: # maybe attr name just collided with hook name and was not hook continue hooks[attr_name] = l self.parent.close_plugin(self) self.on_close() def on_close(self): pass def requires_settings(self): return False def thread_jobs(self): return [] def is_enabled(self): return self.is_available() and self.config.get('use_'+self.name) is True def is_available(self): return True def can_user_disable(self): return True def settings_dialog(self): pass class DeviceUnpairableError(UserFacingException): pass class HardwarePluginLibraryUnavailable(Exception): pass class Device(NamedTuple): path: Union[str, bytes] interface_number: int id_: str product_key: Any # when using hid, often Tuple[int, int] usage_page: int transport_ui_string: str class DeviceInfo(NamedTuple): device: Device label: Optional[str] = None initialized: Optional[bool] = None exception: Optional[Exception] = None class HardwarePluginToScan(NamedTuple): name: str description: str plugin: Optional['HW_PluginBase'] exception: Optional[Exception] class DeviceMgr(ThreadJob): '''Manages hardware clients. A client communicates over a hardware channel with the device. In addition to tracking device HID IDs, the device manager tracks hardware wallets and manages wallet pairing. A HID ID may be paired with a wallet when it is confirmed that the hardware device matches the wallet, i.e. they have the same master public key. A HID ID can be unpaired if e.g. it is wiped. Because of hotplugging, a wallet must request its client dynamically each time it is required, rather than caching it itself. The device manager is shared across plugins, so just one place does hardware scans when needed. By tracking HID IDs, if a device is plugged into a different port the wallet is automatically re-paired. Wallets are informed on connect / disconnect events. It must implement connected(), disconnected() callbacks. Being connected implies a pairing. Callbacks can happen in any thread context, and we do them without holding the lock. Confusingly, the HID ID (serial number) reported by the HID system doesn't match the device ID reported by the device itself. We use the HID IDs. This plugin is thread-safe. Currently only devices supported by hidapi are implemented.''' def __init__(self, config): ThreadJob.__init__(self) # Keyed by xpub. 
The value is the device id # has been paired, and None otherwise. self.xpub_ids = {} # A list of clients. The key is the client, the value is # a (path, id_) pair. self.clients = {} # What we recognise. Each entry is a (vendor_id, product_id) # pair. self.recognised_hardware = set() # Custom enumerate functions for devices we don't know about. self.enumerate_func = set() # For synchronization self.lock = threading.RLock() self.hid_lock = threading.RLock() self.config = config def thread_jobs(self): # Thread job to handle device timeouts return [self] def run(self): '''Handle device timeouts. Runs in the context of the Plugins thread.''' with self.lock: clients = list(self.clients.keys()) cutoff = time.time() - self.config.get_session_timeout() for client in clients: client.timeout(cutoff) def register_devices(self, device_pairs): for pair in device_pairs: self.recognised_hardware.add(pair) def register_enumerate_func(self, func): self.enumerate_func.add(func) def create_client(self, device, handler, plugin): # Get from cache first client = self.client_lookup(device.id_) if client: return client client = plugin.create_client(device, handler) if client: self.logger.info(f"Registering {client}") with self.lock: self.clients[client] = (device.path, device.id_) return client def xpub_id(self, xpub): with self.lock: return self.xpub_ids.get(xpub) def xpub_by_id(self, id_): with self.lock: for xpub, xpub_id in self.xpub_ids.items(): if xpub_id == id_: return xpub return None def unpair_xpub(self, xpub): with self.lock: if xpub not in self.xpub_ids: return _id = self.xpub_ids.pop(xpub) self._close_client(_id) def unpair_id(self, id_): xpub = self.xpub_by_id(id_) if xpub: self.unpair_xpub(xpub) else: self._close_client(id_) def _close_client(self, id_): client = self.client_lookup(id_) self.clients.pop(client, None) if client: client.close() def pair_xpub(self, xpub, id_): with self.lock: self.xpub_ids[xpub] = id_ def client_lookup(self, id_): with self.lock: for client, (path, client_id) in self.clients.items(): if client_id == id_: return client return None def client_by_id(self, id_): '''Returns a client for the device ID if one is registered. If a device is wiped or in bootloader mode pairing is impossible; in such cases we communicate by device ID and not wallet.''' self.scan_devices() return self.client_lookup(id_) def client_for_keystore(self, plugin, handler, keystore, force_pair): self.logger.info("getting client for keystore") if handler is None: raise Exception(_("Handler not found for") + ' ' + plugin.name + '\n' + _("A library is probably missing.")) handler.update_status(False) devices = self.scan_devices() xpub = keystore.xpub derivation = keystore.get_derivation() client = self.client_by_xpub(plugin, xpub, handler, devices) if client is None and force_pair: info = self.select_device(plugin, handler, keystore, devices) client = self.force_pair_xpub(plugin, handler, info, xpub, derivation, devices) if client: handler.update_status(True) self.logger.info("end client for keystore") return client def client_by_xpub(self, plugin, xpub, handler, devices): _id = self.xpub_id(xpub) client = self.client_lookup(_id) if client: # An unpaired client might have another wallet's handler # from a prior scan. Replace to fix dialog parenting. 
client.handler = handler return client for device in devices: if device.id_ == _id: return self.create_client(device, handler, plugin) def force_pair_xpub(self, plugin, handler, info, xpub, derivation, devices): # The wallet has not been previously paired, so let the user # choose an unpaired device and compare its first address. xtype = bip32.xpub_type(xpub) client = self.client_lookup(info.device.id_) if client and client.is_pairable(): # See comment above for same code client.handler = handler # This will trigger a PIN/passphrase entry request try: client_xpub = client.get_xpub(derivation, xtype) except (UserCancelled, RuntimeError): # Bad / cancelled PIN / passphrase client_xpub = None if client_xpub == xpub: self.pair_xpub(xpub, info.device.id_) return client # The user input has wrong PIN or passphrase, or cancelled input, # or it is not pairable raise DeviceUnpairableError( _('Electrum cannot pair with your {}.\n\n' 'Before you request bitcoins to be sent to addresses in this ' 'wallet, ensure you can pair with your device, or that you have ' 'its seed (and passphrase, if any). Otherwise all bitcoins you ' 'receive will be unspendable.').format(plugin.device)) def unpaired_device_infos(self, handler, plugin: 'HW_PluginBase', devices=None, include_failing_clients=False): '''Returns a list of DeviceInfo objects: one for each connected, unpaired device accepted by the plugin.''' if not plugin.libraries_available: message = plugin.get_library_not_available_message() raise HardwarePluginLibraryUnavailable(message) if devices is None: devices = self.scan_devices() devices = [dev for dev in devices if not self.xpub_by_id(dev.id_)] infos = [] for device in devices: if device.product_key not in plugin.DEVICE_IDS: continue try: client = self.create_client(device, handler, plugin) except Exception as e: self.logger.error(f'failed to create client for {plugin.name} at {device.path}: {repr(e)}') if include_failing_clients: infos.append(DeviceInfo(device=device, exception=e)) continue if not client: continue infos.append(DeviceInfo(device=device, label=client.label(), initialized=client.is_initialized())) return infos def select_device(self, plugin, handler, keystore, devices=None): '''Ask the user to select a device to use if there is more than one, and return the DeviceInfo for the device.''' while True: infos = self.unpaired_device_infos(handler, plugin, devices) if infos: break msg = _('Please insert your {}').format(plugin.device) if keystore.label: msg += ' ({})'.format(keystore.label) msg += '. 
{}\n\n{}'.format( _('Verify the cable is connected and that ' 'no other application is using it.'), _('Try to connect again?') ) if not handler.yes_no_question(msg): raise UserCancelled() devices = None if len(infos) == 1: return infos[0] # select device by label for info in infos: if info.label == keystore.label: return info msg = _("Please select which {} device to use:").format(plugin.device) descriptions = [str(info.label) + ' (%s)'%(_("initialized") if info.initialized else _("wiped")) for info in infos] c = handler.query_choice(msg, descriptions) if c is None: raise UserCancelled() info = infos[c] # save new label keystore.set_label(info.label) if handler.win.wallet is not None: handler.win.wallet.save_keystore() return info def _scan_devices_with_hid(self): try: import hid except ImportError: return [] with self.hid_lock: hid_list = hid.enumerate(0, 0) devices = [] for d in hid_list: product_key = (d['vendor_id'], d['product_id']) if product_key in self.recognised_hardware: # Older versions of hid don't provide interface_number interface_number = d.get('interface_number', -1) usage_page = d['usage_page'] id_ = d['serial_number'] if len(id_) == 0: id_ = str(d['path']) id_ += str(interface_number) + str(usage_page) devices.append(Device(path=d['path'], interface_number=interface_number, id_=id_, product_key=product_key, usage_page=usage_page, transport_ui_string='hid')) return devices def scan_devices(self): self.logger.info("scanning devices...") # First see what's connected that we know about devices = self._scan_devices_with_hid() # Let plugin handlers enumerate devices we don't know about for f in self.enumerate_func: try: new_devices = f() except BaseException as e: self.logger.error('custom device enum failed. func {}, error {}' .format(str(f), repr(e))) else: devices.extend(new_devices) # find out what was disconnected pairs = [(dev.path, dev.id_) for dev in devices] disconnected_ids = [] with self.lock: connected = {} for client, pair in self.clients.items(): if pair in pairs and client.has_usable_connection_with_device(): connected[client] = pair else: disconnected_ids.append(pair[1]) self.clients = connected # Unpair disconnected devices for id_ in disconnected_ids: self.unpair_id(id_) return devices
36.378378
123
0.589721
import os import pkgutil import importlib.util import time import threading import sys from typing import NamedTuple, Any, Union, TYPE_CHECKING, Optional from .i18n import _ from .util import (profiler, DaemonThread, UserCancelled, ThreadJob, UserFacingException) from . import bip32 from . import plugins from .simple_config import SimpleConfig from .logging import get_logger, Logger if TYPE_CHECKING: from .plugins.hw_wallet import HW_PluginBase _logger = get_logger(__name__) plugin_loaders = {} hook_names = set() hooks = {} class Plugins(DaemonThread): LOGGING_SHORTCUT = 'p' @profiler def __init__(self, config: SimpleConfig, gui_name): DaemonThread.__init__(self) self.setName('Plugins') self.pkgpath = os.path.dirname(plugins.__file__) self.config = config self.hw_wallets = {} self.plugins = {} self.gui_name = gui_name self.descriptions = {} self.device_manager = DeviceMgr(config) self.load_plugins() self.add_jobs(self.device_manager.thread_jobs()) self.start() def load_plugins(self): for loader, name, ispkg in pkgutil.iter_modules([self.pkgpath]): full_name = f'electrum_mona.plugins.{name}' spec = importlib.util.find_spec(full_name) if spec is None: raise Exception(f"Error pre-loading {full_name}: no spec") try: module = importlib.util.module_from_spec(spec) # sys.modules needs to be modified for relative imports to work # see https://stackoverflow.com/a/50395128 sys.modules[spec.name] = module spec.loader.exec_module(module) except Exception as e: raise Exception(f"Error pre-loading {full_name}: {repr(e)}") from e d = module.__dict__ gui_good = self.gui_name in d.get('available_for', []) if not gui_good: continue details = d.get('registers_wallet_type') if details: self.register_wallet_type(name, gui_good, details) details = d.get('registers_keystore') if details: self.register_keystore(name, gui_good, details) self.descriptions[name] = d if not d.get('requires_wallet_type') and self.config.get('use_' + name): try: self.load_plugin(name) except BaseException as e: self.logger.exception(f"cannot initialize plugin {name}: {e}") def get(self, name): return self.plugins.get(name) def count(self): return len(self.plugins) def load_plugin(self, name): if name in self.plugins: return self.plugins[name] full_name = f'electrum_mona.plugins.{name}.{self.gui_name}' spec = importlib.util.find_spec(full_name) if spec is None: raise RuntimeError("%s implementation for %s plugin not found" % (self.gui_name, name)) try: module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) plugin = module.Plugin(self, self.config, name) except Exception as e: raise Exception(f"Error loading {name} plugin: {repr(e)}") from e self.add_jobs(plugin.thread_jobs()) self.plugins[name] = plugin self.logger.info(f"loaded {name}") return plugin def close_plugin(self, plugin): self.remove_jobs(plugin.thread_jobs()) def enable(self, name): self.config.set_key('use_' + name, True, True) p = self.get(name) if p: return p return self.load_plugin(name) def disable(self, name): self.config.set_key('use_' + name, False, True) p = self.get(name) if not p: return self.plugins.pop(name) p.close() self.logger.info(f"closed {name}") def toggle(self, name): p = self.get(name) return self.disable(name) if p else self.enable(name) def is_available(self, name, w): d = self.descriptions.get(name) if not d: return False deps = d.get('requires', []) for dep, s in deps: try: __import__(dep) except ImportError as e: self.logger.warning(f'Plugin {name} unavailable: {repr(e)}') return False requires = 
d.get('requires_wallet_type', []) return not requires or w.wallet_type in requires def get_hardware_support(self): out = [] for name, (gui_good, details) in self.hw_wallets.items(): if gui_good: try: p = self.get_plugin(name) if p.is_enabled(): out.append(HardwarePluginToScan(name=name, description=details[2], plugin=p, exception=None)) except Exception as e: self.logger.exception(f"cannot load plugin for: {name}") out.append(HardwarePluginToScan(name=name, description=details[2], plugin=None, exception=e)) return out def register_wallet_type(self, name, gui_good, wallet_type): from .wallet import register_wallet_type, register_constructor self.logger.info(f"registering wallet type {(wallet_type, name)}") def loader(): plugin = self.get_plugin(name) register_constructor(wallet_type, plugin.wallet_class) register_wallet_type(wallet_type) plugin_loaders[wallet_type] = loader def register_keystore(self, name, gui_good, details): from .keystore import register_keystore def dynamic_constructor(d): return self.get_plugin(name).keystore_class(d) if details[0] == 'hardware': self.hw_wallets[name] = (gui_good, details) self.logger.info(f"registering hardware {name}: {details}") register_keystore(details[1], dynamic_constructor) def get_plugin(self, name): if not name in self.plugins: self.load_plugin(name) return self.plugins[name] def run(self): while self.is_running(): time.sleep(0.1) self.run_jobs() self.on_stop() def hook(func): hook_names.add(func.__name__) return func def run_hook(name, *args): results = [] f_list = hooks.get(name, []) for p, f in f_list: if p.is_enabled(): try: r = f(*args) except Exception: _logger.exception(f"Plugin error. plugin: {p}, hook: {name}") r = False if r: results.append(r) if results: assert len(results) == 1, results return results[0] class BasePlugin(Logger): def __init__(self, parent, config, name): self.parent = parent # The plugins object self.name = name self.config = config self.wallet = None Logger.__init__(self) # add self to hooks for k in dir(self): if k in hook_names: l = hooks.get(k, []) l.append((self, getattr(self, k))) hooks[k] = l def __str__(self): return self.name def close(self): # remove self from hooks for attr_name in dir(self): if attr_name in hook_names: # found attribute in self that is also the name of a hook l = hooks.get(attr_name, []) try: l.remove((self, getattr(self, attr_name))) except ValueError: # maybe attr name just collided with hook name and was not hook continue hooks[attr_name] = l self.parent.close_plugin(self) self.on_close() def on_close(self): pass def requires_settings(self): return False def thread_jobs(self): return [] def is_enabled(self): return self.is_available() and self.config.get('use_'+self.name) is True def is_available(self): return True def can_user_disable(self): return True def settings_dialog(self): pass class DeviceUnpairableError(UserFacingException): pass class HardwarePluginLibraryUnavailable(Exception): pass class Device(NamedTuple): path: Union[str, bytes] interface_number: int id_: str product_key: Any # when using hid, often Tuple[int, int] usage_page: int transport_ui_string: str class DeviceInfo(NamedTuple): device: Device label: Optional[str] = None initialized: Optional[bool] = None exception: Optional[Exception] = None class HardwarePluginToScan(NamedTuple): name: str description: str plugin: Optional['HW_PluginBase'] exception: Optional[Exception] class DeviceMgr(ThreadJob): def __init__(self, config): ThreadJob.__init__(self) # Keyed by xpub. 
The value is the device id # has been paired, and None otherwise. self.xpub_ids = {} # A list of clients. The key is the client, the value is # a (path, id_) pair. self.clients = {} # What we recognise. Each entry is a (vendor_id, product_id) # pair. self.recognised_hardware = set() # Custom enumerate functions for devices we don't know about. self.enumerate_func = set() self.lock = threading.RLock() self.hid_lock = threading.RLock() self.config = config def thread_jobs(self): return [self] def run(self): with self.lock: clients = list(self.clients.keys()) cutoff = time.time() - self.config.get_session_timeout() for client in clients: client.timeout(cutoff) def register_devices(self, device_pairs): for pair in device_pairs: self.recognised_hardware.add(pair) def register_enumerate_func(self, func): self.enumerate_func.add(func) def create_client(self, device, handler, plugin): client = self.client_lookup(device.id_) if client: return client client = plugin.create_client(device, handler) if client: self.logger.info(f"Registering {client}") with self.lock: self.clients[client] = (device.path, device.id_) return client def xpub_id(self, xpub): with self.lock: return self.xpub_ids.get(xpub) def xpub_by_id(self, id_): with self.lock: for xpub, xpub_id in self.xpub_ids.items(): if xpub_id == id_: return xpub return None def unpair_xpub(self, xpub): with self.lock: if xpub not in self.xpub_ids: return _id = self.xpub_ids.pop(xpub) self._close_client(_id) def unpair_id(self, id_): xpub = self.xpub_by_id(id_) if xpub: self.unpair_xpub(xpub) else: self._close_client(id_) def _close_client(self, id_): client = self.client_lookup(id_) self.clients.pop(client, None) if client: client.close() def pair_xpub(self, xpub, id_): with self.lock: self.xpub_ids[xpub] = id_ def client_lookup(self, id_): with self.lock: for client, (path, client_id) in self.clients.items(): if client_id == id_: return client return None def client_by_id(self, id_): self.scan_devices() return self.client_lookup(id_) def client_for_keystore(self, plugin, handler, keystore, force_pair): self.logger.info("getting client for keystore") if handler is None: raise Exception(_("Handler not found for") + ' ' + plugin.name + '\n' + _("A library is probably missing.")) handler.update_status(False) devices = self.scan_devices() xpub = keystore.xpub derivation = keystore.get_derivation() client = self.client_by_xpub(plugin, xpub, handler, devices) if client is None and force_pair: info = self.select_device(plugin, handler, keystore, devices) client = self.force_pair_xpub(plugin, handler, info, xpub, derivation, devices) if client: handler.update_status(True) self.logger.info("end client for keystore") return client def client_by_xpub(self, plugin, xpub, handler, devices): _id = self.xpub_id(xpub) client = self.client_lookup(_id) if client: # from a prior scan. Replace to fix dialog parenting. client.handler = handler return client for device in devices: if device.id_ == _id: return self.create_client(device, handler, plugin) def force_pair_xpub(self, plugin, handler, info, xpub, derivation, devices): # The wallet has not been previously paired, so let the user # choose an unpaired device and compare its first address. 
xtype = bip32.xpub_type(xpub) client = self.client_lookup(info.device.id_) if client and client.is_pairable(): # See comment above for same code client.handler = handler # This will trigger a PIN/passphrase entry request try: client_xpub = client.get_xpub(derivation, xtype) except (UserCancelled, RuntimeError): # Bad / cancelled PIN / passphrase client_xpub = None if client_xpub == xpub: self.pair_xpub(xpub, info.device.id_) return client # The user input has wrong PIN or passphrase, or cancelled input, # or it is not pairable raise DeviceUnpairableError( _('Electrum cannot pair with your {}.\n\n' 'Before you request bitcoins to be sent to addresses in this ' 'wallet, ensure you can pair with your device, or that you have ' 'its seed (and passphrase, if any). Otherwise all bitcoins you ' 'receive will be unspendable.').format(plugin.device)) def unpaired_device_infos(self, handler, plugin: 'HW_PluginBase', devices=None, include_failing_clients=False): if not plugin.libraries_available: message = plugin.get_library_not_available_message() raise HardwarePluginLibraryUnavailable(message) if devices is None: devices = self.scan_devices() devices = [dev for dev in devices if not self.xpub_by_id(dev.id_)] infos = [] for device in devices: if device.product_key not in plugin.DEVICE_IDS: continue try: client = self.create_client(device, handler, plugin) except Exception as e: self.logger.error(f'failed to create client for {plugin.name} at {device.path}: {repr(e)}') if include_failing_clients: infos.append(DeviceInfo(device=device, exception=e)) continue if not client: continue infos.append(DeviceInfo(device=device, label=client.label(), initialized=client.is_initialized())) return infos def select_device(self, plugin, handler, keystore, devices=None): while True: infos = self.unpaired_device_infos(handler, plugin, devices) if infos: break msg = _('Please insert your {}').format(plugin.device) if keystore.label: msg += ' ({})'.format(keystore.label) msg += '. 
{}\n\n{}'.format( _('Verify the cable is connected and that ' 'no other application is using it.'), _('Try to connect again?') ) if not handler.yes_no_question(msg): raise UserCancelled() devices = None if len(infos) == 1: return infos[0] # select device by label for info in infos: if info.label == keystore.label: return info msg = _("Please select which {} device to use:").format(plugin.device) descriptions = [str(info.label) + ' (%s)'%(_("initialized") if info.initialized else _("wiped")) for info in infos] c = handler.query_choice(msg, descriptions) if c is None: raise UserCancelled() info = infos[c] # save new label keystore.set_label(info.label) if handler.win.wallet is not None: handler.win.wallet.save_keystore() return info def _scan_devices_with_hid(self): try: import hid except ImportError: return [] with self.hid_lock: hid_list = hid.enumerate(0, 0) devices = [] for d in hid_list: product_key = (d['vendor_id'], d['product_id']) if product_key in self.recognised_hardware: # Older versions of hid don't provide interface_number interface_number = d.get('interface_number', -1) usage_page = d['usage_page'] id_ = d['serial_number'] if len(id_) == 0: id_ = str(d['path']) id_ += str(interface_number) + str(usage_page) devices.append(Device(path=d['path'], interface_number=interface_number, id_=id_, product_key=product_key, usage_page=usage_page, transport_ui_string='hid')) return devices def scan_devices(self): self.logger.info("scanning devices...") devices = self._scan_devices_with_hid() # Let plugin handlers enumerate devices we don't know about for f in self.enumerate_func: try: new_devices = f() except BaseException as e: self.logger.error('custom device enum failed. func {}, error {}' .format(str(f), repr(e))) else: devices.extend(new_devices) pairs = [(dev.path, dev.id_) for dev in devices] disconnected_ids = [] with self.lock: connected = {} for client, pair in self.clients.items(): if pair in pairs and client.has_usable_connection_with_device(): connected[client] = pair else: disconnected_ids.append(pair[1]) self.clients = connected for id_ in disconnected_ids: self.unpair_id(id_) return devices
true
true
f700f0e1159085a7fc47b72ba8e35fe3cab0e1ca
2,830
py
Python
roblox/groups.py
Warhawk947/ro.py
dac29116ef72f577d2e086e3297a79201d58c895
[ "MIT" ]
1
2021-11-25T02:29:12.000Z
2021-11-25T02:29:12.000Z
roblox/groups.py
Warhawk947/ro.py
dac29116ef72f577d2e086e3297a79201d58c895
[ "MIT" ]
null
null
null
roblox/groups.py
Warhawk947/ro.py
dac29116ef72f577d2e086e3297a79201d58c895
[ "MIT" ]
null
null
null
"""
Contains classes related to Roblox group data and parsing.
"""

from typing import Optional, Tuple

from .bases.basegroup import BaseGroup
from .partials.partialuser import PartialUser
from .shout import Shout
from .utilities.shared import ClientSharedObject


class Group(BaseGroup):
    """
    Represents a Join Request

    Attributes:
        _shared: The shared object, which is passed to all objects this client generates.
        id: the id of the group.
        name: name of the group.
        description: description of the group.
        owner: player who owns the group.
        shout: the current group shout.
        member_count: about of members in the group.
        is_builders_club_only: can only people with builder club join.
        public_entry_allowed: can you join without your join request having to be accepted.
        is_locked: Is the group locked?
    """

    def __init__(self, shared: ClientSharedObject, data: dict):
        """
        Arguments:
            data: The data we get back from the endpoint.
            shared: The shared object, which is passed to all objects this client generates.
        """
        super().__init__(shared, data["id"])

        self._shared: ClientSharedObject = shared

        self.id: int = data["id"]
        self.name: str = data["name"]
        self.description: str = data["description"]
        self.owner: PartialUser = PartialUser(shared=shared, data=data["owner"])
        self.shout: Optional[Shout] = data["shout"] and Shout(
            shared=self._shared,
            data=data["shout"]
        ) or None
        self.member_count: int = data["memberCount"]
        self.is_builders_club_only: bool = data["isBuildersClubOnly"]
        self.public_entry_allowed: bool = data["publicEntryAllowed"]
        self.is_locked: bool = data.get("isLocked") or False

    def __repr__(self):
        return f"<{self.__class__.__name__} id={self.id} name={self.name!r} owner={self.owner}>"

    async def update_shout(self, message: str, update_self: bool = True) -> Tuple[Optional[Shout], Optional[Shout]]:
        """
        Updates the shout.

        Arguments:
            message: The new shout message.
            update_self: Whether to update self.shout automatically.
        """
        shout_response = await self._requests.patch(
            url=self._shared.url_generator.get_url("groups", f"v1/groups/{self.id}/status"),
            json={
                "message": message
            }
        )

        shout_data = shout_response.json()

        old_shout: Optional[Shout] = self.shout
        new_shout: Optional[Shout] = shout_data and Shout(
            shared=self._shared,
            data=shout_data
        ) or None

        if update_self:
            self.shout = new_shout

        return old_shout, new_shout
33.294118
116
0.631449
from typing import Optional, Tuple

from .bases.basegroup import BaseGroup
from .partials.partialuser import PartialUser
from .shout import Shout
from .utilities.shared import ClientSharedObject


class Group(BaseGroup):
    def __init__(self, shared: ClientSharedObject, data: dict):
        super().__init__(shared, data["id"])

        self._shared: ClientSharedObject = shared

        self.id: int = data["id"]
        self.name: str = data["name"]
        self.description: str = data["description"]
        self.owner: PartialUser = PartialUser(shared=shared, data=data["owner"])
        self.shout: Optional[Shout] = data["shout"] and Shout(
            shared=self._shared,
            data=data["shout"]
        ) or None
        self.member_count: int = data["memberCount"]
        self.is_builders_club_only: bool = data["isBuildersClubOnly"]
        self.public_entry_allowed: bool = data["publicEntryAllowed"]
        self.is_locked: bool = data.get("isLocked") or False

    def __repr__(self):
        return f"<{self.__class__.__name__} id={self.id} name={self.name!r} owner={self.owner}>"

    async def update_shout(self, message: str, update_self: bool = True) -> Tuple[Optional[Shout], Optional[Shout]]:
        shout_response = await self._requests.patch(
            url=self._shared.url_generator.get_url("groups", f"v1/groups/{self.id}/status"),
            json={
                "message": message
            }
        )

        shout_data = shout_response.json()

        old_shout: Optional[Shout] = self.shout
        new_shout: Optional[Shout] = shout_data and Shout(
            shared=self._shared,
            data=shout_data
        ) or None

        if update_self:
            self.shout = new_shout

        return old_shout, new_shout
true
true
f700f0f139cfbd083c127c5a1431fb7583bf64a6
537
py
Python
tests/input/custom_serializer.py
larribas/dagger
1441fed570e535a43fdc23142b0c1a897ad9e992
[ "Apache-2.0" ]
9
2021-09-06T14:22:38.000Z
2022-02-08T07:48:39.000Z
tests/input/custom_serializer.py
larribas/dagger
1441fed570e535a43fdc23142b0c1a897ad9e992
[ "Apache-2.0" ]
36
2021-09-04T06:20:19.000Z
2021-12-26T17:54:59.000Z
tests/input/custom_serializer.py
larribas/dagger
1441fed570e535a43fdc23142b0c1a897ad9e992
[ "Apache-2.0" ]
4
2021-09-06T08:07:19.000Z
2021-10-18T19:13:18.000Z
# noqa
from typing import Any, BinaryIO


class CustomSerializer:
    """Custom serializer implementation to test the injection of different serialization strategies to an input."""

    @property
    def extension(self) -> str: # noqa
        return "ext"

    def serialize(self, value: Any, writer: BinaryIO): # noqa
        raise NotImplementedError()

    def deserialize(self, reader: BinaryIO) -> Any: # noqa
        raise NotImplementedError()

    def __repr__(self) -> str: # noqa
        return "CustomSerializerInstance"
26.85
115
0.67784
from typing import Any, BinaryIO


class CustomSerializer:
    @property
    def extension(self) -> str:
        return "ext"

    def serialize(self, value: Any, writer: BinaryIO):
        raise NotImplementedError()

    def deserialize(self, reader: BinaryIO) -> Any:
        raise NotImplementedError()

    def __repr__(self) -> str:
        return "CustomSerializerInstance"
true
true
f700f14379bb536b6a19366d6ecc906744bae9ed
13,352
py
Python
env/Lib/site-packages/imagesize.py
camilorojase/GithubActionsTest
7b448e365d31ec84bf55b68033a88bb00e8e5358
[ "MIT" ]
2
2022-01-19T02:33:11.000Z
2022-01-19T02:33:13.000Z
env/Lib/site-packages/imagesize.py
camilorojase/GithubActionsTest
7b448e365d31ec84bf55b68033a88bb00e8e5358
[ "MIT" ]
null
null
null
env/Lib/site-packages/imagesize.py
camilorojase/GithubActionsTest
7b448e365d31ec84bf55b68033a88bb00e8e5358
[ "MIT" ]
null
null
null
import io import os import re import struct from xml.etree import ElementTree _UNIT_KM = -3 _UNIT_100M = -2 _UNIT_10M = -1 _UNIT_1M = 0 _UNIT_10CM = 1 _UNIT_CM = 2 _UNIT_MM = 3 _UNIT_0_1MM = 4 _UNIT_0_01MM = 5 _UNIT_UM = 6 _UNIT_INCH = 6 _TIFF_TYPE_SIZES = { 1: 1, 2: 1, 3: 2, 4: 4, 5: 8, 6: 1, 7: 1, 8: 2, 9: 4, 10: 8, 11: 4, 12: 8, } def _convertToDPI(density, unit): if unit == _UNIT_KM: return int(density * 0.0000254 + 0.5) elif unit == _UNIT_100M: return int(density * 0.000254 + 0.5) elif unit == _UNIT_10M: return int(density * 0.00254 + 0.5) elif unit == _UNIT_1M: return int(density * 0.0254 + 0.5) elif unit == _UNIT_10CM: return int(density * 0.254 + 0.5) elif unit == _UNIT_CM: return int(density * 2.54 + 0.5) elif unit == _UNIT_MM: return int(density * 25.4 + 0.5) elif unit == _UNIT_0_1MM: return density * 254 elif unit == _UNIT_0_01MM: return density * 2540 elif unit == _UNIT_UM: return density * 25400 return density def _convertToPx(value): matched = re.match(r"(\d+(?:\.\d+)?)?([a-z]*)$", value) if not matched: raise ValueError("unknown length value: %s" % value) length, unit = matched.groups() if unit == "": return float(length) elif unit == "cm": return float(length) * 96 / 2.54 elif unit == "mm": return float(length) * 96 / 2.54 / 10 elif unit == "in": return float(length) * 96 elif unit == "pc": return float(length) * 96 / 6 elif unit == "pt": return float(length) * 96 / 6 elif unit == "px": return float(length) raise ValueError("unknown unit type: %s" % unit) def get(filepath): """ Return (width, height) for a given img file content no requirements :type filepath: Union[bytes, str, pathlib.Path] :rtype Tuple[int, int] """ height = -1 width = -1 if isinstance(filepath, io.BytesIO): # file-like object fhandle = filepath else: fhandle = open(filepath, 'rb') try: head = fhandle.read(24) size = len(head) # handle GIFs if size >= 10 and head[:6] in (b'GIF87a', b'GIF89a'): # Check to see if content_type is correct try: width, height = struct.unpack("<hh", head[6:10]) except struct.error: raise ValueError("Invalid GIF file") # see png edition spec bytes are below chunk length then and finally the elif size >= 24 and head.startswith(b'\211PNG\r\n\032\n') and head[12:16] == b'IHDR': try: width, height = struct.unpack(">LL", head[16:24]) except struct.error: raise ValueError("Invalid PNG file") # Maybe this is for an older PNG version. elif size >= 16 and head.startswith(b'\211PNG\r\n\032\n'): # Check to see if we have the right content type try: width, height = struct.unpack(">LL", head[8:16]) except struct.error: raise ValueError("Invalid PNG file") # handle JPEGs elif size >= 2 and head.startswith(b'\377\330'): try: fhandle.seek(0) # Read 0xff next size = 2 ftype = 0 while not 0xc0 <= ftype <= 0xcf or ftype in [0xc4, 0xc8, 0xcc]: fhandle.seek(size, 1) byte = fhandle.read(1) while ord(byte) == 0xff: byte = fhandle.read(1) ftype = ord(byte) size = struct.unpack('>H', fhandle.read(2))[0] - 2 # We are at a SOFn block fhandle.seek(1, 1) # Skip `precision' byte. 
height, width = struct.unpack('>HH', fhandle.read(4)) except (struct.error, TypeError): raise ValueError("Invalid JPEG file") # handle JPEG2000s elif size >= 12 and head.startswith(b'\x00\x00\x00\x0cjP \r\n\x87\n'): fhandle.seek(48) try: height, width = struct.unpack('>LL', fhandle.read(8)) except struct.error: raise ValueError("Invalid JPEG2000 file") # handle big endian TIFF elif size >= 8 and head.startswith(b"\x4d\x4d\x00\x2a"): offset = struct.unpack('>L', head[4:8])[0] fhandle.seek(offset) ifdsize = struct.unpack(">H", fhandle.read(2))[0] for i in range(ifdsize): tag, datatype, count, data = struct.unpack(">HHLL", fhandle.read(12)) if tag == 256: if datatype == 3: width = int(data / 65536) elif datatype == 4: width = data else: raise ValueError("Invalid TIFF file: width column data type should be SHORT/LONG.") elif tag == 257: if datatype == 3: height = int(data / 65536) elif datatype == 4: height = data else: raise ValueError("Invalid TIFF file: height column data type should be SHORT/LONG.") if width != -1 and height != -1: break if width == -1 or height == -1: raise ValueError("Invalid TIFF file: width and/or height IDS entries are missing.") elif size >= 8 and head.startswith(b"\x49\x49\x2a\x00"): offset = struct.unpack('<L', head[4:8])[0] fhandle.seek(offset) ifdsize = struct.unpack("<H", fhandle.read(2))[0] for i in range(ifdsize): tag, datatype, count, data = struct.unpack("<HHLL", fhandle.read(12)) if tag == 256: width = data elif tag == 257: height = data if width != -1 and height != -1: break if width == -1 or height == -1: raise ValueError("Invalid TIFF file: width and/or height IDS entries are missing.") # handle little endian BigTiff elif size >= 8 and head.startswith(b"\x49\x49\x2b\x00"): bytesize_offset = struct.unpack('<L', head[4:8])[0] if bytesize_offset != 8: raise ValueError('Invalid BigTIFF file: Expected offset to be 8, found {} instead.'.format(offset)) offset = struct.unpack('<Q', head[8:16])[0] fhandle.seek(offset) ifdsize = struct.unpack("<Q", fhandle.read(8))[0] for i in range(ifdsize): tag, datatype, count, data = struct.unpack("<HHQQ", fhandle.read(20)) if tag == 256: width = data elif tag == 257: height = data if width != -1 and height != -1: break if width == -1 or height == -1: raise ValueError("Invalid BigTIFF file: width and/or height IDS entries are missing.") # handle SVGs elif size >= 5 and (head.startswith(b'<?xml') or head.startswith(b'<svg')): fhandle.seek(0) data = fhandle.read(1024) try: data = data.decode('utf-8') width = re.search(r'[^-]width="(.*?)"', data).group(1) height = re.search(r'[^-]height="(.*?)"', data).group(1) except Exception: raise ValueError("Invalid SVG file") width = _convertToPx(width) height = _convertToPx(height) # handle Netpbm elif head[:1] == b"P" and head[1:2] in b"123456": fhandle.seek(2) sizes = [] while True: next_chr = fhandle.read(1) if next_chr.isspace(): continue if next_chr == b"": raise ValueError("Invalid Netpbm file") if next_chr == b"#": fhandle.readline() continue if not next_chr.isdigit(): raise ValueError("Invalid character found on Netpbm file") size = next_chr next_chr = fhandle.read(1) while next_chr.isdigit(): size += next_chr next_chr = fhandle.read(1) sizes.append(int(size)) if len(sizes) == 2: break fhandle.seek(-1, os.SEEK_CUR) width, height = sizes finally: fhandle.close() return width, height def getDPI(filepath): """ Return (x DPI, y DPI) for a given img file content no requirements :type filepath: Union[bytes, str, pathlib.Path] :rtype Tuple[int, int] """ xDPI = -1 yDPI = -1 if not 
isinstance(filepath, bytes): filepath = str(filepath) with open(filepath, 'rb') as fhandle: head = fhandle.read(24) size = len(head) # handle GIFs # GIFs doesn't have density if size >= 10 and head[:6] in (b'GIF87a', b'GIF89a'): pass # see png edition spec bytes are below chunk length then and finally the elif size >= 24 and head.startswith(b'\211PNG\r\n\032\n'): chunkOffset = 8 chunk = head[8:] while True: chunkType = chunk[4:8] if chunkType == b'pHYs': try: xDensity, yDensity, unit = struct.unpack(">LLB", chunk[8:]) except struct.error: raise ValueError("Invalid PNG file") if unit: xDPI = _convertToDPI(xDensity, _UNIT_1M) yDPI = _convertToDPI(yDensity, _UNIT_1M) else: # no unit xDPI = xDensity yDPI = yDensity break elif chunkType == b'IDAT': break else: try: dataSize, = struct.unpack(">L", chunk[0:4]) except struct.error: raise ValueError("Invalid PNG file") chunkOffset += dataSize + 12 fhandle.seek(chunkOffset) chunk = fhandle.read(17) # handle JPEGs elif size >= 2 and head.startswith(b'\377\330'): try: fhandle.seek(0) # Read 0xff next size = 2 ftype = 0 while not 0xc0 <= ftype <= 0xcf: if ftype == 0xe0: # APP0 marker fhandle.seek(7, 1) unit, xDensity, yDensity = struct.unpack(">BHH", fhandle.read(5)) if unit == 1 or unit == 0: xDPI = xDensity yDPI = yDensity elif unit == 2: xDPI = _convertToDPI(xDensity, _UNIT_CM) yDPI = _convertToDPI(yDensity, _UNIT_CM) break fhandle.seek(size, 1) byte = fhandle.read(1) while ord(byte) == 0xff: byte = fhandle.read(1) ftype = ord(byte) size = struct.unpack('>H', fhandle.read(2))[0] - 2 except struct.error: raise ValueError("Invalid JPEG file") # handle JPEG2000s elif size >= 12 and head.startswith(b'\x00\x00\x00\x0cjP \r\n\x87\n'): fhandle.seek(32) # skip JP2 image header box headerSize = struct.unpack('>L', fhandle.read(4))[0] - 8 fhandle.seek(4, 1) foundResBox = False try: while headerSize > 0: boxHeader = fhandle.read(8) boxType = boxHeader[4:] if boxType == b'res ': # find resolution super box foundResBox = True headerSize -= 8 break boxSize, = struct.unpack('>L', boxHeader[:4]) fhandle.seek(boxSize - 8, 1) headerSize -= boxSize if foundResBox: while headerSize > 0: boxHeader = fhandle.read(8) boxType = boxHeader[4:] if boxType == b'resd': # Display resolution box yDensity, xDensity, yUnit, xUnit = struct.unpack(">HHBB", fhandle.read(10)) xDPI = _convertToDPI(xDensity, xUnit) yDPI = _convertToDPI(yDensity, yUnit) break boxSize, = struct.unpack('>L', boxHeader[:4]) fhandle.seek(boxSize - 8, 1) headerSize -= boxSize except struct.error as e: raise ValueError("Invalid JPEG2000 file") return xDPI, yDPI
36.580822
115
0.479778
import io import os import re import struct from xml.etree import ElementTree _UNIT_KM = -3 _UNIT_100M = -2 _UNIT_10M = -1 _UNIT_1M = 0 _UNIT_10CM = 1 _UNIT_CM = 2 _UNIT_MM = 3 _UNIT_0_1MM = 4 _UNIT_0_01MM = 5 _UNIT_UM = 6 _UNIT_INCH = 6 _TIFF_TYPE_SIZES = { 1: 1, 2: 1, 3: 2, 4: 4, 5: 8, 6: 1, 7: 1, 8: 2, 9: 4, 10: 8, 11: 4, 12: 8, } def _convertToDPI(density, unit): if unit == _UNIT_KM: return int(density * 0.0000254 + 0.5) elif unit == _UNIT_100M: return int(density * 0.000254 + 0.5) elif unit == _UNIT_10M: return int(density * 0.00254 + 0.5) elif unit == _UNIT_1M: return int(density * 0.0254 + 0.5) elif unit == _UNIT_10CM: return int(density * 0.254 + 0.5) elif unit == _UNIT_CM: return int(density * 2.54 + 0.5) elif unit == _UNIT_MM: return int(density * 25.4 + 0.5) elif unit == _UNIT_0_1MM: return density * 254 elif unit == _UNIT_0_01MM: return density * 2540 elif unit == _UNIT_UM: return density * 25400 return density def _convertToPx(value): matched = re.match(r"(\d+(?:\.\d+)?)?([a-z]*)$", value) if not matched: raise ValueError("unknown length value: %s" % value) length, unit = matched.groups() if unit == "": return float(length) elif unit == "cm": return float(length) * 96 / 2.54 elif unit == "mm": return float(length) * 96 / 2.54 / 10 elif unit == "in": return float(length) * 96 elif unit == "pc": return float(length) * 96 / 6 elif unit == "pt": return float(length) * 96 / 6 elif unit == "px": return float(length) raise ValueError("unknown unit type: %s" % unit) def get(filepath): height = -1 width = -1 if isinstance(filepath, io.BytesIO): fhandle = filepath else: fhandle = open(filepath, 'rb') try: head = fhandle.read(24) size = len(head) if size >= 10 and head[:6] in (b'GIF87a', b'GIF89a'): try: width, height = struct.unpack("<hh", head[6:10]) except struct.error: raise ValueError("Invalid GIF file") elif size >= 24 and head.startswith(b'\211PNG\r\n\032\n') and head[12:16] == b'IHDR': try: width, height = struct.unpack(">LL", head[16:24]) except struct.error: raise ValueError("Invalid PNG file") elif size >= 16 and head.startswith(b'\211PNG\r\n\032\n'): try: width, height = struct.unpack(">LL", head[8:16]) except struct.error: raise ValueError("Invalid PNG file") elif size >= 2 and head.startswith(b'\377\330'): try: fhandle.seek(0) size = 2 ftype = 0 while not 0xc0 <= ftype <= 0xcf or ftype in [0xc4, 0xc8, 0xcc]: fhandle.seek(size, 1) byte = fhandle.read(1) while ord(byte) == 0xff: byte = fhandle.read(1) ftype = ord(byte) size = struct.unpack('>H', fhandle.read(2))[0] - 2 fhandle.seek(1, 1) height, width = struct.unpack('>HH', fhandle.read(4)) except (struct.error, TypeError): raise ValueError("Invalid JPEG file") # handle JPEG2000s elif size >= 12 and head.startswith(b'\x00\x00\x00\x0cjP \r\n\x87\n'): fhandle.seek(48) try: height, width = struct.unpack('>LL', fhandle.read(8)) except struct.error: raise ValueError("Invalid JPEG2000 file") # handle big endian TIFF elif size >= 8 and head.startswith(b"\x4d\x4d\x00\x2a"): offset = struct.unpack('>L', head[4:8])[0] fhandle.seek(offset) ifdsize = struct.unpack(">H", fhandle.read(2))[0] for i in range(ifdsize): tag, datatype, count, data = struct.unpack(">HHLL", fhandle.read(12)) if tag == 256: if datatype == 3: width = int(data / 65536) elif datatype == 4: width = data else: raise ValueError("Invalid TIFF file: width column data type should be SHORT/LONG.") elif tag == 257: if datatype == 3: height = int(data / 65536) elif datatype == 4: height = data else: raise ValueError("Invalid TIFF file: height column data type should be 
SHORT/LONG.") if width != -1 and height != -1: break if width == -1 or height == -1: raise ValueError("Invalid TIFF file: width and/or height IDS entries are missing.") elif size >= 8 and head.startswith(b"\x49\x49\x2a\x00"): offset = struct.unpack('<L', head[4:8])[0] fhandle.seek(offset) ifdsize = struct.unpack("<H", fhandle.read(2))[0] for i in range(ifdsize): tag, datatype, count, data = struct.unpack("<HHLL", fhandle.read(12)) if tag == 256: width = data elif tag == 257: height = data if width != -1 and height != -1: break if width == -1 or height == -1: raise ValueError("Invalid TIFF file: width and/or height IDS entries are missing.") # handle little endian BigTiff elif size >= 8 and head.startswith(b"\x49\x49\x2b\x00"): bytesize_offset = struct.unpack('<L', head[4:8])[0] if bytesize_offset != 8: raise ValueError('Invalid BigTIFF file: Expected offset to be 8, found {} instead.'.format(offset)) offset = struct.unpack('<Q', head[8:16])[0] fhandle.seek(offset) ifdsize = struct.unpack("<Q", fhandle.read(8))[0] for i in range(ifdsize): tag, datatype, count, data = struct.unpack("<HHQQ", fhandle.read(20)) if tag == 256: width = data elif tag == 257: height = data if width != -1 and height != -1: break if width == -1 or height == -1: raise ValueError("Invalid BigTIFF file: width and/or height IDS entries are missing.") # handle SVGs elif size >= 5 and (head.startswith(b'<?xml') or head.startswith(b'<svg')): fhandle.seek(0) data = fhandle.read(1024) try: data = data.decode('utf-8') width = re.search(r'[^-]width="(.*?)"', data).group(1) height = re.search(r'[^-]height="(.*?)"', data).group(1) except Exception: raise ValueError("Invalid SVG file") width = _convertToPx(width) height = _convertToPx(height) # handle Netpbm elif head[:1] == b"P" and head[1:2] in b"123456": fhandle.seek(2) sizes = [] while True: next_chr = fhandle.read(1) if next_chr.isspace(): continue if next_chr == b"": raise ValueError("Invalid Netpbm file") if next_chr == b"#": fhandle.readline() continue if not next_chr.isdigit(): raise ValueError("Invalid character found on Netpbm file") size = next_chr next_chr = fhandle.read(1) while next_chr.isdigit(): size += next_chr next_chr = fhandle.read(1) sizes.append(int(size)) if len(sizes) == 2: break fhandle.seek(-1, os.SEEK_CUR) width, height = sizes finally: fhandle.close() return width, height def getDPI(filepath): xDPI = -1 yDPI = -1 if not isinstance(filepath, bytes): filepath = str(filepath) with open(filepath, 'rb') as fhandle: head = fhandle.read(24) size = len(head) # handle GIFs # GIFs doesn't have density if size >= 10 and head[:6] in (b'GIF87a', b'GIF89a'): pass elif size >= 24 and head.startswith(b'\211PNG\r\n\032\n'): chunkOffset = 8 chunk = head[8:] while True: chunkType = chunk[4:8] if chunkType == b'pHYs': try: xDensity, yDensity, unit = struct.unpack(">LLB", chunk[8:]) except struct.error: raise ValueError("Invalid PNG file") if unit: xDPI = _convertToDPI(xDensity, _UNIT_1M) yDPI = _convertToDPI(yDensity, _UNIT_1M) else: xDPI = xDensity yDPI = yDensity break elif chunkType == b'IDAT': break else: try: dataSize, = struct.unpack(">L", chunk[0:4]) except struct.error: raise ValueError("Invalid PNG file") chunkOffset += dataSize + 12 fhandle.seek(chunkOffset) chunk = fhandle.read(17) elif size >= 2 and head.startswith(b'\377\330'): try: fhandle.seek(0) size = 2 ftype = 0 while not 0xc0 <= ftype <= 0xcf: if ftype == 0xe0: fhandle.seek(7, 1) unit, xDensity, yDensity = struct.unpack(">BHH", fhandle.read(5)) if unit == 1 or unit == 0: xDPI = xDensity yDPI = yDensity 
elif unit == 2: xDPI = _convertToDPI(xDensity, _UNIT_CM) yDPI = _convertToDPI(yDensity, _UNIT_CM) break fhandle.seek(size, 1) byte = fhandle.read(1) while ord(byte) == 0xff: byte = fhandle.read(1) ftype = ord(byte) size = struct.unpack('>H', fhandle.read(2))[0] - 2 except struct.error: raise ValueError("Invalid JPEG file") elif size >= 12 and head.startswith(b'\x00\x00\x00\x0cjP \r\n\x87\n'): fhandle.seek(32) headerSize = struct.unpack('>L', fhandle.read(4))[0] - 8 fhandle.seek(4, 1) foundResBox = False try: while headerSize > 0: boxHeader = fhandle.read(8) boxType = boxHeader[4:] if boxType == b'res ': foundResBox = True headerSize -= 8 break boxSize, = struct.unpack('>L', boxHeader[:4]) fhandle.seek(boxSize - 8, 1) headerSize -= boxSize if foundResBox: while headerSize > 0: boxHeader = fhandle.read(8) boxType = boxHeader[4:] if boxType == b'resd': yDensity, xDensity, yUnit, xUnit = struct.unpack(">HHBB", fhandle.read(10)) xDPI = _convertToDPI(xDensity, xUnit) yDPI = _convertToDPI(yDensity, yUnit) break boxSize, = struct.unpack('>L', boxHeader[:4]) fhandle.seek(boxSize - 8, 1) headerSize -= boxSize except struct.error as e: raise ValueError("Invalid JPEG2000 file") return xDPI, yDPI
true
true
f700f1578f586a50af86ffe0c113eb3c249ef6c2
104
py
Python
molsysmt/_private/digestion/target.py
uibcdf/MolModSAKs
02263fb710693f0c41817f1a318459b35fd5462a
[ "MIT" ]
null
null
null
molsysmt/_private/digestion/target.py
uibcdf/MolModSAKs
02263fb710693f0c41817f1a318459b35fd5462a
[ "MIT" ]
null
null
null
molsysmt/_private/digestion/target.py
uibcdf/MolModSAKs
02263fb710693f0c41817f1a318459b35fd5462a
[ "MIT" ]
null
null
null
def digest_target(target):

    from .element import digest_element

    return digest_element(target)
14.857143
39
0.759615
def digest_target(target):

    from .element import digest_element

    return digest_element(target)
true
true
f700f167a8cab5f46c0dbe65870fac00e29032be
6,073
py
Python
src/kerod/core/sampling_ops.py
LSanselme/kerod
cb52775ed501cbe4bd5fc0f22ec0359ca1d5f902
[ "MIT" ]
35
2020-06-14T11:20:54.000Z
2022-03-02T14:42:25.000Z
src/kerod/core/sampling_ops.py
LSanselme/kerod
cb52775ed501cbe4bd5fc0f22ec0359ca1d5f902
[ "MIT" ]
9
2021-02-03T15:01:39.000Z
2022-03-16T01:46:38.000Z
src/kerod/core/sampling_ops.py
LSanselme/kerod
cb52775ed501cbe4bd5fc0f22ec0359ca1d5f902
[ "MIT" ]
11
2021-02-02T09:41:16.000Z
2022-03-01T15:43:31.000Z
# Copyright 2017 The TensorFlow Authors and modified by Emilien Garreau. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Method to subsample minibatches by balancing positives and negatives. Subsamples minibatches based on a pre-specified positive fraction in range [0,1]. The class presumes there are many more negatives than positive examples: if the desired sample_size cannot be achieved with the pre-specified positive fraction, it fills the rest with negative examples. If this is not sufficient for obtaining the desired sample_size, it returns fewer examples. The main function to call is Subsample(self, indicator, labels). For convenience one can also call SubsampleWeights(self, weights, labels) which is defined in the minibatch_sampler base class. When is_static is True, it implements a method that guarantees static shapes. It also ensures the length of output of the subsample is always sample_size, even when number of examples set to True in indicator is less than sample_size. """ import tensorflow as tf from kerod.utils import ops def subsample_indicator(indicator, num_samples): """Subsample indicator vector. Given a boolean indicator vector with M elements set to `True`, the function assigns all but `num_samples` of these previously `True` elements to `False`. If `num_samples` is greater than M, the original indicator vector is returned. Arguments: - *indicator*: a 1-dimensional boolean tensor indicating which elements are allowed to be sampled and which are not. - *num_samples*: int32 scalar tensor Returns: A boolean tensor with the same shape as input (indicator) tensor """ indices = tf.where(indicator) indices = tf.random.shuffle(indices) indices = tf.reshape(indices, [-1]) num_samples = tf.minimum(tf.size(indices), num_samples) selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1])) selected_indicator = ops.indices_to_dense_vector(selected_indices, tf.shape(indicator)[0]) return tf.equal(selected_indicator, 1) def sample_balanced_positive_negative(indicator, sample_size, labels, positive_fraction=0.5): """Subsamples minibatches to a desired balance of positives and negatives. Arguments: - *indicator*: boolean tensor of shape [N] whose True entries can be sampled. - *sample_size*: desired batch size. If None, keeps all positive samples and randomly selects negative samples so that the positive sample fraction matches positive_fraction. - *labels*: boolean tensor of shape [N] denoting positive(=True) and negative (=False) examples. - *positive_fraction*: desired fraction of positive examples (scalar in [0,1]) in the batch. Returns: *sampled_idx_indicator*: boolean tensor of shape [N], True for entries which are sampled. 
""" negative_idx = tf.logical_not(labels) positive_idx = tf.logical_and(labels, indicator) negative_idx = tf.logical_and(negative_idx, indicator) # Sample positive and negative samples separately if sample_size is None: max_num_pos = tf.reduce_sum(tf.cast(positive_idx, dtype=tf.int32)) else: max_num_pos = int(positive_fraction * sample_size) sampled_pos_idx = subsample_indicator(positive_idx, max_num_pos) num_sampled_pos = tf.reduce_sum(tf.cast(sampled_pos_idx, tf.int32)) if sample_size is None: negative_positive_ratio = (1 - positive_fraction) / positive_fraction max_num_neg = tf.cast(negative_positive_ratio * tf.cast(num_sampled_pos, dtype=tf.float32), dtype=tf.int32) else: max_num_neg = sample_size - num_sampled_pos sampled_neg_idx = subsample_indicator(negative_idx, max_num_neg) return tf.logical_or(sampled_pos_idx, sampled_neg_idx) def batch_sample_balanced_positive_negative(indicators, sample_size, labels, positive_fraction=0.5, dtype=tf.float32): """Subsamples minibatches to a desired balance of positives and negatives. Arguments: - *indicator*: boolean tensor of shape [batch_size, N] whose True entries can be sampled. - *sample_size*: desired batch size. If None, keeps all positive samples and randomly selects negative samples so that the positive sample fraction matches positive_fraction. - *labels*: boolean tensor of shape [batch_size, N] denoting positive(=True) and negative (=False) examples. - *positive_fraction*: desired fraction of positive examples (scalar in [0,1]) in the batch. Returns: A boolean tensor of shape [M, N], True for entries which are sampled. """ def _minibatch_subsample_fn(inputs): indicators, targets = inputs return sample_balanced_positive_negative(tf.cast(indicators, tf.bool), sample_size, tf.cast(targets, tf.bool), positive_fraction=positive_fraction) return tf.cast(tf.map_fn(_minibatch_subsample_fn, [indicators, labels], dtype=tf.bool, parallel_iterations=16, back_prop=True), dtype=dtype)
42.468531
99
0.680883
import tensorflow as tf

from kerod.utils import ops


def subsample_indicator(indicator, num_samples):
    indices = tf.where(indicator)
    indices = tf.random.shuffle(indices)
    indices = tf.reshape(indices, [-1])

    num_samples = tf.minimum(tf.size(indices), num_samples)
    selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1]))

    selected_indicator = ops.indices_to_dense_vector(selected_indices, tf.shape(indicator)[0])

    return tf.equal(selected_indicator, 1)


def sample_balanced_positive_negative(indicator, sample_size, labels, positive_fraction=0.5):
    negative_idx = tf.logical_not(labels)
    positive_idx = tf.logical_and(labels, indicator)
    negative_idx = tf.logical_and(negative_idx, indicator)

    if sample_size is None:
        max_num_pos = tf.reduce_sum(tf.cast(positive_idx, dtype=tf.int32))
    else:
        max_num_pos = int(positive_fraction * sample_size)

    sampled_pos_idx = subsample_indicator(positive_idx, max_num_pos)
    num_sampled_pos = tf.reduce_sum(tf.cast(sampled_pos_idx, tf.int32))

    if sample_size is None:
        negative_positive_ratio = (1 - positive_fraction) / positive_fraction
        max_num_neg = tf.cast(negative_positive_ratio * tf.cast(num_sampled_pos, dtype=tf.float32),
                              dtype=tf.int32)
    else:
        max_num_neg = sample_size - num_sampled_pos

    sampled_neg_idx = subsample_indicator(negative_idx, max_num_neg)

    return tf.logical_or(sampled_pos_idx, sampled_neg_idx)


def batch_sample_balanced_positive_negative(indicators,
                                            sample_size,
                                            labels,
                                            positive_fraction=0.5,
                                            dtype=tf.float32):
    def _minibatch_subsample_fn(inputs):
        indicators, targets = inputs
        return sample_balanced_positive_negative(tf.cast(indicators, tf.bool),
                                                 sample_size,
                                                 tf.cast(targets, tf.bool),
                                                 positive_fraction=positive_fraction)

    return tf.cast(tf.map_fn(_minibatch_subsample_fn, [indicators, labels],
                             dtype=tf.bool,
                             parallel_iterations=16,
                             back_prop=True),
                   dtype=dtype)
true
true
f700f20444454593e2536cb9e2591f4eae5a213c
7,178
py
Python
src/config.py
volovodenko/English
860ae0f971909b9aa299c193ea7d0161c88d0b22
[ "Apache-2.0" ]
null
null
null
src/config.py
volovodenko/English
860ae0f971909b9aa299c193ea7d0161c88d0b22
[ "Apache-2.0" ]
null
null
null
src/config.py
volovodenko/English
860ae0f971909b9aa299c193ea7d0161c88d0b22
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-

import os
import re
import json
import os.path
import unittest

reg_cmnt = re.compile(r"/\*.*?\*/", re.DOTALL)


class Config:
    "Working with the configuration file"

    def __init__(self, main_path=None, user_path=None):
        if main_path is None:
            self._main_path = "config.json5"
        else:
            self._main_path = main_path
        if user_path is None:
            self._user_path = "config_user.json5"
        else:
            self._user_path = user_path
        self._cfg_dict = {}

    def __getitem__(self, key):
        return self._cfg_dict[key]

    def __len__(self):
        return len(self._cfg_dict)

    def _load_json(self, path):
        data = {}
        if os.path.exists(path):
            txt = open(path).read()
            txt = reg_cmnt.sub("", txt)  # remove comments
            data = json.loads(txt)
        return data

    def _set_default(self, cfg):
        cfg["path_to_dict"] = cfg.get("path_to_dict", "dict.json")
        cfg["path_to_stat"] = cfg.get("path_to_stat", "statistic.json")
        cfg["words_per_lesson"] = int(cfg.get("words_per_lesson", 5))
        cfg["CntStudyWords"] = int(cfg.get("CntStudyWords", 50))
        cfg["MinPercent"] = float(cfg.get("MinPercent", 97.0))
        cfg["MinSuccessCnt"] = int(cfg.get("MinSuccessCnt", 10))
        cfg["retry_time"] = int(cfg.get("retry_time", 1800))
        cfg["hide_transcription"] = cfg.get("hide_transcription", "no")
        cfg["start_time_delay"] = int(cfg.get("start_time_delay", 1))
        cfg["stat_count_row"] = int(cfg.get("stat_count_row", 200))
        cfg["right_answer_percent"] = float(cfg.get("right_answer_percent", 10.0))
        cfg["wrong_answer_percent"] = float(cfg.get("wrong_answer_percent", 40.0))
        cfg["empty_answer_is_error"] = cfg.get("empty_answer_is_error", "no")
        cfg["internet_dictionary_url"] = cfg.get(
            "internet_dictionary_url",
            {"EN_RU": "http://slovari.yandex.ru/{word}/en-ru/#lingvo/",
             "RU_EN": "http://slovari.yandex.ru/{word}/en/#lingvo/"})

    def create_default_user_config(self):
        if not os.path.isfile(self._user_path):
            txt = "{\n /*\n User config\n */\n\n}"
            open(self._user_path, "wt").write(txt)

    def reload(self):
        self._cfg_dict = {}
        self._cfg_dict.update(self._load_json(self._main_path))
        self._cfg_dict.update(self._load_json(self._user_path))
        self._set_default(self._cfg_dict)
        return self._cfg_dict

    def get_dict(self):
        return self._cfg_dict


class ConfigTestCase(unittest.TestCase):
    "Test suite for the Config class"

    def setUp(self):
        if os.path.isfile("test_config_user.json"):
            os.remove("test_config_user.json")

    def tearDown(self):
        if os.path.isfile("test_config_user.json"):
            os.remove("test_config_user.json")

    def equal_cfg(self, cfg, test_dict):
        for key, val in test_dict.items():
            self.assertEqual(cfg[key], val)
        self.assertEqual(len(cfg), 14)

    def test_main(self):
        "Testing the loading of the main configuration file"
        test_dict = {
            "path_to_dict": "dict.json",
            "path_to_stat": "statistic.json",
            "words_per_lesson": 5,
            "CntStudyWords": 50,
            "MinPercent": 97.0,
            "MinSuccessCnt": 10,
            "retry_time": 1800,
            "hide_transcription": "no",
            "start_time_delay": 1,
            "stat_count_row": 200,
            "right_answer_percent": 10.0,
            "wrong_answer_percent": 40.0,
            "empty_answer_is_error": "no",
            "internet_dictionary_url": {
                "EN_RU": "http://slovari.yandex.ru/{word}/en-ru/#lingvo/",
                "RU_EN": "http://slovari.yandex.ru/{word}/en/#lingvo/"}}

        cfg = Config("config.json5", "fake_config_user.json")
        cfg.reload()
        self.equal_cfg(cfg, test_dict)

    def test_user(self):
        "Testing the loading of the user configuration file"
        test_dict = {
            "path_to_dict": "dict1.json",
            "path_to_stat": "statistic1.json",
            "words_per_lesson": 6,
            "CntStudyWords": 60,
            "MinPercent": 98.0,
            "MinSuccessCnt": 11,
            "retry_time": 1801,
            "hide_transcription": "yes",
            "start_time_delay": 2,
            "stat_count_row": 300,
            "right_answer_percent": 20.0,
            "wrong_answer_percent": 50.0,
            "empty_answer_is_error": "yes",
            "internet_dictionary_url": {
                "EN_RU": "http1://slovari.yandex.ru/{word}/en-ru/#lingvo/",
                "RU_EN": "http1://slovari.yandex.ru/{word}/en/#lingvo/"}}
        json.dump(test_dict, open("test_config_user.json", "w"))

        cfg = Config("config.json5", "test_config_user.json")
        cfg.reload()
        self.equal_cfg(cfg, test_dict)

    def test_user_part(self):
        "Testing the loading of a user configuration file that overrides only part of the settings"
        test_dict = {
            "path_to_dict": "dict1.json",
            "path_to_stat": "statistic1.json",
            "words_per_lesson": 6,
            "CntStudyWords": 60,
            "MinPercent": 98.0,
            "MinSuccessCnt": 11}
        json.dump(test_dict, open("test_config_user.json", "w"))

        test_dict.update({
            "retry_time": 1800,
            "hide_transcription": "no",
            "start_time_delay": 1,
            "stat_count_row": 200,
            "right_answer_percent": 10.0,
            "wrong_answer_percent": 40.0,
            "empty_answer_is_error": "no"})

        cfg = Config("config.json5", "test_config_user.json")
        cfg.reload()
        self.equal_cfg(cfg, test_dict)

    def test_not_exists(self):
        "Testing that the default settings are applied"
        test_dict = {
            "path_to_dict": "dict.json",
            "path_to_stat": "statistic.json",
            "words_per_lesson": 5,
            "CntStudyWords": 50,
            "MinPercent": 97.0,
            "MinSuccessCnt": 10,
            "retry_time": 1800,
            "hide_transcription": "no",
            "start_time_delay": 1,
            "stat_count_row": 200,
            "right_answer_percent": 10.0,
            "wrong_answer_percent": 40.0,
            "empty_answer_is_error": "no",
            "internet_dictionary_url": {
                "EN_RU": "http://slovari.yandex.ru/{word}/en-ru/#lingvo/",
                "RU_EN": "http://slovari.yandex.ru/{word}/en/#lingvo/"}}

        cfg = Config("config.json5", "fake_config_user.json")
        cfg.reload()
        self.equal_cfg(cfg, test_dict)

        cfg = Config("fake_config.json", "fake_config_user.json")
        cfg.reload()


if __name__ == "__main__":
    os.chdir(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    suite = unittest.TestLoader().loadTestsFromTestCase(ConfigTestCase)
    unittest.TextTestRunner(verbosity=2).run(suite)
36.622449
114
0.57314
import os import re import json import os.path import unittest reg_cmnt = re.compile(r"/\*.*?\*/", re.DOTALL) class Config: def __init__(self, main_path=None, user_path=None): if main_path is None: self._main_path = "config.json5" else: self._main_path = main_path if user_path is None: self._user_path = "config_user.json5" else: self._user_path = user_path self._cfg_dict = {} def __getitem__(self, key): return self._cfg_dict[key] def __len__(self): return len(self._cfg_dict) def _load_json(self, path): data = {} if os.path.exists(path): txt = open(path).read() txt = reg_cmnt.sub("", txt) data = json.loads(txt) return data def _set_default(self, cfg): cfg["path_to_dict"] = cfg.get("path_to_dict", "dict.json") cfg["path_to_stat"] = cfg.get("path_to_stat", "statistic.json") cfg["words_per_lesson"] = int(cfg.get("words_per_lesson", 5)) cfg["CntStudyWords"] = int(cfg.get("CntStudyWords", 50)) cfg["MinPercent"] = float(cfg.get("MinPercent", 97.0)) cfg["MinSuccessCnt"] = int(cfg.get("MinSuccessCnt", 10)) cfg["retry_time"] = int(cfg.get("retry_time", 1800)) cfg["hide_transcription"] = cfg.get("hide_transcription", "no") cfg["start_time_delay"] = int(cfg.get("start_time_delay", 1)) cfg["stat_count_row"] = int(cfg.get("stat_count_row", 200)) cfg["right_answer_percent"] = float(cfg.get("right_answer_percent", 10.0)) cfg["wrong_answer_percent"] = float(cfg.get("wrong_answer_percent", 40.0)) cfg["empty_answer_is_error"] = cfg.get("empty_answer_is_error", "no") cfg["internet_dictionary_url"] = cfg.get("internet_dictionary_url", {"EN_RU": "http://slovari.yandex.ru/{word}/en-ru/#lingvo/", "RU_EN": "http://slovari.yandex.ru/{word}/en/#lingvo/"}) def create_default_user_config(self): if not os.path.isfile(self._user_path): txt = "{\n /*\n User config\n */\n\n}" open(self._user_path, "wt").write(txt) def reload(self): self._cfg_dict = {} self._cfg_dict.update(self._load_json(self._main_path)) self._cfg_dict.update(self._load_json(self._user_path)) self._set_default(self._cfg_dict) return self._cfg_dict def get_dict(self): return self._cfg_dict class ConfigTestCase(unittest.TestCase): def setUp(self): if os.path.isfile("test_config_user.json"): os.remove("test_config_user.json") def tearDown(self): if os.path.isfile("test_config_user.json"): os.remove("test_config_user.json") def equal_cfg(self, cfg, test_dict): for key, val in test_dict.items(): self.assertEqual(cfg[key], val) self.assertEqual(len(cfg), 14) def test_main(self): test_dict = { "path_to_dict": "dict.json", "path_to_stat": "statistic.json", "words_per_lesson": 5, "CntStudyWords": 50, "MinPercent": 97.0, "MinSuccessCnt": 10, "retry_time": 1800, "hide_transcription": "no", "start_time_delay": 1, "stat_count_row": 200, "right_answer_percent": 10.0, "wrong_answer_percent": 40.0, "empty_answer_is_error": "no", "internet_dictionary_url": {"EN_RU": "http://slovari.yandex.ru/{word}/en-ru/#lingvo/", "RU_EN": "http://slovari.yandex.ru/{word}/en/#lingvo/"}} cfg = Config("config.json5", "fake_config_user.json") cfg.reload() self.equal_cfg(cfg, test_dict) def test_user(self): test_dict = { "path_to_dict": "dict1.json", "path_to_stat": "statistic1.json", "words_per_lesson": 6, "CntStudyWords": 60, "MinPercent": 98.0, "MinSuccessCnt": 11, "retry_time": 1801, "hide_transcription": "yes", "start_time_delay": 2, "stat_count_row": 300, "right_answer_percent": 20.0, "wrong_answer_percent": 50.0, "empty_answer_is_error": "yes", "internet_dictionary_url": {"EN_RU": "http1://slovari.yandex.ru/{word}/en-ru/#lingvo/", "RU_EN": 
"http1://slovari.yandex.ru/{word}/en/#lingvo/"}} json.dump(test_dict, open("test_config_user.json", "w")) cfg = Config("config.json5", "test_config_user.json") cfg.reload() self.equal_cfg(cfg, test_dict) def test_user_part(self): test_dict = { "path_to_dict": "dict1.json", "path_to_stat": "statistic1.json", "words_per_lesson": 6, "CntStudyWords": 60, "MinPercent": 98.0, "MinSuccessCnt": 11} json.dump(test_dict, open("test_config_user.json", "w")) test_dict.update({ "retry_time": 1800, "hide_transcription": "no", "start_time_delay": 1, "stat_count_row": 200, "right_answer_percent": 10.0, "wrong_answer_percent": 40.0, "empty_answer_is_error": "no"}) cfg = Config("config.json5", "test_config_user.json") cfg.reload() self.equal_cfg(cfg, test_dict) def test_not_exists(self): test_dict = { "path_to_dict": "dict.json", "path_to_stat": "statistic.json", "words_per_lesson": 5, "CntStudyWords": 50, "MinPercent": 97.0, "MinSuccessCnt": 10, "retry_time": 1800, "hide_transcription": "no", "start_time_delay": 1, "stat_count_row": 200, "right_answer_percent": 10.0, "wrong_answer_percent": 40.0, "empty_answer_is_error": "no", "internet_dictionary_url": {"EN_RU": "http://slovari.yandex.ru/{word}/en-ru/#lingvo/", "RU_EN": "http://slovari.yandex.ru/{word}/en/#lingvo/"}} cfg = Config("config.json5", "fake_config_user.json") cfg.reload() self.equal_cfg(cfg, test_dict) cfg = Config("fake_config.json", "fake_config_user.json") cfg.reload() if __name__ == "__main__": os.chdir(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) suite = unittest.TestLoader().loadTestsFromTestCase(ConfigTestCase) unittest.TextTestRunner(verbosity=2).run(suite)
true
true
f700f25f477cd57dd0432a787354f47b88697b1c
532
py
Python
tests/test_model.py
margrietpalm/covid_swimming_chaos
e615d84d2716ce167e21a179c86c3adc4d53928a
[ "MIT" ]
null
null
null
tests/test_model.py
margrietpalm/covid_swimming_chaos
e615d84d2716ce167e21a179c86c3adc4d53928a
[ "MIT" ]
null
null
null
tests/test_model.py
margrietpalm/covid_swimming_chaos
e615d84d2716ce167e21a179c86c3adc4d53928a
[ "MIT" ]
null
null
null
import pytest
import copy
from pathlib import Path
import sys

sys.path.append(str(Path(__file__).absolute().parent.parent))

from swimmer_abm.model import Model


def test_init():
    model = Model(nswimmers=3)
    assert len(model.swimmers) == 3


def test_step():
    model = Model(nswimmers=1)
    swimmer = copy.deepcopy(model.swimmers[0])
    dt = 1
    swimmer.swim(dt)
    model.step(dt)
    assert swimmer.pos == model.swimmers[0].pos


def test_repr():
    model = Model(nswimmers=1)
    assert isinstance(str(model), str)
23.130435
61
0.695489
import pytest import copy from pathlib import Path import sys sys.path.append(str(Path(__file__).absolute().parent.parent)) from swimmer_abm.model import Model def test_init(): model = Model(nswimmers=3) assert len(model.swimmers) == 3 def test_step(): model = Model(nswimmers=1) swimmer = copy.deepcopy(model.swimmers[0]) dt = 1 swimmer.swim(dt) model.step(dt) assert swimmer.pos == model.swimmers[0].pos def test_repr(): model = Model(nswimmers=1) assert isinstance(str(model), str)
true
true
f700f33dffc00e1f53d3e1c3b1d4f0189b0c7b82
696
py
Python
src/repl.py
PolyglotSymposium/mm-i
6520f718b2100dfea2c20d3ae73b33d46292b730
[ "MIT" ]
null
null
null
src/repl.py
PolyglotSymposium/mm-i
6520f718b2100dfea2c20d3ae73b33d46292b730
[ "MIT" ]
null
null
null
src/repl.py
PolyglotSymposium/mm-i
6520f718b2100dfea2c20d3ae73b33d46292b730
[ "MIT" ]
null
null
null
#!/usr/bin/python3

class Evaluator:
    def __init__(self, lexer):
        self.__lexer = lexer

    def evaluate(self, line):
        return int(next(self.__lexer.tokenize(line)).raw_value)


class REPL:
    def __init__(self, read, print, evaluate):
        self.__read = read
        self.__eval = evaluate
        self.__print = print

    def loop(self):
        while True:
            try:
                line = self.__read('mm-i> ')
                result = self.__eval(line)
                self.__print(result)
            except KeyboardInterrupt:
                break


if __name__ == '__main__':
    from lexer import Lexer
    REPL(input, print, Evaluator(Lexer()).evaluate).loop()
25.777778
63
0.573276
class Evaluator: def __init__(self, lexer): self.__lexer = lexer def evaluate(self, line): return int(next(self.__lexer.tokenize(line)).raw_value) class REPL: def __init__(self, read, print, evaluate): self.__read = read self.__eval = evaluate self.__print = print def loop(self): while True: try: line = self.__read('mm-i> ') result = self.__eval(line) self.__print(result) except KeyboardInterrupt: break if __name__ == '__main__': from lexer import Lexer REPL(input, print, Evaluator(Lexer()).evaluate).loop()
true
true
f700f3f235ce44f72728af90479dc63e15aa97b8
4,426
py
Python
distributed/diagnostics/tests/test_task_stream.py
edyounis/distributed
bb091d5ec7d3ce4eb4a58e0957cba9cdf3da1d6a
[ "BSD-3-Clause" ]
null
null
null
distributed/diagnostics/tests/test_task_stream.py
edyounis/distributed
bb091d5ec7d3ce4eb4a58e0957cba9cdf3da1d6a
[ "BSD-3-Clause" ]
1
2022-02-28T22:02:10.000Z
2022-02-28T22:02:10.000Z
distributed/diagnostics/tests/test_task_stream.py
graingert/distributed
5feb17151cdf660a3443abf8596444a9f51dc575
[ "BSD-3-Clause" ]
null
null
null
import os from time import sleep import pytest from tlz import frequencies from distributed import get_task_stream from distributed.client import wait from distributed.diagnostics.task_stream import TaskStreamPlugin from distributed.metrics import time from distributed.utils_test import div, gen_cluster, inc, slowinc @gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3) async def test_TaskStreamPlugin(c, s, *workers): es = TaskStreamPlugin(s) s.add_plugin(es) assert not es.buffer futures = c.map(div, [1] * 10, range(10)) total = c.submit(sum, futures[1:]) await wait(total) assert len(es.buffer) == 11 workers = dict() rects = es.rectangles(0, 10, workers) assert workers assert all(n == "div" for n in rects["name"]) assert all(d > 0 for d in rects["duration"]) counts = frequencies(rects["color"]) assert counts["black"] == 1 assert set(counts.values()) == {9, 1} assert len(set(rects["y"])) == 3 rects = es.rectangles(2, 5, workers) assert all(len(L) == 3 for L in rects.values()) starts = sorted(rects["start"]) rects = es.rectangles( 2, 5, workers=workers, start_boundary=(starts[0] + starts[1]) / 2000 ) assert set(rects["start"]).issubset(set(starts[1:])) @gen_cluster(client=True) async def test_maxlen(c, s, a, b): tasks = TaskStreamPlugin(s, maxlen=5) s.add_plugin(tasks) futures = c.map(inc, range(10)) await wait(futures) assert len(tasks.buffer) == 5 @gen_cluster(client=True) async def test_collect(c, s, a, b): tasks = TaskStreamPlugin(s) s.add_plugin(tasks) start = time() futures = c.map(slowinc, range(10), delay=0.1) await wait(futures) L = tasks.collect() assert len(L) == len(futures) L = tasks.collect(start=start) assert len(L) == len(futures) L = tasks.collect(start=start + 0.2) assert 4 <= len(L) <= len(futures) L = tasks.collect(start="20 s") assert len(L) == len(futures) L = tasks.collect(start="500ms") assert 0 < len(L) <= len(futures) L = tasks.collect(count=3) assert len(L) == 3 assert L == list(tasks.buffer)[-3:] assert tasks.collect(stop=start + 100, count=3) == tasks.collect(count=3) assert tasks.collect(start=start, count=3) == list(tasks.buffer)[:3] @gen_cluster(client=True) async def test_no_startstops(c, s, a, b): tasks = TaskStreamPlugin(s) s.add_plugin(tasks) # just to create the key on the scheduler future = c.submit(inc, 1) await wait(future) assert len(tasks.buffer) == 1 tasks.transition(future.key, "processing", "erred") # Transition was not recorded because it didn't contain `startstops` assert len(tasks.buffer) == 1 tasks.transition(future.key, "processing", "erred", startstops=[]) # Transition was not recorded because `startstops` was empty assert len(tasks.buffer) == 1 tasks.transition( future.key, "processing", "erred", startstops=[dict(start=time(), stop=time())] ) assert len(tasks.buffer) == 2 @gen_cluster(client=True) async def test_client(c, s, a, b): L = await c.get_task_stream() assert L == () futures = c.map(slowinc, range(10), delay=0.1) await wait(futures) tasks = s.plugins[TaskStreamPlugin.name] L = await c.get_task_stream() assert L == tuple(tasks.buffer) def test_client_sync(client): with get_task_stream(client=client) as ts: sleep(0.1) # to smooth over time differences on the scheduler # to smooth over time differences on the scheduler futures = client.map(inc, range(10)) wait(futures) assert len(ts.data) == 10 @gen_cluster(client=True) async def test_get_task_stream_plot(c, s, a, b): bokeh = pytest.importorskip("bokeh") await c.get_task_stream() futures = c.map(slowinc, range(10), delay=0.1) await wait(futures) data, figure = await 
c.get_task_stream(plot=True) assert isinstance(figure, bokeh.plotting.Figure) def test_get_task_stream_save(client, tmpdir): bokeh = pytest.importorskip("bokeh") tmpdir = str(tmpdir) fn = os.path.join(tmpdir, "foo.html") with get_task_stream(plot="save", filename=fn) as ts: wait(client.map(inc, range(10))) with open(fn) as f: data = f.read() assert "inc" in data assert "bokeh" in data assert isinstance(ts.figure, bokeh.plotting.Figure)
28.191083
87
0.658156
import os from time import sleep import pytest from tlz import frequencies from distributed import get_task_stream from distributed.client import wait from distributed.diagnostics.task_stream import TaskStreamPlugin from distributed.metrics import time from distributed.utils_test import div, gen_cluster, inc, slowinc @gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3) async def test_TaskStreamPlugin(c, s, *workers): es = TaskStreamPlugin(s) s.add_plugin(es) assert not es.buffer futures = c.map(div, [1] * 10, range(10)) total = c.submit(sum, futures[1:]) await wait(total) assert len(es.buffer) == 11 workers = dict() rects = es.rectangles(0, 10, workers) assert workers assert all(n == "div" for n in rects["name"]) assert all(d > 0 for d in rects["duration"]) counts = frequencies(rects["color"]) assert counts["black"] == 1 assert set(counts.values()) == {9, 1} assert len(set(rects["y"])) == 3 rects = es.rectangles(2, 5, workers) assert all(len(L) == 3 for L in rects.values()) starts = sorted(rects["start"]) rects = es.rectangles( 2, 5, workers=workers, start_boundary=(starts[0] + starts[1]) / 2000 ) assert set(rects["start"]).issubset(set(starts[1:])) @gen_cluster(client=True) async def test_maxlen(c, s, a, b): tasks = TaskStreamPlugin(s, maxlen=5) s.add_plugin(tasks) futures = c.map(inc, range(10)) await wait(futures) assert len(tasks.buffer) == 5 @gen_cluster(client=True) async def test_collect(c, s, a, b): tasks = TaskStreamPlugin(s) s.add_plugin(tasks) start = time() futures = c.map(slowinc, range(10), delay=0.1) await wait(futures) L = tasks.collect() assert len(L) == len(futures) L = tasks.collect(start=start) assert len(L) == len(futures) L = tasks.collect(start=start + 0.2) assert 4 <= len(L) <= len(futures) L = tasks.collect(start="20 s") assert len(L) == len(futures) L = tasks.collect(start="500ms") assert 0 < len(L) <= len(futures) L = tasks.collect(count=3) assert len(L) == 3 assert L == list(tasks.buffer)[-3:] assert tasks.collect(stop=start + 100, count=3) == tasks.collect(count=3) assert tasks.collect(start=start, count=3) == list(tasks.buffer)[:3] @gen_cluster(client=True) async def test_no_startstops(c, s, a, b): tasks = TaskStreamPlugin(s) s.add_plugin(tasks) future = c.submit(inc, 1) await wait(future) assert len(tasks.buffer) == 1 tasks.transition(future.key, "processing", "erred") assert len(tasks.buffer) == 1 tasks.transition(future.key, "processing", "erred", startstops=[]) # Transition was not recorded because `startstops` was empty assert len(tasks.buffer) == 1 tasks.transition( future.key, "processing", "erred", startstops=[dict(start=time(), stop=time())] ) assert len(tasks.buffer) == 2 @gen_cluster(client=True) async def test_client(c, s, a, b): L = await c.get_task_stream() assert L == () futures = c.map(slowinc, range(10), delay=0.1) await wait(futures) tasks = s.plugins[TaskStreamPlugin.name] L = await c.get_task_stream() assert L == tuple(tasks.buffer) def test_client_sync(client): with get_task_stream(client=client) as ts: sleep(0.1) # to smooth over time differences on the scheduler # to smooth over time differences on the scheduler futures = client.map(inc, range(10)) wait(futures) assert len(ts.data) == 10 @gen_cluster(client=True) async def test_get_task_stream_plot(c, s, a, b): bokeh = pytest.importorskip("bokeh") await c.get_task_stream() futures = c.map(slowinc, range(10), delay=0.1) await wait(futures) data, figure = await c.get_task_stream(plot=True) assert isinstance(figure, bokeh.plotting.Figure) def test_get_task_stream_save(client, tmpdir): 
bokeh = pytest.importorskip("bokeh") tmpdir = str(tmpdir) fn = os.path.join(tmpdir, "foo.html") with get_task_stream(plot="save", filename=fn) as ts: wait(client.map(inc, range(10))) with open(fn) as f: data = f.read() assert "inc" in data assert "bokeh" in data assert isinstance(ts.figure, bokeh.plotting.Figure)
true
true
f700f4bd539b8bf548a33cc2116e009d6b138a96
16,593
py
Python
ote_sdk/ote_sdk/entities/dataset_item.py
vraoresearch/openvino_training_extensions
5cdade68a1ec25f694efddc40913fe2527e00e82
[ "Apache-2.0" ]
null
null
null
ote_sdk/ote_sdk/entities/dataset_item.py
vraoresearch/openvino_training_extensions
5cdade68a1ec25f694efddc40913fe2527e00e82
[ "Apache-2.0" ]
null
null
null
ote_sdk/ote_sdk/entities/dataset_item.py
vraoresearch/openvino_training_extensions
5cdade68a1ec25f694efddc40913fe2527e00e82
[ "Apache-2.0" ]
1
2020-12-13T22:13:51.000Z
2020-12-13T22:13:51.000Z
"""This module implements the dataset item entity""" # Copyright (C) 2021-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # import abc import copy import itertools import logging from threading import Lock from typing import List, Optional, Sequence import numpy as np from ote_sdk.entities.annotation import Annotation, AnnotationSceneEntity from ote_sdk.entities.label import LabelEntity from ote_sdk.entities.media import IMedia2DEntity from ote_sdk.entities.metadata import IMetadata, MetadataItemEntity from ote_sdk.entities.model import ModelEntity from ote_sdk.entities.scored_label import ScoredLabel from ote_sdk.entities.shapes.rectangle import Rectangle from ote_sdk.entities.subset import Subset from ote_sdk.utils.shape_factory import ShapeFactory logger = logging.getLogger(__name__) class DatasetItemEntity(metaclass=abc.ABCMeta): """ DatasetItemEntity represents an item in the DatasetEntity. It holds a media item, annotation and an ROI. The ROI determines the region of interest for the dataset item, and is described by a shape entity. Dataset items hold five fundamental properties: - A 2d media entity (e.g. Image) - A 2d annotation entity for the full resolution media entity - An ROI, describing the region of interest. - The subset it belongs to - Metadata for the media entity (e.g. saliency map or active score) .. rubric:: Getting data from dataset item The first step is to fetch the input data for the network. >>> dataset_item = DatasetItemEntity() >>> media_numpy = dataset_item.numpy # RGB media data (Height, Width, Channels) This returns the numpy data for the assigned ROI. But it is possible to extract any arbitrary region. >>> from ote_sdk.entities.shapes.rectangle import Rectangle >>> top_left_quart_roi = Annotation(Rectangle(x1=0.0, y1=0.0, x2=0.5, y2=0.5), labels=[]) >>> top_left_quart_numpy = dataset_item.roi_numpy(roi=top_left_quart_roi) Get the subset of labels for the item ROI: >>> labels = dataset_item.get_roi_labels(labels=...) Get the annotations __visible__ in the ROI: >>> dataset_item.get_annotations() .. rubric:: Adding output data to dataset item It is possible to add shapes or just labels for the ROI. Add shapes to dataset item: >>> box = Rectangle(x1=0.2, y1=0.3, x2=0.6, y2=0.5) >>> dataset_item.append_annotations(annotations=[Annotation(box, labels=[...])]) Add labels to ROI: >>> dataset_item.append_labels(labels=[...]) :param media: Media item :param annotation_scene: Annotation scene :param roi: Region Of Interest :param metadata: Metadata attached to dataset item :param subset: `Subset` for item. E.g. 
`Subset.VALIDATION` """ # pylint: disable=too-many-arguments def __init__( self, media: IMedia2DEntity, annotation_scene: AnnotationSceneEntity, roi: Optional[Annotation] = None, metadata: Optional[Sequence[MetadataItemEntity]] = None, subset: Subset = Subset.NONE, ): self.__media: IMedia2DEntity = media self.__annotation_scene: AnnotationSceneEntity = annotation_scene self.__subset: Subset = subset self.__roi_lock = Lock() # set ROI if roi is None: for annotation in annotation_scene.annotations: # if there is a full box in annotation.shapes, set it as ROI if Rectangle.is_full_box(annotation.shape): roi = annotation break self.__roi = roi self.__metadata: List[MetadataItemEntity] = [] if metadata is not None: self.__metadata = list(metadata) @property def metadata(self) -> Sequence[MetadataItemEntity]: """Provides access to metadata.""" return self.__metadata def __repr__(self): return ( f"{self.__class__.__name__}(" f"media={self.media}, " f"annotation_scene={self.annotation_scene}, " f"roi={self.roi}, " f"subset={self.subset})" ) @property def roi(self) -> Annotation: """Region Of Interest.""" with self.__roi_lock: if self.__roi is None: requested_roi = Annotation(Rectangle.generate_full_box(), labels=[]) self.__roi = requested_roi else: requested_roi = self.__roi return requested_roi @roi.setter def roi(self, roi: Optional[Annotation]): with self.__roi_lock: self.__roi = roi @property def subset(self) -> Subset: """ Returns the subset that the IDatasetItem belongs to. e.g. Subset.TRAINING. """ return self.__subset @subset.setter def subset(self, value: Subset): self.__subset = value @property def media(self) -> IMedia2DEntity: """Media.""" return self.__media def roi_numpy(self, roi: Optional[Annotation] = None) -> np.ndarray: """ Gives the numpy data for the media, given an ROI. This function allows to take a crop of any arbitrary region of the media in the Dataset entity. If the ROI is not given, the ROI assigned to the DatasetItem will be used as default. :param roi: Shape entity. The shape will be converted if needed, to extract the ROI numpy. :return: Numpy array with media data """ if roi is None: roi = self.roi if roi is not None: roi.shape = ShapeFactory.shape_as_rectangle(roi.shape) return self.media.roi_numpy(roi=roi) @property def numpy(self) -> np.ndarray: """ Returns the numpy data for the media, taking ROI into account. :return: Numpy array. RGB array of shape (Height, Width, Channels) """ return self.roi_numpy() @property def width(self) -> int: """ The width of the dataset item, taking into account the ROI. """ roi_shape_as_box = ShapeFactory.shape_as_rectangle(self.roi.shape) roi_shape_as_box = roi_shape_as_box.clip_to_visible_region() width = self.media.width # Note that we cannot directly use roi_shape_as_box.width due to the rounding # because round(x2 - x1) is not always equal to round(x2) - round(x1) x1 = int(round(roi_shape_as_box.x1 * width)) x2 = int(round(roi_shape_as_box.x2 * width)) return x2 - x1 @property def height(self) -> int: """ The height of the dataset item, taking into account the ROI. 
""" roi_shape_as_box = ShapeFactory.shape_as_rectangle(self.roi.shape) roi_shape_as_box = roi_shape_as_box.clip_to_visible_region() height = self.media.height # Note that we cannot directly use roi_shape_as_box.height due to the rounding # because round(y2 - y1) is not always equal to round(y2) - round(y1) y1 = int(round(roi_shape_as_box.y1 * height)) y2 = int(round(roi_shape_as_box.y2 * height)) return y2 - y1 @property def annotation_scene(self) -> AnnotationSceneEntity: """Access to annotation scene.""" return self.__annotation_scene @annotation_scene.setter def annotation_scene(self, value: AnnotationSceneEntity): self.__annotation_scene = value def get_annotations( self, labels: Optional[List[LabelEntity]] = None, include_empty: bool = False, ) -> List[Annotation]: """ Returns a list of annotations that exist in the dataset item (wrt. ROI). This is done by checking that the center of the annotation is located in the ROI. :param labels: Subset of input labels to filter with; if ``None``, all the shapes within the ROI are returned :param include_empty: if True, returns both empty and non-empty labels :return: The intersection of the input label set and those present within the ROI """ is_full_box = Rectangle.is_full_box(self.roi.shape) annotations = [] if is_full_box and labels is None and not include_empty: # Fast path for the case where we do not need to change the shapes # todo: this line is incorrect. CVS-75919 annotations = self.annotation_scene.annotations else: # Todo: improve speed. This is O(n) for n shapes. roi_as_box = ShapeFactory.shape_as_rectangle(self.roi.shape) labels_set = {label.name for label in labels} if labels is not None else {} for annotation in self.annotation_scene.annotations: if not is_full_box and not self.roi.shape.contains_center( annotation.shape ): continue shape_labels = annotation.get_labels(include_empty) if labels is not None: shape_labels = [ label for label in shape_labels if label.name in labels_set ] if len(shape_labels) == 0: continue if not is_full_box: # Create a denormalized copy of the shape. shape = annotation.shape.denormalize_wrt_roi_shape(roi_as_box) else: # Also create a copy of the shape, so that we can safely modify the labels # without tampering with the original shape. shape = copy.deepcopy(annotation.shape) annotations.append(Annotation(shape=shape, labels=shape_labels)) return annotations def append_annotations(self, annotations: Sequence[Annotation]): """ Adds a list of shapes to the annotation """ roi_as_box = ShapeFactory.shape_as_rectangle(self.roi.shape) validated_annotations = [ Annotation( shape=annotation.shape.normalize_wrt_roi_shape(roi_as_box), labels=annotation.get_labels(), ) for annotation in annotations if ShapeFactory().shape_produces_valid_crop( shape=annotation.shape, media_width=self.media.width, media_height=self.media.height, ) ] n_invalid_shapes = len(annotations) - len(validated_annotations) if n_invalid_shapes > 0: logger.info( "%d shapes will not be added to the dataset item as they " "would produce invalid crops (this is expected for some tasks, " "such as segmentation).", n_invalid_shapes, ) self.annotation_scene.append_annotations(validated_annotations) def get_roi_labels( self, labels: Optional[List[LabelEntity]] = None, include_empty: bool = False ) -> List[LabelEntity]: """ Return the subset of the input labels which exist in the dataset item (wrt. ROI). 
:param labels: Subset of input labels to filter with; if ``None``, all the labels within the ROI are returned :param include_empty: if True, returns both empty and non-empty labels :return: The intersection of the input label set and those present within the ROI """ filtered_labels = set() for label in self.roi.get_labels(include_empty): if labels is None or label.get_label() in labels: filtered_labels.add(label.get_label()) return sorted(list(filtered_labels), key=lambda x: x.name) def get_shapes_labels( self, labels: Optional[List[LabelEntity]] = None, include_empty: bool = False ) -> List[LabelEntity]: """ Get the labels of the shapes present in this dataset item. if a label list is supplied, only labels present within that list are returned. if include_empty is True, present empty labels are returned as well. :param labels: if supplied only labels present in this list are returned :param include_empty: if True, returns both empty and non-empty labels :return: a list of labels from the shapes within the roi of this dataset item """ annotations = self.get_annotations() scored_label_set = set( itertools.chain( *[annotation.get_labels(include_empty) for annotation in annotations] ) ) label_set = {scored_label.get_label() for scored_label in scored_label_set} if labels is None: return list(label_set) return [label for label in label_set if label in labels] def append_labels(self, labels: List[ScoredLabel]): """ Appends labels to the DatasetItem and adds it to the the annotation label as well if it's not yet there :param labels: list of labels to be appended """ if len(labels) == 0: return roi_annotation = None for annotation in self.annotation_scene.annotations: if annotation == self.roi: roi_annotation = annotation break if roi_annotation is None: # no annotation found with shape roi_annotation = self.roi self.annotation_scene.append_annotation(roi_annotation) for label in labels: if label not in self.roi.get_labels(include_empty=True): self.roi.append_label(label) if label not in roi_annotation.get_labels(include_empty=True): roi_annotation.append_label(label) def __eq__(self, other): if isinstance(other, DatasetItemEntity): return ( self.media == other.media and self.annotation_scene == other.annotation_scene and self.roi == other.roi and self.subset == other.subset ) return False def __deepcopy__(self, memo): """ When we deepcopy this object, be sure not to deep copy the lock, as this is not possible, make a new lock instead. """ # Call ROI getter to ensure original object has an ROI. _ = self.roi clone = copy.copy(self) for name, value in vars(self).items(): if "__roi_lock" in name: setattr(clone, name, Lock()) else: setattr(clone, name, copy.deepcopy(value, memo)) return clone def append_metadata_item( self, data: IMetadata, model: Optional[ModelEntity] = None ): """ Appends metadata produced by some model to the dataset item. .. rubric:: Adding visualization heatmap (ResultMediaEntity) to DatasetItemEntity >>> from ote_sdk.entities.image import Image >>> from ote_sdk.entities.result_media import ResultMediaEntity >>> media = Image(file_path='image.jpeg') >>> annotation = NullAnnotationSceneEntity() >>> dataset_item = DatasetItem(media=media, annotation_scene=annotation) >>> data = np.ones((120, 120, 3)).astype(np.uint8) * 255 # Saliency numpy >>> result_media = ResultMediaEntity(name="Gradcam++", ... type="Gradcam++", ... annotation_scene=annotation, ... numpy=data) >>> dataset_item.append_metadata_item(result_media) .. 
rubric:: Representation vector for active learning >>> from ote_sdk.entities.tensor import TensorEntity >>> tensor = TensorEntity(name="representation_vector", numpy=data) >>> dataset_item.append_metadata_item(data=tensor, model=model) :param data: any object of a class inherited from IMetadata. (e.g., FloatMetadata, Tensor) :param model: model that was used to generated metadata """ self.__metadata.append(MetadataItemEntity(data=data, model=model)) def get_metadata_by_name_and_model( self, name: str, model: Optional[ModelEntity] ) -> Sequence[MetadataItemEntity]: """ Returns a metadata item with `name` and generated by `model`. :param name: the name of the metadata :param model: the model which was used to generate the metadata. :return: """ return [ meta for meta in self.metadata if meta.data.name == name and meta.model == model ]
37.62585
117
0.629121
import abc import copy import itertools import logging from threading import Lock from typing import List, Optional, Sequence import numpy as np from ote_sdk.entities.annotation import Annotation, AnnotationSceneEntity from ote_sdk.entities.label import LabelEntity from ote_sdk.entities.media import IMedia2DEntity from ote_sdk.entities.metadata import IMetadata, MetadataItemEntity from ote_sdk.entities.model import ModelEntity from ote_sdk.entities.scored_label import ScoredLabel from ote_sdk.entities.shapes.rectangle import Rectangle from ote_sdk.entities.subset import Subset from ote_sdk.utils.shape_factory import ShapeFactory logger = logging.getLogger(__name__) class DatasetItemEntity(metaclass=abc.ABCMeta): def __init__( self, media: IMedia2DEntity, annotation_scene: AnnotationSceneEntity, roi: Optional[Annotation] = None, metadata: Optional[Sequence[MetadataItemEntity]] = None, subset: Subset = Subset.NONE, ): self.__media: IMedia2DEntity = media self.__annotation_scene: AnnotationSceneEntity = annotation_scene self.__subset: Subset = subset self.__roi_lock = Lock() if roi is None: for annotation in annotation_scene.annotations: if Rectangle.is_full_box(annotation.shape): roi = annotation break self.__roi = roi self.__metadata: List[MetadataItemEntity] = [] if metadata is not None: self.__metadata = list(metadata) @property def metadata(self) -> Sequence[MetadataItemEntity]: return self.__metadata def __repr__(self): return ( f"{self.__class__.__name__}(" f"media={self.media}, " f"annotation_scene={self.annotation_scene}, " f"roi={self.roi}, " f"subset={self.subset})" ) @property def roi(self) -> Annotation: with self.__roi_lock: if self.__roi is None: requested_roi = Annotation(Rectangle.generate_full_box(), labels=[]) self.__roi = requested_roi else: requested_roi = self.__roi return requested_roi @roi.setter def roi(self, roi: Optional[Annotation]): with self.__roi_lock: self.__roi = roi @property def subset(self) -> Subset: return self.__subset @subset.setter def subset(self, value: Subset): self.__subset = value @property def media(self) -> IMedia2DEntity: return self.__media def roi_numpy(self, roi: Optional[Annotation] = None) -> np.ndarray: if roi is None: roi = self.roi if roi is not None: roi.shape = ShapeFactory.shape_as_rectangle(roi.shape) return self.media.roi_numpy(roi=roi) @property def numpy(self) -> np.ndarray: return self.roi_numpy() @property def width(self) -> int: roi_shape_as_box = ShapeFactory.shape_as_rectangle(self.roi.shape) roi_shape_as_box = roi_shape_as_box.clip_to_visible_region() width = self.media.width x1 = int(round(roi_shape_as_box.x1 * width)) x2 = int(round(roi_shape_as_box.x2 * width)) return x2 - x1 @property def height(self) -> int: roi_shape_as_box = ShapeFactory.shape_as_rectangle(self.roi.shape) roi_shape_as_box = roi_shape_as_box.clip_to_visible_region() height = self.media.height y1 = int(round(roi_shape_as_box.y1 * height)) y2 = int(round(roi_shape_as_box.y2 * height)) return y2 - y1 @property def annotation_scene(self) -> AnnotationSceneEntity: return self.__annotation_scene @annotation_scene.setter def annotation_scene(self, value: AnnotationSceneEntity): self.__annotation_scene = value def get_annotations( self, labels: Optional[List[LabelEntity]] = None, include_empty: bool = False, ) -> List[Annotation]: is_full_box = Rectangle.is_full_box(self.roi.shape) annotations = [] if is_full_box and labels is None and not include_empty: annotations = self.annotation_scene.annotations else: roi_as_box = 
ShapeFactory.shape_as_rectangle(self.roi.shape) labels_set = {label.name for label in labels} if labels is not None else {} for annotation in self.annotation_scene.annotations: if not is_full_box and not self.roi.shape.contains_center( annotation.shape ): continue shape_labels = annotation.get_labels(include_empty) if labels is not None: shape_labels = [ label for label in shape_labels if label.name in labels_set ] if len(shape_labels) == 0: continue if not is_full_box: shape = annotation.shape.denormalize_wrt_roi_shape(roi_as_box) else: shape = copy.deepcopy(annotation.shape) annotations.append(Annotation(shape=shape, labels=shape_labels)) return annotations def append_annotations(self, annotations: Sequence[Annotation]): roi_as_box = ShapeFactory.shape_as_rectangle(self.roi.shape) validated_annotations = [ Annotation( shape=annotation.shape.normalize_wrt_roi_shape(roi_as_box), labels=annotation.get_labels(), ) for annotation in annotations if ShapeFactory().shape_produces_valid_crop( shape=annotation.shape, media_width=self.media.width, media_height=self.media.height, ) ] n_invalid_shapes = len(annotations) - len(validated_annotations) if n_invalid_shapes > 0: logger.info( "%d shapes will not be added to the dataset item as they " "would produce invalid crops (this is expected for some tasks, " "such as segmentation).", n_invalid_shapes, ) self.annotation_scene.append_annotations(validated_annotations) def get_roi_labels( self, labels: Optional[List[LabelEntity]] = None, include_empty: bool = False ) -> List[LabelEntity]: filtered_labels = set() for label in self.roi.get_labels(include_empty): if labels is None or label.get_label() in labels: filtered_labels.add(label.get_label()) return sorted(list(filtered_labels), key=lambda x: x.name) def get_shapes_labels( self, labels: Optional[List[LabelEntity]] = None, include_empty: bool = False ) -> List[LabelEntity]: annotations = self.get_annotations() scored_label_set = set( itertools.chain( *[annotation.get_labels(include_empty) for annotation in annotations] ) ) label_set = {scored_label.get_label() for scored_label in scored_label_set} if labels is None: return list(label_set) return [label for label in label_set if label in labels] def append_labels(self, labels: List[ScoredLabel]): if len(labels) == 0: return roi_annotation = None for annotation in self.annotation_scene.annotations: if annotation == self.roi: roi_annotation = annotation break if roi_annotation is None: roi_annotation = self.roi self.annotation_scene.append_annotation(roi_annotation) for label in labels: if label not in self.roi.get_labels(include_empty=True): self.roi.append_label(label) if label not in roi_annotation.get_labels(include_empty=True): roi_annotation.append_label(label) def __eq__(self, other): if isinstance(other, DatasetItemEntity): return ( self.media == other.media and self.annotation_scene == other.annotation_scene and self.roi == other.roi and self.subset == other.subset ) return False def __deepcopy__(self, memo): _ = self.roi clone = copy.copy(self) for name, value in vars(self).items(): if "__roi_lock" in name: setattr(clone, name, Lock()) else: setattr(clone, name, copy.deepcopy(value, memo)) return clone def append_metadata_item( self, data: IMetadata, model: Optional[ModelEntity] = None ): self.__metadata.append(MetadataItemEntity(data=data, model=model)) def get_metadata_by_name_and_model( self, name: str, model: Optional[ModelEntity] ) -> Sequence[MetadataItemEntity]: return [ meta for meta in self.metadata if meta.data.name == name and 
meta.model == model ]
true
true
f700f5af5a55ee69e9bcfc5b9683c37f5e27231c
706
py
Python
charlie2/_scratch/event_tester.py
sammosummo/Charlie2
e856b9bfc83c11e57a63d487fa14a63764e3f6ae
[ "MIT" ]
5
2019-10-10T08:22:29.000Z
2021-04-09T02:34:13.000Z
charlie2/_scratch/event_tester.py
sammosummo/Charlie2
e856b9bfc83c11e57a63d487fa14a63764e3f6ae
[ "MIT" ]
20
2018-06-20T21:15:48.000Z
2018-09-06T17:13:46.000Z
charlie2/_scratch/event_tester.py
sammosummo/Charlie2
e856b9bfc83c11e57a63d487fa14a63764e3f6ae
[ "MIT" ]
3
2019-11-24T04:10:40.000Z
2020-04-04T07:50:57.000Z
from sys import argv, exit

from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget


class MainWindow(QMainWindow):
    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.setCentralWidget(CustomWidget(self))
        self.show()


class CustomWidget(QWidget):
    def __init__(self, parent=None):
        super(CustomWidget, self).__init__(parent)
        self.setFocusPolicy(Qt.StrongFocus)
        pass

    def mousePressEvent(self, event):
        print(event)

    def keyPressEvent(self, event):
        print(event)


if __name__ == "__main__":
    app = QApplication(argv)
    ex = MainWindow()
    exit(app.exec_())
21.393939
62
0.677054
from sys import argv, exit from PyQt5.QtCore import Qt from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget class MainWindow(QMainWindow): def __init__(self, parent=None): super(MainWindow, self).__init__(parent) self.setCentralWidget(CustomWidget(self)) self.show() class CustomWidget(QWidget): def __init__(self, parent=None): super(CustomWidget, self).__init__(parent) self.setFocusPolicy(Qt.StrongFocus) pass def mousePressEvent(self, event): print(event) def keyPressEvent(self, event): print(event) if __name__ == "__main__": app = QApplication(argv) ex = MainWindow() exit(app.exec_())
true
true
f700f5cfd4cc030d3b6feeef81a024d23319dedc
11,085
py
Python
neutron_vpnaas/openstack/common/processutils.py
citrix-openstack-build/neutron-vpnaas
d1ee6923425eca52f400a2de23d1541f16568c2b
[ "Apache-2.0" ]
null
null
null
neutron_vpnaas/openstack/common/processutils.py
citrix-openstack-build/neutron-vpnaas
d1ee6923425eca52f400a2de23d1541f16568c2b
[ "Apache-2.0" ]
null
null
null
neutron_vpnaas/openstack/common/processutils.py
citrix-openstack-build/neutron-vpnaas
d1ee6923425eca52f400a2de23d1541f16568c2b
[ "Apache-2.0" ]
null
null
null
# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ System-level utilities and helper functions. """ import errno import logging import multiprocessing import os import random import shlex import signal from eventlet.green import subprocess from eventlet import greenthread from oslo.utils import strutils import six from neutron_vpnaas.openstack.common._i18n import _ LOG = logging.getLogger(__name__) class InvalidArgumentError(Exception): def __init__(self, message=None): super(InvalidArgumentError, self).__init__(message) class UnknownArgumentError(Exception): def __init__(self, message=None): super(UnknownArgumentError, self).__init__(message) class ProcessExecutionError(Exception): def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, description=None): self.exit_code = exit_code self.stderr = stderr self.stdout = stdout self.cmd = cmd self.description = description if description is None: description = _("Unexpected error while running command.") if exit_code is None: exit_code = '-' message = _('%(description)s\n' 'Command: %(cmd)s\n' 'Exit code: %(exit_code)s\n' 'Stdout: %(stdout)r\n' 'Stderr: %(stderr)r') % {'description': description, 'cmd': cmd, 'exit_code': exit_code, 'stdout': stdout, 'stderr': stderr} super(ProcessExecutionError, self).__init__(message) class NoRootWrapSpecified(Exception): def __init__(self, message=None): super(NoRootWrapSpecified, self).__init__(message) def _subprocess_setup(): # Python installs a SIGPIPE handler by default. This is usually not what # non-Python subprocesses expect. signal.signal(signal.SIGPIPE, signal.SIG_DFL) def execute(*cmd, **kwargs): """Helper method to shell out and execute a command through subprocess. Allows optional retry. :param cmd: Passed to subprocess.Popen. :type cmd: string :param process_input: Send to opened process. :type process_input: string :param env_variables: Environment variables and their values that will be set for the process. :type env_variables: dict :param check_exit_code: Single bool, int, or list of allowed exit codes. Defaults to [0]. Raise :class:`ProcessExecutionError` unless program exits with one of these code. :type check_exit_code: boolean, int, or [int] :param delay_on_retry: True | False. Defaults to True. If set to True, wait a short amount of time before retrying. :type delay_on_retry: boolean :param attempts: How many times to retry cmd. :type attempts: int :param run_as_root: True | False. Defaults to False. If set to True, the command is prefixed by the command specified in the root_helper kwarg. :type run_as_root: boolean :param root_helper: command to prefix to commands called with run_as_root=True :type root_helper: string :param shell: whether or not there should be a shell used to execute this command. Defaults to false. :type shell: boolean :param loglevel: log level for execute commands. :type loglevel: int. 
(Should be logging.DEBUG or logging.INFO) :returns: (stdout, stderr) from process execution :raises: :class:`UnknownArgumentError` on receiving unknown arguments :raises: :class:`ProcessExecutionError` """ process_input = kwargs.pop('process_input', None) env_variables = kwargs.pop('env_variables', None) check_exit_code = kwargs.pop('check_exit_code', [0]) ignore_exit_code = False delay_on_retry = kwargs.pop('delay_on_retry', True) attempts = kwargs.pop('attempts', 1) run_as_root = kwargs.pop('run_as_root', False) root_helper = kwargs.pop('root_helper', '') shell = kwargs.pop('shell', False) loglevel = kwargs.pop('loglevel', logging.DEBUG) if isinstance(check_exit_code, bool): ignore_exit_code = not check_exit_code check_exit_code = [0] elif isinstance(check_exit_code, int): check_exit_code = [check_exit_code] if kwargs: raise UnknownArgumentError(_('Got unknown keyword args: %r') % kwargs) if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0: if not root_helper: raise NoRootWrapSpecified( message=_('Command requested root, but did not ' 'specify a root helper.')) cmd = shlex.split(root_helper) + list(cmd) cmd = map(str, cmd) sanitized_cmd = strutils.mask_password(' '.join(cmd)) while attempts > 0: attempts -= 1 try: LOG.log(loglevel, _('Running cmd (subprocess): %s'), sanitized_cmd) _PIPE = subprocess.PIPE # pylint: disable=E1101 if os.name == 'nt': preexec_fn = None close_fds = False else: preexec_fn = _subprocess_setup close_fds = True obj = subprocess.Popen(cmd, stdin=_PIPE, stdout=_PIPE, stderr=_PIPE, close_fds=close_fds, preexec_fn=preexec_fn, shell=shell, env=env_variables) result = None for _i in six.moves.range(20): # NOTE(russellb) 20 is an arbitrary number of retries to # prevent any chance of looping forever here. try: if process_input is not None: result = obj.communicate(process_input) else: result = obj.communicate() except OSError as e: if e.errno in (errno.EAGAIN, errno.EINTR): continue raise break obj.stdin.close() # pylint: disable=E1101 _returncode = obj.returncode # pylint: disable=E1101 LOG.log(loglevel, 'Result was %s' % _returncode) if not ignore_exit_code and _returncode not in check_exit_code: (stdout, stderr) = result sanitized_stdout = strutils.mask_password(stdout) sanitized_stderr = strutils.mask_password(stderr) raise ProcessExecutionError(exit_code=_returncode, stdout=sanitized_stdout, stderr=sanitized_stderr, cmd=sanitized_cmd) return result except ProcessExecutionError: if not attempts: raise else: LOG.log(loglevel, _('%r failed. Retrying.'), sanitized_cmd) if delay_on_retry: greenthread.sleep(random.randint(20, 200) / 100.0) finally: # NOTE(termie): this appears to be necessary to let the subprocess # call clean something up in between calls, without # it two execute calls in a row hangs the second one greenthread.sleep(0) def trycmd(*args, **kwargs): """A wrapper around execute() to more easily handle warnings and errors. Returns an (out, err) tuple of strings containing the output of the command's stdout and stderr. If 'err' is not empty then the command can be considered to have failed. :discard_warnings True | False. Defaults to False. 
If set to True, then for succeeding commands, stderr is cleared """ discard_warnings = kwargs.pop('discard_warnings', False) try: out, err = execute(*args, **kwargs) failed = False except ProcessExecutionError as exn: out, err = '', six.text_type(exn) failed = True if not failed and discard_warnings and err: # Handle commands that output to stderr but otherwise succeed err = '' return out, err def ssh_execute(ssh, cmd, process_input=None, addl_env=None, check_exit_code=True): sanitized_cmd = strutils.mask_password(cmd) LOG.debug('Running cmd (SSH): %s', sanitized_cmd) if addl_env: raise InvalidArgumentError(_('Environment not supported over SSH')) if process_input: # This is (probably) fixable if we need it... raise InvalidArgumentError(_('process_input not supported over SSH')) stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd) channel = stdout_stream.channel # NOTE(justinsb): This seems suspicious... # ...other SSH clients have buffering issues with this approach stdout = stdout_stream.read() sanitized_stdout = strutils.mask_password(stdout) stderr = stderr_stream.read() sanitized_stderr = strutils.mask_password(stderr) stdin_stream.close() exit_status = channel.recv_exit_status() # exit_status == -1 if no exit code was returned if exit_status != -1: LOG.debug('Result was %s' % exit_status) if check_exit_code and exit_status != 0: raise ProcessExecutionError(exit_code=exit_status, stdout=sanitized_stdout, stderr=sanitized_stderr, cmd=sanitized_cmd) return (sanitized_stdout, sanitized_stderr) def get_worker_count(): """Utility to get the default worker count. @return: The number of CPUs if that can be determined, else a default worker count of 1 is returned. """ try: return multiprocessing.cpu_count() except NotImplementedError: return 1
38.224138
79
0.589445
import errno import logging import multiprocessing import os import random import shlex import signal from eventlet.green import subprocess from eventlet import greenthread from oslo.utils import strutils import six from neutron_vpnaas.openstack.common._i18n import _ LOG = logging.getLogger(__name__) class InvalidArgumentError(Exception): def __init__(self, message=None): super(InvalidArgumentError, self).__init__(message) class UnknownArgumentError(Exception): def __init__(self, message=None): super(UnknownArgumentError, self).__init__(message) class ProcessExecutionError(Exception): def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, description=None): self.exit_code = exit_code self.stderr = stderr self.stdout = stdout self.cmd = cmd self.description = description if description is None: description = _("Unexpected error while running command.") if exit_code is None: exit_code = '-' message = _('%(description)s\n' 'Command: %(cmd)s\n' 'Exit code: %(exit_code)s\n' 'Stdout: %(stdout)r\n' 'Stderr: %(stderr)r') % {'description': description, 'cmd': cmd, 'exit_code': exit_code, 'stdout': stdout, 'stderr': stderr} super(ProcessExecutionError, self).__init__(message) class NoRootWrapSpecified(Exception): def __init__(self, message=None): super(NoRootWrapSpecified, self).__init__(message) def _subprocess_setup(): signal.signal(signal.SIGPIPE, signal.SIG_DFL) def execute(*cmd, **kwargs): process_input = kwargs.pop('process_input', None) env_variables = kwargs.pop('env_variables', None) check_exit_code = kwargs.pop('check_exit_code', [0]) ignore_exit_code = False delay_on_retry = kwargs.pop('delay_on_retry', True) attempts = kwargs.pop('attempts', 1) run_as_root = kwargs.pop('run_as_root', False) root_helper = kwargs.pop('root_helper', '') shell = kwargs.pop('shell', False) loglevel = kwargs.pop('loglevel', logging.DEBUG) if isinstance(check_exit_code, bool): ignore_exit_code = not check_exit_code check_exit_code = [0] elif isinstance(check_exit_code, int): check_exit_code = [check_exit_code] if kwargs: raise UnknownArgumentError(_('Got unknown keyword args: %r') % kwargs) if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0: if not root_helper: raise NoRootWrapSpecified( message=_('Command requested root, but did not ' 'specify a root helper.')) cmd = shlex.split(root_helper) + list(cmd) cmd = map(str, cmd) sanitized_cmd = strutils.mask_password(' '.join(cmd)) while attempts > 0: attempts -= 1 try: LOG.log(loglevel, _('Running cmd (subprocess): %s'), sanitized_cmd) _PIPE = subprocess.PIPE if os.name == 'nt': preexec_fn = None close_fds = False else: preexec_fn = _subprocess_setup close_fds = True obj = subprocess.Popen(cmd, stdin=_PIPE, stdout=_PIPE, stderr=_PIPE, close_fds=close_fds, preexec_fn=preexec_fn, shell=shell, env=env_variables) result = None for _i in six.moves.range(20): try: if process_input is not None: result = obj.communicate(process_input) else: result = obj.communicate() except OSError as e: if e.errno in (errno.EAGAIN, errno.EINTR): continue raise break obj.stdin.close() _returncode = obj.returncode LOG.log(loglevel, 'Result was %s' % _returncode) if not ignore_exit_code and _returncode not in check_exit_code: (stdout, stderr) = result sanitized_stdout = strutils.mask_password(stdout) sanitized_stderr = strutils.mask_password(stderr) raise ProcessExecutionError(exit_code=_returncode, stdout=sanitized_stdout, stderr=sanitized_stderr, cmd=sanitized_cmd) return result except ProcessExecutionError: if not attempts: raise else: LOG.log(loglevel, _('%r 
failed. Retrying.'), sanitized_cmd) if delay_on_retry: greenthread.sleep(random.randint(20, 200) / 100.0) finally: greenthread.sleep(0) def trycmd(*args, **kwargs): discard_warnings = kwargs.pop('discard_warnings', False) try: out, err = execute(*args, **kwargs) failed = False except ProcessExecutionError as exn: out, err = '', six.text_type(exn) failed = True if not failed and discard_warnings and err: err = '' return out, err def ssh_execute(ssh, cmd, process_input=None, addl_env=None, check_exit_code=True): sanitized_cmd = strutils.mask_password(cmd) LOG.debug('Running cmd (SSH): %s', sanitized_cmd) if addl_env: raise InvalidArgumentError(_('Environment not supported over SSH')) if process_input: raise InvalidArgumentError(_('process_input not supported over SSH')) stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd) channel = stdout_stream.channel stdout = stdout_stream.read() sanitized_stdout = strutils.mask_password(stdout) stderr = stderr_stream.read() sanitized_stderr = strutils.mask_password(stderr) stdin_stream.close() exit_status = channel.recv_exit_status() if exit_status != -1: LOG.debug('Result was %s' % exit_status) if check_exit_code and exit_status != 0: raise ProcessExecutionError(exit_code=exit_status, stdout=sanitized_stdout, stderr=sanitized_stderr, cmd=sanitized_cmd) return (sanitized_stdout, sanitized_stderr) def get_worker_count(): try: return multiprocessing.cpu_count() except NotImplementedError: return 1
true
true
f700f64d326367612e46eead5f8b7101bfc54b8e
1,552
py
Python
39-combination-sum/solution.py
phenix3443/leetcode
b6d8486e859b2db0bf3d58f55a6e1d439b0b891a
[ "MIT" ]
null
null
null
39-combination-sum/solution.py
phenix3443/leetcode
b6d8486e859b2db0bf3d58f55a6e1d439b0b891a
[ "MIT" ]
null
null
null
39-combination-sum/solution.py
phenix3443/leetcode
b6d8486e859b2db0bf3d58f55a6e1d439b0b891a
[ "MIT" ]
null
null
null
# -*- coding:utf-8; -*-


class SolutionV1:
    def combinationSum(self, candidates, target):
        # 1. Define the collection that stores the resulting combinations
        result = set()

        # 2. Define the recursive function; i denotes the recursion depth, but its exact meaning is unclear for now
        def helper(nums, candidates, target):
            # 4. Follow the recursion template
            # 1) Define the termination condition of the recursion
            # It should return once the sum of the numbers picked from candidates equals target, so the recursion argument should really be a list of candidate values.
            # Rename the first recursion argument from i to nums, meaning a list of chosen values
            if sum(nums) == target:
                result.append(tuple(nums))
                return
            # 5. When sum(nums) > target we should stop as well, because all candidates are positive integers
            if sum(nums) > target:
                return
            # 2) Handle the logic of the current level
            # 6. Current-level logic: if sum(nums) < target, the next element added to nums can be any element of candidates
            newNums = [nums + [i] for i in candidates]
            # 3) Drill down into the recursion
            # 7. Recurse into the next level
            for nums in newNums:
                helper(nums, candidates, target)
            # 4) Clean up the current level; nothing to clean up here

        # 3. Call the recursive function for the first time
        helper([], candidates, target)
        return [list(nums) for nums in result]


class Solution:
    """
    Optimized recursion: the same algorithm with language-level code cleanup
    """

    def combinationSum(self, candidates, target):
        result = set()

        def helper(nums, candidates, target):
            if sum(nums) == target:
                result.add(tuple(sorted(nums)))
                return
            if sum(nums) > target:
                return
            for i in candidates:
                helper(nums + [i], candidates, target)

        helper([], candidates, target)
        return [list(nums) for nums in result]
25.866667
77
0.523196
class SolutionV1: def combinationSum(self, candidates, target): result = set() def helper(nums, candidates, target): if sum(nums) == target: result.append(tuple(nums)) return if sum(nums) > target: return newNums = [nums + [i] for i in candidates] for nums in newNums: helper(nums, candidates, target) helper([], candidates, target) return [list(nums) for nums in result] class Solution: def combinationSum(self, candidates, target): result = set() def helper(nums, candidates, target): if sum(nums) == target: result.add(tuple(sorted(nums))) return if sum(nums) > target: return for i in candidates: helper(nums + [i], candidates, target) helper([], candidates, target) return [list(nums) for nums in result]
true
true
f700f66d7a6fb738d17f3ac606e54056809a5935
19,266
py
Python
tests/sentry/integrations/github_enterprise/test_webhooks.py
pierredup/sentry
0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80
[ "BSD-3-Clause" ]
null
null
null
tests/sentry/integrations/github_enterprise/test_webhooks.py
pierredup/sentry
0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80
[ "BSD-3-Clause" ]
null
null
null
tests/sentry/integrations/github_enterprise/test_webhooks.py
pierredup/sentry
0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- from __future__ import absolute_import import six from datetime import datetime from django.utils import timezone from sentry.models import Commit, CommitAuthor, Integration, PullRequest, Repository from sentry.testutils import APITestCase from uuid import uuid4 from .testutils import ( PUSH_EVENT_EXAMPLE_INSTALLATION, PULL_REQUEST_OPENED_EVENT_EXAMPLE, PULL_REQUEST_EDITED_EVENT_EXAMPLE, PULL_REQUEST_CLOSED_EVENT_EXAMPLE, ) from sentry.utils.compat.mock import patch class WebhookTest(APITestCase): def test_get(self): url = "/extensions/github-enterprise/webhook/" response = self.client.get(url) assert response.status_code == 405 def test_unknown_host_event(self): # No integration defined in the database, so event should be rejected # because we can't find metadata and secret for it url = "/extensions/github-enterprise/webhook/" response = self.client.post( path=url, data=PUSH_EVENT_EXAMPLE_INSTALLATION, content_type="application/json", HTTP_X_GITHUB_EVENT="push", HTTP_X_GITHUB_ENTERPRISE_HOST="99.99.99.99", HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()), ) assert response.status_code == 400 def test_unregistered_event(self): project = self.project # force creation url = u"/extensions/github-enterprise/webhook/".format(project.organization.id) response = self.client.post( path=url, data=PUSH_EVENT_EXAMPLE_INSTALLATION, content_type="application/json", HTTP_X_GITHUB_EVENT="UnregisteredEvent", HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196", HTTP_X_HUB_SIGNATURE="sha1=56a3df597e02adbc17fb617502c70e19d96a6136", HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()), ) assert response.status_code == 204 @patch("sentry.integrations.github_enterprise.webhook.get_installation_metadata") def test_invalid_signature_event(self, mock_installation): mock_installation.return_value = { "url": "35.232.149.196", "id": "2", "name": "test-app", "webhook_secret": "b3002c3e321d4b7880360d397db2ccfd", "private_key": "private_key", "verify_ssl": True, } url = "/extensions/github-enterprise/webhook/" response = self.client.post( path=url, data=PUSH_EVENT_EXAMPLE_INSTALLATION, content_type="application/json", HTTP_X_GITHUB_EVENT="push", HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196", HTTP_X_HUB_SIGNATURE="sha1=33521abeaaf9a57c2abf486e0ccd54d23cf36fec", HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()), ) assert response.status_code == 401 @patch("sentry.integrations.github_enterprise.webhook.get_installation_metadata") def test_missing_signature_ok(self, mock_installation): # Old Github:e doesn't send a signature, so we have to accept that. 
mock_installation.return_value = { "url": "35.232.149.196", "id": "2", "name": "test-app", "webhook_secret": "b3002c3e321d4b7880360d397db2ccfd", "private_key": "private_key", "verify_ssl": True, } url = "/extensions/github-enterprise/webhook/" response = self.client.post( path=url, data=PUSH_EVENT_EXAMPLE_INSTALLATION, content_type="application/json", HTTP_X_GITHUB_EVENT="push", HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196", HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()), ) assert response.status_code == 204 class PushEventWebhookTest(APITestCase): @patch("sentry.integrations.github_enterprise.client.get_jwt") @patch("sentry.integrations.github_enterprise.webhook.get_installation_metadata") def test_simple(self, mock_get_installation_metadata, mock_get_jwt): mock_get_jwt.return_value = "" project = self.project # force creation url = "/extensions/github-enterprise/webhook/" mock_get_installation_metadata.return_value = { "url": "35.232.149.196", "id": "2", "name": "test-app", "webhook_secret": "b3002c3e321d4b7880360d397db2ccfd", "private_key": "private_key", "verify_ssl": True, } Repository.objects.create( organization_id=project.organization.id, external_id="35129377", provider="integrations:github_enterprise", name="baxterthehacker/public-repo", ) integration = Integration.objects.create( external_id="35.232.149.196:12345", provider="github_enterprise", metadata={ "domain_name": "35.232.149.196/baxterthehacker", "installation_id": "12345", "installation": {"id": "2", "private_key": "private_key", "verify_ssl": True}, }, ) integration.add_organization(project.organization, self.user) response = self.client.post( path=url, data=PUSH_EVENT_EXAMPLE_INSTALLATION, content_type="application/json", HTTP_X_GITHUB_EVENT="push", HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196", HTTP_X_HUB_SIGNATURE="sha1=2a0586cc46490b17441834e1e143ec3d8c1fe032", HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()), ) assert response.status_code == 204 commit_list = list( Commit.objects.filter( # organization_id=project.organization_id, ) .select_related("author") .order_by("-date_added") ) assert len(commit_list) == 2 commit = commit_list[0] assert commit.key == "133d60480286590a610a0eb7352ff6e02b9674c4" assert commit.message == u"Update README.md (àgain)" assert commit.author.name == u"bàxterthehacker" assert commit.author.email == "[email protected]" assert commit.author.external_id is None assert commit.date_added == datetime(2015, 5, 5, 23, 45, 15, tzinfo=timezone.utc) commit = commit_list[1] assert commit.key == "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c" assert commit.message == "Update README.md" assert commit.author.name == u"bàxterthehacker" assert commit.author.email == "[email protected]" assert commit.author.external_id is None assert commit.date_added == datetime(2015, 5, 5, 23, 40, 15, tzinfo=timezone.utc) @patch("sentry.integrations.github_enterprise.webhook.get_installation_metadata") def test_anonymous_lookup(self, mock_get_installation_metadata): project = self.project # force creation url = "/extensions/github-enterprise/webhook/" mock_get_installation_metadata.return_value = { "url": "35.232.149.196", "id": "2", "name": "test-app", "webhook_secret": "b3002c3e321d4b7880360d397db2ccfd", "private_key": "private_key", "verify_ssl": True, } integration = Integration.objects.create( provider="github_enterprise", external_id="35.232.149.196:12345", name="octocat", metadata={ "domain_name": "35.232.149.196/baxterthehacker", "installation": {"id": "2", "private_key": "private_key", "verify_ssl": True}, }, ) 
integration.add_organization(project.organization, self.user) Repository.objects.create( organization_id=project.organization.id, external_id="35129377", provider="integrations:github_enterprise", name="baxterthehacker/public-repo", ) CommitAuthor.objects.create( external_id="github_enterprise:baxterthehacker", organization_id=project.organization_id, email="[email protected]", name=u"bàxterthehacker", ) response = self.client.post( path=url, data=PUSH_EVENT_EXAMPLE_INSTALLATION, content_type="application/json", HTTP_X_GITHUB_EVENT="push", HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196", HTTP_X_HUB_SIGNATURE="sha1=2a0586cc46490b17441834e1e143ec3d8c1fe032", HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()), ) assert response.status_code == 204 commit_list = list( Commit.objects.filter(organization_id=project.organization_id) .select_related("author") .order_by("-date_added") ) # should be skipping the #skipsentry commit assert len(commit_list) == 2 commit = commit_list[0] assert commit.key == "133d60480286590a610a0eb7352ff6e02b9674c4" assert commit.message == u"Update README.md (àgain)" assert commit.author.name == u"bàxterthehacker" assert commit.author.email == "[email protected]" assert commit.date_added == datetime(2015, 5, 5, 23, 45, 15, tzinfo=timezone.utc) commit = commit_list[1] assert commit.key == "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c" assert commit.message == "Update README.md" assert commit.author.name == u"bàxterthehacker" assert commit.author.email == "[email protected]" assert commit.date_added == datetime(2015, 5, 5, 23, 40, 15, tzinfo=timezone.utc) @patch("sentry.integrations.github_enterprise.client.get_jwt") @patch("sentry.integrations.github_enterprise.webhook.get_installation_metadata") def test_multiple_orgs(self, mock_get_installation_metadata, mock_get_jwt): mock_get_jwt.return_value = "" project = self.project # force creation url = "/extensions/github-enterprise/webhook/" mock_get_installation_metadata.return_value = { "url": "35.232.149.196", "id": "2", "name": "test-app", "webhook_secret": "b3002c3e321d4b7880360d397db2ccfd", "private_key": "private_key", "verify_ssl": True, } Repository.objects.create( organization_id=project.organization.id, external_id="35129377", provider="integrations:github_enterprise", name="baxterthehacker/public-repo", ) integration = Integration.objects.create( external_id="35.232.149.196:12345", provider="github_enterprise", metadata={ "domain_name": "35.232.149.196/baxterthehacker", "installation_id": "12345", "installation": {"id": "2", "private_key": "private_key", "verify_ssl": True}, }, ) integration.add_organization(project.organization, self.user) org2 = self.create_organization() project2 = self.create_project(organization=org2, name="bar") Repository.objects.create( organization_id=project2.organization.id, external_id="77", provider="integrations:github_enterprise", name="another/repo", ) integration = Integration.objects.create( external_id="35.232.149.196:99", provider="github_enterprise", metadata={ "domain_name": "35.232.149.196/another", "installation": { "installation_id": "99", "id": "2", "private_key": "private_key", "verify_ssl": True, }, }, ) integration.add_organization(org2, self.user) response = self.client.post( path=url, data=PUSH_EVENT_EXAMPLE_INSTALLATION, content_type="application/json", HTTP_X_GITHUB_EVENT="push", HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196", HTTP_X_HUB_SIGNATURE="sha1=2a0586cc46490b17441834e1e143ec3d8c1fe032", HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()), ) assert response.status_code == 
204 commit_list = list( Commit.objects.filter(organization_id=project.organization_id) .select_related("author") .order_by("-date_added") ) assert len(commit_list) == 2 commit_list = list( Commit.objects.filter(organization_id=org2.id) .select_related("author") .order_by("-date_added") ) assert len(commit_list) == 0 class PullRequestEventWebhook(APITestCase): @patch("sentry.integrations.github_enterprise.webhook.get_installation_metadata") def test_opened(self, mock_get_installation_metadata): project = self.project # force creation url = "/extensions/github-enterprise/webhook/" mock_get_installation_metadata.return_value = { "url": "35.232.149.196", "id": "2", "name": "test-app", "webhook_secret": "b3002c3e321d4b7880360d397db2ccfd", "private_key": "private_key", "verify_ssl": True, } integration = Integration.objects.create( provider="github_enterprise", external_id="35.232.149.196:234", name="octocat", metadata={ "domain_name": "35.232.149.196/baxterthehacker", "installation": {"id": "2", "private_key": "private_key", "verify_ssl": True}, }, ) integration.add_organization(project.organization, self.user) repo = Repository.objects.create( organization_id=project.organization.id, external_id="35129377", provider="integrations:github_enterprise", name="baxterthehacker/public-repo", ) response = self.client.post( path=url, data=PULL_REQUEST_OPENED_EVENT_EXAMPLE, content_type="application/json", HTTP_X_GITHUB_EVENT="pull_request", HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196", HTTP_X_HUB_SIGNATURE="sha1=aa5b11bc52b9fac082cb59f9ee8667cb222c3aff", HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()), ) assert response.status_code == 204 prs = PullRequest.objects.filter( repository_id=repo.id, organization_id=project.organization.id ) assert len(prs) == 1 pr = prs[0] assert pr.key == "1" assert pr.message == u"This is a pretty simple change that we need to pull into master." 
assert pr.title == u"Update the README with new information" assert pr.author.name == u"baxterthehacker" @patch("sentry.integrations.github_enterprise.webhook.get_installation_metadata") def test_edited(self, mock_get_installation_metadata): project = self.project # force creation url = "/extensions/github-enterprise/webhook/" mock_get_installation_metadata.return_value = { "url": "35.232.149.196", "id": "2", "name": "test-app", "webhook_secret": "b3002c3e321d4b7880360d397db2ccfd", "private_key": "private_key", "verify_ssl": True, } integration = Integration.objects.create( provider="github_enterprise", external_id="35.232.149.196:234", name="octocat", metadata={ "domain_name": "35.232.149.196/baxterthehacker", "installation": {"id": "2", "private_key": "private_key", "verify_ssl": True}, }, ) integration.add_organization(project.organization, self.user) repo = Repository.objects.create( organization_id=project.organization.id, external_id="35129377", provider="integrations:github_enterprise", name="baxterthehacker/public-repo", ) pr = PullRequest.objects.create( key="1", repository_id=repo.id, organization_id=project.organization.id ) response = self.client.post( path=url, data=PULL_REQUEST_EDITED_EVENT_EXAMPLE, content_type="application/json", HTTP_X_GITHUB_EVENT="pull_request", HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196", HTTP_X_HUB_SIGNATURE="sha1=b50a13afd33b514e8e62e603827ea62530f0690e", HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()), ) assert response.status_code == 204 pr = PullRequest.objects.get(id=pr.id) assert pr.key == "1" assert pr.message == u"new edited body" assert pr.title == u"new edited title" assert pr.author.name == u"baxterthehacker" @patch("sentry.integrations.github_enterprise.webhook.get_installation_metadata") def test_closed(self, mock_get_installation_metadata): project = self.project # force creation url = "/extensions/github-enterprise/webhook/" mock_get_installation_metadata.return_value = { "url": "35.232.149.196", "id": "2", "name": "test-app", "webhook_secret": "b3002c3e321d4b7880360d397db2ccfd", "private_key": "private_key", "verify_ssl": True, } integration = Integration.objects.create( provider="github_enterprise", external_id="35.232.149.196:234", name="octocat", metadata={ "domain_name": "35.232.149.196/baxterthehacker", "installation": {"id": "2", "private_key": "private_key", "verify_ssl": True}, }, ) integration.add_organization(project.organization, self.user) repo = Repository.objects.create( organization_id=project.organization.id, external_id="35129377", provider="integrations:github_enterprise", name="baxterthehacker/public-repo", ) response = self.client.post( path=url, data=PULL_REQUEST_CLOSED_EVENT_EXAMPLE, content_type="application/json", HTTP_X_GITHUB_EVENT="pull_request", HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196", HTTP_X_HUB_SIGNATURE="sha1=dff1c803cf1e48c1b9aefe4a17952ea132758806", HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()), ) assert response.status_code == 204 prs = PullRequest.objects.filter( repository_id=repo.id, organization_id=project.organization.id ) assert len(prs) == 1 pr = prs[0] assert pr.key == "1" assert pr.message == u"new closed body" assert pr.title == u"new closed title" assert pr.author.name == u"baxterthehacker" assert pr.merge_commit_sha == "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c"
37.555556
96
0.61772
from __future__ import absolute_import import six from datetime import datetime from django.utils import timezone from sentry.models import Commit, CommitAuthor, Integration, PullRequest, Repository from sentry.testutils import APITestCase from uuid import uuid4 from .testutils import ( PUSH_EVENT_EXAMPLE_INSTALLATION, PULL_REQUEST_OPENED_EVENT_EXAMPLE, PULL_REQUEST_EDITED_EVENT_EXAMPLE, PULL_REQUEST_CLOSED_EVENT_EXAMPLE, ) from sentry.utils.compat.mock import patch class WebhookTest(APITestCase): def test_get(self): url = "/extensions/github-enterprise/webhook/" response = self.client.get(url) assert response.status_code == 405 def test_unknown_host_event(self): url = "/extensions/github-enterprise/webhook/" response = self.client.post( path=url, data=PUSH_EVENT_EXAMPLE_INSTALLATION, content_type="application/json", HTTP_X_GITHUB_EVENT="push", HTTP_X_GITHUB_ENTERPRISE_HOST="99.99.99.99", HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()), ) assert response.status_code == 400 def test_unregistered_event(self): project = self.project # force creation url = u"/extensions/github-enterprise/webhook/".format(project.organization.id) response = self.client.post( path=url, data=PUSH_EVENT_EXAMPLE_INSTALLATION, content_type="application/json", HTTP_X_GITHUB_EVENT="UnregisteredEvent", HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196", HTTP_X_HUB_SIGNATURE="sha1=56a3df597e02adbc17fb617502c70e19d96a6136", HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()), ) assert response.status_code == 204 @patch("sentry.integrations.github_enterprise.webhook.get_installation_metadata") def test_invalid_signature_event(self, mock_installation): mock_installation.return_value = { "url": "35.232.149.196", "id": "2", "name": "test-app", "webhook_secret": "b3002c3e321d4b7880360d397db2ccfd", "private_key": "private_key", "verify_ssl": True, } url = "/extensions/github-enterprise/webhook/" response = self.client.post( path=url, data=PUSH_EVENT_EXAMPLE_INSTALLATION, content_type="application/json", HTTP_X_GITHUB_EVENT="push", HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196", HTTP_X_HUB_SIGNATURE="sha1=33521abeaaf9a57c2abf486e0ccd54d23cf36fec", HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()), ) assert response.status_code == 401 @patch("sentry.integrations.github_enterprise.webhook.get_installation_metadata") def test_missing_signature_ok(self, mock_installation): # Old Github:e doesn't send a signature, so we have to accept that. 
mock_installation.return_value = { "url": "35.232.149.196", "id": "2", "name": "test-app", "webhook_secret": "b3002c3e321d4b7880360d397db2ccfd", "private_key": "private_key", "verify_ssl": True, } url = "/extensions/github-enterprise/webhook/" response = self.client.post( path=url, data=PUSH_EVENT_EXAMPLE_INSTALLATION, content_type="application/json", HTTP_X_GITHUB_EVENT="push", HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196", HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()), ) assert response.status_code == 204 class PushEventWebhookTest(APITestCase): @patch("sentry.integrations.github_enterprise.client.get_jwt") @patch("sentry.integrations.github_enterprise.webhook.get_installation_metadata") def test_simple(self, mock_get_installation_metadata, mock_get_jwt): mock_get_jwt.return_value = "" project = self.project url = "/extensions/github-enterprise/webhook/" mock_get_installation_metadata.return_value = { "url": "35.232.149.196", "id": "2", "name": "test-app", "webhook_secret": "b3002c3e321d4b7880360d397db2ccfd", "private_key": "private_key", "verify_ssl": True, } Repository.objects.create( organization_id=project.organization.id, external_id="35129377", provider="integrations:github_enterprise", name="baxterthehacker/public-repo", ) integration = Integration.objects.create( external_id="35.232.149.196:12345", provider="github_enterprise", metadata={ "domain_name": "35.232.149.196/baxterthehacker", "installation_id": "12345", "installation": {"id": "2", "private_key": "private_key", "verify_ssl": True}, }, ) integration.add_organization(project.organization, self.user) response = self.client.post( path=url, data=PUSH_EVENT_EXAMPLE_INSTALLATION, content_type="application/json", HTTP_X_GITHUB_EVENT="push", HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196", HTTP_X_HUB_SIGNATURE="sha1=2a0586cc46490b17441834e1e143ec3d8c1fe032", HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()), ) assert response.status_code == 204 commit_list = list( Commit.objects.filter( ) .select_related("author") .order_by("-date_added") ) assert len(commit_list) == 2 commit = commit_list[0] assert commit.key == "133d60480286590a610a0eb7352ff6e02b9674c4" assert commit.message == u"Update README.md (àgain)" assert commit.author.name == u"bàxterthehacker" assert commit.author.email == "[email protected]" assert commit.author.external_id is None assert commit.date_added == datetime(2015, 5, 5, 23, 45, 15, tzinfo=timezone.utc) commit = commit_list[1] assert commit.key == "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c" assert commit.message == "Update README.md" assert commit.author.name == u"bàxterthehacker" assert commit.author.email == "[email protected]" assert commit.author.external_id is None assert commit.date_added == datetime(2015, 5, 5, 23, 40, 15, tzinfo=timezone.utc) @patch("sentry.integrations.github_enterprise.webhook.get_installation_metadata") def test_anonymous_lookup(self, mock_get_installation_metadata): project = self.project url = "/extensions/github-enterprise/webhook/" mock_get_installation_metadata.return_value = { "url": "35.232.149.196", "id": "2", "name": "test-app", "webhook_secret": "b3002c3e321d4b7880360d397db2ccfd", "private_key": "private_key", "verify_ssl": True, } integration = Integration.objects.create( provider="github_enterprise", external_id="35.232.149.196:12345", name="octocat", metadata={ "domain_name": "35.232.149.196/baxterthehacker", "installation": {"id": "2", "private_key": "private_key", "verify_ssl": True}, }, ) integration.add_organization(project.organization, self.user) 
Repository.objects.create( organization_id=project.organization.id, external_id="35129377", provider="integrations:github_enterprise", name="baxterthehacker/public-repo", ) CommitAuthor.objects.create( external_id="github_enterprise:baxterthehacker", organization_id=project.organization_id, email="[email protected]", name=u"bàxterthehacker", ) response = self.client.post( path=url, data=PUSH_EVENT_EXAMPLE_INSTALLATION, content_type="application/json", HTTP_X_GITHUB_EVENT="push", HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196", HTTP_X_HUB_SIGNATURE="sha1=2a0586cc46490b17441834e1e143ec3d8c1fe032", HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()), ) assert response.status_code == 204 commit_list = list( Commit.objects.filter(organization_id=project.organization_id) .select_related("author") .order_by("-date_added") ) assert len(commit_list) == 2 commit = commit_list[0] assert commit.key == "133d60480286590a610a0eb7352ff6e02b9674c4" assert commit.message == u"Update README.md (àgain)" assert commit.author.name == u"bàxterthehacker" assert commit.author.email == "[email protected]" assert commit.date_added == datetime(2015, 5, 5, 23, 45, 15, tzinfo=timezone.utc) commit = commit_list[1] assert commit.key == "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c" assert commit.message == "Update README.md" assert commit.author.name == u"bàxterthehacker" assert commit.author.email == "[email protected]" assert commit.date_added == datetime(2015, 5, 5, 23, 40, 15, tzinfo=timezone.utc) @patch("sentry.integrations.github_enterprise.client.get_jwt") @patch("sentry.integrations.github_enterprise.webhook.get_installation_metadata") def test_multiple_orgs(self, mock_get_installation_metadata, mock_get_jwt): mock_get_jwt.return_value = "" project = self.project url = "/extensions/github-enterprise/webhook/" mock_get_installation_metadata.return_value = { "url": "35.232.149.196", "id": "2", "name": "test-app", "webhook_secret": "b3002c3e321d4b7880360d397db2ccfd", "private_key": "private_key", "verify_ssl": True, } Repository.objects.create( organization_id=project.organization.id, external_id="35129377", provider="integrations:github_enterprise", name="baxterthehacker/public-repo", ) integration = Integration.objects.create( external_id="35.232.149.196:12345", provider="github_enterprise", metadata={ "domain_name": "35.232.149.196/baxterthehacker", "installation_id": "12345", "installation": {"id": "2", "private_key": "private_key", "verify_ssl": True}, }, ) integration.add_organization(project.organization, self.user) org2 = self.create_organization() project2 = self.create_project(organization=org2, name="bar") Repository.objects.create( organization_id=project2.organization.id, external_id="77", provider="integrations:github_enterprise", name="another/repo", ) integration = Integration.objects.create( external_id="35.232.149.196:99", provider="github_enterprise", metadata={ "domain_name": "35.232.149.196/another", "installation": { "installation_id": "99", "id": "2", "private_key": "private_key", "verify_ssl": True, }, }, ) integration.add_organization(org2, self.user) response = self.client.post( path=url, data=PUSH_EVENT_EXAMPLE_INSTALLATION, content_type="application/json", HTTP_X_GITHUB_EVENT="push", HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196", HTTP_X_HUB_SIGNATURE="sha1=2a0586cc46490b17441834e1e143ec3d8c1fe032", HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()), ) assert response.status_code == 204 commit_list = list( Commit.objects.filter(organization_id=project.organization_id) .select_related("author") 
.order_by("-date_added") ) assert len(commit_list) == 2 commit_list = list( Commit.objects.filter(organization_id=org2.id) .select_related("author") .order_by("-date_added") ) assert len(commit_list) == 0 class PullRequestEventWebhook(APITestCase): @patch("sentry.integrations.github_enterprise.webhook.get_installation_metadata") def test_opened(self, mock_get_installation_metadata): project = self.project url = "/extensions/github-enterprise/webhook/" mock_get_installation_metadata.return_value = { "url": "35.232.149.196", "id": "2", "name": "test-app", "webhook_secret": "b3002c3e321d4b7880360d397db2ccfd", "private_key": "private_key", "verify_ssl": True, } integration = Integration.objects.create( provider="github_enterprise", external_id="35.232.149.196:234", name="octocat", metadata={ "domain_name": "35.232.149.196/baxterthehacker", "installation": {"id": "2", "private_key": "private_key", "verify_ssl": True}, }, ) integration.add_organization(project.organization, self.user) repo = Repository.objects.create( organization_id=project.organization.id, external_id="35129377", provider="integrations:github_enterprise", name="baxterthehacker/public-repo", ) response = self.client.post( path=url, data=PULL_REQUEST_OPENED_EVENT_EXAMPLE, content_type="application/json", HTTP_X_GITHUB_EVENT="pull_request", HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196", HTTP_X_HUB_SIGNATURE="sha1=aa5b11bc52b9fac082cb59f9ee8667cb222c3aff", HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()), ) assert response.status_code == 204 prs = PullRequest.objects.filter( repository_id=repo.id, organization_id=project.organization.id ) assert len(prs) == 1 pr = prs[0] assert pr.key == "1" assert pr.message == u"This is a pretty simple change that we need to pull into master." assert pr.title == u"Update the README with new information" assert pr.author.name == u"baxterthehacker" @patch("sentry.integrations.github_enterprise.webhook.get_installation_metadata") def test_edited(self, mock_get_installation_metadata): project = self.project url = "/extensions/github-enterprise/webhook/" mock_get_installation_metadata.return_value = { "url": "35.232.149.196", "id": "2", "name": "test-app", "webhook_secret": "b3002c3e321d4b7880360d397db2ccfd", "private_key": "private_key", "verify_ssl": True, } integration = Integration.objects.create( provider="github_enterprise", external_id="35.232.149.196:234", name="octocat", metadata={ "domain_name": "35.232.149.196/baxterthehacker", "installation": {"id": "2", "private_key": "private_key", "verify_ssl": True}, }, ) integration.add_organization(project.organization, self.user) repo = Repository.objects.create( organization_id=project.organization.id, external_id="35129377", provider="integrations:github_enterprise", name="baxterthehacker/public-repo", ) pr = PullRequest.objects.create( key="1", repository_id=repo.id, organization_id=project.organization.id ) response = self.client.post( path=url, data=PULL_REQUEST_EDITED_EVENT_EXAMPLE, content_type="application/json", HTTP_X_GITHUB_EVENT="pull_request", HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196", HTTP_X_HUB_SIGNATURE="sha1=b50a13afd33b514e8e62e603827ea62530f0690e", HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()), ) assert response.status_code == 204 pr = PullRequest.objects.get(id=pr.id) assert pr.key == "1" assert pr.message == u"new edited body" assert pr.title == u"new edited title" assert pr.author.name == u"baxterthehacker" @patch("sentry.integrations.github_enterprise.webhook.get_installation_metadata") def test_closed(self, 
mock_get_installation_metadata): project = self.project url = "/extensions/github-enterprise/webhook/" mock_get_installation_metadata.return_value = { "url": "35.232.149.196", "id": "2", "name": "test-app", "webhook_secret": "b3002c3e321d4b7880360d397db2ccfd", "private_key": "private_key", "verify_ssl": True, } integration = Integration.objects.create( provider="github_enterprise", external_id="35.232.149.196:234", name="octocat", metadata={ "domain_name": "35.232.149.196/baxterthehacker", "installation": {"id": "2", "private_key": "private_key", "verify_ssl": True}, }, ) integration.add_organization(project.organization, self.user) repo = Repository.objects.create( organization_id=project.organization.id, external_id="35129377", provider="integrations:github_enterprise", name="baxterthehacker/public-repo", ) response = self.client.post( path=url, data=PULL_REQUEST_CLOSED_EVENT_EXAMPLE, content_type="application/json", HTTP_X_GITHUB_EVENT="pull_request", HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196", HTTP_X_HUB_SIGNATURE="sha1=dff1c803cf1e48c1b9aefe4a17952ea132758806", HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()), ) assert response.status_code == 204 prs = PullRequest.objects.filter( repository_id=repo.id, organization_id=project.organization.id ) assert len(prs) == 1 pr = prs[0] assert pr.key == "1" assert pr.message == u"new closed body" assert pr.title == u"new closed title" assert pr.author.name == u"baxterthehacker" assert pr.merge_commit_sha == "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c"
true
true
f700f699c24a7decc3d4d0de8d4c7caa6080333a
10,663
py
Python
qcengine/programs/turbomole/runner.py
kexul/QCEngine
d7ced823512ce58133739327684cae380592309c
[ "BSD-3-Clause" ]
105
2018-08-15T14:47:27.000Z
2022-02-14T01:53:28.000Z
qcengine/programs/turbomole/runner.py
kexul/QCEngine
d7ced823512ce58133739327684cae380592309c
[ "BSD-3-Clause" ]
338
2018-08-18T15:48:25.000Z
2022-03-30T09:02:40.000Z
qcengine/programs/turbomole/runner.py
kexul/QCEngine
d7ced823512ce58133739327684cae380592309c
[ "BSD-3-Clause" ]
74
2018-08-28T04:37:04.000Z
2022-03-31T06:57:51.000Z
""" Calls the Turbomole executable. """ import os import re from decimal import Decimal from pathlib import Path from typing import Any, Dict, Optional, Tuple from qcelemental.models import AtomicResult, Provenance, BasisSet from qcelemental.util import safe_version, which from ...exceptions import InputError from ..model import ProgramHarness from ..qcvar_identities_resources import build_atomicproperties, build_out from ...util import execute, temporary_directory from .define import execute_define, prepare_stdin from .harvester import harvest from .methods import KEYWORDS, METHODS class TurbomoleHarness(ProgramHarness): _defaults = { "name": "Turbomole", "scratch": True, "thread_safe": False, "thread_parallel": False, "node_parallel": True, "managed_memory": True, } version_cache: Dict[str, str] = {} @staticmethod def found(raise_error: bool = False) -> bool: return which( "define", return_bool=True, raise_error=raise_error, raise_msg="Please install via http://www.cosmologic.de/turbomole/home.html", ) def get_version(self) -> str: which_prog = which("define") if which_prog not in self.version_cache: # We use basically a dummy stdin as we dont want to pipe any real # input into define. We only want to parse the version number from # the string. with temporary_directory(suffix="_define_scratch") as tmpdir: tmpdir = Path(tmpdir) stdout = execute_define("\n", cwd=tmpdir) # Tested with V7.3 and V7.4.0 version_re = re.compile("TURBOMOLE (?:rev\. )?(V.+?)\s+") mobj = version_re.search(stdout) version = mobj[1] self.version_cache[which_prog] = safe_version(version) return self.version_cache[which_prog] def compute(self, input_model: "AtomicInput", config: "TaskConfig") -> "AtomicResult": self.found(raise_error=True) job_inputs = self.build_input(input_model, config) success, dexe = self.execute(job_inputs) # TODO: handle input errors?! But then define probably already crashed... # if 'There is an error in the input file' in dexe["stdout"]: # raise InputError(dexe["stdout"]) if success: dexe["outfiles"]["stdout"] = dexe["stdout"] dexe["outfiles"]["stderr"] = dexe["stderr"] return self.parse_output(dexe["outfiles"], input_model) def sub_control(self, control, pattern, repl, **kwargs): control_subbed = re.sub(pattern, repl, control, **kwargs) return control_subbed def append_control(self, control, to_append): return self.sub_control(control, "\$end", f"{to_append}\n$end") def build_input( self, input_model: "AtomicInput", config: "TaskConfig", template: Optional[str] = None ) -> Dict[str, Any]: # The 'define' wrapper can only handle normal string basis set input. If # a QCSchema basis set is given we break early, because this is not handled # right now. if isinstance(input_model.model.basis, BasisSet): raise InputError("QCSchema BasisSet for model.basis not implemented. Use string basis name.") turbomolerec = { "infiles": {}, "outfiles": {"control": "control"}, "scratch_directory": config.scratch_directory, } # Handle molecule # TODO: what's up with moldata? Do I need it? coord_str, moldata = input_model.molecule.to_string(dtype="turbomole", return_data=True) # Prepare stdin for define call model = input_model.model # geeopt will hold the for which to calculate the gradient. # 'x' corresponds to the ground state, 'a 1' would be the GS too. # 'a1 2' would be the 1st excited state of the irreducible group A1. # Right now only GS are supported, so this is hardcoded as 'x'. 
geoopt = "x" if input_model.driver.derivative_int() > 0 else "" stdin, subs = prepare_stdin( model.method, model.basis, input_model.keywords, input_model.molecule.molecular_charge, input_model.molecule.molecular_multiplicity, geoopt, ) with temporary_directory(suffix="_define_scratch") as tmpdir: tmpdir = Path(tmpdir) with open(tmpdir / "coord", "w") as handle: handle.write(coord_str) stdout = execute_define(stdin, cwd=tmpdir) # The define scratch will be populated by some files that we want to keep to_keep = "basis auxbasis coord control alpha beta mos".split() for fn in to_keep: full_fn = tmpdir / fn if not full_fn.exists(): continue with open(full_fn) as handle: turbomolerec["infiles"][fn] = handle.read() env = os.environ.copy() env["PARA_ARCH"] = "SMP" env["PARNODES"] = str(config.ncores) env["SMPCPUS"] = str(config.ncores) turbomolerec["environment"] = env # Memory is set in the control file keywords = input_model.keywords ######################## # DETERMINE SOME FLAGS # ######################## ri_calculation = any([keywords.get(ri_kw, False) for ri_kw in KEYWORDS["ri"]]) ricc2_calculation = model.method in METHODS["ricc2"] ################### # MEMORY HANDLING # ################### # Central file that controls Turbomole. We assign it here to the "control" # variable as we may need to modify it, e.g. for a Hessian calculation. control = turbomolerec["infiles"]["control"] # Calculate total available memory in MB mem_mb = config.memory * (1024 ** 3) / 1e6 ri_fraction = 0.25 # Total amount of memory allocated to ricore ricore = 0 if ri_calculation: # This is the default given by Turbomole ricore = mem_mb * ri_fraction ri_per_core = int(ricore / config.ncores) # Update $ricore entry in the control file control = self.sub_control(control, "\$ricore\s+(\d+)", f"$ricore {ri_per_core} MiB per_core") # Calculate remaining memory maxcor = mem_mb - ricore assert maxcor > 0, "Not enough memory for maxcor! Need {-maxcor} MB more!" # maxcore per_core per_core = int(maxcor / config.ncores) # Update $maxcor entry in the control file control = self.sub_control(control, "\$maxcor\s+(\d+)\s+MiB\s+per_core", f"$maxcor {per_core} MiB per_core") ############################ # DETERMINE SHELL COMMANDS # ############################ # ----------------------# # | Energy calculations | # ----------------------# # Set appropriate commands. We always need a reference wavefunction # so the first command will be dscf or ridft to converge the SCF. commands = ["ridft"] if ri_calculation else ["dscf"] # ------------------------# # | Gradient calculations | # ------------------------# # Keep the gradient file for parsing if input_model.driver.derivative_int() == 1: turbomolerec["outfiles"]["gradient"] = "gradient" # ricc2 will also calculate the gradient. But this requires setting # 'geoopt (state)' in the control file. This is currently handled in the # 'define' call. 
if ricc2_calculation: commands.append("ricc2") # Gradient calculation for DFT/HF elif input_model.driver.derivative_int() == 1: grad_command = "rdgrad" if ri_calculation else "grad" commands.append(grad_command) # -----------------------# # | Hessian calculations | # -----------------------# if input_model.driver.derivative_int() == 2: freq_command = "NumForce -level cc2" if ricc2_calculation else "aoforce" # NumForce seems to ignore the nprhessian command and will always # write to hessian hessian_outfile = "hessian" if ricc2_calculation else "nprhessian" commands.append(freq_command) # Add some keywords to the control file # noproj: Don't project out translation and rotation # nprhessian: Set filename of un-projected hessian control = self.append_control(control, "$noproj\n$nprhessian file=nprhessian") turbomolerec["outfiles"][hessian_outfile] = None # Build the full shell command and set it command = ["; ".join(commands)] turbomolerec["command"] = command # Re-assign the potentially modified control file, e.g. for a Hessian calculation turbomolerec["infiles"]["control"] = control # TODO: check if the chosen commands are available with which()? return turbomolerec def execute( self, inputs: Dict[str, Any], *, extra_outfiles=None, extra_commands=None, scratch_name=None, timeout=None ) -> Tuple[bool, Dict]: success, dexe = execute( inputs["command"], inputs["infiles"], inputs["outfiles"], shell=True, # TODO: scratch_messy? # scratch_messy=False, ) return success, dexe def parse_output( self, outfiles: Dict[str, str], input_model: "AtomicInput" ) -> "AtomicResult": # lgtm: [py/similar-function] stdout = outfiles.pop("stdout") qcvars, gradient, hessian = harvest(input_model.molecule, stdout, **outfiles) if gradient is not None: qcvars["CURRENT GRADIENT"] = gradient if hessian is not None: qcvars["CURRENT HESSIAN"] = hessian retres = qcvars[f"CURRENT {input_model.driver.upper()}"] if isinstance(retres, Decimal): retres = float(retres) build_out(qcvars) atprop = build_atomicproperties(qcvars) output_data = input_model.dict() output_data["extras"]["outfiles"] = outfiles output_data["properties"] = atprop output_data["provenance"] = Provenance(creator="Turbomole", version=self.get_version(), routine="turbomole") output_data["return_result"] = retres output_data["stdout"] = stdout output_data["success"] = True return AtomicResult(**output_data)
38.634058
116
0.598331
import os import re from decimal import Decimal from pathlib import Path from typing import Any, Dict, Optional, Tuple from qcelemental.models import AtomicResult, Provenance, BasisSet from qcelemental.util import safe_version, which from ...exceptions import InputError from ..model import ProgramHarness from ..qcvar_identities_resources import build_atomicproperties, build_out from ...util import execute, temporary_directory from .define import execute_define, prepare_stdin from .harvester import harvest from .methods import KEYWORDS, METHODS class TurbomoleHarness(ProgramHarness): _defaults = { "name": "Turbomole", "scratch": True, "thread_safe": False, "thread_parallel": False, "node_parallel": True, "managed_memory": True, } version_cache: Dict[str, str] = {} @staticmethod def found(raise_error: bool = False) -> bool: return which( "define", return_bool=True, raise_error=raise_error, raise_msg="Please install via http://www.cosmologic.de/turbomole/home.html", ) def get_version(self) -> str: which_prog = which("define") if which_prog not in self.version_cache: with temporary_directory(suffix="_define_scratch") as tmpdir: tmpdir = Path(tmpdir) stdout = execute_define("\n", cwd=tmpdir) version_re = re.compile("TURBOMOLE (?:rev\. )?(V.+?)\s+") mobj = version_re.search(stdout) version = mobj[1] self.version_cache[which_prog] = safe_version(version) return self.version_cache[which_prog] def compute(self, input_model: "AtomicInput", config: "TaskConfig") -> "AtomicResult": self.found(raise_error=True) job_inputs = self.build_input(input_model, config) success, dexe = self.execute(job_inputs) if success: dexe["outfiles"]["stdout"] = dexe["stdout"] dexe["outfiles"]["stderr"] = dexe["stderr"] return self.parse_output(dexe["outfiles"], input_model) def sub_control(self, control, pattern, repl, **kwargs): control_subbed = re.sub(pattern, repl, control, **kwargs) return control_subbed def append_control(self, control, to_append): return self.sub_control(control, "\$end", f"{to_append}\n$end") def build_input( self, input_model: "AtomicInput", config: "TaskConfig", template: Optional[str] = None ) -> Dict[str, Any]: if isinstance(input_model.model.basis, BasisSet): raise InputError("QCSchema BasisSet for model.basis not implemented. Use string basis name.") turbomolerec = { "infiles": {}, "outfiles": {"control": "control"}, "scratch_directory": config.scratch_directory, } coord_str, moldata = input_model.molecule.to_string(dtype="turbomole", return_data=True) # Prepare stdin for define call model = input_model.model # geeopt will hold the for which to calculate the gradient. # 'x' corresponds to the ground state, 'a 1' would be the GS too. # 'a1 2' would be the 1st excited state of the irreducible group A1. # Right now only GS are supported, so this is hardcoded as 'x'. 
geoopt = "x" if input_model.driver.derivative_int() > 0 else "" stdin, subs = prepare_stdin( model.method, model.basis, input_model.keywords, input_model.molecule.molecular_charge, input_model.molecule.molecular_multiplicity, geoopt, ) with temporary_directory(suffix="_define_scratch") as tmpdir: tmpdir = Path(tmpdir) with open(tmpdir / "coord", "w") as handle: handle.write(coord_str) stdout = execute_define(stdin, cwd=tmpdir) # The define scratch will be populated by some files that we want to keep to_keep = "basis auxbasis coord control alpha beta mos".split() for fn in to_keep: full_fn = tmpdir / fn if not full_fn.exists(): continue with open(full_fn) as handle: turbomolerec["infiles"][fn] = handle.read() env = os.environ.copy() env["PARA_ARCH"] = "SMP" env["PARNODES"] = str(config.ncores) env["SMPCPUS"] = str(config.ncores) turbomolerec["environment"] = env # Memory is set in the control file keywords = input_model.keywords ######################## # DETERMINE SOME FLAGS # ######################## ri_calculation = any([keywords.get(ri_kw, False) for ri_kw in KEYWORDS["ri"]]) ricc2_calculation = model.method in METHODS["ricc2"] ################### # MEMORY HANDLING # ################### # Central file that controls Turbomole. We assign it here to the "control" # variable as we may need to modify it, e.g. for a Hessian calculation. control = turbomolerec["infiles"]["control"] # Calculate total available memory in MB mem_mb = config.memory * (1024 ** 3) / 1e6 ri_fraction = 0.25 # Total amount of memory allocated to ricore ricore = 0 if ri_calculation: # This is the default given by Turbomole ricore = mem_mb * ri_fraction ri_per_core = int(ricore / config.ncores) # Update $ricore entry in the control file control = self.sub_control(control, "\$ricore\s+(\d+)", f"$ricore {ri_per_core} MiB per_core") # Calculate remaining memory maxcor = mem_mb - ricore assert maxcor > 0, "Not enough memory for maxcor! Need {-maxcor} MB more!" # maxcore per_core per_core = int(maxcor / config.ncores) # Update $maxcor entry in the control file control = self.sub_control(control, "\$maxcor\s+(\d+)\s+MiB\s+per_core", f"$maxcor {per_core} MiB per_core") ############################ # DETERMINE SHELL COMMANDS # ############################ # ----------------------# # | Energy calculations | # ----------------------# # Set appropriate commands. We always need a reference wavefunction # so the first command will be dscf or ridft to converge the SCF. commands = ["ridft"] if ri_calculation else ["dscf"] # ------------------------# # | Gradient calculations | # ------------------------# # Keep the gradient file for parsing if input_model.driver.derivative_int() == 1: turbomolerec["outfiles"]["gradient"] = "gradient" # ricc2 will also calculate the gradient. But this requires setting # 'geoopt (state)' in the control file. This is currently handled in the # 'define' call. 
if ricc2_calculation: commands.append("ricc2") # Gradient calculation for DFT/HF elif input_model.driver.derivative_int() == 1: grad_command = "rdgrad" if ri_calculation else "grad" commands.append(grad_command) # -----------------------# # | Hessian calculations | # -----------------------# if input_model.driver.derivative_int() == 2: freq_command = "NumForce -level cc2" if ricc2_calculation else "aoforce" # NumForce seems to ignore the nprhessian command and will always # write to hessian hessian_outfile = "hessian" if ricc2_calculation else "nprhessian" commands.append(freq_command) # Add some keywords to the control file # noproj: Don't project out translation and rotation control = self.append_control(control, "$noproj\n$nprhessian file=nprhessian") turbomolerec["outfiles"][hessian_outfile] = None command = ["; ".join(commands)] turbomolerec["command"] = command turbomolerec["infiles"]["control"] = control return turbomolerec def execute( self, inputs: Dict[str, Any], *, extra_outfiles=None, extra_commands=None, scratch_name=None, timeout=None ) -> Tuple[bool, Dict]: success, dexe = execute( inputs["command"], inputs["infiles"], inputs["outfiles"], shell=True, ) return success, dexe def parse_output( self, outfiles: Dict[str, str], input_model: "AtomicInput" ) -> "AtomicResult": stdout = outfiles.pop("stdout") qcvars, gradient, hessian = harvest(input_model.molecule, stdout, **outfiles) if gradient is not None: qcvars["CURRENT GRADIENT"] = gradient if hessian is not None: qcvars["CURRENT HESSIAN"] = hessian retres = qcvars[f"CURRENT {input_model.driver.upper()}"] if isinstance(retres, Decimal): retres = float(retres) build_out(qcvars) atprop = build_atomicproperties(qcvars) output_data = input_model.dict() output_data["extras"]["outfiles"] = outfiles output_data["properties"] = atprop output_data["provenance"] = Provenance(creator="Turbomole", version=self.get_version(), routine="turbomole") output_data["return_result"] = retres output_data["stdout"] = stdout output_data["success"] = True return AtomicResult(**output_data)
true
true
f700f6f6992552d2b05a043e6c989920e50c2f1a
1,811
py
Python
symbiotic/actions.py
StefanoFrazzetto/symbiotic
d163ad18e60ff1a5e89ed5daee1e3ad1b9f64ddb
[ "Apache-2.0" ]
null
null
null
symbiotic/actions.py
StefanoFrazzetto/symbiotic
d163ad18e60ff1a5e89ed5daee1e3ad1b9f64ddb
[ "Apache-2.0" ]
null
null
null
symbiotic/actions.py
StefanoFrazzetto/symbiotic
d163ad18e60ff1a5e89ed5daee1e3ad1b9f64ddb
[ "Apache-2.0" ]
null
null
null
from datetime import datetime
from functools import partial
from typing import Callable, List, Union

from symbiotic.schedule import Schedule


class Action(object):

    def __init__(self, callback: Callable, *args, **kwargs):
        self._callback: partial = partial(callback, *args, **kwargs)
        self._schedule: Union[Schedule, None] = None
        self._next_execution: Union[datetime, None] = None

    def __repr__(self):
        rep = f'{self.__class__.__qualname__}:'
        rep += f' {self._callback.func.__name__},'
        rep += f' args: {self._callback.args},'
        rep += f' kwargs: {self._callback.keywords}'
        return rep

    def __call__(self):
        return self._callback()

    def set_schedule(self, schedule: Schedule) -> None:
        self._schedule = schedule
        self.schedule_next_execution()

    def should_execute(self):
        return datetime.now() > self._next_execution

    def schedule_next_execution(self):
        datetimes = [instant.next_datetime() for instant in self._schedule.instants()]
        self._next_execution = min(datetimes)  # get the earliest execution datetime


class ActionScheduler(object):

    def __init__(self):
        self.actions: List[Action] = []
        self._schedule: Union[Schedule, None] = None

    def start_session(self, schedule: Schedule):
        self._schedule = schedule

    def add(self, callback: Callable, *args, **kwargs):
        action = Action(callback, *args, **kwargs)
        action.set_schedule(self._schedule)
        self.actions.append(action)
        return action

    def end_session(self):
        self._schedule = None

    def run(self):
        for action in self.actions[:]:
            if action.should_execute():
                action()
                action.schedule_next_execution()
30.183333
86
0.649365
from datetime import datetime
from functools import partial
from typing import Callable, List, Union

from symbiotic.schedule import Schedule


class Action(object):

    def __init__(self, callback: Callable, *args, **kwargs):
        self._callback: partial = partial(callback, *args, **kwargs)
        self._schedule: Union[Schedule, None] = None
        self._next_execution: Union[datetime, None] = None

    def __repr__(self):
        rep = f'{self.__class__.__qualname__}:'
        rep += f' {self._callback.func.__name__},'
        rep += f' args: {self._callback.args},'
        rep += f' kwargs: {self._callback.keywords}'
        return rep

    def __call__(self):
        return self._callback()

    def set_schedule(self, schedule: Schedule) -> None:
        self._schedule = schedule
        self.schedule_next_execution()

    def should_execute(self):
        return datetime.now() > self._next_execution

    def schedule_next_execution(self):
        datetimes = [instant.next_datetime() for instant in self._schedule.instants()]
        self._next_execution = min(datetimes)


class ActionScheduler(object):

    def __init__(self):
        self.actions: List[Action] = []
        self._schedule: Union[Schedule, None] = None

    def start_session(self, schedule: Schedule):
        self._schedule = schedule

    def add(self, callback: Callable, *args, **kwargs):
        action = Action(callback, *args, **kwargs)
        action.set_schedule(self._schedule)
        self.actions.append(action)
        return action

    def end_session(self):
        self._schedule = None

    def run(self):
        for action in self.actions[:]:
            if action.should_execute():
                action()
                action.schedule_next_execution()
true
true
f700f79e05c6776dc943cca7b6d50b88ffccb6d0
1,557
py
Python
web-scrapers/combine-schema.py
matildarehm/big-city
37910719dbd6c79ea5ba98372be354c435c4ebbd
[ "MIT" ]
null
null
null
web-scrapers/combine-schema.py
matildarehm/big-city
37910719dbd6c79ea5ba98372be354c435c4ebbd
[ "MIT" ]
null
null
null
web-scrapers/combine-schema.py
matildarehm/big-city
37910719dbd6c79ea5ba98372be354c435c4ebbd
[ "MIT" ]
null
null
null
import os
import json


def combine_schema(borough_name):
    borough_name = borough_name.lower()
    neighborhood_data = ""
    with open('../scraped_data/borough_schema/' + borough_name + ".json", 'r', encoding='utf-8') as json_file:
        data = json.load(json_file)
        for zipCodes in range(len(data[borough_name])):
            with open('../scraped_data/neighborhood_schema/' + borough_name + ".json", 'r+', encoding='utf-8') as zipcode_file:
                neighborhood_data = json.load(zipcode_file)
                neighborhood_data[borough_name][zipCodes]["zipCodes"] = data[borough_name][zipCodes]["zipCodes"]
                print(neighborhood_data)
                with open('../scraped_data/neighborhood_schema/' + borough_name + ".json", 'w', encoding='utf-8') as combined_file:
                    json.dump(neighborhood_data, combined_file, sort_keys=True, indent='\t', separators=(',', ': '))


def main():
    borough_files = os.listdir("./boroughs")
    for borough in borough_files:
        name = borough.split(".")[0].replace("-", " ").title()
        parse_borough = input(name + " => ")
        if parse_borough != "skip":
            convert_to_json = input("Convert " + name + " data to json format? (yes/no) => ")
            if convert_to_json == "yes":
                print("Writing to file ...")
                combine_schema(name)
            else:
                print("Will not convert data json ...")
        else:
            print("Skipping borough: " + name + " ... ")


if __name__ == '__main__':
    main()
43.25
131
0.587669
import os
import json


def combine_schema(borough_name):
    borough_name = borough_name.lower()
    neighborhood_data = ""
    with open('../scraped_data/borough_schema/' + borough_name + ".json", 'r', encoding='utf-8') as json_file:
        data = json.load(json_file)
        for zipCodes in range(len(data[borough_name])):
            with open('../scraped_data/neighborhood_schema/' + borough_name + ".json", 'r+', encoding='utf-8') as zipcode_file:
                neighborhood_data = json.load(zipcode_file)
                neighborhood_data[borough_name][zipCodes]["zipCodes"] = data[borough_name][zipCodes]["zipCodes"]
                print(neighborhood_data)
                with open('../scraped_data/neighborhood_schema/' + borough_name + ".json", 'w', encoding='utf-8') as combined_file:
                    json.dump(neighborhood_data, combined_file, sort_keys=True, indent='\t', separators=(',', ': '))


def main():
    borough_files = os.listdir("./boroughs")
    for borough in borough_files:
        name = borough.split(".")[0].replace("-", " ").title()
        parse_borough = input(name + " => ")
        if parse_borough != "skip":
            convert_to_json = input("Convert " + name + " data to json format? (yes/no) => ")
            if convert_to_json == "yes":
                print("Writing to file ...")
                combine_schema(name)
            else:
                print("Will not convert data json ...")
        else:
            print("Skipping borough: " + name + " ... ")


if __name__ == '__main__':
    main()
true
true
f700f7f47d7e357b6d4b4a24c4774fb80fedbb46
5,515
py
Python
nobrainer/metrics.py
tapasi-brahma/nobrainer
c46586658d226bc3ca22869fd45a2674fdd52be9
[ "Apache-2.0" ]
2
2021-06-20T00:49:22.000Z
2021-12-09T23:20:19.000Z
nobrainer/metrics.py
Aakanksha-Rana/nobrainer
3c8c983e7736e60a94c405e595f37248e503b393
[ "Apache-2.0" ]
1
2021-12-09T23:37:20.000Z
2021-12-09T23:37:20.000Z
nobrainer/metrics.py
tapasi-brahma/nobrainer
c46586658d226bc3ca22869fd45a2674fdd52be9
[ "Apache-2.0" ]
6
2021-12-09T15:56:34.000Z
2021-12-09T16:45:25.000Z
"""Implementations of metrics for 3D semantic segmentation.""" import tensorflow as tf def average_volume_difference(): raise NotImplementedError() def dice(y_true, y_pred, axis=(1, 2, 3, 4)): """Calculate Dice similarity between labels and predictions. Dice similarity is in [0, 1], where 1 is perfect overlap and 0 is no overlap. If both labels and predictions are empty (e.g., all background), then Dice similarity is 1. If we assume the inputs are rank 5 [`(batch, x, y, z, classes)`], then an axis parameter of `(1, 2, 3)` will result in a tensor that contains a Dice score for every class in every item in the batch. The shape of this tensor will be `(batch, classes)`. If the inputs only have one class (e.g., binary segmentation), then an axis parameter of `(1, 2, 3, 4)` should be used. This will result in a tensor of shape `(batch,)`, where every value is the Dice similarity for that prediction. Implemented according to https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4533825/#Equ6 Returns ------- Tensor of Dice similarities. Citations --------- Taha AA, Hanbury A. Metrics for evaluating 3D medical image segmentation: analysis, selection, and tool. BMC Med Imaging. 2015;15:29. Published 2015 Aug 12. doi:10.1186/s12880-015-0068-x """ y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) eps = tf.keras.backend.epsilon() intersection = tf.reduce_sum(y_true * y_pred, axis=axis) summation = tf.reduce_sum(y_true, axis=axis) + tf.reduce_sum(y_pred, axis=axis) return (2 * intersection + eps) / (summation + eps) def generalized_dice(y_true, y_pred, axis=(1, 2, 3)): """Calculate Generalized Dice similarity. This is useful for multi-class predictions. If we assume the inputs are rank 5 [`(batch, x, y, z, classes)`], then an axis parameter of `(1, 2, 3)` should be used. This will result in a tensor of shape `(batch,)`, where every value is the Generalized Dice similarity for that prediction, across all classes. Returns ------- Tensor of Generalized Dice similarities. """ y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) if y_true.get_shape().ndims < 2 or y_pred.get_shape().ndims < 2: raise ValueError("y_true and y_pred must be at least rank 2.") epsilon = tf.keras.backend.epsilon() w = tf.math.reciprocal(tf.square(tf.reduce_sum(y_true, axis=axis))) w = tf.where(tf.math.is_finite(w), w, epsilon) num = 2 * tf.reduce_sum(w * tf.reduce_sum(y_true * y_pred, axis= axis), axis=-1) den = tf.reduce_sum(w * tf.reduce_sum(y_true + y_pred, axis= axis), axis=-1) gdice = num/den gdice = tf.where(tf.math.is_finite(gdice), gdice, tf.zeros_like(gdice)) return gdice def hamming(y_true, y_pred, axis=(1, 2, 3)): y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) return tf.reduce_mean(tf.not_equal(y_pred, y_true), axis=axis) def haussdorf(): raise NotADirectoryError() def jaccard(y_true, y_pred, axis=(1, 2, 3, 4)): """Calculate Jaccard similarity between labels and predictions. Jaccard similarity is in [0, 1], where 1 is perfect overlap and 0 is no overlap. If both labels and predictions are empty (e.g., all background), then Jaccard similarity is 1. If we assume the inputs are rank 5 [`(batch, x, y, z, classes)`], then an axis parameter of `(1, 2, 3)` will result in a tensor that contains a Jaccard score for every class in every item in the batch. The shape of this tensor will be `(batch, classes)`. If the inputs only have one class (e.g., binary segmentation), then an axis parameter of `(1, 2, 3, 4)` should be used. 
This will result in a tensor of shape `(batch,)`, where every value is the Jaccard similarity for that prediction. Implemented according to https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4533825/#Equ7 Returns ------- Tensor of Jaccard similarities. Citations --------- Taha AA, Hanbury A. Metrics for evaluating 3D medical image segmentation: analysis, selection, and tool. BMC Med Imaging. 2015;15:29. Published 2015 Aug 12. doi:10.1186/s12880-015-0068-x """ y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) eps = tf.keras.backend.epsilon() intersection = tf.reduce_sum(y_true * y_pred, axis=axis) union = tf.reduce_sum(y_true, axis=axis) + tf.reduce_sum(y_pred, axis=axis) return (intersection + eps) / (union - intersection + eps) def tversky(y_true, y_pred, axis=(1, 2, 3), alpha=0.3, beta=0.7): y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) if y_true.get_shape().ndims < 2 or y_pred.get_shape().ndims < 2: raise ValueError("y_true and y_pred must be at least rank 2.") eps = tf.keras.backend.epsilon() num = tf.reduce_sum(y_pred * y_true, axis=axis) den = ( num + alpha * tf.reduce_sum(y_pred * (1 - y_true), axis=axis) + beta * tf.reduce_sum((1 - y_pred) * y_true, axis=axis) ) # Sum over classes. return tf.reduce_sum((num + eps) / (den + eps), axis=-1) def dice_coef_multilabel(y_true, y_pred): n_classes= tf.shape(y_pred)[-1] dice_coeff=0 for index in range(n_classes): dice_coeff -= dice(y_true[:,:,:,:,index], y_pred[:,:,:,:,index]) return dice_coeff
37.773973
87
0.670535
import tensorflow as tf def average_volume_difference(): raise NotImplementedError() def dice(y_true, y_pred, axis=(1, 2, 3, 4)): y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) eps = tf.keras.backend.epsilon() intersection = tf.reduce_sum(y_true * y_pred, axis=axis) summation = tf.reduce_sum(y_true, axis=axis) + tf.reduce_sum(y_pred, axis=axis) return (2 * intersection + eps) / (summation + eps) def generalized_dice(y_true, y_pred, axis=(1, 2, 3)): y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) if y_true.get_shape().ndims < 2 or y_pred.get_shape().ndims < 2: raise ValueError("y_true and y_pred must be at least rank 2.") epsilon = tf.keras.backend.epsilon() w = tf.math.reciprocal(tf.square(tf.reduce_sum(y_true, axis=axis))) w = tf.where(tf.math.is_finite(w), w, epsilon) num = 2 * tf.reduce_sum(w * tf.reduce_sum(y_true * y_pred, axis= axis), axis=-1) den = tf.reduce_sum(w * tf.reduce_sum(y_true + y_pred, axis= axis), axis=-1) gdice = num/den gdice = tf.where(tf.math.is_finite(gdice), gdice, tf.zeros_like(gdice)) return gdice def hamming(y_true, y_pred, axis=(1, 2, 3)): y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) return tf.reduce_mean(tf.not_equal(y_pred, y_true), axis=axis) def haussdorf(): raise NotADirectoryError() def jaccard(y_true, y_pred, axis=(1, 2, 3, 4)): y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) eps = tf.keras.backend.epsilon() intersection = tf.reduce_sum(y_true * y_pred, axis=axis) union = tf.reduce_sum(y_true, axis=axis) + tf.reduce_sum(y_pred, axis=axis) return (intersection + eps) / (union - intersection + eps) def tversky(y_true, y_pred, axis=(1, 2, 3), alpha=0.3, beta=0.7): y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) if y_true.get_shape().ndims < 2 or y_pred.get_shape().ndims < 2: raise ValueError("y_true and y_pred must be at least rank 2.") eps = tf.keras.backend.epsilon() num = tf.reduce_sum(y_pred * y_true, axis=axis) den = ( num + alpha * tf.reduce_sum(y_pred * (1 - y_true), axis=axis) + beta * tf.reduce_sum((1 - y_pred) * y_true, axis=axis) ) return tf.reduce_sum((num + eps) / (den + eps), axis=-1) def dice_coef_multilabel(y_true, y_pred): n_classes= tf.shape(y_pred)[-1] dice_coeff=0 for index in range(n_classes): dice_coeff -= dice(y_true[:,:,:,:,index], y_pred[:,:,:,:,index]) return dice_coeff
true
true
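The docstrings in the record above state the Dice formula but give no worked numbers. The following sketch is not part of the record: it copies the record's dice() helper so it is self-contained, assumes TensorFlow 2.x is installed, and evaluates the formula on a tiny rank-5 volume whose overlap can be checked by hand.

import tensorflow as tf

def dice(y_true, y_pred, axis=(1, 2, 3, 4)):
    # Copied from the record: 2 * |intersection| / (|A| + |B|), with epsilon smoothing.
    y_pred = tf.convert_to_tensor(y_pred)
    y_true = tf.cast(y_true, y_pred.dtype)
    eps = tf.keras.backend.epsilon()
    intersection = tf.reduce_sum(y_true * y_pred, axis=axis)
    summation = tf.reduce_sum(y_true, axis=axis) + tf.reduce_sum(y_pred, axis=axis)
    return (2 * intersection + eps) / (summation + eps)

# One item of shape (batch, x, y, z, classes) = (1, 2, 2, 2, 1).
y_true = tf.reshape(tf.constant([1., 1., 1., 1., 0., 0., 0., 0.]), (1, 2, 2, 2, 1))
y_pred = tf.reshape(tf.constant([1., 1., 0., 0., 1., 1., 0., 0.]), (1, 2, 2, 2, 1))

# Intersection has 2 voxels and |A| + |B| = 8, so Dice = 2 * 2 / 8 = 0.5.
print(dice(y_true, y_pred).numpy())  # -> [0.5], up to the epsilon term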
f700f86cbf5234bbc538edad7ad5ecd48dcd69e4
1,654
py
Python
security_monkey/tests/watchers/vpc/test_peering.py
alvaroaleman/security_monkey
b174a705124f12aeee612f9ef93820f2b4227e0e
[ "Apache-2.0" ]
3
2018-05-18T17:32:36.000Z
2021-12-09T13:46:35.000Z
security_monkey/tests/watchers/vpc/test_peering.py
alvaroaleman/security_monkey
b174a705124f12aeee612f9ef93820f2b4227e0e
[ "Apache-2.0" ]
9
2019-01-11T17:55:08.000Z
2021-06-25T15:17:38.000Z
security_monkey/tests/watchers/vpc/test_peering.py
cxmcc/security_monkey
ae4c4b5b278505a97f0513f5ae44db3eb23c175c
[ "Apache-2.0" ]
2
2018-06-15T16:55:11.000Z
2020-04-30T16:26:59.000Z
# Copyright 2016 Bridgewater Associates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ .. module: security_monkey.tests.watchers.vpc.test_peering :platform: Unix .. version:: $$VERSION$$ .. moduleauthor:: Bridgewater OSS <[email protected]> """ from security_monkey.tests.watchers import SecurityMonkeyWatcherTestCase from security_monkey.watchers.vpc.peering import Peering import boto from moto import mock_sts, mock_ec2 from freezegun import freeze_time class PeeringWatcherTestCase(SecurityMonkeyWatcherTestCase): @freeze_time("2016-07-18 12:00:00") @mock_sts @mock_ec2 def test_slurp(self): conn = boto.connect_vpc('the_key', 'the secret') vpc = conn.create_vpc("10.0.0.0/16") peer_vpc = conn.create_vpc("10.0.0.0/16") conn.create_vpc_peering_connection(vpc.id, peer_vpc.id) watcher = Peering(accounts=[self.account.name]) item_list, exception_map = watcher.slurp() self.assertIs( expr1=len(item_list), expr2=1, msg="Watcher should have 1 item but has {}".format(len(item_list)))
33.08
79
0.702539
from security_monkey.tests.watchers import SecurityMonkeyWatcherTestCase from security_monkey.watchers.vpc.peering import Peering import boto from moto import mock_sts, mock_ec2 from freezegun import freeze_time class PeeringWatcherTestCase(SecurityMonkeyWatcherTestCase): @freeze_time("2016-07-18 12:00:00") @mock_sts @mock_ec2 def test_slurp(self): conn = boto.connect_vpc('the_key', 'the secret') vpc = conn.create_vpc("10.0.0.0/16") peer_vpc = conn.create_vpc("10.0.0.0/16") conn.create_vpc_peering_connection(vpc.id, peer_vpc.id) watcher = Peering(accounts=[self.account.name]) item_list, exception_map = watcher.slurp() self.assertIs( expr1=len(item_list), expr2=1, msg="Watcher should have 1 item but has {}".format(len(item_list)))
true
true
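The test in the record above compares the item count with assertIs, which checks object identity rather than value equality; it passes only because CPython caches small integers. The sketch below is not from the record and uses only the standard library to show the distinction, with a hypothetical one-item list standing in for the watcher output.

import unittest

class CountCheck(unittest.TestCase):
    def test_counts(self):
        items = ["pcx-12345"]                # stand-in for the slurped item list
        self.assertEqual(len(items), 1)      # value comparison: the intended check
        self.assertIs(len(items), 1)         # identity comparison: relies on small-int caching

result = unittest.TextTestRunner().run(
    unittest.defaultTestLoader.loadTestsFromTestCase(CountCheck))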
f700f8db77ef5e63af48fd81de77ff3b4c152594
2,147
py
Python
avijst/tensorflow/data.py
huylb314/AVIAD_AVIJST
bf8e0617849b4f8f4b95ea345be1565ea063ee38
[ "MIT" ]
4
2021-03-17T08:36:52.000Z
2022-03-31T10:32:18.000Z
avijst/tensorflow/data.py
huylb314/AVIAD_AVIJST
bf8e0617849b4f8f4b95ea345be1565ea063ee38
[ "MIT" ]
null
null
null
avijst/tensorflow/data.py
huylb314/AVIAD_AVIJST
bf8e0617849b4f8f4b95ea345be1565ea063ee38
[ "MIT" ]
null
null
null
import numpy as np from sklearn import metrics import math from keras.preprocessing import sequence from keras.preprocessing.text import Tokenizer from typing import * # fastai utility def listify(o): if o is None: return [] if isinstance(o, list): return o if isinstance(o, str): return [o] if isinstance(o, Iterable): return list(o) return [o] def compose(x, funcs, *args, **kwargs): for f in listify(funcs): x = f(x, **kwargs) return x class Onehotify(): def __init__(self, vocab_size): self.vocab_size = vocab_size self.tokenizer = Tokenizer(num_words=vocab_size) def __call__(self, item): return self.tokenizer.sequences_to_matrix([item], mode='binary') class Padify(): def __init__(self, maxlen): self.maxlen = maxlen def __call__(self, item): return sequence.pad_sequences([item], maxlen=self.maxlen) class YOnehotify(): def __init__(self, num_classes): self.num_classes = num_classes def __call__(self, item): categorical = np.zeros((1, self.num_classes)) categorical[0, item] = 1 return categorical class Dataset(): def __init__(self, x, y, tfms_x, tfms_y): self.x, self.y = x, y self.x_tfms, self.y_tfms = tfms_x, tfms_y def __len__(self): return len(self.x) def _get_transform(self, i, tfms): return compose(i, tfms) def __getitem__(self, i): batch_x, batch_y = self.x[i], self.y[i] return_x, return_y = [], [] if isinstance(i, slice): return_x = [self._get_transform(o, self.x_tfms) for o in batch_x] if isinstance(i, slice): return_y = [self._get_transform(o, self.y_tfms) for o in batch_y] return np.vstack(return_x), np.vstack(return_y) class DataLoader(): def __init__(self, ds, bs, drop_last=True): self.ds, self.bs, self.drop_last = ds, bs, drop_last def __iter__(self): length = len(self.ds) // self.bs if self.drop_last else math.ceil(len(self.ds) / self.bs) for i in range(0, length, 1): yield self.ds[(i*self.bs):(i*self.bs)+self.bs]
33.546875
100
0.640429
import numpy as np from sklearn import metrics import math from keras.preprocessing import sequence from keras.preprocessing.text import Tokenizer from typing import * def listify(o): if o is None: return [] if isinstance(o, list): return o if isinstance(o, str): return [o] if isinstance(o, Iterable): return list(o) return [o] def compose(x, funcs, *args, **kwargs): for f in listify(funcs): x = f(x, **kwargs) return x class Onehotify(): def __init__(self, vocab_size): self.vocab_size = vocab_size self.tokenizer = Tokenizer(num_words=vocab_size) def __call__(self, item): return self.tokenizer.sequences_to_matrix([item], mode='binary') class Padify(): def __init__(self, maxlen): self.maxlen = maxlen def __call__(self, item): return sequence.pad_sequences([item], maxlen=self.maxlen) class YOnehotify(): def __init__(self, num_classes): self.num_classes = num_classes def __call__(self, item): categorical = np.zeros((1, self.num_classes)) categorical[0, item] = 1 return categorical class Dataset(): def __init__(self, x, y, tfms_x, tfms_y): self.x, self.y = x, y self.x_tfms, self.y_tfms = tfms_x, tfms_y def __len__(self): return len(self.x) def _get_transform(self, i, tfms): return compose(i, tfms) def __getitem__(self, i): batch_x, batch_y = self.x[i], self.y[i] return_x, return_y = [], [] if isinstance(i, slice): return_x = [self._get_transform(o, self.x_tfms) for o in batch_x] if isinstance(i, slice): return_y = [self._get_transform(o, self.y_tfms) for o in batch_y] return np.vstack(return_x), np.vstack(return_y) class DataLoader(): def __init__(self, ds, bs, drop_last=True): self.ds, self.bs, self.drop_last = ds, bs, drop_last def __iter__(self): length = len(self.ds) // self.bs if self.drop_last else math.ceil(len(self.ds) / self.bs) for i in range(0, length, 1): yield self.ds[(i*self.bs):(i*self.bs)+self.bs]
true
true
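The record above builds its Dataset transforms out of the listify/compose helpers. The sketch below is not part of the record; it reproduces those two helpers with only the standard library (taking Iterable from collections.abc instead of the wildcard typing import) and chains two plain callables, which is exactly what Dataset.__getitem__ does with its tfms lists.

from collections.abc import Iterable

def listify(o):
    # Same behaviour as the record's helper.
    if o is None:
        return []
    if isinstance(o, list):
        return o
    if isinstance(o, str):
        return [o]
    if isinstance(o, Iterable):
        return list(o)
    return [o]

def compose(x, funcs, *args, **kwargs):
    # Apply each transform in turn.
    for f in listify(funcs):
        x = f(x, **kwargs)
    return x

double = lambda v: v * 2
increment = lambda v: v + 1
print(compose(3, [double, increment]))  # (3 * 2) + 1 = 7
print(compose(3, double))               # a bare callable is wrapped into a one-item list -> 6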
f700f8e66d717d6fd7e8d9734c1db24bc7968239
281
py
Python
tests/artificial/transf_RelativeDifference/trend_Lag1Trend/cycle_30/ar_12/test_artificial_1024_RelativeDifference_Lag1Trend_30_12_20.py
jmabry/pyaf
afbc15a851a2445a7824bf255af612dc429265af
[ "BSD-3-Clause" ]
null
null
null
tests/artificial/transf_RelativeDifference/trend_Lag1Trend/cycle_30/ar_12/test_artificial_1024_RelativeDifference_Lag1Trend_30_12_20.py
jmabry/pyaf
afbc15a851a2445a7824bf255af612dc429265af
[ "BSD-3-Clause" ]
1
2019-11-30T23:39:38.000Z
2019-12-01T04:34:35.000Z
tests/artificial/transf_RelativeDifference/trend_Lag1Trend/cycle_30/ar_12/test_artificial_1024_RelativeDifference_Lag1Trend_30_12_20.py
jmabry/pyaf
afbc15a851a2445a7824bf255af612dc429265af
[ "BSD-3-Clause" ]
null
null
null
import pyaf.Bench.TS_datasets as tsds import pyaf.tests.artificial.process_artificial_dataset as art art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 30, transform = "RelativeDifference", sigma = 0.0, exog_count = 20, ar_order = 12);
40.142857
176
0.743772
import pyaf.Bench.TS_datasets as tsds import pyaf.tests.artificial.process_artificial_dataset as art art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 30, transform = "RelativeDifference", sigma = 0.0, exog_count = 20, ar_order = 12);
true
true
f700f8f83ac5032ea84cc6fc6b5be17bdb522c59
168
py
Python
tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_ConstantTrend_Seasonal_DayOfMonth_MLP.py
jmabry/pyaf
afbc15a851a2445a7824bf255af612dc429265af
[ "BSD-3-Clause" ]
null
null
null
tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_ConstantTrend_Seasonal_DayOfMonth_MLP.py
jmabry/pyaf
afbc15a851a2445a7824bf255af612dc429265af
[ "BSD-3-Clause" ]
1
2019-11-30T23:39:38.000Z
2019-12-01T04:34:35.000Z
tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_ConstantTrend_Seasonal_DayOfMonth_MLP.py
jmabry/pyaf
afbc15a851a2445a7824bf255af612dc429265af
[ "BSD-3-Clause" ]
null
null
null
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod testmod.build_model( ['BoxCox'] , ['ConstantTrend'] , ['Seasonal_DayOfMonth'] , ['MLP'] );
42
90
0.761905
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod testmod.build_model( ['BoxCox'] , ['ConstantTrend'] , ['Seasonal_DayOfMonth'] , ['MLP'] );
true
true
f700f90a4863b2f52518d4c47af307e35d2b3220
1,599
py
Python
scripts/rainbow.py
orrinjelo/AedanWallpaper
c5d67c45d7d295d90bc979f2cda645e0b578f10c
[ "MIT" ]
null
null
null
scripts/rainbow.py
orrinjelo/AedanWallpaper
c5d67c45d7d295d90bc979f2cda645e0b578f10c
[ "MIT" ]
null
null
null
scripts/rainbow.py
orrinjelo/AedanWallpaper
c5d67c45d7d295d90bc979f2cda645e0b578f10c
[ "MIT" ]
null
null
null
from PIL import Image import numpy as np import colorsys import os, sys import argparse import matplotlib.pyplot as plt rgb_to_hsv = np.vectorize(colorsys.rgb_to_hsv) hsv_to_rgb = np.vectorize(colorsys.hsv_to_rgb) def crop(image, box=None): if box: imageBox = box else: imageBox = image.getbbox() return image.crop(imageBox) def hue_shift(image, value): im = image.convert('RGBA') arr = np.array(np.asarray(im).astype(float)) r,g,b,a = np.rollaxis(arr, axis=-1) # print(np.max(r)) h,s,v = rgb_to_hsv(r, g, b) r, g, b = hsv_to_rgb((h + value/360.0) % 1.0, s, v) arr = np.dstack((r, g, b, a)) # print(np.max(r)) # plt.imshow(arr.astype(int), aspect='auto') # plt.show() return Image.fromarray(arr.astype('uint8'), 'RGBA') parser = argparse.ArgumentParser(description='Rainbow an image batch') parser.add_argument('--filename', dest='filename', type=str) parser.add_argument('--step', dest='step', type=float, default=5.0) parser.add_argument('--max_step', dest='max_step', type=float, default=360.0) args = parser.parse_args() color_image = Image.open(args.filename) basename = os.path.basename(args.filename) base, ext = os.path.splitext(basename) if not os.path.exists('anim'): os.mkdir('anim') for n in range(0, int(args.max_step/args.step)): dtheta = n*args.step print('Writing out', dtheta) cropped = crop(color_image, (1620, 780, 2220, 1380)) new_im = hue_shift(cropped, dtheta) new_fn = os.path.join('anim','{0}_{1}{2}'.format(base, n, ext)) n += 1 new_im.save(new_fn)
29.072727
78
0.661038
from PIL import Image import numpy as np import colorsys import os, sys import argparse import matplotlib.pyplot as plt rgb_to_hsv = np.vectorize(colorsys.rgb_to_hsv) hsv_to_rgb = np.vectorize(colorsys.hsv_to_rgb) def crop(image, box=None): if box: imageBox = box else: imageBox = image.getbbox() return image.crop(imageBox) def hue_shift(image, value): im = image.convert('RGBA') arr = np.array(np.asarray(im).astype(float)) r,g,b,a = np.rollaxis(arr, axis=-1) h,s,v = rgb_to_hsv(r, g, b) r, g, b = hsv_to_rgb((h + value/360.0) % 1.0, s, v) arr = np.dstack((r, g, b, a)) return Image.fromarray(arr.astype('uint8'), 'RGBA') parser = argparse.ArgumentParser(description='Rainbow an image batch') parser.add_argument('--filename', dest='filename', type=str) parser.add_argument('--step', dest='step', type=float, default=5.0) parser.add_argument('--max_step', dest='max_step', type=float, default=360.0) args = parser.parse_args() color_image = Image.open(args.filename) basename = os.path.basename(args.filename) base, ext = os.path.splitext(basename) if not os.path.exists('anim'): os.mkdir('anim') for n in range(0, int(args.max_step/args.step)): dtheta = n*args.step print('Writing out', dtheta) cropped = crop(color_image, (1620, 780, 2220, 1380)) new_im = hue_shift(cropped, dtheta) new_fn = os.path.join('anim','{0}_{1}{2}'.format(base, n, ext)) n += 1 new_im.save(new_fn)
true
true
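The hue_shift() function in the record above rotates the hue channel by value/360 with wrap-around. The sketch below is not from the record: it applies the same arithmetic to a single pixel using only the standard library colorsys module, so the effect can be verified without PIL or NumPy.

import colorsys

r, g, b = 255, 0, 0                  # pure red, hue 0.0
value = 120                          # degrees of hue rotation
h, s, v = colorsys.rgb_to_hsv(r / 255, g / 255, b / 255)
h = (h + value / 360.0) % 1.0        # same wrap-around as hue_shift() above
r2, g2, b2 = (round(c * 255) for c in colorsys.hsv_to_rgb(h, s, v))
print((r2, g2, b2))                  # (0, 255, 0): a 120-degree shift turns red into green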
f700fad174e6ff6fa2e5dbcde5ec690589efd9de
301
py
Python
Alert Notification.py
Behordeun/simple-python-projects
c2d088a2c1ebd842ca4d9817d569da4fd6b7f637
[ "Apache-2.0" ]
1
2021-09-09T10:55:23.000Z
2021-09-09T10:55:23.000Z
Alert Notification.py
Behordeun/simple-python-projects
c2d088a2c1ebd842ca4d9817d569da4fd6b7f637
[ "Apache-2.0" ]
null
null
null
Alert Notification.py
Behordeun/simple-python-projects
c2d088a2c1ebd842ca4d9817d569da4fd6b7f637
[ "Apache-2.0" ]
null
null
null
# This is a simple application for alert system from tkinter import * from tkinter import messagebox root = Tk() root.geometry("200x200") def message(): messagebox.showwarning("Alert Box", "Stop virus found") but = Button(root, text="ok", command=Message) but.place(x=100, y=100) root.mainloop()
25.083333
59
0.734219
from tkinter import * from tkinter import messagebox root = Tk() root.geometry("200x200") def message(): messagebox.showwarning("Alert Box", "Stop virus found") but = Button(root, text="ok", command=Message) but.place(x=100, y=100) root.mainloop()
true
true
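The record's content defines a handler named message() but binds the button to Message, the Tkinter widget class pulled in by the star import, so clicking the button never opens the warning box. The sketch below is not part of the record; it is a minimal version wired to the lowercase function, using explicit imports, and it needs a display to run.

from tkinter import Tk, Button, messagebox

root = Tk()
root.geometry("200x200")

def message():
    # Show the same warning dialog the record intends to display.
    messagebox.showwarning("Alert Box", "Stop virus found")

Button(root, text="ok", command=message).place(x=100, y=100)
root.mainloop()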
f700fb1b9f83ac8ec1529d16e00f5a8bbe3d433f
2,355
py
Python
dataflow/sampleGetGlacierOutlineFromInventories.py
GLAMOS/dataflow
af7ca2dea122b3bccee0bdfd4292190bd71eccca
[ "MIT" ]
1
2018-05-22T14:46:48.000Z
2018-05-22T14:46:48.000Z
dataflow/sampleGetGlacierOutlineFromInventories.py
GLAMOS/dataflow
af7ca2dea122b3bccee0bdfd4292190bd71eccca
[ "MIT" ]
22
2018-05-18T15:28:48.000Z
2019-09-05T06:19:33.000Z
dataflow/sampleGetGlacierOutlineFromInventories.py
GLAMOS/dataflow
af7ca2dea122b3bccee0bdfd4292190bd71eccca
[ "MIT" ]
4
2018-07-09T05:08:49.000Z
2021-03-23T08:19:36.000Z
''' Created on 26.07.2018 @author: yvo ''' import configparser import sys from dataflow.DataReaders.DatabaseReaders.GlacierReader import GlacierReader from dataflow.DataReaders.DatabaseReaders.InventoryReader import InventoryReader def printLatestOutline(glaciers): for glacier in glaciers.values(): print("---") print(glacier) print(glacier.latestInventoryGeometry) if __name__ == '__main__': config = configparser.ConfigParser() config.read("dataflow.cfg") privateDatabaseAccessConfiguration = r".\databaseAccessConfiguration.gladmin.cfg" focusGlaciers = ['C14-10', 'B36-26', 'B83-03'] # Basodino (VAW-ID = 104), Aletsch (VAW-ID = 5), Corbassiere (VAW-ID = 38) # Getting the dataflow.DataReaders.DatabaseReaders.GlacierReader ready to retrieve glacier objects from the database. glacierReader = GlacierReader(privateDatabaseAccessConfiguration) # Empty directory for the found focus glaciers. glaciers = dict() # Getting all the data readers for the attribute values of the glaciers ready. dataReaders = [] dataReaders.append(InventoryReader(privateDatabaseAccessConfiguration)) try: # Check if the database is available. If not, get alternative glaciers for plotting. if glacierReader.isDatabaseAvailable == True: print("The GLAMOS database is available. Glacier objects are read from the database.") for focusGlacier in focusGlaciers: glacierFound = glacierReader.getGlacierBySgi(focusGlacier) glaciers[glacierFound.pkSgi] = glacierFound # Getting the attributes from the database. for glacier in glaciers.values(): # Polymorphistic approach to read attribute data by a list of readers. for dataReader in dataReaders: dataReader.getData(glacier) # Printing the pandas.DataFrame of the mass-balances of the glaciers. printLatestOutline(glaciers) else: print("Database not available! Application will terminate.") sys.exit(2) except Exception as e: print(e.message) print("Sample script aborted!")
34.130435
125
0.650955
import configparser import sys from dataflow.DataReaders.DatabaseReaders.GlacierReader import GlacierReader from dataflow.DataReaders.DatabaseReaders.InventoryReader import InventoryReader def printLatestOutline(glaciers): for glacier in glaciers.values(): print("---") print(glacier) print(glacier.latestInventoryGeometry) if __name__ == '__main__': config = configparser.ConfigParser() config.read("dataflow.cfg") privateDatabaseAccessConfiguration = r".\databaseAccessConfiguration.gladmin.cfg" focusGlaciers = ['C14-10', 'B36-26', 'B83-03'] glacierReader = GlacierReader(privateDatabaseAccessConfiguration) glaciers = dict() dataReaders = [] dataReaders.append(InventoryReader(privateDatabaseAccessConfiguration)) try: if glacierReader.isDatabaseAvailable == True: print("The GLAMOS database is available. Glacier objects are read from the database.") for focusGlacier in focusGlaciers: glacierFound = glacierReader.getGlacierBySgi(focusGlacier) glaciers[glacierFound.pkSgi] = glacierFound for glacier in glaciers.values(): for dataReader in dataReaders: dataReader.getData(glacier) printLatestOutline(glaciers) else: print("Database not available! Application will terminate.") sys.exit(2) except Exception as e: print(e.message) print("Sample script aborted!")
true
true
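The record above applies a list of data readers to each glacier purely through a shared getData(glacier) interface. The sketch below is not from the record and uses hypothetical stand-in classes (FakeGlacier, FakeInventoryReader) to show the same polymorphic loop without the GLAMOS database or configuration files.

class FakeGlacier:
    def __init__(self, sgi):
        self.pkSgi = sgi
        self.attributes = {}

class FakeInventoryReader:
    def getData(self, glacier):
        # A real reader would query the database; here we just attach a value.
        glacier.attributes["inventory"] = "outline for {}".format(glacier.pkSgi)

glaciers = {sgi: FakeGlacier(sgi) for sgi in ["C14-10", "B36-26"]}
data_readers = [FakeInventoryReader()]

for glacier in glaciers.values():
    for reader in data_readers:      # polymorphic: any object with getData() fits
        reader.getData(glacier)

print(glaciers["C14-10"].attributes)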
f700fb91eabccac2226537f58a347cff505076df
8,131
py
Python
tensorflow/python/keras/regularizers_test.py
leike666666/tensorflow
a3fd0ddfcb716be124e95b51e96e6c1e4507ef64
[ "Apache-2.0" ]
12
2020-12-28T18:42:10.000Z
2022-03-24T17:34:21.000Z
tensorflow/python/keras/regularizers_test.py
sagol/tensorflow
04f2870814d2773e09dcfa00cbe76a66a2c4de88
[ "Apache-2.0" ]
2
2021-08-25T15:58:11.000Z
2022-02-10T01:47:24.000Z
tensorflow/python/keras/regularizers_test.py
sagol/tensorflow
04f2870814d2773e09dcfa00cbe76a66a2c4de88
[ "Apache-2.0" ]
3
2020-03-09T19:17:02.000Z
2020-06-26T23:14:31.000Z
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras regularizers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.eager import context from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import regularizers from tensorflow.python.keras import testing_utils from tensorflow.python.keras.utils import np_utils from tensorflow.python.ops import math_ops from tensorflow.python.platform import test DATA_DIM = 5 NUM_CLASSES = 2 class KerasRegularizersTest(keras_parameterized.TestCase, parameterized.TestCase): def create_model(self, kernel_regularizer=None, activity_regularizer=None): model = keras.models.Sequential() model.add(keras.layers.Dense(NUM_CLASSES, kernel_regularizer=kernel_regularizer, activity_regularizer=activity_regularizer, input_shape=(DATA_DIM,))) return model def get_data(self): (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data( train_samples=10, test_samples=10, input_shape=(DATA_DIM,), num_classes=NUM_CLASSES) y_train = np_utils.to_categorical(y_train, NUM_CLASSES) y_test = np_utils.to_categorical(y_test, NUM_CLASSES) return (x_train, y_train), (x_test, y_test) def create_multi_input_model_from(self, layer1, layer2): input_1 = keras.layers.Input(shape=(DATA_DIM,)) input_2 = keras.layers.Input(shape=(DATA_DIM,)) out1 = layer1(input_1) out2 = layer2(input_2) out = keras.layers.Average()([out1, out2]) model = keras.models.Model([input_1, input_2], out) model.add_loss(keras.backend.mean(out2)) model.add_loss(math_ops.reduce_sum(input_1)) return model @keras_parameterized.run_all_keras_modes @parameterized.named_parameters([ ('l1', regularizers.l1()), ('l2', regularizers.l2()), ('l1_l2', regularizers.l1_l2()), ]) def test_kernel_regularization(self, regularizer): (x_train, y_train), _ = self.get_data() model = self.create_model(kernel_regularizer=regularizer) model.compile( loss='categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) self.assertEqual(len(model.losses), 1) model.fit(x_train, y_train, batch_size=10, epochs=1, verbose=0) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters([ ('l1', regularizers.l1()), ('l2', regularizers.l2()), ('l1_l2', regularizers.l1_l2()), ('l2_zero', keras.regularizers.l2(0.)), ]) def test_activity_regularization(self, regularizer): (x_train, y_train), _ = self.get_data() model = self.create_model(activity_regularizer=regularizer) model.compile( loss='categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) self.assertEqual(len(model.losses), 1 if 
context.executing_eagerly() else 1) model.fit(x_train, y_train, batch_size=10, epochs=1, verbose=0) @keras_parameterized.run_all_keras_modes @keras_parameterized.run_with_all_model_types def test_zero_regularization(self): # Verifies that training with zero regularization works. x, y = np.ones((10, 10)), np.ones((10, 3)) model = testing_utils.get_model_from_layers( [keras.layers.Dense(3, kernel_regularizer=keras.regularizers.l2(0))], input_shape=(10,)) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(x, y, batch_size=5, epochs=1) def test_custom_regularizer_saving(self): def my_regularizer(weights): return math_ops.reduce_sum(math_ops.abs(weights)) inputs = keras.Input((10,)) outputs = keras.layers.Dense(1, kernel_regularizer=my_regularizer)(inputs) model = keras.Model(inputs, outputs) model2 = model.from_config( model.get_config(), custom_objects={'my_regularizer': my_regularizer}) self.assertEqual(model2.layers[1].kernel_regularizer, my_regularizer) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters([ ('l1', regularizers.l1()), ('l2', regularizers.l2()), ('l1_l2', regularizers.l1_l2()), ]) def test_regularization_shared_layer(self, regularizer): dense_layer = keras.layers.Dense( NUM_CLASSES, kernel_regularizer=regularizer, activity_regularizer=regularizer) model = self.create_multi_input_model_from(dense_layer, dense_layer) model.compile( loss='categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) self.assertLen(model.losses, 5) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters([ ('l1', regularizers.l1()), ('l2', regularizers.l2()), ('l1_l2', regularizers.l1_l2()), ]) def test_regularization_shared_model(self, regularizer): dense_layer = keras.layers.Dense( NUM_CLASSES, kernel_regularizer=regularizer, activity_regularizer=regularizer) input_tensor = keras.layers.Input(shape=(DATA_DIM,)) dummy_model = keras.models.Model(input_tensor, dense_layer(input_tensor)) model = self.create_multi_input_model_from(dummy_model, dummy_model) model.compile( loss='categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) self.assertLen(model.losses, 6) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters([ ('l1', regularizers.l1()), ('l2', regularizers.l2()), ('l1_l2', regularizers.l1_l2()), ]) def test_regularization_shared_layer_in_different_models(self, regularizer): shared_dense = keras.layers.Dense( NUM_CLASSES, kernel_regularizer=regularizer, activity_regularizer=regularizer) models = [] for _ in range(2): input_tensor = keras.layers.Input(shape=(DATA_DIM,)) unshared_dense = keras.layers.Dense( NUM_CLASSES, kernel_regularizer=regularizer) out = unshared_dense(shared_dense(input_tensor)) models.append(keras.models.Model(input_tensor, out)) model = self.create_multi_input_model_from( layer1=models[0], layer2=models[1]) model.compile( loss='categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) # We expect to see 9 losses on the model: # - 2 from the 2 add_loss calls on the outer model. # - 3 from the weight regularizers on the shared_dense layer, unshared_dense # in inner model 1, unshared_dense in inner model 2. 
# - 4 from activity regularizers on the shared_dense layer. self.assertLen(model.losses, 9) if __name__ == '__main__': test.main()
38.535545
80
0.710245
from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.eager import context from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import regularizers from tensorflow.python.keras import testing_utils from tensorflow.python.keras.utils import np_utils from tensorflow.python.ops import math_ops from tensorflow.python.platform import test DATA_DIM = 5 NUM_CLASSES = 2 class KerasRegularizersTest(keras_parameterized.TestCase, parameterized.TestCase): def create_model(self, kernel_regularizer=None, activity_regularizer=None): model = keras.models.Sequential() model.add(keras.layers.Dense(NUM_CLASSES, kernel_regularizer=kernel_regularizer, activity_regularizer=activity_regularizer, input_shape=(DATA_DIM,))) return model def get_data(self): (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data( train_samples=10, test_samples=10, input_shape=(DATA_DIM,), num_classes=NUM_CLASSES) y_train = np_utils.to_categorical(y_train, NUM_CLASSES) y_test = np_utils.to_categorical(y_test, NUM_CLASSES) return (x_train, y_train), (x_test, y_test) def create_multi_input_model_from(self, layer1, layer2): input_1 = keras.layers.Input(shape=(DATA_DIM,)) input_2 = keras.layers.Input(shape=(DATA_DIM,)) out1 = layer1(input_1) out2 = layer2(input_2) out = keras.layers.Average()([out1, out2]) model = keras.models.Model([input_1, input_2], out) model.add_loss(keras.backend.mean(out2)) model.add_loss(math_ops.reduce_sum(input_1)) return model @keras_parameterized.run_all_keras_modes @parameterized.named_parameters([ ('l1', regularizers.l1()), ('l2', regularizers.l2()), ('l1_l2', regularizers.l1_l2()), ]) def test_kernel_regularization(self, regularizer): (x_train, y_train), _ = self.get_data() model = self.create_model(kernel_regularizer=regularizer) model.compile( loss='categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) self.assertEqual(len(model.losses), 1) model.fit(x_train, y_train, batch_size=10, epochs=1, verbose=0) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters([ ('l1', regularizers.l1()), ('l2', regularizers.l2()), ('l1_l2', regularizers.l1_l2()), ('l2_zero', keras.regularizers.l2(0.)), ]) def test_activity_regularization(self, regularizer): (x_train, y_train), _ = self.get_data() model = self.create_model(activity_regularizer=regularizer) model.compile( loss='categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) self.assertEqual(len(model.losses), 1 if context.executing_eagerly() else 1) model.fit(x_train, y_train, batch_size=10, epochs=1, verbose=0) @keras_parameterized.run_all_keras_modes @keras_parameterized.run_with_all_model_types def test_zero_regularization(self): x, y = np.ones((10, 10)), np.ones((10, 3)) model = testing_utils.get_model_from_layers( [keras.layers.Dense(3, kernel_regularizer=keras.regularizers.l2(0))], input_shape=(10,)) model.compile( 'sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) model.fit(x, y, batch_size=5, epochs=1) def test_custom_regularizer_saving(self): def my_regularizer(weights): return math_ops.reduce_sum(math_ops.abs(weights)) inputs = 
keras.Input((10,)) outputs = keras.layers.Dense(1, kernel_regularizer=my_regularizer)(inputs) model = keras.Model(inputs, outputs) model2 = model.from_config( model.get_config(), custom_objects={'my_regularizer': my_regularizer}) self.assertEqual(model2.layers[1].kernel_regularizer, my_regularizer) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters([ ('l1', regularizers.l1()), ('l2', regularizers.l2()), ('l1_l2', regularizers.l1_l2()), ]) def test_regularization_shared_layer(self, regularizer): dense_layer = keras.layers.Dense( NUM_CLASSES, kernel_regularizer=regularizer, activity_regularizer=regularizer) model = self.create_multi_input_model_from(dense_layer, dense_layer) model.compile( loss='categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) self.assertLen(model.losses, 5) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters([ ('l1', regularizers.l1()), ('l2', regularizers.l2()), ('l1_l2', regularizers.l1_l2()), ]) def test_regularization_shared_model(self, regularizer): dense_layer = keras.layers.Dense( NUM_CLASSES, kernel_regularizer=regularizer, activity_regularizer=regularizer) input_tensor = keras.layers.Input(shape=(DATA_DIM,)) dummy_model = keras.models.Model(input_tensor, dense_layer(input_tensor)) model = self.create_multi_input_model_from(dummy_model, dummy_model) model.compile( loss='categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) self.assertLen(model.losses, 6) @keras_parameterized.run_all_keras_modes @parameterized.named_parameters([ ('l1', regularizers.l1()), ('l2', regularizers.l2()), ('l1_l2', regularizers.l1_l2()), ]) def test_regularization_shared_layer_in_different_models(self, regularizer): shared_dense = keras.layers.Dense( NUM_CLASSES, kernel_regularizer=regularizer, activity_regularizer=regularizer) models = [] for _ in range(2): input_tensor = keras.layers.Input(shape=(DATA_DIM,)) unshared_dense = keras.layers.Dense( NUM_CLASSES, kernel_regularizer=regularizer) out = unshared_dense(shared_dense(input_tensor)) models.append(keras.models.Model(input_tensor, out)) model = self.create_multi_input_model_from( layer1=models[0], layer2=models[1]) model.compile( loss='categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly(), experimental_run_tf_function=testing_utils.should_run_tf_function()) self.assertLen(model.losses, 9) if __name__ == '__main__': test.main()
true
true
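The tests in the record above count the entries that regularizers add to model.losses. The sketch below is not part of the record; assuming TensorFlow 2.x, it evaluates an L2 penalty directly and checks it against the closed form, then shows that a single regularized Dense kernel contributes exactly one loss entry, which is what assertions like assertEqual(len(model.losses), 1) are counting.

import numpy as np
import tensorflow as tf

weights = tf.constant([[1.0, -2.0], [0.5, 3.0]])
reg = tf.keras.regularizers.l2(0.01)
penalty = reg(weights)
expected = 0.01 * np.sum(np.square(weights.numpy()))
print(float(penalty), expected)      # both ~0.1425

layer = tf.keras.layers.Dense(3, kernel_regularizer=reg)
layer.build((None, 2))               # creating the kernel registers its penalty
print(len(layer.losses))             # 1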
f700fbd89f1ece538292bf32b2ec1b717f09f289
638
py
Python
termsandconditions_demo/wsgi.py
jlachowski/django-termsandconditions
69085ad7c1ec510371bdbe7c033b449af5f97bff
[ "BSD-3-Clause" ]
null
null
null
termsandconditions_demo/wsgi.py
jlachowski/django-termsandconditions
69085ad7c1ec510371bdbe7c033b449af5f97bff
[ "BSD-3-Clause" ]
null
null
null
termsandconditions_demo/wsgi.py
jlachowski/django-termsandconditions
69085ad7c1ec510371bdbe7c033b449af5f97bff
[ "BSD-3-Clause" ]
null
null
null
"""WSGI File that enables Apache/GUnicorn to run Django""" # pylint: disable=C0103 import os import sys from django.core.wsgi import get_wsgi_application sys.path.insert(0, os.path.abspath(os.path.join(os.path.abspath(os.pardir), os.pardir))) sys.path.insert(0, os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(__file__))))) os.environ.setdefault("DJANGO_SETTINGS_MODULE", 'termsandconditions_demo.settings') # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. application = get_wsgi_application()
33.578947
93
0.786834
import os import sys from django.core.wsgi import get_wsgi_application sys.path.insert(0, os.path.abspath(os.path.join(os.path.abspath(os.pardir), os.pardir))) sys.path.insert(0, os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(__file__))))) os.environ.setdefault("DJANGO_SETTINGS_MODULE", 'termsandconditions_demo.settings') # setting points here. application = get_wsgi_application()
true
true
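The record's wsgi.py exposes a module-level application object for a WSGI server to call. The sketch below is not from the record: it replaces the Django application with a trivial stdlib-only callable and invokes it by hand, to show the environ/start_response contract that get_wsgi_application()'s return value fulfils.

def application(environ, start_response):
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"hello from a WSGI callable\n"]

def start_response(status, headers):
    # A real server would write the status line and headers to the socket.
    print(status, headers)

body = application({"REQUEST_METHOD": "GET", "PATH_INFO": "/"}, start_response)
print(b"".join(body))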
f700fd10ec2ca5a09f5d8a94531213dc5a8acc0d
811
py
Python
app/server.py
betandr/grpcdemo
1f1ab18923973f5a6cab36c804978435bbc3a6e5
[ "MIT" ]
5
2018-07-23T16:08:39.000Z
2021-11-10T04:32:12.000Z
app/server.py
betandr/grpcdemo
1f1ab18923973f5a6cab36c804978435bbc3a6e5
[ "MIT" ]
null
null
null
app/server.py
betandr/grpcdemo
1f1ab18923973f5a6cab36c804978435bbc3a6e5
[ "MIT" ]
null
null
null
from concurrent import futures import time import grpc import app.helloworld_pb2 as helloworld_pb2 import app.helloworld_pb2_grpc as helloworld_pb2_grpc _ONE_DAY_IN_SECONDS = 60 * 60 * 24 class Greeter(helloworld_pb2_grpc.GreeterServicer): def Greet(self, request, context): print('Saying `hello` to %s' % request.name) return helloworld_pb2.GreetResponse(message='Hello, {}!'.format(request.name)) def serve(): server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server) server.add_insecure_port('[::]:50051') server.start() try: while True: time.sleep(_ONE_DAY_IN_SECONDS) except KeyboardInterrupt: server.stop(0) if __name__ == '__main__': serve()
24.575758
86
0.718866
from concurrent import futures import time import grpc import app.helloworld_pb2 as helloworld_pb2 import app.helloworld_pb2_grpc as helloworld_pb2_grpc _ONE_DAY_IN_SECONDS = 60 * 60 * 24 class Greeter(helloworld_pb2_grpc.GreeterServicer): def Greet(self, request, context): print('Saying `hello` to %s' % request.name) return helloworld_pb2.GreetResponse(message='Hello, {}!'.format(request.name)) def serve(): server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server) server.add_insecure_port('[::]:50051') server.start() try: while True: time.sleep(_ONE_DAY_IN_SECONDS) except KeyboardInterrupt: server.stop(0) if __name__ == '__main__': serve()
true
true
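The record above only shows the server side of the Greeter service. The sketch below is not part of the record: it is a client that assumes the same generated app.helloworld_pb2 modules are importable and that the request message is named GreetRequest with a name field (the record only shows GreetResponse and request.name, so that name is an assumption).

import grpc
import app.helloworld_pb2 as helloworld_pb2
import app.helloworld_pb2_grpc as helloworld_pb2_grpc

with grpc.insecure_channel("localhost:50051") as channel:
    stub = helloworld_pb2_grpc.GreeterStub(channel)
    # GreetRequest is assumed from the proto naming convention; adjust if the
    # generated message class differs.
    response = stub.Greet(helloworld_pb2.GreetRequest(name="Andreas"))
    print(response.message)          # expected: "Hello, Andreas!"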
f700fe26bc9bfda21d39a0bddd89180f5de442ab
2,544
py
Python
StyleText/utils/logging.py
Bourne-M/PaddleOCR
865e737413d430798b8c17525dcc22db4d106752
[ "Apache-2.0" ]
20,401
2020-05-08T10:56:13.000Z
2022-03-31T23:34:38.000Z
StyleText/utils/logging.py
Bourne-M/PaddleOCR
865e737413d430798b8c17525dcc22db4d106752
[ "Apache-2.0" ]
4,988
2020-05-10T08:19:41.000Z
2022-03-31T17:57:11.000Z
StyleText/utils/logging.py
Bourne-M/PaddleOCR
865e737413d430798b8c17525dcc22db4d106752
[ "Apache-2.0" ]
4,479
2020-05-08T11:12:13.000Z
2022-03-31T11:55:28.000Z
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import logging import functools import paddle.distributed as dist logger_initialized = {} @functools.lru_cache() def get_logger(name='srnet', log_file=None, log_level=logging.INFO): """Initialize and get a logger by name. If the logger has not been initialized, this method will initialize the logger by adding one or two handlers, otherwise the initialized logger will be directly returned. During initialization, a StreamHandler will always be added. If `log_file` is specified a FileHandler will also be added. Args: name (str): Logger name. log_file (str | None): The log filename. If specified, a FileHandler will be added to the logger. log_level (int): The logger level. Note that only the process of rank 0 is affected, and other processes will set the level to "Error" thus be silent most of the time. Returns: logging.Logger: The expected logger. """ logger = logging.getLogger(name) if name in logger_initialized: return logger for logger_name in logger_initialized: if name.startswith(logger_name): return logger formatter = logging.Formatter( '[%(asctime)s] %(name)s %(levelname)s: %(message)s', datefmt="%Y/%m/%d %H:%M:%S") stream_handler = logging.StreamHandler(stream=sys.stdout) stream_handler.setFormatter(formatter) logger.addHandler(stream_handler) if log_file is not None and dist.get_rank() == 0: log_file_folder = os.path.split(log_file)[0] os.makedirs(log_file_folder, exist_ok=True) file_handler = logging.FileHandler(log_file, 'a') file_handler.setFormatter(formatter) logger.addHandler(file_handler) if dist.get_rank() == 0: logger.setLevel(log_level) else: logger.setLevel(logging.ERROR) logger_initialized[name] = True return logger
38.545455
79
0.704009
import os import sys import logging import functools import paddle.distributed as dist logger_initialized = {} @functools.lru_cache() def get_logger(name='srnet', log_file=None, log_level=logging.INFO): logger = logging.getLogger(name) if name in logger_initialized: return logger for logger_name in logger_initialized: if name.startswith(logger_name): return logger formatter = logging.Formatter( '[%(asctime)s] %(name)s %(levelname)s: %(message)s', datefmt="%Y/%m/%d %H:%M:%S") stream_handler = logging.StreamHandler(stream=sys.stdout) stream_handler.setFormatter(formatter) logger.addHandler(stream_handler) if log_file is not None and dist.get_rank() == 0: log_file_folder = os.path.split(log_file)[0] os.makedirs(log_file_folder, exist_ok=True) file_handler = logging.FileHandler(log_file, 'a') file_handler.setFormatter(formatter) logger.addHandler(file_handler) if dist.get_rank() == 0: logger.setLevel(log_level) else: logger.setLevel(logging.ERROR) logger_initialized[name] = True return logger
true
true
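The record's get_logger() memoises logger setup with functools.lru_cache and an initialisation registry so handlers are attached only once. The sketch below is not from the record; it keeps that pattern but drops the paddle.distributed rank check and file handler so it runs with the standard library alone.

import functools
import logging
import sys

@functools.lru_cache()
def get_logger(name="srnet", log_level=logging.INFO):
    logger = logging.getLogger(name)
    if logger.handlers:              # already initialised on an earlier call
        return logger
    handler = logging.StreamHandler(stream=sys.stdout)
    handler.setFormatter(logging.Formatter(
        "[%(asctime)s] %(name)s %(levelname)s: %(message)s",
        datefmt="%Y/%m/%d %H:%M:%S"))
    logger.addHandler(handler)
    logger.setLevel(log_level)
    return logger

get_logger().info("configured once")
assert get_logger() is get_logger()  # the cached call returns the same logger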
f70101d2e677dfa1c95b8d12717565b56481d031
11,171
py
Python
server/server/organizations/models.py
connectiveproject/connective
8866082b2147feef0e5254ac4215987b9d881396
[ "MIT" ]
4
2021-07-05T10:49:26.000Z
2021-11-24T11:34:43.000Z
server/server/organizations/models.py
connectiveproject/connective
8866082b2147feef0e5254ac4215987b9d881396
[ "MIT" ]
39
2021-06-21T15:02:37.000Z
2022-02-28T15:07:42.000Z
server/server/organizations/models.py
connectiveproject/connective
8866082b2147feef0e5254ac4215987b9d881396
[ "MIT" ]
17
2021-06-16T08:59:45.000Z
2021-09-29T11:35:38.000Z
from django.core.validators import RegexValidator from django.db import models from django.utils.translation import gettext_lazy as _ from taggit.managers import TaggableManager from server.connective_tags.models import ConnectiveTaggedItem from server.schools.models import School from server.utils.db_utils import get_base_model from server.utils.model_fields import random_slug class SchoolActivityGroupManager(models.Manager): def get_activity_container_only_group(self, activity_group): container_only_groups = self.filter( activity_order=activity_group.activity_order, group_type=SchoolActivityGroup.GroupTypes.CONTAINER_ONLY, ) if container_only_groups.exists(): return container_only_groups[0] class ImportedOrganization(get_base_model()): slug = models.CharField(max_length=40, default=random_slug, unique=True) organization_number = models.CharField(max_length=10, unique=True) email = models.EmailField(null=True, blank=True) description = models.CharField(max_length=4096, null=True, blank=True) website_url = models.URLField(null=True, blank=True) name = models.CharField(max_length=256, null=True, blank=True) goal = models.CharField(max_length=4096, null=True, blank=True) year_founded = models.CharField(max_length=128, null=True, blank=True) status = models.CharField(max_length=50, null=True, blank=True) target_audience = models.JSONField(null=True, blank=True) number_of_employees = models.PositiveIntegerField(null=True, blank=True) number_of_members = models.PositiveIntegerField(null=True, blank=True) number_of_volunteers = models.PositiveIntegerField(null=True, blank=True) location_lon = models.DecimalField( max_digits=9, decimal_places=6, null=True, blank=True, ) location_lat = models.DecimalField( max_digits=9, decimal_places=6, null=True, blank=True, ) address_city = models.CharField(max_length=256, null=True, blank=True) address_street = models.CharField(max_length=256, null=True, blank=True) address_house_num = models.CharField(max_length=30, null=True, blank=True) address_zipcode = models.CharField(max_length=9, null=True, blank=True) cities = models.JSONField(null=True, blank=True) districts = models.JSONField(null=True, blank=True) union_type = models.CharField(max_length=50, null=True, blank=True) def __str__(self): return f"{self.name} | {self.organization_number} | {self.slug}" class Organization(get_base_model()): slug = models.CharField(max_length=40, default=random_slug, unique=True) organization_number = models.CharField(max_length=10, unique=True, null=True) email = models.EmailField() description = models.CharField(max_length=300) website_url = models.URLField(null=True, blank=True) name = models.CharField(max_length=100) goal = models.CharField(max_length=300, null=True, blank=True) year_founded = models.CharField(max_length=4, null=True, blank=True) status = models.CharField(max_length=50, null=True, blank=True) target_audience = models.JSONField(null=True, blank=True) number_of_employees = models.PositiveIntegerField(null=True, blank=True) number_of_members = models.PositiveIntegerField(null=True, blank=True) number_of_volunteers = models.PositiveIntegerField(null=True, blank=True) location_lon = models.DecimalField( max_digits=9, decimal_places=6, null=True, blank=True, ) location_lat = models.DecimalField( max_digits=9, decimal_places=6, null=True, blank=True, ) address_city = models.CharField(max_length=150, null=True, blank=True) address_street = models.CharField(max_length=150, null=True, blank=True) address_house_num = models.CharField(max_length=20, null=True, 
blank=True) address_zipcode = models.CharField(max_length=9, null=True, blank=True) cities = models.JSONField(null=True, blank=True) districts = models.JSONField(null=True, blank=True) union_type = models.CharField(max_length=50, null=True, blank=True) def __str__(self): return f"{self.name} | {self.organization_number} | {self.slug}" class Activity(get_base_model()): class Domain(models.TextChoices): SCIENCE_AND_TECH = "SCIENCE_AND_TECH", "Science And Tech" EXTREME_SPORTS = "EXTREME_SPORTS", "Extreme Sports" FIELD = "FIELD", "Field" OTHER = "OTHER", "Other" tags = TaggableManager(blank=True, through=ConnectiveTaggedItem) slug = models.CharField(max_length=40, default=random_slug, unique=True) name = models.CharField(max_length=35) target_audience = models.JSONField() domain = models.CharField(max_length=55, null=True, choices=Domain.choices) originization = models.ForeignKey( Organization, on_delete=models.SET_NULL, null=True, blank=True, related_name="activities", ) activity_website_url = models.URLField(max_length=750, null=True, blank=True) activity_email = models.EmailField(null=True, blank=True) description = models.CharField(max_length=550, default="") contact_name = models.CharField(max_length=60, default="") logo = models.ImageField(blank=True, null=True) phone_number = models.CharField( blank=True, max_length=15, validators=[ RegexValidator( regex=r"^\d{9,15}$", message=_("phone number must be between 9-15 digits"), ) ], ) def __str__(self): try: return f"{self.name} | {self.slug} | {self.originization.name}" except AttributeError: return f"{self.name} | {self.slug}" class ImportedActivity(get_base_model()): slug = models.CharField(max_length=40, default=random_slug, unique=True) activity_code = models.IntegerField() name = models.CharField(max_length=550) raw_name = models.CharField(max_length=550) target_audience = models.JSONField() organization_number = models.IntegerField() organization_name = models.CharField(max_length=1550, default="") target_gender = models.JSONField() target_gender = models.JSONField() target_population = models.JSONField() target_time = models.JSONField() target_size = models.JSONField() target_migzar = models.JSONField() target_pikuah = models.JSONField() profession = models.JSONField() goal = models.CharField(max_length=1550, default="") is_active = models.BooleanField() activity_website_url = models.URLField(max_length=750, null=True, blank=True) activity_email = models.EmailField(null=True, blank=True) description = models.CharField(max_length=1550, default="") contact_name = models.CharField(max_length=100, default="") phone_number = models.CharField( blank=True, max_length=15, validators=[ RegexValidator( regex=r"^\d{9,15}$", message=_("phone number must be between 9-15 digits"), ) ], ) def __str__(self): return f"{self.name} | {self.slug} | {self.activity_code}" class ActivityMedia(get_base_model()): slug = models.CharField(max_length=40, default=random_slug, unique=True) name = models.CharField(max_length=40, null=True, blank=True) image_url = models.ImageField(blank=True, null=True) video_url = models.URLField(blank=True, null=True) activity = models.ForeignKey( Activity, on_delete=models.CASCADE, related_name="rich_media", ) def __str__(self): return f"{self.name} | {self.slug} | {self.activity.name}" class OrganizationMember(get_base_model()): user = models.OneToOneField( "users.User", on_delete=models.CASCADE, related_name="organization_member" ) organization = models.ForeignKey( Organization, on_delete=models.CASCADE, 
related_name="organization_member", ) def __str__(self): return f"{self.user.email} | {self.organization.name}" class SchoolActivityOrder(get_base_model()): class Meta: constraints = [ models.UniqueConstraint(fields=["school", "activity"], name="unique_order") ] class Status(models.TextChoices): CANCELLED = "CANCELLED", "Cancelled" PENDING_ADMIN_APPROVAL = "PENDING_ADMIN_APPROVAL", "Pending Admin Approval" APPROVED = "APPROVED", "Approved" DENIED = "DENIED", "Denied" base_status = Status.PENDING_ADMIN_APPROVAL slug = models.CharField(max_length=40, default=random_slug, unique=True) requested_by = models.ForeignKey( "users.User", on_delete=models.SET_NULL, null=True, blank=True, related_name="requested_orders", ) last_updated_by = models.ForeignKey( "users.User", on_delete=models.SET_NULL, null=True, blank=True, related_name="last_updated_by_me_orders", ) school = models.ForeignKey( School, on_delete=models.CASCADE, related_name="school_activity_orders" ) activity = models.ForeignKey( Activity, on_delete=models.CASCADE, related_name="school_activity_orders" ) status = models.CharField( _("status"), max_length=50, choices=Status.choices, default=base_status ) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) status_reason = models.CharField( max_length=250, blank=True, ) def __str__(self): return f"{self.activity} | {self.school} | {self.status} | {self.pk}" class SchoolActivityGroup(get_base_model()): class GroupTypes(models.TextChoices): CONTAINER_ONLY = "CONTAINER_ONLY", "Container Only" DISABLED_CONSUMERS = "DISABLED_CONSUMERS", "Disabled Consumers" NO_REGISTRATION = "NO_REGISTRATION", "No Registration" DEFAULT = "DEFAULT", "Default" objects = SchoolActivityGroupManager() slug = models.CharField(max_length=40, default=random_slug, unique=True) activity_order = models.ForeignKey( SchoolActivityOrder, on_delete=models.CASCADE, related_name="activity_groups" ) name = models.CharField(_("name"), max_length=50) description = models.CharField(_("description"), max_length=550) consumers = models.ManyToManyField( "users.Consumer", related_name="activity_groups", blank=True, ) group_type = models.CharField( _("group type"), max_length=50, choices=GroupTypes.choices, default=GroupTypes.DEFAULT, ) instructor = models.ForeignKey( "users.Instructor", on_delete=models.SET_NULL, related_name="managed_activity_groups", null=True, blank=True, ) def __str__(self): return f""" {self.name} : {self.group_type} : {self.slug} : {self.activity_order.activity.name} : {self.activity_order.school.name} """
37.612795
87
0.688569
from django.core.validators import RegexValidator from django.db import models from django.utils.translation import gettext_lazy as _ from taggit.managers import TaggableManager from server.connective_tags.models import ConnectiveTaggedItem from server.schools.models import School from server.utils.db_utils import get_base_model from server.utils.model_fields import random_slug class SchoolActivityGroupManager(models.Manager): def get_activity_container_only_group(self, activity_group): container_only_groups = self.filter( activity_order=activity_group.activity_order, group_type=SchoolActivityGroup.GroupTypes.CONTAINER_ONLY, ) if container_only_groups.exists(): return container_only_groups[0] class ImportedOrganization(get_base_model()): slug = models.CharField(max_length=40, default=random_slug, unique=True) organization_number = models.CharField(max_length=10, unique=True) email = models.EmailField(null=True, blank=True) description = models.CharField(max_length=4096, null=True, blank=True) website_url = models.URLField(null=True, blank=True) name = models.CharField(max_length=256, null=True, blank=True) goal = models.CharField(max_length=4096, null=True, blank=True) year_founded = models.CharField(max_length=128, null=True, blank=True) status = models.CharField(max_length=50, null=True, blank=True) target_audience = models.JSONField(null=True, blank=True) number_of_employees = models.PositiveIntegerField(null=True, blank=True) number_of_members = models.PositiveIntegerField(null=True, blank=True) number_of_volunteers = models.PositiveIntegerField(null=True, blank=True) location_lon = models.DecimalField( max_digits=9, decimal_places=6, null=True, blank=True, ) location_lat = models.DecimalField( max_digits=9, decimal_places=6, null=True, blank=True, ) address_city = models.CharField(max_length=256, null=True, blank=True) address_street = models.CharField(max_length=256, null=True, blank=True) address_house_num = models.CharField(max_length=30, null=True, blank=True) address_zipcode = models.CharField(max_length=9, null=True, blank=True) cities = models.JSONField(null=True, blank=True) districts = models.JSONField(null=True, blank=True) union_type = models.CharField(max_length=50, null=True, blank=True) def __str__(self): return f"{self.name} | {self.organization_number} | {self.slug}" class Organization(get_base_model()): slug = models.CharField(max_length=40, default=random_slug, unique=True) organization_number = models.CharField(max_length=10, unique=True, null=True) email = models.EmailField() description = models.CharField(max_length=300) website_url = models.URLField(null=True, blank=True) name = models.CharField(max_length=100) goal = models.CharField(max_length=300, null=True, blank=True) year_founded = models.CharField(max_length=4, null=True, blank=True) status = models.CharField(max_length=50, null=True, blank=True) target_audience = models.JSONField(null=True, blank=True) number_of_employees = models.PositiveIntegerField(null=True, blank=True) number_of_members = models.PositiveIntegerField(null=True, blank=True) number_of_volunteers = models.PositiveIntegerField(null=True, blank=True) location_lon = models.DecimalField( max_digits=9, decimal_places=6, null=True, blank=True, ) location_lat = models.DecimalField( max_digits=9, decimal_places=6, null=True, blank=True, ) address_city = models.CharField(max_length=150, null=True, blank=True) address_street = models.CharField(max_length=150, null=True, blank=True) address_house_num = models.CharField(max_length=20, null=True, 
blank=True) address_zipcode = models.CharField(max_length=9, null=True, blank=True) cities = models.JSONField(null=True, blank=True) districts = models.JSONField(null=True, blank=True) union_type = models.CharField(max_length=50, null=True, blank=True) def __str__(self): return f"{self.name} | {self.organization_number} | {self.slug}" class Activity(get_base_model()): class Domain(models.TextChoices): SCIENCE_AND_TECH = "SCIENCE_AND_TECH", "Science And Tech" EXTREME_SPORTS = "EXTREME_SPORTS", "Extreme Sports" FIELD = "FIELD", "Field" OTHER = "OTHER", "Other" tags = TaggableManager(blank=True, through=ConnectiveTaggedItem) slug = models.CharField(max_length=40, default=random_slug, unique=True) name = models.CharField(max_length=35) target_audience = models.JSONField() domain = models.CharField(max_length=55, null=True, choices=Domain.choices) originization = models.ForeignKey( Organization, on_delete=models.SET_NULL, null=True, blank=True, related_name="activities", ) activity_website_url = models.URLField(max_length=750, null=True, blank=True) activity_email = models.EmailField(null=True, blank=True) description = models.CharField(max_length=550, default="") contact_name = models.CharField(max_length=60, default="") logo = models.ImageField(blank=True, null=True) phone_number = models.CharField( blank=True, max_length=15, validators=[ RegexValidator( regex=r"^\d{9,15}$", message=_("phone number must be between 9-15 digits"), ) ], ) def __str__(self): try: return f"{self.name} | {self.slug} | {self.originization.name}" except AttributeError: return f"{self.name} | {self.slug}" class ImportedActivity(get_base_model()): slug = models.CharField(max_length=40, default=random_slug, unique=True) activity_code = models.IntegerField() name = models.CharField(max_length=550) raw_name = models.CharField(max_length=550) target_audience = models.JSONField() organization_number = models.IntegerField() organization_name = models.CharField(max_length=1550, default="") target_gender = models.JSONField() target_gender = models.JSONField() target_population = models.JSONField() target_time = models.JSONField() target_size = models.JSONField() target_migzar = models.JSONField() target_pikuah = models.JSONField() profession = models.JSONField() goal = models.CharField(max_length=1550, default="") is_active = models.BooleanField() activity_website_url = models.URLField(max_length=750, null=True, blank=True) activity_email = models.EmailField(null=True, blank=True) description = models.CharField(max_length=1550, default="") contact_name = models.CharField(max_length=100, default="") phone_number = models.CharField( blank=True, max_length=15, validators=[ RegexValidator( regex=r"^\d{9,15}$", message=_("phone number must be between 9-15 digits"), ) ], ) def __str__(self): return f"{self.name} | {self.slug} | {self.activity_code}" class ActivityMedia(get_base_model()): slug = models.CharField(max_length=40, default=random_slug, unique=True) name = models.CharField(max_length=40, null=True, blank=True) image_url = models.ImageField(blank=True, null=True) video_url = models.URLField(blank=True, null=True) activity = models.ForeignKey( Activity, on_delete=models.CASCADE, related_name="rich_media", ) def __str__(self): return f"{self.name} | {self.slug} | {self.activity.name}" class OrganizationMember(get_base_model()): user = models.OneToOneField( "users.User", on_delete=models.CASCADE, related_name="organization_member" ) organization = models.ForeignKey( Organization, on_delete=models.CASCADE, 
related_name="organization_member", ) def __str__(self): return f"{self.user.email} | {self.organization.name}" class SchoolActivityOrder(get_base_model()): class Meta: constraints = [ models.UniqueConstraint(fields=["school", "activity"], name="unique_order") ] class Status(models.TextChoices): CANCELLED = "CANCELLED", "Cancelled" PENDING_ADMIN_APPROVAL = "PENDING_ADMIN_APPROVAL", "Pending Admin Approval" APPROVED = "APPROVED", "Approved" DENIED = "DENIED", "Denied" base_status = Status.PENDING_ADMIN_APPROVAL slug = models.CharField(max_length=40, default=random_slug, unique=True) requested_by = models.ForeignKey( "users.User", on_delete=models.SET_NULL, null=True, blank=True, related_name="requested_orders", ) last_updated_by = models.ForeignKey( "users.User", on_delete=models.SET_NULL, null=True, blank=True, related_name="last_updated_by_me_orders", ) school = models.ForeignKey( School, on_delete=models.CASCADE, related_name="school_activity_orders" ) activity = models.ForeignKey( Activity, on_delete=models.CASCADE, related_name="school_activity_orders" ) status = models.CharField( _("status"), max_length=50, choices=Status.choices, default=base_status ) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) status_reason = models.CharField( max_length=250, blank=True, ) def __str__(self): return f"{self.activity} | {self.school} | {self.status} | {self.pk}" class SchoolActivityGroup(get_base_model()): class GroupTypes(models.TextChoices): CONTAINER_ONLY = "CONTAINER_ONLY", "Container Only" DISABLED_CONSUMERS = "DISABLED_CONSUMERS", "Disabled Consumers" NO_REGISTRATION = "NO_REGISTRATION", "No Registration" DEFAULT = "DEFAULT", "Default" objects = SchoolActivityGroupManager() slug = models.CharField(max_length=40, default=random_slug, unique=True) activity_order = models.ForeignKey( SchoolActivityOrder, on_delete=models.CASCADE, related_name="activity_groups" ) name = models.CharField(_("name"), max_length=50) description = models.CharField(_("description"), max_length=550) consumers = models.ManyToManyField( "users.Consumer", related_name="activity_groups", blank=True, ) group_type = models.CharField( _("group type"), max_length=50, choices=GroupTypes.choices, default=GroupTypes.DEFAULT, ) instructor = models.ForeignKey( "users.Instructor", on_delete=models.SET_NULL, related_name="managed_activity_groups", null=True, blank=True, ) def __str__(self): return f""" {self.name} : {self.group_type} : {self.slug} : {self.activity_order.activity.name} : {self.activity_order.school.name} """
true
true
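The Activity and ImportedActivity models in the record above both validate phone_number with the anchored pattern r"^\d{9,15}$". The short snippet below is a standalone, illustrative check of that pattern using only the re module; it is not part of the record, and the sample numbers are invented.

import re

# Anchored pattern copied from the phone_number validators in the record above.
PHONE_RE = re.compile(r"^\d{9,15}$")

# Sample values are made up for illustration only.
for candidate in ["0521234567", "123", "05-2123456", "123456789012345"]:
    print(candidate, bool(PHONE_RE.match(candidate)))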
f70102464c8e5466183c3e69b120499038456a17
6,962
py
Python
podship/test/engine/functional/api/v1/user.py
candango/socialspider
81b4406a7e30308d904157c76eff949562c8ce28
[ "Apache-2.0" ]
5
2015-05-27T15:22:39.000Z
2015-07-10T21:09:16.000Z
podship/test/engine/functional/api/v1/user.py
candango/socialspider
81b4406a7e30308d904157c76eff949562c8ce28
[ "Apache-2.0" ]
25
2015-10-12T00:36:30.000Z
2016-05-27T17:23:34.000Z
podship/test/engine/functional/api/v1/user.py
candango/podship
81b4406a7e30308d904157c76eff949562c8ce28
[ "Apache-2.0" ]
5
2016-07-06T04:26:59.000Z
2017-12-06T13:05:35.000Z
#!/usr/bin/env python # # Copyright 2015-2016 Flavio Garcia # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import (absolute_import, division, print_function, with_statement) import unittest from tornado import httpclient import logging from tornado.escape import json_decode, json_encode from ..v1 import api_url_v1 logger = logging.getLogger(__name__) class UserApiV1FunctionalTestCase(unittest.TestCase): """ Case that covers the account service. """ def test_login_empty_body(self): http_client = httpclient.HTTPClient() login_url = "%s/user/login" % api_url_v1 response_body = None error_code = 0 body_error_code = 0 try: http_client.fetch(httpclient.HTTPRequest( url=login_url, method='POST', body='')) except httpclient.HTTPError as e: # HTTPError is raised for non-200 responses; the response # can be found in e.response. response_body = json_decode(e.response.body) error_code = e.code body_error_code = int(response_body['status']) print(e.response.error) except Exception as e: # Other errors are possible, such as IOError. logger.error("Error: %s" % str(e)) http_client.close() # Bad Request http error self.assertEquals(error_code, 500) self.assertEquals(body_error_code, 500) # Has 1 error self.assertEquals(len(response_body['errors']), 1) # Username message self.assertEquals(response_body['errors']['schema'][0], "Invalid json body content.") def test_login_invalid_json(self): http_client = httpclient.HTTPClient() login_url = "%s/user/login" % api_url_v1 response_body = None data = "invalid json string" error_code = 0 body_error_code = 0 try: http_client.fetch(httpclient.HTTPRequest( url=login_url, method='POST', body='')) except httpclient.HTTPError as e: # HTTPError is raised for non-200 responses; the response # can be found in e.response. response_body = json_decode(e.response.body) error_code = e.code body_error_code = int(response_body['status']) print(e.response.error) except Exception as e: # Other errors are possible, such as IOError. logger.error("Error: %s" % str(e)) http_client.close() # Bad Request http error self.assertEquals(error_code, 500) self.assertEquals(body_error_code, 500) # Has 1 error self.assertEquals(len(response_body['errors']), 1) # Username message self.assertEquals(response_body['errors']['schema'][0], "Invalid json body content.") def test_login_without_username(self): http_client = httpclient.HTTPClient() login_url = "%s/user/login" % api_url_v1 response_body = None error_code = 0 data = { 'payload': { 'password': "", } } try: response = http_client.fetch(httpclient.HTTPRequest( url=login_url, method='POST', body=json_encode(data))) except httpclient.HTTPError as e: # HTTPError is raised for non-200 responses; the response # can be found in e.response. logger.error("Error: %s" % str(e)) error_code = e.code response_body = json_decode(e.response.body) except Exception as e: # Other errors are possible, such as IOError. 
logger.error("Error: %s" % str(e)) http_client.close() # Unauthorized http error self.assertEquals(error_code, 400) # Has 2 errors self.assertEquals(len(response_body['errors']), 1) # Username message self.assertEquals(response_body['errors']['schema'], "'username' is a required property") def test_login_without_password(self): http_client = httpclient.HTTPClient() login_url = "%s/user/login" % api_url_v1 response_body = None error_code = 0 data = { 'payload': { 'username': "", } } try: response = http_client.fetch(httpclient.HTTPRequest( url=login_url, method='POST', body=json_encode(data))) except httpclient.HTTPError as e: # HTTPError is raised for non-200 responses; the response # can be found in e.response. logger.error("Error: %s" % str(e)) error_code = e.code response_body = json_decode(e.response.body) except Exception as e: # Other errors are possible, such as IOError. logger.error("Error: %s" % str(e)) http_client.close() # Unauthorized http error self.assertEquals(error_code, 400) # Has 2 errors self.assertEquals(len(response_body['errors']), 1) # Username message print(response_body['errors']['schema']) self.assertEquals(response_body['errors']['schema'], "'password' is a required property") def test_valid_login(self): http_client = httpclient.HTTPClient() login_url = "%s/user/login" % api_url_v1 response_body = None code = 0 data = { 'payload': { 'username': "test", 'password': "test", } } try: response = http_client.fetch(httpclient.HTTPRequest( url=login_url, method='POST', body=json_encode(data))) code = response.code response_body = json_decode(response.body) except httpclient.HTTPError as e: # HTTPError is raised for non-200 responses; the response # can be found in e.response. logger.error("Error: %s" % str(e)) except Exception as e: # Other errors are possible, such as IOError. logger.error("Error: %s" % str(e)) http_client.close() # Unauthorized http error self.assertEquals(code, 200) # Username message self.assertEquals(response_body['userid'], 1)
38.043716
74
0.594082
from __future__ import (absolute_import, division, print_function, with_statement) import unittest from tornado import httpclient import logging from tornado.escape import json_decode, json_encode from ..v1 import api_url_v1 logger = logging.getLogger(__name__) class UserApiV1FunctionalTestCase(unittest.TestCase): def test_login_empty_body(self): http_client = httpclient.HTTPClient() login_url = "%s/user/login" % api_url_v1 response_body = None error_code = 0 body_error_code = 0 try: http_client.fetch(httpclient.HTTPRequest( url=login_url, method='POST', body='')) except httpclient.HTTPError as e: response_body = json_decode(e.response.body) error_code = e.code body_error_code = int(response_body['status']) print(e.response.error) except Exception as e: logger.error("Error: %s" % str(e)) http_client.close() self.assertEquals(error_code, 500) self.assertEquals(body_error_code, 500) self.assertEquals(len(response_body['errors']), 1) self.assertEquals(response_body['errors']['schema'][0], "Invalid json body content.") def test_login_invalid_json(self): http_client = httpclient.HTTPClient() login_url = "%s/user/login" % api_url_v1 response_body = None data = "invalid json string" error_code = 0 body_error_code = 0 try: http_client.fetch(httpclient.HTTPRequest( url=login_url, method='POST', body='')) except httpclient.HTTPError as e: response_body = json_decode(e.response.body) error_code = e.code body_error_code = int(response_body['status']) print(e.response.error) except Exception as e: logger.error("Error: %s" % str(e)) http_client.close() self.assertEquals(error_code, 500) self.assertEquals(body_error_code, 500) self.assertEquals(len(response_body['errors']), 1) self.assertEquals(response_body['errors']['schema'][0], "Invalid json body content.") def test_login_without_username(self): http_client = httpclient.HTTPClient() login_url = "%s/user/login" % api_url_v1 response_body = None error_code = 0 data = { 'payload': { 'password': "", } } try: response = http_client.fetch(httpclient.HTTPRequest( url=login_url, method='POST', body=json_encode(data))) except httpclient.HTTPError as e: logger.error("Error: %s" % str(e)) error_code = e.code response_body = json_decode(e.response.body) except Exception as e: logger.error("Error: %s" % str(e)) http_client.close() self.assertEquals(error_code, 400) self.assertEquals(len(response_body['errors']), 1) self.assertEquals(response_body['errors']['schema'], "'username' is a required property") def test_login_without_password(self): http_client = httpclient.HTTPClient() login_url = "%s/user/login" % api_url_v1 response_body = None error_code = 0 data = { 'payload': { 'username': "", } } try: response = http_client.fetch(httpclient.HTTPRequest( url=login_url, method='POST', body=json_encode(data))) except httpclient.HTTPError as e: logger.error("Error: %s" % str(e)) error_code = e.code response_body = json_decode(e.response.body) except Exception as e: logger.error("Error: %s" % str(e)) http_client.close() self.assertEquals(error_code, 400) self.assertEquals(len(response_body['errors']), 1) print(response_body['errors']['schema']) self.assertEquals(response_body['errors']['schema'], "'password' is a required property") def test_valid_login(self): http_client = httpclient.HTTPClient() login_url = "%s/user/login" % api_url_v1 response_body = None code = 0 data = { 'payload': { 'username': "test", 'password': "test", } } try: response = http_client.fetch(httpclient.HTTPRequest( url=login_url, method='POST', body=json_encode(data))) code = response.code 
response_body = json_decode(response.body) except httpclient.HTTPError as e: logger.error("Error: %s" % str(e)) except Exception as e: logger.error("Error: %s" % str(e)) http_client.close() self.assertEquals(code, 200) self.assertEquals(response_body['userid'], 1)
true
true
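The functional tests in the record above build their POST bodies with tornado.escape.json_encode and read error details out of HTTPError.response; note that test_login_invalid_json defines data = "invalid json string" but still posts an empty body, so as written it exercises the same path as test_login_empty_body. The sketch below shows only the payload encode/decode round trip the tests rely on; no request is sent and no running server is assumed.

from tornado.escape import json_decode, json_encode

# Same payload shape as the tests above; nothing is sent over the network here.
data = {"payload": {"username": "test", "password": "test"}}
body = json_encode(data)                # what the tests pass as the POST body
decoded = json_decode(body)
print(decoded["payload"]["username"])   # -> test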
f7010268e559e7e0ea188f556c3ffbe7b894a88f
243
py
Python
postprocess/_5_1_chemprop.py
ersilia-os/osm-series4-candidates
2d06ae0a5c26efea70d2a21f06a376625977b8b7
[ "MIT" ]
5
2021-06-01T16:52:28.000Z
2021-08-03T11:06:56.000Z
postprocess/_5_1_chemprop.py
ersilia-os/osm-series4-candidates
2d06ae0a5c26efea70d2a21f06a376625977b8b7
[ "MIT" ]
null
null
null
postprocess/_5_1_chemprop.py
ersilia-os/osm-series4-candidates
2d06ae0a5c26efea70d2a21f06a376625977b8b7
[ "MIT" ]
null
null
null
from tqdm import tqdm
import pandas as pd

from __init__ import FILE

df = pd.read_csv(FILE)

smiles = list(df["Smiles"])

with open("_chemprop.csv", "w") as f:
    f.write("smiles\n")
    for smi in smiles:
        f.write("{0}\n".format(smi))
20.25
37
0.650206
from tqdm import tqdm
import pandas as pd

from __init__ import FILE

df = pd.read_csv(FILE)

smiles = list(df["Smiles"])

with open("_chemprop.csv", "w") as f:
    f.write("smiles\n")
    for smi in smiles:
        f.write("{0}\n".format(smi))
true
true
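The script in the record above writes a one-column smiles CSV by looping over the Smiles column of the input frame. A hedged alternative sketch using pandas to write the column directly is shown below; FILE here is a hypothetical path standing in for the FILE constant the original imports from __init__.

import pandas as pd

# Hypothetical input path; stands in for the FILE constant imported from __init__.
FILE = "series4_candidates.csv"

df = pd.read_csv(FILE)
# Keep only the SMILES column and rename it to the lowercase "smiles" header
# that the original script writes.
df[["Smiles"]].rename(columns={"Smiles": "smiles"}).to_csv("_chemprop.csv", index=False)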
f70102e75136d1fd056604a40b88ee06d7af2da9
18,984
py
Python
torch/distributed/_shard/sharded_tensor/__init__.py
jaketae/pytorch
5654e6339879e438efb7cf50e88e356472eb0545
[ "Intel" ]
1
2022-02-01T18:50:09.000Z
2022-02-01T18:50:09.000Z
torch/distributed/_shard/sharded_tensor/__init__.py
ellhe-blaster/pytorch
e5282c3cb8bf6ad8c5161f9d0cc271edb9abed25
[ "Intel" ]
null
null
null
torch/distributed/_shard/sharded_tensor/__init__.py
ellhe-blaster/pytorch
e5282c3cb8bf6ad8c5161f9d0cc271edb9abed25
[ "Intel" ]
null
null
null
# coding=utf-8 import copy import functools from typing import List import torch import torch.distributed._shard.sharding_spec as shard_spec from .api import ( _register_sharded_op, Shard, ShardedTensor, ShardedTensorMetadata, TensorProperties, ) from .metadata import ShardMetadata # noqa: F401 from .partial_tensor import _PartialTensor def empty(sharding_spec: shard_spec.ShardingSpec, *size, dtype=None, layout=torch.strided, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format, process_group=None, init_rrefs=False) -> ShardedTensor: """ Returns a :class:`ShardedTensor` filled with uninitialized data. Needs to be called on all ranks in an SPMD fashion. Args: sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification describing how to shard the Tensor. size (int...): a sequence of integers defining the shape of the output tensor. Can be a variable number of arguments or a collection like a list or tuple. Keyword args: dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`). layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. Default: ``torch.strided``. requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: ``False``. pin_memory (bool, optional): If set, returned tensor would be allocated in the pinned memory. Works only for CPU tensors. Default: ``False``. memory_format (:class:`torch.memory_format`, optional): the desired memory format of returned Tensor. Default: ``torch.contiguous_format``. process_group (ProcessGroup, optional): The process group to work on. If None, the default process group will be used. init_rrefs (bool, optional): Whether or not to initialize :class:`torch.distributed.rpc.RRef`s pointing to remote shards. Need to initialize the RPC Framework if specified as ``True``. Default: ``False``. Returns: A :class:`ShardedTensor` object on each rank """ return ShardedTensor( sharding_spec, *size, dtype=dtype, layout=layout, requires_grad=requires_grad, pin_memory=pin_memory, memory_format=memory_format, process_group=process_group, init_rrefs=init_rrefs, ) def ones(sharding_spec: shard_spec.ShardingSpec, *size, dtype=None, layout=torch.strided, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format, process_group=None, init_rrefs=False) -> ShardedTensor: """ Returns a :class:`ShardedTensor` with the scalar value 1. Needs to be called on all ranks in an SPMD fashion. Args: sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification describing how to shard the Tensor. size (int...): a sequence of integers defining the shape of the output tensor. Can be a variable number of arguments or a collection like a list or tuple. Keyword args: dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`). layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. Default: ``torch.strided``. requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: ``False``. pin_memory (bool, optional): If set, returned tensor would be allocated in the pinned memory. Works only for CPU tensors. Default: ``False``. process_group (ProcessGroup, optional): The process group to work on. If None, the default process group will be used. 
init_rrefs (bool, optional): Whether or not to initialize :class:`torch.distributed.rpc.RRef`s pointing to remote shards. Need to initialize the RPC Framework if specified as ``True``. Default: ``False``. Returns: A :class:`ShardedTensor` object on each rank """ return full( sharding_spec, size, fill_value=1, dtype=dtype, layout=layout, requires_grad=requires_grad, pin_memory=pin_memory, memory_format=memory_format, process_group=process_group, init_rrefs=init_rrefs ) def zeros(sharding_spec: shard_spec.ShardingSpec, *size, dtype=None, layout=torch.strided, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format, process_group=None, init_rrefs=False) -> ShardedTensor: """ Returns a :class:`ShardedTensor` filled with the scalar value 0. Needs to be called on all ranks in an SPMD fashion. Args: sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification describing how to shard the Tensor. size (int...): a sequence of integers defining the shape of the output tensor. Can be a variable number of arguments or a collection like a list or tuple. Keyword args: dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`). layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. Default: ``torch.strided``. requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: ``False``. pin_memory (bool, optional): If set, returned tensor would be allocated in the pinned memory. Works only for CPU tensors. Default: ``False``. process_group (ProcessGroup, optional): The process group to work on. If None, the default process group will be used. init_rrefs (bool, optional): Whether or not to initialize :class:`torch.distributed.rpc.RRef`s pointing to remote shards. Need to initialize the RPC Framework if specified as ``True``. Default: ``False``. Returns: A :class:`ShardedTensor` object on each rank """ return full( sharding_spec, size, fill_value=0, dtype=dtype, layout=layout, requires_grad=requires_grad, pin_memory=pin_memory, memory_format=memory_format, process_group=process_group, init_rrefs=init_rrefs ) def full(sharding_spec: shard_spec.ShardingSpec, size, fill_value=torch.types.Number, dtype=None, layout=torch.strided, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format, process_group=None, init_rrefs=False) -> ShardedTensor: """ Creates a :class:`ShardedTensor` filled with fill_value. The tensor’s dtype is inferred from fill_value. If dtype is specified, it will override the inferred type from fill_value. Needs to be called on all ranks in an SPMD fashion. Args: sharding_spec (:class:`torch.distributed._sharding_spec.ShardingSpec`): The specification describing how to shard the Tensor. size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the output tensor. fill_value (Scalar) – the value to fill the output tensor with. Keyword args: dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`). layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. Default: ``torch.strided``. requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: ``False``. pin_memory (bool, optional): If set, returned tensor would be allocated in the pinned memory. 
Works only for CPU tensors. Default: ``False``. process_group (ProcessGroup, optional): The process group to work on. If None, the default process group will be used. init_rrefs (bool, optional): Whether or not to initialize :class:`torch.distributed.rpc.RRef`s pointing to remote shards. Need to initialize the RPC Framework if specified as ``True``. Default: ``False``. Returns: A :class:`ShardedTensor` object on each rank """ sharded_tensor = ShardedTensor( sharding_spec, *size, dtype=dtype, layout=layout, requires_grad=requires_grad, pin_memory=pin_memory, memory_format=memory_format, process_group=process_group, init_rrefs=init_rrefs, ) torch.nn.init.constant_(sharded_tensor, fill_value) # type: ignore[arg-type] return sharded_tensor def rand(sharding_spec: shard_spec.ShardingSpec, *size, dtype=None, layout=torch.strided, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format, process_group=None, init_rrefs=False) -> ShardedTensor: """ Creates a :class:`ShardedTensor` filled with fill_value. The tensor’s dtype is inferred from fill_value. If dtype is specified, it will override the inferred type from fill_value. Needs to be called on all ranks in an SPMD fashion. Args: sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification describing how to shard the Tensor. size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the output tensor. fill_value (Scalar) – the value to fill the output tensor with. Keyword args: dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`). layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. Default: ``torch.strided``. requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: ``False``. pin_memory (bool, optional): If set, returned tensor would be allocated in the pinned memory. Works only for CPU tensors. Default: ``False``. process_group (ProcessGroup, optional): The process group to work on. If None, the default process group will be used. init_rrefs (bool, optional): Whether or not to initialize :class:`torch.distributed.rpc.RRef`s pointing to remote shards. Need to initialize the RPC Framework if specified as ``True``. Default: ``False``. Returns: A :class:`ShardedTensor` object on each rank """ sharded_tensor = ShardedTensor( sharding_spec, *size, dtype=dtype, layout=layout, requires_grad=requires_grad, pin_memory=pin_memory, memory_format=memory_format, process_group=process_group, init_rrefs=init_rrefs, ) torch.nn.init.uniform_(sharded_tensor, 0, 1) # type: ignore[arg-type] return sharded_tensor def init_from_local_shards( local_shards: List[Shard], *global_size, process_group=None, init_rrefs=False) -> ShardedTensor: """ Creates an :class:`ShardedTensor` from local shards and the global metadata. Needs to be called on all ranks in an SPMD fashion. Args: local_shards (List[:class `torch.distributed._shard.sharded_tensor.Shard`]): A list of shards that represent the local shards on this rank. global_size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the overall sharded tensor. Keyword args: process_group (ProcessGroup, optional): The process group to work on. If None, the default process group will be used. init_rrefs (bool, optional): Whether or not to initialize :class:`torch.distributed.rpc.RRef`s pointing to remote shards. 
Need to initialize the RPC Framework if specified as ``True``. Default: ``False``. Returns: A :class:`ShardedTensor` object handle on this rank Examples: Suppose we want construct a sharded tensor on two ranks, global size = (10, 5), each shard have a (5, 5) local tensor, we can do it like below: on rank 0: >>> local_shard_metadata = ShardMetadata( >>> shard_offsets=[0, 0] >>> shard_lengths=[5, 5] >>> placement="rank:0/cuda:0" >>> ) >>> local_shards = [Shard(torch.randn(5, 5), local_shard_metadata)] >>> sharded_tensor = init_from_local_shards(local_shards, [10, 5]) on rank 1: >>> local_shard_metadata = ShardMetadata( >>> shard_offsets=[5, 0] >>> shard_lengths=[5, 5] >>> placement="rank:1/cuda:1" >>> ) >>> local_shards = [Shard(torch.randn(5, 5), local_shard_metadata)] >>> sharded_tensor = init_from_local_shards(local_shards, [10, 5]) """ return ShardedTensor._init_from_local_shards( local_shards, *global_size, process_group=process_group, init_rrefs=init_rrefs ) def state_dict_hook(module, destination, prefix, local_metadata): """ Hook to add ShardedTensor to Module's ``state_dict``. Needs to be registered to the Module using :meth:`torch.nn.Module._register_state_dict_hook`. """ for submodule_name, submodule in module.named_modules(): for attr_name, attr in submodule.__dict__.items(): if isinstance(attr, ShardedTensor): destination[prefix + submodule_name + '.' + attr_name] = attr def pre_load_state_dict_hook(module, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): """ Pre-load state dict hook to add ShardedTensor to the module. """ for submodule_name, submodule in module.named_modules(): for attr_name, attr in submodule.__dict__.items(): key = prefix + submodule_name + '.' + attr_name if key in state_dict: if isinstance(state_dict[key], ShardedTensor): setattr(submodule, attr_name, state_dict[key]) def sharded_op_impl(func): """ Provides a way for users to write their own custom sharded operator. This can be used to override existing ShardedTensor operators or write a new one not supported by ShardedTensor. If the operator in question is covered by ``__torch_function__`` dispatch and has a ShardedTensor as any of its parameters, the function provided will be invoked for that operator. Example:: >>> @sharded_op_impl(torch.nn.functional.linear) >>> def my_custom_sharded_linear(types, args, kwargs, process_group): >>> .... >>> >>> input = torch.rand(10, 32) >>> weight = sharded_tensor.rand(32, 16) >>> bias = torch.rand(16) >>> # This will call 'my_custom_sharded_linear' >>> torch.nn.functional.linear(input, weight, bias) The types, args and kwargs parameters are the same parameters that are passed to ``__torch_function__`` dispatch API (https://pytorch.org/docs/stable/notes/extending.html#extending-torch). There is an additional ``process_group`` parameter which is the process_group used for the ShardedTensor and can be used by implementations for communications within a sharded implementation. 
Args: func(Callable): Torch function for which we want to provide a sharded implementation (ex: torch.nn.functional.linear) """ def decorator_sharded_func(wrapped_func): _register_sharded_op(func, wrapped_func) @functools.wraps(wrapped_func) def wrapper(*args, **kwargs): return wrapped_func(*args, **kwargs) return wrapper return decorator_sharded_func # Import all builtin sharded ops from ._ops import * # noqa: F403 def _reshard_output( module: torch.nn.Module, resharding_spec: shard_spec.ShardingSpec) -> torch.nn.Module: """ Hook a module with local shards collection in the forward pass according to the given ``resharding_spec``. Args: module (:class:`torch.nn.Module`): Module whose output needs to be resharded. resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification describing how the output of the module will be resharded. Returns: A :class:`torch.nn.Module` object with collection API hooked. """ def hook_func(_module, _input, output): if isinstance(output, ShardedTensor) or isinstance(output, _PartialTensor): return output.reshard(resharding_spec) return output module.register_forward_hook(hook_func) return module def _collect_local_shard(module: torch.nn.Module) -> torch.nn.Module: """ Hook a module with local shards collection in the forward pass. This API is typically used to convert a sharded representation back to data parallel representation. In particular, it returns the local tensor for this Shard. If the size along the sharding dimension for the local tensor is 1, this dimension is removed from the final result. For example a [4, 16] ShardedTensor across 4 ranks is typically a local Tensor of size [16] across each rank and not [1, 16] across each rank. Args: module (:class:`torch.nn.Module`): Module whose output needs to be resharded. Returns: A :class:`torch.nn.Module` object with collection API hooked. """ def hook_func(_module, _input, output): if isinstance(output, ShardedTensor): local_tensor = output.local_tensor() # Squeeze the # of dimensions manually. if local_tensor.size(output._sharding_spec.dim) == 1: # type: ignore[attr-defined] local_tensor = local_tensor.squeeze( output._sharding_spec.dim # type: ignore[attr-defined] ) return local_tensor module.register_forward_hook(hook_func) return module
42
124
0.656711
import copy import functools from typing import List import torch import torch.distributed._shard.sharding_spec as shard_spec from .api import ( _register_sharded_op, Shard, ShardedTensor, ShardedTensorMetadata, TensorProperties, ) from .metadata import ShardMetadata from .partial_tensor import _PartialTensor def empty(sharding_spec: shard_spec.ShardingSpec, *size, dtype=None, layout=torch.strided, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format, process_group=None, init_rrefs=False) -> ShardedTensor: return ShardedTensor( sharding_spec, *size, dtype=dtype, layout=layout, requires_grad=requires_grad, pin_memory=pin_memory, memory_format=memory_format, process_group=process_group, init_rrefs=init_rrefs, ) def ones(sharding_spec: shard_spec.ShardingSpec, *size, dtype=None, layout=torch.strided, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format, process_group=None, init_rrefs=False) -> ShardedTensor: return full( sharding_spec, size, fill_value=1, dtype=dtype, layout=layout, requires_grad=requires_grad, pin_memory=pin_memory, memory_format=memory_format, process_group=process_group, init_rrefs=init_rrefs ) def zeros(sharding_spec: shard_spec.ShardingSpec, *size, dtype=None, layout=torch.strided, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format, process_group=None, init_rrefs=False) -> ShardedTensor: return full( sharding_spec, size, fill_value=0, dtype=dtype, layout=layout, requires_grad=requires_grad, pin_memory=pin_memory, memory_format=memory_format, process_group=process_group, init_rrefs=init_rrefs ) def full(sharding_spec: shard_spec.ShardingSpec, size, fill_value=torch.types.Number, dtype=None, layout=torch.strided, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format, process_group=None, init_rrefs=False) -> ShardedTensor: sharded_tensor = ShardedTensor( sharding_spec, *size, dtype=dtype, layout=layout, requires_grad=requires_grad, pin_memory=pin_memory, memory_format=memory_format, process_group=process_group, init_rrefs=init_rrefs, ) torch.nn.init.constant_(sharded_tensor, fill_value) return sharded_tensor def rand(sharding_spec: shard_spec.ShardingSpec, *size, dtype=None, layout=torch.strided, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format, process_group=None, init_rrefs=False) -> ShardedTensor: sharded_tensor = ShardedTensor( sharding_spec, *size, dtype=dtype, layout=layout, requires_grad=requires_grad, pin_memory=pin_memory, memory_format=memory_format, process_group=process_group, init_rrefs=init_rrefs, ) torch.nn.init.uniform_(sharded_tensor, 0, 1) return sharded_tensor def init_from_local_shards( local_shards: List[Shard], *global_size, process_group=None, init_rrefs=False) -> ShardedTensor: return ShardedTensor._init_from_local_shards( local_shards, *global_size, process_group=process_group, init_rrefs=init_rrefs ) def state_dict_hook(module, destination, prefix, local_metadata): for submodule_name, submodule in module.named_modules(): for attr_name, attr in submodule.__dict__.items(): if isinstance(attr, ShardedTensor): destination[prefix + submodule_name + '.' + attr_name] = attr def pre_load_state_dict_hook(module, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): for submodule_name, submodule in module.named_modules(): for attr_name, attr in submodule.__dict__.items(): key = prefix + submodule_name + '.' 
+ attr_name if key in state_dict: if isinstance(state_dict[key], ShardedTensor): setattr(submodule, attr_name, state_dict[key]) def sharded_op_impl(func): def decorator_sharded_func(wrapped_func): _register_sharded_op(func, wrapped_func) @functools.wraps(wrapped_func) def wrapper(*args, **kwargs): return wrapped_func(*args, **kwargs) return wrapper return decorator_sharded_func from ._ops import * def _reshard_output( module: torch.nn.Module, resharding_spec: shard_spec.ShardingSpec) -> torch.nn.Module: def hook_func(_module, _input, output): if isinstance(output, ShardedTensor) or isinstance(output, _PartialTensor): return output.reshard(resharding_spec) return output module.register_forward_hook(hook_func) return module def _collect_local_shard(module: torch.nn.Module) -> torch.nn.Module: def hook_func(_module, _input, output): if isinstance(output, ShardedTensor): local_tensor = output.local_tensor() if local_tensor.size(output._sharding_spec.dim) == 1: local_tensor = local_tensor.squeeze( output._sharding_spec.dim ) return local_tensor module.register_forward_hook(hook_func) return module
true
true
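The state_dict_hook defined in the record above documents that it has to be attached with torch.nn.Module._register_state_dict_hook. The sketch below only wires that hook onto a plain nn.Linear to show the registration pattern; it assumes a torch build with distributed support, and without an initialized process group and real ShardedTensor attributes the hook simply leaves the state dict unchanged. The matching pre_load_state_dict_hook would be registered on the load path in the same spirit.

import torch.nn as nn
# Assumes a torch build with distributed support so the module imports cleanly.
from torch.distributed._shard.sharded_tensor import state_dict_hook

model = nn.Linear(16, 4)
# Attach the hook exactly as its docstring asks; with no ShardedTensor
# attributes on the module it is effectively a no-op here.
model._register_state_dict_hook(state_dict_hook)
state = model.state_dict()
print(sorted(state.keys()))   # ['bias', 'weight']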
f701031bd231b0e34d374a632472743d709b8fc1
20,585
py
Python
fairseq/data/iterators.py
ofirpress/shortformer
edc411ff896ae042c01d939a32c1e4a33e238083
[ "MIT" ]
143
2020-12-30T21:40:00.000Z
2022-01-06T21:19:24.000Z
fairseq/data/iterators.py
jbdatascience/shortformer
0281f7618fb3833c8ac99f3e8e0512aed95fa2a1
[ "MIT" ]
4
2020-12-31T01:04:24.000Z
2021-10-16T23:06:04.000Z
fairseq/data/iterators.py
jbdatascience/shortformer
0281f7618fb3833c8ac99f3e8e0512aed95fa2a1
[ "MIT" ]
7
2020-12-31T17:34:54.000Z
2021-05-21T14:25:57.000Z
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import itertools import logging import math import operator import os import queue import time from threading import Thread import numpy as np import torch from fairseq.data import data_utils logger = logging.getLogger(__name__) # Object used by _background_consumer to signal the source is exhausted # to the main thread. _sentinel = object() class CountingIterator(object): """Wrapper around an iterable that maintains the iteration count. Args: iterable (iterable): iterable to wrap start (int): starting iteration count. Note that this doesn't actually advance the iterator. total (int): override the iterator length returned by ``__len__``. This can be used to truncate *iterator*. Attributes: n (int): number of elements consumed from this iterator """ def __init__(self, iterable, start=None, total=None): self.iterable = iterable self.itr = iter(self) if start is None: self.n = getattr(iterable, 'n', 0) else: self.n = start if total is None: self.total = self.n + len(iterable) else: self.total = total def __len__(self): return self.total def __iter__(self): for x in self.iterable: if self.n >= self.total: raise RuntimeError( 'Mismatch between actual and expected iterable length. ' 'Please report this to the fairseq developers.' ) self.n += 1 yield x def __next__(self): return next(self.itr) def has_next(self): """Whether the iterator has been exhausted.""" return self.n < len(self) def skip(self, num_to_skip): """Fast-forward the iterator by skipping *num_to_skip* elements.""" next(itertools.islice(self.itr, num_to_skip, num_to_skip), None) return self def take(self, n): """ Truncates the iterator to n elements at most. """ self.total = min(self.total, n) # Propagate this change to the underlying iterator # Only take after what we have already consumed (i.e. after restarting # from checkpoint mid epoch, we have to subtract self.n which is the # starting point) # # This to maintain the invariant self.total = self.n + len(iterable), # before calling __next__ or __iter__ propagated_take = max(n - self.n, 0) if hasattr(self.iterable, "take"): self.iterable.take(propagated_take) else: self.iterable = itertools.islice(self.iterable, propagated_take) class EpochBatchIterating(object): def __len__(self) -> int: raise NotImplementedError @property def next_epoch_idx(self): raise NotImplementedError def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False): """Return a new iterator over the dataset. Args: shuffle (bool, optional): shuffle batches before returning the iterator (default: True). fix_batches_to_gpus: ensure that batches are always allocated to the same shards across epochs. Requires that :attr:`dataset` supports prefetching (default: False). 
""" raise NotImplementedError def end_of_epoch(self) -> bool: """Returns whether the most recent epoch iterator has been exhausted""" raise NotImplementedError @property def iterations_in_epoch(self) -> int: """The number of consumed batches in the current epoch.""" raise NotImplementedError def state_dict(self): """Returns a dictionary containing a whole state of the iterator.""" raise NotImplementedError def load_state_dict(self, state_dict): """Copies the state of the iterator from the given *state_dict*.""" raise NotImplementedError class StreamingEpochBatchIterator(EpochBatchIterating): def __init__( self, dataset, epoch=1, num_shards=1, shard_id=0, ): assert isinstance(dataset, torch.utils.data.IterableDataset) self.dataset = dataset self.epoch = max(epoch, 1) # we use 1-based indexing for epochs self._current_epoch_iterator = None self.num_shards = num_shards self.shard_id = shard_id @property def next_epoch_idx(self): """Return the epoch index after *next_epoch_itr* is called.""" if self._current_epoch_iterator is not None and self.end_of_epoch(): return self.epoch + 1 else: return self.epoch def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False): self.epoch = self.next_epoch_idx self.dataset.set_epoch(self.epoch) self._current_epoch_iterator = CountingIterator( iterable=ShardedIterator( iterable=self.dataset, num_shards=self.num_shards, shard_id=self.shard_id, ), ) return self._current_epoch_iterator def end_of_epoch(self) -> bool: return not self._current_epoch_iterator.has_next() @property def iterations_in_epoch(self) -> int: if self._current_epoch_iterator is not None: return self._current_epoch_iterator.n return 0 def state_dict(self): return { 'epoch': self.epoch, } def load_state_dict(self, state_dict): self.epoch = state_dict['epoch'] class EpochBatchIterator(EpochBatchIterating): """A multi-epoch iterator over a :class:`torch.utils.data.Dataset`. Compared to :class:`torch.utils.data.DataLoader`, this iterator: - can be reused across multiple epochs with the :func:`next_epoch_itr` method (optionally shuffled between epochs) - can be serialized/deserialized with the :func:`state_dict` and :func:`load_state_dict` methods - supports sharding with the *num_shards* and *shard_id* arguments Args: dataset (~torch.utils.data.Dataset): dataset from which to load the data collate_fn (callable): merges a list of samples to form a mini-batch batch_sampler (~torch.utils.data.Sampler or a callable): an iterator over batches of indices, or a callable to create such an iterator (~torch.utils.data.Sampler). A callable batch_sampler will be called for each epoch to enable per epoch dynamic batch iterators defined by this callable batch_sampler. seed (int, optional): seed for random number generator for reproducibility (default: 1). num_shards (int, optional): shard the data iterator into N shards (default: 1). shard_id (int, optional): which shard of the data iterator to return (default: 0). num_workers (int, optional): how many subprocesses to use for data loading. 0 means the data will be loaded in the main process (default: 0). epoch (int, optional): the epoch to start the iterator from (default: 1). buffer_size (int, optional): the number of batches to keep ready in the queue. Helps speeding up dataloading. When buffer_size is zero, the default torch.utils.data.DataLoader preloading is used. timeout (int, optional): if positive, the timeout value for collecting a batch from workers. Should always be non-negative. 
(default: ``0``) """ def __init__( self, dataset, collate_fn, batch_sampler, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=1, buffer_size=0, timeout=0, ): assert isinstance(dataset, torch.utils.data.Dataset) self.dataset = dataset self.collate_fn = collate_fn self.batch_sampler = batch_sampler self._frozen_batches = tuple(batch_sampler) if not callable(batch_sampler) else None self.seed = seed self.num_shards = num_shards self.shard_id = shard_id self.num_workers = num_workers # This upper limit here is to prevent people from abusing this feature # in a shared computing environment. self.buffer_size = min(buffer_size, 20) self.timeout = timeout self.epoch = max(epoch, 1) # we use 1-based indexing for epochs self.shuffle = True self._cur_epoch_itr = None self._next_epoch_itr = None self._supports_prefetch = getattr(dataset, 'supports_prefetch', False) @property def frozen_batches(self): if self._frozen_batches is None: self._frozen_batches = tuple(self.batch_sampler(self.dataset, self.epoch)) return self._frozen_batches def __len__(self): return int(math.ceil(len(self.frozen_batches) / float(self.num_shards))) @property def n(self): return self.iterations_in_epoch @property def next_epoch_idx(self): """Return the epoch index after *next_epoch_itr* is called.""" if self._next_epoch_itr is not None: return self.epoch elif self._cur_epoch_itr is not None and self.end_of_epoch(): return self.epoch + 1 else: return self.epoch def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False): """Return a new iterator over the dataset. Args: shuffle (bool, optional): shuffle batches before returning the iterator (default: True). fix_batches_to_gpus: ensure that batches are always allocated to the same shards across epochs. Requires that :attr:`dataset` supports prefetching (default: False). 
""" self.epoch = self.next_epoch_idx self.dataset.set_epoch(self.epoch) if self._next_epoch_itr is not None: self._cur_epoch_itr = self._next_epoch_itr self._next_epoch_itr = None else: if callable(self.batch_sampler): # reset _frozen_batches to refresh the next epoch self._frozen_batches = None self._cur_epoch_itr = self._get_iterator_for_epoch( self.epoch, shuffle, fix_batches_to_gpus=fix_batches_to_gpus, ) self.shuffle = shuffle return self._cur_epoch_itr def end_of_epoch(self) -> bool: """Returns whether the most recent epoch iterator has been exhausted""" return not self._cur_epoch_itr.has_next() @property def iterations_in_epoch(self): """The number of consumed batches in the current epoch.""" if self._cur_epoch_itr is not None: return self._cur_epoch_itr.n elif self._next_epoch_itr is not None: return self._next_epoch_itr.n return 0 def state_dict(self): """Returns a dictionary containing a whole state of the iterator.""" if self.end_of_epoch(): epoch = self.epoch + 1 iter_in_epoch = 0 else: epoch = self.epoch iter_in_epoch = self.iterations_in_epoch return { 'version': 2, 'epoch': epoch, 'iterations_in_epoch': iter_in_epoch, 'shuffle': self.shuffle, } def load_state_dict(self, state_dict): """Copies the state of the iterator from the given *state_dict*.""" self.epoch = state_dict['epoch'] itr_pos = state_dict.get('iterations_in_epoch', 0) version = state_dict.get('version', 1) if itr_pos > 0: # fast-forward epoch iterator self._next_epoch_itr = self._get_iterator_for_epoch( self.epoch, shuffle=state_dict.get('shuffle', True), offset=itr_pos, ) if self._next_epoch_itr is None: if version == 1: # legacy behavior: we finished the epoch, increment epoch counter self.epoch += 1 else: raise RuntimeError( 'Cannot resume training due to dataloader mismatch, please ' 'report this to the fairseq developers. You can relaunch ' 'training with `--reset-dataloader` and it should work.' ) else: self._next_epoch_itr = None def _get_iterator_for_epoch(self, epoch, shuffle, fix_batches_to_gpus=False, offset=0): def shuffle_batches(batches, seed): with data_utils.numpy_seed(seed): np.random.shuffle(batches) return batches if self._supports_prefetch: batches = self.frozen_batches if shuffle and not fix_batches_to_gpus: batches = shuffle_batches(list(batches), self.seed + epoch) batches = list(ShardedIterator( batches, self.num_shards, self.shard_id, fill_value=[] )) self.dataset.prefetch([i for s in batches for i in s]) if shuffle and fix_batches_to_gpus: batches = shuffle_batches(batches, self.seed + epoch + self.shard_id) else: if shuffle: batches = shuffle_batches(list(self.frozen_batches), self.seed + epoch) else: batches = self.frozen_batches batches = list(ShardedIterator( batches, self.num_shards, self.shard_id, fill_value=[] )) if offset > 0 and offset >= len(batches): return None if self.num_workers > 0: os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning' # Create data loader itr = torch.utils.data.DataLoader( self.dataset, collate_fn=self.collate_fn, batch_sampler=batches[offset:], num_workers=self.num_workers, timeout=self.timeout, ) # Wrap with a BufferedIterator if needed if self.buffer_size > 0: itr = BufferedIterator(self.buffer_size, itr) # Wrap with CoutingIterator itr = CountingIterator(itr, start=offset) return itr class GroupedIterator(CountingIterator): """Wrapper around an iterable that returns groups (chunks) of items. 
Args: iterable (iterable): iterable to wrap chunk_size (int): size of each chunk Attributes: n (int): number of elements consumed from this iterator """ def __init__(self, iterable, chunk_size): itr = _chunk_iterator(iterable, chunk_size) super().__init__( itr, start=int(math.ceil(getattr(iterable, 'n', 0) / float(chunk_size))), total=int(math.ceil(len(iterable) / float(chunk_size))), ) self.chunk_size = chunk_size def _chunk_iterator(itr, chunk_size): chunk = [] for x in itr: chunk.append(x) if len(chunk) == chunk_size: yield chunk chunk = [] if len(chunk) > 0: yield chunk class ShardedIterator(CountingIterator): """A sharded wrapper around an iterable, padded to length. Args: iterable (iterable): iterable to wrap num_shards (int): number of shards to split the iterable into shard_id (int): which shard to iterator over fill_value (Any, optional): padding value when the iterable doesn't evenly divide *num_shards* (default: None). Attributes: n (int): number of elements consumed from this iterator """ def __init__(self, iterable, num_shards, shard_id, fill_value=None): if shard_id < 0 or shard_id >= num_shards: raise ValueError('shard_id must be between 0 and num_shards') sharded_len = int(math.ceil(len(iterable) / float(num_shards))) batch_size = len(list(iterable)[0]) last = max( list(map(max, *list(iterable)))) # This function receives a list [1,2,3,...., last] where each number represents one of the input subsequences # In the unmodified fairseq, if you have 4 GPUS, fairseq will give the first GPU subsequences [1,5,9,13,...], # the second GPU will get [2,6,10,14,..], the third GPU will get [3,7,11,15] and so on... # If we want to do caching, we can't use that. We need each GPU to get a continuous list of input subsequences (like [1,2,3,4,5,...]). # So what the following code does, is it splits the input into *continuous* chunks of subsequences. For example, if we have # 4 GPUs and 100,000 input subsequences, the first GPU will get [1,2,3,...,25000], the second GPU will get [25001,25002,25003,...], # and so on. # The above description was written with the assumption that batch_size is 1. This function also works when batch_size is greater than 1. iterable = range(0, last) all_itrs = [] for i in range(shard_id*batch_size, (shard_id+1)*batch_size): itr = list(itertools.islice(iterable, i * sharded_len, (i +1 )* sharded_len )) all_itrs.append(itr) itr = [x for x in itertools.chain(*itertools.zip_longest(*all_itrs)) if x is not None] itr = [itr[i:i+batch_size] for i in range(0, len(itr), batch_size)] #split to batches if len(itr) != sharded_len: #this makes sure that we don't miss any input subsequences to_add = sharded_len - len(itr) to_add = [[e] for e in range(sharded_len-to_add, sharded_len)] itr = itr + to_add super().__init__( itr, start=int(math.ceil(getattr(iterable, 'n', 0) / float(num_shards))), total=sharded_len, ) class BackgroundConsumer(Thread): def __init__(self, queue, source, max_len): Thread.__init__(self) self._queue = queue self._source = source self._max_len = max_len self.count = 0 def run(self): try: for item in self._source: self._queue.put(item) # Stop if we reached the maximum length self.count += 1 if self._max_len is not None and self.count >= self._max_len: break # Signal the consumer we are done. 
self._queue.put(_sentinel) except Exception as e: self._queue.put(e) class BufferedIterator(object): def __init__(self, size, iterable): self._queue = queue.Queue(size) self._iterable = iterable self._consumer = None self.start_time = time.time() self.warning_time = None self.total = len(iterable) def _create_consumer(self): self._consumer = BackgroundConsumer( self._queue, self._iterable, self.total, ) self._consumer.daemon = True self._consumer.start() def __iter__(self): return self def __len__(self): return self.total def take(self, n): self.total = min(self.total, n) # Propagate this change to the underlying iterator if hasattr(self._iterable, "take"): self._iterable.take(n) else: self._iterable = itertools.islice(self._iterable, n) def __next__(self): # Create consumer if not created yet if self._consumer is None: self._create_consumer() # Notify the user if there is a data loading bottleneck if self._queue.qsize() < min(2, max(1, self._queue.maxsize // 2)): if time.time() - self.start_time > 5 * 60: if self.warning_time is None or time.time() - self.warning_time > 15 * 60: logger.debug( "Data loading buffer is empty or nearly empty. This may " "indicate a data loading bottleneck, and increasing the " "number of workers (--num-workers) may help." ) self.warning_time = time.time() # Get next example item = self._queue.get(True) if isinstance(item, Exception): raise item if item is _sentinel: raise StopIteration() return item
35.8
145
0.614914
import itertools import logging import math import operator import os import queue import time from threading import Thread import numpy as np import torch from fairseq.data import data_utils logger = logging.getLogger(__name__) _sentinel = object() class CountingIterator(object): def __init__(self, iterable, start=None, total=None): self.iterable = iterable self.itr = iter(self) if start is None: self.n = getattr(iterable, 'n', 0) else: self.n = start if total is None: self.total = self.n + len(iterable) else: self.total = total def __len__(self): return self.total def __iter__(self): for x in self.iterable: if self.n >= self.total: raise RuntimeError( 'Mismatch between actual and expected iterable length. ' 'Please report this to the fairseq developers.' ) self.n += 1 yield x def __next__(self): return next(self.itr) def has_next(self): return self.n < len(self) def skip(self, num_to_skip): next(itertools.islice(self.itr, num_to_skip, num_to_skip), None) return self def take(self, n): self.total = min(self.total, n) propagated_take = max(n - self.n, 0) if hasattr(self.iterable, "take"): self.iterable.take(propagated_take) else: self.iterable = itertools.islice(self.iterable, propagated_take) class EpochBatchIterating(object): def __len__(self) -> int: raise NotImplementedError @property def next_epoch_idx(self): raise NotImplementedError def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False): raise NotImplementedError def end_of_epoch(self) -> bool: raise NotImplementedError @property def iterations_in_epoch(self) -> int: raise NotImplementedError def state_dict(self): raise NotImplementedError def load_state_dict(self, state_dict): raise NotImplementedError class StreamingEpochBatchIterator(EpochBatchIterating): def __init__( self, dataset, epoch=1, num_shards=1, shard_id=0, ): assert isinstance(dataset, torch.utils.data.IterableDataset) self.dataset = dataset self.epoch = max(epoch, 1) self._current_epoch_iterator = None self.num_shards = num_shards self.shard_id = shard_id @property def next_epoch_idx(self): if self._current_epoch_iterator is not None and self.end_of_epoch(): return self.epoch + 1 else: return self.epoch def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False): self.epoch = self.next_epoch_idx self.dataset.set_epoch(self.epoch) self._current_epoch_iterator = CountingIterator( iterable=ShardedIterator( iterable=self.dataset, num_shards=self.num_shards, shard_id=self.shard_id, ), ) return self._current_epoch_iterator def end_of_epoch(self) -> bool: return not self._current_epoch_iterator.has_next() @property def iterations_in_epoch(self) -> int: if self._current_epoch_iterator is not None: return self._current_epoch_iterator.n return 0 def state_dict(self): return { 'epoch': self.epoch, } def load_state_dict(self, state_dict): self.epoch = state_dict['epoch'] class EpochBatchIterator(EpochBatchIterating): def __init__( self, dataset, collate_fn, batch_sampler, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=1, buffer_size=0, timeout=0, ): assert isinstance(dataset, torch.utils.data.Dataset) self.dataset = dataset self.collate_fn = collate_fn self.batch_sampler = batch_sampler self._frozen_batches = tuple(batch_sampler) if not callable(batch_sampler) else None self.seed = seed self.num_shards = num_shards self.shard_id = shard_id self.num_workers = num_workers self.buffer_size = min(buffer_size, 20) self.timeout = timeout self.epoch = max(epoch, 1) self.shuffle = True self._cur_epoch_itr = None self._next_epoch_itr = None 
self._supports_prefetch = getattr(dataset, 'supports_prefetch', False) @property def frozen_batches(self): if self._frozen_batches is None: self._frozen_batches = tuple(self.batch_sampler(self.dataset, self.epoch)) return self._frozen_batches def __len__(self): return int(math.ceil(len(self.frozen_batches) / float(self.num_shards))) @property def n(self): return self.iterations_in_epoch @property def next_epoch_idx(self): if self._next_epoch_itr is not None: return self.epoch elif self._cur_epoch_itr is not None and self.end_of_epoch(): return self.epoch + 1 else: return self.epoch def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False): self.epoch = self.next_epoch_idx self.dataset.set_epoch(self.epoch) if self._next_epoch_itr is not None: self._cur_epoch_itr = self._next_epoch_itr self._next_epoch_itr = None else: if callable(self.batch_sampler): self._frozen_batches = None self._cur_epoch_itr = self._get_iterator_for_epoch( self.epoch, shuffle, fix_batches_to_gpus=fix_batches_to_gpus, ) self.shuffle = shuffle return self._cur_epoch_itr def end_of_epoch(self) -> bool: return not self._cur_epoch_itr.has_next() @property def iterations_in_epoch(self): if self._cur_epoch_itr is not None: return self._cur_epoch_itr.n elif self._next_epoch_itr is not None: return self._next_epoch_itr.n return 0 def state_dict(self): if self.end_of_epoch(): epoch = self.epoch + 1 iter_in_epoch = 0 else: epoch = self.epoch iter_in_epoch = self.iterations_in_epoch return { 'version': 2, 'epoch': epoch, 'iterations_in_epoch': iter_in_epoch, 'shuffle': self.shuffle, } def load_state_dict(self, state_dict): self.epoch = state_dict['epoch'] itr_pos = state_dict.get('iterations_in_epoch', 0) version = state_dict.get('version', 1) if itr_pos > 0: self._next_epoch_itr = self._get_iterator_for_epoch( self.epoch, shuffle=state_dict.get('shuffle', True), offset=itr_pos, ) if self._next_epoch_itr is None: if version == 1: self.epoch += 1 else: raise RuntimeError( 'Cannot resume training due to dataloader mismatch, please ' 'report this to the fairseq developers. You can relaunch ' 'training with `--reset-dataloader` and it should work.' 
) else: self._next_epoch_itr = None def _get_iterator_for_epoch(self, epoch, shuffle, fix_batches_to_gpus=False, offset=0): def shuffle_batches(batches, seed): with data_utils.numpy_seed(seed): np.random.shuffle(batches) return batches if self._supports_prefetch: batches = self.frozen_batches if shuffle and not fix_batches_to_gpus: batches = shuffle_batches(list(batches), self.seed + epoch) batches = list(ShardedIterator( batches, self.num_shards, self.shard_id, fill_value=[] )) self.dataset.prefetch([i for s in batches for i in s]) if shuffle and fix_batches_to_gpus: batches = shuffle_batches(batches, self.seed + epoch + self.shard_id) else: if shuffle: batches = shuffle_batches(list(self.frozen_batches), self.seed + epoch) else: batches = self.frozen_batches batches = list(ShardedIterator( batches, self.num_shards, self.shard_id, fill_value=[] )) if offset > 0 and offset >= len(batches): return None if self.num_workers > 0: os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning' itr = torch.utils.data.DataLoader( self.dataset, collate_fn=self.collate_fn, batch_sampler=batches[offset:], num_workers=self.num_workers, timeout=self.timeout, ) if self.buffer_size > 0: itr = BufferedIterator(self.buffer_size, itr) itr = CountingIterator(itr, start=offset) return itr class GroupedIterator(CountingIterator): def __init__(self, iterable, chunk_size): itr = _chunk_iterator(iterable, chunk_size) super().__init__( itr, start=int(math.ceil(getattr(iterable, 'n', 0) / float(chunk_size))), total=int(math.ceil(len(iterable) / float(chunk_size))), ) self.chunk_size = chunk_size def _chunk_iterator(itr, chunk_size): chunk = [] for x in itr: chunk.append(x) if len(chunk) == chunk_size: yield chunk chunk = [] if len(chunk) > 0: yield chunk class ShardedIterator(CountingIterator): def __init__(self, iterable, num_shards, shard_id, fill_value=None): if shard_id < 0 or shard_id >= num_shards: raise ValueError('shard_id must be between 0 and num_shards') sharded_len = int(math.ceil(len(iterable) / float(num_shards))) batch_size = len(list(iterable)[0]) last = max( list(map(max, *list(iterable)))) # So what the following code does, is it splits the input into *continuous* chunks of subsequences. For example, if we have # 4 GPUs and 100,000 input subsequences, the first GPU will get [1,2,3,...,25000], the second GPU will get [25001,25002,25003,...], # and so on. # The above description was written with the assumption that batch_size is 1. This function also works when batch_size is greater than 1. 
iterable = range(0, last) all_itrs = [] for i in range(shard_id*batch_size, (shard_id+1)*batch_size): itr = list(itertools.islice(iterable, i * sharded_len, (i +1 )* sharded_len )) all_itrs.append(itr) itr = [x for x in itertools.chain(*itertools.zip_longest(*all_itrs)) if x is not None] itr = [itr[i:i+batch_size] for i in range(0, len(itr), batch_size)] #split to batches if len(itr) != sharded_len: #this makes sure that we don't miss any input subsequences to_add = sharded_len - len(itr) to_add = [[e] for e in range(sharded_len-to_add, sharded_len)] itr = itr + to_add super().__init__( itr, start=int(math.ceil(getattr(iterable, 'n', 0) / float(num_shards))), total=sharded_len, ) class BackgroundConsumer(Thread): def __init__(self, queue, source, max_len): Thread.__init__(self) self._queue = queue self._source = source self._max_len = max_len self.count = 0 def run(self): try: for item in self._source: self._queue.put(item) self.count += 1 if self._max_len is not None and self.count >= self._max_len: break self._queue.put(_sentinel) except Exception as e: self._queue.put(e) class BufferedIterator(object): def __init__(self, size, iterable): self._queue = queue.Queue(size) self._iterable = iterable self._consumer = None self.start_time = time.time() self.warning_time = None self.total = len(iterable) def _create_consumer(self): self._consumer = BackgroundConsumer( self._queue, self._iterable, self.total, ) self._consumer.daemon = True self._consumer.start() def __iter__(self): return self def __len__(self): return self.total def take(self, n): self.total = min(self.total, n) if hasattr(self._iterable, "take"): self._iterable.take(n) else: self._iterable = itertools.islice(self._iterable, n) def __next__(self): if self._consumer is None: self._create_consumer() if self._queue.qsize() < min(2, max(1, self._queue.maxsize // 2)): if time.time() - self.start_time > 5 * 60: if self.warning_time is None or time.time() - self.warning_time > 15 * 60: logger.debug( "Data loading buffer is empty or nearly empty. This may " "indicate a data loading bottleneck, and increasing the " "number of workers (--num-workers) may help." ) self.warning_time = time.time() item = self._queue.get(True) if isinstance(item, Exception): raise item if item is _sentinel: raise StopIteration() return item
true
true
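The comments inside the modified ShardedIterator in the record above describe a contiguous (rather than round-robin) split of the input across shards: with 4 GPUs, the first GPU takes the first quarter of the subsequences, the second GPU the next quarter, and so on. The following standalone sketch is not part of the record; it uses an illustrative helper name (contiguous_shard) and assumes the simple case of batch_size == 1, just to make the split easy to see.

import math

def contiguous_shard(items, num_shards, shard_id):
    """Give each shard one contiguous slice of `items` (e.g. with 4 shards and
    100 items, shard 0 gets items[0:25], shard 1 gets items[25:50], and so on)."""
    if not 0 <= shard_id < num_shards:
        raise ValueError("shard_id must be between 0 and num_shards - 1")
    # Same ceiling division the record's code uses for sharded_len.
    sharded_len = int(math.ceil(len(items) / float(num_shards)))
    return items[shard_id * sharded_len:(shard_id + 1) * sharded_len]

# Example: 10 items over 3 shards -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
print([contiguous_shard(list(range(10)), 3, s) for s in range(3)])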
f70103317a07a6f7d5eeac3ff180b3d79a0fc6b7
1,126
py
Python
gen_ndx_for_h.py
fio2003/GPA_star
33530056500fed30bba27aaade3db544f76c4c50
[ "MIT" ]
null
null
null
gen_ndx_for_h.py
fio2003/GPA_star
33530056500fed30bba27aaade3db544f76c4c50
[ "MIT" ]
null
null
null
gen_ndx_for_h.py
fio2003/GPA_star
33530056500fed30bba27aaade3db544f76c4c50
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 from parse_topology_for_hydrogens import parse_top_for_h def gen_h_ndx(orig_ndx, topology, out_name='h_prot.ndx'): ndx_ind = list() with open(orig_ndx, 'r') as f: line = f.readline() while '[ Protein ]' not in line: line = f.readline() line = f.readline() while ';' == line[0]: line = f.readline() line = line.strip() while len(line): ndx_ind.extend(line.split()) line = f.readline().strip() ndx_ind = [int(elem) for elem in ndx_ind] good_ind = parse_top_for_h(topology) filtered_h_ind = [elem for elem in ndx_ind if elem in good_ind] formated_h_ind = ['{:>4} '.format(elem) for elem in filtered_h_ind] with open(out_name, 'w') as new_file: ind = 0 new_file.write('[ Protein ]\n') while ind < len(filtered_h_ind): new_file.write(''.join(formated_h_ind[ind:ind+15])) new_file.write('\n') # print(''.join(formated_h_ind[ind:ind+15])) ind += 15 # gen_h_ndx('./prot_dir/prot.ndx', './prot_dir/topol.top')
32.171429
71
0.584369
from parse_topology_for_hydrogens import parse_top_for_h def gen_h_ndx(orig_ndx, topology, out_name='h_prot.ndx'): ndx_ind = list() with open(orig_ndx, 'r') as f: line = f.readline() while '[ Protein ]' not in line: line = f.readline() line = f.readline() while ';' == line[0]: line = f.readline() line = line.strip() while len(line): ndx_ind.extend(line.split()) line = f.readline().strip() ndx_ind = [int(elem) for elem in ndx_ind] good_ind = parse_top_for_h(topology) filtered_h_ind = [elem for elem in ndx_ind if elem in good_ind] formated_h_ind = ['{:>4} '.format(elem) for elem in filtered_h_ind] with open(out_name, 'w') as new_file: ind = 0 new_file.write('[ Protein ]\n') while ind < len(filtered_h_ind): new_file.write(''.join(formated_h_ind[ind:ind+15])) new_file.write('\n') ind += 15
true
true
f7010346ccc7068236993498a0b696ad3a8f117c
943
py
Python
fixture/soap.py
nicholas-y/python_mantis
27c993f6d47804ae12f15664e783cf27db89ed06
[ "Apache-2.0" ]
null
null
null
fixture/soap.py
nicholas-y/python_mantis
27c993f6d47804ae12f15664e783cf27db89ed06
[ "Apache-2.0" ]
null
null
null
fixture/soap.py
nicholas-y/python_mantis
27c993f6d47804ae12f15664e783cf27db89ed06
[ "Apache-2.0" ]
null
null
null
from suds.client import Client from suds import WebFault from model.project import Project class SoapHelper: def __init__(self, app): self.app = app def can_login(self, username, password): client = Client("http://localhost:8080/mantisbt-1.2.20/api/soap/mantisconnect.php?wsdl") try: client.service.mc_login(username, password) return True except WebFault: return False def get_project_list(self, username, password): project_list = [] client = Client("http://localhost:8080/mantisbt-1.2.20/api/soap/mantisconnect.php?wsdl") projects = client.service.mc_projects_get_user_accessible(username, password) for i in range(len(projects)): name = projects[i].name description = projects[i].description project_list.append(Project(name=name, description=description)) return project_list
32.517241
96
0.662778
from suds.client import Client from suds import WebFault from model.project import Project class SoapHelper: def __init__(self, app): self.app = app def can_login(self, username, password): client = Client("http://localhost:8080/mantisbt-1.2.20/api/soap/mantisconnect.php?wsdl") try: client.service.mc_login(username, password) return True except WebFault: return False def get_project_list(self, username, password): project_list = [] client = Client("http://localhost:8080/mantisbt-1.2.20/api/soap/mantisconnect.php?wsdl") projects = client.service.mc_projects_get_user_accessible(username, password) for i in range(len(projects)): name = projects[i].name description = projects[i].description project_list.append(Project(name=name, description=description)) return project_list
true
true
f701037a52d81611de31cc7b16b479f6c00e8253
483
py
Python
trigger.py
melnikk/docker-kibana
588136b6ab89fab82f2c6a35d18bd698bbbb5a5b
[ "MIT" ]
null
null
null
trigger.py
melnikk/docker-kibana
588136b6ab89fab82f2c6a35d18bd698bbbb5a5b
[ "MIT" ]
null
null
null
trigger.py
melnikk/docker-kibana
588136b6ab89fab82f2c6a35d18bd698bbbb5a5b
[ "MIT" ]
null
null
null
#!/usr/bin/python import requests import os token = os.getenv("DOCKER_HUB_TOKEN") version = os.getenv("VERSION") if token==None: print "env DOCKER_HUB_TOKEN not set" exit(1) if version==None: print "env VERSION not set" exit(1) url = "https://registry.hub.docker.com/u/skbkontur/kibana/trigger/%s/" % token payload = { 'docker_tag' : version } headers = { 'Content-Type': 'application/json'} res = requests.post(url, data=payload, headers=headers) print res
21.954545
82
0.693582
import requests import os token = os.getenv("DOCKER_HUB_TOKEN") version = os.getenv("VERSION") if token==None: print "env DOCKER_HUB_TOKEN not set" exit(1) if version==None: print "env VERSION not set" exit(1) url = "https://registry.hub.docker.com/u/skbkontur/kibana/trigger/%s/" % token payload = { 'docker_tag' : version } headers = { 'Content-Type': 'application/json'} res = requests.post(url, data=payload, headers=headers) print res
false
true
f70105949195595111fc3e5cde8ede620a8580fd
1,859
py
Python
denim/scaffold/__init__.py
timsavage/denim
8aafd7d7b0d6cc4029e451e36e6fa25d4c842a04
[ "BSD-2-Clause" ]
null
null
null
denim/scaffold/__init__.py
timsavage/denim
8aafd7d7b0d6cc4029e451e36e6fa25d4c842a04
[ "BSD-2-Clause" ]
1
2015-08-06T22:24:16.000Z
2015-08-18T07:17:38.000Z
denim/scaffold/__init__.py
timsavage/denim
8aafd7d7b0d6cc4029e451e36e6fa25d4c842a04
[ "BSD-2-Clause" ]
null
null
null
# -*- encoding:utf8 -*- try: from jinja2 import Environment, PackageLoader except ImportError: raise ImportError('Scaffolding support requires the Jinja 2 templating library to be installed.') template_environment = Environment(loader=PackageLoader('denim.scaffold')) def single(template_file, output_name, context): """ Generate a single file. :param template_file: :param output_name: :param context: :return: """ template = template_environment.get_template(template_file) print template.render(**context) def environment(template_file, output_name, context): """ Generate multiple files based on the from the env list. :param template_file: :param output_name: :param context: :return: """ envs = context.env for env in envs: context['env'] = env single(template_file, output_name, context) # Name: (Template, Target, Generation method, Required parameters) SCAFFOLDS = { 'nginx': ('nginx.conf.txt', 'conf/nginx/%(env)s.conf', environment, ('env', )), 'django.fabric': ('django/fabfile.py.txt', 'fabfile.py', single, ('env', ('scm', 'hg'))), 'django.supervisor': ('django/supervisor.conf.txt', 'conf/supervisor.conf', single, None), } def generate_scaffold(scaffold_code): scaffold = SCAFFOLDS.get(scaffold_code) if not scaffold: raise NotImplementedError('This scaffold does not exist') #template = template_environment.get_template('django/fabfile.py.txt') #context = { # 'deploy_scm': 'git', # 'deployment_envs': [{ # 'name': 'production', # 'hosts': ['192.168.0.1', '192.168.0.2',] # }, { # 'name': 'staging', # 'hosts': ['192.168.1.1', '192.168.1.2',] # }, { # 'name': 'development', # 'hosts': ['127.0.0.1',] # }] #} #print template.render(**context)
26.942029
101
0.641205
try: from jinja2 import Environment, PackageLoader except ImportError: raise ImportError('Scaffolding support requires the Jinja 2 templating library to be installed.') template_environment = Environment(loader=PackageLoader('denim.scaffold')) def single(template_file, output_name, context): """ Generate a single file. :param template_file: :param output_name: :param context: :return: """ template = template_environment.get_template(template_file) print template.render(**context) def environment(template_file, output_name, context): """ Generate multiple files based on the from the env list. :param template_file: :param output_name: :param context: :return: """ envs = context.env for env in envs: context['env'] = env single(template_file, output_name, context) SCAFFOLDS = { 'nginx': ('nginx.conf.txt', 'conf/nginx/%(env)s.conf', environment, ('env', )), 'django.fabric': ('django/fabfile.py.txt', 'fabfile.py', single, ('env', ('scm', 'hg'))), 'django.supervisor': ('django/supervisor.conf.txt', 'conf/supervisor.conf', single, None), } def generate_scaffold(scaffold_code): scaffold = SCAFFOLDS.get(scaffold_code) if not scaffold: raise NotImplementedError('This scaffold does not exist')
false
true
f7010728fb2ff36ef48aac18ff34445fa2e903b4
137
py
Python
10870/solution.py
bossm0n5t3r/BOJ
03132388a0c76ef66d6b0dec2053aeca65c4aee6
[ "MIT" ]
2
2020-01-14T07:27:25.000Z
2020-02-12T07:49:58.000Z
2747/solution.py
bossm0n5t3r/BOJ
03132388a0c76ef66d6b0dec2053aeca65c4aee6
[ "MIT" ]
1
2020-01-14T07:29:30.000Z
2021-11-28T11:29:08.000Z
2748/solution.py
bossm0n5t3r/BOJ
03132388a0c76ef66d6b0dec2053aeca65c4aee6
[ "MIT" ]
null
null
null
def sol(): a, b = 0, 1 for i in range(int(input())): a, b = b, a + b print(a) if __name__ == "__main__": sol()
13.7
33
0.445255
def sol(): a, b = 0, 1 for i in range(int(input())): a, b = b, a + b print(a) if __name__ == "__main__": sol()
true
true
f70107c16aa8da73270e2e4f859fa54ad72fc815
5,031
py
Python
slackclient/slackrequest.py
MicahLyle/python-slackclient
ad3710611bc6b061feac46df2535c9089ec38677
[ "MIT" ]
null
null
null
slackclient/slackrequest.py
MicahLyle/python-slackclient
ad3710611bc6b061feac46df2535c9089ec38677
[ "MIT" ]
null
null
null
slackclient/slackrequest.py
MicahLyle/python-slackclient
ad3710611bc6b061feac46df2535c9089ec38677
[ "MIT" ]
null
null
null
import json import platform import requests import six import sys from .version import __version__ class SlackRequest(object): def __init__( self, proxies=None ): # HTTP configs self.custom_user_agent = None self.proxies = proxies # Construct the user-agent header with the package info, Python version and OS version. self.default_user_agent = { # __name__ returns all classes, we only want the client "client": "{0}/{1}".format(__name__.split('.')[0], __version__), "python": "Python/{v.major}.{v.minor}.{v.micro}".format(v=sys.version_info), "system": "{0}/{1}".format(platform.system(), platform.release()) } def get_user_agent(self): # Check for custom user-agent and append if found if self.custom_user_agent: custom_ua_list = ["/".join(client_info) for client_info in self.custom_user_agent] custom_ua_string = " ".join(custom_ua_list) self.default_user_agent['custom'] = custom_ua_string # Concatenate and format the user-agent string to be passed into request headers ua_string = [] for key, val in self.default_user_agent.items(): ua_string.append(val) user_agent_string = " ".join(ua_string) return user_agent_string def append_user_agent(self, name, version): if self.custom_user_agent: self.custom_user_agent.append([name.replace("/", ":"), version.replace("/", ":")]) else: self.custom_user_agent = [[name, version]] def do(self, token=None, request="?", post_data=None, as_user=None, domain="slack.com", timeout=None): """ Perform a POST request to the Slack Web API Args: token (str): your authentication token request (str): the method to call from the Slack API. For example: 'channels.list' post_data (dict): key/value arguments to pass for the request. For example: {'channel': 'CABC12345'} as_user (str): if using a workspace app, the user_id of the user to act on behalf of domain (str): if for some reason you want to send your request to something other than slack.com timeout (float): stop waiting for a response after a given number of seconds """ # Pull `file` out so it isn't JSON encoded like normal fields. # Only do this for requests that are UPLOADING files; downloading files # use the 'file' argument to point to a File ID. 
post_data = post_data or {} # Move singular file objects into `files` upload_requests = ['files.upload'] # Move file content into requests' `files` param files = None if request in upload_requests: files = {'file': post_data.pop('file')} if 'file' in post_data else None # Check for plural fields and convert them to comma-separated strings if needed for field in {'channels', 'users', 'types'} & set(post_data.keys()): if isinstance(post_data[field], list): post_data[field] = ",".join(post_data[field]) # Convert any params which are list-like to JSON strings # Example: `attachments` is a dict, and needs to be passed as JSON for k, v in six.iteritems(post_data): if isinstance(v, (list, dict)): post_data[k] = json.dumps(v) return self.post_http_request(token, request, post_data, as_user, files, timeout, domain) def post_http_request(self, token, api_method, post_data, as_user=None, files=None, timeout=None, domain="slack.com"): """ This method build and submits the Web API HTTP request :param token: You app's Slack access token :param api_method: The API method endpoint to submit the request to :param post_data: The request payload :param as_user: The user_id if using a workspace app on behalf of a user :param files: Any files to be submitted during upload calls :param timeout: Stop waiting for a response after a given number of seconds :param domain: The URL to submit the API request to :return: """ # Override token header if `token` is passed in post_data if post_data is not None and "token" in post_data: token = post_data['token'] # Set user-agent and auth headers headers = { 'user-agent': self.get_user_agent(), 'Authorization': 'Bearer {}'.format(token) } if as_user: headers["X-Slack-User"] = as_user # Submit the request res = requests.post( 'https://{0}/api/{1}'.format(domain, api_method), headers=headers, data=post_data, files=files, timeout=timeout, proxies=self.proxies ) return res
40.572581
97
0.611608
import json import platform import requests import six import sys from .version import __version__ class SlackRequest(object): def __init__( self, proxies=None ): self.custom_user_agent = None self.proxies = proxies self.default_user_agent = { "client": "{0}/{1}".format(__name__.split('.')[0], __version__), "python": "Python/{v.major}.{v.minor}.{v.micro}".format(v=sys.version_info), "system": "{0}/{1}".format(platform.system(), platform.release()) } def get_user_agent(self): if self.custom_user_agent: custom_ua_list = ["/".join(client_info) for client_info in self.custom_user_agent] custom_ua_string = " ".join(custom_ua_list) self.default_user_agent['custom'] = custom_ua_string ua_string = [] for key, val in self.default_user_agent.items(): ua_string.append(val) user_agent_string = " ".join(ua_string) return user_agent_string def append_user_agent(self, name, version): if self.custom_user_agent: self.custom_user_agent.append([name.replace("/", ":"), version.replace("/", ":")]) else: self.custom_user_agent = [[name, version]] def do(self, token=None, request="?", post_data=None, as_user=None, domain="slack.com", timeout=None): # Only do this for requests that are UPLOADING files; downloading files # use the 'file' argument to point to a File ID. post_data = post_data or {} # Move singular file objects into `files` upload_requests = ['files.upload'] # Move file content into requests' `files` param files = None if request in upload_requests: files = {'file': post_data.pop('file')} if 'file' in post_data else None for field in {'channels', 'users', 'types'} & set(post_data.keys()): if isinstance(post_data[field], list): post_data[field] = ",".join(post_data[field]) for k, v in six.iteritems(post_data): if isinstance(v, (list, dict)): post_data[k] = json.dumps(v) return self.post_http_request(token, request, post_data, as_user, files, timeout, domain) def post_http_request(self, token, api_method, post_data, as_user=None, files=None, timeout=None, domain="slack.com"): if post_data is not None and "token" in post_data: token = post_data['token'] headers = { 'user-agent': self.get_user_agent(), 'Authorization': 'Bearer {}'.format(token) } if as_user: headers["X-Slack-User"] = as_user res = requests.post( 'https://{0}/api/{1}'.format(domain, api_method), headers=headers, data=post_data, files=files, timeout=timeout, proxies=self.proxies ) return res
true
true
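The do() method documented in the slackrequest.py record above takes an auth token, a Web API method name, and a dict of arguments, and returns the underlying requests Response. A minimal usage sketch follows; it assumes the record's package is importable as slackclient and that a valid token is exported in SLACK_API_TOKEN (the environment variable name is an illustrative choice, not something the record defines).

import os

from slackclient.slackrequest import SlackRequest

request = SlackRequest()
# Call the Slack Web API's auth.test method; list/dict values in post_data would
# be JSON-encoded by do() before the POST is sent.
response = request.do(
    token=os.environ.get("SLACK_API_TOKEN"),
    request="auth.test",
    post_data={},
    timeout=10,
)
print(response.status_code, response.text)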
f70107d74dd229b54d77f5a86ff7c2a5e86c85cc
13,891
py
Python
src/controller/python/chip/ChipStack.py
tymoteuszblochmobica/connectedhomeip
fe4cf7813766b96df13ceb88f6e1cccf083ae1a9
[ "Apache-2.0" ]
null
null
null
src/controller/python/chip/ChipStack.py
tymoteuszblochmobica/connectedhomeip
fe4cf7813766b96df13ceb88f6e1cccf083ae1a9
[ "Apache-2.0" ]
null
null
null
src/controller/python/chip/ChipStack.py
tymoteuszblochmobica/connectedhomeip
fe4cf7813766b96df13ceb88f6e1cccf083ae1a9
[ "Apache-2.0" ]
null
null
null
# # Copyright (c) 2020 Project CHIP Authors # Copyright (c) 2020 Google LLC. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # @file # Python interface for Chip Stack # """Chip Stack interface """ from __future__ import absolute_import from __future__ import print_function import sys import os import time import glob import platform import logging from threading import Lock, Event from ctypes import * from .ChipUtility import ChipUtility from .ChipExceptions import * __all__ = [ "DeviceStatusStruct", "ChipStackException", "DeviceError", "ChipStackError", "ChipStack", ] ChipStackDLLBaseName = "_ChipDeviceCtrl.so" def _singleton(cls): instance = [None] def wrapper(*args, **kwargs): if instance[0] is None: instance[0] = cls(*args, **kwargs) return instance[0] return wrapper class DeviceStatusStruct(Structure): _fields_ = [ ("ProfileId", c_uint32), ("StatusCode", c_uint16), ("SysErrorCode", c_uint32), ] class LogCategory(object): """Debug logging categories used by chip.""" # NOTE: These values must correspond to those used in the chip C++ code. Disabled = 0 Error = 1 Progress = 2 Detail = 3 Retain = 4 @staticmethod def categoryToLogLevel(cat): if cat == LogCategory.Error: return logging.ERROR elif cat == LogCategory.Progress: return logging.INFO elif cat == LogCategory.Detail: return logging.DEBUG elif cat == LogCategory.Retain: return logging.CRITICAL else: return logging.NOTSET class ChipLogFormatter(logging.Formatter): """A custom logging.Formatter for logging chip library messages.""" def __init__( self, datefmt=None, logModulePrefix=False, logLevel=False, logTimestamp=False, logMSecs=True, ): fmt = "%(message)s" if logModulePrefix: fmt = "CHIP:%(chip-module)s: " + fmt if logLevel: fmt = "%(levelname)s:" + fmt if datefmt is not None or logTimestamp: fmt = "%(asctime)s " + fmt super(ChipLogFormatter, self).__init__(fmt=fmt, datefmt=datefmt) self.logMSecs = logMSecs def formatTime(self, record, datefmt=None): if datefmt is None: timestampStr = time.strftime("%Y-%m-%d %H:%M:%S%z") if self.logMSecs: timestampUS = record.__dict__.get("timestamp-usec", 0) timestampStr = "%s.%03ld" % (timestampStr, timestampUS / 1000) return timestampStr _CompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p) _ErrorFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_ulong, POINTER(DeviceStatusStruct)) _LogMessageFunct = CFUNCTYPE(None, c_int64, c_int64, c_char_p, c_uint8, c_char_p) @_singleton class ChipStack(object): def __init__(self, installDefaultLogHandler=True): self.networkLock = Lock() self.completeEvent = Event() self._ChipStackLib = None self._chipDLLPath = None self.devMgr = None self.callbackRes = None self._activeLogFunct = None self.addModulePrefixToLogMessage = True # Locate and load the chip shared library. self._loadLib() # Arrange to log output from the chip library to a python logger object with the # name 'chip.ChipStack'. 
If desired, applications can override this behavior by # setting self.logger to a different python logger object, or by calling setLogFunct() # with their own logging function. self.logger = logging.getLogger(__name__) self.setLogFunct(self.defaultLogFunct) # Determine if there are already handlers installed for the logger. Python 3.5+ # has a method for this; on older versions the check has to be done manually. if hasattr(self.logger, "hasHandlers"): hasHandlers = self.logger.hasHandlers() else: hasHandlers = False logger = self.logger while logger is not None: if len(logger.handlers) > 0: hasHandlers = True break if not logger.propagate: break logger = logger.parent # If a logging handler has not already been initialized for 'chip.ChipStack', # or any one of its parent loggers, automatically configure a handler to log to # stdout. This maintains compatibility with a number of applications which expect # chip log output to go to stdout by default. # # This behavior can be overridden in a variety of ways: # - Initialize a different log handler before ChipStack is initialized. # - Pass installDefaultLogHandler=False when initializing ChipStack. # - Replace the StreamHandler on self.logger with a different handler object. # - Set a different Formatter object on the existing StreamHandler object. # - Reconfigure the existing ChipLogFormatter object. # - Configure chip to call an application-specific logging function by # calling self.setLogFunct(). # - Call self.setLogFunct(None), which will configure the chip library # to log directly to stdout, bypassing python altogether. # if installDefaultLogHandler and not hasHandlers: logHandler = logging.StreamHandler(stream=sys.stdout) logHandler.setFormatter(ChipLogFormatter()) self.logger.addHandler(logHandler) self.logger.setLevel(logging.DEBUG) def HandleComplete(appState, reqState): self.callbackRes = True self.completeEvent.set() def HandleError(appState, reqState, err, devStatusPtr): self.callbackRes = self.ErrorToException(err, devStatusPtr) self.completeEvent.set() self.cbHandleComplete = _CompleteFunct(HandleComplete) self.cbHandleError = _ErrorFunct(HandleError) self.blockingCB = None # set by other modules(BLE) that require service by thread while thread blocks. # Initialize the chip library res = self._ChipStackLib.pychip_Stack_Init() if res != 0: raise self._ChipStack.ErrorToException(res) @property def defaultLogFunct(self): """Returns a python callable which, when called, logs a message to the python logger object currently associated with the ChipStack object. The returned function is suitable for passing to the setLogFunct() method.""" def logFunct(timestamp, timestampUSec, moduleName, logCat, message): moduleName = ChipUtility.CStringToString(moduleName) message = ChipUtility.CStringToString(message) if self.addModulePrefixToLogMessage: message = "CHIP:%s: %s" % (moduleName, message) logLevel = LogCategory.categoryToLogLevel(logCat) msgAttrs = { "chip-module": moduleName, "timestamp": timestamp, "timestamp-usec": timestampUSec, } self.logger.log(logLevel, message, extra=msgAttrs) return logFunct def setLogFunct(self, logFunct): """Set the function used by the chip library to log messages. 
The supplied object must be a python callable that accepts the following arguments: timestamp (integer) timestampUS (integer) module name (encoded UTF-8 string) log category (integer) message (encoded UTF-8 string) Specifying None configures the chip library to log directly to stdout.""" if logFunct is None: logFunct = 0 if not isinstance(logFunct, _LogMessageFunct): logFunct = _LogMessageFunct(logFunct) with self.networkLock: # NOTE: ChipStack must hold a reference to the CFUNCTYPE object while it is # set. Otherwise it may get garbage collected, and logging calls from the # chip library will fail. self._activeLogFunct = logFunct self._ChipStackLib.pychip_Stack_SetLogFunct(logFunct) def Shutdown(self): self._ChipStack.Call(lambda: self._dmLib.pychip_Stack_Shutdown()) self.networkLock = None self.completeEvent = None self._ChipStackLib = None self._chipDLLPath = None self.devMgr = None self.callbackRes = None def Call(self, callFunct): # throw error if op in progress self.callbackRes = None self.completeEvent.clear() with self.networkLock: res = callFunct() self.completeEvent.set() if res == 0 and self.callbackRes != None: return self.callbackRes return res def CallAsync(self, callFunct): # throw error if op in progress self.callbackRes = None self.completeEvent.clear() with self.networkLock: res = callFunct() if res != 0: self.completeEvent.set() raise self.ErrorToException(res) while not self.completeEvent.isSet(): if self.blockingCB: self.blockingCB() self.completeEvent.wait(0.05) if isinstance(self.callbackRes, ChipStackException): raise self.callbackRes return self.callbackRes def ErrorToException(self, err, devStatusPtr=None): if err == 4044 and devStatusPtr: devStatus = devStatusPtr.contents msg = ChipUtility.CStringToString( ( self._ChipStackLib.pychip_Stack_StatusReportToString( devStatus.ProfileId, devStatus.StatusCode ) ) ) sysErrorCode = ( devStatus.SysErrorCode if (devStatus.SysErrorCode != 0) else None ) if sysErrorCode != None: msg = msg + " (system err %d)" % (sysErrorCode) return DeviceError( devStatus.ProfileId, devStatus.StatusCode, sysErrorCode, msg ) else: return ChipStackError( err, ChipUtility.CStringToString( (self._ChipStackLib.pychip_Stack_ErrorToString(err)) ), ) def LocateChipDLL(self): if self._chipDLLPath: return self._chipDLLPath scriptDir = os.path.dirname(os.path.abspath(__file__)) # When properly installed in the chip package, the Chip Device Manager DLL will # be located in the package root directory, along side the package's # modules. dmDLLPath = os.path.join(scriptDir, ChipStackDLLBaseName) if os.path.exists(dmDLLPath): self._chipDLLPath = dmDLLPath return self._chipDLLPath # For the convenience of developers, search the list of parent paths relative to the # running script looking for an CHIP build directory containing the Chip Device # Manager DLL. This makes it possible to import and use the ChipDeviceMgr module # directly from a built copy of the CHIP source tree. 
buildMachineGlob = "%s-*-%s*" % (platform.machine(), platform.system().lower()) relDMDLLPathGlob = os.path.join( "build", buildMachineGlob, "src/controller/python/.libs", ChipStackDLLBaseName, ) for dir in self._AllDirsToRoot(scriptDir): dmDLLPathGlob = os.path.join(dir, relDMDLLPathGlob) for dmDLLPath in glob.glob(dmDLLPathGlob): if os.path.exists(dmDLLPath): self._chipDLLPath = dmDLLPath return self._chipDLLPath raise Exception( "Unable to locate Chip Device Manager DLL (%s); expected location: %s" % (ChipStackDLLBaseName, scriptDir) ) # ----- Private Members ----- def _AllDirsToRoot(self, dir): dir = os.path.abspath(dir) while True: yield dir parent = os.path.dirname(dir) if parent == "" or parent == dir: break dir = parent def _loadLib(self): if self._ChipStackLib is None: self._ChipStackLib = CDLL(self.LocateChipDLL()) self._ChipStackLib.pychip_Stack_Init.argtypes = [] self._ChipStackLib.pychip_Stack_Init.restype = c_uint32 self._ChipStackLib.pychip_Stack_Shutdown.argtypes = [] self._ChipStackLib.pychip_Stack_Shutdown.restype = c_uint32 self._ChipStackLib.pychip_Stack_StatusReportToString.argtypes = [ c_uint32, c_uint16, ] self._ChipStackLib.pychip_Stack_StatusReportToString.restype = c_char_p self._ChipStackLib.pychip_Stack_ErrorToString.argtypes = [c_uint32] self._ChipStackLib.pychip_Stack_ErrorToString.restype = c_char_p self._ChipStackLib.pychip_Stack_SetLogFunct.argtypes = [_LogMessageFunct] self._ChipStackLib.pychip_Stack_SetLogFunct.restype = c_uint32
37.042667
111
0.626593
from __future__ import absolute_import from __future__ import print_function import sys import os import time import glob import platform import logging from threading import Lock, Event from ctypes import * from .ChipUtility import ChipUtility from .ChipExceptions import * __all__ = [ "DeviceStatusStruct", "ChipStackException", "DeviceError", "ChipStackError", "ChipStack", ] ChipStackDLLBaseName = "_ChipDeviceCtrl.so" def _singleton(cls): instance = [None] def wrapper(*args, **kwargs): if instance[0] is None: instance[0] = cls(*args, **kwargs) return instance[0] return wrapper class DeviceStatusStruct(Structure): _fields_ = [ ("ProfileId", c_uint32), ("StatusCode", c_uint16), ("SysErrorCode", c_uint32), ] class LogCategory(object): Disabled = 0 Error = 1 Progress = 2 Detail = 3 Retain = 4 @staticmethod def categoryToLogLevel(cat): if cat == LogCategory.Error: return logging.ERROR elif cat == LogCategory.Progress: return logging.INFO elif cat == LogCategory.Detail: return logging.DEBUG elif cat == LogCategory.Retain: return logging.CRITICAL else: return logging.NOTSET class ChipLogFormatter(logging.Formatter): def __init__( self, datefmt=None, logModulePrefix=False, logLevel=False, logTimestamp=False, logMSecs=True, ): fmt = "%(message)s" if logModulePrefix: fmt = "CHIP:%(chip-module)s: " + fmt if logLevel: fmt = "%(levelname)s:" + fmt if datefmt is not None or logTimestamp: fmt = "%(asctime)s " + fmt super(ChipLogFormatter, self).__init__(fmt=fmt, datefmt=datefmt) self.logMSecs = logMSecs def formatTime(self, record, datefmt=None): if datefmt is None: timestampStr = time.strftime("%Y-%m-%d %H:%M:%S%z") if self.logMSecs: timestampUS = record.__dict__.get("timestamp-usec", 0) timestampStr = "%s.%03ld" % (timestampStr, timestampUS / 1000) return timestampStr _CompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p) _ErrorFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_ulong, POINTER(DeviceStatusStruct)) _LogMessageFunct = CFUNCTYPE(None, c_int64, c_int64, c_char_p, c_uint8, c_char_p) @_singleton class ChipStack(object): def __init__(self, installDefaultLogHandler=True): self.networkLock = Lock() self.completeEvent = Event() self._ChipStackLib = None self._chipDLLPath = None self.devMgr = None self.callbackRes = None self._activeLogFunct = None self.addModulePrefixToLogMessage = True self._loadLib() self.logger = logging.getLogger(__name__) self.setLogFunct(self.defaultLogFunct) if hasattr(self.logger, "hasHandlers"): hasHandlers = self.logger.hasHandlers() else: hasHandlers = False logger = self.logger while logger is not None: if len(logger.handlers) > 0: hasHandlers = True break if not logger.propagate: break logger = logger.parent if installDefaultLogHandler and not hasHandlers: logHandler = logging.StreamHandler(stream=sys.stdout) logHandler.setFormatter(ChipLogFormatter()) self.logger.addHandler(logHandler) self.logger.setLevel(logging.DEBUG) def HandleComplete(appState, reqState): self.callbackRes = True self.completeEvent.set() def HandleError(appState, reqState, err, devStatusPtr): self.callbackRes = self.ErrorToException(err, devStatusPtr) self.completeEvent.set() self.cbHandleComplete = _CompleteFunct(HandleComplete) self.cbHandleError = _ErrorFunct(HandleError) self.blockingCB = None res = self._ChipStackLib.pychip_Stack_Init() if res != 0: raise self._ChipStack.ErrorToException(res) @property def defaultLogFunct(self): def logFunct(timestamp, timestampUSec, moduleName, logCat, message): moduleName = ChipUtility.CStringToString(moduleName) message = ChipUtility.CStringToString(message) 
if self.addModulePrefixToLogMessage: message = "CHIP:%s: %s" % (moduleName, message) logLevel = LogCategory.categoryToLogLevel(logCat) msgAttrs = { "chip-module": moduleName, "timestamp": timestamp, "timestamp-usec": timestampUSec, } self.logger.log(logLevel, message, extra=msgAttrs) return logFunct def setLogFunct(self, logFunct): if logFunct is None: logFunct = 0 if not isinstance(logFunct, _LogMessageFunct): logFunct = _LogMessageFunct(logFunct) with self.networkLock: self._activeLogFunct = logFunct self._ChipStackLib.pychip_Stack_SetLogFunct(logFunct) def Shutdown(self): self._ChipStack.Call(lambda: self._dmLib.pychip_Stack_Shutdown()) self.networkLock = None self.completeEvent = None self._ChipStackLib = None self._chipDLLPath = None self.devMgr = None self.callbackRes = None def Call(self, callFunct): self.callbackRes = None self.completeEvent.clear() with self.networkLock: res = callFunct() self.completeEvent.set() if res == 0 and self.callbackRes != None: return self.callbackRes return res def CallAsync(self, callFunct): self.callbackRes = None self.completeEvent.clear() with self.networkLock: res = callFunct() if res != 0: self.completeEvent.set() raise self.ErrorToException(res) while not self.completeEvent.isSet(): if self.blockingCB: self.blockingCB() self.completeEvent.wait(0.05) if isinstance(self.callbackRes, ChipStackException): raise self.callbackRes return self.callbackRes def ErrorToException(self, err, devStatusPtr=None): if err == 4044 and devStatusPtr: devStatus = devStatusPtr.contents msg = ChipUtility.CStringToString( ( self._ChipStackLib.pychip_Stack_StatusReportToString( devStatus.ProfileId, devStatus.StatusCode ) ) ) sysErrorCode = ( devStatus.SysErrorCode if (devStatus.SysErrorCode != 0) else None ) if sysErrorCode != None: msg = msg + " (system err %d)" % (sysErrorCode) return DeviceError( devStatus.ProfileId, devStatus.StatusCode, sysErrorCode, msg ) else: return ChipStackError( err, ChipUtility.CStringToString( (self._ChipStackLib.pychip_Stack_ErrorToString(err)) ), ) def LocateChipDLL(self): if self._chipDLLPath: return self._chipDLLPath scriptDir = os.path.dirname(os.path.abspath(__file__)) # modules. dmDLLPath = os.path.join(scriptDir, ChipStackDLLBaseName) if os.path.exists(dmDLLPath): self._chipDLLPath = dmDLLPath return self._chipDLLPath # For the convenience of developers, search the list of parent paths relative to the # running script looking for an CHIP build directory containing the Chip Device # Manager DLL. This makes it possible to import and use the ChipDeviceMgr module # directly from a built copy of the CHIP source tree. 
buildMachineGlob = "%s-*-%s*" % (platform.machine(), platform.system().lower()) relDMDLLPathGlob = os.path.join( "build", buildMachineGlob, "src/controller/python/.libs", ChipStackDLLBaseName, ) for dir in self._AllDirsToRoot(scriptDir): dmDLLPathGlob = os.path.join(dir, relDMDLLPathGlob) for dmDLLPath in glob.glob(dmDLLPathGlob): if os.path.exists(dmDLLPath): self._chipDLLPath = dmDLLPath return self._chipDLLPath raise Exception( "Unable to locate Chip Device Manager DLL (%s); expected location: %s" % (ChipStackDLLBaseName, scriptDir) ) # ----- Private Members ----- def _AllDirsToRoot(self, dir): dir = os.path.abspath(dir) while True: yield dir parent = os.path.dirname(dir) if parent == "" or parent == dir: break dir = parent def _loadLib(self): if self._ChipStackLib is None: self._ChipStackLib = CDLL(self.LocateChipDLL()) self._ChipStackLib.pychip_Stack_Init.argtypes = [] self._ChipStackLib.pychip_Stack_Init.restype = c_uint32 self._ChipStackLib.pychip_Stack_Shutdown.argtypes = [] self._ChipStackLib.pychip_Stack_Shutdown.restype = c_uint32 self._ChipStackLib.pychip_Stack_StatusReportToString.argtypes = [ c_uint32, c_uint16, ] self._ChipStackLib.pychip_Stack_StatusReportToString.restype = c_char_p self._ChipStackLib.pychip_Stack_ErrorToString.argtypes = [c_uint32] self._ChipStackLib.pychip_Stack_ErrorToString.restype = c_char_p self._ChipStackLib.pychip_Stack_SetLogFunct.argtypes = [_LogMessageFunct] self._ChipStackLib.pychip_Stack_SetLogFunct.restype = c_uint32
true
true
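The long comment in the ChipStack record above lists several ways to override the default stdout log handler, including installing a handler on the 'chip.ChipStack' logger before the stack is created. A stdlib-only sketch of that approach is shown below; the formatter string is an arbitrary example, and the commented-out lines indicate where the record's ChipStack would be constructed (that step needs the chip package and its native DLL, so it is left as a comment here).

import logging
import sys

# Route chip log output to stderr instead of the stack's default stdout StreamHandler.
handler = logging.StreamHandler(stream=sys.stderr)
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))

chip_logger = logging.getLogger("chip.ChipStack")
chip_logger.addHandler(handler)
chip_logger.setLevel(logging.INFO)

# Creating the stack afterwards would pick up this handler instead of installing its own:
# from chip.ChipStack import ChipStack
# stack = ChipStack(installDefaultLogHandler=False)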