bugbounty/settings_utils.py | repo: 18F/tts-bug-bounty-dashboard @ a9be81ff3eaf25db3192b64b5580764db9555dec | license: CC0-1.0 | 1,339 bytes | 7 stars, 35 issues, 3 forks | hexsha f700405ef3096905f3ac4c83f8ec59ec74c727e1

import os
import json
Environ = os._Environ
def is_on_cloudfoundry(env: Environ=os.environ) -> bool:
return 'VCAP_SERVICES' in env
def load_cups_from_vcap_services(name: str, env: Environ=os.environ) -> None:
'''
Detects if VCAP_SERVICES exists in the environment; if so, parses
it and imports all the credentials from the given custom
user-provided service (CUPS) as strings into the environment.
For more details on CUPS, see:
https://docs.cloudfoundry.org/devguide/services/user-provided.html
'''
if not is_on_cloudfoundry(env):
return
vcap = json.loads(env['VCAP_SERVICES'])
for entry in vcap.get('user-provided', []):
if entry['name'] == name:
for key, value in entry['credentials'].items():
env[key] = value
def load_database_url_from_vcap_services(name: str, service: str,
env: Environ=os.environ) -> str:
"""
Sets os.environ[DATABASE_URL] from a service entry in VCAP_SERVICES.
"""
if not is_on_cloudfoundry(env):
return
# FIXME: this'll break if there are multiple databases. Not an issue right
# now, but could be in the future. Keep an eye on it.
vcap = json.loads(env['VCAP_SERVICES'])
env['DATABASE_URL'] = vcap[service][0]["credentials"]["uri"]
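
A minimal usage sketch for the helpers above, as they would typically be called from a Django-style settings module; the service names ('bugbounty-cups', 'bugbounty-db', 'aws-rds') and the fallback values are illustrative placeholders, not taken from the repository:

# settings.py (hypothetical)
import os
from bugbounty.settings_utils import (
    is_on_cloudfoundry,
    load_cups_from_vcap_services,
    load_database_url_from_vcap_services,
)

if is_on_cloudfoundry():
    # Copy credentials of the named user-provided service into os.environ.
    load_cups_from_vcap_services('bugbounty-cups')
    # Point DATABASE_URL at the first bound instance of the 'aws-rds' service.
    load_database_url_from_vcap_services('bugbounty-db', service='aws-rds')

SECRET_KEY = os.environ.get('SECRET_KEY', 'dev-only-not-secret')
DATABASE_URL = os.environ.get('DATABASE_URL', 'sqlite:///db.sqlite3')
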
tvml/mongostats.py | repo: pazlvbanke/tvmlopen @ 740afdb4982768cbcde7c7d5c8f3d09dbae81111 | license: MIT | 2,723 bytes | 4 issues, 1 fork | hexsha f70043a615333f2ac090006c4d192287d490deb1

from pymongo import MongoClient
from pymongo import ReadPreference
from datetime import datetime, timedelta
class Mongo(MongoClient):
def __init__(self, username, password, host, db='tags', collection='tweets_pipeline_v2'):
uri = f"mongodb://{username}:{password}@{host}/{db}"
super(Mongo, self).__init__(host=uri,
authSource=db,
authMechanism='SCRAM-SHA-256',
port=27017,
replicaset="rs0",
read_preference=ReadPreference.SECONDARY,
)
self.database = self.get_default_database()
self.collection = collection
def pipelined(self, count=True):
query = {"status": "pipelined"}
if count:
return self.database[self.collection].count_documents(query)
return self.database[self.collection].find(query)
def feed(self, count=True):
query = {"status": "graphicone_feed"}
if count:
return self.database[self.collection].count_documents(query)
return self.database[self.collection].find(query)
def search(self, count=True):
query = {"status": "graphicone_search"}
if count:
return self.database[self.collection].count_documents(query)
return self.database[self.collection].find(query)
def left_for_analysts(self, count=True):
query = {"in_app": {"$exists": False},
"status": "graphicone_feed"}
if count:
return self.database[self.collection].count_documents(query)
return self.database[self.collection].find(query)
def removed_validators(self, count=True):
query = {"validator_username": {"$exists": True},
"status": "deleted"}
if count:
return self.database[self.collection].count_documents(query)
return self.database[self.collection].find(query)
def removed_analysts(self, count=True):
query = {"status": "deleted_from_analytics"}
if count:
return self.database[self.collection].count_documents(query)
return self.database[self.collection].find(query)
# if __name__ == "__main__":
# _username = "login"
# _password = "passwd"
# mongodb_host = "host address"
#
# mongo_client = Mongo(_username, _password, mongodb_host)
# print(mongo_client.pipelined())
# print(mongo_client.search())
# print(mongo_client.feed())
# print(mongo_client.left_for_analysts())
# print(mongo_client.removed_validators())
# print(mongo_client.removed_analysts())
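
Each accessor above can also return a cursor instead of a count; a small hedged sketch (the credentials and host are placeholders, and only the standard _id field plus the status field used in the queries above are printed):

client = Mongo('login', 'passwd', 'mongo.example.com')
# Iterate over the matching documents rather than counting them.
for doc in client.removed_analysts(count=False):
    print(doc['_id'], doc.get('status'))
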
examples/_test_all.py | repo: likianta/lk-lambdex @ e9b5f96ea94c4b0fe25fd8adc7195e7366d26a6e | license: MIT | 984 bytes | hexsha f70044103ae2198905b5fb0cb3bb0f501d542759

from lk_logger import lk
from examples import t01_simple_examples as t01
from examples import t02_referencing as t02
from examples import t03_fibonacci as t03
from examples import t04_catch_exceptions as t04
from examples import t05_qt_button_click_event as t05
from examples import t06_lambdex_kwargs as t06
""" Rules.zh:
1. 所有模块的待测函数, 都必须以 test_ 开头
2. 所有模块的待测函数, 都必须是无参函数
"""
if __name__ == '__main__':
with lk.counting(6):
for mod in [t01, t02, t03, t04, t05, t06]:
lk.logdx(mod.__name__)
with lk.counting():
for name in dir(mod):
if name.startswith('test_'):
func = getattr(mod, name)
lk.logax('testing', func.__name__)
try:
func()
except Exception as e:
lk.logt('[I1117]', e)
continue
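
A module that satisfies these rules could look like the hypothetical sketch below (the module name and tests are invented; they only illustrate the test_ prefix and the no-parameter requirement):

# examples/t07_misc.py (hypothetical)
def test_truthiness():
    assert bool([]) is False

def test_string_join():
    assert '-'.join(['a', 'b']) == 'a-b'
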
Machine Learning A-Z Template Folder/Part 2 - Regression/Section 5 - Multiple Linear Regression/data_preprocessing_template.py | repo: ManjunathaPatkar/Machine-Learning @ f1c6ec1a9f802f6e88ed67c0da6c1e9373790537 | license: MIT | 2,279 bytes | hexsha f700452101a3e4977fe2100fac23e05462b4fec1

# Data Preprocessing Template
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('50_Startups.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 4].values
#encoding independent variable state
#from sklearn.preprocessing import LabelEncoder, OneHotEncoder
#labelencoder_X = LabelEncoder()
#X[:, 3] = labelencoder_X.fit_transform(X[:, 3])
#onehotencoder = OneHotEncoder(categorical_features = [3])
#X = onehotencoder.fit_transform(X).toarray()
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
from sklearn.compose import ColumnTransformer
ct = ColumnTransformer([("State", OneHotEncoder(), [3])], remainder = 'passthrough')
X= ct.fit_transform(X)
#avoiding the dummy variable trap
X=X[:,1:]
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""
#fitting multiple linear regression to the training set
from sklearn.linear_model import LinearRegression
regressor=LinearRegression()
regressor.fit(X_train,y_train)
#Predicting the test set results
y_pred=regressor.predict(X_test)
#Building the optimal model using backward elimination
import statsmodels.api as sm
X=np.append(arr=np.ones((50,1)).astype(int),values=X,axis=1)
#X_opt=X[:,[0,1,2,3,4,5]]
X_opt = np.array(X[:, [0, 1, 2, 3, 4, 5]], dtype=float)
regressor_OLS=sm.OLS(endog=y,exog=X_opt).fit()
regressor_OLS.summary()
X_opt = np.array(X[:, [0, 1, 3, 4, 5]], dtype=float)
regressor_OLS=sm.OLS(endog=y,exog=X_opt).fit()
regressor_OLS.summary()
X_opt = np.array(X[:, [0, 3, 4, 5]], dtype=float)
regressor_OLS=sm.OLS(endog=y,exog=X_opt).fit()
regressor_OLS.summary()
X_opt = np.array(X[:, [0, 3, 5]], dtype=float)
regressor_OLS=sm.OLS(endog=y,exog=X_opt).fit()
regressor_OLS.summary()
X_opt = np.array(X[:, [0, 3]], dtype=float)
regressor_OLS=sm.OLS(endog=y,exog=X_opt).fit()
regressor_OLS.summary()
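
The manual elimination steps above repeat one fixed recipe: fit OLS, find the predictor with the highest p-value, drop it, and refit. A hedged sketch of the same loop written as a function (the 0.05 threshold and the names are illustrative; the constant column is not pinned here, although in the manual steps above it happens to survive):

import numpy as np
import statsmodels.api as sm

def backward_elimination(X, y, significance_level=0.05):
    columns = list(range(X.shape[1]))
    while True:
        model = sm.OLS(endog=y, exog=np.array(X[:, columns], dtype=float)).fit()
        if model.pvalues.max() <= significance_level:
            return columns, model
        # Drop the column whose coefficient has the highest p-value, then refit.
        del columns[int(model.pvalues.argmax())]

# selected_columns, final_model = backward_elimination(X, y)
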
tools/ci_build/build.py | repo: marijnfs/onnxruntime @ 6e1eb4b0efca9644c5f8979fbded9416fdd722dc | license: MIT | 85,068 bytes | 1 star | hexsha f70045418bc49a61f9a7a48205189a79ca91e491

#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import glob
import multiprocessing
import os
import re
import shutil
import subprocess
import sys
import hashlib
from logger import log
class BaseError(Exception):
"""Base class for errors originating from build.py."""
pass
class BuildError(BaseError):
"""Error from running build steps."""
def __init__(self, *messages):
super().__init__("\n".join(messages))
class UsageError(BaseError):
"""Usage related error."""
def __init__(self, message):
super().__init__(message)
def _check_python_version():
# According to the BUILD.md, python 3.5+ is required:
# Python 2 is definitely not supported and it should be safer to consider
# it won't run with python 4:
if sys.version_info[0] != 3:
raise BuildError(
"Bad python major version: expecting python 3, found version "
"'{}'".format(sys.version))
if sys.version_info[1] < 5:
raise BuildError(
"Bad python minor version: expecting python 3.5+, found version "
"'{}'".format(sys.version))
_check_python_version()
def parse_arguments():
parser = argparse.ArgumentParser(
description="ONNXRuntime CI build driver.",
usage=""" # noqa
Default behavior is --update --build --test for native architecture builds.
Default behavior is --update --build for cross-compiled builds.
The Update phase will update git submodules, and run cmake to generate makefiles.
The Build phase will build all projects.
The Test phase will run all unit tests, and optionally the ONNX tests.
Use the individual flags to only run the specified stages.
""")
# Main arguments
parser.add_argument(
"--build_dir", required=True, help="Path to the build directory.")
parser.add_argument(
"--config", nargs="+", default=["Debug"],
choices=["Debug", "MinSizeRel", "Release", "RelWithDebInfo"],
help="Configuration(s) to build.")
parser.add_argument(
"--update", action='store_true', help="Update makefiles.")
parser.add_argument("--build", action='store_true', help="Build.")
parser.add_argument(
"--clean", action='store_true',
help="Run 'cmake --build --target clean' for the selected config/s.")
parser.add_argument(
"--parallel", action='store_true', help="""Use parallel build.
The build setup doesn't get all dependencies right, so --parallel
only works if you're just rebuilding ONNXRuntime code. If you've
done an update that fetched external dependencies you have to build
    without --parallel the first time. Once that's done, run with
"--build --parallel --test" to just build in
parallel and run tests.""")
parser.add_argument("--test", action='store_true', help="Run unit tests.")
parser.add_argument(
"--skip_tests", action='store_true', help="Skip all tests.")
# Training options
parser.add_argument(
"--enable_nvtx_profile", action='store_true', help="Enable NVTX profile in ORT.")
parser.add_argument(
"--enable_training", action='store_true', help="Enable training in ORT.")
parser.add_argument(
"--enable_training_python_frontend_e2e_tests", action="store_true",
help="Enable the pytorch frontend training tests.")
parser.add_argument(
"--enable_training_pipeline_e2e_tests", action="store_true",
help="Enable the pipeline c++ e2e tests.")
parser.add_argument(
"--use_horovod", action='store_true', help="Enable Horovod.")
parser.add_argument(
"--mpi_home", help="Path to MPI installation dir")
parser.add_argument(
"--nccl_home", help="Path to NCCL installation dir")
# enable ONNX tests
parser.add_argument(
"--enable_onnx_tests", action='store_true',
help="""When running the Test phase, run onnx_test_running against
available test data directories.""")
parser.add_argument("--path_to_protoc_exe", help="Path to protoc exe.")
parser.add_argument(
"--fuzz_testing", action='store_true', help="Enable Fuzz testing of the onnxruntime.")
parser.add_argument(
"--enable_symbolic_shape_infer_tests", action='store_true',
help="""When running the Test phase, run symbolic shape inference against
available test data directories.""")
    # generate documentation
parser.add_argument(
"--gen_doc", action='store_true',
help="Generate documentation on contrib ops")
# CUDA related
parser.add_argument("--use_cuda", action='store_true', help="Enable CUDA.")
parser.add_argument(
"--cuda_version", help="The version of CUDA toolkit to use. "
"Auto-detect if not specified. e.g. 9.0")
parser.add_argument(
"--cuda_home", help="Path to CUDA home."
"Read from CUDA_HOME environment variable if --use_cuda is true and "
"--cuda_home is not specified.")
parser.add_argument(
"--cudnn_home", help="Path to CUDNN home. "
"Read from CUDNN_HOME environment variable if --use_cuda is true and "
"--cudnn_home is not specified.")
# Python bindings
parser.add_argument(
"--enable_pybind", action='store_true', help="Enable Python Bindings.")
parser.add_argument(
"--build_wheel", action='store_true', help="Build Python Wheel.")
parser.add_argument(
"--wheel_name_suffix", help="Suffix to append to created wheel names. "
"This value is currently only used for nightly builds.")
parser.add_argument(
"--numpy_version", help="Installs a specific version of numpy "
"before building the python binding.")
parser.add_argument(
"--skip-keras-test", action='store_true',
help="Skip tests with Keras if keras is installed")
# C-Sharp bindings
parser.add_argument(
"--build_csharp", action='store_true',
help="Build C#.Net DLL and NuGet package. This should be only used in CI pipelines. "
"For building C# bindings and packaging them into nuget package use --build_nuget arg.")
parser.add_argument(
"--build_nuget", action='store_true',
help="Build C#.Net DLL and NuGet package on the local machine. "
"Currently only Windows and Linux platforms are supported.")
# Java bindings
parser.add_argument(
"--build_java", action='store_true', help="Build Java bindings.")
# Node.js binding
parser.add_argument(
"--build_nodejs", action='store_true',
help="Build Node.js binding and NPM package.")
# Build a shared lib
parser.add_argument(
"--build_shared_lib", action='store_true',
help="Build a shared library for the ONNXRuntime.")
# Build options
parser.add_argument(
"--cmake_extra_defines", nargs="+",
help="Extra definitions to pass to CMake during build system "
"generation. These are just CMake -D options without the leading -D.")
parser.add_argument(
"--target",
help="Build a specific target, e.g. winml_dll")
parser.add_argument(
"--x86", action='store_true',
help="Create x86 makefiles. Requires --update and no existing cache "
"CMake setup. Delete CMakeCache.txt if needed")
parser.add_argument(
"--arm", action='store_true',
help="Create ARM makefiles. Requires --update and no existing cache "
"CMake setup. Delete CMakeCache.txt if needed")
parser.add_argument(
"--arm64", action='store_true',
help="Create ARM64 makefiles. Requires --update and no existing cache "
"CMake setup. Delete CMakeCache.txt if needed")
parser.add_argument(
"--msvc_toolset", help="MSVC toolset to use. e.g. 14.11")
parser.add_argument("--android", action='store_true', help='Build for Android')
parser.add_argument(
"--android_abi", default="arm64-v8a",
choices=["armeabi-v7a", "arm64-v8a", "x86", "x86_64"],
help="Specify the target Android Application Binary Interface (ABI)")
parser.add_argument("--android_api", type=int, default=27, help='Android API Level, e.g. 21')
parser.add_argument("--android_sdk_path", type=str, help='Path to the Android SDK')
parser.add_argument("--android_ndk_path", default="", help="Path to the Android NDK")
parser.add_argument("--android_cpp_shared", action="store_true",
help="Build with shared libc++ instead of the default static libc++.")
parser.add_argument("--test_binary_size", action="store_true",
help="If enabled, build will fail when the built binary size is larger than the threshold. "
"This only applies to Android Minimal build for now.")
parser.add_argument("--ios", action='store_true', help="build for ios")
parser.add_argument(
"--ios_sysroot", default="",
help="Specify the location name of the macOS platform SDK to be used")
parser.add_argument(
"--ios_toolchain_dir", default="",
help="Path to ios toolchain binaries")
parser.add_argument(
"--ios_toolchain_file", default="",
help="Path to ios toolchain file, "
"or cmake/onnxruntime_ios.toolchain.cmake will be used")
parser.add_argument(
"--xcode_code_signing_team_id", default="",
help="The development team ID used for code signing in Xcode")
parser.add_argument(
"--use_xcode", action='store_true',
help="Use Xcode as cmake generator, this is only supported on MacOS.")
parser.add_argument(
"--osx_arch", default="arm64", choices=["arm64", "x86_64"],
help="Specify the Target specific architectures for macOS and iOS, This is only supported on MacOS")
parser.add_argument(
"--apple_deploy_target", type=str,
help="Specify the minimum version of the target platform "
"(e.g. macOS or iOS)"
"This is only supported on MacOS")
# Arguments needed by CI
parser.add_argument(
"--cmake_path", default="cmake", help="Path to the CMake program.")
parser.add_argument(
"--ctest_path", default="ctest", help="Path to the CTest program.")
parser.add_argument(
"--skip_submodule_sync", action='store_true', help="Don't do a "
"'git submodule update'. Makes the Update phase faster.")
parser.add_argument(
"--use_vstest", action='store_true',
help="Use use_vstest for running unitests.")
parser.add_argument(
"--use_jemalloc", action='store_true', help="Use jemalloc.")
parser.add_argument(
"--use_mimalloc", default=['none'],
choices=['none', 'stl', 'arena', 'all'], help="Use mimalloc.")
parser.add_argument(
"--use_openblas", action='store_true', help="Build with OpenBLAS.")
parser.add_argument(
"--use_dnnl", action='store_true', help="Build with DNNL.")
parser.add_argument(
"--use_mklml", action='store_true', help="Build with MKLML.")
parser.add_argument(
"--use_featurizers", action='store_true',
help="Build with ML Featurizer support.")
parser.add_argument(
"--use_ngraph", action='store_true', help="Build with nGraph.")
parser.add_argument(
"--use_openvino", nargs="?", const="CPU_FP32",
choices=["CPU_FP32", "GPU_FP32", "GPU_FP16", "VAD-M_FP16",
"MYRIAD_FP16", "VAD-F_FP32"],
help="Build with OpenVINO for specific hardware.")
parser.add_argument(
"--use_nnapi", action='store_true', help="Build with NNAPI support.")
parser.add_argument(
"--use_rknpu", action='store_true', help="Build with RKNPU.")
parser.add_argument(
"--use_preinstalled_eigen", action='store_true',
help="Use pre-installed Eigen.")
parser.add_argument("--eigen_path", help="Path to pre-installed Eigen.")
parser.add_argument(
"--use_openmp", action='store_true', help="Build with OpenMP")
parser.add_argument(
"--enable_msinternal", action="store_true",
help="Enable for Microsoft internal builds only.")
parser.add_argument("--llvm_path", help="Path to llvm dir")
parser.add_argument(
"--use_vitisai", action='store_true', help="Build with Vitis-AI")
parser.add_argument(
"--use_nuphar", action='store_true', help="Build with nuphar")
parser.add_argument(
"--use_tensorrt", action='store_true', help="Build with TensorRT")
parser.add_argument(
"--tensorrt_home", help="Path to TensorRT installation dir")
parser.add_argument(
"--use_migraphx", action='store_true', help="Build with MIGraphX")
parser.add_argument(
"--migraphx_home", help="Path to MIGraphX installation dir")
parser.add_argument(
"--use_full_protobuf", action='store_true',
help="Use the full protobuf library")
parser.add_argument(
"--skip_onnx_tests", action='store_true', help="Explicitly disable "
"all onnx related tests. Note: Use --skip_tests to skip all tests.")
parser.add_argument(
"--skip_winml_tests", action='store_true',
help="Explicitly disable all WinML related tests")
parser.add_argument(
"--skip_nodejs_tests", action='store_true',
help="Explicitly disable all Node.js binding tests")
parser.add_argument(
"--enable_msvc_static_runtime", action='store_true',
help="Enable static linking of MSVC runtimes.")
parser.add_argument(
"--enable_language_interop_ops", action='store_true',
help="Enable operator implemented in language other than cpp")
parser.add_argument(
"--cmake_generator",
choices=['Visual Studio 15 2017', 'Visual Studio 16 2019', 'Ninja'],
default='Visual Studio 15 2017' if is_windows() else None,
help="Specify the generator that CMake invokes. "
"This is only supported on Windows")
parser.add_argument(
"--enable_multi_device_test", action='store_true',
help="Test with multi-device. Mostly used for multi-device GPU")
parser.add_argument(
"--use_dml", action='store_true', help="Build with DirectML.")
parser.add_argument(
"--use_winml", action='store_true', help="Build with WinML.")
parser.add_argument(
"--winml_root_namespace_override", type=str,
help="Specify the namespace that WinML builds into.")
parser.add_argument(
"--use_telemetry", action='store_true',
help="Only official builds can set this flag to enable telemetry.")
parser.add_argument(
"--enable_wcos", action='store_true',
help="Build for Windows Core OS.")
parser.add_argument(
"--enable_windows_store", action='store_true',
help="Build for Windows Store")
parser.add_argument(
"--enable_lto", action='store_true',
help="Enable Link Time Optimization")
parser.add_argument(
"--use_acl", nargs="?", const="ACL_1905",
choices=["ACL_1902", "ACL_1905", "ACL_1908", "ACL_2002"],
help="Build with ACL for ARM architectures.")
parser.add_argument(
"--use_armnn", action='store_true',
help="Enable ArmNN Execution Provider.")
parser.add_argument(
"--armnn_relu", action='store_true',
help="Use the Relu operator implementation from the ArmNN EP.")
parser.add_argument(
"--armnn_bn", action='store_true',
help="Use the Batch Normalization operator implementation from the ArmNN EP.")
parser.add_argument(
"--build_micro_benchmarks", action='store_true',
help="Build ONNXRuntime micro-benchmarks.")
# options to reduce binary size
parser.add_argument("--minimal_build", action='store_true',
help="Create a build that only supports ORT format models. "
"See /docs/ONNX_Runtime_Format_Model_Usage.md for more information. "
"RTTI is automatically disabled in a minimal build.")
parser.add_argument("--include_ops_by_model", type=str, help="include ops from model(s) under designated path.")
parser.add_argument("--include_ops_by_config", type=str,
help="include ops from config file. "
"See /docs/Reduced_Operator_Kernel_build.md for more information.")
parser.add_argument("--disable_contrib_ops", action='store_true',
help="Disable contrib ops (reduces binary size)")
parser.add_argument("--disable_ml_ops", action='store_true',
help="Disable traditional ML ops (reduces binary size)")
parser.add_argument("--disable_rtti", action='store_true', help="Disable RTTI (reduces binary size)")
parser.add_argument("--disable_exceptions", action='store_true',
help="Disable exceptions to reduce binary size. Requires --minimal_build.")
parser.add_argument("--disable_ort_format_load", action='store_true',
help='Disable support for loading ORT format models in a non-minimal build.')
return parser.parse_args()
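# Illustrative invocations of this script (the paths and flag combinations below
# are examples only, not an exhaustive or recommended set):
#   python3 tools/ci_build/build.py --build_dir build/Linux --config Release \
#       --build_shared_lib --parallel --enable_pybind --build_wheel
#   python3 tools/ci_build/build.py --build_dir build/Windows --config RelWithDebInfo \
#       --use_cuda --cuda_home "C:/cuda" --cudnn_home "C:/cudnn" \
#       --cmake_generator "Visual Studio 16 2019"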
def resolve_executable_path(command_or_path):
"""Returns the absolute path of an executable."""
executable_path = shutil.which(command_or_path)
if executable_path is None:
raise BuildError("Failed to resolve executable path for "
"'{}'.".format(command_or_path))
return os.path.realpath(executable_path)
def is_windows():
return sys.platform.startswith("win")
def is_macOS():
return sys.platform.startswith("darwin")
def is_linux():
return sys.platform.startswith("linux")
def get_linux_distro():
try:
with open('/etc/os-release', 'r') as f:
dist_info = dict(
line.strip().split('=', 1) for line in f.readlines())
return dist_info.get('NAME', '').strip('"'), dist_info.get(
'VERSION', '').strip('"')
except (IOError, ValueError):
return '', ''
def is_ubuntu_1604():
dist, ver = get_linux_distro()
return dist == 'Ubuntu' and ver.startswith('16.04')
def get_config_build_dir(build_dir, config):
# build directory per configuration
return os.path.join(build_dir, config)
def run_subprocess(args, cwd=None, capture=False, dll_path=None,
shell=False, env={}):
log.info("Running subprocess in '{0}'\n{1}".format(
cwd or os.getcwd(), args))
my_env = os.environ.copy()
if dll_path:
if is_windows():
my_env["PATH"] = dll_path + os.pathsep + my_env["PATH"]
else:
if "LD_LIBRARY_PATH" in my_env:
my_env["LD_LIBRARY_PATH"] += os.pathsep + dll_path
else:
my_env["LD_LIBRARY_PATH"] = dll_path
stdout, stderr = (subprocess.PIPE, subprocess.STDOUT) if capture else (
None, None)
my_env.update(env)
completed_process = subprocess.run(
args, cwd=cwd, check=True, stdout=stdout, stderr=stderr,
env=my_env, shell=shell)
log.debug("Subprocess completed. Return code=" +
str(completed_process.returncode))
return completed_process
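# Hypothetical usage of run_subprocess: run a built test binary with the build
# output directory added to the dynamic library search path (names are examples):
#   run_subprocess(['./onnxruntime_test_all'], cwd=config_build_dir,
#                  dll_path=os.path.join(config_build_dir, 'Release'))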
def update_submodules(source_dir):
run_subprocess(["git", "submodule", "sync", "--recursive"], cwd=source_dir)
run_subprocess(["git", "submodule", "update", "--init", "--recursive"],
cwd=source_dir)
def is_docker():
path = '/proc/self/cgroup'
return (
os.path.exists('/.dockerenv') or
os.path.isfile(path) and any('docker' in line for line in open(path))
)
def is_sudo():
return 'SUDO_UID' in os.environ.keys()
def install_apt_package(package):
have = package in str(run_subprocess(
["apt", "list", "--installed", package], capture=True).stdout)
if not have:
if is_sudo():
run_subprocess(['apt-get', 'install', '-y', package])
else:
raise BuildError(package + " APT package missing. Please re-run "
"this script using sudo to install.")
def install_ubuntu_deps(args):
"""Check if the necessary Ubuntu dependencies are installed.
Not required on docker. Provide help output if missing."""
# check we need the packages first
if not (args.enable_pybind or args.use_openblas):
return
# not needed on docker as packages are pre-installed
if not is_docker():
try:
if args.enable_pybind:
install_apt_package("python3")
if args.use_openblas:
install_apt_package("libopenblas-dev")
except Exception as e:
raise BuildError("Error setting up required APT packages. "
"{}".format(str(e)))
def install_python_deps(numpy_version=""):
dep_packages = ['setuptools', 'wheel', 'pytest']
dep_packages.append('numpy=={}'.format(numpy_version) if numpy_version
else 'numpy>=1.16.6')
dep_packages.append('sympy>=1.1')
dep_packages.append('packaging')
dep_packages.append('cerberus')
run_subprocess([sys.executable, '-m', 'pip', 'install', '--trusted-host',
'files.pythonhosted.org'] + dep_packages)
# We need to install Torch to test certain functionalities of the ORT Python package
def install_torch():
    # Command works for both Windows and Linux
run_subprocess([sys.executable, '-m', 'pip', 'install', '--trusted-host',
'files.pythonhosted.org', 'torch===1.5.1+cu101', 'torchvision===0.6.1+cu101',
'-f', 'https://download.pytorch.org/whl/torch_stable.html'])
def check_md5(filename, expected_md5):
if not os.path.exists(filename):
return False
hash_md5 = hashlib.md5()
BLOCKSIZE = 1024*64
with open(filename, "rb") as f:
buf = f.read(BLOCKSIZE)
while len(buf) > 0:
hash_md5.update(buf)
buf = f.read(BLOCKSIZE)
hex = hash_md5.hexdigest()
if hex != expected_md5:
log.info('md5 mismatch, expect %s, got %s' % (expected_md5, hex))
os.remove(filename)
return False
return True
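# Hypothetical usage of check_md5: re-download a cached archive when its digest no
# longer matches the expected value (the path, digest and helper below are made up):
#   if not check_md5('/tmp/protoc.zip', '0123456789abcdef0123456789abcdef'):
#       download_protoc_archive()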
def setup_test_data(build_dir, configs):
# create a shortcut for test models if there is a 'models'
# folder in build_dir
if is_windows():
src_model_dir = os.path.join(build_dir, 'models')
if os.path.exists('C:\\local\\models') and not os.path.exists(
src_model_dir):
log.debug("creating shortcut %s -> %s" % (
'C:\\local\\models', src_model_dir))
run_subprocess(['mklink', '/D', '/J', src_model_dir,
'C:\\local\\models'], shell=True)
for config in configs:
config_build_dir = get_config_build_dir(build_dir, config)
os.makedirs(config_build_dir, exist_ok=True)
dest_model_dir = os.path.join(config_build_dir, 'models')
if os.path.exists('C:\\local\\models') and not os.path.exists(
dest_model_dir):
log.debug("creating shortcut %s -> %s" % (
'C:\\local\\models', dest_model_dir))
run_subprocess(['mklink', '/D', '/J', dest_model_dir,
'C:\\local\\models'], shell=True)
elif os.path.exists(src_model_dir) and not os.path.exists(
dest_model_dir):
log.debug("creating shortcut %s -> %s" % (
src_model_dir, dest_model_dir))
run_subprocess(['mklink', '/D', '/J', dest_model_dir,
src_model_dir], shell=True)
def use_dev_mode(args):
if args.use_acl:
return 'OFF'
if args.use_armnn:
return 'OFF'
if args.ios and is_macOS():
return 'OFF'
return 'ON'
def generate_build_tree(cmake_path, source_dir, build_dir, cuda_home, cudnn_home,
mpi_home, nccl_home, tensorrt_home, migraphx_home,
path_to_protoc_exe, configs, cmake_extra_defines, args, cmake_extra_args):
log.info("Generating CMake build tree")
cmake_dir = os.path.join(source_dir, "cmake")
# TODO: fix jemalloc build so it does not conflict with onnxruntime
    # shared lib builds. (e.g. onnxruntime_pybind)
# for now, disable jemalloc if pybind is also enabled.
cmake_args = [
cmake_path, cmake_dir,
"-Donnxruntime_RUN_ONNX_TESTS=" + (
"ON" if args.enable_onnx_tests else "OFF"),
"-Donnxruntime_BUILD_WINML_TESTS=" + (
"OFF" if args.skip_winml_tests else "ON"),
"-Donnxruntime_GENERATE_TEST_REPORTS=ON",
"-Donnxruntime_DEV_MODE=" + use_dev_mode(args),
"-DPYTHON_EXECUTABLE=" + sys.executable,
"-Donnxruntime_USE_CUDA=" + ("ON" if args.use_cuda else "OFF"),
"-Donnxruntime_CUDNN_HOME=" + (cudnn_home if args.use_cuda else ""),
"-Donnxruntime_USE_FEATURIZERS=" + (
"ON" if args.use_featurizers else "OFF"),
"-Donnxruntime_CUDA_HOME=" + (cuda_home if args.use_cuda else ""),
"-Donnxruntime_USE_JEMALLOC=" + ("ON" if args.use_jemalloc else "OFF"),
"-Donnxruntime_USE_MIMALLOC_STL_ALLOCATOR=" + (
"ON" if args.use_mimalloc == "stl" or
args.use_mimalloc == "all" else "OFF"),
"-Donnxruntime_USE_MIMALLOC_ARENA_ALLOCATOR=" + (
"ON" if args.use_mimalloc == "arena" or
args.use_mimalloc == "all" else "OFF"),
"-Donnxruntime_ENABLE_PYTHON=" + (
"ON" if args.enable_pybind else "OFF"),
"-Donnxruntime_BUILD_CSHARP=" + ("ON" if args.build_csharp else "OFF"),
"-Donnxruntime_BUILD_JAVA=" + ("ON" if args.build_java else "OFF"),
"-Donnxruntime_BUILD_NODEJS=" + ("ON" if args.build_nodejs else "OFF"),
"-Donnxruntime_BUILD_SHARED_LIB=" + (
"ON" if args.build_shared_lib else "OFF"),
"-Donnxruntime_USE_EIGEN_FOR_BLAS=" + (
"OFF" if args.use_openblas else "ON"),
"-Donnxruntime_USE_OPENBLAS=" + ("ON" if args.use_openblas else "OFF"),
"-Donnxruntime_USE_DNNL=" + ("ON" if args.use_dnnl else "OFF"),
"-Donnxruntime_USE_MKLML=" + ("ON" if args.use_mklml else "OFF"),
"-Donnxruntime_USE_NGRAPH=" + ("ON" if args.use_ngraph else "OFF"),
"-Donnxruntime_USE_NNAPI_BUILTIN=" + ("ON" if args.use_nnapi else "OFF"),
"-Donnxruntime_USE_RKNPU=" + ("ON" if args.use_rknpu else "OFF"),
"-Donnxruntime_USE_OPENMP=" + (
"ON" if args.use_openmp and not (
args.use_nnapi or (args.use_mklml and (is_macOS() or is_windows())) or args.use_ngraph or
args.android or (args.ios and is_macOS())
or args.use_rknpu)
else "OFF"),
"-Donnxruntime_USE_TVM=" + ("ON" if args.use_nuphar else "OFF"),
"-Donnxruntime_USE_LLVM=" + ("ON" if args.use_nuphar else "OFF"),
"-Donnxruntime_ENABLE_MICROSOFT_INTERNAL=" + (
"ON" if args.enable_msinternal else "OFF"),
"-Donnxruntime_USE_VITISAI=" + ("ON" if args.use_vitisai else "OFF"),
"-Donnxruntime_USE_NUPHAR=" + ("ON" if args.use_nuphar else "OFF"),
"-Donnxruntime_USE_TENSORRT=" + ("ON" if args.use_tensorrt else "OFF"),
"-Donnxruntime_TENSORRT_HOME=" + (
tensorrt_home if args.use_tensorrt else ""),
# set vars for migraphx
"-Donnxruntime_USE_MIGRAPHX=" + ("ON" if args.use_migraphx else "OFF"),
"-Donnxruntime_MIGRAPHX_HOME=" + (migraphx_home if args.use_migraphx else ""),
# By default - we currently support only cross compiling for
# ARM/ARM64 (no native compilation supported through this
# script).
"-Donnxruntime_CROSS_COMPILING=" + (
"ON" if args.arm64 or args.arm else "OFF"),
"-Donnxruntime_DISABLE_CONTRIB_OPS=" + ("ON" if args.disable_contrib_ops else "OFF"),
"-Donnxruntime_DISABLE_ML_OPS=" + ("ON" if args.disable_ml_ops else "OFF"),
"-Donnxruntime_DISABLE_RTTI=" + ("ON" if args.disable_rtti else "OFF"),
"-Donnxruntime_DISABLE_EXCEPTIONS=" + ("ON" if args.disable_exceptions else "OFF"),
"-Donnxruntime_DISABLE_ORT_FORMAT_LOAD=" + ("ON" if args.disable_ort_format_load else "OFF"),
"-Donnxruntime_MINIMAL_BUILD=" + ("ON" if args.minimal_build else "OFF"),
"-Donnxruntime_REDUCED_OPS_BUILD=" + (
"ON" if args.include_ops_by_config or args.include_ops_by_model else "OFF"),
"-Donnxruntime_MSVC_STATIC_RUNTIME=" + (
"ON" if args.enable_msvc_static_runtime else "OFF"),
# enable pyop if it is nightly build
"-Donnxruntime_ENABLE_LANGUAGE_INTEROP_OPS=" + (
"ON" if args.enable_language_interop_ops else "OFF"),
"-Donnxruntime_USE_DML=" + ("ON" if args.use_dml else "OFF"),
"-Donnxruntime_USE_WINML=" + ("ON" if args.use_winml else "OFF"),
"-Donnxruntime_USE_TELEMETRY=" + (
"ON" if args.use_telemetry else "OFF"),
"-Donnxruntime_ENABLE_LTO=" + ("ON" if args.enable_lto else "OFF"),
"-Donnxruntime_USE_ACL=" + ("ON" if args.use_acl else "OFF"),
"-Donnxruntime_USE_ACL_1902=" + (
"ON" if args.use_acl == "ACL_1902" else "OFF"),
"-Donnxruntime_USE_ACL_1905=" + (
"ON" if args.use_acl == "ACL_1905" else "OFF"),
"-Donnxruntime_USE_ACL_1908=" + (
"ON" if args.use_acl == "ACL_1908" else "OFF"),
"-Donnxruntime_USE_ACL_2002=" + (
"ON" if args.use_acl == "ACL_2002" else "OFF"),
"-Donnxruntime_USE_ARMNN=" + (
"ON" if args.use_armnn else "OFF"),
"-Donnxruntime_ARMNN_RELU_USE_CPU=" + (
"OFF" if args.armnn_relu else "ON"),
"-Donnxruntime_ARMNN_BN_USE_CPU=" + (
"OFF" if args.armnn_bn else "ON"),
# Training related flags
"-Donnxruntime_ENABLE_NVTX_PROFILE=" + (
"ON" if args.enable_nvtx_profile else "OFF"),
"-Donnxruntime_ENABLE_TRAINING=" + (
"ON" if args.enable_training else "OFF"),
"-Donnxruntime_USE_HOROVOD=" + (
"ON" if args.use_horovod else "OFF"),
"-Donnxruntime_BUILD_BENCHMARKS=" + (
"ON" if args.build_micro_benchmarks else "OFF")
]
if mpi_home and os.path.exists(mpi_home):
cmake_args += ["-Donnxruntime_MPI_HOME=" + mpi_home]
if nccl_home and os.path.exists(nccl_home):
cmake_args += ["-Donnxruntime_NCCL_HOME=" + nccl_home]
if args.winml_root_namespace_override:
cmake_args += ["-Donnxruntime_WINML_NAMESPACE_OVERRIDE=" +
args.winml_root_namespace_override]
if args.use_openvino:
cmake_args += ["-Donnxruntime_USE_OPENVINO=ON",
"-Donnxruntime_USE_OPENVINO_MYRIAD=" + (
"ON" if args.use_openvino == "MYRIAD_FP16" else "OFF"),
"-Donnxruntime_USE_OPENVINO_GPU_FP32=" + (
"ON" if args.use_openvino == "GPU_FP32" else "OFF"),
"-Donnxruntime_USE_OPENVINO_GPU_FP16=" + (
"ON" if args.use_openvino == "GPU_FP16" else "OFF"),
"-Donnxruntime_USE_OPENVINO_CPU_FP32=" + (
"ON" if args.use_openvino == "CPU_FP32" else "OFF"),
"-Donnxruntime_USE_OPENVINO_VAD_M=" + (
"ON" if args.use_openvino == "VAD-M_FP16" else "OFF"),
"-Donnxruntime_USE_OPENVINO_VAD_F=" + (
"ON" if args.use_openvino == "VAD-F_FP32" else "OFF"),
"-Donnxruntime_USE_OPENVINO_BINARY=" + (
"ON" if args.use_openvino else "OFF")]
# temp turn on only for linux gpu build
if not is_windows():
if args.use_cuda:
cmake_args += [
"-Donnxruntime_USE_FULL_PROTOBUF=ON"]
# nGraph, TensorRT and OpenVINO providers currently only supports
# full_protobuf option.
if (args.use_full_protobuf or args.use_ngraph or args.use_tensorrt or
args.use_openvino or args.use_vitisai or args.gen_doc):
cmake_args += [
"-Donnxruntime_USE_FULL_PROTOBUF=ON",
"-DProtobuf_USE_STATIC_LIBS=ON"
]
if args.use_nuphar and args.llvm_path is not None:
cmake_args += ["-DLLVM_DIR=%s" % args.llvm_path]
if args.use_cuda and not is_windows():
nvml_stub_path = cuda_home + "/lib64/stubs"
cmake_args += ["-DCUDA_CUDA_LIBRARY=" + nvml_stub_path]
if args.use_preinstalled_eigen:
cmake_args += ["-Donnxruntime_USE_PREINSTALLED_EIGEN=ON",
"-Deigen_SOURCE_PATH=" + args.eigen_path]
if args.android:
cmake_args += [
"-DCMAKE_TOOLCHAIN_FILE=" + args.android_ndk_path +
"/build/cmake/android.toolchain.cmake",
"-DANDROID_PLATFORM=android-" + str(args.android_api),
"-DANDROID_ABI=" + str(args.android_abi)
]
if args.android_cpp_shared:
cmake_args += ["-DANDROID_STL=c++_shared"]
if args.ios:
if is_macOS():
needed_args = [
args.use_xcode,
args.ios_sysroot,
args.apple_deploy_target,
]
arg_names = [
"--use_xcode " +
"<need use xcode to cross build iOS on MacOS>",
"--ios_sysroot " +
"<the location or name of the macOS platform SDK>",
"--apple_deploy_target " +
"<the minimum version of the target platform>",
]
if not all(needed_args):
raise BuildError(
"iOS build on MacOS canceled due to missing arguments: " +
', '.join(
val for val, cond in zip(arg_names, needed_args)
if not cond))
cmake_args += [
"-DCMAKE_SYSTEM_NAME=iOS",
"-Donnxruntime_BUILD_SHARED_LIB=ON",
"-DCMAKE_OSX_SYSROOT=" + args.ios_sysroot,
"-DCMAKE_OSX_ARCHITECTURES=" + args.osx_arch,
"-DCMAKE_OSX_DEPLOYMENT_TARGET=" + args.apple_deploy_target,
# we do not need protoc binary for ios cross build
"-Dprotobuf_BUILD_PROTOC_BINARIES=OFF",
"-DCMAKE_TOOLCHAIN_FILE=" + (
args.ios_toolchain_file if args.ios_toolchain_file
else "../cmake/onnxruntime_ios.toolchain.cmake")
]
# Code sign the binaries, if the code signing development team id is provided
if args.xcode_code_signing_team_id:
cmake_args += ["-DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=" + args.xcode_code_signing_team_id]
else:
# TODO: the cross compiling on Linux is not officially supported by Apple
# and is already broken with the latest codebase, so it should be removed.
# We are cross compiling on Linux
needed_args = [
args.ios_sysroot,
args.arm64 or args.arm,
args.ios_toolchain_dir
]
arg_names = [
"--ios_sysroot <path to sysroot>",
"--arm or --arm64",
"--ios_toolchain_dir <path to toolchain>"
]
if not all(needed_args):
raise BuildError(
"iOS build canceled due to missing arguments: " +
', '.join(
val for val, cond in zip(arg_names, needed_args)
if not cond))
compilers = sorted(
glob.glob(args.ios_toolchain_dir + "/bin/*-clang*"))
os.environ["PATH"] = os.path.join(
args.ios_toolchain_dir, "bin") + os.pathsep + os.environ.get(
"PATH", "")
os.environ["LD_LIBRARY_PATH"] = os.path.join(
args.ios_toolchain_dir, "/lib") + os.pathsep + os.environ.get(
"LD_LIBRARY_PATH", "")
if len(compilers) != 2:
raise BuildError(
"error identifying compilers in ios_toolchain_dir")
cmake_args += [
"-DCMAKE_OSX_ARCHITECTURES=" +
("arm64" if args.arm64 else "arm"),
"-DCMAKE_SYSTEM_NAME=iOSCross",
"-Donnxruntime_BUILD_UNIT_TESTS=OFF",
"-DCMAKE_OSX_SYSROOT=" + args.ios_sysroot,
"-DCMAKE_C_COMPILER=" + compilers[0],
"-DCMAKE_CXX_COMPILER=" + compilers[1]
]
if path_to_protoc_exe:
cmake_args += [
"-DONNX_CUSTOM_PROTOC_EXECUTABLE=%s" % path_to_protoc_exe]
if args.fuzz_testing:
if not (args.build_shared_lib and
is_windows() and
args.cmake_generator == 'Visual Studio 16 2019' and
args.use_full_protobuf):
raise BuildError(
"Fuzz test has only be tested with build shared libs option using MSVC on windows")
cmake_args += [
"-Donnxruntime_BUILD_UNIT_TESTS=ON",
"-Donnxruntime_FUZZ_TEST=ON",
"-Donnxruntime_USE_FULL_PROTOBUF=ON"]
if args.gen_doc:
cmake_args += ["-Donnxruntime_PYBIND_EXPORT_OPSCHEMA=ON"]
else:
cmake_args += ["-Donnxruntime_PYBIND_EXPORT_OPSCHEMA=OFF"]
cmake_args += ["-D{}".format(define) for define in cmake_extra_defines]
cmake_args += cmake_extra_args
# ADO pipelines will store the pipeline build number
# (e.g. 191101-2300.1.master) and source version in environment
# variables. If present, use these values to define the
# WinML/ORT DLL versions.
build_number = os.getenv('Build_BuildNumber')
source_version = os.getenv('Build_SourceVersion')
if build_number and source_version:
build_matches = re.fullmatch(
r"(\d\d)(\d\d)(\d\d)(\d\d)\.(\d+)", build_number)
if build_matches:
YY = build_matches.group(2)
MM = build_matches.group(3)
DD = build_matches.group(4)
# Get ORT major and minor number
with open(os.path.join(source_dir, 'VERSION_NUMBER')) as f:
first_line = f.readline()
ort_version_matches = re.match(r"(\d+).(\d+)", first_line)
if not ort_version_matches:
raise BuildError("Couldn't read version from VERSION_FILE")
ort_major = ort_version_matches.group(1)
ort_minor = ort_version_matches.group(2)
# Example (BuildNumber: 191101-2300.1.master,
# SourceVersion: 0bce7ae6755c792eda558e5d27ded701707dc404)
# MajorPart = 1
# MinorPart = 0
# BuildPart = 1911
# PrivatePart = 123
# String = 191101-2300.1.master.0bce7ae
cmake_args += [
"-DVERSION_MAJOR_PART={}".format(ort_major),
"-DVERSION_MINOR_PART={}".format(ort_minor),
"-DVERSION_BUILD_PART={}".format(YY),
"-DVERSION_PRIVATE_PART={}{}".format(MM, DD),
"-DVERSION_STRING={}.{}.{}.{}".format(
ort_major, ort_minor, build_number,
source_version[0:7])
]
for config in configs:
config_build_dir = get_config_build_dir(build_dir, config)
os.makedirs(config_build_dir, exist_ok=True)
if args.use_nuphar:
os.environ["PATH"] = os.path.join(
config_build_dir, "external", "tvm",
config) + os.pathsep + os.path.dirname(sys.executable) + os.pathsep + os.environ["PATH"]
run_subprocess(
cmake_args + [
"-Donnxruntime_ENABLE_MEMLEAK_CHECKER=" +
("ON" if config.lower() == 'debug' and not args.use_nuphar and not
args.use_ngraph and not args.use_openvino and not
args.enable_msvc_static_runtime
else "OFF"), "-DCMAKE_BUILD_TYPE={}".format(config)],
cwd=config_build_dir)
def clean_targets(cmake_path, build_dir, configs):
for config in configs:
log.info("Cleaning targets for %s configuration", config)
build_dir2 = get_config_build_dir(build_dir, config)
cmd_args = [cmake_path,
"--build", build_dir2,
"--config", config,
"--target", "clean"]
run_subprocess(cmd_args)
def build_targets(args, cmake_path, build_dir, configs, parallel, target=None):
for config in configs:
log.info("Building targets for %s configuration", config)
build_dir2 = get_config_build_dir(build_dir, config)
cmd_args = [cmake_path,
"--build", build_dir2,
"--config", config]
if target:
cmd_args.extend(['--target', target])
build_tool_args = []
if parallel:
num_cores = str(multiprocessing.cpu_count())
if is_windows() and args.cmake_generator != 'Ninja':
build_tool_args += [
"/maxcpucount:" + num_cores,
# if nodeReuse is true, msbuild processes will stay around for a bit after the build completes
"/nodeReuse:False",
]
elif (is_macOS() and args.use_xcode):
# CMake will generate correct build tool args for Xcode
cmd_args += ["--parallel", num_cores]
elif args.cmake_generator != 'Ninja':
build_tool_args += ["-j" + num_cores]
if build_tool_args:
cmd_args += ["--"]
cmd_args += build_tool_args
env = {}
if args.android:
env['ANDROID_SDK_ROOT'] = args.android_sdk_path
run_subprocess(cmd_args, env=env)
def add_dir_if_exists(directory, dir_list):
if os.path.isdir(directory):
dir_list.append(directory)
def setup_cuda_vars(args):
cuda_home = ""
cudnn_home = ""
if args.use_cuda:
cuda_home = args.cuda_home if args.cuda_home else os.getenv(
"CUDA_HOME")
cudnn_home = args.cudnn_home if args.cudnn_home else os.getenv(
"CUDNN_HOME")
cuda_home_valid = (cuda_home is not None and os.path.exists(cuda_home))
cudnn_home_valid = (cudnn_home is not None and os.path.exists(
cudnn_home))
if not cuda_home_valid or not cudnn_home_valid:
raise BuildError(
"cuda_home and cudnn_home paths must be specified and valid.",
"cuda_home='{}' valid={}. cudnn_home='{}' valid={}"
.format(
cuda_home, cuda_home_valid, cudnn_home, cudnn_home_valid))
return cuda_home, cudnn_home
def setup_tensorrt_vars(args):
tensorrt_home = ""
if args.use_tensorrt:
tensorrt_home = (args.tensorrt_home if args.tensorrt_home
else os.getenv("TENSORRT_HOME"))
tensorrt_home_valid = (tensorrt_home is not None and
os.path.exists(tensorrt_home))
if not tensorrt_home_valid:
raise BuildError(
"tensorrt_home paths must be specified and valid.",
"tensorrt_home='{}' valid={}."
.format(tensorrt_home, tensorrt_home_valid))
# Set maximum workspace size in byte for
# TensorRT (1GB = 1073741824 bytes).
os.environ["ORT_TENSORRT_MAX_WORKSPACE_SIZE"] = "1073741824"
# Set maximum number of iterations to detect unsupported nodes
# and partition the models for TensorRT.
os.environ["ORT_TENSORRT_MAX_PARTITION_ITERATIONS"] = "1000"
# Set minimum subgraph node size in graph partitioning
# for TensorRT.
os.environ["ORT_TENSORRT_MIN_SUBGRAPH_SIZE"] = "1"
# Set FP16 flag
os.environ["ORT_TENSORRT_FP16_ENABLE"] = "0"
return tensorrt_home
def setup_migraphx_vars(args):
migraphx_home = None
if (args.use_migraphx):
print("migraphx_home = {}".format(args.migraphx_home))
migraphx_home = args.migraphx_home or os.getenv("MIGRAPHX_HOME") or None
migraphx_home_not_valid = (migraphx_home and not os.path.exists(migraphx_home))
if (migraphx_home_not_valid):
raise BuildError("migraphx_home paths must be specified and valid.",
"migraphx_home='{}' valid={}."
.format(migraphx_home, migraphx_home_not_valid))
return migraphx_home or ''
def setup_dml_build(args, cmake_path, build_dir, configs):
if args.use_dml:
for config in configs:
# Run the RESTORE_PACKAGES target to perform the initial
# NuGet setup.
cmd_args = [cmake_path,
"--build", get_config_build_dir(build_dir, config),
"--config", config,
"--target", "RESTORE_PACKAGES"]
run_subprocess(cmd_args)
def adb_push(src, dest, **kwargs):
return run_subprocess(['adb', 'push', src, dest], **kwargs)
def adb_shell(*args, **kwargs):
return run_subprocess(['adb', 'shell', *args], **kwargs)
def run_android_tests(args, source_dir, config, cwd):
if args.android_abi == 'x86_64':
run_subprocess(os.path.join(
source_dir, 'tools', 'ci_build', 'github', 'android',
'start_android_emulator.sh'))
adb_push('testdata', '/data/local/tmp/', cwd=cwd)
adb_push(
os.path.join(source_dir, 'cmake', 'external', 'onnx', 'onnx', 'backend', 'test'),
'/data/local/tmp/', cwd=cwd)
adb_push('onnxruntime_test_all', '/data/local/tmp/', cwd=cwd)
adb_push('onnx_test_runner', '/data/local/tmp/', cwd=cwd)
adb_shell('cd /data/local/tmp && /data/local/tmp/onnxruntime_test_all')
if args.use_nnapi:
adb_shell('cd /data/local/tmp && /data/local/tmp/onnx_test_runner -e nnapi /data/local/tmp/test')
else:
adb_shell('cd /data/local/tmp && /data/local/tmp/onnx_test_runner /data/local/tmp/test')
# run shared_lib_test if necessary
if args.build_shared_lib:
adb_push('libonnxruntime.so', '/data/local/tmp/', cwd=cwd)
adb_push('onnxruntime_shared_lib_test', '/data/local/tmp/', cwd=cwd)
adb_shell(
'cd /data/local/tmp && ' +
'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp && ' +
'/data/local/tmp/onnxruntime_shared_lib_test')
elif args.android_abi == 'arm64-v8a':
        # For Android arm64 ABI we only verify the size of the binary generated by the minimal build config
# Will fail the build if the shared_lib size is larger than the threshold
if args.minimal_build and config == 'MinSizeRel' and args.build_shared_lib and args.test_binary_size:
# set current size limit to 1100KB
bin_size_threshold = 1100000
bin_actual_size = os.path.getsize(os.path.join(cwd, 'libonnxruntime.so'))
log.info('Android arm64 minsizerel libonnxruntime.so size [' + str(bin_actual_size) + 'B]')
# Write the binary size to a file for uploading later
with open(os.path.join(cwd, 'binary_size_data.txt'), 'w') as file:
file.writelines([
'os,arch,build_config,size\n',
'android,arm64-v8a,minimal-baseline,' + str(bin_actual_size) + '\n'
])
if bin_actual_size > bin_size_threshold:
raise BuildError('Android arm64 minsizerel libonnxruntime.so size [' + str(bin_actual_size) +
'B] is bigger than threshold [' + str(bin_size_threshold) + 'B]')
def run_ios_tests(args, source_dir, config, cwd):
cpr = run_subprocess(["xcodebuild", "test", "-project", "./onnxruntime.xcodeproj",
"-configuration", config,
"-scheme", "onnxruntime_test_all_xc", "-destination",
"platform=iOS Simulator,OS=latest,name=iPhone SE (2nd generation)"], cwd=cwd)
if cpr.returncode == 0:
cpr = run_subprocess(["xcodebuild", "test", "-project", "./onnxruntime.xcodeproj",
"-configuration", config,
"-scheme", "onnxruntime_shared_lib_test_xc", "-destination",
"platform=iOS Simulator,OS=latest,name=iPhone SE (2nd generation)"], cwd=cwd)
cpr.check_returncode()
def run_orttraining_test_orttrainer_frontend_separately(cwd):
class TestNameCollecterPlugin:
def __init__(self):
self.collected = set()
def pytest_collection_modifyitems(self, items):
for item in items:
print('item.name: ', item.name)
test_name = item.name
start = test_name.find('[')
if start > 0:
test_name = test_name[:start]
self.collected.add(test_name)
import pytest
plugin = TestNameCollecterPlugin()
test_script_filename = os.path.join(cwd, "orttraining_test_orttrainer_frontend.py")
pytest.main(['--collect-only', test_script_filename], plugins=[plugin])
for test_name in plugin.collected:
run_subprocess([
sys.executable, '-m', 'pytest',
'orttraining_test_orttrainer_frontend.py', '-v', '-k', test_name], cwd=cwd)
def run_training_python_frontend_tests(cwd):
run_subprocess([sys.executable, 'onnxruntime_test_ort_trainer.py'], cwd=cwd)
run_subprocess([sys.executable, 'onnxruntime_test_training_unit_tests.py'], cwd=cwd)
run_subprocess([
sys.executable, 'orttraining_test_transformers.py',
'BertModelTest.test_for_pretraining_full_precision_list_input'], cwd=cwd)
run_subprocess([
sys.executable, 'orttraining_test_transformers.py',
'BertModelTest.test_for_pretraining_full_precision_dict_input'], cwd=cwd)
run_subprocess([
sys.executable, 'orttraining_test_transformers.py',
'BertModelTest.test_for_pretraining_full_precision_list_and_dict_input'], cwd=cwd)
# TODO: use run_orttraining_test_orttrainer_frontend_separately to work around a sporadic segfault.
# shall revert to run_subprocess call once the segfault issue is resolved.
run_orttraining_test_orttrainer_frontend_separately(cwd)
# run_subprocess([sys.executable, '-m', 'pytest', '-sv', 'orttraining_test_orttrainer_frontend.py'], cwd=cwd)
run_subprocess([sys.executable, '-m', 'pytest', '-sv', 'orttraining_test_orttrainer_bert_toy_onnx.py'], cwd=cwd)
def run_training_python_frontend_e2e_tests(cwd):
# frontend tests are to be added here:
log.info("Running python frontend e2e tests.")
import torch
ngpus = torch.cuda.device_count()
if ngpus > 1:
bert_pretrain_script = 'orttraining_run_bert_pretrain.py'
        log.debug('RUN: mpirun -n {} -x NCCL_DEBUG=INFO {} {} {}'.format(
            ngpus, sys.executable, bert_pretrain_script, 'ORTBertPretrainTest.test_pretrain_throughput'))
run_subprocess([
'mpirun', '-n', str(ngpus), '-x', 'NCCL_DEBUG=INFO', sys.executable,
bert_pretrain_script, 'ORTBertPretrainTest.test_pretrain_throughput'], cwd=cwd)
        log.debug('RUN: mpirun -n {} -x NCCL_DEBUG=INFO {} {} {}'.format(
            ngpus, sys.executable, bert_pretrain_script, 'ORTBertPretrainTest.test_pretrain_convergence'))
run_subprocess([
'mpirun', '-n', str(ngpus), '-x', 'NCCL_DEBUG=INFO', sys.executable,
bert_pretrain_script, 'ORTBertPretrainTest.test_pretrain_convergence'], cwd=cwd)
# a long run
        log.debug('RUN: mpirun -n {} -x NCCL_DEBUG=INFO {} {}'.format(
            ngpus, sys.executable, bert_pretrain_script))
run_subprocess([
'mpirun', '-n', str(ngpus), '-x', 'NCCL_DEBUG=INFO', sys.executable,
bert_pretrain_script], cwd=cwd)
log.debug('RUN: mpirun -n {} {} orttraining_run_glue.py'.format(ngpus, sys.executable))
run_subprocess([
'mpirun', '-n', str(ngpus), '-x', 'NCCL_DEBUG=INFO', sys.executable, 'orttraining_run_glue.py'], cwd=cwd)
    # With orttraining_run_glue.py:
    # 1. we force a single GPU (via CUDA_VISIBLE_DEVICES) for the fine-tune tests.
    # 2. the tests need to run separately (we cannot mix fp16 and full precision
    #    runs; this still needs to be investigated).
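    # Note: run_subprocess merges the supplied env dict into a copy of os.environ,
    # so env={'CUDA_VISIBLE_DEVICES': '0'} below pins each fine-tune test process
    # to GPU 0 without modifying the parent environment.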
run_subprocess(
[sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_bert_with_mrpc', '-v'],
cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
run_subprocess(
[sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_bert_fp16_with_mrpc', '-v'],
cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
run_subprocess(
[sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_roberta_with_mrpc', '-v'],
cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
run_subprocess(
[sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_roberta_fp16_with_mrpc', '-v'],
cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
run_subprocess(
[sys.executable, 'orttraining_run_multiple_choice.py', 'ORTMultipleChoiceTest.test_bert_fp16_with_swag', '-v'],
cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
run_subprocess([sys.executable, 'onnxruntime_test_ort_trainer_with_mixed_precision.py'], cwd=cwd)
run_subprocess([
sys.executable, 'orttraining_test_transformers.py',
'BertModelTest.test_for_pretraining_mixed_precision'], cwd=cwd)
# this test is not stable. need to skip to unblock release
# run_subprocess([
# sys.executable, 'orttraining_test_transformers.py',
# 'BertModelTest.test_for_pretraining_mixed_precision_with_gradient_accumulation'], cwd=cwd)
def run_training_pipeline_e2e_tests(cwd):
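    # Drives the native onnxruntime_training_bert trainer under mpirun in two
    # layouts: 4-way pipeline parallelism, and 2-way data parallelism combined
    # with 2-way pipeline parallelism. It only runs when exactly 4 GPUs are
    # available, matching the current CI machine (see the TODO below).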
# pipeline tests are to be added here:
log.info("Running pipeline e2e tests.")
import torch
ngpus = torch.cuda.device_count()
command = ['./onnxruntime_training_bert',
'--ort_log_severity', '1',
'--optimizer=Lamb',
'--learning_rate=3e-3',
'--max_seq_length=128',
'--max_predictions_per_seq=20',
'--warmup_ratio=0.2843',
'--warmup_mode=Poly',
'--model_name', '/bert_ort/bert_models/nv/bert-large/' +
'bert-large-uncased_L_24_H_1024_A_16_V_30528_S_512_Dp_0.1_optimized_layer_norm_opset12',
'--train_data_dir', '/bert_data/128/books_wiki_en_corpus/train',
'--test_data_dir', '/bert_data/128/books_wiki_en_corpus/test',
'--display_loss_steps', '1',
'--use_nccl',
'--use_mixed_precision',
'--allreduce_in_fp16',
'--gradient_accumulation_steps', '48',
'--num_train_steps', '96',
'--train_batch_size', '50']
# TODO: currently the CI machine only has 4 GPUs for parallel tests.
    # Fill in more pipeline partition options when the machine has a different GPU count.
if ngpus != 4:
return
# Test 4-way pipeline parallel
pp_command = ['mpirun', '-n', str(ngpus)] + command + ['--pipeline_parallel_size', '4', '--cut_group_info',
'1149:407-1219/1341/1463/1585/1707/1829,' +
'1881:407-1951/2073/2195/2317/2439/2561,' +
'2613:407-2683/2805/2927/3049/3171/3293']
command_str = ', '.join(pp_command)
log.debug('RUN: ' + command_str)
run_subprocess(pp_command, cwd=cwd)
# Test 2-way data parallel + 2-way pipeline parallel
pp_dp_command = ['mpirun', '-n', str(ngpus)]
pp_dp_command = pp_dp_command + command
pp_dp_command = pp_dp_command + ['--data_parallel_size', '2', '--pipeline_parallel_size',
'2', '--cut_group_info',
'1881:407-1951/2073/2195/2317/2439/2561/2683/2805/2927/3049/3171/3293']
command_str = ', '.join(pp_dp_command)
log.debug('RUN: ' + command_str)
run_subprocess(pp_dp_command, cwd=cwd)
def run_onnxruntime_tests(args, source_dir, ctest_path, build_dir, configs):
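    # Per-config dispatch: the training frontend/pipeline e2e, Android and iOS
    # paths each short-circuit the loop; otherwise the native tests run through
    # ctest (or vstest.console.exe when --use_vstest was given), followed by the
    # optional Python, ONNX and Keras suites when --enable_pybind is set.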
for config in configs:
log.info("Running tests for %s configuration", config)
cwd = get_config_build_dir(build_dir, config)
if args.enable_training and args.use_cuda and args.enable_training_python_frontend_e2e_tests:
# run frontend tests for orttraining-linux-gpu-frontend_test-ci-pipeline.
# this is not a PR merge test so skip other non-frontend tests.
run_training_python_frontend_e2e_tests(cwd=cwd)
run_training_python_frontend_tests(cwd=cwd)
continue
if args.enable_training and args.use_cuda and args.enable_training_pipeline_e2e_tests:
# run distributed pipeline test on 4-GPU CI machine.
run_training_pipeline_e2e_tests(cwd=cwd)
continue
if args.android:
run_android_tests(args, source_dir, config, cwd)
continue
elif args.ios:
run_ios_tests(args, source_dir, config, cwd)
continue
dll_path_list = []
if args.use_nuphar:
dll_path_list.append(os.path.join(
build_dir, config, "external", "tvm", config))
if args.use_tensorrt:
dll_path_list.append(os.path.join(args.tensorrt_home, 'lib'))
if args.use_mklml:
dll_path_list.append(os.path.join(build_dir, config, "mklml", "src", "project_mklml", "lib"))
if not is_windows():
# A workaround for making libonnxruntime_providers_shared.so loadable.
dll_path_list.append(os.path.join(build_dir, config))
dll_path = None
if len(dll_path_list) > 0:
dll_path = os.pathsep.join(dll_path_list)
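            # dll_path is forwarded to run_subprocess, which prepends it to PATH on
            # Windows or appends it to LD_LIBRARY_PATH elsewhere, so the test
            # binaries can locate the TVM/MKLML/TensorRT and provider libraries.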
if ctest_path is None:
# Get the "Google Test Adapter" for vstest.
if not os.path.exists(os.path.join(cwd,
'googletestadapter.0.17.1')):
run_subprocess(
['nuget.exe', 'restore',
os.path.join(source_dir, 'packages.config'),
'-ConfigFile', os.path.join(source_dir, 'NuGet.config'),
'-PackagesDirectory', cwd])
cwd2 = os.path.join(cwd, config)
executables = ['onnxruntime_test_all.exe']
if args.build_shared_lib:
executables.append('onnxruntime_shared_lib_test.exe')
executables.append('onnxruntime_global_thread_pools_test.exe')
run_subprocess(
['vstest.console.exe', '--parallel',
'--TestAdapterPath:..\\googletestadapter.0.17.1\\build\\_common', # noqa
'/Logger:trx', '/Enablecodecoverage', '/Platform:x64',
"/Settings:%s" % os.path.join(
source_dir, 'cmake\\codeconv.runsettings')] + executables,
cwd=cwd2, dll_path=dll_path)
else:
ctest_cmd = [ctest_path, "--build-config", config, "--verbose", "--timeout", "3600"]
run_subprocess(ctest_cmd, cwd=cwd, dll_path=dll_path)
if args.enable_pybind:
# Disable python tests for TensorRT because many tests are
# not supported yet.
if args.use_tensorrt:
return
# Disable python tests in a reduced build as we don't know which ops have been included and which
# models can run
if args.include_ops_by_model or args.include_ops_by_config or args.minimal_build:
return
if is_windows():
cwd = os.path.join(cwd, config)
run_subprocess([sys.executable, 'onnxruntime_test_python.py'], cwd=cwd, dll_path=dll_path)
if args.enable_symbolic_shape_infer_tests:
run_subprocess([sys.executable, 'onnxruntime_test_python_symbolic_shape_infer.py'],
cwd=cwd, dll_path=dll_path)
# For CUDA enabled builds test IOBinding feature
if args.use_cuda:
# We need to have Torch installed to test the IOBinding feature
# which currently uses Torch's allocator to allocate GPU memory for testing
log.info("Testing IOBinding feature")
run_subprocess([sys.executable, 'onnxruntime_test_python_iobinding.py'], cwd=cwd, dll_path=dll_path)
if not args.disable_ml_ops:
run_subprocess([sys.executable, 'onnxruntime_test_python_mlops.py'], cwd=cwd, dll_path=dll_path)
if args.enable_training and args.use_cuda:
# run basic frontend tests
run_training_python_frontend_tests(cwd=cwd)
try:
import onnx # noqa
onnx_test = True
except ImportError as error:
log.exception(error)
log.warning("onnx is not installed. The ONNX tests will be skipped.")
onnx_test = False
if onnx_test:
run_subprocess([sys.executable, 'onnxruntime_test_python_backend.py'], cwd=cwd, dll_path=dll_path)
if not args.disable_ml_ops:
run_subprocess([sys.executable, 'onnxruntime_test_python_backend_mlops.py'],
cwd=cwd, dll_path=dll_path)
run_subprocess([sys.executable,
os.path.join(source_dir, 'onnxruntime', 'test', 'onnx', 'gen_test_models.py'),
'--output_dir', 'test_models'], cwd=cwd)
if not args.skip_onnx_tests:
run_subprocess([os.path.join(cwd, 'onnx_test_runner'), 'test_models'], cwd=cwd)
if config != 'Debug':
run_subprocess([sys.executable, 'onnx_backend_test_series.py'], cwd=cwd, dll_path=dll_path)
if not args.skip_keras_test:
try:
import onnxmltools # noqa
import keras # noqa
onnxml_test = True
except ImportError:
log.warning(
"onnxmltools and keras are not installed. "
"The keras tests will be skipped.")
onnxml_test = False
if onnxml_test:
run_subprocess(
[sys.executable, 'onnxruntime_test_python_keras.py'],
cwd=cwd, dll_path=dll_path)
def nuphar_run_python_tests(build_dir, configs):
"""nuphar temporary function for running python tests separately
as it requires ONNX 1.5.0
"""
for config in configs:
if config == 'Debug':
continue
cwd = get_config_build_dir(build_dir, config)
if is_windows():
cwd = os.path.join(cwd, config)
dll_path = os.path.join(build_dir, config, "external", "tvm", config)
# install onnx for shape inference in testing Nuphar scripts
# this needs to happen after onnx_test_data preparation which
# uses onnx 1.3.0
run_subprocess(
[sys.executable, '-m', 'pip', 'install', '--user', 'onnx==1.5.0'])
run_subprocess(
[sys.executable, 'onnxruntime_test_python_nuphar.py'],
cwd=cwd, dll_path=dll_path)
def run_nodejs_tests(nodejs_binding_dir):
args = ['npm', 'test', '--', '--timeout=2000']
if is_windows():
args = ['cmd', '/c'] + args
run_subprocess(args, cwd=nodejs_binding_dir)
def build_python_wheel(
source_dir, build_dir, configs, use_cuda, use_ngraph, use_dnnl,
use_tensorrt, use_openvino, use_nuphar, use_vitisai, use_acl, use_armnn, use_dml,
wheel_name_suffix, enable_training, nightly_build=False, featurizers_build=False, use_ninja=False):
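    # Invokes 'setup.py bdist_wheel' inside each config's build directory; the
    # execution-provider flags below are mutually exclusive and are forwarded to
    # setup.py. An illustrative resulting invocation (flags depend on the args):
    #   python <source_dir>/setup.py bdist_wheel --use_cuda --wheel_name_suffix=<suffix>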
for config in configs:
cwd = get_config_build_dir(build_dir, config)
if is_windows() and not use_ninja:
cwd = os.path.join(cwd, config)
args = [sys.executable, os.path.join(source_dir, 'setup.py'),
'bdist_wheel']
# We explicitly override the platform tag in the name of the generated build wheel
# so that we can install the wheel on Mac OS X versions 10.12+.
        # Without this explicit override, we will see something like this while building on MacOS 10.14 -
# [WARNING] MACOSX_DEPLOYMENT_TARGET is set to a lower value (10.12)
# than the version on which the Python interpreter was compiled (10.14) and will be ignored.
# Since we need to support 10.12+, we explicitly override the platform tag.
# See PR #3626 for more details
if is_macOS():
args += ['-p', 'macosx_10_12_x86_64']
# Any combination of the following arguments can be applied
if nightly_build:
args.append('--nightly_build')
if featurizers_build:
args.append("--use_featurizers")
if wheel_name_suffix:
args.append('--wheel_name_suffix={}'.format(wheel_name_suffix))
if enable_training:
args.append("--enable_training")
# The following arguments are mutually exclusive
if use_tensorrt:
args.append('--use_tensorrt')
elif use_cuda:
args.append('--use_cuda')
elif use_ngraph:
args.append('--use_ngraph')
elif use_openvino:
args.append('--use_openvino')
elif use_dnnl:
args.append('--use_dnnl')
elif use_nuphar:
args.append('--use_nuphar')
elif use_vitisai:
args.append('--use_vitisai')
elif use_acl:
args.append('--use_acl')
elif use_armnn:
args.append('--use_armnn')
elif use_dml:
args.append('--use_dml')
run_subprocess(args, cwd=cwd)
def derive_linux_build_property():
if is_windows():
return "/p:IsLinuxBuild=\"false\""
else:
return "/p:IsLinuxBuild=\"true\""
def build_nuget_package(source_dir, build_dir, configs, use_cuda, use_openvino, use_tensorrt, use_dnnl, use_mklml):
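    # Packaging flow: a single 'dotnet restore' of OnnxRuntime.CSharp.sln, then,
    # per config, an msbuild of the solution followed by the CreatePackage target
    # of OnnxRuntime.CSharp.proj. On Linux the native build output is first staged
    # with 'make install DESTDIR=.//nuget-staging' before the msbuild steps.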
if not (is_windows() or is_linux()):
raise BuildError(
            'Currently csharp builds and nuget package creation are only supported '
'on Windows and Linux platforms.')
csharp_build_dir = os.path.join(source_dir, 'csharp')
is_linux_build = derive_linux_build_property()
# derive package name and execution provider based on the build args
execution_provider = "/p:ExecutionProvider=\"None\""
package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime\""
if use_openvino:
execution_provider = "/p:ExecutionProvider=\"openvino\""
package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime.OpenVino\""
elif use_tensorrt:
execution_provider = "/p:ExecutionProvider=\"tensorrt\""
package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime.TensorRT\""
elif use_dnnl:
execution_provider = "/p:ExecutionProvider=\"dnnl\""
package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime.DNNL\""
elif use_cuda:
package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime.Gpu\""
elif use_mklml:
package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime.MKLML\""
else:
pass
# set build directory based on build_dir arg
native_dir = os.path.normpath(os.path.join(source_dir, build_dir))
ort_build_dir = "/p:OnnxRuntimeBuildDirectory=\"" + native_dir + "\""
# dotnet restore
cmd_args = ["dotnet", "restore", "OnnxRuntime.CSharp.sln", "--configfile", "Nuget.CSharp.config"]
run_subprocess(cmd_args, cwd=csharp_build_dir)
# build csharp bindings and create nuget package for each config
for config in configs:
if is_linux():
native_build_dir = os.path.join(native_dir, config)
cmd_args = ["make", "install", "DESTDIR=.//nuget-staging"]
run_subprocess(cmd_args, cwd=native_build_dir)
configuration = "/p:Configuration=\"" + config + "\""
cmd_args = ["dotnet", "msbuild", "OnnxRuntime.CSharp.sln", configuration, package_name, is_linux_build,
ort_build_dir]
run_subprocess(cmd_args, cwd=csharp_build_dir)
cmd_args = [
"dotnet", "msbuild", "OnnxRuntime.CSharp.proj", "/t:CreatePackage",
package_name, configuration, execution_provider, is_linux_build, ort_build_dir]
run_subprocess(cmd_args, cwd=csharp_build_dir)
def run_csharp_tests(source_dir, build_dir, use_cuda, use_openvino, use_tensorrt, use_dnnl):
# Currently only running tests on windows.
if not is_windows():
return
csharp_source_dir = os.path.join(source_dir, 'csharp')
is_linux_build = derive_linux_build_property()
# define macros based on build args
macros = ""
if use_openvino:
macros += "USE_OPENVINO;"
if use_tensorrt:
macros += "USE_TENSORRT;"
if use_dnnl:
macros += "USE_DNNL;"
if use_cuda:
macros += "USE_CUDA;"
define_constants = ""
if macros != "":
define_constants = "/p:DefineConstants=\"" + macros + "\""
# set build directory based on build_dir arg
native_build_dir = os.path.normpath(os.path.join(source_dir, build_dir))
ort_build_dir = "/p:OnnxRuntimeBuildDirectory=\"" + native_build_dir + "\""
# Skip pretrained models test. Only run unit tests as part of the build
# add "--verbosity", "detailed" to this command if required
cmd_args = ["dotnet", "test", "test\\Microsoft.ML.OnnxRuntime.Tests\\Microsoft.ML.OnnxRuntime.Tests.csproj",
"--filter", "FullyQualifiedName!=Microsoft.ML.OnnxRuntime.Tests.InferenceTest.TestPreTrainedModels",
is_linux_build, define_constants, ort_build_dir]
run_subprocess(cmd_args, cwd=csharp_source_dir)
def build_protoc_for_host(cmake_path, source_dir, build_dir, args):
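    # When cross-compiling, the protoc binary built for the target cannot run on
    # the build host, so a separate Release-config protoc is generated and built
    # from cmake/external/protobuf under <build_dir>/host_protoc. Its absolute
    # path is returned and later handed to CMake as ONNX_CUSTOM_PROTOC_EXECUTABLE.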
if (args.arm or args.arm64 or args.enable_windows_store) and (not is_windows() and not args.ios):
raise BuildError(
            'Currently, building protoc for the host is only supported when '
            'cross-compiling for ARM/ARM64/Windows Store on Windows, or when cross-compiling for iOS on Linux/macOS')
log.info(
"Building protoc for host to be used in cross-compiled build process")
protoc_build_dir = os.path.join(os.getcwd(), build_dir, 'host_protoc')
os.makedirs(protoc_build_dir, exist_ok=True)
# Generate step
cmd_args = [
cmake_path,
os.path.join(source_dir, 'cmake', 'external', 'protobuf', 'cmake'),
'-Dprotobuf_BUILD_TESTS=OFF',
'-Dprotobuf_WITH_ZLIB_DEFAULT=OFF',
'-Dprotobuf_BUILD_SHARED_LIBS=OFF'
]
is_ninja = args.cmake_generator == 'Ninja'
if args.cmake_generator is not None and not (is_macOS() and args.use_xcode):
cmd_args += ['-G', args.cmake_generator]
if is_windows():
if not is_ninja:
cmd_args += ['-T', 'host=x64']
elif is_macOS():
if args.use_xcode:
cmd_args += ['-G', 'Xcode']
            # CMake < 3.18 has a bug that sets the system arch to arm64 (if not specified) for Xcode 12.
            # protoc for the host should be built using the host architecture, so
            # explicitly specify CMAKE_OSX_ARCHITECTURES for an x86_64 Mac.
import platform
if platform.machine() == 'x86_64':
cmd_args += ['-DCMAKE_OSX_ARCHITECTURES=x86_64']
run_subprocess(cmd_args, cwd=protoc_build_dir)
# Build step
cmd_args = [cmake_path,
"--build", protoc_build_dir,
"--config", "Release",
"--target", "protoc"]
run_subprocess(cmd_args)
# Absolute protoc path is needed for cmake
config_dir = ''
suffix = ''
if (is_windows() and not is_ninja) or (is_macOS() and args.use_xcode):
config_dir = 'Release'
if is_windows():
suffix = '.exe'
expected_protoc_path = os.path.join(protoc_build_dir, config_dir, 'protoc' + suffix)
if not os.path.exists(expected_protoc_path):
raise BuildError("Couldn't find {}. Host build of protoc failed.".format(expected_protoc_path))
return expected_protoc_path
def generate_documentation(source_dir, build_dir, configs):
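    # Copies gen_contrib_doc.py / gen_opkernel_doc.py into each config's build
    # directory and runs them there to regenerate ContribOperators.md and
    # OperatorKernels.md in the source tree, then uses 'git diff' to detect drift:
    # a changed kernel doc only logs a warning (its content depends on which
    # execution providers were built), while a changed contrib-op doc fails the
    # build.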
operator_doc_path = os.path.join(source_dir, 'docs', 'ContribOperators.md')
opkernel_doc_path = os.path.join(source_dir, 'docs', 'OperatorKernels.md')
for config in configs:
# Copy the gen_contrib_doc.py.
shutil.copy(
os.path.join(source_dir, 'tools', 'python', 'gen_contrib_doc.py'),
os.path.join(build_dir, config))
shutil.copy(
os.path.join(source_dir, 'tools', 'python', 'gen_opkernel_doc.py'),
os.path.join(build_dir, config))
run_subprocess(
[sys.executable,
'gen_contrib_doc.py',
'--output_path', operator_doc_path],
cwd=os.path.join(build_dir, config))
run_subprocess(
[sys.executable,
'gen_opkernel_doc.py',
'--output_path', opkernel_doc_path],
cwd=os.path.join(build_dir, config))
docdiff = ''
try:
docdiff = subprocess.check_output(['git', 'diff', opkernel_doc_path])
except subprocess.CalledProcessError:
print('git diff returned non-zero error code')
if len(docdiff) > 0:
        # Show a warning instead of throwing an exception, because the generated
        # content depends on which execution providers are included in the build
        # configuration.
log.warning(
'The updated opkernel document file ' + str(opkernel_doc_path) +
' is different from the checked in version. Consider '
'regenerating the file with CPU, DNNL and CUDA providers enabled.')
log.debug('diff:\n' + str(docdiff))
docdiff = ''
try:
docdiff = subprocess.check_output(['git', 'diff', operator_doc_path])
except subprocess.CalledProcessError:
print('git diff returned non-zero error code')
if len(docdiff) > 0:
raise BuildError(
'The updated operator document file ' +
str(operator_doc_path) + ' must be checked in.\n diff:\n' +
str(docdiff))
def main():
args = parse_arguments()
cmake_extra_defines = (args.cmake_extra_defines
if args.cmake_extra_defines else [])
cross_compiling = args.arm or args.arm64 or args.android
# If there was no explicit argument saying what to do, default
# to update, build and test (for native builds).
if not (args.update or args.clean or args.build or args.test):
log.debug(
"Defaulting to running update, build "
"[and test for native builds].")
args.update = True
args.build = True
if cross_compiling:
args.test = args.android_abi == 'x86_64' or args.android_abi == 'arm64-v8a'
else:
args.test = True
if args.skip_tests:
args.test = False
if args.include_ops_by_model or args.include_ops_by_config:
from exclude_unused_ops import exclude_unused_ops
models_path = args.include_ops_by_model if args.include_ops_by_model else ''
config_path = args.include_ops_by_config if args.include_ops_by_config else ''
exclude_unused_ops(models_path, config_path, use_cuda=args.use_cuda)
if args.use_tensorrt:
args.use_cuda = True
if args.build_wheel or args.gen_doc:
args.enable_pybind = True
if args.build_csharp or args.build_nuget or args.build_java or args.build_nodejs:
args.build_shared_lib = True
if args.build_nuget and cross_compiling:
raise BuildError('Currently nuget package creation is not supported while cross-compiling')
if args.enable_pybind and args.disable_exceptions:
raise BuildError('Python bindings require exceptions to be enabled.')
if args.minimal_build and args.disable_ort_format_load:
raise BuildError('Minimal build requires loading ORT format models.')
# Disabling unit tests for VAD-F as FPGA only supports
# models with NCHW layout
if args.use_openvino == "VAD-F_FP32":
args.test = False
configs = set(args.config)
# setup paths and directories
cmake_path = resolve_executable_path(args.cmake_path)
ctest_path = None if args.use_vstest else resolve_executable_path(
args.ctest_path)
build_dir = args.build_dir
script_dir = os.path.realpath(os.path.dirname(__file__))
source_dir = os.path.normpath(os.path.join(script_dir, "..", ".."))
# if using cuda, setup cuda paths and env vars
cuda_home, cudnn_home = setup_cuda_vars(args)
mpi_home = args.mpi_home
nccl_home = args.nccl_home
# if using tensorrt, setup tensorrt paths
tensorrt_home = setup_tensorrt_vars(args)
# if using migraphx, setup migraphx paths
migraphx_home = setup_migraphx_vars(args)
os.makedirs(build_dir, exist_ok=True)
log.info("Build started")
if args.update:
cmake_extra_args = []
path_to_protoc_exe = args.path_to_protoc_exe
if not args.skip_submodule_sync:
update_submodules(source_dir)
if is_windows():
if args.cmake_generator == 'Ninja':
if args.x86 or args.arm or args.arm64:
raise BuildError(
"To cross-compile with Ninja, load the toolset "
"environment for the target processor (e.g. Cross "
"Tools Command Prompt for VS)")
cmake_extra_args = ['-G', args.cmake_generator]
elif args.x86:
cmake_extra_args = [
'-A', 'Win32', '-T', 'host=x64', '-G', args.cmake_generator
]
elif args.arm or args.arm64:
# Cross-compiling for ARM(64) architecture
# First build protoc for host to use during cross-compilation
if path_to_protoc_exe is None:
path_to_protoc_exe = build_protoc_for_host(
cmake_path, source_dir, build_dir, args)
if args.arm:
cmake_extra_args = ['-A', 'ARM']
else:
cmake_extra_args = ['-A', 'ARM64']
cmake_extra_args += ['-G', args.cmake_generator]
# Cannot test on host build machine for cross-compiled
# builds (Override any user-defined behaviour for test if any)
if args.test:
log.info(
"Cannot test on host build machine for cross-compiled "
"ARM(64) builds. Will skip test running after build.")
args.test = False
else:
if (args.msvc_toolset == '14.16' and
args.cmake_generator == 'Visual Studio 16 2019'):
                    # CUDA 10.0 requires _MSC_VER >= 1700 and
                    # _MSC_VER < 1920, i.e. a Visual Studio version
                    # in [2012, 2019). In VS2019 we therefore have to use a
                    # side-by-side minor-version MSVC toolset from
                    # Visual Studio 2017: 14.16 is the MSVC version and
                    # v141 is the MSVC toolset version.
                    # The CUDA VS extension should be installed to
                    # C:\Program Files (x86)\Microsoft Visual
                    # Studio\2019\Enterprise\MSBuild\Microsoft\VC\v160\BuildCustomizations # noqa
toolset = 'v141,host=x64,version=' + args.msvc_toolset
elif args.msvc_toolset:
toolset = 'host=x64,version=' + args.msvc_toolset
else:
toolset = 'host=x64'
if args.cuda_version:
toolset += ',cuda=' + args.cuda_version
cmake_extra_args = [
'-A', 'x64', '-T', toolset, '-G', args.cmake_generator
]
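                # Illustrative result of the branch above (exact values depend on
                # the supplied args):
                #   -A x64 -T v141,host=x64,version=14.16,cuda=10.0 -G "Visual Studio 16 2019"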
if args.enable_windows_store:
cmake_extra_args.append(
'-DCMAKE_TOOLCHAIN_FILE=' + os.path.join(
source_dir, 'cmake', 'store_toolchain.cmake'))
if args.enable_wcos:
cmake_extra_args.append('-DCMAKE_USER_MAKE_RULES_OVERRIDE=wcos_rules_override.cmake')
elif args.cmake_generator is not None and not (is_macOS() and args.use_xcode):
cmake_extra_args += ['-G', args.cmake_generator]
elif is_macOS() and args.use_xcode:
cmake_extra_args += ['-G', 'Xcode']
if (args.android or args.ios or args.enable_windows_store) and args.path_to_protoc_exe is None:
# Cross-compiling for Android and iOS
path_to_protoc_exe = build_protoc_for_host(
cmake_path, source_dir, build_dir, args)
if is_ubuntu_1604():
if (args.arm or args.arm64):
raise BuildError(
"Only Windows ARM(64) cross-compiled builds supported "
"currently through this script")
install_ubuntu_deps(args)
if not is_docker() and not args.use_acl and not args.use_armnn:
install_python_deps()
if args.enable_pybind and is_windows():
install_python_deps(args.numpy_version)
if args.enable_onnx_tests:
setup_test_data(build_dir, configs)
generate_build_tree(
cmake_path, source_dir, build_dir, cuda_home, cudnn_home, mpi_home, nccl_home,
tensorrt_home, migraphx_home, path_to_protoc_exe, configs, cmake_extra_defines,
args, cmake_extra_args)
if args.clean:
clean_targets(cmake_path, build_dir, configs)
# if using DML, perform initial nuget package restore
setup_dml_build(args, cmake_path, build_dir, configs)
if args.build:
build_targets(args, cmake_path, build_dir, configs, args.parallel, args.target)
if args.test:
run_onnxruntime_tests(args, source_dir, ctest_path, build_dir, configs)
# run nuphar python tests last, as it installs ONNX 1.5.0
if args.enable_pybind and not args.skip_onnx_tests and args.use_nuphar:
nuphar_run_python_tests(build_dir, configs)
# run node.js binding tests
if args.build_nodejs and not args.skip_nodejs_tests:
nodejs_binding_dir = os.path.normpath(os.path.join(source_dir, "nodejs"))
run_nodejs_tests(nodejs_binding_dir)
if args.build:
if args.build_wheel:
nightly_build = bool(os.getenv('NIGHTLY_BUILD') == '1')
build_python_wheel(
source_dir,
build_dir,
configs,
args.use_cuda,
args.use_ngraph,
args.use_dnnl,
args.use_tensorrt,
args.use_openvino,
args.use_nuphar,
args.use_vitisai,
args.use_acl,
args.use_armnn,
args.use_dml,
args.wheel_name_suffix,
args.enable_training,
nightly_build=nightly_build,
featurizers_build=args.use_featurizers,
use_ninja=(args.cmake_generator == 'Ninja')
)
if args.build_nuget:
build_nuget_package(
source_dir,
build_dir,
configs,
args.use_cuda,
args.use_openvino,
args.use_tensorrt,
args.use_dnnl,
args.use_mklml
)
if args.test and args.build_nuget:
run_csharp_tests(
source_dir,
build_dir,
args.use_cuda,
args.use_openvino,
args.use_tensorrt,
args.use_dnnl)
if args.gen_doc and (args.build or args.test):
generate_documentation(source_dir, build_dir, configs)
log.info("Build complete")
if __name__ == "__main__":
try:
sys.exit(main())
except BaseError as e:
log.error(str(e))
sys.exit(1)
| 43.557604 | 119 | 0.613627 |
import argparse
import glob
import multiprocessing
import os
import re
import shutil
import subprocess
import sys
import hashlib
from logger import log
class BaseError(Exception):
pass
class BuildError(BaseError):
def __init__(self, *messages):
super().__init__("\n".join(messages))
class UsageError(BaseError):
def __init__(self, message):
super().__init__(message)
def _check_python_version():
if sys.version_info[0] != 3:
raise BuildError(
"Bad python major version: expecting python 3, found version "
"'{}'".format(sys.version))
if sys.version_info[1] < 5:
raise BuildError(
"Bad python minor version: expecting python 3.5+, found version "
"'{}'".format(sys.version))
_check_python_version()
def parse_arguments():
parser = argparse.ArgumentParser(
description="ONNXRuntime CI build driver.",
usage=""" # noqa
Default behavior is --update --build --test for native architecture builds.
Default behavior is --update --build for cross-compiled builds.
The Update phase will update git submodules, and run cmake to generate makefiles.
The Build phase will build all projects.
The Test phase will run all unit tests, and optionally the ONNX tests.
Use the individual flags to only run the specified stages.
""")
# Main arguments
parser.add_argument(
"--build_dir", required=True, help="Path to the build directory.")
parser.add_argument(
"--config", nargs="+", default=["Debug"],
choices=["Debug", "MinSizeRel", "Release", "RelWithDebInfo"],
help="Configuration(s) to build.")
parser.add_argument(
"--update", action='store_true', help="Update makefiles.")
parser.add_argument("--build", action='store_true', help="Build.")
parser.add_argument(
"--clean", action='store_true',
help="Run 'cmake --build --target clean' for the selected config/s.")
parser.add_argument(
"--parallel", action='store_true', help="""Use parallel build.
The build setup doesn't get all dependencies right, so --parallel
only works if you're just rebuilding ONNXRuntime code. If you've
    done an update that fetched external dependencies, you have to build
    without --parallel the first time. Once that's done, run with
"--build --parallel --test" to just build in
parallel and run tests.""")
parser.add_argument("--test", action='store_true', help="Run unit tests.")
parser.add_argument(
"--skip_tests", action='store_true', help="Skip all tests.")
# Training options
parser.add_argument(
"--enable_nvtx_profile", action='store_true', help="Enable NVTX profile in ORT.")
parser.add_argument(
"--enable_training", action='store_true', help="Enable training in ORT.")
parser.add_argument(
"--enable_training_python_frontend_e2e_tests", action="store_true",
help="Enable the pytorch frontend training tests.")
parser.add_argument(
"--enable_training_pipeline_e2e_tests", action="store_true",
help="Enable the pipeline c++ e2e tests.")
parser.add_argument(
"--use_horovod", action='store_true', help="Enable Horovod.")
parser.add_argument(
"--mpi_home", help="Path to MPI installation dir")
parser.add_argument(
"--nccl_home", help="Path to NCCL installation dir")
# enable ONNX tests
parser.add_argument(
"--enable_onnx_tests", action='store_true',
        help="""When running the Test phase, run onnx_test_runner against
available test data directories.""")
parser.add_argument("--path_to_protoc_exe", help="Path to protoc exe.")
parser.add_argument(
"--fuzz_testing", action='store_true', help="Enable Fuzz testing of the onnxruntime.")
parser.add_argument(
"--enable_symbolic_shape_infer_tests", action='store_true',
help="""When running the Test phase, run symbolic shape inference against
available test data directories.""")
    # generate documentation
parser.add_argument(
"--gen_doc", action='store_true',
help="Generate documentation on contrib ops")
# CUDA related
parser.add_argument("--use_cuda", action='store_true', help="Enable CUDA.")
parser.add_argument(
"--cuda_version", help="The version of CUDA toolkit to use. "
"Auto-detect if not specified. e.g. 9.0")
parser.add_argument(
"--cuda_home", help="Path to CUDA home."
"Read from CUDA_HOME environment variable if --use_cuda is true and "
"--cuda_home is not specified.")
parser.add_argument(
"--cudnn_home", help="Path to CUDNN home. "
"Read from CUDNN_HOME environment variable if --use_cuda is true and "
"--cudnn_home is not specified.")
# Python bindings
parser.add_argument(
"--enable_pybind", action='store_true', help="Enable Python Bindings.")
parser.add_argument(
"--build_wheel", action='store_true', help="Build Python Wheel.")
parser.add_argument(
"--wheel_name_suffix", help="Suffix to append to created wheel names. "
"This value is currently only used for nightly builds.")
parser.add_argument(
"--numpy_version", help="Installs a specific version of numpy "
"before building the python binding.")
parser.add_argument(
"--skip-keras-test", action='store_true',
help="Skip tests with Keras if keras is installed")
# C-Sharp bindings
parser.add_argument(
"--build_csharp", action='store_true',
help="Build C#.Net DLL and NuGet package. This should be only used in CI pipelines. "
"For building C# bindings and packaging them into nuget package use --build_nuget arg.")
parser.add_argument(
"--build_nuget", action='store_true',
help="Build C#.Net DLL and NuGet package on the local machine. "
"Currently only Windows and Linux platforms are supported.")
# Java bindings
parser.add_argument(
"--build_java", action='store_true', help="Build Java bindings.")
# Node.js binding
parser.add_argument(
"--build_nodejs", action='store_true',
help="Build Node.js binding and NPM package.")
# Build a shared lib
parser.add_argument(
"--build_shared_lib", action='store_true',
help="Build a shared library for the ONNXRuntime.")
# Build options
parser.add_argument(
"--cmake_extra_defines", nargs="+",
help="Extra definitions to pass to CMake during build system "
"generation. These are just CMake -D options without the leading -D.")
parser.add_argument(
"--target",
help="Build a specific target, e.g. winml_dll")
parser.add_argument(
"--x86", action='store_true',
help="Create x86 makefiles. Requires --update and no existing cache "
"CMake setup. Delete CMakeCache.txt if needed")
parser.add_argument(
"--arm", action='store_true',
help="Create ARM makefiles. Requires --update and no existing cache "
"CMake setup. Delete CMakeCache.txt if needed")
parser.add_argument(
"--arm64", action='store_true',
help="Create ARM64 makefiles. Requires --update and no existing cache "
"CMake setup. Delete CMakeCache.txt if needed")
parser.add_argument(
"--msvc_toolset", help="MSVC toolset to use. e.g. 14.11")
parser.add_argument("--android", action='store_true', help='Build for Android')
parser.add_argument(
"--android_abi", default="arm64-v8a",
choices=["armeabi-v7a", "arm64-v8a", "x86", "x86_64"],
help="Specify the target Android Application Binary Interface (ABI)")
parser.add_argument("--android_api", type=int, default=27, help='Android API Level, e.g. 21')
parser.add_argument("--android_sdk_path", type=str, help='Path to the Android SDK')
parser.add_argument("--android_ndk_path", default="", help="Path to the Android NDK")
parser.add_argument("--android_cpp_shared", action="store_true",
help="Build with shared libc++ instead of the default static libc++.")
parser.add_argument("--test_binary_size", action="store_true",
help="If enabled, build will fail when the built binary size is larger than the threshold. "
"This only applies to Android Minimal build for now.")
parser.add_argument("--ios", action='store_true', help="build for ios")
parser.add_argument(
"--ios_sysroot", default="",
help="Specify the location name of the macOS platform SDK to be used")
parser.add_argument(
"--ios_toolchain_dir", default="",
help="Path to ios toolchain binaries")
parser.add_argument(
"--ios_toolchain_file", default="",
help="Path to ios toolchain file, "
"or cmake/onnxruntime_ios.toolchain.cmake will be used")
parser.add_argument(
"--xcode_code_signing_team_id", default="",
help="The development team ID used for code signing in Xcode")
parser.add_argument(
"--use_xcode", action='store_true',
help="Use Xcode as cmake generator, this is only supported on MacOS.")
parser.add_argument(
"--osx_arch", default="arm64", choices=["arm64", "x86_64"],
        help="Specify the target architecture for macOS and iOS. This is only supported on MacOS")
parser.add_argument(
"--apple_deploy_target", type=str,
        help="Specify the minimum version of the target platform "
        "(e.g. macOS or iOS). "
        "This is only supported on MacOS")
# Arguments needed by CI
parser.add_argument(
"--cmake_path", default="cmake", help="Path to the CMake program.")
parser.add_argument(
"--ctest_path", default="ctest", help="Path to the CTest program.")
parser.add_argument(
"--skip_submodule_sync", action='store_true', help="Don't do a "
"'git submodule update'. Makes the Update phase faster.")
parser.add_argument(
"--use_vstest", action='store_true',
        help="Use vstest.console.exe for running unit tests.")
parser.add_argument(
"--use_jemalloc", action='store_true', help="Use jemalloc.")
parser.add_argument(
"--use_mimalloc", default=['none'],
choices=['none', 'stl', 'arena', 'all'], help="Use mimalloc.")
parser.add_argument(
"--use_openblas", action='store_true', help="Build with OpenBLAS.")
parser.add_argument(
"--use_dnnl", action='store_true', help="Build with DNNL.")
parser.add_argument(
"--use_mklml", action='store_true', help="Build with MKLML.")
parser.add_argument(
"--use_featurizers", action='store_true',
help="Build with ML Featurizer support.")
parser.add_argument(
"--use_ngraph", action='store_true', help="Build with nGraph.")
parser.add_argument(
"--use_openvino", nargs="?", const="CPU_FP32",
choices=["CPU_FP32", "GPU_FP32", "GPU_FP16", "VAD-M_FP16",
"MYRIAD_FP16", "VAD-F_FP32"],
help="Build with OpenVINO for specific hardware.")
parser.add_argument(
"--use_nnapi", action='store_true', help="Build with NNAPI support.")
parser.add_argument(
"--use_rknpu", action='store_true', help="Build with RKNPU.")
parser.add_argument(
"--use_preinstalled_eigen", action='store_true',
help="Use pre-installed Eigen.")
parser.add_argument("--eigen_path", help="Path to pre-installed Eigen.")
parser.add_argument(
"--use_openmp", action='store_true', help="Build with OpenMP")
parser.add_argument(
"--enable_msinternal", action="store_true",
help="Enable for Microsoft internal builds only.")
parser.add_argument("--llvm_path", help="Path to llvm dir")
parser.add_argument(
"--use_vitisai", action='store_true', help="Build with Vitis-AI")
parser.add_argument(
"--use_nuphar", action='store_true', help="Build with nuphar")
parser.add_argument(
"--use_tensorrt", action='store_true', help="Build with TensorRT")
parser.add_argument(
"--tensorrt_home", help="Path to TensorRT installation dir")
parser.add_argument(
"--use_migraphx", action='store_true', help="Build with MIGraphX")
parser.add_argument(
"--migraphx_home", help="Path to MIGraphX installation dir")
parser.add_argument(
"--use_full_protobuf", action='store_true',
help="Use the full protobuf library")
parser.add_argument(
"--skip_onnx_tests", action='store_true', help="Explicitly disable "
"all onnx related tests. Note: Use --skip_tests to skip all tests.")
parser.add_argument(
"--skip_winml_tests", action='store_true',
help="Explicitly disable all WinML related tests")
parser.add_argument(
"--skip_nodejs_tests", action='store_true',
help="Explicitly disable all Node.js binding tests")
parser.add_argument(
"--enable_msvc_static_runtime", action='store_true',
help="Enable static linking of MSVC runtimes.")
parser.add_argument(
"--enable_language_interop_ops", action='store_true',
        help="Enable operators implemented in languages other than C++")
parser.add_argument(
"--cmake_generator",
choices=['Visual Studio 15 2017', 'Visual Studio 16 2019', 'Ninja'],
default='Visual Studio 15 2017' if is_windows() else None,
help="Specify the generator that CMake invokes. "
"This is only supported on Windows")
parser.add_argument(
"--enable_multi_device_test", action='store_true',
help="Test with multi-device. Mostly used for multi-device GPU")
parser.add_argument(
"--use_dml", action='store_true', help="Build with DirectML.")
parser.add_argument(
"--use_winml", action='store_true', help="Build with WinML.")
parser.add_argument(
"--winml_root_namespace_override", type=str,
help="Specify the namespace that WinML builds into.")
parser.add_argument(
"--use_telemetry", action='store_true',
help="Only official builds can set this flag to enable telemetry.")
parser.add_argument(
"--enable_wcos", action='store_true',
help="Build for Windows Core OS.")
parser.add_argument(
"--enable_windows_store", action='store_true',
help="Build for Windows Store")
parser.add_argument(
"--enable_lto", action='store_true',
help="Enable Link Time Optimization")
parser.add_argument(
"--use_acl", nargs="?", const="ACL_1905",
choices=["ACL_1902", "ACL_1905", "ACL_1908", "ACL_2002"],
help="Build with ACL for ARM architectures.")
parser.add_argument(
"--use_armnn", action='store_true',
help="Enable ArmNN Execution Provider.")
parser.add_argument(
"--armnn_relu", action='store_true',
help="Use the Relu operator implementation from the ArmNN EP.")
parser.add_argument(
"--armnn_bn", action='store_true',
help="Use the Batch Normalization operator implementation from the ArmNN EP.")
parser.add_argument(
"--build_micro_benchmarks", action='store_true',
help="Build ONNXRuntime micro-benchmarks.")
parser.add_argument("--minimal_build", action='store_true',
help="Create a build that only supports ORT format models. "
"See /docs/ONNX_Runtime_Format_Model_Usage.md for more information. "
"RTTI is automatically disabled in a minimal build.")
parser.add_argument("--include_ops_by_model", type=str, help="include ops from model(s) under designated path.")
parser.add_argument("--include_ops_by_config", type=str,
help="include ops from config file. "
"See /docs/Reduced_Operator_Kernel_build.md for more information.")
parser.add_argument("--disable_contrib_ops", action='store_true',
help="Disable contrib ops (reduces binary size)")
parser.add_argument("--disable_ml_ops", action='store_true',
help="Disable traditional ML ops (reduces binary size)")
parser.add_argument("--disable_rtti", action='store_true', help="Disable RTTI (reduces binary size)")
parser.add_argument("--disable_exceptions", action='store_true',
help="Disable exceptions to reduce binary size. Requires --minimal_build.")
parser.add_argument("--disable_ort_format_load", action='store_true',
help='Disable support for loading ORT format models in a non-minimal build.')
return parser.parse_args()
def resolve_executable_path(command_or_path):
executable_path = shutil.which(command_or_path)
if executable_path is None:
raise BuildError("Failed to resolve executable path for "
"'{}'.".format(command_or_path))
return os.path.realpath(executable_path)
def is_windows():
return sys.platform.startswith("win")
def is_macOS():
return sys.platform.startswith("darwin")
def is_linux():
return sys.platform.startswith("linux")
def get_linux_distro():
try:
with open('/etc/os-release', 'r') as f:
dist_info = dict(
line.strip().split('=', 1) for line in f.readlines())
return dist_info.get('NAME', '').strip('"'), dist_info.get(
'VERSION', '').strip('"')
except (IOError, ValueError):
return '', ''
def is_ubuntu_1604():
dist, ver = get_linux_distro()
return dist == 'Ubuntu' and ver.startswith('16.04')
def get_config_build_dir(build_dir, config):
return os.path.join(build_dir, config)
def run_subprocess(args, cwd=None, capture=False, dll_path=None,
shell=False, env={}):
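    # Thin wrapper over subprocess.run with check=True (a non-zero exit raises).
    # dll_path is prepended to PATH on Windows or appended to LD_LIBRARY_PATH
    # elsewhere, and the caller-supplied env dict is merged into a copy of
    # os.environ. Illustrative call:
    #   run_subprocess([sys.executable, 'some_test.py'], cwd=build_dir,
    #                  env={'CUDA_VISIBLE_DEVICES': '0'})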
log.info("Running subprocess in '{0}'\n{1}".format(
cwd or os.getcwd(), args))
my_env = os.environ.copy()
if dll_path:
if is_windows():
my_env["PATH"] = dll_path + os.pathsep + my_env["PATH"]
else:
if "LD_LIBRARY_PATH" in my_env:
my_env["LD_LIBRARY_PATH"] += os.pathsep + dll_path
else:
my_env["LD_LIBRARY_PATH"] = dll_path
stdout, stderr = (subprocess.PIPE, subprocess.STDOUT) if capture else (
None, None)
my_env.update(env)
completed_process = subprocess.run(
args, cwd=cwd, check=True, stdout=stdout, stderr=stderr,
env=my_env, shell=shell)
log.debug("Subprocess completed. Return code=" +
str(completed_process.returncode))
return completed_process
def update_submodules(source_dir):
run_subprocess(["git", "submodule", "sync", "--recursive"], cwd=source_dir)
run_subprocess(["git", "submodule", "update", "--init", "--recursive"],
cwd=source_dir)
def is_docker():
path = '/proc/self/cgroup'
return (
os.path.exists('/.dockerenv') or
os.path.isfile(path) and any('docker' in line for line in open(path))
)
def is_sudo():
return 'SUDO_UID' in os.environ.keys()
def install_apt_package(package):
have = package in str(run_subprocess(
["apt", "list", "--installed", package], capture=True).stdout)
if not have:
if is_sudo():
run_subprocess(['apt-get', 'install', '-y', package])
else:
raise BuildError(package + " APT package missing. Please re-run "
"this script using sudo to install.")
def install_ubuntu_deps(args):
if not (args.enable_pybind or args.use_openblas):
return
if not is_docker():
try:
if args.enable_pybind:
install_apt_package("python3")
if args.use_openblas:
install_apt_package("libopenblas-dev")
except Exception as e:
raise BuildError("Error setting up required APT packages. "
"{}".format(str(e)))
def install_python_deps(numpy_version=""):
dep_packages = ['setuptools', 'wheel', 'pytest']
dep_packages.append('numpy=={}'.format(numpy_version) if numpy_version
else 'numpy>=1.16.6')
dep_packages.append('sympy>=1.1')
dep_packages.append('packaging')
dep_packages.append('cerberus')
run_subprocess([sys.executable, '-m', 'pip', 'install', '--trusted-host',
'files.pythonhosted.org'] + dep_packages)
def install_torch():
run_subprocess([sys.executable, '-m', 'pip', 'install', '--trusted-host',
'files.pythonhosted.org', 'torch===1.5.1+cu101', 'torchvision===0.6.1+cu101',
'-f', 'https://download.pytorch.org/whl/torch_stable.html'])
def check_md5(filename, expected_md5):
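    # Streams the file in 64KB blocks, compares the MD5 digest with the expected
    # value, and deletes the file on a mismatch (presumably so a later download
    # can retry); returns True only when the file exists and the digest matches.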
if not os.path.exists(filename):
return False
hash_md5 = hashlib.md5()
BLOCKSIZE = 1024*64
with open(filename, "rb") as f:
buf = f.read(BLOCKSIZE)
while len(buf) > 0:
hash_md5.update(buf)
buf = f.read(BLOCKSIZE)
hex = hash_md5.hexdigest()
if hex != expected_md5:
log.info('md5 mismatch, expect %s, got %s' % (expected_md5, hex))
os.remove(filename)
return False
return True
def setup_test_data(build_dir, configs):
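    # On Windows this links the local model directory (C:\local\models) into the
    # build tree instead of copying it: 'mklink /D /J <dest> <src>' creates a
    # directory junction, so every config's 'models' directory points at the same
    # data.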
if is_windows():
src_model_dir = os.path.join(build_dir, 'models')
if os.path.exists('C:\\local\\models') and not os.path.exists(
src_model_dir):
log.debug("creating shortcut %s -> %s" % (
'C:\\local\\models', src_model_dir))
run_subprocess(['mklink', '/D', '/J', src_model_dir,
'C:\\local\\models'], shell=True)
for config in configs:
config_build_dir = get_config_build_dir(build_dir, config)
os.makedirs(config_build_dir, exist_ok=True)
dest_model_dir = os.path.join(config_build_dir, 'models')
if os.path.exists('C:\\local\\models') and not os.path.exists(
dest_model_dir):
log.debug("creating shortcut %s -> %s" % (
'C:\\local\\models', dest_model_dir))
run_subprocess(['mklink', '/D', '/J', dest_model_dir,
'C:\\local\\models'], shell=True)
elif os.path.exists(src_model_dir) and not os.path.exists(
dest_model_dir):
log.debug("creating shortcut %s -> %s" % (
src_model_dir, dest_model_dir))
run_subprocess(['mklink', '/D', '/J', dest_model_dir,
src_model_dir], shell=True)
def use_dev_mode(args):
if args.use_acl:
return 'OFF'
if args.use_armnn:
return 'OFF'
if args.ios and is_macOS():
return 'OFF'
return 'ON'
def generate_build_tree(cmake_path, source_dir, build_dir, cuda_home, cudnn_home,
mpi_home, nccl_home, tensorrt_home, migraphx_home,
path_to_protoc_exe, configs, cmake_extra_defines, args, cmake_extra_args):
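    # Translates the parsed build options into -Donnxruntime_* CMake cache
    # variables and runs one CMake generation per requested config, with
    # CMAKE_BUILD_TYPE set to that config and the memleak checker enabled only
    # for Debug configs that don't conflict with it (see the condition below).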
log.info("Generating CMake build tree")
cmake_dir = os.path.join(source_dir, "cmake")
cmake_args = [
cmake_path, cmake_dir,
"-Donnxruntime_RUN_ONNX_TESTS=" + (
"ON" if args.enable_onnx_tests else "OFF"),
"-Donnxruntime_BUILD_WINML_TESTS=" + (
"OFF" if args.skip_winml_tests else "ON"),
"-Donnxruntime_GENERATE_TEST_REPORTS=ON",
"-Donnxruntime_DEV_MODE=" + use_dev_mode(args),
"-DPYTHON_EXECUTABLE=" + sys.executable,
"-Donnxruntime_USE_CUDA=" + ("ON" if args.use_cuda else "OFF"),
"-Donnxruntime_CUDNN_HOME=" + (cudnn_home if args.use_cuda else ""),
"-Donnxruntime_USE_FEATURIZERS=" + (
"ON" if args.use_featurizers else "OFF"),
"-Donnxruntime_CUDA_HOME=" + (cuda_home if args.use_cuda else ""),
"-Donnxruntime_USE_JEMALLOC=" + ("ON" if args.use_jemalloc else "OFF"),
"-Donnxruntime_USE_MIMALLOC_STL_ALLOCATOR=" + (
"ON" if args.use_mimalloc == "stl" or
args.use_mimalloc == "all" else "OFF"),
"-Donnxruntime_USE_MIMALLOC_ARENA_ALLOCATOR=" + (
"ON" if args.use_mimalloc == "arena" or
args.use_mimalloc == "all" else "OFF"),
"-Donnxruntime_ENABLE_PYTHON=" + (
"ON" if args.enable_pybind else "OFF"),
"-Donnxruntime_BUILD_CSHARP=" + ("ON" if args.build_csharp else "OFF"),
"-Donnxruntime_BUILD_JAVA=" + ("ON" if args.build_java else "OFF"),
"-Donnxruntime_BUILD_NODEJS=" + ("ON" if args.build_nodejs else "OFF"),
"-Donnxruntime_BUILD_SHARED_LIB=" + (
"ON" if args.build_shared_lib else "OFF"),
"-Donnxruntime_USE_EIGEN_FOR_BLAS=" + (
"OFF" if args.use_openblas else "ON"),
"-Donnxruntime_USE_OPENBLAS=" + ("ON" if args.use_openblas else "OFF"),
"-Donnxruntime_USE_DNNL=" + ("ON" if args.use_dnnl else "OFF"),
"-Donnxruntime_USE_MKLML=" + ("ON" if args.use_mklml else "OFF"),
"-Donnxruntime_USE_NGRAPH=" + ("ON" if args.use_ngraph else "OFF"),
"-Donnxruntime_USE_NNAPI_BUILTIN=" + ("ON" if args.use_nnapi else "OFF"),
"-Donnxruntime_USE_RKNPU=" + ("ON" if args.use_rknpu else "OFF"),
"-Donnxruntime_USE_OPENMP=" + (
"ON" if args.use_openmp and not (
args.use_nnapi or (args.use_mklml and (is_macOS() or is_windows())) or args.use_ngraph or
args.android or (args.ios and is_macOS())
or args.use_rknpu)
else "OFF"),
"-Donnxruntime_USE_TVM=" + ("ON" if args.use_nuphar else "OFF"),
"-Donnxruntime_USE_LLVM=" + ("ON" if args.use_nuphar else "OFF"),
"-Donnxruntime_ENABLE_MICROSOFT_INTERNAL=" + (
"ON" if args.enable_msinternal else "OFF"),
"-Donnxruntime_USE_VITISAI=" + ("ON" if args.use_vitisai else "OFF"),
"-Donnxruntime_USE_NUPHAR=" + ("ON" if args.use_nuphar else "OFF"),
"-Donnxruntime_USE_TENSORRT=" + ("ON" if args.use_tensorrt else "OFF"),
"-Donnxruntime_TENSORRT_HOME=" + (
tensorrt_home if args.use_tensorrt else ""),
"-Donnxruntime_USE_MIGRAPHX=" + ("ON" if args.use_migraphx else "OFF"),
"-Donnxruntime_MIGRAPHX_HOME=" + (migraphx_home if args.use_migraphx else ""),
"-Donnxruntime_CROSS_COMPILING=" + (
"ON" if args.arm64 or args.arm else "OFF"),
"-Donnxruntime_DISABLE_CONTRIB_OPS=" + ("ON" if args.disable_contrib_ops else "OFF"),
"-Donnxruntime_DISABLE_ML_OPS=" + ("ON" if args.disable_ml_ops else "OFF"),
"-Donnxruntime_DISABLE_RTTI=" + ("ON" if args.disable_rtti else "OFF"),
"-Donnxruntime_DISABLE_EXCEPTIONS=" + ("ON" if args.disable_exceptions else "OFF"),
"-Donnxruntime_DISABLE_ORT_FORMAT_LOAD=" + ("ON" if args.disable_ort_format_load else "OFF"),
"-Donnxruntime_MINIMAL_BUILD=" + ("ON" if args.minimal_build else "OFF"),
"-Donnxruntime_REDUCED_OPS_BUILD=" + (
"ON" if args.include_ops_by_config or args.include_ops_by_model else "OFF"),
"-Donnxruntime_MSVC_STATIC_RUNTIME=" + (
"ON" if args.enable_msvc_static_runtime else "OFF"),
"-Donnxruntime_ENABLE_LANGUAGE_INTEROP_OPS=" + (
"ON" if args.enable_language_interop_ops else "OFF"),
"-Donnxruntime_USE_DML=" + ("ON" if args.use_dml else "OFF"),
"-Donnxruntime_USE_WINML=" + ("ON" if args.use_winml else "OFF"),
"-Donnxruntime_USE_TELEMETRY=" + (
"ON" if args.use_telemetry else "OFF"),
"-Donnxruntime_ENABLE_LTO=" + ("ON" if args.enable_lto else "OFF"),
"-Donnxruntime_USE_ACL=" + ("ON" if args.use_acl else "OFF"),
"-Donnxruntime_USE_ACL_1902=" + (
"ON" if args.use_acl == "ACL_1902" else "OFF"),
"-Donnxruntime_USE_ACL_1905=" + (
"ON" if args.use_acl == "ACL_1905" else "OFF"),
"-Donnxruntime_USE_ACL_1908=" + (
"ON" if args.use_acl == "ACL_1908" else "OFF"),
"-Donnxruntime_USE_ACL_2002=" + (
"ON" if args.use_acl == "ACL_2002" else "OFF"),
"-Donnxruntime_USE_ARMNN=" + (
"ON" if args.use_armnn else "OFF"),
"-Donnxruntime_ARMNN_RELU_USE_CPU=" + (
"OFF" if args.armnn_relu else "ON"),
"-Donnxruntime_ARMNN_BN_USE_CPU=" + (
"OFF" if args.armnn_bn else "ON"),
"-Donnxruntime_ENABLE_NVTX_PROFILE=" + (
"ON" if args.enable_nvtx_profile else "OFF"),
"-Donnxruntime_ENABLE_TRAINING=" + (
"ON" if args.enable_training else "OFF"),
"-Donnxruntime_USE_HOROVOD=" + (
"ON" if args.use_horovod else "OFF"),
"-Donnxruntime_BUILD_BENCHMARKS=" + (
"ON" if args.build_micro_benchmarks else "OFF")
]
if mpi_home and os.path.exists(mpi_home):
cmake_args += ["-Donnxruntime_MPI_HOME=" + mpi_home]
if nccl_home and os.path.exists(nccl_home):
cmake_args += ["-Donnxruntime_NCCL_HOME=" + nccl_home]
if args.winml_root_namespace_override:
cmake_args += ["-Donnxruntime_WINML_NAMESPACE_OVERRIDE=" +
args.winml_root_namespace_override]
if args.use_openvino:
cmake_args += ["-Donnxruntime_USE_OPENVINO=ON",
"-Donnxruntime_USE_OPENVINO_MYRIAD=" + (
"ON" if args.use_openvino == "MYRIAD_FP16" else "OFF"),
"-Donnxruntime_USE_OPENVINO_GPU_FP32=" + (
"ON" if args.use_openvino == "GPU_FP32" else "OFF"),
"-Donnxruntime_USE_OPENVINO_GPU_FP16=" + (
"ON" if args.use_openvino == "GPU_FP16" else "OFF"),
"-Donnxruntime_USE_OPENVINO_CPU_FP32=" + (
"ON" if args.use_openvino == "CPU_FP32" else "OFF"),
"-Donnxruntime_USE_OPENVINO_VAD_M=" + (
"ON" if args.use_openvino == "VAD-M_FP16" else "OFF"),
"-Donnxruntime_USE_OPENVINO_VAD_F=" + (
"ON" if args.use_openvino == "VAD-F_FP32" else "OFF"),
"-Donnxruntime_USE_OPENVINO_BINARY=" + (
"ON" if args.use_openvino else "OFF")]
if not is_windows():
if args.use_cuda:
cmake_args += [
"-Donnxruntime_USE_FULL_PROTOBUF=ON"]
if (args.use_full_protobuf or args.use_ngraph or args.use_tensorrt or
args.use_openvino or args.use_vitisai or args.gen_doc):
cmake_args += [
"-Donnxruntime_USE_FULL_PROTOBUF=ON",
"-DProtobuf_USE_STATIC_LIBS=ON"
]
if args.use_nuphar and args.llvm_path is not None:
cmake_args += ["-DLLVM_DIR=%s" % args.llvm_path]
if args.use_cuda and not is_windows():
nvml_stub_path = cuda_home + "/lib64/stubs"
cmake_args += ["-DCUDA_CUDA_LIBRARY=" + nvml_stub_path]
if args.use_preinstalled_eigen:
cmake_args += ["-Donnxruntime_USE_PREINSTALLED_EIGEN=ON",
"-Deigen_SOURCE_PATH=" + args.eigen_path]
if args.android:
cmake_args += [
"-DCMAKE_TOOLCHAIN_FILE=" + args.android_ndk_path +
"/build/cmake/android.toolchain.cmake",
"-DANDROID_PLATFORM=android-" + str(args.android_api),
"-DANDROID_ABI=" + str(args.android_abi)
]
if args.android_cpp_shared:
cmake_args += ["-DANDROID_STL=c++_shared"]
if args.ios:
if is_macOS():
needed_args = [
args.use_xcode,
args.ios_sysroot,
args.apple_deploy_target,
]
arg_names = [
"--use_xcode " +
"<need use xcode to cross build iOS on MacOS>",
"--ios_sysroot " +
"<the location or name of the macOS platform SDK>",
"--apple_deploy_target " +
"<the minimum version of the target platform>",
]
if not all(needed_args):
raise BuildError(
"iOS build on MacOS canceled due to missing arguments: " +
', '.join(
val for val, cond in zip(arg_names, needed_args)
if not cond))
cmake_args += [
"-DCMAKE_SYSTEM_NAME=iOS",
"-Donnxruntime_BUILD_SHARED_LIB=ON",
"-DCMAKE_OSX_SYSROOT=" + args.ios_sysroot,
"-DCMAKE_OSX_ARCHITECTURES=" + args.osx_arch,
"-DCMAKE_OSX_DEPLOYMENT_TARGET=" + args.apple_deploy_target,
"-Dprotobuf_BUILD_PROTOC_BINARIES=OFF",
"-DCMAKE_TOOLCHAIN_FILE=" + (
args.ios_toolchain_file if args.ios_toolchain_file
else "../cmake/onnxruntime_ios.toolchain.cmake")
]
if args.xcode_code_signing_team_id:
cmake_args += ["-DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=" + args.xcode_code_signing_team_id]
else:
needed_args = [
args.ios_sysroot,
args.arm64 or args.arm,
args.ios_toolchain_dir
]
arg_names = [
"--ios_sysroot <path to sysroot>",
"--arm or --arm64",
"--ios_toolchain_dir <path to toolchain>"
]
if not all(needed_args):
raise BuildError(
"iOS build canceled due to missing arguments: " +
', '.join(
val for val, cond in zip(arg_names, needed_args)
if not cond))
compilers = sorted(
glob.glob(args.ios_toolchain_dir + "/bin/*-clang*"))
os.environ["PATH"] = os.path.join(
args.ios_toolchain_dir, "bin") + os.pathsep + os.environ.get(
"PATH", "")
os.environ["LD_LIBRARY_PATH"] = os.path.join(
args.ios_toolchain_dir, "/lib") + os.pathsep + os.environ.get(
"LD_LIBRARY_PATH", "")
if len(compilers) != 2:
raise BuildError(
"error identifying compilers in ios_toolchain_dir")
cmake_args += [
"-DCMAKE_OSX_ARCHITECTURES=" +
("arm64" if args.arm64 else "arm"),
"-DCMAKE_SYSTEM_NAME=iOSCross",
"-Donnxruntime_BUILD_UNIT_TESTS=OFF",
"-DCMAKE_OSX_SYSROOT=" + args.ios_sysroot,
"-DCMAKE_C_COMPILER=" + compilers[0],
"-DCMAKE_CXX_COMPILER=" + compilers[1]
]
if path_to_protoc_exe:
cmake_args += [
"-DONNX_CUSTOM_PROTOC_EXECUTABLE=%s" % path_to_protoc_exe]
if args.fuzz_testing:
if not (args.build_shared_lib and
is_windows() and
args.cmake_generator == 'Visual Studio 16 2019' and
args.use_full_protobuf):
            raise BuildError(
                "Fuzz testing has only been tested with the build_shared_lib option using MSVC on Windows")
cmake_args += [
"-Donnxruntime_BUILD_UNIT_TESTS=ON",
"-Donnxruntime_FUZZ_TEST=ON",
"-Donnxruntime_USE_FULL_PROTOBUF=ON"]
if args.gen_doc:
cmake_args += ["-Donnxruntime_PYBIND_EXPORT_OPSCHEMA=ON"]
else:
cmake_args += ["-Donnxruntime_PYBIND_EXPORT_OPSCHEMA=OFF"]
cmake_args += ["-D{}".format(define) for define in cmake_extra_defines]
cmake_args += cmake_extra_args
build_number = os.getenv('Build_BuildNumber')
source_version = os.getenv('Build_SourceVersion')
if build_number and source_version:
build_matches = re.fullmatch(
r"(\d\d)(\d\d)(\d\d)(\d\d)\.(\d+)", build_number)
if build_matches:
YY = build_matches.group(2)
MM = build_matches.group(3)
DD = build_matches.group(4)
with open(os.path.join(source_dir, 'VERSION_NUMBER')) as f:
first_line = f.readline()
ort_version_matches = re.match(r"(\d+).(\d+)", first_line)
if not ort_version_matches:
                    raise BuildError("Couldn't read version from the VERSION_NUMBER file")
ort_major = ort_version_matches.group(1)
ort_minor = ort_version_matches.group(2)
# Example (BuildNumber: 191101-2300.1.master,
# SourceVersion: 0bce7ae6755c792eda558e5d27ded701707dc404)
# MajorPart = 1
# MinorPart = 0
# BuildPart = 1911
# PrivatePart = 123
# String = 191101-2300.1.master.0bce7ae
cmake_args += [
"-DVERSION_MAJOR_PART={}".format(ort_major),
"-DVERSION_MINOR_PART={}".format(ort_minor),
"-DVERSION_BUILD_PART={}".format(YY),
"-DVERSION_PRIVATE_PART={}{}".format(MM, DD),
"-DVERSION_STRING={}.{}.{}.{}".format(
ort_major, ort_minor, build_number,
source_version[0:7])
]
for config in configs:
config_build_dir = get_config_build_dir(build_dir, config)
os.makedirs(config_build_dir, exist_ok=True)
if args.use_nuphar:
os.environ["PATH"] = os.path.join(
config_build_dir, "external", "tvm",
config) + os.pathsep + os.path.dirname(sys.executable) + os.pathsep + os.environ["PATH"]
run_subprocess(
cmake_args + [
"-Donnxruntime_ENABLE_MEMLEAK_CHECKER=" +
("ON" if config.lower() == 'debug' and not args.use_nuphar and not
args.use_ngraph and not args.use_openvino and not
args.enable_msvc_static_runtime
else "OFF"), "-DCMAKE_BUILD_TYPE={}".format(config)],
cwd=config_build_dir)
def clean_targets(cmake_path, build_dir, configs):
for config in configs:
log.info("Cleaning targets for %s configuration", config)
build_dir2 = get_config_build_dir(build_dir, config)
cmd_args = [cmake_path,
"--build", build_dir2,
"--config", config,
"--target", "clean"]
run_subprocess(cmd_args)
def build_targets(args, cmake_path, build_dir, configs, parallel, target=None):
for config in configs:
log.info("Building targets for %s configuration", config)
build_dir2 = get_config_build_dir(build_dir, config)
cmd_args = [cmake_path,
"--build", build_dir2,
"--config", config]
if target:
cmd_args.extend(['--target', target])
build_tool_args = []
if parallel:
num_cores = str(multiprocessing.cpu_count())
if is_windows() and args.cmake_generator != 'Ninja':
build_tool_args += [
"/maxcpucount:" + num_cores,
# if nodeReuse is true, msbuild processes will stay around for a bit after the build completes
"/nodeReuse:False",
]
elif (is_macOS() and args.use_xcode):
# CMake will generate correct build tool args for Xcode
cmd_args += ["--parallel", num_cores]
elif args.cmake_generator != 'Ninja':
build_tool_args += ["-j" + num_cores]
if build_tool_args:
cmd_args += ["--"]
cmd_args += build_tool_args
env = {}
if args.android:
env['ANDROID_SDK_ROOT'] = args.android_sdk_path
run_subprocess(cmd_args, env=env)
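# Illustrative sketch only (not called anywhere in this script): the shape of the
# command that build_targets assembles for a hypothetical 8-core parallel build
# with a Visual Studio generator. The cmake path and build directory are placeholders.
def _example_parallel_build_command():
    return ["cmake", "--build", "build/Windows/RelWithDebInfo",
            "--config", "RelWithDebInfo",
            "--", "/maxcpucount:8", "/nodeReuse:False"]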
def add_dir_if_exists(directory, dir_list):
if os.path.isdir(directory):
dir_list.append(directory)
def setup_cuda_vars(args):
cuda_home = ""
cudnn_home = ""
if args.use_cuda:
cuda_home = args.cuda_home if args.cuda_home else os.getenv(
"CUDA_HOME")
cudnn_home = args.cudnn_home if args.cudnn_home else os.getenv(
"CUDNN_HOME")
cuda_home_valid = (cuda_home is not None and os.path.exists(cuda_home))
cudnn_home_valid = (cudnn_home is not None and os.path.exists(
cudnn_home))
if not cuda_home_valid or not cudnn_home_valid:
raise BuildError(
"cuda_home and cudnn_home paths must be specified and valid.",
"cuda_home='{}' valid={}. cudnn_home='{}' valid={}"
.format(
cuda_home, cuda_home_valid, cudnn_home, cudnn_home_valid))
return cuda_home, cudnn_home
def setup_tensorrt_vars(args):
tensorrt_home = ""
if args.use_tensorrt:
tensorrt_home = (args.tensorrt_home if args.tensorrt_home
else os.getenv("TENSORRT_HOME"))
tensorrt_home_valid = (tensorrt_home is not None and
os.path.exists(tensorrt_home))
if not tensorrt_home_valid:
raise BuildError(
"tensorrt_home paths must be specified and valid.",
"tensorrt_home='{}' valid={}."
.format(tensorrt_home, tensorrt_home_valid))
# Set maximum workspace size in byte for
# TensorRT (1GB = 1073741824 bytes).
os.environ["ORT_TENSORRT_MAX_WORKSPACE_SIZE"] = "1073741824"
# Set maximum number of iterations to detect unsupported nodes
# and partition the models for TensorRT.
os.environ["ORT_TENSORRT_MAX_PARTITION_ITERATIONS"] = "1000"
# Set minimum subgraph node size in graph partitioning
# for TensorRT.
os.environ["ORT_TENSORRT_MIN_SUBGRAPH_SIZE"] = "1"
# Set FP16 flag
os.environ["ORT_TENSORRT_FP16_ENABLE"] = "0"
return tensorrt_home
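# Illustrative sketch only (not called anywhere in this script): overriding one of
# the TensorRT defaults exported above before running tests. The 2 GB value is a
# made-up example, not a recommendation from this script.
def _example_override_tensorrt_workspace(num_bytes=2 * 1024 ** 3):
    os.environ["ORT_TENSORRT_MAX_WORKSPACE_SIZE"] = str(num_bytes)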
def setup_migraphx_vars(args):
migraphx_home = None
if (args.use_migraphx):
print("migraphx_home = {}".format(args.migraphx_home))
migraphx_home = args.migraphx_home or os.getenv("MIGRAPHX_HOME") or None
migraphx_home_not_valid = (migraphx_home and not os.path.exists(migraphx_home))
if (migraphx_home_not_valid):
raise BuildError("migraphx_home paths must be specified and valid.",
"migraphx_home='{}' valid={}."
.format(migraphx_home, migraphx_home_not_valid))
return migraphx_home or ''
def setup_dml_build(args, cmake_path, build_dir, configs):
if args.use_dml:
for config in configs:
# Run the RESTORE_PACKAGES target to perform the initial
# NuGet setup.
cmd_args = [cmake_path,
"--build", get_config_build_dir(build_dir, config),
"--config", config,
"--target", "RESTORE_PACKAGES"]
run_subprocess(cmd_args)
def adb_push(src, dest, **kwargs):
return run_subprocess(['adb', 'push', src, dest], **kwargs)
def adb_shell(*args, **kwargs):
return run_subprocess(['adb', 'shell', *args], **kwargs)
def run_android_tests(args, source_dir, config, cwd):
if args.android_abi == 'x86_64':
run_subprocess(os.path.join(
source_dir, 'tools', 'ci_build', 'github', 'android',
'start_android_emulator.sh'))
adb_push('testdata', '/data/local/tmp/', cwd=cwd)
adb_push(
os.path.join(source_dir, 'cmake', 'external', 'onnx', 'onnx', 'backend', 'test'),
'/data/local/tmp/', cwd=cwd)
adb_push('onnxruntime_test_all', '/data/local/tmp/', cwd=cwd)
adb_push('onnx_test_runner', '/data/local/tmp/', cwd=cwd)
adb_shell('cd /data/local/tmp && /data/local/tmp/onnxruntime_test_all')
if args.use_nnapi:
adb_shell('cd /data/local/tmp && /data/local/tmp/onnx_test_runner -e nnapi /data/local/tmp/test')
else:
adb_shell('cd /data/local/tmp && /data/local/tmp/onnx_test_runner /data/local/tmp/test')
# run shared_lib_test if necessary
if args.build_shared_lib:
adb_push('libonnxruntime.so', '/data/local/tmp/', cwd=cwd)
adb_push('onnxruntime_shared_lib_test', '/data/local/tmp/', cwd=cwd)
adb_shell(
'cd /data/local/tmp && ' +
'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp && ' +
'/data/local/tmp/onnxruntime_shared_lib_test')
elif args.android_abi == 'arm64-v8a':
        # For the Android arm64 abi we only verify the size of the binary generated by the minimal build config.
        # The build fails if the shared lib size is larger than the threshold.
if args.minimal_build and config == 'MinSizeRel' and args.build_shared_lib and args.test_binary_size:
# set current size limit to 1100KB
bin_size_threshold = 1100000
bin_actual_size = os.path.getsize(os.path.join(cwd, 'libonnxruntime.so'))
log.info('Android arm64 minsizerel libonnxruntime.so size [' + str(bin_actual_size) + 'B]')
# Write the binary size to a file for uploading later
with open(os.path.join(cwd, 'binary_size_data.txt'), 'w') as file:
file.writelines([
'os,arch,build_config,size\n',
'android,arm64-v8a,minimal-baseline,' + str(bin_actual_size) + '\n'
])
if bin_actual_size > bin_size_threshold:
raise BuildError('Android arm64 minsizerel libonnxruntime.so size [' + str(bin_actual_size) +
'B] is bigger than threshold [' + str(bin_size_threshold) + 'B]')
def run_ios_tests(args, source_dir, config, cwd):
cpr = run_subprocess(["xcodebuild", "test", "-project", "./onnxruntime.xcodeproj",
"-configuration", config,
"-scheme", "onnxruntime_test_all_xc", "-destination",
"platform=iOS Simulator,OS=latest,name=iPhone SE (2nd generation)"], cwd=cwd)
if cpr.returncode == 0:
cpr = run_subprocess(["xcodebuild", "test", "-project", "./onnxruntime.xcodeproj",
"-configuration", config,
"-scheme", "onnxruntime_shared_lib_test_xc", "-destination",
"platform=iOS Simulator,OS=latest,name=iPhone SE (2nd generation)"], cwd=cwd)
cpr.check_returncode()
def run_orttraining_test_orttrainer_frontend_separately(cwd):
class TestNameCollecterPlugin:
def __init__(self):
self.collected = set()
def pytest_collection_modifyitems(self, items):
for item in items:
print('item.name: ', item.name)
test_name = item.name
start = test_name.find('[')
if start > 0:
test_name = test_name[:start]
self.collected.add(test_name)
import pytest
plugin = TestNameCollecterPlugin()
test_script_filename = os.path.join(cwd, "orttraining_test_orttrainer_frontend.py")
pytest.main(['--collect-only', test_script_filename], plugins=[plugin])
for test_name in plugin.collected:
run_subprocess([
sys.executable, '-m', 'pytest',
'orttraining_test_orttrainer_frontend.py', '-v', '-k', test_name], cwd=cwd)
def run_training_python_frontend_tests(cwd):
run_subprocess([sys.executable, 'onnxruntime_test_ort_trainer.py'], cwd=cwd)
run_subprocess([sys.executable, 'onnxruntime_test_training_unit_tests.py'], cwd=cwd)
run_subprocess([
sys.executable, 'orttraining_test_transformers.py',
'BertModelTest.test_for_pretraining_full_precision_list_input'], cwd=cwd)
run_subprocess([
sys.executable, 'orttraining_test_transformers.py',
'BertModelTest.test_for_pretraining_full_precision_dict_input'], cwd=cwd)
run_subprocess([
sys.executable, 'orttraining_test_transformers.py',
'BertModelTest.test_for_pretraining_full_precision_list_and_dict_input'], cwd=cwd)
# TODO: use run_orttraining_test_orttrainer_frontend_separately to work around a sporadic segfault.
# shall revert to run_subprocess call once the segfault issue is resolved.
run_orttraining_test_orttrainer_frontend_separately(cwd)
# run_subprocess([sys.executable, '-m', 'pytest', '-sv', 'orttraining_test_orttrainer_frontend.py'], cwd=cwd)
run_subprocess([sys.executable, '-m', 'pytest', '-sv', 'orttraining_test_orttrainer_bert_toy_onnx.py'], cwd=cwd)
def run_training_python_frontend_e2e_tests(cwd):
# frontend tests are to be added here:
log.info("Running python frontend e2e tests.")
import torch
ngpus = torch.cuda.device_count()
if ngpus > 1:
bert_pretrain_script = 'orttraining_run_bert_pretrain.py'
        log.debug('RUN: mpirun -n {} -x NCCL_DEBUG=INFO {} {} {}'.format(
            ngpus, sys.executable, bert_pretrain_script, 'ORTBertPretrainTest.test_pretrain_throughput'))
run_subprocess([
'mpirun', '-n', str(ngpus), '-x', 'NCCL_DEBUG=INFO', sys.executable,
bert_pretrain_script, 'ORTBertPretrainTest.test_pretrain_throughput'], cwd=cwd)
        log.debug('RUN: mpirun -n {} -x NCCL_DEBUG=INFO {} {} {}'.format(
            ngpus, sys.executable, bert_pretrain_script, 'ORTBertPretrainTest.test_pretrain_convergence'))
run_subprocess([
'mpirun', '-n', str(ngpus), '-x', 'NCCL_DEBUG=INFO', sys.executable,
bert_pretrain_script, 'ORTBertPretrainTest.test_pretrain_convergence'], cwd=cwd)
# a long run
    log.debug('RUN: mpirun -n {} -x NCCL_DEBUG=INFO {} {}'.format(
        ngpus, sys.executable, bert_pretrain_script))
run_subprocess([
'mpirun', '-n', str(ngpus), '-x', 'NCCL_DEBUG=INFO', sys.executable,
bert_pretrain_script], cwd=cwd)
log.debug('RUN: mpirun -n {} {} orttraining_run_glue.py'.format(ngpus, sys.executable))
run_subprocess([
'mpirun', '-n', str(ngpus), '-x', 'NCCL_DEBUG=INFO', sys.executable, 'orttraining_run_glue.py'], cwd=cwd)
    # With orttraining_run_glue.py:
    # 1. we force a single GPU (via CUDA_VISIBLE_DEVICES) for the fine-tune tests.
    # 2. the tests need to run separately (fp16 and full-precision runs should
    #    not be mixed; this still needs to be investigated).
run_subprocess(
[sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_bert_with_mrpc', '-v'],
cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
run_subprocess(
[sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_bert_fp16_with_mrpc', '-v'],
cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
run_subprocess(
[sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_roberta_with_mrpc', '-v'],
cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
run_subprocess(
[sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_roberta_fp16_with_mrpc', '-v'],
cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
run_subprocess(
[sys.executable, 'orttraining_run_multiple_choice.py', 'ORTMultipleChoiceTest.test_bert_fp16_with_swag', '-v'],
cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
run_subprocess([sys.executable, 'onnxruntime_test_ort_trainer_with_mixed_precision.py'], cwd=cwd)
run_subprocess([
sys.executable, 'orttraining_test_transformers.py',
'BertModelTest.test_for_pretraining_mixed_precision'], cwd=cwd)
# this test is not stable. need to skip to unblock release
# run_subprocess([
# sys.executable, 'orttraining_test_transformers.py',
# 'BertModelTest.test_for_pretraining_mixed_precision_with_gradient_accumulation'], cwd=cwd)
def run_training_pipeline_e2e_tests(cwd):
# pipeline tests are to be added here:
log.info("Running pipeline e2e tests.")
import torch
ngpus = torch.cuda.device_count()
command = ['./onnxruntime_training_bert',
'--ort_log_severity', '1',
'--optimizer=Lamb',
'--learning_rate=3e-3',
'--max_seq_length=128',
'--max_predictions_per_seq=20',
'--warmup_ratio=0.2843',
'--warmup_mode=Poly',
'--model_name', '/bert_ort/bert_models/nv/bert-large/' +
'bert-large-uncased_L_24_H_1024_A_16_V_30528_S_512_Dp_0.1_optimized_layer_norm_opset12',
'--train_data_dir', '/bert_data/128/books_wiki_en_corpus/train',
'--test_data_dir', '/bert_data/128/books_wiki_en_corpus/test',
'--display_loss_steps', '1',
'--use_nccl',
'--use_mixed_precision',
'--allreduce_in_fp16',
'--gradient_accumulation_steps', '48',
'--num_train_steps', '96',
'--train_batch_size', '50']
# TODO: currently the CI machine only has 4 GPUs for parallel tests.
    # Fill in more pipeline partition options when the machine has different GPU counts.
if ngpus != 4:
return
# Test 4-way pipeline parallel
pp_command = ['mpirun', '-n', str(ngpus)] + command + ['--pipeline_parallel_size', '4', '--cut_group_info',
'1149:407-1219/1341/1463/1585/1707/1829,' +
'1881:407-1951/2073/2195/2317/2439/2561,' +
'2613:407-2683/2805/2927/3049/3171/3293']
command_str = ', '.join(pp_command)
log.debug('RUN: ' + command_str)
run_subprocess(pp_command, cwd=cwd)
# Test 2-way data parallel + 2-way pipeline parallel
pp_dp_command = ['mpirun', '-n', str(ngpus)]
pp_dp_command = pp_dp_command + command
pp_dp_command = pp_dp_command + ['--data_parallel_size', '2', '--pipeline_parallel_size',
'2', '--cut_group_info',
'1881:407-1951/2073/2195/2317/2439/2561/2683/2805/2927/3049/3171/3293']
command_str = ', '.join(pp_dp_command)
log.debug('RUN: ' + command_str)
run_subprocess(pp_dp_command, cwd=cwd)
def run_onnxruntime_tests(args, source_dir, ctest_path, build_dir, configs):
for config in configs:
log.info("Running tests for %s configuration", config)
cwd = get_config_build_dir(build_dir, config)
if args.enable_training and args.use_cuda and args.enable_training_python_frontend_e2e_tests:
# run frontend tests for orttraining-linux-gpu-frontend_test-ci-pipeline.
# this is not a PR merge test so skip other non-frontend tests.
run_training_python_frontend_e2e_tests(cwd=cwd)
run_training_python_frontend_tests(cwd=cwd)
continue
if args.enable_training and args.use_cuda and args.enable_training_pipeline_e2e_tests:
# run distributed pipeline test on 4-GPU CI machine.
run_training_pipeline_e2e_tests(cwd=cwd)
continue
if args.android:
run_android_tests(args, source_dir, config, cwd)
continue
elif args.ios:
run_ios_tests(args, source_dir, config, cwd)
continue
dll_path_list = []
if args.use_nuphar:
dll_path_list.append(os.path.join(
build_dir, config, "external", "tvm", config))
if args.use_tensorrt:
dll_path_list.append(os.path.join(args.tensorrt_home, 'lib'))
if args.use_mklml:
dll_path_list.append(os.path.join(build_dir, config, "mklml", "src", "project_mklml", "lib"))
if not is_windows():
# A workaround for making libonnxruntime_providers_shared.so loadable.
dll_path_list.append(os.path.join(build_dir, config))
dll_path = None
if len(dll_path_list) > 0:
dll_path = os.pathsep.join(dll_path_list)
if ctest_path is None:
# Get the "Google Test Adapter" for vstest.
if not os.path.exists(os.path.join(cwd,
'googletestadapter.0.17.1')):
run_subprocess(
['nuget.exe', 'restore',
os.path.join(source_dir, 'packages.config'),
'-ConfigFile', os.path.join(source_dir, 'NuGet.config'),
'-PackagesDirectory', cwd])
cwd2 = os.path.join(cwd, config)
executables = ['onnxruntime_test_all.exe']
if args.build_shared_lib:
executables.append('onnxruntime_shared_lib_test.exe')
executables.append('onnxruntime_global_thread_pools_test.exe')
run_subprocess(
['vstest.console.exe', '--parallel',
'--TestAdapterPath:..\\googletestadapter.0.17.1\\build\\_common', # noqa
'/Logger:trx', '/Enablecodecoverage', '/Platform:x64',
"/Settings:%s" % os.path.join(
source_dir, 'cmake\\codeconv.runsettings')] + executables,
cwd=cwd2, dll_path=dll_path)
else:
ctest_cmd = [ctest_path, "--build-config", config, "--verbose", "--timeout", "3600"]
run_subprocess(ctest_cmd, cwd=cwd, dll_path=dll_path)
if args.enable_pybind:
# Disable python tests for TensorRT because many tests are
# not supported yet.
if args.use_tensorrt:
return
            # Disable python tests in a reduced build as we don't know which
            # ops have been included and which models can run.
if args.include_ops_by_model or args.include_ops_by_config or args.minimal_build:
return
if is_windows():
cwd = os.path.join(cwd, config)
run_subprocess([sys.executable, 'onnxruntime_test_python.py'], cwd=cwd, dll_path=dll_path)
if args.enable_symbolic_shape_infer_tests:
run_subprocess([sys.executable, 'onnxruntime_test_python_symbolic_shape_infer.py'],
cwd=cwd, dll_path=dll_path)
if args.use_cuda:
log.info("Testing IOBinding feature")
run_subprocess([sys.executable, 'onnxruntime_test_python_iobinding.py'], cwd=cwd, dll_path=dll_path)
if not args.disable_ml_ops:
run_subprocess([sys.executable, 'onnxruntime_test_python_mlops.py'], cwd=cwd, dll_path=dll_path)
if args.enable_training and args.use_cuda:
# run basic frontend tests
run_training_python_frontend_tests(cwd=cwd)
try:
import onnx # noqa
onnx_test = True
except ImportError as error:
log.exception(error)
log.warning("onnx is not installed. The ONNX tests will be skipped.")
onnx_test = False
if onnx_test:
run_subprocess([sys.executable, 'onnxruntime_test_python_backend.py'], cwd=cwd, dll_path=dll_path)
if not args.disable_ml_ops:
run_subprocess([sys.executable, 'onnxruntime_test_python_backend_mlops.py'],
cwd=cwd, dll_path=dll_path)
run_subprocess([sys.executable,
os.path.join(source_dir, 'onnxruntime', 'test', 'onnx', 'gen_test_models.py'),
'--output_dir', 'test_models'], cwd=cwd)
if not args.skip_onnx_tests:
run_subprocess([os.path.join(cwd, 'onnx_test_runner'), 'test_models'], cwd=cwd)
if config != 'Debug':
run_subprocess([sys.executable, 'onnx_backend_test_series.py'], cwd=cwd, dll_path=dll_path)
if not args.skip_keras_test:
try:
import onnxmltools # noqa
import keras # noqa
onnxml_test = True
except ImportError:
log.warning(
"onnxmltools and keras are not installed. "
"The keras tests will be skipped.")
onnxml_test = False
if onnxml_test:
run_subprocess(
[sys.executable, 'onnxruntime_test_python_keras.py'],
cwd=cwd, dll_path=dll_path)
def nuphar_run_python_tests(build_dir, configs):
for config in configs:
if config == 'Debug':
continue
cwd = get_config_build_dir(build_dir, config)
if is_windows():
cwd = os.path.join(cwd, config)
dll_path = os.path.join(build_dir, config, "external", "tvm", config)
# install onnx for shape inference in testing Nuphar scripts
# this needs to happen after onnx_test_data preparation which
# uses onnx 1.3.0
run_subprocess(
[sys.executable, '-m', 'pip', 'install', '--user', 'onnx==1.5.0'])
run_subprocess(
[sys.executable, 'onnxruntime_test_python_nuphar.py'],
cwd=cwd, dll_path=dll_path)
def run_nodejs_tests(nodejs_binding_dir):
args = ['npm', 'test', '--', '--timeout=2000']
if is_windows():
args = ['cmd', '/c'] + args
run_subprocess(args, cwd=nodejs_binding_dir)
def build_python_wheel(
source_dir, build_dir, configs, use_cuda, use_ngraph, use_dnnl,
use_tensorrt, use_openvino, use_nuphar, use_vitisai, use_acl, use_armnn, use_dml,
wheel_name_suffix, enable_training, nightly_build=False, featurizers_build=False, use_ninja=False):
for config in configs:
cwd = get_config_build_dir(build_dir, config)
if is_windows() and not use_ninja:
cwd = os.path.join(cwd, config)
args = [sys.executable, os.path.join(source_dir, 'setup.py'),
'bdist_wheel']
# We explicitly override the platform tag in the name of the generated build wheel
# so that we can install the wheel on Mac OS X versions 10.12+.
        # Without this explicit override, we will see something like this while building on macOS 10.14 -
# [WARNING] MACOSX_DEPLOYMENT_TARGET is set to a lower value (10.12)
# than the version on which the Python interpreter was compiled (10.14) and will be ignored.
# Since we need to support 10.12+, we explicitly override the platform tag.
# See PR #3626 for more details
if is_macOS():
args += ['-p', 'macosx_10_12_x86_64']
# Any combination of the following arguments can be applied
if nightly_build:
args.append('--nightly_build')
if featurizers_build:
args.append("--use_featurizers")
if wheel_name_suffix:
args.append('--wheel_name_suffix={}'.format(wheel_name_suffix))
if enable_training:
args.append("--enable_training")
# The following arguments are mutually exclusive
if use_tensorrt:
args.append('--use_tensorrt')
elif use_cuda:
args.append('--use_cuda')
elif use_ngraph:
args.append('--use_ngraph')
elif use_openvino:
args.append('--use_openvino')
elif use_dnnl:
args.append('--use_dnnl')
elif use_nuphar:
args.append('--use_nuphar')
elif use_vitisai:
args.append('--use_vitisai')
elif use_acl:
args.append('--use_acl')
elif use_armnn:
args.append('--use_armnn')
elif use_dml:
args.append('--use_dml')
run_subprocess(args, cwd=cwd)
def derive_linux_build_property():
if is_windows():
return "/p:IsLinuxBuild=\"false\""
else:
return "/p:IsLinuxBuild=\"true\""
def build_nuget_package(source_dir, build_dir, configs, use_cuda, use_openvino, use_tensorrt, use_dnnl, use_mklml):
if not (is_windows() or is_linux()):
raise BuildError(
            'Currently csharp builds and nuget package creation are only supported '
'on Windows and Linux platforms.')
csharp_build_dir = os.path.join(source_dir, 'csharp')
is_linux_build = derive_linux_build_property()
# derive package name and execution provider based on the build args
execution_provider = "/p:ExecutionProvider=\"None\""
package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime\""
if use_openvino:
execution_provider = "/p:ExecutionProvider=\"openvino\""
package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime.OpenVino\""
elif use_tensorrt:
execution_provider = "/p:ExecutionProvider=\"tensorrt\""
package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime.TensorRT\""
elif use_dnnl:
execution_provider = "/p:ExecutionProvider=\"dnnl\""
package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime.DNNL\""
elif use_cuda:
package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime.Gpu\""
elif use_mklml:
package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime.MKLML\""
else:
pass
# set build directory based on build_dir arg
native_dir = os.path.normpath(os.path.join(source_dir, build_dir))
ort_build_dir = "/p:OnnxRuntimeBuildDirectory=\"" + native_dir + "\""
# dotnet restore
cmd_args = ["dotnet", "restore", "OnnxRuntime.CSharp.sln", "--configfile", "Nuget.CSharp.config"]
run_subprocess(cmd_args, cwd=csharp_build_dir)
# build csharp bindings and create nuget package for each config
for config in configs:
if is_linux():
native_build_dir = os.path.join(native_dir, config)
cmd_args = ["make", "install", "DESTDIR=.//nuget-staging"]
run_subprocess(cmd_args, cwd=native_build_dir)
configuration = "/p:Configuration=\"" + config + "\""
cmd_args = ["dotnet", "msbuild", "OnnxRuntime.CSharp.sln", configuration, package_name, is_linux_build,
ort_build_dir]
run_subprocess(cmd_args, cwd=csharp_build_dir)
cmd_args = [
"dotnet", "msbuild", "OnnxRuntime.CSharp.proj", "/t:CreatePackage",
package_name, configuration, execution_provider, is_linux_build, ort_build_dir]
run_subprocess(cmd_args, cwd=csharp_build_dir)
def run_csharp_tests(source_dir, build_dir, use_cuda, use_openvino, use_tensorrt, use_dnnl):
# Currently only running tests on windows.
if not is_windows():
return
csharp_source_dir = os.path.join(source_dir, 'csharp')
is_linux_build = derive_linux_build_property()
# define macros based on build args
macros = ""
if use_openvino:
macros += "USE_OPENVINO;"
if use_tensorrt:
macros += "USE_TENSORRT;"
if use_dnnl:
macros += "USE_DNNL;"
if use_cuda:
macros += "USE_CUDA;"
define_constants = ""
if macros != "":
define_constants = "/p:DefineConstants=\"" + macros + "\""
# set build directory based on build_dir arg
native_build_dir = os.path.normpath(os.path.join(source_dir, build_dir))
ort_build_dir = "/p:OnnxRuntimeBuildDirectory=\"" + native_build_dir + "\""
# Skip pretrained models test. Only run unit tests as part of the build
# add "--verbosity", "detailed" to this command if required
cmd_args = ["dotnet", "test", "test\\Microsoft.ML.OnnxRuntime.Tests\\Microsoft.ML.OnnxRuntime.Tests.csproj",
"--filter", "FullyQualifiedName!=Microsoft.ML.OnnxRuntime.Tests.InferenceTest.TestPreTrainedModels",
is_linux_build, define_constants, ort_build_dir]
run_subprocess(cmd_args, cwd=csharp_source_dir)
def build_protoc_for_host(cmake_path, source_dir, build_dir, args):
if (args.arm or args.arm64 or args.enable_windows_store) and (not is_windows() and not args.ios):
raise BuildError(
            'Building protoc for the host is currently only supported when '
            'cross-compiling for ARM/ARM64/Store on Windows, or for iOS on Linux.')
log.info(
"Building protoc for host to be used in cross-compiled build process")
protoc_build_dir = os.path.join(os.getcwd(), build_dir, 'host_protoc')
os.makedirs(protoc_build_dir, exist_ok=True)
# Generate step
cmd_args = [
cmake_path,
os.path.join(source_dir, 'cmake', 'external', 'protobuf', 'cmake'),
'-Dprotobuf_BUILD_TESTS=OFF',
'-Dprotobuf_WITH_ZLIB_DEFAULT=OFF',
'-Dprotobuf_BUILD_SHARED_LIBS=OFF'
]
is_ninja = args.cmake_generator == 'Ninja'
if args.cmake_generator is not None and not (is_macOS() and args.use_xcode):
cmd_args += ['-G', args.cmake_generator]
if is_windows():
if not is_ninja:
cmd_args += ['-T', 'host=x64']
elif is_macOS():
if args.use_xcode:
cmd_args += ['-G', 'Xcode']
# CMake < 3.18 has a bug setting system arch to arm64 (if not specified) for Xcode 12,
# protoc for host should be built using host architecture
# Explicitly specify the CMAKE_OSX_ARCHITECTURES for x86_64 Mac.
import platform
if platform.machine() == 'x86_64':
cmd_args += ['-DCMAKE_OSX_ARCHITECTURES=x86_64']
run_subprocess(cmd_args, cwd=protoc_build_dir)
# Build step
cmd_args = [cmake_path,
"--build", protoc_build_dir,
"--config", "Release",
"--target", "protoc"]
run_subprocess(cmd_args)
# Absolute protoc path is needed for cmake
config_dir = ''
suffix = ''
if (is_windows() and not is_ninja) or (is_macOS() and args.use_xcode):
config_dir = 'Release'
if is_windows():
suffix = '.exe'
expected_protoc_path = os.path.join(protoc_build_dir, config_dir, 'protoc' + suffix)
if not os.path.exists(expected_protoc_path):
raise BuildError("Couldn't find {}. Host build of protoc failed.".format(expected_protoc_path))
return expected_protoc_path
def generate_documentation(source_dir, build_dir, configs):
operator_doc_path = os.path.join(source_dir, 'docs', 'ContribOperators.md')
opkernel_doc_path = os.path.join(source_dir, 'docs', 'OperatorKernels.md')
for config in configs:
shutil.copy(
os.path.join(source_dir, 'tools', 'python', 'gen_contrib_doc.py'),
os.path.join(build_dir, config))
shutil.copy(
os.path.join(source_dir, 'tools', 'python', 'gen_opkernel_doc.py'),
os.path.join(build_dir, config))
run_subprocess(
[sys.executable,
'gen_contrib_doc.py',
'--output_path', operator_doc_path],
cwd=os.path.join(build_dir, config))
run_subprocess(
[sys.executable,
'gen_opkernel_doc.py',
'--output_path', opkernel_doc_path],
cwd=os.path.join(build_dir, config))
docdiff = ''
try:
docdiff = subprocess.check_output(['git', 'diff', opkernel_doc_path])
except subprocess.CalledProcessError:
print('git diff returned non-zero error code')
if len(docdiff) > 0:
log.warning(
'The updated opkernel document file ' + str(opkernel_doc_path) +
' is different from the checked in version. Consider '
'regenerating the file with CPU, DNNL and CUDA providers enabled.')
log.debug('diff:\n' + str(docdiff))
docdiff = ''
try:
docdiff = subprocess.check_output(['git', 'diff', operator_doc_path])
except subprocess.CalledProcessError:
print('git diff returned non-zero error code')
if len(docdiff) > 0:
raise BuildError(
'The updated operator document file ' +
str(operator_doc_path) + ' must be checked in.\n diff:\n' +
str(docdiff))
def main():
args = parse_arguments()
cmake_extra_defines = (args.cmake_extra_defines
if args.cmake_extra_defines else [])
cross_compiling = args.arm or args.arm64 or args.android
if not (args.update or args.clean or args.build or args.test):
log.debug(
"Defaulting to running update, build "
"[and test for native builds].")
args.update = True
args.build = True
if cross_compiling:
args.test = args.android_abi == 'x86_64' or args.android_abi == 'arm64-v8a'
else:
args.test = True
if args.skip_tests:
args.test = False
if args.include_ops_by_model or args.include_ops_by_config:
from exclude_unused_ops import exclude_unused_ops
models_path = args.include_ops_by_model if args.include_ops_by_model else ''
config_path = args.include_ops_by_config if args.include_ops_by_config else ''
exclude_unused_ops(models_path, config_path, use_cuda=args.use_cuda)
if args.use_tensorrt:
args.use_cuda = True
if args.build_wheel or args.gen_doc:
args.enable_pybind = True
if args.build_csharp or args.build_nuget or args.build_java or args.build_nodejs:
args.build_shared_lib = True
if args.build_nuget and cross_compiling:
raise BuildError('Currently nuget package creation is not supported while cross-compiling')
if args.enable_pybind and args.disable_exceptions:
raise BuildError('Python bindings require exceptions to be enabled.')
if args.minimal_build and args.disable_ort_format_load:
raise BuildError('Minimal build requires loading ORT format models.')
if args.use_openvino == "VAD-F_FP32":
args.test = False
configs = set(args.config)
cmake_path = resolve_executable_path(args.cmake_path)
ctest_path = None if args.use_vstest else resolve_executable_path(
args.ctest_path)
build_dir = args.build_dir
script_dir = os.path.realpath(os.path.dirname(__file__))
source_dir = os.path.normpath(os.path.join(script_dir, "..", ".."))
cuda_home, cudnn_home = setup_cuda_vars(args)
mpi_home = args.mpi_home
nccl_home = args.nccl_home
tensorrt_home = setup_tensorrt_vars(args)
migraphx_home = setup_migraphx_vars(args)
os.makedirs(build_dir, exist_ok=True)
log.info("Build started")
if args.update:
cmake_extra_args = []
path_to_protoc_exe = args.path_to_protoc_exe
if not args.skip_submodule_sync:
update_submodules(source_dir)
if is_windows():
if args.cmake_generator == 'Ninja':
if args.x86 or args.arm or args.arm64:
raise BuildError(
"To cross-compile with Ninja, load the toolset "
"environment for the target processor (e.g. Cross "
"Tools Command Prompt for VS)")
cmake_extra_args = ['-G', args.cmake_generator]
elif args.x86:
cmake_extra_args = [
'-A', 'Win32', '-T', 'host=x64', '-G', args.cmake_generator
]
elif args.arm or args.arm64:
if path_to_protoc_exe is None:
path_to_protoc_exe = build_protoc_for_host(
cmake_path, source_dir, build_dir, args)
if args.arm:
cmake_extra_args = ['-A', 'ARM']
else:
cmake_extra_args = ['-A', 'ARM64']
cmake_extra_args += ['-G', args.cmake_generator]
if args.test:
log.info(
"Cannot test on host build machine for cross-compiled "
"ARM(64) builds. Will skip test running after build.")
args.test = False
else:
if (args.msvc_toolset == '14.16' and
args.cmake_generator == 'Visual Studio 16 2019'):
toolset = 'v141,host=x64,version=' + args.msvc_toolset
elif args.msvc_toolset:
toolset = 'host=x64,version=' + args.msvc_toolset
else:
toolset = 'host=x64'
if args.cuda_version:
toolset += ',cuda=' + args.cuda_version
cmake_extra_args = [
'-A', 'x64', '-T', toolset, '-G', args.cmake_generator
]
if args.enable_windows_store:
cmake_extra_args.append(
'-DCMAKE_TOOLCHAIN_FILE=' + os.path.join(
source_dir, 'cmake', 'store_toolchain.cmake'))
if args.enable_wcos:
cmake_extra_args.append('-DCMAKE_USER_MAKE_RULES_OVERRIDE=wcos_rules_override.cmake')
elif args.cmake_generator is not None and not (is_macOS() and args.use_xcode):
cmake_extra_args += ['-G', args.cmake_generator]
elif is_macOS() and args.use_xcode:
cmake_extra_args += ['-G', 'Xcode']
if (args.android or args.ios or args.enable_windows_store) and args.path_to_protoc_exe is None:
path_to_protoc_exe = build_protoc_for_host(
cmake_path, source_dir, build_dir, args)
if is_ubuntu_1604():
if (args.arm or args.arm64):
raise BuildError(
"Only Windows ARM(64) cross-compiled builds supported "
"currently through this script")
install_ubuntu_deps(args)
if not is_docker() and not args.use_acl and not args.use_armnn:
install_python_deps()
if args.enable_pybind and is_windows():
install_python_deps(args.numpy_version)
if args.enable_onnx_tests:
setup_test_data(build_dir, configs)
generate_build_tree(
cmake_path, source_dir, build_dir, cuda_home, cudnn_home, mpi_home, nccl_home,
tensorrt_home, migraphx_home, path_to_protoc_exe, configs, cmake_extra_defines,
args, cmake_extra_args)
if args.clean:
clean_targets(cmake_path, build_dir, configs)
setup_dml_build(args, cmake_path, build_dir, configs)
if args.build:
build_targets(args, cmake_path, build_dir, configs, args.parallel, args.target)
if args.test:
run_onnxruntime_tests(args, source_dir, ctest_path, build_dir, configs)
if args.enable_pybind and not args.skip_onnx_tests and args.use_nuphar:
nuphar_run_python_tests(build_dir, configs)
if args.build_nodejs and not args.skip_nodejs_tests:
nodejs_binding_dir = os.path.normpath(os.path.join(source_dir, "nodejs"))
run_nodejs_tests(nodejs_binding_dir)
if args.build:
if args.build_wheel:
nightly_build = bool(os.getenv('NIGHTLY_BUILD') == '1')
build_python_wheel(
source_dir,
build_dir,
configs,
args.use_cuda,
args.use_ngraph,
args.use_dnnl,
args.use_tensorrt,
args.use_openvino,
args.use_nuphar,
args.use_vitisai,
args.use_acl,
args.use_armnn,
args.use_dml,
args.wheel_name_suffix,
args.enable_training,
nightly_build=nightly_build,
featurizers_build=args.use_featurizers,
use_ninja=(args.cmake_generator == 'Ninja')
)
if args.build_nuget:
build_nuget_package(
source_dir,
build_dir,
configs,
args.use_cuda,
args.use_openvino,
args.use_tensorrt,
args.use_dnnl,
args.use_mklml
)
if args.test and args.build_nuget:
run_csharp_tests(
source_dir,
build_dir,
args.use_cuda,
args.use_openvino,
args.use_tensorrt,
args.use_dnnl)
if args.gen_doc and (args.build or args.test):
generate_documentation(source_dir, build_dir, configs)
log.info("Build complete")
if __name__ == "__main__":
try:
sys.exit(main())
except BaseError as e:
log.error(str(e))
sys.exit(1)
| true | true |
f70045804ab2dc1175a34896e20405f1777bd9de | 3,755 | py | Python | acme/agents/jax/dqn/config.py | nrocketmann/acme-intrinsic | ce90aa15ec785a8618a2505410ab6b9f1f9b5a32 | ["Apache-2.0"] | null | null | null | acme/agents/jax/dqn/config.py | nrocketmann/acme-intrinsic | ce90aa15ec785a8618a2505410ab6b9f1f9b5a32 | ["Apache-2.0"] | null | null | null | acme/agents/jax/dqn/config.py | nrocketmann/acme-intrinsic | ce90aa15ec785a8618a2505410ab6b9f1f9b5a32 | ["Apache-2.0"] | null | null | null |
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQN config."""
import dataclasses
from acme.adders import reverb as adders_reverb
import numpy as np
@dataclasses.dataclass
class DQNConfig:
"""Configuration options for DQN agent."""
epsilon: float = 0.05 # Action selection via epsilon-greedy policy.
# TODO(b/191706065): update all clients and remove this field.
seed: int = 1 # Random seed.
# Learning rule
learning_rate: float = 1e-3 # Learning rate for Adam optimizer.
adam_eps: float = 1e-8 # Eps for Adam optimizer.
discount: float = 0.99 # Discount rate applied to value per timestep.
n_step: int = 5 # N-step TD learning.
target_update_period: int = 100 # Update target network every period.
max_gradient_norm: float = np.inf # For gradient clipping.
# Replay options
batch_size: int = 256 # Number of transitions per batch.
min_replay_size: int = 1_000 # Minimum replay size.
max_replay_size: int = 1_000_000 # Maximum replay size.
replay_table_name: str = adders_reverb.DEFAULT_PRIORITY_TABLE
importance_sampling_exponent: float = 0.2 # Importance sampling for replay.
priority_exponent: float = 0.6 # Priority exponent for replay.
prefetch_size: int = 4 # Prefetch size for reverb replay performance.
samples_per_insert: float = 0.5 # Ratio of learning samples to insert.
  # Rate to be used for the SampleToInsertRatio rate limiter tolerance.
# See a formula in make_replay_tables for more details.
samples_per_insert_tolerance_rate: float = 0.1
# How many gradient updates to perform per learner step.
num_sgd_steps_per_step: int = 1
@dataclasses.dataclass
class DQNEmpowermentConfig:
"""Configuration options for DQN agent."""
epsilon: float = 0.05 # Action selection via epsilon-greedy policy.
# TODO(b/191706065): update all clients and remove this field.
seed: int = 1 # Random seed.
# Learning rule
learning_rate: float = 1e-3 # Learning rate for Adam optimizer.
adam_eps: float = 1e-8 # Eps for Adam optimizer.
discount: float = 0.99 # Discount rate applied to value per timestep.
n_step: int = 5 # N-step TD learning.
target_update_period: int = 100 # Update target network every period.
max_gradient_norm: float = np.inf # For gradient clipping.
# Replay options
batch_size: int = 256 # Number of transitions per batch.
min_replay_size: int = 1_000 # Minimum replay size.
max_replay_size: int = 1_000_000 # Maximum replay size.
replay_table_name: str = adders_reverb.DEFAULT_PRIORITY_TABLE
importance_sampling_exponent: float = 0.2 # Importance sampling for replay.
priority_exponent: float = 0.6 # Priority exponent for replay.
prefetch_size: int = 4 # Prefetch size for reverb replay performance.
samples_per_insert: float = 0.5 # Ratio of learning samples to insert.
  sequence_length: int = 10
  sequence_period: int = 2
  # Rate to be used for the SampleToInsertRatio rate limiter tolerance.
# See a formula in make_replay_tables for more details.
samples_per_insert_tolerance_rate: float = 0.1
# How many gradient updates to perform per learner step.
num_sgd_steps_per_step: int = 1
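# Illustrative sketch (not part of the Acme API surface): constructing a config
# with a few overridden fields. The values below are arbitrary examples.
def _example_config() -> DQNConfig:
  return DQNConfig(learning_rate=5e-4, batch_size=128, samples_per_insert=8.0)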
| 42.191011 | 78 | 0.747537 |
| true | true |
f7004704fef6b4c992ee9e18c045004750d66571 | 587 | py | Python | core/learnable_priors/normal_prior.py | insilicomedicine/TRIP | 5e7b9da298aa47a71c71e1144ff1d8e538dbccaa | ["MIT"] | 25 | 2019-10-29T02:06:03.000Z | 2021-04-12T03:14:21.000Z | core/learnable_priors/normal_prior.py | insilicomedicine/TRIP | 5e7b9da298aa47a71c71e1144ff1d8e538dbccaa | ["MIT"] | 1 | 2020-12-23T01:51:18.000Z | 2020-12-23T01:51:18.000Z | core/learnable_priors/normal_prior.py | insilicomedicine/TRIP | 5e7b9da298aa47a71c71e1144ff1d8e538dbccaa | ["MIT"] | 4 | 2020-01-05T17:32:54.000Z | 2020-11-18T05:24:37.000Z |
import torch
from torch import nn
from torch.distributions import MultivariateNormal
class Normal(nn.Module):
def __init__(self, num_vars=100):
super(Normal, self).__init__()
self.num_vars = num_vars
self.means = nn.Parameter(torch.zeros(num_vars))
self.std = nn.Parameter(torch.eye(num_vars))
def log_prob(self, x):
distr = MultivariateNormal(self.means, self.std)
return distr.log_prob(x)
def sample(self, num_samples):
distr = MultivariateNormal(self.means, self.std)
    # sample_n is deprecated in newer PyTorch; sample((n,)) is equivalent.
    return distr.sample((num_samples,))
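# Illustrative usage sketch (not part of the original module): draw samples from
# the learnable prior and score them; shapes shown assume num_vars=4.
def _demo_normal_prior():
  prior = Normal(num_vars=4)
  samples = prior.sample(3)            # tensor of shape (3, 4)
  log_probs = prior.log_prob(samples)  # tensor of shape (3,)
  return samples, log_probs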
| 27.952381 | 56 | 0.684838 |
| true | true |
f70047cdafe4dcd083f47814ee7d17be097fee36 | 2,338 | py | Python | algorithms/quicksort.py | not-sponsored/Guide-to-Data-Structures-and-Algorithms-Exercises | a905298c594a826e558cd1c94876b632db5d4d11 | ["Fair"] | null | null | null | algorithms/quicksort.py | not-sponsored/Guide-to-Data-Structures-and-Algorithms-Exercises | a905298c594a826e558cd1c94876b632db5d4d11 | ["Fair"] | null | null | null | algorithms/quicksort.py | not-sponsored/Guide-to-Data-Structures-and-Algorithms-Exercises | a905298c594a826e558cd1c94876b632db5d4d11 | ["Fair"] | null | null | null |
import sys
class SortableArray():
def __init__(self, arr):
self.arr = arr
def partition(self, left, right):
# choose right most as pivot
pivot_index = right
# get pivot value for compares
pivot = self.arr[pivot_index]
right -= 1
print(f'left orig: {left} right orig: {right}')
while True:
# move left pointer until we hit a value >= pivot
while self.arr[left] < pivot:
print(f'left: {left}')
left += 1
print('left', left)
# move right until hit a value <= pivot or hits 0 index
while right > 0 and self.arr[right] > pivot:
print(f'right: {right}')
right -= 1
print('right', right)
# if left >= right then we break and swap
if left >= right:
break
# if not we swap right and left and continue
else:
self.arr[left], self.arr[right] = self.arr[right], self.arr[left]
left += 1
# finally swap left and pivot
self.arr[left], self.arr[pivot_index] = self.arr[pivot_index], self.arr[left]
print(self.arr)
return left
def quicksort(self, left, right):
# base case one element
if right - left <= 0:
return
# partition and get pivot
pivot_index = self.partition(left, right)
# recursively call for left partition
self.quicksort(left, pivot_index - 1)
# recursively call for right partition
self.quicksort(pivot_index + 1, right)
def quickselect(self, kth_lowest_num, left, right):
# base case one element
if right - left <= 0:
return self.arr[left]
# partition and get pivot
pivot_index = self.partition(left, right)
    # if kth is less than pivot, recursively call for left and return the result
    if kth_lowest_num < pivot_index:
      return self.quickselect(kth_lowest_num, left, pivot_index - 1)
    # if kth is greater than pivot, recursively call for right and return the result
    elif kth_lowest_num > pivot_index:
      return self.quickselect(kth_lowest_num, pivot_index + 1, right)
    # else we have the kth num because kth_lowest_num == pivot_index
    else:
      print(f'kth {kth_lowest_num}: {self.arr[pivot_index]}')
      return self.arr[pivot_index]
def main(arr, kth):
sortable = SortableArray(arr)
print(sortable.quickselect(kth, 0, len(arr) - 1))
sortable.quicksort(0, len(arr) - 1)
print(f'final sorted array: {sortable.arr}')
if __name__ == '__main__':
main([int(x) for x in sys.argv[1].split(',')], int(sys.argv[2]))
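# Illustrative sketch (not part of the original exercise): sorting a small list
# directly instead of via the command-line invocation above.
def _demo_quicksort():
    data = SortableArray([5, 2, 9, 1, 7])
    data.quicksort(0, len(data.arr) - 1)
    return data.arr  # [1, 2, 5, 7, 9]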
| 28.168675 | 79 | 0.671514 |
| true | true |
f70049a506cae229d8102cddf51c76a5b5acadbc | 18,069 | py | Python | dit/math/ops.py | Ejjaffe/dit | c9d206f03d1de5a0a298b1d0ea9d79ea5e789ee1 | ["BSD-3-Clause"] | 1 | 2020-03-13T10:30:11.000Z | 2020-03-13T10:30:11.000Z | dit/math/ops.py | Ejjaffe/dit | c9d206f03d1de5a0a298b1d0ea9d79ea5e789ee1 | ["BSD-3-Clause"] | null | null | null | dit/math/ops.py | Ejjaffe/dit | c9d206f03d1de5a0a298b1d0ea9d79ea5e789ee1 | ["BSD-3-Clause"] | null | null | null |
"""
Classes to contextualize math operations in log vs linear space.
"""
from types import MethodType
import numpy as np
from ..exceptions import InvalidBase
__all__ = (
'get_ops',
'LinearOperations',
'LogOperations',
)
# For 2.x, these are ascii strings. For 3.x these are unicode strings.
acceptable_base_strings = {'linear', 'e'}
def get_ops(base):
"""
Returns an *Operations instance, depending on the base.
Parameters
----------
base : float, 'linear', 'e'
The base for the Operations instance.
"""
# Let's not initialize unless we have to.
if base in cache:
ops = cache[base]
else:
# This assumes that 'linear' is in cache.
ops = LogOperations(base)
cache[base] = ops
return ops
def exp_func(b):
"""
Returns a base-`b` exponential function.
Parameters
----------
b : positive float or 'e'
The base of the desired exponential function.
Returns
-------
exp : function
The base-`b` exponential function. The returned function will operate
elementwise on NumPy arrays, but note, it is not a ufunc.
Examples
--------
>>> exp2 = exp_func(2)
>>> exp2(1)
2.0
>>> exp3 = exp_func(3)
>>> exp3(1)
3.0
Raises
------
InvalidBase
If the base is less than zero or equal to one.
"""
from dit.utils import is_string_like
if is_string_like(b) and b not in acceptable_base_strings:
raise InvalidBase(msg=b)
if b == 'linear':
exp = lambda x: x # pragma: no branch
elif b == 2:
exp = np.exp2
elif b == 10:
exp = lambda x: 10**x
elif b == 'e' or np.isclose(b, np.e):
exp = np.exp
else:
if b <= 0 or b == 1:
raise InvalidBase(b)
def exp(x, base=b):
"""
Return `base`**`x`
Parameters
----------
x : float
The number to exponentiate
base : float
The base of the exponential
Returns
-------
p : float
`base`**`x`
"""
return base**np.asarray(x)
return exp
def log_func(b):
"""
Returns a base-`b` logarithm function.
Parameters
----------
b : positive float or 'e'
The base of the desired logarithm function.
Returns
-------
log : function
The base-`b` logarithm function. The returned function will operate
elementwise on NumPy arrays, but note, it is not a ufunc.
Examples
--------
>>> log2 = log_func(2)
>>> log2(2)
1.0
>>> log3 = log_func(3)
>>> log3(3)
1.0
Raises
------
InvalidBase
If the base is less than zero or equal to one.
"""
from dit.utils import is_string_like
if is_string_like(b) and b not in acceptable_base_strings:
raise InvalidBase(msg=b)
if b == 'linear':
log = lambda x: x # pragma: no branch
elif b == 2:
log = np.log2
elif b == 10:
log = np.log10
elif b == 'e' or np.isclose(b, np.e):
log = np.log
else:
if b <= 0 or b == 1:
raise InvalidBase(b)
Z = np.log(b)
def log(x, func=np.log):
"""
Return the log of `x`
Parameters
----------
x : float
The value to take the log of
func : function
A logarithm function
Returns
-------
log : float
The logarithm of `x` in base `b` (from outer scope)
"""
return func(x) / Z
return log
class Operations(object):
"""
Base class which implements certain math operations.
For example, regular addition with log probabilities is handled specially.
While we could implement many more operations, we do not. Their usage
is uncommon and their implementation would be slower as well. For example,
    subtraction with log probabilities must go as:
    .. math::
        \log_2(x-y) = \log_2(x) + \log_2\left(1 - 2^{\log_2(y) - \log_2(x)}\right)
    Note that if :math:`y > x`, then :math:`\log(y) > \log(x)` and the inner term
of the second logarithm will be less than 0, yielding NaN.
"""
### Do we allow base == 'e' or should we convert to its numerical value?
### Ans: We store whatever was specified but provide get_base() with an
### option to return a numerical base.
one = None
zero = None
base = None
exp = None
log = None
def get_base(self, numerical=False):
"""
Returns the base in which operations take place.
For linear-based operations, the result is 'linear'.
Parameters
----------
numerical : bool
If `True`, then if the base is 'e', it is returned as a float.
"""
if numerical and self.base == 'e':
base = np.exp(1)
else:
base = self.base
return base
def is_null(self, p):
"""
Returns `True` if `p` is a null probability.
Parameters
----------
p : float
The probability to be tested.
"""
return np.isclose(self.zero, p)
def is_null_exact(self, p):
"""
Returns `True` if `p` is exactly a null probability.
Parameters
----------
p : float
The probability to be tested.
"""
return self.zero == p
def add(self, x, y):
""" Abstract base class """
raise NotImplementedError
def add_inplace(self, x, y):
""" Abstract base class """
raise NotImplementedError
def add_reduce(self, x):
""" Abstract base class """
raise NotImplementedError
def mult(self, x, y):
""" Abstract base class """
raise NotImplementedError
def mult_inplace(self, x, y):
""" Abstract base class """
raise NotImplementedError
def mult_reduce(self, x):
""" Abstract base class """
raise NotImplementedError
def invert(self, x):
""" Abstract base class """
raise NotImplementedError
def normalize(self, x):
""" Abstract base class """
raise NotImplementedError
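# Worked check of the identity in the docstring above (illustrative only, not
# part of the dit API): with x = 0.75 and y = 0.25 both sides equal log2(0.5).
def _log_subtraction_identity_demo(x=0.75, y=0.25):
    lhs = np.log2(x - y)
    rhs = np.log2(x) + np.log2(1 - 2**(np.log2(y) - np.log2(x)))
    return np.isclose(lhs, rhs)  # True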
class LinearOperations(Operations):
"""
The class of operations on linear values.
"""
one = 1
zero = 0
base = 'linear'
# If the functions below are standard Python functions (as opposed to
# NumPy ufuncs), then they will be treated as unbound methods for the class.
# During instantiation, they are bound to the instance (since before
# instantiation they are class methods) and thus, we are left with
# bound methods (undesirably). If we had modified these attributes in the
# __init__ function, then they would not be bound (or even unbound methods)
# but functions instead (desirably). This is precisely what LogOperations
# does, which is why it does not have this issue. An alternative approach
# is to explicitly declare these functions to be static methods, as we
# do below.
#
exp = staticmethod(exp_func(base))
log = staticmethod(log_func(base))
def add(self, x, y):
"""
Add the arrays element-wise. Neither x nor y will be modified.
Assumption: :math:`y >= 0`.
Operation: :math:`z[i] = x[i] + y[i]`
Parameters
----------
x, y : NumPy arrays, shape (n,)
The arrays to add.
Returns
-------
z : NumPy array, shape (n,)
The resultant array.
"""
z = x + y
return z
def add_inplace(self, x, y):
"""
Adds `y` to `x`, in-place. `x` will be modified, but `y` will not.
Assumption: :math:`y >= 0`.
Operation: :math:`x[i] += y[i]`
Parameters
----------
x, y : NumPy arrays, shape (n,)
The arrays to add.
Returns
-------
x : NumPy array, shape (n,)
The resultant array.
"""
x += y
return x
def add_reduce(self, x, axis=None):
"""
Performs an `addition' reduction on `x`.
Assumption: :math:`y >= 0`.
Operation: :math:`z = \\sum_i x[i]`
Returns
-------
z : float
The summation of the elements in `x`.
"""
z = x.sum(axis=axis)
return z
def mult(self, x, y):
"""
Multiplies the arrays element-wise. Neither x nor y will be modified.
Operation: :math:`z[i] = x[i] * y[i]`
Parameters
----------
x, y : NumPy arrays, shape (n,)
The arrays to multiply.
Returns
-------
z : NumPy array, shape (n,)
The resultant array.
"""
z = x * y
return z
def mult_inplace(self, x, y):
"""
Multiplies `y` to `x`, in-place. `x` will be modified, but `y` will not.
Operation: :math:`x[i] *= y[i]`
Parameters
----------
x, y : NumPy arrays, shape (n,)
The arrays to multiply.
Returns
-------
x : NumPy array, shape (n,)
The resultant array.
"""
x *= y
return x
def mult_reduce(self, x, axis=None):
"""
Performs an `multiplication' reduction on `x`.
Operation: :math:`z = \\prod_i x[i]`
Returns
-------
z : float
The product of the elements in `x`.
"""
z = np.prod(x, axis=axis)
return z
def invert(self, x):
"""
Returns the element-wise multiplicative inverse of x.
Operation: :math:`z[i] = 1/x[i]`
Parameters
----------
x : NumPy array, shape (n,)
The array to invert.
Returns
-------
z : NumPy array, shape (n,)
The inverted array.
"""
z = 1 / x
return z
def normalize(self, x, axis=None):
"""
Returns a normalized version of x.
Operation: :math:`z[i] = x[i] / sum(x)`
If x is 2D and axis is None, then normalization is over all elements.
Use axis=-1 to normalize each row of x.
Parameters
----------
x : NumPy array, shape (n,)
The array to normalize.
Returns
-------
z : NumPy array, shape (n,)
The normalized array.
"""
        # keepdims keeps the sums broadcastable when normalizing along an axis
        z = x / np.sum(x, axis=axis, keepdims=True)
return z
def set_add(ops):
"""
Set the add method on the LogOperations instance.
"""
# To preserve numerical accuracy, we must make use of a logaddexp
# function. These functions only exist in Numpy for base-e and base-2.
# For all other bases, we must convert and then convert back.
# In each case, we use default arguments to make the function that we
# are calling 'local'.
base = ops.base
if base == 2:
def add(self, x, y, func=np.logaddexp2):
return func(x, y)
elif base == 'e' or np.isclose(base, np.e):
def add(self, x, y, func=np.logaddexp):
return func(x, y)
else:
# No need to optimize this...
def add(self, x, y):
# Convert log_b probabilities to log_2 probabilities.
x2 = x * np.log2(base)
y2 = y * np.log2(base)
z = np.logaddexp2(x2, y2)
# Convert log_2 probabilities to log_b probabilities.
z *= self.log(2)
return z
add.__doc__ = """
Add the arrays element-wise. Neither x nor y will be modified.
Assumption: y <= 0.
Parameters
----------
x, y : NumPy arrays, shape (n,)
The arrays to add.
Returns
-------
z : NumPy array, shape (n,)
The resultant array.
"""
ops.add = MethodType(add, ops)
def set_add_inplace(ops):
"""
Set the add_inplace method on the LogOperations instance.
"""
base = ops.base
if base == 2:
def add_inplace(self, x, y, func=np.logaddexp2):
return func(x, y, x)
elif base == 'e' or np.isclose(base, np.e):
def add_inplace(self, x, y, func=np.logaddexp):
return func(x, y, x)
else:
def add_inplace(self, x, y):
x *= np.log2(base)
y2 = y * np.log2(base)
np.logaddexp2(x, y2, x)
x *= self.log(2)
return x
add_inplace.__doc__ = """
Adds `y` to `x`, in-place. `x` will be modified, but `y` will not.
Assumption: :math:`y <= 0`.
Parameters
----------
x, y : NumPy arrays, shape (n,)
The arrays to add.
Returns
-------
x : NumPy array, shape (n,)
The resultant array.
"""
ops.add_inplace = MethodType(add_inplace, ops)
def set_add_reduce(ops):
"""
Set the add_reduce method on the LogOperations instance.
"""
# https://github.com/numpy/numpy/issues/4599
base = ops.base
if base == 2:
def add_reduce(self, x, axis=None, func=np.logaddexp2):
if len(x) == 0:
# Since logaddexp.identity is None, we handle it separately.
z = self.zero
else:
# Note, we are converting to a NumPy array, if necessary.
z = func.reduce(x, axis=axis, dtype=float)
return z
elif base == 'e' or np.isclose(base, np.e):
def add_reduce(self, x, axis=None, func=np.logaddexp):
if len(x) == 0:
# Since logaddexp.identity is None, we handle it separately.
z = self.zero
else:
# Note, we are converting to a NumPy array, if necessary.
z = func.reduce(x, axis=axis, dtype=float)
return z
else:
def add_reduce(self, x, axis=None):
if len(x) == 0:
# Since logaddexp.identity is None, we handle it separately.
z = self.zero
else:
# Note, we are converting to a NumPy array, if necessary.
# Change the base-2, add, and then convert back.
x2 = x * np.log2(base)
z = np.logaddexp2.reduce(x2, axis=axis, dtype=float)
z /= np.log2(base)
return z
add_reduce.__doc__ = """
Performs an `addition' reduction on `x`.
    Assumption: :math:`x[i] <= 0`.
Returns
-------
z : float
The summation of the elements in `x`.
"""
ops.add_reduce = MethodType(add_reduce, ops)
class LogOperations(Operations):
one = None
zero = None
base = None
exp = None
log = None
def __init__(self, base):
"""
Initialize the log operation manager.
Parameters
----------
base : float
The base of the logarithm.
"""
self.set_base(base)
def set_base(self, base):
"""
Change the base of the logarithm.
Parameters
----------
base : float
The base of the logarithm.
"""
self.base = base
self.exp = exp_func(base)
self.log = log_func(base)
# Note: When base < 1, zero == +inf. When base > 1, zero == -inf.
self.one = self.log(1)
self.zero = self.log(0)
# Update the add methods.
set_add(self)
set_add_inplace(self)
set_add_reduce(self)
def mult(self, x, y):
"""
Multiplies the arrays element-wise. Neither `x` nor `y` will be modified.
Parameters
----------
x, y : NumPy arrays, shape (n,)
The arrays to multiply.
Returns
-------
z : NumPy array, shape (n,)
The resultant array.
"""
z = x + y
return z
def mult_inplace(self, x, y):
"""
Multiplies `y` to `x`, in-place. `x` will be modified, but `y` will not.
Parameters
----------
x, y : NumPy arrays, shape (n,)
The arrays to multiply.
Returns
-------
x : NumPy array, shape (n,)
The resultant array.
"""
x += y
return x
def mult_reduce(self, x, axis=None):
"""
        Performs a `multiplication' reduction on `x`.
Returns
-------
z : float
The product of the elements in `x`.
"""
# The identity for addition in NumPy is zero.
# This corresponds to an identity of 1 for log operations, and this is
# exactly the desired identity for multiplying probabilities.
z = x.sum(axis=axis)
return z
def invert(self, x):
"""
Returns the element-wise multiplicative inverse of `x`: :math:`1/x`.
Parameters
----------
x : NumPy array, shape (n,)
The array to invert.
Returns
-------
z : NumPy array, shape (n,)
The inverted array.
"""
z = -x
return z
def normalize(self, x, axis=None):
"""
Returns a normalized version of `x`.
Non-log equivalent operation: :math:`z[i] = x[i] / sum(x)`
If `x` is 2D and axis is None, then normalization is over all elements.
Use axis=-1 to normalize each row of `x`.
Parameters
----------
x : NumPy array, shape (n,)
The array to normalize.
Returns
-------
z : NumPy array, shape (n,)
The normalized array.
"""
# The API way would be: mult(x, invert( add_reduce(x) ))
# We'll avoid some of those function calls.
z = x - self.add_reduce(x, axis=axis)
return z
cache = {
'linear': LinearOperations(),
2: LogOperations(2),
'e': LogOperations('e')
}
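# --- Editor's addition: a small, hedged usage sketch, not part of the original
# module.  It only exercises the API defined above (`get_ops`, `LogOperations`,
# `cache`) and assumes the module-level `np` import; the guard keeps plain
# imports unaffected.
if __name__ == "__main__":  # pragma: no cover
    ops = get_ops(2)                          # base-2 log operations from `cache`
    p = ops.log(np.array([0.25, 0.25, 0.5]))  # log2 probabilities
    q = ops.log(np.array([0.5, 0.25, 0.25]))
    joint = ops.mult(p, q)                    # element-wise product in log space
    print(ops.add_reduce(p))                  # log2(sum(p)) -> 0.0 for a normalized dist
    print(ops.exp(ops.normalize(joint)))      # renormalized linear probabilities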
| 24.05992 | 82 | 0.517295 | from types import MethodType
import numpy as np
from ..exceptions import InvalidBase
__all__ = (
'get_ops',
'LinearOperations',
'LogOperations',
)
acceptable_base_strings = {'linear', 'e'}
def get_ops(base):
if base in cache:
ops = cache[base]
else:
# This assumes that 'linear' is in cache.
ops = LogOperations(base)
cache[base] = ops
return ops
def exp_func(b):
from dit.utils import is_string_like
if is_string_like(b) and b not in acceptable_base_strings:
raise InvalidBase(msg=b)
if b == 'linear':
exp = lambda x: x # pragma: no branch
elif b == 2:
exp = np.exp2
elif b == 10:
exp = lambda x: 10**x
elif b == 'e' or np.isclose(b, np.e):
exp = np.exp
else:
if b <= 0 or b == 1:
raise InvalidBase(b)
def exp(x, base=b):
"""
Return `base`**`x`
Parameters
----------
x : float
The number to exponentiate
base : float
The base of the exponential
Returns
-------
p : float
`base`**`x`
"""
return base**np.asarray(x)
return exp
def log_func(b):
from dit.utils import is_string_like
if is_string_like(b) and b not in acceptable_base_strings:
raise InvalidBase(msg=b)
if b == 'linear':
log = lambda x: x # pragma: no branch
elif b == 2:
log = np.log2
elif b == 10:
log = np.log10
elif b == 'e' or np.isclose(b, np.e):
log = np.log
else:
if b <= 0 or b == 1:
raise InvalidBase(b)
Z = np.log(b)
def log(x, func=np.log):
"""
Return the log of `x`
Parameters
----------
x : float
The value to take the log of
func : function
A logarithm function
Returns
-------
log : float
The logarithm of `x` in base `b` (from outer scope)
"""
return func(x) / Z
return log
class Operations(object):
### Do we allow base == 'e' or should we convert to its numerical value?
### Ans: We store whatever was specified but provide get_base() with an
### option to return a numerical base.
one = None
zero = None
base = None
exp = None
log = None
def get_base(self, numerical=False):
if numerical and self.base == 'e':
base = np.exp(1)
else:
base = self.base
return base
def is_null(self, p):
return np.isclose(self.zero, p)
def is_null_exact(self, p):
return self.zero == p
def add(self, x, y):
raise NotImplementedError
def add_inplace(self, x, y):
raise NotImplementedError
def add_reduce(self, x):
raise NotImplementedError
def mult(self, x, y):
raise NotImplementedError
def mult_inplace(self, x, y):
raise NotImplementedError
def mult_reduce(self, x):
raise NotImplementedError
def invert(self, x):
raise NotImplementedError
def normalize(self, x):
raise NotImplementedError
class LinearOperations(Operations):
one = 1
zero = 0
base = 'linear'
# If the functions below are standard Python functions (as opposed to
# NumPy ufuncs), then they will be treated as unbound methods for the class.
# During instantiation, they are bound to the instance (since before
# instantiation they are class methods) and thus, we are left with
# bound methods (undesirably). If we had modified these attributes in the
# __init__ function, then they would not be bound (or even unbound methods)
# but functions instead (desirably). This is precisely what LogOperations
# does, which is why it does not have this issue. An alternative approach
# is to explicitly declare these functions to be static methods, as we
# do below.
#
exp = staticmethod(exp_func(base))
log = staticmethod(log_func(base))
def add(self, x, y):
z = x + y
return z
def add_inplace(self, x, y):
x += y
return x
def add_reduce(self, x, axis=None):
z = x.sum(axis=axis)
return z
def mult(self, x, y):
z = x * y
return z
def mult_inplace(self, x, y):
x *= y
return x
def mult_reduce(self, x, axis=None):
z = np.prod(x, axis=axis)
return z
def invert(self, x):
z = 1 / x
return z
def normalize(self, x, axis=None):
        z = x / x.sum(axis=axis)
return z
def set_add(ops):
# To preserve numerical accuracy, we must make use of a logaddexp
# function. These functions only exist in Numpy for base-e and base-2.
# For all other bases, we must convert and then convert back.
# In each case, we use default arguments to make the function that we
# are calling 'local'.
base = ops.base
if base == 2:
def add(self, x, y, func=np.logaddexp2):
return func(x, y)
elif base == 'e' or np.isclose(base, np.e):
def add(self, x, y, func=np.logaddexp):
return func(x, y)
else:
# No need to optimize this...
def add(self, x, y):
# Convert log_b probabilities to log_2 probabilities.
x2 = x * np.log2(base)
y2 = y * np.log2(base)
z = np.logaddexp2(x2, y2)
# Convert log_2 probabilities to log_b probabilities.
z *= self.log(2)
return z
add.__doc__ = """
Add the arrays element-wise. Neither x nor y will be modified.
Assumption: y <= 0.
Parameters
----------
x, y : NumPy arrays, shape (n,)
The arrays to add.
Returns
-------
z : NumPy array, shape (n,)
The resultant array.
"""
ops.add = MethodType(add, ops)
def set_add_inplace(ops):
base = ops.base
if base == 2:
def add_inplace(self, x, y, func=np.logaddexp2):
return func(x, y, x)
elif base == 'e' or np.isclose(base, np.e):
def add_inplace(self, x, y, func=np.logaddexp):
return func(x, y, x)
else:
def add_inplace(self, x, y):
x *= np.log2(base)
y2 = y * np.log2(base)
np.logaddexp2(x, y2, x)
x *= self.log(2)
return x
add_inplace.__doc__ = """
Adds `y` to `x`, in-place. `x` will be modified, but `y` will not.
Assumption: :math:`y <= 0`.
Parameters
----------
x, y : NumPy arrays, shape (n,)
The arrays to add.
Returns
-------
x : NumPy array, shape (n,)
The resultant array.
"""
ops.add_inplace = MethodType(add_inplace, ops)
def set_add_reduce(ops):
# https://github.com/numpy/numpy/issues/4599
base = ops.base
if base == 2:
def add_reduce(self, x, axis=None, func=np.logaddexp2):
if len(x) == 0:
# Since logaddexp.identity is None, we handle it separately.
z = self.zero
else:
# Note, we are converting to a NumPy array, if necessary.
z = func.reduce(x, axis=axis, dtype=float)
return z
elif base == 'e' or np.isclose(base, np.e):
def add_reduce(self, x, axis=None, func=np.logaddexp):
if len(x) == 0:
# Since logaddexp.identity is None, we handle it separately.
z = self.zero
else:
# Note, we are converting to a NumPy array, if necessary.
z = func.reduce(x, axis=axis, dtype=float)
return z
else:
def add_reduce(self, x, axis=None):
if len(x) == 0:
# Since logaddexp.identity is None, we handle it separately.
z = self.zero
else:
# Note, we are converting to a NumPy array, if necessary.
# Change the base-2, add, and then convert back.
x2 = x * np.log2(base)
z = np.logaddexp2.reduce(x2, axis=axis, dtype=float)
z /= np.log2(base)
return z
add_reduce.__doc__ = """
Performs an `addition' reduction on `x`.
    Assumption: :math:`x[i] <= 0`.
Returns
-------
z : float
The summation of the elements in `x`.
"""
ops.add_reduce = MethodType(add_reduce, ops)
class LogOperations(Operations):
one = None
zero = None
base = None
exp = None
log = None
def __init__(self, base):
self.set_base(base)
def set_base(self, base):
self.base = base
self.exp = exp_func(base)
self.log = log_func(base)
self.one = self.log(1)
self.zero = self.log(0)
set_add(self)
set_add_inplace(self)
set_add_reduce(self)
def mult(self, x, y):
z = x + y
return z
def mult_inplace(self, x, y):
x += y
return x
def mult_reduce(self, x, axis=None):
z = x.sum(axis=axis)
return z
def invert(self, x):
z = -x
return z
def normalize(self, x, axis=None):
z = x - self.add_reduce(x, axis=axis)
return z
cache = {
'linear': LinearOperations(),
2: LogOperations(2),
'e': LogOperations('e')
}
| true | true |
f70049a62ff8108e599465f06904de5438b65282 | 19,426 | py | Python | release/scripts/modules/bl_i18n_utils/utils_spell_check.py | lordloki/upbge | 18d0f5419cc1338ecf739362afef56bd96b42cfb | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2022-01-11T10:02:21.000Z | 2022-01-11T10:02:21.000Z | release/scripts/modules/bl_i18n_utils/utils_spell_check.py | lordloki/upbge | 18d0f5419cc1338ecf739362afef56bd96b42cfb | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | release/scripts/modules/bl_i18n_utils/utils_spell_check.py | lordloki/upbge | 18d0f5419cc1338ecf739362afef56bd96b42cfb | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import enchant
import os
import pickle
import re
class SpellChecker:
"""
A basic spell checker.
"""
# These must be all lower case for comparisons
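    # (Editor's note) `uimsgs` acts as a whitelist: these tokens are pre-loaded into
    # the cache in __init__, so they are accepted without consulting the enchant
    # dictionary (abbreviations, merged words, Blender terms, file formats, acronyms).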
uimsgs = {
# OK words
"adaptively", "adaptivity",
"aren", # aren't
"betweens", # yuck! in-betweens!
"boolean", "booleans",
"chamfer",
"couldn", # couldn't
"customizable",
"decrement",
"derivate",
"deterministically",
"doesn", # doesn't
"duplications",
"effector",
"equi", # equi-angular, etc.
"fader",
"globbing",
"hasn", # hasn't
"hetero",
"hoc", # ad-hoc
"incompressible",
"indices",
"instantiation",
"iridas",
"isn", # isn't
"iterable",
"kyrgyz",
"latin",
"merchantability",
"mplayer",
"ons", # add-ons
"pong", # ping pong
"scalable",
"shadeless",
"shouldn", # shouldn't
"smoothen",
"spacings",
"teleport", "teleporting",
"vertices",
"wasn", # wasn't
# Merged words
"antialiasing", "antialias",
"arcsine", "arccosine", "arctangent",
"autoclip",
"autocomplete",
"autoexec",
"autoexecution",
"autogenerated",
"autolock",
"automask", "automasking",
"automerge",
"autoname",
"autopack",
"autosave",
"autoscale",
"autosmooth",
"autosplit",
"backface", "backfacing",
"backimage",
"backscattered",
"bandnoise",
"bindcode",
"bitdepth",
"bitflag", "bitflags",
"bitrate",
"blackbody",
"blendfile",
"blendin",
"bonesize",
"boundbox",
"boxpack",
"buffersize",
"builtin", "builtins",
"bytecode",
"chunksize",
"customdata",
"dataset", "datasets",
"de",
"deadzone",
"deconstruct",
"defocus",
"denoise", "denoised", "denoising", "denoiser",
"deselect", "deselecting", "deselection",
"despill", "despilling",
"dirtree",
"editcurve",
"editmesh",
"filebrowser",
"filelist",
"filename", "filenames",
"filepath", "filepaths",
"forcefield", "forcefields",
"fulldome", "fulldomes",
"fullscreen",
"gridline", "gridlines",
"hardlight",
"hemi",
"hostname",
"inbetween",
"inscatter", "inscattering",
"libdata",
"lightcache",
"lightprobe", "lightprobes",
"lightless",
"lineset",
"linestyle", "linestyles",
"localview",
"lookup", "lookups",
"mathutils",
"micropolygon",
"midlevel",
"midground",
"mixdown",
"monospaced",
"multi",
"multifractal",
"multiframe",
"multilayer",
"multipaint",
"multires", "multiresolution",
"multisampling",
"multiscatter",
"multitexture",
"multithreaded",
"multiuser",
"multiview",
"namespace",
"nodetree", "nodetrees",
"keyconfig",
"offscreen",
"online",
"playhead",
"popup", "popups",
"pointcloud",
"pre",
"precache", "precaching",
"precalculate",
"precomputing",
"prefetch",
"premultiply", "premultiplied",
"prepass",
"prepend",
"preprocess", "preprocessing", "preprocessor",
"preseek",
"promillage",
"pushdown",
"raytree",
"readonly",
"realtime",
"reinject", "reinjected",
"rekey",
"remesh",
"reprojection", "reproject", "reprojecting",
"resize",
"restpose",
"resync",
"retarget", "retargets", "retargeting", "retargeted",
"retiming",
"rigidbody",
"ringnoise",
"rolloff",
"runtime",
"scanline",
"screenshot", "screenshots",
"seekability",
"selfcollision",
"shadowbuffer", "shadowbuffers",
"singletexture",
"spellcheck", "spellchecking",
"startup",
"stateful",
"starfield",
"studiolight",
"subflare", "subflares",
"subframe", "subframes",
"subclass", "subclasses", "subclassing",
"subdirectory", "subdirectories", "subdir", "subdirs",
"subitem",
"submode",
"submodule", "submodules",
"subpath",
"subsize",
"substep", "substeps",
"targetless",
"textbox", "textboxes",
"tilemode",
"timestamp", "timestamps",
"timestep", "timesteps",
"todo",
"tradeoff",
"un",
"unassociate", "unassociated",
"unbake",
"unclosed",
"uncomment",
"unculled",
"undeformed",
"undistort", "undistorted", "undistortion",
"ungroup", "ungrouped",
"unhide",
"unindent",
"unkeyed",
"unlink", "unlinked",
"unmute",
"unphysical",
"unpremultiply",
"unprojected",
"unprotect",
"unreacted",
"unreferenced",
"unregister",
"unselect", "unselected", "unselectable",
"unsets",
"unshadowed",
"unspill",
"unstitchable", "unstitch",
"unsubdivided", "unsubdivide",
"untrusted",
"vectorscope",
"whitespace", "whitespaces",
"worldspace",
"workflow",
"workspace", "workspaces",
# Neologisms, slangs
"affectable",
"animatable",
"automagic", "automagically",
"blobby",
"blockiness", "blocky",
"collider", "colliders",
"deformer", "deformers",
"determinator",
"editability",
"effectors",
"expander",
"instancer",
"keyer",
"lacunarity",
"linkable",
"numerics",
"occluder", "occluders",
"overridable",
"passepartout",
"perspectively",
"pixelate",
"pointiness",
"polycount",
"polygonization", "polygonalization", # yuck!
"scalings",
"selectable", "selectability",
"shaper",
"smoothen", "smoothening",
"spherize", "spherized",
"stitchable",
"symmetrize",
"trackability",
"transmissivity",
"rasterized", "rasterization", "rasterizer",
"renderer", "renderers", "renderable", "renderability",
# Really bad!!!
"convertor",
"fullscr",
# Abbreviations
"aero",
"amb",
"anim",
"aov",
"app",
"bbox", "bboxes",
"bksp", # Backspace
"bool",
"calc",
"cfl",
"config", "configs",
"const",
"coord", "coords",
"degr",
"diff",
"dof",
"dupli", "duplis",
"eg",
"esc",
"expr",
"fac",
"fra",
"fract",
"frs",
"grless",
"http",
"init",
"irr", # Irradiance
"kbit", "kb",
"lang", "langs",
"lclick", "rclick",
"lensdist",
"loc", "rot", "pos",
"lorem",
"luma",
"mbs", # mouse button 'select'.
"mem",
"multicam",
"num",
"ok",
"orco",
"ortho",
"pano",
"persp",
"pref", "prefs",
"prev",
"param",
"premul",
"quad", "quads",
"quat", "quats",
"recalc", "recalcs",
"refl",
"sce",
"sel",
"spec",
"struct", "structs",
"subdiv",
"sys",
"tex",
"texcoord",
"tmr", # timer
"tri", "tris",
"udim", "udims",
"upres", # Upresolution
"usd",
"uv", "uvs", "uvw", "uw", "uvmap",
"ve",
"vec",
"vel", # velocity!
"vert", "verts",
"vis",
"vram",
"xor",
"xyz", "xzy", "yxz", "yzx", "zxy", "zyx",
"xy", "xz", "yx", "yz", "zx", "zy",
# General computer/science terms
"affine",
"albedo",
"anamorphic",
"anisotropic", "anisotropy",
"bitangent",
"boid", "boids",
"ceil",
"compressibility",
"curvilinear",
"equiangular",
"equisolid",
"euler", "eulers",
"fribidi",
"gettext",
"hashable",
"hotspot",
"interocular",
"intrinsics",
"irradiance",
"isosurface",
"jitter", "jittering", "jittered",
"keymap", "keymaps",
"lambertian",
"laplacian",
"metadata",
"msgfmt",
"nand", "xnor",
"normals",
"numpad",
"octahedral",
"octree",
"omnidirectional",
"opengl",
"openmp",
"parametrization",
"photoreceptor",
"poly",
"polyline", "polylines",
"probabilistically",
"pulldown", "pulldowns",
"quantized",
"quartic",
"quaternion", "quaternions",
"quintic",
"samplerate",
"sawtooth",
"scrollback",
"scrollbar",
"scroller",
"searchable",
"spacebar",
"subtractive",
"superellipse",
"tooltip", "tooltips",
"trackpad",
"tuple",
"unicode",
"viewport", "viewports",
"viscoelastic",
"vorticity",
"waveform", "waveforms",
"wildcard", "wildcards",
"wintab", # Some Windows tablet API
# General computer graphics terms
"anaglyph",
"bezier", "beziers",
"bicubic",
"bilinear",
"bindpose",
"binormal",
"blackpoint", "whitepoint",
"blinn",
"bokeh",
"catadioptric",
"centroid",
"chroma",
"chrominance",
"clearcoat",
"codec", "codecs",
"collada",
"compositing",
"crossfade",
"cubemap", "cubemaps",
"cuda",
"deinterlace",
"dropoff",
"duotone",
"dv",
"eigenvectors",
"emissive",
"equirectangular",
"filmlike",
"fisheye",
"framerate",
"gimbal",
"grayscale",
"icosphere",
"inpaint",
"kerning",
"lightmap",
"linearlight",
"lossless", "lossy",
"luminance",
"mantaflow",
"matcap",
"midtones",
"mipmap", "mipmaps", "mip",
"ngon", "ngons",
"ntsc",
"nurb", "nurbs",
"perlin",
"phong",
"pinlight",
"qi",
"radiosity",
"raycasting",
"raytrace", "raytracing", "raytraced",
"refractions",
"remesher", "remeshing", "remesh",
"renderfarm",
"scanfill",
"shader", "shaders",
"shadowmap", "shadowmaps",
"softlight",
"specular", "specularity",
"spillmap",
"sobel",
"stereoscopy",
"texel",
"timecode",
"tonemap",
"toon",
"transmissive",
"vividlight",
"volumetrics",
"voronoi",
"voxel", "voxels",
"vsync",
"wireframe",
"zmask",
"ztransp",
# Blender terms
"audaspace",
"azone", # action zone
"backwire",
"bbone",
"bendy", # bones
"bmesh",
"breakdowner",
"bspline",
"bweight",
"colorband",
"datablock", "datablocks",
"despeckle",
"depsgraph",
"dopesheet",
"dupliface", "duplifaces",
"dupliframe", "dupliframes",
"dupliobject", "dupliob",
"dupligroup",
"duplivert",
"dyntopo",
"editbone",
"editmode",
"eevee",
"fcurve", "fcurves",
"fedge", "fedges",
"filmic",
"fluidsim",
"freestyle",
"enum", "enums",
"gizmogroup",
"gon", "gons", # N-Gon(s)
"gpencil",
"idcol",
"keyframe", "keyframes", "keyframing", "keyframed",
"lookdev",
"luminocity",
"mathvis",
"metaball", "metaballs", "mball",
"metaelement", "metaelements",
"metastrip", "metastrips",
"movieclip",
"mpoly",
"mtex",
"nabla",
"navmesh",
"outliner",
"overscan",
"paintmap", "paintmaps",
"polygroup", "polygroups",
"poselib",
"pushpull",
"pyconstraint", "pyconstraints",
"qe", # keys...
"shaderfx", "shaderfxs",
"shapekey", "shapekeys",
"shrinkfatten",
"shrinkwrap",
"softbody",
"stucci",
"subdiv",
"subtype",
"sunsky",
"tessface", "tessfaces",
"texface",
"timeline", "timelines",
"tosphere",
"uilist",
"userpref",
"vcol", "vcols",
"vgroup", "vgroups",
"vinterlace",
"vse",
"wasd", "wasdqe", # keys...
"wetmap", "wetmaps",
"wpaint",
"uvwarp",
# UOC (Ugly Operator Categories)
"cachefile",
"paintcurve",
"ptcache",
"dpaint",
# Algorithm/library names
"ashikhmin", # Ashikhmin-Shirley
"arsloe", # Texel-Marsen-Arsloe
"beckmann",
"blackman", # Blackman-Harris
"blosc",
"burley", # Christensen-Burley
"catmull",
"catrom",
"chebychev",
"conrady", # Brown-Conrady
"courant",
"cryptomatte", "crypto",
"embree",
"gmp",
"hosek",
"kutta",
"lennard",
"marsen", # Texel-Marsen-Arsloe
"mikktspace",
"minkowski",
"minnaert",
"moskowitz", # Pierson-Moskowitz
"musgrave",
"nayar",
"netravali",
"nishita",
"ogawa",
"oren",
"peucker", # Ramer-Douglas-Peucker
"pierson", # Pierson-Moskowitz
"preetham",
"prewitt",
"ramer", # Ramer-Douglas-Peucker
"runge",
"sobol",
"verlet",
"wilkie",
"worley",
# Acronyms
"aa", "msaa",
"ao",
"api",
"apic", # Affine Particle-In-Cell
"asc", "cdl",
"ascii",
"atrac",
"avx",
"bsdf",
"bssrdf",
"bw",
"ccd",
"cmd",
"cmos",
"cpus",
"ctrl",
"cw", "ccw",
"dev",
"djv",
"dpi",
"dvar",
"dx",
"eo",
"fh",
"fk",
"fov",
"fft",
"futura",
"fx",
"gfx",
"ggx",
"gl",
"glsl",
"gpl",
"gpu", "gpus",
"hc",
"hdc",
"hdr", "hdri", "hdris",
"hh", "mm", "ss", "ff", # hh:mm:ss:ff timecode
"hsv", "hsva", "hsl",
"id",
"ies",
"ior",
"itu",
"jonswap",
"lhs",
"lmb", "mmb", "rmb",
"kb",
"mocap",
"msgid", "msgids",
"mux",
"ndof",
"ppc",
"precisa",
"px",
"qmc",
"rdp",
"rgb", "rgba",
"rhs",
"rv",
"sdl",
"sl",
"smpte",
"ssao",
"ssr",
"svn",
"tma",
"ui",
"unix",
"vbo", "vbos",
"vr",
"wxyz",
"xr",
"ycc", "ycca",
"yrgb",
"yuv", "yuva",
# Blender acronyms
"bge",
"bli",
"bpy",
"bvh",
"dbvt",
"dop", # BLI K-Dop BVH
"ik",
"nla",
"py",
"qbvh",
"rna",
"rvo",
"simd",
"sph",
"svbvh",
# Files types/formats
"avi",
"attrac",
"autocad",
"autodesk",
"bmp",
"btx",
"cineon",
"dpx",
"dwaa",
"dwab",
"dxf",
"eps",
"exr",
"fbx",
"fbxnode",
"ffmpeg",
"flac",
"gltf",
"gzip",
"ico",
"jpg", "jpeg", "jpegs",
"json",
"matroska",
"mdd",
"mkv",
"mpeg", "mjpeg",
"mtl",
"ogg",
"openjpeg",
"osl",
"oso",
"piz",
"png", "pngs",
"po",
"quicktime",
"rle",
"sgi",
"stl",
"svg",
"targa", "tga",
"tiff",
"theora",
"vorbis",
"vp9",
"wav",
"webm",
"xiph",
"xml",
"xna",
"xvid",
}
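    # (Editor's note) The patterns below split a UI message into candidate words:
    # a word is a run of letters (an optional upper-case prefix followed by
    # lower-case letters) delimited by whitespace, quotes/backticks, punctuation,
    # or a slash/hyphen adjacent to a letter.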
_valid_before = "(?<=[\\s*'\"`])|(?<=[a-zA-Z][/-])|(?<=^)"
_valid_after = "(?=[\\s'\"`.!?,;:])|(?=[/-]\\s*[a-zA-Z])|(?=$)"
_valid_words = "(?:{})(?:(?:[A-Z]+[a-z]*)|[A-Z]*|[a-z]*)(?:{})".format(_valid_before, _valid_after)
_split_words = re.compile(_valid_words).findall
@classmethod
def split_words(cls, text):
return [w for w in cls._split_words(text) if w]
def __init__(self, settings, lang="en_US"):
self.settings = settings
self.dict_spelling = enchant.Dict(lang)
self.cache = set(self.uimsgs)
cache = self.settings.SPELL_CACHE
if cache and os.path.exists(cache):
with open(cache, 'rb') as f:
self.cache |= set(pickle.load(f))
def __del__(self):
cache = self.settings.SPELL_CACHE
if cache and os.path.exists(cache):
with open(cache, 'wb') as f:
pickle.dump(self.cache, f)
def check(self, txt):
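        # (Editor's note) Return a list of (word, suggestions) pairs for the words of
        # `txt` that are unknown to both the enchant dictionary and the cache; known
        # words and fully clean messages are cached to skip future lookups.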
ret = []
if txt in self.cache:
return ret
for w in self.split_words(txt):
w_lower = w.lower()
if w_lower in self.cache:
continue
if not self.dict_spelling.check(w):
ret.append((w, self.dict_spelling.suggest(w)))
else:
self.cache.add(w_lower)
if not ret:
self.cache.add(txt)
return ret
| 23.20908 | 103 | 0.438279 |
import enchant
import os
import pickle
import re
class SpellChecker:
uimsgs = {
"adaptively", "adaptivity",
"aren", "betweens", # yuck! in-betweens!
"boolean", "booleans",
"chamfer",
"couldn", # couldn't
"customizable",
"decrement",
"derivate",
"deterministically",
"doesn", "duplications",
"effector",
"equi", # equi-angular, etc.
"fader",
"globbing",
"hasn", # hasn't
"hetero",
"hoc", "incompressible",
"indices",
"instantiation",
"iridas",
"isn", "iterable",
"kyrgyz",
"latin",
"merchantability",
"mplayer",
"ons", # add-ons
"pong", # ping pong
"scalable",
"shadeless",
"shouldn", # shouldn't
"smoothen",
"spacings",
"teleport", "teleporting",
"vertices",
"wasn",
# Merged words
"antialiasing", "antialias",
"arcsine", "arccosine", "arctangent",
"autoclip",
"autocomplete",
"autoexec",
"autoexecution",
"autogenerated",
"autolock",
"automask", "automasking",
"automerge",
"autoname",
"autopack",
"autosave",
"autoscale",
"autosmooth",
"autosplit",
"backface", "backfacing",
"backimage",
"backscattered",
"bandnoise",
"bindcode",
"bitdepth",
"bitflag", "bitflags",
"bitrate",
"blackbody",
"blendfile",
"blendin",
"bonesize",
"boundbox",
"boxpack",
"buffersize",
"builtin", "builtins",
"bytecode",
"chunksize",
"customdata",
"dataset", "datasets",
"de",
"deadzone",
"deconstruct",
"defocus",
"denoise", "denoised", "denoising", "denoiser",
"deselect", "deselecting", "deselection",
"despill", "despilling",
"dirtree",
"editcurve",
"editmesh",
"filebrowser",
"filelist",
"filename", "filenames",
"filepath", "filepaths",
"forcefield", "forcefields",
"fulldome", "fulldomes",
"fullscreen",
"gridline", "gridlines",
"hardlight",
"hemi",
"hostname",
"inbetween",
"inscatter", "inscattering",
"libdata",
"lightcache",
"lightprobe", "lightprobes",
"lightless",
"lineset",
"linestyle", "linestyles",
"localview",
"lookup", "lookups",
"mathutils",
"micropolygon",
"midlevel",
"midground",
"mixdown",
"monospaced",
"multi",
"multifractal",
"multiframe",
"multilayer",
"multipaint",
"multires", "multiresolution",
"multisampling",
"multiscatter",
"multitexture",
"multithreaded",
"multiuser",
"multiview",
"namespace",
"nodetree", "nodetrees",
"keyconfig",
"offscreen",
"online",
"playhead",
"popup", "popups",
"pointcloud",
"pre",
"precache", "precaching",
"precalculate",
"precomputing",
"prefetch",
"premultiply", "premultiplied",
"prepass",
"prepend",
"preprocess", "preprocessing", "preprocessor",
"preseek",
"promillage",
"pushdown",
"raytree",
"readonly",
"realtime",
"reinject", "reinjected",
"rekey",
"remesh",
"reprojection", "reproject", "reprojecting",
"resize",
"restpose",
"resync",
"retarget", "retargets", "retargeting", "retargeted",
"retiming",
"rigidbody",
"ringnoise",
"rolloff",
"runtime",
"scanline",
"screenshot", "screenshots",
"seekability",
"selfcollision",
"shadowbuffer", "shadowbuffers",
"singletexture",
"spellcheck", "spellchecking",
"startup",
"stateful",
"starfield",
"studiolight",
"subflare", "subflares",
"subframe", "subframes",
"subclass", "subclasses", "subclassing",
"subdirectory", "subdirectories", "subdir", "subdirs",
"subitem",
"submode",
"submodule", "submodules",
"subpath",
"subsize",
"substep", "substeps",
"targetless",
"textbox", "textboxes",
"tilemode",
"timestamp", "timestamps",
"timestep", "timesteps",
"todo",
"tradeoff",
"un",
"unassociate", "unassociated",
"unbake",
"unclosed",
"uncomment",
"unculled",
"undeformed",
"undistort", "undistorted", "undistortion",
"ungroup", "ungrouped",
"unhide",
"unindent",
"unkeyed",
"unlink", "unlinked",
"unmute",
"unphysical",
"unpremultiply",
"unprojected",
"unprotect",
"unreacted",
"unreferenced",
"unregister",
"unselect", "unselected", "unselectable",
"unsets",
"unshadowed",
"unspill",
"unstitchable", "unstitch",
"unsubdivided", "unsubdivide",
"untrusted",
"vectorscope",
"whitespace", "whitespaces",
"worldspace",
"workflow",
"workspace", "workspaces",
# Neologisms, slangs
"affectable",
"animatable",
"automagic", "automagically",
"blobby",
"blockiness", "blocky",
"collider", "colliders",
"deformer", "deformers",
"determinator",
"editability",
"effectors",
"expander",
"instancer",
"keyer",
"lacunarity",
"linkable",
"numerics",
"occluder", "occluders",
"overridable",
"passepartout",
"perspectively",
"pixelate",
"pointiness",
"polycount",
"polygonization", "polygonalization", # yuck!
"scalings",
"selectable", "selectability",
"shaper",
"smoothen", "smoothening",
"spherize", "spherized",
"stitchable",
"symmetrize",
"trackability",
"transmissivity",
"rasterized", "rasterization", "rasterizer",
"renderer", "renderers", "renderable", "renderability",
# Really bad!!!
"convertor",
"fullscr",
# Abbreviations
"aero",
"amb",
"anim",
"aov",
"app",
"bbox", "bboxes",
"bksp", # Backspace
"bool",
"calc",
"cfl",
"config", "configs",
"const",
"coord", "coords",
"degr",
"diff",
"dof",
"dupli", "duplis",
"eg",
"esc",
"expr",
"fac",
"fra",
"fract",
"frs",
"grless",
"http",
"init",
"irr", # Irradiance
"kbit", "kb",
"lang", "langs",
"lclick", "rclick",
"lensdist",
"loc", "rot", "pos",
"lorem",
"luma",
"mbs", # mouse button 'select'.
"mem",
"multicam",
"num",
"ok",
"orco",
"ortho",
"pano",
"persp",
"pref", "prefs",
"prev",
"param",
"premul",
"quad", "quads",
"quat", "quats",
"recalc", "recalcs",
"refl",
"sce",
"sel",
"spec",
"struct", "structs",
"subdiv",
"sys",
"tex",
"texcoord",
"tmr", # timer
"tri", "tris",
"udim", "udims",
"upres", # Upresolution
"usd",
"uv", "uvs", "uvw", "uw", "uvmap",
"ve",
"vec",
"vel", # velocity!
"vert", "verts",
"vis",
"vram",
"xor",
"xyz", "xzy", "yxz", "yzx", "zxy", "zyx",
"xy", "xz", "yx", "yz", "zx", "zy",
# General computer/science terms
"affine",
"albedo",
"anamorphic",
"anisotropic", "anisotropy",
"bitangent",
"boid", "boids",
"ceil",
"compressibility",
"curvilinear",
"equiangular",
"equisolid",
"euler", "eulers",
"fribidi",
"gettext",
"hashable",
"hotspot",
"interocular",
"intrinsics",
"irradiance",
"isosurface",
"jitter", "jittering", "jittered",
"keymap", "keymaps",
"lambertian",
"laplacian",
"metadata",
"msgfmt",
"nand", "xnor",
"normals",
"numpad",
"octahedral",
"octree",
"omnidirectional",
"opengl",
"openmp",
"parametrization",
"photoreceptor",
"poly",
"polyline", "polylines",
"probabilistically",
"pulldown", "pulldowns",
"quantized",
"quartic",
"quaternion", "quaternions",
"quintic",
"samplerate",
"sawtooth",
"scrollback",
"scrollbar",
"scroller",
"searchable",
"spacebar",
"subtractive",
"superellipse",
"tooltip", "tooltips",
"trackpad",
"tuple",
"unicode",
"viewport", "viewports",
"viscoelastic",
"vorticity",
"waveform", "waveforms",
"wildcard", "wildcards",
"wintab", # Some Windows tablet API
# General computer graphics terms
"anaglyph",
"bezier", "beziers",
"bicubic",
"bilinear",
"bindpose",
"binormal",
"blackpoint", "whitepoint",
"blinn",
"bokeh",
"catadioptric",
"centroid",
"chroma",
"chrominance",
"clearcoat",
"codec", "codecs",
"collada",
"compositing",
"crossfade",
"cubemap", "cubemaps",
"cuda",
"deinterlace",
"dropoff",
"duotone",
"dv",
"eigenvectors",
"emissive",
"equirectangular",
"filmlike",
"fisheye",
"framerate",
"gimbal",
"grayscale",
"icosphere",
"inpaint",
"kerning",
"lightmap",
"linearlight",
"lossless", "lossy",
"luminance",
"mantaflow",
"matcap",
"midtones",
"mipmap", "mipmaps", "mip",
"ngon", "ngons",
"ntsc",
"nurb", "nurbs",
"perlin",
"phong",
"pinlight",
"qi",
"radiosity",
"raycasting",
"raytrace", "raytracing", "raytraced",
"refractions",
"remesher", "remeshing", "remesh",
"renderfarm",
"scanfill",
"shader", "shaders",
"shadowmap", "shadowmaps",
"softlight",
"specular", "specularity",
"spillmap",
"sobel",
"stereoscopy",
"texel",
"timecode",
"tonemap",
"toon",
"transmissive",
"vividlight",
"volumetrics",
"voronoi",
"voxel", "voxels",
"vsync",
"wireframe",
"zmask",
"ztransp",
# Blender terms
"audaspace",
"azone", # action zone
"backwire",
"bbone",
"bendy", # bones
"bmesh",
"breakdowner",
"bspline",
"bweight",
"colorband",
"datablock", "datablocks",
"despeckle",
"depsgraph",
"dopesheet",
"dupliface", "duplifaces",
"dupliframe", "dupliframes",
"dupliobject", "dupliob",
"dupligroup",
"duplivert",
"dyntopo",
"editbone",
"editmode",
"eevee",
"fcurve", "fcurves",
"fedge", "fedges",
"filmic",
"fluidsim",
"freestyle",
"enum", "enums",
"gizmogroup",
"gon", "gons", # N-Gon(s)
"gpencil",
"idcol",
"keyframe", "keyframes", "keyframing", "keyframed",
"lookdev",
"luminocity",
"mathvis",
"metaball", "metaballs", "mball",
"metaelement", "metaelements",
"metastrip", "metastrips",
"movieclip",
"mpoly",
"mtex",
"nabla",
"navmesh",
"outliner",
"overscan",
"paintmap", "paintmaps",
"polygroup", "polygroups",
"poselib",
"pushpull",
"pyconstraint", "pyconstraints",
"qe", # keys...
"shaderfx", "shaderfxs",
"shapekey", "shapekeys",
"shrinkfatten",
"shrinkwrap",
"softbody",
"stucci",
"subdiv",
"subtype",
"sunsky",
"tessface", "tessfaces",
"texface",
"timeline", "timelines",
"tosphere",
"uilist",
"userpref",
"vcol", "vcols",
"vgroup", "vgroups",
"vinterlace",
"vse",
"wasd", "wasdqe", # keys...
"wetmap", "wetmaps",
"wpaint",
"uvwarp",
# UOC (Ugly Operator Categories)
"cachefile",
"paintcurve",
"ptcache",
"dpaint",
# Algorithm/library names
"ashikhmin", # Ashikhmin-Shirley
"arsloe", # Texel-Marsen-Arsloe
"beckmann",
"blackman", # Blackman-Harris
"blosc",
"burley", # Christensen-Burley
"catmull",
"catrom",
"chebychev",
"conrady", # Brown-Conrady
"courant",
"cryptomatte", "crypto",
"embree",
"gmp",
"hosek",
"kutta",
"lennard",
"marsen", # Texel-Marsen-Arsloe
"mikktspace",
"minkowski",
"minnaert",
"moskowitz", # Pierson-Moskowitz
"musgrave",
"nayar",
"netravali",
"nishita",
"ogawa",
"oren",
"peucker", # Ramer-Douglas-Peucker
"pierson", # Pierson-Moskowitz
"preetham",
"prewitt",
"ramer", # Ramer-Douglas-Peucker
"runge",
"sobol",
"verlet",
"wilkie",
"worley",
# Acronyms
"aa", "msaa",
"ao",
"api",
"apic", # Affine Particle-In-Cell
"asc", "cdl",
"ascii",
"atrac",
"avx",
"bsdf",
"bssrdf",
"bw",
"ccd",
"cmd",
"cmos",
"cpus",
"ctrl",
"cw", "ccw",
"dev",
"djv",
"dpi",
"dvar",
"dx",
"eo",
"fh",
"fk",
"fov",
"fft",
"futura",
"fx",
"gfx",
"ggx",
"gl",
"glsl",
"gpl",
"gpu", "gpus",
"hc",
"hdc",
"hdr", "hdri", "hdris",
"hh", "mm", "ss", "ff", # hh:mm:ss:ff timecode
"hsv", "hsva", "hsl",
"id",
"ies",
"ior",
"itu",
"jonswap",
"lhs",
"lmb", "mmb", "rmb",
"kb",
"mocap",
"msgid", "msgids",
"mux",
"ndof",
"ppc",
"precisa",
"px",
"qmc",
"rdp",
"rgb", "rgba",
"rhs",
"rv",
"sdl",
"sl",
"smpte",
"ssao",
"ssr",
"svn",
"tma",
"ui",
"unix",
"vbo", "vbos",
"vr",
"wxyz",
"xr",
"ycc", "ycca",
"yrgb",
"yuv", "yuva",
# Blender acronyms
"bge",
"bli",
"bpy",
"bvh",
"dbvt",
"dop", # BLI K-Dop BVH
"ik",
"nla",
"py",
"qbvh",
"rna",
"rvo",
"simd",
"sph",
"svbvh",
# Files types/formats
"avi",
"attrac",
"autocad",
"autodesk",
"bmp",
"btx",
"cineon",
"dpx",
"dwaa",
"dwab",
"dxf",
"eps",
"exr",
"fbx",
"fbxnode",
"ffmpeg",
"flac",
"gltf",
"gzip",
"ico",
"jpg", "jpeg", "jpegs",
"json",
"matroska",
"mdd",
"mkv",
"mpeg", "mjpeg",
"mtl",
"ogg",
"openjpeg",
"osl",
"oso",
"piz",
"png", "pngs",
"po",
"quicktime",
"rle",
"sgi",
"stl",
"svg",
"targa", "tga",
"tiff",
"theora",
"vorbis",
"vp9",
"wav",
"webm",
"xiph",
"xml",
"xna",
"xvid",
}
_valid_before = "(?<=[\\s*'\"`])|(?<=[a-zA-Z][/-])|(?<=^)"
_valid_after = "(?=[\\s'\"`.!?,;:])|(?=[/-]\\s*[a-zA-Z])|(?=$)"
_valid_words = "(?:{})(?:(?:[A-Z]+[a-z]*)|[A-Z]*|[a-z]*)(?:{})".format(_valid_before, _valid_after)
_split_words = re.compile(_valid_words).findall
@classmethod
def split_words(cls, text):
return [w for w in cls._split_words(text) if w]
def __init__(self, settings, lang="en_US"):
self.settings = settings
self.dict_spelling = enchant.Dict(lang)
self.cache = set(self.uimsgs)
cache = self.settings.SPELL_CACHE
if cache and os.path.exists(cache):
with open(cache, 'rb') as f:
self.cache |= set(pickle.load(f))
def __del__(self):
cache = self.settings.SPELL_CACHE
if cache and os.path.exists(cache):
with open(cache, 'wb') as f:
pickle.dump(self.cache, f)
def check(self, txt):
ret = []
if txt in self.cache:
return ret
for w in self.split_words(txt):
w_lower = w.lower()
if w_lower in self.cache:
continue
if not self.dict_spelling.check(w):
ret.append((w, self.dict_spelling.suggest(w)))
else:
self.cache.add(w_lower)
if not ret:
self.cache.add(txt)
return ret
| true | true |
f7004a6a9da6e184c08cdd829e3bc4d6ac4c80b7 | 12,774 | py | Python | scripts/sphere/register.py | NingAnMe/voxelmorph | 3a1a4c2f456af2dba5552efc1b08c68af38e54dc | [
"Apache-2.0"
] | null | null | null | scripts/sphere/register.py | NingAnMe/voxelmorph | 3a1a4c2f456af2dba5552efc1b08c68af38e54dc | [
"Apache-2.0"
] | null | null | null | scripts/sphere/register.py | NingAnMe/voxelmorph | 3a1a4c2f456af2dba5552efc1b08c68af38e54dc | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Example script to register two volumes with VoxelMorph models.
Please make sure to use trained models appropriately. Let's say we have a model trained to register
a scan (moving) to an atlas (fixed). To register a scan to the atlas and save the warp field, run:
register.py --moving moving.nii.gz --fixed fixed.nii.gz --model model.pt
--moved moved.nii.gz --warp warp.nii.gz
The source and target input images are expected to be affinely registered.
If you use this code, please cite the following, and read function docs for further info/citations
VoxelMorph: A Learning Framework for Deformable Medical Image Registration
G. Balakrishnan, A. Zhao, M. R. Sabuncu, J. Guttag, A.V. Dalca.
IEEE TMI: Transactions on Medical Imaging. 38(8). pp 1788-1800. 2019.
or
Unsupervised Learning for Probabilistic Diffeomorphic Registration for Images and Surfaces
A.V. Dalca, G. Balakrishnan, J. Guttag, M.R. Sabuncu.
MedIA: Medical Image Analysis. (57). pp 226-236, 2019
Copyright 2020 Adrian V. Dalca
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is
distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing permissions and limitations under
the License.
"""
import os
import argparse
import matplotlib.pyplot as plt
# third party
import numpy as np
import nibabel as nib
import torch
from scipy.interpolate import RegularGridInterpolator
from astropy.coordinates import cartesian_to_spherical, spherical_to_cartesian
# import voxelmorph with sphere backend
os.environ['VXM_BACKEND'] = 'sphere'
import voxelmorph as vxm # nopep8
import math
# parse commandline args
parser = argparse.ArgumentParser()
parser.add_argument('--moving', required=True, help='moving image (source) filename')
parser.add_argument('--fixed', required=True, help='fixed image (target) filename')
parser.add_argument('--moved', help='warped image output filename')
parser.add_argument('--model', required=True, help='pytorch model for nonlinear registration')
# parser.add_argument('--normalize_type', default='std', help='select the data normalization processing type')
parser.add_argument('--warp', help='output warp deformation filename')
parser.add_argument('--sphere_sub', help='sphere_sub image filename')
parser.add_argument('--sphere_atlas', help='sphere_atlas image filename')
parser.add_argument('--sphere_reg', help='sphere.reg image output filename')
parser.add_argument('--sulc_sub', help='sulc_sub image filename')
parser.add_argument('--sulc_atlas', help='sulc_atlas image filename')
parser.add_argument('--sphere_freesurfer', help='sphere_freesurfer image filename')
parser.add_argument('--plot_image', help='show time image output filename')
parser.add_argument('--plot_image_dif_1', help='show dif image output filename')
parser.add_argument('--plot_image_dif_2', help='show dif image output filename')
parser.add_argument('-g', '--gpu', help='GPU number(s) - if not supplied, CPU is used')
parser.add_argument('--multichannel', action='store_true',
help='specify that data has multiple channels')
args = parser.parse_args()
def meannormalize(sub_data):
mean = np.mean(sub_data)
std = np.std(sub_data)
norm = (sub_data - mean) / std
return norm, mean, std
def backmeannormalize(input, mean, std):
output = input * std + mean
return output
def minmaxnormalize(sub_data):
zeros = sub_data == 0
max = np.max(sub_data)
min = np.min(sub_data)
norm = (sub_data - min) / (max - min)
norm[zeros] = 0
return norm
def backminmaxnormalize(input, max, min):
output = input * (max - min) + min
return output
def domainnorm(sub_data):
domain = 33
norm = sub_data / domain
return norm
def backdomainnorm(sub_data):
domain = 33
output = sub_data * domain
return output
# def normalize_forword(data, type="std"):
# if type == "std":
# return meannormalize(data)
# elif type == "min_max":
# return minmaxnormalize(data)
# else:
# raise KeyError("type is error")
#
# def normalize_backword(data, a, b, type="std"):
# if type == "std":
# return backmeannormalize(data, a, b)
# elif type == "min_max":
# return backminmaxnormalize(data, a, b)
# else:
# raise KeyError("type is error")
def interpolate(warp_file, lh_sphere):
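    # (Editor's note) Summary of this function: read the subject's FreeSurfer sphere,
    # convert every vertex to spherical coordinates, map them onto the 2D
    # (phi, theta) grid of the predicted warp, sample the displacement field there,
    # and return the displaced coordinates (r, phi', theta') in radians.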
x = np.linspace(-128, 128, 256) # phi ###
y = np.linspace(0, 512, 512) # theta ###
# print(warp_file.files)
warp = warp_file.squeeze()
warp = warp.permute(0, 2, 1)
warp = warp.detach().numpy()
# warp = warp_file['vol']
# warp = np.moveaxis(warp, 1, -1)
interpolate_function_x = RegularGridInterpolator((x, y), -warp[0]) # x-axis
interpolate_function_y = RegularGridInterpolator((x, y), -warp[1]) # y-axis
coords, faces = nib.freesurfer.read_geometry(lh_sphere)
r, phi, theta = cartesian_to_spherical(coords[:, 0], coords[:, 1], coords[:, 2])
p = phi.degree
t = theta.degree
theta_bins = 512
phi_bins = 256
theta_width = math.degrees(2 * np.pi) / theta_bins
t /= theta_width
phi_width = math.degrees(np.pi) / phi_bins
p /= phi_width
t = t.reshape(-1, 1)
p = p.reshape(-1, 1)
pts = np.concatenate((p, t), axis=1)
new_pts_x = interpolate_function_x(pts)
new_pts_y = interpolate_function_y(pts)
x_prime = pts.T[0] + new_pts_x
y_prime = pts.T[1] + new_pts_y
x_prime *= phi_width
y_prime *= theta_width
y_prime = np.clip(y_prime, 0, 360)
x_prime = np.clip(x_prime, -90, 90)
t_prime = [math.radians(i) for i in y_prime]
p_prime = [math.radians(i) for i in x_prime]
t_prime = np.array(t_prime)
p_prime = np.array(p_prime)
return r, p_prime, t_prime
# save 4 image
def save4image(lh_sphere_sub, lh_sphere_atlas, lh_sulc_sub, lh_sulc_atlas, lh_sphere_freesurfer, phi_prime, theta_prime,
imagesavefilename):
lh_morph_sulc_sub = nib.freesurfer.read_morph_data(lh_sulc_sub)
lh_morph_sulc_atlas = nib.freesurfer.read_morph_data(lh_sulc_atlas)
coords_sub, faces_sub = nib.freesurfer.read_geometry(lh_sphere_sub)
r_sub, phi_sub, theta_sub = cartesian_to_spherical(coords_sub[:, 0], coords_sub[:, 1], coords_sub[:, 2])
coords_atlas, faces_atlas = nib.freesurfer.read_geometry(lh_sphere_atlas)
r_atlas, phi_atlas, theta_atlas = cartesian_to_spherical(coords_atlas[:, 0], coords_atlas[:, 1], coords_atlas[:, 2])
coords_freesurfer, faces_freesurfer = nib.freesurfer.read_geometry(lh_sphere_freesurfer)
r_reg, phi_reg, theta_reg = cartesian_to_spherical(coords_freesurfer[:, 0], coords_freesurfer[:, 1],
coords_freesurfer[:, 2])
fig = plt.figure(figsize=(14, 7))
ax = fig.add_subplot(141)
ax.scatter(phi_sub.degree, theta_sub.degree, s=0.1,
c=lh_morph_sulc_sub) # phi.degree: [-90, 90], theta.degree: [0, 360]
plt.title('Moving')
ax = fig.add_subplot(142)
ax.scatter(phi_atlas.degree, theta_atlas.degree, s=0.1, c=lh_morph_sulc_atlas)
plt.title('Fixed')
ax = fig.add_subplot(143)
phi_prime = [math.degrees(p) for p in phi_prime]
    theta_prime = [math.degrees(t) for t in theta_prime]
    ax.scatter(phi_prime, theta_prime, s=0.1, c=lh_morph_sulc_sub)  # (256, 512)
plt.title('Moved')
ax = fig.add_subplot(144)
ax.scatter(phi_reg.degree, theta_reg.degree, s=0.1, c=lh_morph_sulc_sub) # (256, 512)
plt.title('Moved FreeSurfer')
plt.savefig(imagesavefilename)
def xyz2degree(lh_sphere, lh_sulc):
# coords: return (x, y, z) coordinates
# faces: defining mesh triangles
coords, faces = nib.freesurfer.read_geometry(lh_sphere)
# (r: radius, phi: latitude, theta: longitude) in radians
r, phi, theta = cartesian_to_spherical(coords[:, 0], coords[:, 1], coords[:, 2])
lat = phi.degree + 90
lon = theta.degree
# resize to (512, 256)
y_bins = 512
x_bins = 256
y_width = math.degrees(2 * np.pi) / y_bins
ys = lon // y_width
x_width = math.degrees(np.pi) / x_bins
xs = lat // x_width
ys = np.clip(ys, 0, 511)
xs = np.clip(xs, 0, 255)
# load curv and sulc info
lh_morph_sulc = nib.freesurfer.read_morph_data(lh_sulc)
xs = xs.astype(np.int32)
ys = ys.astype(np.int32)
# values store [theta, phi, sulc value, curv value]
values = np.zeros((512, 256))
values[ys, xs] = lh_morph_sulc
# values[1, ys, xs] = lh_morph_curv
return values
def xyz2degree2(phi, theta, lh_sulc):
lat = phi + 90
lon = theta
# resize to (512, 256)
y_bins = 512
x_bins = 256
y_width = math.degrees(2 * np.pi) / y_bins
ys = lon // y_width
x_width = math.degrees(np.pi) / x_bins
xs = lat // x_width
ys = np.clip(ys, 0, 511)
xs = np.clip(xs, 0, 255)
# load curv and sulc info
lh_morph_sulc = nib.freesurfer.read_morph_data(lh_sulc)
xs = xs.astype(np.int32)
ys = ys.astype(np.int32)
# values store [theta, phi, sulc value, curv value]
values = np.zeros((512, 256))
values[ys, xs] = lh_morph_sulc
# values[1, ys, xs] = lh_morph_curv
return values
# device handling
if args.gpu and (args.gpu != '-1'):
device = 'cuda'
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
else:
device = 'cpu'
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# load moving and fixed images
add_feat_axis = not args.multichannel
moving = vxm.py.utils.load_volfile(args.moving, add_batch_axis=True, add_feat_axis=add_feat_axis)
fixed, fixed_affine = vxm.py.utils.load_volfile(
args.fixed, add_batch_axis=True, add_feat_axis=add_feat_axis, ret_affine=True)
# load and set up model
model = vxm.networks.VxmDense.load(args.model, device)
model.to(device)
model.eval()
# set up normalize type
# normalize_type = args.normalize_type
# normalize_type = "min_max"
# set up tensors and permute
# moving, a_moving, b_moving = normalize_forword(moving, type=normalize_type)
# fixed, a_fixed, b_fixed = normalize_forword(fixed, type=normalize_type)
# moving = domainnorm(moving)
moving = minmaxnormalize(moving)
fixed = minmaxnormalize(fixed)
input_moving = torch.from_numpy(moving).to(device).float().permute(0, 3, 1, 2)
input_fixed = torch.from_numpy(fixed).to(device).float().permute(0, 3, 1, 2)
# predict
moved, warp = model(input_moving, input_fixed, registration=True)
# moved = normalize_backword(moved, a_moving, b_moving, type=normalize_type)
# moved = backdomainnorm(moved)
if args.sphere_sub:
c, faces = nib.freesurfer.read_geometry(args.sphere_sub)
coords = np.empty(shape=c.shape)
r, phi_prime, theta_prime = interpolate(warp, args.sphere_sub)
coords[:, 0], coords[:, 1], coords[:, 2] = spherical_to_cartesian(r, phi_prime, theta_prime)
nib.freesurfer.io.write_geometry(args.sphere_reg, coords, faces)
if args.plot_image:
lh_sphere_sub = args.sphere_sub
lh_sphere_atlas = args.sphere_atlas
lh_sulc_sub = args.sulc_sub
lh_sulc_atlas = args.sulc_atlas
lh_sphere_freesurfer = args.sphere_freesurfer
imagesavefilename = args.plot_image
save4image(lh_sphere_sub, lh_sphere_atlas, lh_sulc_sub, lh_sulc_atlas, lh_sphere_freesurfer, phi_prime, theta_prime,
imagesavefilename)
if args.plot_image_dif_1 or args.plot_image_dif_2:
imagesavefilenamedif_1 = args.plot_image_dif_1
imagesavefilenamedif_2 = args.plot_image_dif_2
dif_moving = xyz2degree(lh_sphere_sub, lh_sulc_sub)
dif_moved = xyz2degree2(phi_prime, theta_prime, lh_sulc_sub)
dif_freesurfer = xyz2degree(lh_sphere_freesurfer, lh_sulc_sub)
dif_moved_moving = dif_moved - dif_moving
print(np.nanmax(dif_moved_moving), np.nanmin(dif_moved_moving), np.nanmean(dif_moved_moving))
dif_freesurfer_moved = dif_freesurfer - dif_moved
plt.figure(figsize=(14, 7))
plt.imshow(dif_moved_moving)
plt.title('moved_moving')
plt.colorbar()
plt.savefig(imagesavefilenamedif_1)
plt.figure(figsize=(14, 7))
plt.imshow(dif_freesurfer_moved)
plt.title('freesurfer_moved')
plt.colorbar()
plt.savefig(imagesavefilenamedif_2)
# save moved image
if args.moved:
moved = moved.detach().cpu().numpy().squeeze()
vxm.py.utils.save_volfile(moved, args.moved, fixed_affine)
# save warp
if args.warp:
warp = warp.detach().cpu().numpy().squeeze()
vxm.py.utils.save_volfile(warp, args.warp, fixed_affine)
| 35.287293 | 120 | 0.702129 |
import os
import argparse
import matplotlib.pyplot as plt
import numpy as np
import nibabel as nib
import torch
from scipy.interpolate import RegularGridInterpolator
from astropy.coordinates import cartesian_to_spherical, spherical_to_cartesian
os.environ['VXM_BACKEND'] = 'sphere'
import voxelmorph as vxm
import math
parser = argparse.ArgumentParser()
parser.add_argument('--moving', required=True, help='moving image (source) filename')
parser.add_argument('--fixed', required=True, help='fixed image (target) filename')
parser.add_argument('--moved', help='warped image output filename')
parser.add_argument('--model', required=True, help='pytorch model for nonlinear registration')
parser.add_argument('--warp', help='output warp deformation filename')
parser.add_argument('--sphere_sub', help='sphere_sub image filename')
parser.add_argument('--sphere_atlas', help='sphere_atlas image filename')
parser.add_argument('--sphere_reg', help='sphere.reg image output filename')
parser.add_argument('--sulc_sub', help='sulc_sub image filename')
parser.add_argument('--sulc_atlas', help='sulc_atlas image filename')
parser.add_argument('--sphere_freesurfer', help='sphere_freesurfer image filename')
parser.add_argument('--plot_image', help='show time image output filename')
parser.add_argument('--plot_image_dif_1', help='show dif image output filename')
parser.add_argument('--plot_image_dif_2', help='show dif image output filename')
parser.add_argument('-g', '--gpu', help='GPU number(s) - if not supplied, CPU is used')
parser.add_argument('--multichannel', action='store_true',
help='specify that data has multiple channels')
args = parser.parse_args()
def meannormalize(sub_data):
mean = np.mean(sub_data)
std = np.std(sub_data)
norm = (sub_data - mean) / std
return norm, mean, std
def backmeannormalize(input, mean, std):
output = input * std + mean
return output
def minmaxnormalize(sub_data):
zeros = sub_data == 0
max = np.max(sub_data)
min = np.min(sub_data)
norm = (sub_data - min) / (max - min)
norm[zeros] = 0
return norm
def backminmaxnormalize(input, max, min):
output = input * (max - min) + min
return output
def domainnorm(sub_data):
domain = 33
norm = sub_data / domain
return norm
def backdomainnorm(sub_data):
domain = 33
output = sub_data * domain
return output
def interpolate(warp_file, lh_sphere):
    x = np.linspace(-128, 128, 256)
    y = np.linspace(0, 512, 512)
warp = warp_file.squeeze()
warp = warp.permute(0, 2, 1)
warp = warp.detach().numpy()
    interpolate_function_x = RegularGridInterpolator((x, y), -warp[0])
    interpolate_function_y = RegularGridInterpolator((x, y), -warp[1])
coords, faces = nib.freesurfer.read_geometry(lh_sphere)
r, phi, theta = cartesian_to_spherical(coords[:, 0], coords[:, 1], coords[:, 2])
p = phi.degree
t = theta.degree
theta_bins = 512
phi_bins = 256
theta_width = math.degrees(2 * np.pi) / theta_bins
t /= theta_width
phi_width = math.degrees(np.pi) / phi_bins
p /= phi_width
t = t.reshape(-1, 1)
p = p.reshape(-1, 1)
pts = np.concatenate((p, t), axis=1)
new_pts_x = interpolate_function_x(pts)
new_pts_y = interpolate_function_y(pts)
x_prime = pts.T[0] + new_pts_x
y_prime = pts.T[1] + new_pts_y
x_prime *= phi_width
y_prime *= theta_width
y_prime = np.clip(y_prime, 0, 360)
x_prime = np.clip(x_prime, -90, 90)
t_prime = [math.radians(i) for i in y_prime]
p_prime = [math.radians(i) for i in x_prime]
t_prime = np.array(t_prime)
p_prime = np.array(p_prime)
return r, p_prime, t_prime
def save4image(lh_sphere_sub, lh_sphere_atlas, lh_sulc_sub, lh_sulc_atlas, lh_sphere_freesurfer, phi_prime, theta_prime,
imagesavefilename):
lh_morph_sulc_sub = nib.freesurfer.read_morph_data(lh_sulc_sub)
lh_morph_sulc_atlas = nib.freesurfer.read_morph_data(lh_sulc_atlas)
coords_sub, faces_sub = nib.freesurfer.read_geometry(lh_sphere_sub)
r_sub, phi_sub, theta_sub = cartesian_to_spherical(coords_sub[:, 0], coords_sub[:, 1], coords_sub[:, 2])
coords_atlas, faces_atlas = nib.freesurfer.read_geometry(lh_sphere_atlas)
r_atlas, phi_atlas, theta_atlas = cartesian_to_spherical(coords_atlas[:, 0], coords_atlas[:, 1], coords_atlas[:, 2])
coords_freesurfer, faces_freesurfer = nib.freesurfer.read_geometry(lh_sphere_freesurfer)
r_reg, phi_reg, theta_reg = cartesian_to_spherical(coords_freesurfer[:, 0], coords_freesurfer[:, 1],
coords_freesurfer[:, 2])
fig = plt.figure(figsize=(14, 7))
ax = fig.add_subplot(141)
ax.scatter(phi_sub.degree, theta_sub.degree, s=0.1,
               c=lh_morph_sulc_sub)
    plt.title('Moving')
ax = fig.add_subplot(142)
ax.scatter(phi_atlas.degree, theta_atlas.degree, s=0.1, c=lh_morph_sulc_atlas)
plt.title('Fixed')
ax = fig.add_subplot(143)
phi_prime = [math.degrees(p) for p in phi_prime]
    theta_prime = [math.degrees(t) for t in theta_prime]
    ax.scatter(phi_prime, theta_prime, s=0.1, c=lh_morph_sulc_sub)
    plt.title('Moved')
ax = fig.add_subplot(144)
    ax.scatter(phi_reg.degree, theta_reg.degree, s=0.1, c=lh_morph_sulc_sub)
    plt.title('Moved FreeSurfer')
plt.savefig(imagesavefilename)
def xyz2degree(lh_sphere, lh_sulc):
coords, faces = nib.freesurfer.read_geometry(lh_sphere)
r, phi, theta = cartesian_to_spherical(coords[:, 0], coords[:, 1], coords[:, 2])
lat = phi.degree + 90
lon = theta.degree
y_bins = 512
x_bins = 256
y_width = math.degrees(2 * np.pi) / y_bins
ys = lon // y_width
x_width = math.degrees(np.pi) / x_bins
xs = lat // x_width
ys = np.clip(ys, 0, 511)
xs = np.clip(xs, 0, 255)
lh_morph_sulc = nib.freesurfer.read_morph_data(lh_sulc)
xs = xs.astype(np.int32)
ys = ys.astype(np.int32)
values = np.zeros((512, 256))
values[ys, xs] = lh_morph_sulc
return values
def xyz2degree2(phi, theta, lh_sulc):
lat = phi + 90
lon = theta
y_bins = 512
x_bins = 256
y_width = math.degrees(2 * np.pi) / y_bins
ys = lon // y_width
x_width = math.degrees(np.pi) / x_bins
xs = lat // x_width
ys = np.clip(ys, 0, 511)
xs = np.clip(xs, 0, 255)
lh_morph_sulc = nib.freesurfer.read_morph_data(lh_sulc)
xs = xs.astype(np.int32)
ys = ys.astype(np.int32)
values = np.zeros((512, 256))
values[ys, xs] = lh_morph_sulc
return values
if args.gpu and (args.gpu != '-1'):
device = 'cuda'
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
else:
device = 'cpu'
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
add_feat_axis = not args.multichannel
moving = vxm.py.utils.load_volfile(args.moving, add_batch_axis=True, add_feat_axis=add_feat_axis)
fixed, fixed_affine = vxm.py.utils.load_volfile(
args.fixed, add_batch_axis=True, add_feat_axis=add_feat_axis, ret_affine=True)
model = vxm.networks.VxmDense.load(args.model, device)
model.to(device)
model.eval()
moving = minmaxnormalize(moving)
fixed = minmaxnormalize(fixed)
input_moving = torch.from_numpy(moving).to(device).float().permute(0, 3, 1, 2)
input_fixed = torch.from_numpy(fixed).to(device).float().permute(0, 3, 1, 2)
moved, warp = model(input_moving, input_fixed, registration=True)
if args.sphere_sub:
c, faces = nib.freesurfer.read_geometry(args.sphere_sub)
coords = np.empty(shape=c.shape)
r, phi_prime, theta_prime = interpolate(warp, args.sphere_sub)
coords[:, 0], coords[:, 1], coords[:, 2] = spherical_to_cartesian(r, phi_prime, theta_prime)
nib.freesurfer.io.write_geometry(args.sphere_reg, coords, faces)
if args.plot_image:
lh_sphere_sub = args.sphere_sub
lh_sphere_atlas = args.sphere_atlas
lh_sulc_sub = args.sulc_sub
lh_sulc_atlas = args.sulc_atlas
lh_sphere_freesurfer = args.sphere_freesurfer
imagesavefilename = args.plot_image
save4image(lh_sphere_sub, lh_sphere_atlas, lh_sulc_sub, lh_sulc_atlas, lh_sphere_freesurfer, phi_prime, theta_prime,
imagesavefilename)
if args.plot_image_dif_1 or args.plot_image_dif_2:
imagesavefilenamedif_1 = args.plot_image_dif_1
imagesavefilenamedif_2 = args.plot_image_dif_2
dif_moving = xyz2degree(lh_sphere_sub, lh_sulc_sub)
dif_moved = xyz2degree2(phi_prime, theta_prime, lh_sulc_sub)
dif_freesurfer = xyz2degree(lh_sphere_freesurfer, lh_sulc_sub)
dif_moved_moving = dif_moved - dif_moving
print(np.nanmax(dif_moved_moving), np.nanmin(dif_moved_moving), np.nanmean(dif_moved_moving))
dif_freesurfer_moved = dif_freesurfer - dif_moved
plt.figure(figsize=(14, 7))
plt.imshow(dif_moved_moving)
plt.title('moved_moving')
plt.colorbar()
plt.savefig(imagesavefilenamedif_1)
plt.figure(figsize=(14, 7))
plt.imshow(dif_freesurfer_moved)
plt.title('freesurfer_moved')
plt.colorbar()
plt.savefig(imagesavefilenamedif_2)
if args.moved:
moved = moved.detach().cpu().numpy().squeeze()
vxm.py.utils.save_volfile(moved, args.moved, fixed_affine)
if args.warp:
warp = warp.detach().cpu().numpy().squeeze()
vxm.py.utils.save_volfile(warp, args.warp, fixed_affine)
| true | true |
f7004b5c8eda4fe476b1e7d12428eec65275cd45 | 13,707 | py | Python | src/pymor/reductors/residual.py | meretp/pymor | 0965a5c3d0725466103efae5190493fceb2bf441 | [
"Unlicense"
] | 182 | 2015-01-06T16:12:45.000Z | 2022-03-22T09:24:45.000Z | src/pymor/reductors/residual.py | meretp/pymor | 0965a5c3d0725466103efae5190493fceb2bf441 | [
"Unlicense"
] | 1,305 | 2015-01-06T15:02:31.000Z | 2022-03-29T14:58:28.000Z | src/pymor/reductors/residual.py | HenKlei/pymor | 01876cd39e04bec6d4299f36b59663cd08beafd3 | [
"Unlicense"
] | 68 | 2015-01-06T11:13:20.000Z | 2022-03-21T18:07:42.000Z | # This file is part of the pyMOR project (https://www.pymor.org).
# Copyright 2013-2021 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
import numpy as np
from pymor.algorithms.image import estimate_image_hierarchical
from pymor.algorithms.projection import project, project_to_subbasis
from pymor.core.base import BasicObject
from pymor.core.exceptions import ImageCollectionError
from pymor.operators.constructions import ZeroOperator
from pymor.operators.interface import Operator
class ResidualReductor(BasicObject):
"""Generic reduced basis residual reductor.
Given an operator and a right-hand side, the residual is given by::
residual.apply(U, mu) == operator.apply(U, mu) - rhs.as_range_array(mu)
When operator maps to functionals instead of vectors, we are interested in the Riesz
representative of the residual::
residual.apply(U, mu)
== product.apply_inverse(operator.apply(U, mu) - rhs.as_range_array(mu))
Given a basis `RB` of a subspace of the source space of `operator`, this reductor
uses :func:`~pymor.algorithms.image.estimate_image_hierarchical` to determine
a low-dimensional subspace containing the image of the subspace under
`residual` (resp. `riesz_residual`), computes an orthonormal basis
`residual_range` for this range space and then returns the Petrov-Galerkin projection ::
projected_residual
== project(residual, range_basis=residual_range, source_basis=RB)
of the residual operator. Given a reduced basis coefficient vector `u`, w.r.t.
`RB`, the (dual) norm of the residual can then be computed as ::
projected_residual.apply(u, mu).norm()
Moreover, a `reconstruct` method is provided such that ::
residual_reductor.reconstruct(projected_residual.apply(u, mu))
== residual.apply(RB.lincomb(u), mu)
Parameters
----------
RB
|VectorArray| containing a basis of the reduced space onto which to project.
operator
See definition of `residual`.
rhs
See definition of `residual`. If `None`, zero right-hand side is assumed.
product
Inner product |Operator| w.r.t. which to orthonormalize and w.r.t. which to
compute the Riesz representatives in case `operator` maps to functionals.
riesz_representatives
If `True` compute the Riesz representative of the residual.
"""
def __init__(self, RB, operator, rhs=None, product=None, riesz_representatives=False):
assert RB in operator.source
assert rhs is None \
or (rhs.source.is_scalar and rhs.range == operator.range and rhs.linear)
assert product is None or product.source == product.range == operator.range
self.__auto_init(locals())
self.residual_range = operator.range.empty()
self.residual_range_dims = []
def reduce(self):
if self.residual_range is not False:
with self.logger.block('Estimating residual range ...'):
try:
self.residual_range, self.residual_range_dims = \
estimate_image_hierarchical([self.operator], [self.rhs],
self.RB,
(self.residual_range, self.residual_range_dims),
orthonormalize=True, product=self.product,
riesz_representatives=self.riesz_representatives)
except ImageCollectionError as e:
self.logger.warning(f'Cannot compute range of {e.op}. Evaluation will be slow.')
self.residual_range = False
if self.residual_range is False:
operator = project(self.operator, None, self.RB)
return NonProjectedResidualOperator(operator, self.rhs, self.riesz_representatives, self.product)
with self.logger.block('Projecting residual operator ...'):
if self.riesz_representatives:
operator = project(self.operator, self.residual_range, self.RB, product=None) # the product cancels out
rhs = project(self.rhs, self.residual_range, None, product=None)
else:
operator = project(self.operator, self.residual_range, self.RB, product=self.product)
rhs = project(self.rhs, self.residual_range, None, product=self.product)
return ResidualOperator(operator, rhs)
def reconstruct(self, u):
"""Reconstruct high-dimensional residual vector from reduced vector `u`."""
if self.residual_range is False:
if self.product:
return u * (u.norm() / u.norm(self.product))[0]
else:
return u
else:
return self.residual_range[:u.dim].lincomb(u.to_numpy())
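# --- Illustrative usage sketch (not part of the original pyMOR module) ---
# It merely restates the call sequence documented in the ResidualReductor
# docstring above; `fom` (a full-order model providing `operator` and `rhs`),
# the reduced basis `RB`, an inner-product |Operator| `energy_product`, the
# reduced coefficient vector `u` and the parameter `mu` are all assumed to
# come from the surrounding application code.
#
#     reductor = ResidualReductor(RB, fom.operator, fom.rhs,
#                                 product=energy_product,
#                                 riesz_representatives=True)
#     projected_residual = reductor.reduce()
#     est = projected_residual.apply(u, mu=mu).norm()   # dual norm of the residual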
class ResidualOperator(Operator):
"""Instantiated by :class:`ResidualReductor`."""
def __init__(self, operator, rhs, name=None):
self.__auto_init(locals())
self.source = operator.source
self.range = operator.range
self.linear = operator.linear
self.rhs_vector = rhs.as_range_array() if rhs and not rhs.parametric else None
def apply(self, U, mu=None):
V = self.operator.apply(U, mu=mu)
if self.rhs:
F = self.rhs_vector or self.rhs.as_range_array(mu)
if len(V) > 1:
V -= F[[0]*len(V)]
else:
V -= F
return V
def projected_to_subbasis(self, dim_range=None, dim_source=None, name=None):
return ResidualOperator(project_to_subbasis(self.operator, dim_range, dim_source),
project_to_subbasis(self.rhs, dim_range, None),
name=name)
class NonProjectedResidualOperator(ResidualOperator):
"""Instantiated by :class:`ResidualReductor`.
Not to be used directly.
"""
def __init__(self, operator, rhs, riesz_representatives, product):
super().__init__(operator, rhs)
self.__auto_init(locals())
def apply(self, U, mu=None):
R = super().apply(U, mu=mu)
if self.product:
if self.riesz_representatives:
R_riesz = self.product.apply_inverse(R)
# divide by norm, except when norm is zero:
inversel2 = 1./R_riesz.norm()
inversel2 = np.nan_to_num(inversel2)
R_riesz.scal(np.sqrt(R_riesz.pairwise_inner(R)) * inversel2)
return R_riesz
else:
# divide by norm, except when norm is zero:
inversel2 = 1./R.norm()
inversel2 = np.nan_to_num(inversel2)
R.scal(np.sqrt(self.product.pairwise_apply2(R, R)) * inversel2)
return R
else:
return R
def projected_to_subbasis(self, dim_range=None, dim_source=None, name=None):
return self.with_(operator=project_to_subbasis(self.operator, None, dim_source))
class ImplicitEulerResidualReductor(BasicObject):
"""Reduced basis residual reductor with mass operator for implicit Euler timestepping.
Given an operator, mass and a functional, the concatenation of residual operator
with the Riesz isomorphism is given by::
riesz_residual.apply(U, U_old, mu)
== product.apply_inverse(operator.apply(U, mu) + 1/dt*mass.apply(U, mu)
- 1/dt*mass.apply(U_old, mu) - rhs.as_vector(mu))
This reductor determines a low-dimensional subspace of the image of a reduced basis space under
`riesz_residual` using :func:`~pymor.algorithms.image.estimate_image_hierarchical`, computes an
orthonormal basis `residual_range` of this range space and then returns the Petrov-Galerkin
projection ::
projected_riesz_residual
== riesz_residual.projected(range_basis=residual_range, source_basis=RB)
of the `riesz_residual` operator. Given reduced basis coefficient vectors `u` and `u_old`,
the dual norm of the residual can then be computed as ::
projected_riesz_residual.apply(u, u_old, mu).norm()
Moreover, a `reconstruct` method is provided such that ::
residual_reductor.reconstruct(projected_riesz_residual.apply(u, u_old, mu))
== riesz_residual.apply(RB.lincomb(u), RB.lincomb(u_old), mu)
Parameters
----------
operator
See definition of `riesz_residual`.
mass
The mass operator. See definition of `riesz_residual`.
dt
The time step size. See definition of `riesz_residual`.
rhs
See definition of `riesz_residual`. If `None`, zero right-hand side is assumed.
RB
|VectorArray| containing a basis of the reduced space onto which to project.
product
Inner product |Operator| w.r.t. which to compute the Riesz representatives.
"""
def __init__(self, RB, operator, mass, dt, rhs=None, product=None):
assert RB in operator.source
assert rhs.source.is_scalar and rhs.range == operator.range and rhs.linear
assert product is None or product.source == product.range == operator.range
self.__auto_init(locals())
self.residual_range = operator.range.empty()
self.residual_range_dims = []
def reduce(self):
if self.residual_range is not False:
with self.logger.block('Estimating residual range ...'):
try:
self.residual_range, self.residual_range_dims = \
estimate_image_hierarchical([self.operator, self.mass], [self.rhs],
self.RB,
(self.residual_range, self.residual_range_dims),
orthonormalize=True, product=self.product,
riesz_representatives=True)
except ImageCollectionError as e:
self.logger.warning(f'Cannot compute range of {e.op}. Evaluation will be slow.')
self.residual_range = False
if self.residual_range is False:
operator = project(self.operator, None, self.RB)
mass = project(self.mass, None, self.RB)
return NonProjectedImplicitEulerResidualOperator(operator, mass, self.rhs, self.dt, self.product)
with self.logger.block('Projecting residual operator ...'):
# the product always cancels out
operator = project(self.operator, self.residual_range, self.RB, product=None)
mass = project(self.mass, self.residual_range, self.RB, product=None)
rhs = project(self.rhs, self.residual_range, None, product=None)
return ImplicitEulerResidualOperator(operator, mass, rhs, self.dt)
def reconstruct(self, u):
"""Reconstruct high-dimensional residual vector from reduced vector `u`."""
if self.residual_range is False:
if self.product:
return u * (u.norm() / u.norm(self.product))[0]
else:
return u
else:
return self.residual_range[:u.dim].lincomb(u.to_numpy())
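# --- Illustrative usage sketch (not part of the original pyMOR module) ---
# The implicit Euler variant is reduced the same way; the projected residual
# is then evaluated with the current and previous reduced coefficient vectors.
# `RB`, `fom`, `mass_op`, `dt`, `energy_product`, `u`, `u_old` and `mu` are
# assumed to come from the calling time-stepping code.
#
#     reductor = ImplicitEulerResidualReductor(RB, fom.operator, mass_op, dt,
#                                              rhs=fom.rhs, product=energy_product)
#     projected_residual = reductor.reduce()
#     est = projected_residual.apply(u, u_old, mu=mu).norm()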
class ImplicitEulerResidualOperator(Operator):
"""Instantiated by :class:`ImplicitEulerResidualReductor`."""
def __init__(self, operator, mass, rhs, dt, name=None):
self.__auto_init(locals())
self.source = operator.source
self.range = operator.range
self.linear = operator.linear
self.rhs_vector = rhs.as_range_array() if not rhs.parametric else None
def apply(self, U, U_old, mu=None):
V = self.operator.apply(U, mu=mu)
V.axpy(1./self.dt, self.mass.apply(U, mu=mu))
V.axpy(-1./self.dt, self.mass.apply(U_old, mu=mu))
if not isinstance(self.rhs, ZeroOperator):
F = self.rhs_vector or self.rhs.as_range_array(mu)
if len(V) > 1:
V -= F[[0]*len(V)]
else:
V -= F
return V
def projected_to_subbasis(self, dim_range=None, dim_source=None, name=None):
return ImplicitEulerResidualOperator(project_to_subbasis(self.operator, dim_range, dim_source),
project_to_subbasis(self.mass, dim_range, dim_source),
project_to_subbasis(self.rhs, dim_range, None),
self.dt,
name=name)
class NonProjectedImplicitEulerResidualOperator(ImplicitEulerResidualOperator):
"""Instantiated by :class:`ImplicitEulerResidualReductor`.
Not to be used directly.
"""
def __init__(self, operator, mass, rhs, dt, product):
super().__init__(operator, mass, rhs, dt)
self.product = product
def apply(self, U, U_old, mu=None):
R = super().apply(U, U_old, mu=mu)
if self.product:
R_riesz = self.product.apply_inverse(R)
# divide by norm, except when norm is zero:
inversel2 = 1./R_riesz.norm()
inversel2 = np.nan_to_num(inversel2)
R_riesz.scal(np.sqrt(R_riesz.pairwise_inner(R)) * inversel2)
return R_riesz
else:
return R
def projected_to_subbasis(self, dim_range=None, dim_source=None, name=None):
return self.with_(operator=project_to_subbasis(self.operator, None, dim_source),
mass=project_to_subbasis(self.mass, None, dim_source))
| 43.376582 | 120 | 0.625885 |
import numpy as np
from pymor.algorithms.image import estimate_image_hierarchical
from pymor.algorithms.projection import project, project_to_subbasis
from pymor.core.base import BasicObject
from pymor.core.exceptions import ImageCollectionError
from pymor.operators.constructions import ZeroOperator
from pymor.operators.interface import Operator
class ResidualReductor(BasicObject):
def __init__(self, RB, operator, rhs=None, product=None, riesz_representatives=False):
assert RB in operator.source
assert rhs is None \
or (rhs.source.is_scalar and rhs.range == operator.range and rhs.linear)
assert product is None or product.source == product.range == operator.range
self.__auto_init(locals())
self.residual_range = operator.range.empty()
self.residual_range_dims = []
def reduce(self):
if self.residual_range is not False:
with self.logger.block('Estimating residual range ...'):
try:
self.residual_range, self.residual_range_dims = \
estimate_image_hierarchical([self.operator], [self.rhs],
self.RB,
(self.residual_range, self.residual_range_dims),
orthonormalize=True, product=self.product,
riesz_representatives=self.riesz_representatives)
except ImageCollectionError as e:
self.logger.warning(f'Cannot compute range of {e.op}. Evaluation will be slow.')
self.residual_range = False
if self.residual_range is False:
operator = project(self.operator, None, self.RB)
return NonProjectedResidualOperator(operator, self.rhs, self.riesz_representatives, self.product)
with self.logger.block('Projecting residual operator ...'):
if self.riesz_representatives:
                operator = project(self.operator, self.residual_range, self.RB, product=None)
                rhs = project(self.rhs, self.residual_range, None, product=None)
else:
operator = project(self.operator, self.residual_range, self.RB, product=self.product)
rhs = project(self.rhs, self.residual_range, None, product=self.product)
return ResidualOperator(operator, rhs)
def reconstruct(self, u):
if self.residual_range is False:
if self.product:
return u * (u.norm() / u.norm(self.product))[0]
else:
return u
else:
return self.residual_range[:u.dim].lincomb(u.to_numpy())
class ResidualOperator(Operator):
def __init__(self, operator, rhs, name=None):
self.__auto_init(locals())
self.source = operator.source
self.range = operator.range
self.linear = operator.linear
self.rhs_vector = rhs.as_range_array() if rhs and not rhs.parametric else None
def apply(self, U, mu=None):
V = self.operator.apply(U, mu=mu)
if self.rhs:
F = self.rhs_vector or self.rhs.as_range_array(mu)
if len(V) > 1:
V -= F[[0]*len(V)]
else:
V -= F
return V
def projected_to_subbasis(self, dim_range=None, dim_source=None, name=None):
return ResidualOperator(project_to_subbasis(self.operator, dim_range, dim_source),
project_to_subbasis(self.rhs, dim_range, None),
name=name)
class NonProjectedResidualOperator(ResidualOperator):
def __init__(self, operator, rhs, riesz_representatives, product):
super().__init__(operator, rhs)
self.__auto_init(locals())
def apply(self, U, mu=None):
R = super().apply(U, mu=mu)
if self.product:
if self.riesz_representatives:
R_riesz = self.product.apply_inverse(R)
inversel2 = 1./R_riesz.norm()
inversel2 = np.nan_to_num(inversel2)
R_riesz.scal(np.sqrt(R_riesz.pairwise_inner(R)) * inversel2)
return R_riesz
else:
inversel2 = 1./R.norm()
inversel2 = np.nan_to_num(inversel2)
R.scal(np.sqrt(self.product.pairwise_apply2(R, R)) * inversel2)
return R
else:
return R
def projected_to_subbasis(self, dim_range=None, dim_source=None, name=None):
return self.with_(operator=project_to_subbasis(self.operator, None, dim_source))
class ImplicitEulerResidualReductor(BasicObject):
def __init__(self, RB, operator, mass, dt, rhs=None, product=None):
assert RB in operator.source
assert rhs.source.is_scalar and rhs.range == operator.range and rhs.linear
assert product is None or product.source == product.range == operator.range
self.__auto_init(locals())
self.residual_range = operator.range.empty()
self.residual_range_dims = []
def reduce(self):
if self.residual_range is not False:
with self.logger.block('Estimating residual range ...'):
try:
self.residual_range, self.residual_range_dims = \
estimate_image_hierarchical([self.operator, self.mass], [self.rhs],
self.RB,
(self.residual_range, self.residual_range_dims),
orthonormalize=True, product=self.product,
riesz_representatives=True)
except ImageCollectionError as e:
self.logger.warning(f'Cannot compute range of {e.op}. Evaluation will be slow.')
self.residual_range = False
if self.residual_range is False:
operator = project(self.operator, None, self.RB)
mass = project(self.mass, None, self.RB)
return NonProjectedImplicitEulerResidualOperator(operator, mass, self.rhs, self.dt, self.product)
with self.logger.block('Projecting residual operator ...'):
operator = project(self.operator, self.residual_range, self.RB, product=None)
mass = project(self.mass, self.residual_range, self.RB, product=None)
rhs = project(self.rhs, self.residual_range, None, product=None)
return ImplicitEulerResidualOperator(operator, mass, rhs, self.dt)
def reconstruct(self, u):
if self.residual_range is False:
if self.product:
return u * (u.norm() / u.norm(self.product))[0]
else:
return u
else:
return self.residual_range[:u.dim].lincomb(u.to_numpy())
class ImplicitEulerResidualOperator(Operator):
def __init__(self, operator, mass, rhs, dt, name=None):
self.__auto_init(locals())
self.source = operator.source
self.range = operator.range
self.linear = operator.linear
self.rhs_vector = rhs.as_range_array() if not rhs.parametric else None
def apply(self, U, U_old, mu=None):
V = self.operator.apply(U, mu=mu)
V.axpy(1./self.dt, self.mass.apply(U, mu=mu))
V.axpy(-1./self.dt, self.mass.apply(U_old, mu=mu))
if not isinstance(self.rhs, ZeroOperator):
F = self.rhs_vector or self.rhs.as_range_array(mu)
if len(V) > 1:
V -= F[[0]*len(V)]
else:
V -= F
return V
def projected_to_subbasis(self, dim_range=None, dim_source=None, name=None):
return ImplicitEulerResidualOperator(project_to_subbasis(self.operator, dim_range, dim_source),
project_to_subbasis(self.mass, dim_range, dim_source),
project_to_subbasis(self.rhs, dim_range, None),
self.dt,
name=name)
class NonProjectedImplicitEulerResidualOperator(ImplicitEulerResidualOperator):
def __init__(self, operator, mass, rhs, dt, product):
super().__init__(operator, mass, rhs, dt)
self.product = product
def apply(self, U, U_old, mu=None):
R = super().apply(U, U_old, mu=mu)
if self.product:
R_riesz = self.product.apply_inverse(R)
inversel2 = 1./R_riesz.norm()
inversel2 = np.nan_to_num(inversel2)
R_riesz.scal(np.sqrt(R_riesz.pairwise_inner(R)) * inversel2)
return R_riesz
else:
return R
def projected_to_subbasis(self, dim_range=None, dim_source=None, name=None):
return self.with_(operator=project_to_subbasis(self.operator, None, dim_source),
mass=project_to_subbasis(self.mass, None, dim_source))
| true | true |
f7004c81dd586096d66ee40abfa3ddd193a494db | 7,828 | py | Python | rain_api_core/egress_util.py | asfadmin/rain-api-core | 99985d4a346ab06449a42ed6b5b91f36d2bc760a | [
"Apache-2.0"
] | 1 | 2020-05-06T22:01:22.000Z | 2020-05-06T22:01:22.000Z | rain_api_core/egress_util.py | asfadmin/rain-api-core | 99985d4a346ab06449a42ed6b5b91f36d2bc760a | [
"Apache-2.0"
] | 87 | 2019-09-16T20:45:59.000Z | 2022-03-31T21:18:44.000Z | rain_api_core/egress_util.py | asfadmin/rain-api-core | 99985d4a346ab06449a42ed6b5b91f36d2bc760a | [
"Apache-2.0"
] | 2 | 2020-05-06T22:01:29.000Z | 2021-03-23T18:22:52.000Z | import logging
import hmac
from hashlib import sha256
import os
import urllib
from datetime import datetime
log = logging.getLogger(__name__)
# This warning is stupid
# pylint: disable=logging-fstring-interpolation
def prepend_bucketname(name):
prefix = os.getenv('BUCKETNAME_PREFIX', "gsfc-ngap-{}-".format(os.getenv('MATURITY', 'DEV')[0:1].lower()))
return "{}{}".format(prefix, name)
def hmacsha256(key, string):
return hmac.new(key, string.encode('utf-8'), sha256)
def get_presigned_url(session, bucket_name, object_name, region_name, expire_seconds, user_id, method='GET'):
timez = datetime.utcnow().strftime('%Y%m%dT%H%M%SZ')
datez = timez[:8]
hostname = "{0}.s3{1}.amazonaws.com".format(bucket_name, "."+region_name if region_name != "us-east-1" else "")
cred = session['Credentials']['AccessKeyId']
secret = session['Credentials']['SecretAccessKey']
token = session['Credentials']['SessionToken']
aws4_request = "/".join([datez, region_name, "s3", "aws4_request"])
cred_string = "{0}/{1}".format(cred, aws4_request)
# Canonical Query String Parts
parts = ["A-userid={0}".format(user_id),
"X-Amz-Algorithm=AWS4-HMAC-SHA256",
"X-Amz-Credential="+urllib.parse.quote_plus(cred_string),
"X-Amz-Date="+timez,
"X-Amz-Expires={0}".format(expire_seconds),
"X-Amz-Security-Token="+urllib.parse.quote_plus(token),
"X-Amz-SignedHeaders=host"]
can_query_string = "&".join(parts)
# Canonical Requst
can_req = method + "\n/" + object_name + "\n" + can_query_string + "\nhost:" + hostname + "\n\nhost\nUNSIGNED-PAYLOAD"
can_req_hash = sha256(can_req.encode('utf-8')).hexdigest()
# String to Sign
stringtosign = "\n".join(["AWS4-HMAC-SHA256", timez, aws4_request, can_req_hash])
# Signing Key
StepOne = hmacsha256( "AWS4{0}".format(secret).encode('utf-8'), datez).digest()
StepTwo = hmacsha256( StepOne, region_name ).digest()
StepThree = hmacsha256( StepTwo, "s3").digest()
SigningKey = hmacsha256( StepThree, "aws4_request").digest()
# Final Signature
Signature = hmacsha256(SigningKey, stringtosign).hexdigest()
# Dump URL
url = "https://" + hostname + "/" + object_name + "?" + can_query_string + "&X-Amz-Signature=" + Signature
return url
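# Hypothetical demo of get_presigned_url() (not part of the original module).
# The credential values below are fake placeholders shaped like an STS
# credentials dict; bucket, key, region, expiry and user id are arbitrary.
def _example_presigned_url():  # pragma: no cover - illustrative only
    session = {'Credentials': {'AccessKeyId': 'ASIAEXAMPLE',
                               'SecretAccessKey': 'fake-secret',
                               'SessionToken': 'fake-token'}}
    # Produces a URL like:
    # https://mybucket.s3.us-west-2.amazonaws.com/path/to/object.nc?A-userid=someuser&X-Amz-Algorithm=...
    return get_presigned_url(session, 'mybucket', 'path/to/object.nc',
                             'us-west-2', 3600, 'someuser')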
def get_bucket_dynamic_path(path_list, b_map):
# Old and REVERSE format has no 'MAP'. In either case, we don't want it fouling our dict.
if 'MAP' in b_map:
map_dict = b_map['MAP']
else:
map_dict = b_map
mapping = []
log.debug("Pathparts is {0}".format(", ".join(path_list)))
# walk the bucket map to see if this path is valid
for path_part in path_list:
# Check if we hit a leaf of the YAML tree
if (mapping and isinstance(map_dict, str)) or 'bucket' in map_dict: #
customheaders = {}
if isinstance(map_dict, dict) and 'bucket' in map_dict:
bucketname = map_dict['bucket']
if 'headers' in map_dict:
customheaders = map_dict['headers']
else:
bucketname = map_dict
log.debug(f'mapping: {mapping}')
# Pop mapping off path_list
for _ in mapping:
path_list.pop(0)
# Join the remaining bits together to form object_name
object_name = "/".join(path_list)
bucket_path = "/".join(mapping)
log.info("Bucket mapping was {0}, object was {1}".format(bucket_path, object_name))
return prepend_bucketname(bucketname), bucket_path, object_name, customheaders
if path_part in map_dict:
map_dict = map_dict[path_part]
mapping.append(path_part)
log.debug("Found {0}, Mapping is now {1}".format(path_part, "/".join(mapping)))
else:
log.warning("Could not find {0} in bucketmap".format(path_part))
log.debug('said bucketmap: {}'.format(map_dict))
return False, False, False, {}
# what? No path?
return False, False, False, {}
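# Hypothetical demo of the bucket-map walk above (not part of the original
# module). The bucket map below is made up purely for illustration.
def _example_bucket_map_walk():  # pragma: no cover - illustrative only
    b_map = {'MAP': {'SENTINEL-1': {'GRD': {'bucket': 's1-grd'}}}}
    # Returns (prepend_bucketname('s1-grd'), 'SENTINEL-1/GRD', 'granule.zip', {})
    return get_bucket_dynamic_path(['SENTINEL-1', 'GRD', 'granule.zip'], b_map)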
def process_varargs(varargs: list, b_map: dict):
"""
wrapper around process_request that returns legacy values to preserve backward compatibility
:param varargs: a list with the path to the file requested.
:param b_map: bucket map
:return: path, bucket, object_name
"""
log.warning('Deprecated process_varargs() called.')
path, bucket, object_name, _ = process_request(varargs, b_map)
return path, bucket, object_name
def process_request(varargs, b_map):
varargs = varargs.split("/")
# Make sure we got at least 1 path, and 1 file name:
if len(varargs) < 2:
return "/".join(varargs), None, None, []
# Watch for ASF-ish reverse URL mapping formats:
if len(varargs) == 3:
if os.getenv('USE_REVERSE_BUCKET_MAP', 'FALSE').lower() == 'true':
varargs[0], varargs[1] = varargs[1], varargs[0]
# Look up the bucket from path parts
bucket, path, object_name, headers = get_bucket_dynamic_path(varargs, b_map)
# If we didn't figure out the bucket, we don't know the path/object_name
if not bucket:
object_name = varargs.pop(-1)
path = "/".join(varargs)
return path, bucket, object_name, headers
def bucket_prefix_match(bucket_check, bucket_map, object_name=""):
log.debug(f"bucket_prefix_match(): checking if {bucket_check} matches {bucket_map} w/ optional obj '{object_name}'")
if bucket_check == bucket_map.split('/')[0] and object_name.startswith("/".join(bucket_map.split('/')[1:])):
log.debug(f"Prefixed Bucket Map matched: s3://{bucket_check}/{object_name} => {bucket_map}")
return True
return False
# Sort public/private buckets such that object-prefixes are processed FIRST
def get_sorted_bucket_list(b_map, bucket_group):
if bucket_group not in b_map:
# But why?!
log.warning(f"Bucket map does not contain bucket group '{bucket_group}'")
return []
# b_map[bucket_group] SHOULD be a dict, but list actually works too.
if isinstance(b_map[bucket_group], dict):
return sorted(list(b_map[bucket_group].keys()), key=lambda e: e.count("/"), reverse=True )
if isinstance(b_map[bucket_group], list):
return sorted(list(b_map[bucket_group]), key=lambda e: e.count("/"), reverse=True )
# Something went wrong.
return []
def check_private_bucket(bucket, b_map, object_name=""):
log.debug('check_private_buckets(): bucket: {}'.format(bucket))
# Check public bucket file:
if 'PRIVATE_BUCKETS' in b_map:
# Prioritize prefixed buckets first, the deeper the better!
sorted_buckets = get_sorted_bucket_list(b_map, 'PRIVATE_BUCKETS')
log.debug(f"Sorted PRIVATE buckets are {sorted_buckets}")
for priv_bucket in sorted_buckets:
if bucket_prefix_match(bucket, prepend_bucketname(priv_bucket), object_name):
# This bucket is PRIVATE, return group!
return b_map['PRIVATE_BUCKETS'][priv_bucket]
return False
def check_public_bucket(bucket, b_map, object_name=""):
# Check for PUBLIC_BUCKETS in bucket map file
if 'PUBLIC_BUCKETS' in b_map:
sorted_buckets = get_sorted_bucket_list(b_map, 'PUBLIC_BUCKETS')
log.debug(f"Sorted PUBLIC buckets are {sorted_buckets}")
for pub_bucket in sorted_buckets:
if bucket_prefix_match(bucket, prepend_bucketname(pub_bucket), object_name):
# This bucket is public!
log.debug("found a public, we'll take it")
return True
# Did not find this in public bucket list
log.debug('we did not find a public bucket for {}'.format(bucket))
return False
| 37.454545 | 122 | 0.649974 | import logging
import hmac
from hashlib import sha256
import os
import urllib
from datetime import datetime
log = logging.getLogger(__name__)
def prepend_bucketname(name):
prefix = os.getenv('BUCKETNAME_PREFIX', "gsfc-ngap-{}-".format(os.getenv('MATURITY', 'DEV')[0:1].lower()))
return "{}{}".format(prefix, name)
def hmacsha256(key, string):
return hmac.new(key, string.encode('utf-8'), sha256)
def get_presigned_url(session, bucket_name, object_name, region_name, expire_seconds, user_id, method='GET'):
timez = datetime.utcnow().strftime('%Y%m%dT%H%M%SZ')
datez = timez[:8]
hostname = "{0}.s3{1}.amazonaws.com".format(bucket_name, "."+region_name if region_name != "us-east-1" else "")
cred = session['Credentials']['AccessKeyId']
secret = session['Credentials']['SecretAccessKey']
token = session['Credentials']['SessionToken']
aws4_request = "/".join([datez, region_name, "s3", "aws4_request"])
cred_string = "{0}/{1}".format(cred, aws4_request)
parts = ["A-userid={0}".format(user_id),
"X-Amz-Algorithm=AWS4-HMAC-SHA256",
"X-Amz-Credential="+urllib.parse.quote_plus(cred_string),
"X-Amz-Date="+timez,
"X-Amz-Expires={0}".format(expire_seconds),
"X-Amz-Security-Token="+urllib.parse.quote_plus(token),
"X-Amz-SignedHeaders=host"]
can_query_string = "&".join(parts)
can_req = method + "\n/" + object_name + "\n" + can_query_string + "\nhost:" + hostname + "\n\nhost\nUNSIGNED-PAYLOAD"
can_req_hash = sha256(can_req.encode('utf-8')).hexdigest()
stringtosign = "\n".join(["AWS4-HMAC-SHA256", timez, aws4_request, can_req_hash])
StepOne = hmacsha256( "AWS4{0}".format(secret).encode('utf-8'), datez).digest()
StepTwo = hmacsha256( StepOne, region_name ).digest()
StepThree = hmacsha256( StepTwo, "s3").digest()
SigningKey = hmacsha256( StepThree, "aws4_request").digest()
Signature = hmacsha256(SigningKey, stringtosign).hexdigest()
url = "https://" + hostname + "/" + object_name + "?" + can_query_string + "&X-Amz-Signature=" + Signature
return url
def get_bucket_dynamic_path(path_list, b_map):
if 'MAP' in b_map:
map_dict = b_map['MAP']
else:
map_dict = b_map
mapping = []
log.debug("Pathparts is {0}".format(", ".join(path_list)))
# walk the bucket map to see if this path is valid
for path_part in path_list:
# Check if we hit a leaf of the YAML tree
if (mapping and isinstance(map_dict, str)) or 'bucket' in map_dict: #
customheaders = {}
if isinstance(map_dict, dict) and 'bucket' in map_dict:
bucketname = map_dict['bucket']
if 'headers' in map_dict:
customheaders = map_dict['headers']
else:
bucketname = map_dict
log.debug(f'mapping: {mapping}')
# Pop mapping off path_list
for _ in mapping:
path_list.pop(0)
# Join the remaining bits together to form object_name
object_name = "/".join(path_list)
bucket_path = "/".join(mapping)
log.info("Bucket mapping was {0}, object was {1}".format(bucket_path, object_name))
return prepend_bucketname(bucketname), bucket_path, object_name, customheaders
if path_part in map_dict:
map_dict = map_dict[path_part]
mapping.append(path_part)
log.debug("Found {0}, Mapping is now {1}".format(path_part, "/".join(mapping)))
else:
log.warning("Could not find {0} in bucketmap".format(path_part))
log.debug('said bucketmap: {}'.format(map_dict))
return False, False, False, {}
# what? No path?
return False, False, False, {}
def process_varargs(varargs: list, b_map: dict):
log.warning('Deprecated process_varargs() called.')
path, bucket, object_name, _ = process_request(varargs, b_map)
return path, bucket, object_name
def process_request(varargs, b_map):
varargs = varargs.split("/")
# Make sure we got at least 1 path, and 1 file name:
if len(varargs) < 2:
return "/".join(varargs), None, None, []
# Watch for ASF-ish reverse URL mapping formats:
if len(varargs) == 3:
if os.getenv('USE_REVERSE_BUCKET_MAP', 'FALSE').lower() == 'true':
varargs[0], varargs[1] = varargs[1], varargs[0]
# Look up the bucket from path parts
bucket, path, object_name, headers = get_bucket_dynamic_path(varargs, b_map)
# If we didn't figure out the bucket, we don't know the path/object_name
if not bucket:
object_name = varargs.pop(-1)
path = "/".join(varargs)
return path, bucket, object_name, headers
def bucket_prefix_match(bucket_check, bucket_map, object_name=""):
log.debug(f"bucket_prefix_match(): checking if {bucket_check} matches {bucket_map} w/ optional obj '{object_name}'")
if bucket_check == bucket_map.split('/')[0] and object_name.startswith("/".join(bucket_map.split('/')[1:])):
log.debug(f"Prefixed Bucket Map matched: s3://{bucket_check}/{object_name} => {bucket_map}")
return True
return False
# Sort public/private buckets such that object-prefixes are processed FIRST
def get_sorted_bucket_list(b_map, bucket_group):
if bucket_group not in b_map:
# But why?!
log.warning(f"Bucket map does not contain bucket group '{bucket_group}'")
return []
# b_map[bucket_group] SHOULD be a dict, but list actually works too.
if isinstance(b_map[bucket_group], dict):
return sorted(list(b_map[bucket_group].keys()), key=lambda e: e.count("/"), reverse=True )
if isinstance(b_map[bucket_group], list):
return sorted(list(b_map[bucket_group]), key=lambda e: e.count("/"), reverse=True )
# Something went wrong.
return []
def check_private_bucket(bucket, b_map, object_name=""):
log.debug('check_private_buckets(): bucket: {}'.format(bucket))
# Check public bucket file:
if 'PRIVATE_BUCKETS' in b_map:
# Prioritize prefixed buckets first, the deeper the better!
sorted_buckets = get_sorted_bucket_list(b_map, 'PRIVATE_BUCKETS')
log.debug(f"Sorted PRIVATE buckets are {sorted_buckets}")
for priv_bucket in sorted_buckets:
if bucket_prefix_match(bucket, prepend_bucketname(priv_bucket), object_name):
# This bucket is PRIVATE, return group!
return b_map['PRIVATE_BUCKETS'][priv_bucket]
return False
def check_public_bucket(bucket, b_map, object_name=""):
# Check for PUBLIC_BUCKETS in bucket map file
if 'PUBLIC_BUCKETS' in b_map:
sorted_buckets = get_sorted_bucket_list(b_map, 'PUBLIC_BUCKETS')
log.debug(f"Sorted PUBLIC buckets are {sorted_buckets}")
for pub_bucket in sorted_buckets:
if bucket_prefix_match(bucket, prepend_bucketname(pub_bucket), object_name):
# This bucket is public!
log.debug("found a public, we'll take it")
return True
log.debug('we did not find a public bucket for {}'.format(bucket))
return False
| true | true |
f7004e07d39bc89734abdba71f14d69e5287529e | 871 | py | Python | app/main/views.py | stephane-evrard/IP4_News | 866855710cbdc51f02d0d10b09971121affeafe9 | [
"Unlicense"
] | null | null | null | app/main/views.py | stephane-evrard/IP4_News | 866855710cbdc51f02d0d10b09971121affeafe9 | [
"Unlicense"
] | null | null | null | app/main/views.py | stephane-evrard/IP4_News | 866855710cbdc51f02d0d10b09971121affeafe9 | [
"Unlicense"
] | null | null | null | from flask import render_template,request,redirect,url_for
from . import main
from ..requests import get_sources,get_articles
from ..models import Sources
#views
@main.route('/')
def index():
'''
    view function for the root page; returns the index page and its data
'''
sources = get_sources('business')
sports_sources = get_sources('sports')
technology_sources = get_sources('technology')
entertainment_sources = get_sources('entertainment')
title = "News Of The Day"
return render_template('index.html',title = title, sources = sources,sports_sources = sports_sources,technology_sources = technology_sources,entertainment_sources = entertainment_sources)
@main.route('/sources/<id>')
def articles(id):
'''
view articles page
'''
articles = get_articles(id)
title = f'NH | {id}'
return render_template('articles.html',title= title,articles = articles) | 31.107143 | 188 | 0.762342 | from flask import render_template,request,redirect,url_for
from . import main
from ..requests import get_sources,get_articles
from ..models import Sources
@main.route('/')
def index():
sources = get_sources('business')
sports_sources = get_sources('sports')
technology_sources = get_sources('technology')
entertainment_sources = get_sources('entertainment')
title = "News Of The Day"
return render_template('index.html',title = title, sources = sources,sports_sources = sports_sources,technology_sources = technology_sources,entertainment_sources = entertainment_sources)
@main.route('/sources/<id>')
def articles(id):
articles = get_articles(id)
title = f'NH | {id}'
return render_template('articles.html',title= title,articles = articles) | true | true |
f7004e17940f340a8654a088ad8c1a3611a4bb3f | 3,415 | py | Python | polyaxon_schemas/ml/layers/wrappers.py | granularai/polyaxon-schemas | 017ae74701f21f12f0b25e75379681ea5d8baa9e | [
"MIT"
] | null | null | null | polyaxon_schemas/ml/layers/wrappers.py | granularai/polyaxon-schemas | 017ae74701f21f12f0b25e75379681ea5d8baa9e | [
"MIT"
] | null | null | null | polyaxon_schemas/ml/layers/wrappers.py | granularai/polyaxon-schemas | 017ae74701f21f12f0b25e75379681ea5d8baa9e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from marshmallow import fields
from polyaxon_schemas.ml.layers.base import BaseLayerConfig, BaseLayerSchema
class WrapperSchema(BaseLayerSchema):
layer = fields.Nested('LayerSchema')
@staticmethod
def schema_config():
return WrapperConfig
class WrapperConfig(BaseLayerConfig):
"""Abstract wrapper base class.
Wrappers take another layer and augment it in various ways.
Do not use this class as a layer, it is only an abstract base class.
Two usable wrappers are the `TimeDistributed` and `Bidirectional` wrappers.
Args:
layer: The layer to be wrapped.
"""
IDENTIFIER = 'Wrapper'
SCHEMA = WrapperSchema
def __init__(self, layer, **kwargs):
super(WrapperConfig, self).__init__(**kwargs)
self.layer = layer
class TimeDistributedSchema(WrapperSchema):
@staticmethod
def schema_config():
return TimeDistributedConfig
class TimeDistributedConfig(WrapperConfig):
"""This wrapper allows to apply a layer to every temporal slice of an input.
The input should be at least 3D, and the dimension of index one
will be considered to be the temporal dimension.
Consider a batch of 32 samples,
where each sample is a sequence of 10 vectors of 16 dimensions.
The batch input shape of the layer is then `(32, 10, 16)`,
and the `input_shape`, not including the samples dimension, is `(10, 16)`.
You can then use `TimeDistributed` to apply a `Dense` layer
to each of the 10 timesteps, independently:
```python
# as the first layer in a model
x = TimeDistributed(Dense(8))(x)
# now x.output_shape == (None, 10, 8)
```
The output will then have shape `(32, 10, 8)`.
In subsequent layers, there is no need for the `input_shape`:
```python
x = TimeDistributed(Dense(32))(x)
# now x.output_shape == (None, 10, 32)
```
The output will then have shape `(32, 10, 32)`.
`TimeDistributed` can be used with arbitrary layers, not just `Dense`,
for instance with a `Conv2D` layer:
```python
x = TimeDistributed(Conv2D(64, (3, 3)))(x)
```
Args:
layer: a layer instance.
Polyaxonfile usage:
```yaml
TimeDistributed:
layer:
Dense:
units: 2
```
"""
IDENTIFIER = 'TimeDistributed'
SCHEMA = TimeDistributedSchema
class BidirectionalSchema(WrapperSchema):
@staticmethod
def schema_config():
return BidirectionalConfig
class BidirectionalConfig(WrapperConfig):
"""Bidirectional wrapper for RNNs.
Args:
layer: `Recurrent` instance.
merge_mode: Mode by which outputs of the
forward and backward RNNs will be combined.
One of {'sum', 'mul', 'concat', 'ave', None}.
If None, the outputs will not be combined,
they will be returned as a list.
Raises:
ValueError: In case of invalid `merge_mode` argument.
Example:
```python
x = Bidirectional(plx.layers.LSTM(units=128, dropout=0.2, recurrent_dropout=0.2))(x)
```
Polyaxonfile usage:
```yaml
Bidirectional:
layer:
LSTM:
units: 128
dropout: 0.2
recurrent_dropout: 0.2
```
"""
IDENTIFIER = 'Bidirectional'
SCHEMA = BidirectionalSchema
| 25.296296 | 88 | 0.652709 | from __future__ import absolute_import, division, print_function
from marshmallow import fields
from polyaxon_schemas.ml.layers.base import BaseLayerConfig, BaseLayerSchema
class WrapperSchema(BaseLayerSchema):
layer = fields.Nested('LayerSchema')
@staticmethod
def schema_config():
return WrapperConfig
class WrapperConfig(BaseLayerConfig):
IDENTIFIER = 'Wrapper'
SCHEMA = WrapperSchema
def __init__(self, layer, **kwargs):
super(WrapperConfig, self).__init__(**kwargs)
self.layer = layer
class TimeDistributedSchema(WrapperSchema):
@staticmethod
def schema_config():
return TimeDistributedConfig
class TimeDistributedConfig(WrapperConfig):
IDENTIFIER = 'TimeDistributed'
SCHEMA = TimeDistributedSchema
class BidirectionalSchema(WrapperSchema):
@staticmethod
def schema_config():
return BidirectionalConfig
class BidirectionalConfig(WrapperConfig):
IDENTIFIER = 'Bidirectional'
SCHEMA = BidirectionalSchema
| true | true |
f7004f189ed755c741a1e9adf7075481f0a47ecb | 719 | py | Python | conftest.py | DedMazzai/feedback-form | 37d0b18a97af43469050be4a8351198521e7f445 | [
"Apache-2.0"
] | null | null | null | conftest.py | DedMazzai/feedback-form | 37d0b18a97af43469050be4a8351198521e7f445 | [
"Apache-2.0"
] | null | null | null | conftest.py | DedMazzai/feedback-form | 37d0b18a97af43469050be4a8351198521e7f445 | [
"Apache-2.0"
] | null | null | null | import pytest
from pages.aplication import Application
def pytest_addoption(parser):
parser.addoption('--browser_name', action='store', default="chrome", help="Choose browser: chrome or firefox")
parser.addoption('--base_url', action='store', default='https://prodoctorov.ru/new/rate/doctor/12/'
, help="Choose base_url")
@pytest.fixture
def app(request):
    browser_name = request.config.getoption("--browser_name")  # read from the command line to select the browser
base_url = request.config.getoption("--base_url")
fixture = Application(browser_name=browser_name, base_url=base_url)
yield fixture
print("\nquit browser..")
fixture.destroy()
return fixture
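# Example (illustrative only) of how a test elsewhere in the suite would consume
# the fixture above -- pytest injects it by parameter name; `open_home_page` is
# a hypothetical page-object method, not necessarily part of Application:
#
#     def test_rate_form_opens(app):
#         app.open_home_page()
#         ...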
| 34.238095 | 114 | 0.713491 | import pytest
from pages.aplication import Application
def pytest_addoption(parser):
parser.addoption('--browser_name', action='store', default="chrome", help="Choose browser: chrome or firefox")
parser.addoption('--base_url', action='store', default='https://prodoctorov.ru/new/rate/doctor/12/'
, help="Choose base_url")
@pytest.fixture
def app(request):
    browser_name = request.config.getoption("--browser_name")
    base_url = request.config.getoption("--base_url")
fixture = Application(browser_name=browser_name, base_url=base_url)
yield fixture
print("\nquit browser..")
fixture.destroy()
return fixture
| true | true |
f7004f7c1cf7e6495b7823fbbf31d91eda9a6100 | 2,036 | py | Python | src/optimModels/utils/constantes.py | BioSystemsUM/optimModels | 41e8ec5e99a66052fc8b547b60f979d2a2fd669d | [
"Apache-2.0"
] | 1 | 2020-01-09T08:58:05.000Z | 2020-01-09T08:58:05.000Z | src/optimModels/utils/constantes.py | BioSystemsUM/optimModels | 41e8ec5e99a66052fc8b547b60f979d2a2fd669d | [
"Apache-2.0"
] | null | null | null | src/optimModels/utils/constantes.py | BioSystemsUM/optimModels | 41e8ec5e99a66052fc8b547b60f979d2a2fd669d | [
"Apache-2.0"
] | 2 | 2020-01-16T16:15:28.000Z | 2020-01-21T16:31:20.000Z | class optimType:
REACTION_KO = 1
REACTION_UO = 2
GENE_KO = 3
GENE_UO = 4
MEDIUM = 5
MEDIUM_LEVELS = 6
MEDIUM_REACTION_KO = 7
MEDIUM_REACTION_UO = 8
COMPOSITION = 9
PROTEIN_KO = 10
PROTEIN_UO = 11
types = {1:"Reaction Knockouts",2:"Reaction Under/Over expression", 3:"Gene Knockouts",
4:"Gene Under/Over expression", 5:"Medium compositions",6:"Medium compositions with levels",
7:"Medium with Reaction Knockouts",8: "Medium with Reaction Under/Over expression",
9:"Community Composition", 10:"Protein knockouts", 11:"Protein Under/Over expression"}
def get_optim_type_name(self, id):
return optimType.types.get(id)
class solverMethod:
LSODA = 1
LSODAR = 2
LSODE = 3
HEUN = 4
EULER = 5
RK4 = 6
DORMAN_PRINCE = 7
RKFehlberg = 8
Dopri5 = 9
Dop853 = 10
Vode = 11
Radau5 = 12
AdamsBashforth2=13
AdamsBashMoulton2=14
methods ={1:"LSODA",2:"LSODAR", 3: "LSODE", 4: "HEUN", 5: "EULER",
6: "Range Kutta 4", 7: "DORMAN PRINCE", 8: "RKFehlberg", 9: "Dopri5", 10: "Dop853", 11: "Vode",
12: "Radau5", 13: "AdamsBashforth2", 14: "AdamsBashMoulton2"
}
def get_solver_method_name(self, id):
return solverMethod.methods.get(id)
class solverStatus:
'''
    Enumeration of possible solution statuses (from FRAMED).
'''
OPTIMAL = 1
UNKNOWN = 0
ERROR = 2
SUBOPTIMAL = -1
UNBOUNDED = -2
INFEASIBLE = -3
INF_OR_UNB = -4
@staticmethod
def get_status_str(id):
if solverStatus.ERROR == id :
str="Error"
elif solverStatus.OPTIMAL == id:
str = "Optimal"
elif solverStatus.SUBOPTIMAL == id:
str = "Sub-Optimal"
elif solverStatus.UNBOUNDED == id or solverStatus.INFEASIBLE == id or solverStatus.INF_OR_UNB == id:
str = "Infeasible or unbounded problem."
else:
str = "Unknown"
return str
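# Hypothetical demo of the lookup helpers defined above (not part of the
# original module); the expected strings are taken from the dicts above.
def _example_lookups():  # pragma: no cover - illustrative only
    assert optimType().get_optim_type_name(optimType.GENE_KO) == "Gene Knockouts"
    assert solverMethod().get_solver_method_name(solverMethod.LSODA) == "LSODA"
    assert solverStatus.get_status_str(solverStatus.OPTIMAL) == "Optimal"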
| 27.146667 | 111 | 0.59332 | class optimType:
REACTION_KO = 1
REACTION_UO = 2
GENE_KO = 3
GENE_UO = 4
MEDIUM = 5
MEDIUM_LEVELS = 6
MEDIUM_REACTION_KO = 7
MEDIUM_REACTION_UO = 8
COMPOSITION = 9
PROTEIN_KO = 10
PROTEIN_UO = 11
types = {1:"Reaction Knockouts",2:"Reaction Under/Over expression", 3:"Gene Knockouts",
4:"Gene Under/Over expression", 5:"Medium compositions",6:"Medium compositions with levels",
7:"Medium with Reaction Knockouts",8: "Medium with Reaction Under/Over expression",
9:"Community Composition", 10:"Protein knockouts", 11:"Protein Under/Over expression"}
def get_optim_type_name(self, id):
return optimType.types.get(id)
class solverMethod:
LSODA = 1
LSODAR = 2
LSODE = 3
HEUN = 4
EULER = 5
RK4 = 6
DORMAN_PRINCE = 7
RKFehlberg = 8
Dopri5 = 9
Dop853 = 10
Vode = 11
Radau5 = 12
AdamsBashforth2=13
AdamsBashMoulton2=14
methods ={1:"LSODA",2:"LSODAR", 3: "LSODE", 4: "HEUN", 5: "EULER",
6: "Range Kutta 4", 7: "DORMAN PRINCE", 8: "RKFehlberg", 9: "Dopri5", 10: "Dop853", 11: "Vode",
12: "Radau5", 13: "AdamsBashforth2", 14: "AdamsBashMoulton2"
}
def get_solver_method_name(self, id):
return solverMethod.methods.get(id)
class solverStatus:
OPTIMAL = 1
UNKNOWN = 0
ERROR = 2
SUBOPTIMAL = -1
UNBOUNDED = -2
INFEASIBLE = -3
INF_OR_UNB = -4
@staticmethod
def get_status_str(id):
if solverStatus.ERROR == id :
str="Error"
elif solverStatus.OPTIMAL == id:
str = "Optimal"
elif solverStatus.SUBOPTIMAL == id:
str = "Sub-Optimal"
elif solverStatus.UNBOUNDED == id or solverStatus.INFEASIBLE == id or solverStatus.INF_OR_UNB == id:
str = "Infeasible or unbounded problem."
else:
str = "Unknown"
return str
| true | true |
f70050160877a22962c4c5bcea97ccc0f2832657 | 997 | py | Python | backend/app/app/schemas/dataroom.py | saschajullmann/sedotra | aaa38f6d533daa725a7037a8c446da978ffafa7d | [
"MIT"
] | null | null | null | backend/app/app/schemas/dataroom.py | saschajullmann/sedotra | aaa38f6d533daa725a7037a8c446da978ffafa7d | [
"MIT"
] | null | null | null | backend/app/app/schemas/dataroom.py | saschajullmann/sedotra | aaa38f6d533daa725a7037a8c446da978ffafa7d | [
"MIT"
] | null | null | null | from enum import Enum
from typing import Optional
from uuid import UUID
from pydantic import BaseModel
from app.models import User, Organization
class DataRoomBase(BaseModel):
name: Optional[str] = None
description: Optional[str] = None
class DataRoomCreateRequest(DataRoomBase):
name: str
class DataRoomCreate(DataRoomCreateRequest):
creator: User
organization: Organization
class Config:
arbitrary_types_allowed = True
class DataRoomRole(str, Enum):
OWNER = "OWNER"
ADMIN = "ADMIN"
MEMBER = "MEMBER"
class DataRoomUserRoleRequest(BaseModel):
user_id: UUID
user_role: DataRoomRole
class Config:
use_enum_values = True
class DataRoomTeamRoleRequest(BaseModel):
team_id: UUID
team_role: DataRoomRole
class Config:
use_enum_values = True
class DataRoomUpdate(DataRoomBase):
pass
class DataRoomInDBBase(DataRoomBase):
id: Optional[UUID] = None
class Config:
orm_mode = True
| 17.189655 | 44 | 0.716148 | from enum import Enum
from typing import Optional
from uuid import UUID
from pydantic import BaseModel
from app.models import User, Organization
class DataRoomBase(BaseModel):
name: Optional[str] = None
description: Optional[str] = None
class DataRoomCreateRequest(DataRoomBase):
name: str
class DataRoomCreate(DataRoomCreateRequest):
creator: User
organization: Organization
class Config:
arbitrary_types_allowed = True
class DataRoomRole(str, Enum):
OWNER = "OWNER"
ADMIN = "ADMIN"
MEMBER = "MEMBER"
class DataRoomUserRoleRequest(BaseModel):
user_id: UUID
user_role: DataRoomRole
class Config:
use_enum_values = True
class DataRoomTeamRoleRequest(BaseModel):
team_id: UUID
team_role: DataRoomRole
class Config:
use_enum_values = True
class DataRoomUpdate(DataRoomBase):
pass
class DataRoomInDBBase(DataRoomBase):
id: Optional[UUID] = None
class Config:
orm_mode = True
| true | true |
f70052f70cc24783258d4832724f32f148a9ec92 | 1,470 | py | Python | xhs_wechat_noteid_script.py | HhhuYu/xhs_simple_crawler | 7612d3a7eff7d4b2ded8bbb54bf05f201c607e22 | [
"MIT"
] | 70 | 2019-04-12T09:17:53.000Z | 2020-12-13T07:33:48.000Z | xhs_wechat_noteid_script.py | redoubtei/xhs_simple_crawler | 7612d3a7eff7d4b2ded8bbb54bf05f201c607e22 | [
"MIT"
] | 1 | 2019-04-18T15:39:16.000Z | 2019-05-17T04:09:36.000Z | xhs_wechat_noteid_script.py | redoubtei/xhs_simple_crawler | 7612d3a7eff7d4b2ded8bbb54bf05f201c607e22 | [
"MIT"
] | 23 | 2019-05-14T09:36:00.000Z | 2020-11-30T11:32:36.000Z | import json
import pymongo
from config import *
def response(flow):
global collection
client = pymongo.MongoClient(MONGO_URL)
db = client[WECHAT_XHS_MONGO_DB]
collection = db[WECHAT_XHS_NOTE_MONGO_COLLECTION]
url1 = 'https://www.xiaohongshu.com/sapi/wx_mp_api/sns/v1/search/notes?'
url2 = 'https://www.xiaohongshu.com/fe_api/burdock/v1/page/'
if flow.request.url.startswith(url1):
# 数据的解析
print(flow.request.url)
notes = json.loads(flow.response.text)["data"]["notes"]
for note in notes:
note_id = note["id"]
img_list = note["images_list"]
title = note["title"]
user = note["user"]
content = {
"note_id": note_id,
"img_list": img_list,
"title": title,
"user":user
}
collection.insert(content)
elif flow.request.url.startswith(url2):
print(flow.request.url)
notes = json.loads(flow.response.text)["data"]
for note in notes:
note_id = note["id"]
img_list = note["cover"]
title = note["title"]
user = note["user"]
content = {
"note_id": note_id,
"img_list": img_list,
"title": title,
"user": user
}
collection.insert(content)
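# Note (illustrative, not part of the original script): this module is written
# as a mitmproxy addon -- response(flow) is called for every intercepted HTTP
# response. A typical way to run it, assuming mitmproxy is installed and the
# client traffic is proxied through it, would be:
#
#     mitmdump -s xhs_wechat_noteid_script.py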
| 27.735849 | 77 | 0.512925 | import json
import pymongo
from config import *
def response(flow):
global collection
client = pymongo.MongoClient(MONGO_URL)
db = client[WECHAT_XHS_MONGO_DB]
collection = db[WECHAT_XHS_NOTE_MONGO_COLLECTION]
url1 = 'https://www.xiaohongshu.com/sapi/wx_mp_api/sns/v1/search/notes?'
url2 = 'https://www.xiaohongshu.com/fe_api/burdock/v1/page/'
if flow.request.url.startswith(url1):
print(flow.request.url)
notes = json.loads(flow.response.text)["data"]["notes"]
for note in notes:
note_id = note["id"]
img_list = note["images_list"]
title = note["title"]
user = note["user"]
content = {
"note_id": note_id,
"img_list": img_list,
"title": title,
"user":user
}
collection.insert(content)
elif flow.request.url.startswith(url2):
print(flow.request.url)
notes = json.loads(flow.response.text)["data"]
for note in notes:
note_id = note["id"]
img_list = note["cover"]
title = note["title"]
user = note["user"]
content = {
"note_id": note_id,
"img_list": img_list,
"title": title,
"user": user
}
collection.insert(content)
| true | true |
f7005399cea08f14e2a5c830358e28def1a4179c | 2,362 | py | Python | TradingAlgorythm.py | b00v1ne/TrBot11 | 01d156f6c01cfb167aa3a38186402d4c7c4ff818 | [
"MIT"
] | null | null | null | TradingAlgorythm.py | b00v1ne/TrBot11 | 01d156f6c01cfb167aa3a38186402d4c7c4ff818 | [
"MIT"
] | null | null | null | TradingAlgorythm.py | b00v1ne/TrBot11 | 01d156f6c01cfb167aa3a38186402d4c7c4ff818 | [
"MIT"
] | null | null | null | import json
from datetime import datetime, timedelta
from bittrex.bittrex import Bittrex
def TradingAlorythm(command, market, amount, coinname, step, stoploss, key, secret):
TestTrading = Bittrex(key, secret)
period = timedelta(seconds=20)
next_tick = datetime.now() + period
seconds = 20
firstCycle = True
if command == "y":
print("buying {0} of {1} coins".format(amount, coinname))
        # uncomment to place the buy order
# TestTrading.buy_limit(market, amount, coinprice)
while command == "y":
        # timer: tick every 20 seconds
if next_tick <= datetime.now():
print("Connecting to Bittrex")
seconds += 20
next_tick += period
print("Timer ticked")
print("Updating stock exchange...")
            # Read the ticker values
            t = TestTrading.get_ticker(market)
            # Request the balance
            balance = TestTrading.get_balance(coinname)
            # Request the open orders
            orders = TestTrading.get_open_orders(market)
            a = json.dumps(t)
            # Print the ticker values
            print(t)
            # Print the balance
            print("Balance is {} ".format(balance['result']['Available']))
            # Print the orders
            print(orders)
            # Unpack values into variables
bid = t['result']['Bid']
ask = t['result']['Ask']
last = t['result']['Last']
if firstCycle:
StartValue = bid
firstCycle = False
Stop_loss = StartValue - 0.00000007
print("*--------------------------")
print("| Start Value | {: .8f} ".format(StartValue))
print("| Stop loss | {: .8f} ".format(Stop_loss))
print("|--------------------------")
print("| Bid | {: .8f} ".format(bid))
print("| Ask | {: .8f} ".format(ask))
print("| Last | {: .8f} ".format(last))
print("*--------------------------")
            # Append Bid to the end of the array
# A.append(float(bid))
if bid >= step + StartValue:
print("MOVE STOP-LOSS")
StartValue = bid
if bid <= stoploss:
print("Sell order sent") | 38.721311 | 84 | 0.506351 | import json
from datetime import datetime, timedelta
from bittrex.bittrex import Bittrex
def TradingAlorythm(command, market, amount, coinname, step, stoploss, key, secret):
TestTrading = Bittrex(key, secret)
period = timedelta(seconds=20)
next_tick = datetime.now() + period
seconds = 20
firstCycle = True
if command == "y":
print("buying {0} of {1} coins".format(amount, coinname))
while command == "y":
if next_tick <= datetime.now():
print("Connecting to Bittrex")
seconds += 20
next_tick += period
print("Timer ticked")
print("Updating stock exchange...")
t = TestTrading.get_ticker(market)
balance = TestTrading.get_balance(coinname)
orders = TestTrading.get_open_orders(market)
a = json.dumps(t)
print(t)
print("Balance is {} ".format(balance['result']['Available']))
print(orders)
bid = t['result']['Bid']
ask = t['result']['Ask']
last = t['result']['Last']
if firstCycle:
StartValue = bid
firstCycle = False
Stop_loss = StartValue - 0.00000007
print("*--------------------------")
print("| Start Value | {: .8f} ".format(StartValue))
print("| Stop loss | {: .8f} ".format(Stop_loss))
print("|--------------------------")
print("| Bid | {: .8f} ".format(bid))
print("| Ask | {: .8f} ".format(ask))
print("| Last | {: .8f} ".format(last))
print("*--------------------------")
if bid >= step + StartValue:
print("MOVE STOP-LOSS")
StartValue = bid
if bid <= stoploss:
print("Sell order sent") | true | true |
f70053d83788a2e90b07b78a54f2c2a35ef9b934 | 2,044 | py | Python | flash.py | segrids/arduino_due | f375020b81459eae9b325aa3646ff84efc2853e8 | [
"MIT"
] | 3 | 2021-08-20T16:03:37.000Z | 2022-03-23T20:23:30.000Z | flash.py | segrids/testbench | f375020b81459eae9b325aa3646ff84efc2853e8 | [
"MIT"
] | null | null | null | flash.py | segrids/testbench | f375020b81459eae9b325aa3646ff84efc2853e8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""flash.py
Usage:
flash.py [<image>] [options]
flash.py (-h | --help)
Options:
-h --help Show this screen.
--target=<target> Select the target device [default: SAM3x8e].
--erase Erase the target before flashing.
--port=<p> Target device port [default: ttyACM0].
-v --verify Hash the flash and compare to binary.
-r --reset Reset the CPU (after write).
--bootloader=<bl> Specify a custom bootloader binary
[default: sam3x8e/bootloader.bin].
--plane=<pl> Select flash plane 0 or 1 [default: 0].
--boot-rom Boot from ROM.
"""
from sys import exit, stdout
import time
from docopt import docopt
from py.uart import Serial
from py import *
if __name__ == '__main__':
args = docopt(__doc__)
target = args['--target']
image = args['<image>']
port = '/dev/' + args['--port']
plane = int(args['--plane'])
bootloader = args['--bootloader']
verify = args['--verify']
erase = args['--erase']
boot_rom = args['--boot-rom']
reset = args['--reset']
print('Selected port:', port)
print('Selected image:', image)
if target == 'HT32':
from py.ht32.isp import ISP, isp
image = args['<image>']
if image is None:
if reset:
isp(Serial(port)).reset()
else:
print('No image specified, not flashing.')
else:
with open(image, 'rb') as f:
binary = f.read()
isp = ISP(Serial(port))
isp.page_erase(start_addr=0x0, end_addr=0x1000)
isp.flash(0x00, binary)
isp.reset()
elif target == 'SAM3x8e':
from py.sam3x8e.programmer import program
if bootloader is None:
bootloader = 'sam3x8e/bootloader.bin'
program(port, image=image, erase=erase, reset=True,\
verify=verify, bootloader_image=bootloader, plane=plane, boot_rom=boot_rom)
else:
print('Unknown target.')
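# Example invocations matching the docopt usage string above (illustrative
# only; image path and port are placeholders):
#
#     ./flash.py build/firmware.bin --erase --verify --port=ttyACM0
#     ./flash.py build/firmware.bin --target=HT32 --reset
#     ./flash.py --target=HT32 --reset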
| 30.507463 | 91 | 0.572407 | from sys import exit, stdout
import time
from docopt import docopt
from py.uart import Serial
from py import *
if __name__ == '__main__':
args = docopt(__doc__)
target = args['--target']
image = args['<image>']
port = '/dev/' + args['--port']
plane = int(args['--plane'])
bootloader = args['--bootloader']
verify = args['--verify']
erase = args['--erase']
boot_rom = args['--boot-rom']
reset = args['--reset']
print('Selected port:', port)
print('Selected image:', image)
if target == 'HT32':
from py.ht32.isp import ISP, isp
image = args['<image>']
if image is None:
if reset:
isp(Serial(port)).reset()
else:
print('No image specified, not flashing.')
else:
with open(image, 'rb') as f:
binary = f.read()
isp = ISP(Serial(port))
isp.page_erase(start_addr=0x0, end_addr=0x1000)
isp.flash(0x00, binary)
isp.reset()
elif target == 'SAM3x8e':
from py.sam3x8e.programmer import program
if bootloader is None:
bootloader = 'sam3x8e/bootloader.bin'
program(port, image=image, erase=erase, reset=True,\
verify=verify, bootloader_image=bootloader, plane=plane, boot_rom=boot_rom)
else:
print('Unknown target.')
| true | true |
f700541898a7e9b3e0e2a00fccab8f254ebb8886 | 28,007 | py | Python | tests/functional/test_managesf.py | earlren1014/RedHat-Software-Factory | dd50eba4e353945886ebceb5dd608179d608b956 | [
"Apache-2.0"
] | null | null | null | tests/functional/test_managesf.py | earlren1014/RedHat-Software-Factory | dd50eba4e353945886ebceb5dd608179d608b956 | [
"Apache-2.0"
] | null | null | null | tests/functional/test_managesf.py | earlren1014/RedHat-Software-Factory | dd50eba4e353945886ebceb5dd608179d608b956 | [
"Apache-2.0"
] | null | null | null | #!/bin/env python
#
# Copyright (C) 2014 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import config
import shutil
import requests
from utils import Base
from utils import ManageSfUtils
from utils import GerritGitUtils
from utils import create_random_str
from utils import set_private_key
from utils import is_present, skipIfServiceMissing, skipIfServicePresent
from utils import skipIfIssueTrackerMissing
from pysflib.sfredmine import RedmineUtils
from pysflib.sfgerrit import GerritUtils
class TestConditionalTesting(Base):
"""Functional tests validating the service decorators. If the tests
are not skipped as expected, fail the tests.
"""
@skipIfServiceMissing('SomeLameFantasyServiceThatDoesNotExist')
def test_skip_if_service_missing(self):
self.fail('Failure to detect that a service is missing')
# assuming gerrit will always be there ...
@skipIfServicePresent('SFGerrit')
def test_skip_if_service_present(self):
self.fail('Failure to detect that a service is present')
class TestManageSF(Base):
""" Functional tests that validate managesf features.
Here we do basic verifications about project creation
with managesf.
"""
@classmethod
def setUpClass(cls):
cls.msu = ManageSfUtils(config.GATEWAY_URL)
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
self.projects = []
self.dirs_to_delete = []
self.rm = RedmineUtils(
config.GATEWAY_URL + "/redmine/",
auth_cookie=config.USERS[config.ADMIN_USER]['auth_cookie'])
self.gu = GerritUtils(
config.GATEWAY_URL,
auth_cookie=config.USERS[config.ADMIN_USER]['auth_cookie'])
def project_exists_ex(self, name, user):
# Test here the project is "public"
# ( Redmine API project detail does not return the private/public flag)
rm = RedmineUtils(
config.GATEWAY_URL + "/redmine/",
auth_cookie=config.USERS[user]['auth_cookie'])
try:
return rm.project_exists(name)
except Exception:
return False
def tearDown(self):
for name in self.projects:
self.msu.deleteProject(name,
config.ADMIN_USER)
for dirs in self.dirs_to_delete:
shutil.rmtree(dirs)
def create_project(self, name, user, options=None):
self.msu.createProject(name, user, options)
self.projects.append(name)
def test_create_public_project_as_admin(self):
""" Create public project on redmine and gerrit as admin
"""
pname = 'p_%s' % create_random_str()
self.create_project(pname, config.ADMIN_USER)
# Gerrit part
self.assertTrue(self.gu.project_exists(pname))
self.assertTrue(self.gu.group_exists('%s-ptl' % pname))
self.assertTrue(self.gu.group_exists('%s-core' % pname))
# TODO(Project creator, as project owner, should only be in ptl group)
self.assertTrue(
self.gu.member_in_group(config.ADMIN_USER, '%s-ptl' % pname))
self.assertTrue(
self.gu.member_in_group(config.ADMIN_USER, '%s-core' % pname))
# Redmine part
if is_present("SFRedmine"):
self.assertTrue(self.rm.project_exists(pname))
self.assertTrue(
self.rm.check_user_role(pname, config.ADMIN_USER, 'Manager'))
self.assertTrue(
self.rm.check_user_role(pname, config.ADMIN_USER, 'Developer'))
self.assertTrue(self.project_exists_ex(pname, config.USER_2))
def test_create_private_project_as_admin(self):
""" Create private project on redmine and gerrit as admin
"""
pname = 'p_%s' % create_random_str()
options = {"private": ""}
self.create_project(pname, config.ADMIN_USER,
options=options)
# Gerrit part
self.assertTrue(self.gu.project_exists(pname))
self.assertTrue(self.gu.group_exists('%s-ptl' % pname))
self.assertTrue(self.gu.group_exists('%s-core' % pname))
self.assertTrue(self.gu.group_exists('%s-dev' % pname))
# TODO(Project creator, as project owner, should only be in ptl group)
self.assertTrue(
self.gu.member_in_group(config.ADMIN_USER, '%s-ptl' % pname))
self.assertTrue(
self.gu.member_in_group(config.ADMIN_USER, '%s-core' % pname))
self.assertTrue(
self.gu.member_in_group(config.ADMIN_USER, '%s-dev' % pname))
# Redmine part
if is_present("SFRedmine"):
self.assertTrue(self.rm.project_exists(pname))
self.assertTrue(
self.rm.check_user_role(pname, config.ADMIN_USER, 'Manager'))
self.assertTrue(
self.rm.check_user_role(pname, config.ADMIN_USER, 'Developer'))
self.assertFalse(self.project_exists_ex(pname, config.USER_2))
def test_delete_public_project_as_admin(self):
""" Delete public project on redmine and gerrit as admin
"""
pname = 'p_%s' % create_random_str()
self.create_project(pname, config.ADMIN_USER)
self.assertTrue(self.gu.project_exists(pname))
if is_present("SFRedmine"):
self.assertTrue(self.rm.project_exists(pname))
self.msu.deleteProject(pname, config.ADMIN_USER)
self.assertFalse(self.gu.project_exists(pname))
self.assertFalse(self.gu.group_exists('%s-ptl' % pname))
if is_present("SFRedmine"):
self.assertFalse(self.rm.project_exists(pname))
self.assertFalse(self.gu.group_exists('%s-core' % pname))
self.projects.remove(pname)
def test_create_public_project_as_user(self):
""" Create public project on redmine and gerrit as user
"""
pname = 'p_%s' % create_random_str()
self.create_project(pname, config.USER_2)
# Gerrit part
self.assertTrue(self.gu.project_exists(pname))
self.assertTrue(self.gu.group_exists('%s-ptl' % pname))
self.assertTrue(self.gu.group_exists('%s-core' % pname))
# TODO(Project creator, as project owner, should only be in ptl group)
self.assertTrue(
self.gu.member_in_group(config.ADMIN_USER, '%s-ptl' % pname))
self.assertTrue(
self.gu.member_in_group(config.ADMIN_USER, '%s-core' % pname))
# Redmine part
if is_present("SFRedmine"):
self.assertTrue(self.rm.project_exists(pname))
self.assertTrue(self.project_exists_ex(pname, config.USER_2))
self.assertTrue(
self.rm.check_user_role(pname, config.USER_2, 'Manager'))
self.assertTrue(
self.rm.check_user_role(pname, config.USER_2, 'Developer'))
self.assertTrue(self.project_exists_ex(pname, config.USER_3))
def test_create_private_project_as_user(self):
""" Create private project on redmine and gerrit as user
"""
pname = 'p_%s' % create_random_str()
options = {"private": ""}
self.create_project(pname, config.USER_2,
options=options)
# Gerrit part
self.assertTrue(self.gu.project_exists(pname))
self.assertTrue(self.gu.group_exists('%s-ptl' % pname))
self.assertTrue(self.gu.group_exists('%s-core' % pname))
self.assertTrue(self.gu.group_exists('%s-dev' % pname))
# TODO(Project creator, as project owner, should only be in ptl group)
self.assertTrue(
self.gu.member_in_group(config.USER_2, '%s-ptl' % pname))
self.assertTrue(
self.gu.member_in_group(config.USER_2, '%s-core' % pname))
self.assertTrue(
self.gu.member_in_group(config.USER_2, '%s-dev' % pname))
# Redmine part
if is_present("SFRedmine"):
self.assertTrue(self.rm.project_exists(pname))
self.assertTrue(self.project_exists_ex(pname, config.USER_2))
self.assertTrue(
self.rm.check_user_role(pname, config.USER_2, 'Manager'))
self.assertTrue(
self.rm.check_user_role(pname, config.USER_2, 'Developer'))
self.assertFalse(self.project_exists_ex(pname, config.USER_3))
def test_create_public_project_with_users_in_group(self):
""" Create public project on redmine and gerrit with users in groups
"""
pname = 'p_%s' % create_random_str()
u2mail = config.USERS[config.USER_2]['email']
u3mail = config.USERS[config.USER_3]['email']
options = {"ptl-group": "",
"core-group": "%s,%s" % (u2mail, u3mail),
}
self.create_project(pname, config.ADMIN_USER,
options=options)
# Gerrit part
self.assertTrue(self.gu.project_exists(pname))
# TODO(Project creator, as project owner, should only be in ptl group)
self.assertTrue(
self.gu.member_in_group(config.ADMIN_USER, '%s-ptl' % pname))
for user in (config.ADMIN_USER, config.USER_2, config.USER_3):
self.assertTrue(self.gu.member_in_group(user, '%s-core' % pname))
if is_present("SFRedmine"):
self.assertTrue(self.rm.project_exists(pname))
self.assertTrue(
self.rm.check_user_role(pname, config.ADMIN_USER, 'Manager'))
for user in (config.ADMIN_USER, config.USER_2, config.USER_3):
self.assertTrue(self.rm.check_user_role(pname,
user,
'Developer'))
def test_create_private_project_with_users_in_group(self):
""" Create private project on redmine and gerrit with users in groups
"""
pname = 'p_%s' % create_random_str()
u2mail = config.USERS[config.USER_2]['email']
u3mail = config.USERS[config.USER_3]['email']
u4mail = config.USERS[config.USER_4]['email']
options = {"private": "",
"ptl-group": "",
"core-group": "%s,%s" % (u2mail, u3mail),
"dev-group": "%s" % u4mail,
}
self.create_project(pname, config.ADMIN_USER,
options=options)
# Gerrit part
self.assertTrue(self.gu.project_exists(pname))
# TODO(Project creator, as project owner, should only be in ptl group)
self.assertTrue(
self.gu.member_in_group(config.ADMIN_USER, '%s-ptl' % pname))
for user in (config.ADMIN_USER, config.USER_2, config.USER_3):
self.assertTrue(self.gu.member_in_group(user, '%s-core' % pname))
self.assertTrue(
self.gu.member_in_group(config.USER_4, '%s-dev' % pname))
# Redmine part
if is_present("SFRedmine"):
# it should be visible to admin
self.assertTrue(self.rm.project_exists(pname))
self.assertTrue(
self.rm.check_user_role(pname, config.ADMIN_USER, 'Manager'))
for user in (config.ADMIN_USER, config.USER_2,
config.USER_3, config.USER_4):
self.assertTrue(self.rm.check_user_role(pname, user,
'Developer'))
def test_create_public_project_as_admin_clone_as_admin(self):
""" Clone public project as admin and check content
"""
pname = 'a_%s' % create_random_str()
self.create_project(pname, config.ADMIN_USER)
ggu = GerritGitUtils(config.ADMIN_USER,
config.ADMIN_PRIV_KEY_PATH,
config.USERS[config.ADMIN_USER]['email'])
url = "ssh://%s@%s:29418/%s" % (config.ADMIN_USER,
config.GATEWAY_HOST, pname)
clone_dir = ggu.clone(url, pname)
self.dirs_to_delete.append(os.path.dirname(clone_dir))
# Test that the clone is a success
self.assertTrue(os.path.isdir(clone_dir))
# Verify master own the .gitreview file
self.assertTrue(os.path.isfile(os.path.join(clone_dir,
'.gitreview')))
# Verify meta/config branch own both group and ACLs config file
ggu.fetch_meta_config(clone_dir)
self.assertTrue(os.path.isfile(os.path.join(clone_dir,
'project.config')))
self.assertTrue(os.path.isfile(os.path.join(clone_dir,
'groups')))
# There is no group dev for a public project
content = file(os.path.join(clone_dir, 'project.config')).read()
self.assertFalse('%s-dev' % pname in content)
content = file(os.path.join(clone_dir, 'groups')).read()
self.assertFalse('%s-dev' % pname in content)
def test_create_private_project_as_admin_clone_as_admin(self):
""" Clone private project as admin and check content
"""
pname = 'p_%s' % create_random_str()
options = {"private": ""}
self.create_project(pname, config.ADMIN_USER, options=options)
ggu = GerritGitUtils(config.ADMIN_USER,
config.ADMIN_PRIV_KEY_PATH,
config.USERS[config.ADMIN_USER]['email'])
url = "ssh://%s@%s:29418/%s" % (config.ADMIN_USER,
config.GATEWAY_HOST, pname)
clone_dir = ggu.clone(url, pname)
self.dirs_to_delete.append(os.path.dirname(clone_dir))
# Test that the clone is a success
self.assertTrue(os.path.isdir(clone_dir))
# Verify master own the .gitreview file
self.assertTrue(os.path.isfile(os.path.join(clone_dir,
'.gitreview')))
# Verify meta/config branch own both group and ACLs config file
ggu.fetch_meta_config(clone_dir)
self.assertTrue(os.path.isfile(os.path.join(clone_dir,
'project.config')))
self.assertTrue(os.path.isfile(os.path.join(clone_dir,
'groups')))
# There is a group dev for a private project
content = file(os.path.join(clone_dir, 'project.config')).read()
self.assertTrue('%s-dev' % pname in content)
content = file(os.path.join(clone_dir, 'groups')).read()
self.assertTrue('%s-dev' % pname in content)
def test_create_public_project_as_admin_clone_as_user(self):
""" Create public project as admin then clone as user
"""
pname = 'p_%s' % create_random_str()
# create the project as admin
self.create_project(pname, config.ADMIN_USER)
# add user2 ssh pubkey to user2
gu = GerritUtils(
config.GATEWAY_URL,
auth_cookie=config.USERS[config.USER_2]['auth_cookie'])
gu.add_pubkey(config.USER_2_PUB_KEY)
# prepare to clone
priv_key_path = set_private_key(config.USER_2_PRIV_KEY)
self.dirs_to_delete.append(os.path.dirname(priv_key_path))
ggu = GerritGitUtils(config.USER_2,
priv_key_path,
config.USERS[config.USER_2]['email'])
url = "ssh://%s@%s:29418/%s" % (config.USER_2,
config.GATEWAY_HOST, pname)
# clone
clone_dir = ggu.clone(url, pname)
self.dirs_to_delete.append(os.path.dirname(clone_dir))
# Test that the clone is a success
self.assertTrue(os.path.isdir(clone_dir))
# Verify master own the .gitreview file
self.assertTrue(os.path.isfile(os.path.join(clone_dir,
'.gitreview')))
def test_create_public_project_as_user_clone_as_user(self):
""" Create public project as user then clone as user
"""
pname = 'p_%s' % create_random_str()
# create the project as admin
self.create_project(pname, config.USER_2)
# add user2 ssh pubkey to user2
gu = GerritUtils(
config.GATEWAY_URL,
auth_cookie=config.USERS[config.USER_2]['auth_cookie'])
gu.add_pubkey(config.USER_2_PUB_KEY)
# prepare to clone
priv_key_path = set_private_key(config.USER_2_PRIV_KEY)
self.dirs_to_delete.append(os.path.dirname(priv_key_path))
ggu = GerritGitUtils(config.USER_2,
priv_key_path,
config.USERS[config.USER_2]['email'])
url = "ssh://%s@%s:29418/%s" % (config.USER_2,
config.GATEWAY_HOST, pname)
# clone
clone_dir = ggu.clone(url, pname)
self.dirs_to_delete.append(os.path.dirname(clone_dir))
# Test that the clone is a success
self.assertTrue(os.path.isdir(clone_dir))
# Verify master own the .gitreview file
self.assertTrue(os.path.isfile(os.path.join(clone_dir,
'.gitreview')))
def test_upstream(self):
""" Validate upstream feature of managesf
"""
# Create a test upstream project
pname_us = 'p_upstream'
self.create_project(pname_us, config.ADMIN_USER)
ggu_us = GerritGitUtils(config.ADMIN_USER,
config.ADMIN_PRIV_KEY_PATH,
config.USERS[config.ADMIN_USER]['email'])
url = "ssh://%s@%s:29418/%s" % (config.ADMIN_USER,
config.GATEWAY_HOST, pname_us)
# clone
us_clone_dir = ggu_us.clone(url, pname_us)
self.dirs_to_delete.append(os.path.dirname(us_clone_dir))
# Test that the clone is a success
self.assertTrue(os.path.isdir(us_clone_dir))
# push some test files to the upstream project
us_files = [str(x) for x in range(1, 10)]
for f in us_files:
file(os.path.join(us_clone_dir, f), 'w').write(f)
os.chmod(os.path.join(us_clone_dir, f), 0755)
ggu_us.add_commit_in_branch(us_clone_dir, "master",
commit="Adding files 1-10",
files=us_files)
ggu_us.direct_push_branch(us_clone_dir, "master")
ggu_us.add_commit_in_branch(us_clone_dir, "branch1")
ggu_us.direct_push_branch(us_clone_dir, "branch1")
        # Now create a test project with upstream pointing to the above
upstream_url = "ssh://%s@%s:29418/%s" % (
config.ADMIN_USER, config.GATEWAY_HOST, pname_us)
pname = 'p_%s' % create_random_str()
# create the project as admin
options = {"upstream": upstream_url,
"upstream-ssh-key": config.ADMIN_PRIV_KEY_PATH}
self.create_project(pname, config.ADMIN_USER, options=options)
ggu = GerritGitUtils(config.ADMIN_USER,
config.ADMIN_PRIV_KEY_PATH,
config.USERS[config.ADMIN_USER]['email'])
url = "ssh://%s@%s:29418/%s" % (config.ADMIN_USER,
config.GATEWAY_HOST, pname)
# clone
clone_dir = ggu.clone(url, pname)
        # Check that the files pushed to the upstream project are present
files = [f for f in os.listdir(clone_dir) if not f.startswith('.')]
self.assertEqual(set(files), set(us_files))
branches = ggu.get_branches(clone_dir, True)
self.assertNotIn('gerrit/branch1', branches)
# Test upstream with additional branches
pname2 = 'p_%s' % create_random_str()
options['add-branches'] = ''
self.create_project(pname2, config.ADMIN_USER, options=options)
url = "ssh://%s@%s:29418/%s" % (config.ADMIN_USER,
config.GATEWAY_HOST, pname2)
clone_dir = ggu.clone(url, pname2)
branches = ggu.get_branches(clone_dir, True)
self.assertIn('gerrit/branch1', branches)
self.dirs_to_delete.append(os.path.dirname(clone_dir))
def test_delete_project_as_admin(self):
""" Check if admin can delete projects that are not owned by admin
"""
pname = 'p_%s' % create_random_str()
self.create_project(pname, config.USER_2)
self.assertTrue(self.gu.project_exists(pname))
if is_present("SFRedmine"):
self.assertTrue(self.rm.project_exists(pname))
self.msu.deleteProject(pname, config.ADMIN_USER)
self.assertFalse(self.gu.project_exists(pname))
self.assertFalse(self.gu.group_exists('%s-ptl' % pname))
if is_present("SFRedmine"):
self.assertFalse(self.rm.project_exists(pname))
self.assertFalse(self.gu.group_exists('%s-core' % pname))
self.projects.remove(pname)
def test_basic_ops_project_namespace(self):
""" Check if a project named with a / (namespace) is handled
correctly on basic ops by managesf
"""
pname = 'skydive/%s' % create_random_str()
self.create_project(pname, config.USER_2)
self.assertTrue(self.gu.project_exists(pname))
self.assertTrue(self.gu.group_exists('%s-ptl' % pname))
if is_present("SFRedmine"):
rname = '_'.join(pname.split('/'))
self.assertTrue(self.rm.project_exists(rname))
# Try to clone
ggu = GerritGitUtils(config.ADMIN_USER,
config.ADMIN_PRIV_KEY_PATH,
config.USERS[config.ADMIN_USER]['email'])
url = "ssh://%s@%s:29418/%s" % (config.ADMIN_USER,
config.GATEWAY_HOST, pname)
clone_dir = ggu.clone(url, pname.split('/')[-1])
self.dirs_to_delete.append(os.path.dirname(clone_dir))
# Test that the clone is a success
self.assertTrue(os.path.isdir(clone_dir))
# Verify master own the .gitreview file
self.assertTrue(os.path.isfile(os.path.join(clone_dir,
'.gitreview')))
# Delete the project from SF
self.msu.deleteProject(pname, config.ADMIN_USER)
self.assertFalse(self.gu.project_exists(pname))
self.assertFalse(self.gu.group_exists('%s-ptl' % pname))
if is_present("SFRedmine"):
rname = '_'.join(pname.split('/'))
self.assertFalse(self.rm.project_exists(rname))
self.assertFalse(self.gu.group_exists('%s-core' % pname))
# Clean local clone directory
self.projects.remove(pname)
# For now listing users comes from Redmine
@skipIfIssueTrackerMissing()
def test_list_active_members(self):
""" Check the list of members as a list of tuples of emails and names
"""
self.assertTrue(self.msu.list_active_members(config.USER_2))
def test_init_user_tests(self):
""" Check if a test init feature behave as expected
"""
project = 'p_%s' % create_random_str()
self.create_project(project, config.USER_4)
self.msu.create_init_tests(project, config.USER_4)
ggu = GerritGitUtils(config.ADMIN_USER,
config.ADMIN_PRIV_KEY_PATH,
config.USERS[config.ADMIN_USER]['email'])
open_reviews = ggu.list_open_reviews('config', config.GATEWAY_HOST)
match = [True for review in open_reviews if review['commitMessage'].
startswith("%s proposes initial test "
"definition for project %s" %
(config.USER_4, project))]
self.assertEqual(len(match), 1)
open_reviews = ggu.list_open_reviews(project, config.GATEWAY_HOST)
match = [True for review in open_reviews if review['commitMessage'].
startswith("%s proposes initial test "
"scripts for project %s" %
(config.USER_4, project))]
self.assertEqual(len(match), 1)
def test_rest_urls_accessible(self):
""" Check if managesf URLs are all working
"""
project = 'p_%s' % create_random_str()
self.create_project(project, config.ADMIN_USER)
cookies = dict(
auth_pubtkt=config.USERS[config.ADMIN_USER]['auth_cookie'])
paths = [
"/manage/project/",
"/manage/project/%s" % project,
"/manage/project/membership/"]
for path in paths:
url = "http://%s%s" % (config.GATEWAY_HOST, path)
resp = requests.get(url, cookies=cookies)
self.assertEqual(200, resp.status_code)
def test_validate_get_all_project_details(self):
""" Check if managesf allow us to fetch projects details
"""
project = 'p_%s' % create_random_str()
self.create_project(project, config.USER_2)
admin_cookies = dict(
auth_pubtkt=config.USERS[config.ADMIN_USER]['auth_cookie'])
user2_cookies = dict(
auth_pubtkt=config.USERS[config.USER_2]['auth_cookie'])
url = "http://%s%s" % (config.GATEWAY_HOST, "/manage/project/")
resp = requests.get(url, cookies=admin_cookies)
self.assertEqual(200, resp.status_code)
self.assertTrue(project in resp.json())
self.assertTrue('config' in resp.json())
resp = requests.get(url, cookies=user2_cookies)
self.assertEqual(200, resp.status_code)
self.assertTrue(project in resp.json())
self.assertTrue('config' in resp.json())
resp = requests.get(url, cookies=user2_cookies)
# Validate the same behavior with project including a '/'
project = 'p/%s' % create_random_str()
self.create_project(project, config.USER_2)
url = "http://%s%s" % (config.GATEWAY_HOST, "/manage/project/")
# Wait 15 seconds for managesf cache invalidation
import time
time.sleep(15)
resp = requests.get(url, cookies=user2_cookies)
self.assertEqual(200, resp.status_code)
self.assertTrue(project in resp.json())
def test_project_pages_config(self):
""" Check if managesf allow us to configure pages for a project
"""
project = 'p_%s' % create_random_str()
self.create_project(project, config.USER_2)
self.assertTrue(self.gu.project_exists(project))
self.assertTrue(self.rm.project_exists(project))
self.msu.update_project_page(config.USER_2, project,
"http://tests.com/")
self.assertEqual(self.msu.get_project_page(config.USER_2,
project).strip(),
"\"http://tests.com/\"")
self.msu.delete_project_page(config.USER_3, project)
self.assertEqual(self.msu.get_project_page(config.USER_2,
project).strip(),
"\"http://tests.com/\"")
self.msu.delete_project_page(config.USER_2, project)
self.assertEqual(self.msu.get_project_page(config.USER_2,
project).strip(),
"")
| 46.523256 | 79 | 0.602956 |
import os
import config
import shutil
import requests
from utils import Base
from utils import ManageSfUtils
from utils import GerritGitUtils
from utils import create_random_str
from utils import set_private_key
from utils import is_present, skipIfServiceMissing, skipIfServicePresent
from utils import skipIfIssueTrackerMissing
from pysflib.sfredmine import RedmineUtils
from pysflib.sfgerrit import GerritUtils
class TestConditionalTesting(Base):
"""Functional tests validating the service decorators. If the tests
are not skipped as expected, fail the tests.
"""
@skipIfServiceMissing('SomeLameFantasyServiceThatDoesNotExist')
def test_skip_if_service_missing(self):
self.fail('Failure to detect that a service is missing')
@skipIfServicePresent('SFGerrit')
def test_skip_if_service_present(self):
self.fail('Failure to detect that a service is present')
class TestManageSF(Base):
""" Functional tests that validate managesf features.
Here we do basic verifications about project creation
with managesf.
"""
@classmethod
def setUpClass(cls):
cls.msu = ManageSfUtils(config.GATEWAY_URL)
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
self.projects = []
self.dirs_to_delete = []
self.rm = RedmineUtils(
config.GATEWAY_URL + "/redmine/",
auth_cookie=config.USERS[config.ADMIN_USER]['auth_cookie'])
self.gu = GerritUtils(
config.GATEWAY_URL,
auth_cookie=config.USERS[config.ADMIN_USER]['auth_cookie'])
def project_exists_ex(self, name, user):
rm = RedmineUtils(
config.GATEWAY_URL + "/redmine/",
auth_cookie=config.USERS[user]['auth_cookie'])
try:
return rm.project_exists(name)
except Exception:
return False
def tearDown(self):
for name in self.projects:
self.msu.deleteProject(name,
config.ADMIN_USER)
for dirs in self.dirs_to_delete:
shutil.rmtree(dirs)
def create_project(self, name, user, options=None):
self.msu.createProject(name, user, options)
self.projects.append(name)
def test_create_public_project_as_admin(self):
""" Create public project on redmine and gerrit as admin
"""
pname = 'p_%s' % create_random_str()
self.create_project(pname, config.ADMIN_USER)
self.assertTrue(self.gu.project_exists(pname))
self.assertTrue(self.gu.group_exists('%s-ptl' % pname))
self.assertTrue(self.gu.group_exists('%s-core' % pname))
self.assertTrue(
self.gu.member_in_group(config.ADMIN_USER, '%s-ptl' % pname))
self.assertTrue(
self.gu.member_in_group(config.ADMIN_USER, '%s-core' % pname))
if is_present("SFRedmine"):
self.assertTrue(self.rm.project_exists(pname))
self.assertTrue(
self.rm.check_user_role(pname, config.ADMIN_USER, 'Manager'))
self.assertTrue(
self.rm.check_user_role(pname, config.ADMIN_USER, 'Developer'))
self.assertTrue(self.project_exists_ex(pname, config.USER_2))
def test_create_private_project_as_admin(self):
""" Create private project on redmine and gerrit as admin
"""
pname = 'p_%s' % create_random_str()
options = {"private": ""}
self.create_project(pname, config.ADMIN_USER,
options=options)
self.assertTrue(self.gu.project_exists(pname))
self.assertTrue(self.gu.group_exists('%s-ptl' % pname))
self.assertTrue(self.gu.group_exists('%s-core' % pname))
self.assertTrue(self.gu.group_exists('%s-dev' % pname))
self.assertTrue(
self.gu.member_in_group(config.ADMIN_USER, '%s-ptl' % pname))
self.assertTrue(
self.gu.member_in_group(config.ADMIN_USER, '%s-core' % pname))
self.assertTrue(
self.gu.member_in_group(config.ADMIN_USER, '%s-dev' % pname))
if is_present("SFRedmine"):
self.assertTrue(self.rm.project_exists(pname))
self.assertTrue(
self.rm.check_user_role(pname, config.ADMIN_USER, 'Manager'))
self.assertTrue(
self.rm.check_user_role(pname, config.ADMIN_USER, 'Developer'))
self.assertFalse(self.project_exists_ex(pname, config.USER_2))
def test_delete_public_project_as_admin(self):
""" Delete public project on redmine and gerrit as admin
"""
pname = 'p_%s' % create_random_str()
self.create_project(pname, config.ADMIN_USER)
self.assertTrue(self.gu.project_exists(pname))
if is_present("SFRedmine"):
self.assertTrue(self.rm.project_exists(pname))
self.msu.deleteProject(pname, config.ADMIN_USER)
self.assertFalse(self.gu.project_exists(pname))
self.assertFalse(self.gu.group_exists('%s-ptl' % pname))
if is_present("SFRedmine"):
self.assertFalse(self.rm.project_exists(pname))
self.assertFalse(self.gu.group_exists('%s-core' % pname))
self.projects.remove(pname)
def test_create_public_project_as_user(self):
""" Create public project on redmine and gerrit as user
"""
pname = 'p_%s' % create_random_str()
self.create_project(pname, config.USER_2)
self.assertTrue(self.gu.project_exists(pname))
self.assertTrue(self.gu.group_exists('%s-ptl' % pname))
self.assertTrue(self.gu.group_exists('%s-core' % pname))
self.assertTrue(
self.gu.member_in_group(config.ADMIN_USER, '%s-ptl' % pname))
self.assertTrue(
self.gu.member_in_group(config.ADMIN_USER, '%s-core' % pname))
if is_present("SFRedmine"):
self.assertTrue(self.rm.project_exists(pname))
self.assertTrue(self.project_exists_ex(pname, config.USER_2))
self.assertTrue(
self.rm.check_user_role(pname, config.USER_2, 'Manager'))
self.assertTrue(
self.rm.check_user_role(pname, config.USER_2, 'Developer'))
self.assertTrue(self.project_exists_ex(pname, config.USER_3))
def test_create_private_project_as_user(self):
""" Create private project on redmine and gerrit as user
"""
pname = 'p_%s' % create_random_str()
options = {"private": ""}
self.create_project(pname, config.USER_2,
options=options)
self.assertTrue(self.gu.project_exists(pname))
self.assertTrue(self.gu.group_exists('%s-ptl' % pname))
self.assertTrue(self.gu.group_exists('%s-core' % pname))
self.assertTrue(self.gu.group_exists('%s-dev' % pname))
self.assertTrue(
self.gu.member_in_group(config.USER_2, '%s-ptl' % pname))
self.assertTrue(
self.gu.member_in_group(config.USER_2, '%s-core' % pname))
self.assertTrue(
self.gu.member_in_group(config.USER_2, '%s-dev' % pname))
if is_present("SFRedmine"):
self.assertTrue(self.rm.project_exists(pname))
self.assertTrue(self.project_exists_ex(pname, config.USER_2))
self.assertTrue(
self.rm.check_user_role(pname, config.USER_2, 'Manager'))
self.assertTrue(
self.rm.check_user_role(pname, config.USER_2, 'Developer'))
self.assertFalse(self.project_exists_ex(pname, config.USER_3))
def test_create_public_project_with_users_in_group(self):
""" Create public project on redmine and gerrit with users in groups
"""
pname = 'p_%s' % create_random_str()
u2mail = config.USERS[config.USER_2]['email']
u3mail = config.USERS[config.USER_3]['email']
options = {"ptl-group": "",
"core-group": "%s,%s" % (u2mail, u3mail),
}
self.create_project(pname, config.ADMIN_USER,
options=options)
self.assertTrue(self.gu.project_exists(pname))
self.assertTrue(
self.gu.member_in_group(config.ADMIN_USER, '%s-ptl' % pname))
for user in (config.ADMIN_USER, config.USER_2, config.USER_3):
self.assertTrue(self.gu.member_in_group(user, '%s-core' % pname))
if is_present("SFRedmine"):
self.assertTrue(self.rm.project_exists(pname))
self.assertTrue(
self.rm.check_user_role(pname, config.ADMIN_USER, 'Manager'))
for user in (config.ADMIN_USER, config.USER_2, config.USER_3):
self.assertTrue(self.rm.check_user_role(pname,
user,
'Developer'))
def test_create_private_project_with_users_in_group(self):
""" Create private project on redmine and gerrit with users in groups
"""
pname = 'p_%s' % create_random_str()
u2mail = config.USERS[config.USER_2]['email']
u3mail = config.USERS[config.USER_3]['email']
u4mail = config.USERS[config.USER_4]['email']
options = {"private": "",
"ptl-group": "",
"core-group": "%s,%s" % (u2mail, u3mail),
"dev-group": "%s" % u4mail,
}
self.create_project(pname, config.ADMIN_USER,
options=options)
self.assertTrue(self.gu.project_exists(pname))
self.assertTrue(
self.gu.member_in_group(config.ADMIN_USER, '%s-ptl' % pname))
for user in (config.ADMIN_USER, config.USER_2, config.USER_3):
self.assertTrue(self.gu.member_in_group(user, '%s-core' % pname))
self.assertTrue(
self.gu.member_in_group(config.USER_4, '%s-dev' % pname))
if is_present("SFRedmine"):
self.assertTrue(self.rm.project_exists(pname))
self.assertTrue(
self.rm.check_user_role(pname, config.ADMIN_USER, 'Manager'))
for user in (config.ADMIN_USER, config.USER_2,
config.USER_3, config.USER_4):
self.assertTrue(self.rm.check_user_role(pname, user,
'Developer'))
def test_create_public_project_as_admin_clone_as_admin(self):
""" Clone public project as admin and check content
"""
pname = 'a_%s' % create_random_str()
self.create_project(pname, config.ADMIN_USER)
ggu = GerritGitUtils(config.ADMIN_USER,
config.ADMIN_PRIV_KEY_PATH,
config.USERS[config.ADMIN_USER]['email'])
url = "ssh://%s@%s:29418/%s" % (config.ADMIN_USER,
config.GATEWAY_HOST, pname)
clone_dir = ggu.clone(url, pname)
self.dirs_to_delete.append(os.path.dirname(clone_dir))
self.assertTrue(os.path.isdir(clone_dir))
self.assertTrue(os.path.isfile(os.path.join(clone_dir,
'.gitreview')))
ggu.fetch_meta_config(clone_dir)
self.assertTrue(os.path.isfile(os.path.join(clone_dir,
'project.config')))
self.assertTrue(os.path.isfile(os.path.join(clone_dir,
'groups')))
content = file(os.path.join(clone_dir, 'project.config')).read()
self.assertFalse('%s-dev' % pname in content)
content = file(os.path.join(clone_dir, 'groups')).read()
self.assertFalse('%s-dev' % pname in content)
def test_create_private_project_as_admin_clone_as_admin(self):
""" Clone private project as admin and check content
"""
pname = 'p_%s' % create_random_str()
options = {"private": ""}
self.create_project(pname, config.ADMIN_USER, options=options)
ggu = GerritGitUtils(config.ADMIN_USER,
config.ADMIN_PRIV_KEY_PATH,
config.USERS[config.ADMIN_USER]['email'])
url = "ssh://%s@%s:29418/%s" % (config.ADMIN_USER,
config.GATEWAY_HOST, pname)
clone_dir = ggu.clone(url, pname)
self.dirs_to_delete.append(os.path.dirname(clone_dir))
self.assertTrue(os.path.isdir(clone_dir))
self.assertTrue(os.path.isfile(os.path.join(clone_dir,
'.gitreview')))
ggu.fetch_meta_config(clone_dir)
self.assertTrue(os.path.isfile(os.path.join(clone_dir,
'project.config')))
self.assertTrue(os.path.isfile(os.path.join(clone_dir,
'groups')))
content = file(os.path.join(clone_dir, 'project.config')).read()
self.assertTrue('%s-dev' % pname in content)
content = file(os.path.join(clone_dir, 'groups')).read()
self.assertTrue('%s-dev' % pname in content)
def test_create_public_project_as_admin_clone_as_user(self):
""" Create public project as admin then clone as user
"""
pname = 'p_%s' % create_random_str()
self.create_project(pname, config.ADMIN_USER)
gu = GerritUtils(
config.GATEWAY_URL,
auth_cookie=config.USERS[config.USER_2]['auth_cookie'])
gu.add_pubkey(config.USER_2_PUB_KEY)
priv_key_path = set_private_key(config.USER_2_PRIV_KEY)
self.dirs_to_delete.append(os.path.dirname(priv_key_path))
ggu = GerritGitUtils(config.USER_2,
priv_key_path,
config.USERS[config.USER_2]['email'])
url = "ssh://%s@%s:29418/%s" % (config.USER_2,
config.GATEWAY_HOST, pname)
clone_dir = ggu.clone(url, pname)
self.dirs_to_delete.append(os.path.dirname(clone_dir))
self.assertTrue(os.path.isdir(clone_dir))
self.assertTrue(os.path.isfile(os.path.join(clone_dir,
'.gitreview')))
def test_create_public_project_as_user_clone_as_user(self):
""" Create public project as user then clone as user
"""
pname = 'p_%s' % create_random_str()
self.create_project(pname, config.USER_2)
gu = GerritUtils(
config.GATEWAY_URL,
auth_cookie=config.USERS[config.USER_2]['auth_cookie'])
gu.add_pubkey(config.USER_2_PUB_KEY)
priv_key_path = set_private_key(config.USER_2_PRIV_KEY)
self.dirs_to_delete.append(os.path.dirname(priv_key_path))
ggu = GerritGitUtils(config.USER_2,
priv_key_path,
config.USERS[config.USER_2]['email'])
url = "ssh://%s@%s:29418/%s" % (config.USER_2,
config.GATEWAY_HOST, pname)
clone_dir = ggu.clone(url, pname)
self.dirs_to_delete.append(os.path.dirname(clone_dir))
self.assertTrue(os.path.isdir(clone_dir))
self.assertTrue(os.path.isfile(os.path.join(clone_dir,
'.gitreview')))
def test_upstream(self):
""" Validate upstream feature of managesf
"""
pname_us = 'p_upstream'
self.create_project(pname_us, config.ADMIN_USER)
ggu_us = GerritGitUtils(config.ADMIN_USER,
config.ADMIN_PRIV_KEY_PATH,
config.USERS[config.ADMIN_USER]['email'])
url = "ssh://%s@%s:29418/%s" % (config.ADMIN_USER,
config.GATEWAY_HOST, pname_us)
us_clone_dir = ggu_us.clone(url, pname_us)
self.dirs_to_delete.append(os.path.dirname(us_clone_dir))
self.assertTrue(os.path.isdir(us_clone_dir))
us_files = [str(x) for x in range(1, 10)]
for f in us_files:
file(os.path.join(us_clone_dir, f), 'w').write(f)
os.chmod(os.path.join(us_clone_dir, f), 0755)
ggu_us.add_commit_in_branch(us_clone_dir, "master",
commit="Adding files 1-10",
files=us_files)
ggu_us.direct_push_branch(us_clone_dir, "master")
ggu_us.add_commit_in_branch(us_clone_dir, "branch1")
ggu_us.direct_push_branch(us_clone_dir, "branch1")
upstream_url = "ssh://%s@%s:29418/%s" % (
config.ADMIN_USER, config.GATEWAY_HOST, pname_us)
pname = 'p_%s' % create_random_str()
options = {"upstream": upstream_url,
"upstream-ssh-key": config.ADMIN_PRIV_KEY_PATH}
self.create_project(pname, config.ADMIN_USER, options=options)
ggu = GerritGitUtils(config.ADMIN_USER,
config.ADMIN_PRIV_KEY_PATH,
config.USERS[config.ADMIN_USER]['email'])
url = "ssh://%s@%s:29418/%s" % (config.ADMIN_USER,
config.GATEWAY_HOST, pname)
clone_dir = ggu.clone(url, pname)
files = [f for f in os.listdir(clone_dir) if not f.startswith('.')]
self.assertEqual(set(files), set(us_files))
branches = ggu.get_branches(clone_dir, True)
self.assertNotIn('gerrit/branch1', branches)
pname2 = 'p_%s' % create_random_str()
options['add-branches'] = ''
self.create_project(pname2, config.ADMIN_USER, options=options)
url = "ssh://%s@%s:29418/%s" % (config.ADMIN_USER,
config.GATEWAY_HOST, pname2)
clone_dir = ggu.clone(url, pname2)
branches = ggu.get_branches(clone_dir, True)
self.assertIn('gerrit/branch1', branches)
self.dirs_to_delete.append(os.path.dirname(clone_dir))
def test_delete_project_as_admin(self):
""" Check if admin can delete projects that are not owned by admin
"""
pname = 'p_%s' % create_random_str()
self.create_project(pname, config.USER_2)
self.assertTrue(self.gu.project_exists(pname))
if is_present("SFRedmine"):
self.assertTrue(self.rm.project_exists(pname))
self.msu.deleteProject(pname, config.ADMIN_USER)
self.assertFalse(self.gu.project_exists(pname))
self.assertFalse(self.gu.group_exists('%s-ptl' % pname))
if is_present("SFRedmine"):
self.assertFalse(self.rm.project_exists(pname))
self.assertFalse(self.gu.group_exists('%s-core' % pname))
self.projects.remove(pname)
def test_basic_ops_project_namespace(self):
""" Check if a project named with a / (namespace) is handled
correctly on basic ops by managesf
"""
pname = 'skydive/%s' % create_random_str()
self.create_project(pname, config.USER_2)
self.assertTrue(self.gu.project_exists(pname))
self.assertTrue(self.gu.group_exists('%s-ptl' % pname))
if is_present("SFRedmine"):
rname = '_'.join(pname.split('/'))
self.assertTrue(self.rm.project_exists(rname))
ggu = GerritGitUtils(config.ADMIN_USER,
config.ADMIN_PRIV_KEY_PATH,
config.USERS[config.ADMIN_USER]['email'])
url = "ssh://%s@%s:29418/%s" % (config.ADMIN_USER,
config.GATEWAY_HOST, pname)
clone_dir = ggu.clone(url, pname.split('/')[-1])
self.dirs_to_delete.append(os.path.dirname(clone_dir))
self.assertTrue(os.path.isdir(clone_dir))
self.assertTrue(os.path.isfile(os.path.join(clone_dir,
'.gitreview')))
self.msu.deleteProject(pname, config.ADMIN_USER)
self.assertFalse(self.gu.project_exists(pname))
self.assertFalse(self.gu.group_exists('%s-ptl' % pname))
if is_present("SFRedmine"):
rname = '_'.join(pname.split('/'))
self.assertFalse(self.rm.project_exists(rname))
self.assertFalse(self.gu.group_exists('%s-core' % pname))
self.projects.remove(pname)
@skipIfIssueTrackerMissing()
def test_list_active_members(self):
""" Check the list of members as a list of tuples of emails and names
"""
self.assertTrue(self.msu.list_active_members(config.USER_2))
def test_init_user_tests(self):
""" Check if a test init feature behave as expected
"""
project = 'p_%s' % create_random_str()
self.create_project(project, config.USER_4)
self.msu.create_init_tests(project, config.USER_4)
ggu = GerritGitUtils(config.ADMIN_USER,
config.ADMIN_PRIV_KEY_PATH,
config.USERS[config.ADMIN_USER]['email'])
open_reviews = ggu.list_open_reviews('config', config.GATEWAY_HOST)
match = [True for review in open_reviews if review['commitMessage'].
startswith("%s proposes initial test "
"definition for project %s" %
(config.USER_4, project))]
self.assertEqual(len(match), 1)
open_reviews = ggu.list_open_reviews(project, config.GATEWAY_HOST)
match = [True for review in open_reviews if review['commitMessage'].
startswith("%s proposes initial test "
"scripts for project %s" %
(config.USER_4, project))]
self.assertEqual(len(match), 1)
def test_rest_urls_accessible(self):
""" Check if managesf URLs are all working
"""
project = 'p_%s' % create_random_str()
self.create_project(project, config.ADMIN_USER)
cookies = dict(
auth_pubtkt=config.USERS[config.ADMIN_USER]['auth_cookie'])
paths = [
"/manage/project/",
"/manage/project/%s" % project,
"/manage/project/membership/"]
for path in paths:
url = "http://%s%s" % (config.GATEWAY_HOST, path)
resp = requests.get(url, cookies=cookies)
self.assertEqual(200, resp.status_code)
def test_validate_get_all_project_details(self):
""" Check if managesf allow us to fetch projects details
"""
project = 'p_%s' % create_random_str()
self.create_project(project, config.USER_2)
admin_cookies = dict(
auth_pubtkt=config.USERS[config.ADMIN_USER]['auth_cookie'])
user2_cookies = dict(
auth_pubtkt=config.USERS[config.USER_2]['auth_cookie'])
url = "http://%s%s" % (config.GATEWAY_HOST, "/manage/project/")
resp = requests.get(url, cookies=admin_cookies)
self.assertEqual(200, resp.status_code)
self.assertTrue(project in resp.json())
self.assertTrue('config' in resp.json())
resp = requests.get(url, cookies=user2_cookies)
self.assertEqual(200, resp.status_code)
self.assertTrue(project in resp.json())
self.assertTrue('config' in resp.json())
resp = requests.get(url, cookies=user2_cookies)
project = 'p/%s' % create_random_str()
self.create_project(project, config.USER_2)
url = "http://%s%s" % (config.GATEWAY_HOST, "/manage/project/")
import time
time.sleep(15)
resp = requests.get(url, cookies=user2_cookies)
self.assertEqual(200, resp.status_code)
self.assertTrue(project in resp.json())
def test_project_pages_config(self):
""" Check if managesf allow us to configure pages for a project
"""
project = 'p_%s' % create_random_str()
self.create_project(project, config.USER_2)
self.assertTrue(self.gu.project_exists(project))
self.assertTrue(self.rm.project_exists(project))
self.msu.update_project_page(config.USER_2, project,
"http://tests.com/")
self.assertEqual(self.msu.get_project_page(config.USER_2,
project).strip(),
"\"http://tests.com/\"")
self.msu.delete_project_page(config.USER_3, project)
self.assertEqual(self.msu.get_project_page(config.USER_2,
project).strip(),
"\"http://tests.com/\"")
self.msu.delete_project_page(config.USER_2, project)
self.assertEqual(self.msu.get_project_page(config.USER_2,
project).strip(),
"")
| false | true |
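One pattern worth calling out in the test class above: every created project and temporary directory is registered on the instance in setUp so tearDown can always clean up, even when an assertion fails mid-test. A stripped-down, generic sketch of that pattern (the in-memory PROJECTS set stands in for the real managesf/Gerrit/Redmine services):

```python
import shutil
import tempfile
import unittest

PROJECTS = set()  # stand-in for the remote services the real tests talk to


class CleanupPatternExample(unittest.TestCase):
    def setUp(self):
        # Track everything the test creates so tearDown can always undo it.
        self.created = []
        self.dirs_to_delete = []

    def tearDown(self):
        for name in self.created:
            PROJECTS.discard(name)      # stand-in for msu.deleteProject(...)
        for path in self.dirs_to_delete:
            shutil.rmtree(path, ignore_errors=True)

    def test_create_and_check(self):
        name = 'p_demo'
        PROJECTS.add(name)              # stand-in for msu.createProject(...)
        self.created.append(name)       # register right after creation
        self.dirs_to_delete.append(tempfile.mkdtemp())
        self.assertIn(name, PROJECTS)


if __name__ == '__main__':
    unittest.main()
```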
f70054db99d6dc4c33a51f803f6de4a176c6c5f4 | 6,215 | py | Python | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2022_04_01/operations/_private_link_resources_operations.py | dubiety/azure-sdk-for-python | 62ffa839f5d753594cf0fe63668f454a9d87a346 | ["MIT"] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2022_04_01/operations/_private_link_resources_operations.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | ["MIT"] | null | null | null | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2022_04_01/operations/_private_link_resources_operations.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | ["MIT"] | null | null | null |
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2022-04-01") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateLinkResources") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
class PrivateLinkResourcesOperations(object):
"""PrivateLinkResourcesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2022_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.PrivateLinkResourcesListResult":
"""Gets a list of private link resources in the specified managed cluster.
To learn more about private clusters, see:
https://docs.microsoft.com/azure/aks/private-clusters.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkResourcesListResult, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_04_01.models.PrivateLinkResourcesListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkResourcesListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-04-01") # type: str
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
api_version=api_version,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateLinkResourcesListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateLinkResources"} # type: ignore
| 42.862069 | 229 | 0.696219 |
from typing import Any, Callable, Dict, Optional, TypeVar
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2022-04-01")
accept = "application/json"
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateLinkResources") path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
_url = _format_url_section(_url, **path_format_arguments)
_query_parameters = kwargs.pop("params", {}) _query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
_header_parameters = kwargs.pop("headers", {}) _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
class PrivateLinkResourcesOperations(object):
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.PrivateLinkResourcesListResult":
cls = kwargs.pop('cls', None) error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-04-01")
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
api_version=api_version,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateLinkResourcesListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateLinkResources"}
| true | true |
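As the class docstring above says, the generated operations class is reached through the service client rather than instantiated directly. A sketch of the usual call path, assuming the standard azure-identity and azure-mgmt-containerservice client wiring; the subscription, resource group and cluster names are placeholders:

```python
from azure.identity import DefaultAzureCredential
from azure.mgmt.containerservice import ContainerServiceClient

credential = DefaultAzureCredential()
client = ContainerServiceClient(credential, subscription_id="<subscription-id>")

# The client exposes the operations group above as an attribute; list() issues
# the GET .../privateLinkResources request assembled by build_list_request.
result = client.private_link_resources.list(
    resource_group_name="<resource-group>",
    resource_name="<aks-cluster-name>",
)
for resource in result.value or []:
    print(resource.name)
```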
f70054faef3359df19102559f92b4e27574b584c | 451 | py | Python | improve_django_v3/menu/migrations/0006_auto_20170930_1108.py | squadran2003/improve-a-django-project | 89cff9f0b0e6534036e45565a17ae807b0e0a8b5 | ["MIT"] | null | null | null | improve_django_v3/menu/migrations/0006_auto_20170930_1108.py | squadran2003/improve-a-django-project | 89cff9f0b0e6534036e45565a17ae807b0e0a8b5 | ["MIT"] | null | null | null | improve_django_v3/menu/migrations/0006_auto_20170930_1108.py | squadran2003/improve-a-django-project | 89cff9f0b0e6534036e45565a17ae807b0e0a8b5 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2017-09-30 18:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('menu', '0005_auto_20170930_1059'),
]
operations = [
migrations.AlterField(
model_name='item',
name='description',
field=models.CharField(max_length=255),
),
]
| 21.47619 | 51 | 0.616408 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('menu', '0005_auto_20170930_1059'),
]
operations = [
migrations.AlterField(
model_name='item',
name='description',
field=models.CharField(max_length=255),
),
]
| true | true |
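The AlterField migration above only records the end state of the menu.Item.description column; a hypothetical reconstruction of the model it implies is shown below (the real model certainly has more fields):

```python
from django.db import models


class Item(models.Model):
    # After migration 0006 the description column is a bounded varchar(255).
    description = models.CharField(max_length=255)
```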
f700558e81a49cf9663da27bc6a4e2869c418a2f | 704 | py | Python | radio/apps.py | MaxwellDPS/trunk-player | 19f116e64249823f3a12b35ed55252db60b1cf78 | ["MIT"] | null | null | null | radio/apps.py | MaxwellDPS/trunk-player | 19f116e64249823f3a12b35ed55252db60b1cf78 | ["MIT"] | 7 | 2021-06-10T23:24:05.000Z | 2022-03-03T21:48:12.000Z | radio/apps.py | MaxwellDPS/trunk-player | 19f116e64249823f3a12b35ed55252db60b1cf78 | ["MIT"] | 1 | 2022-03-26T07:04:21.000Z | 2022-03-26T07:04:21.000Z |
from importlib import import_module
from django.db.models.signals import post_migrate
from django.apps import AppConfig
def default_data_setup(sender, **kwargs):
from django.contrib.auth.models import User
try:
anon = User.objects.get(username='ANONYMOUS_USER')
except User.DoesNotExist:
print('Adding ANONYMOUS_USER')
anon = User.objects.create_user('ANONYMOUS_USER', '[email protected]')
    # Make the user account unusable (no usable password, marked inactive)
anon.set_unusable_password()
anon.is_active = False
anon.save()
class RadioConfig(AppConfig):
name = 'radio'
def ready(self):
post_migrate.connect(default_data_setup, sender=self)
| 27.076923 | 87 | 0.697443 |
from importlib import import_module
from django.db.models.signals import post_migrate
from django.apps import AppConfig
def default_data_setup(sender, **kwargs):
from django.contrib.auth.models import User
try:
anon = User.objects.get(username='ANONYMOUS_USER')
except User.DoesNotExist:
print('Adding ANONYMOUS_USER')
anon = User.objects.create_user('ANONYMOUS_USER', '[email protected]')
anon.set_unusable_password()
anon.is_active = False
anon.save()
class RadioConfig(AppConfig):
name = 'radio'
def ready(self):
post_migrate.connect(default_data_setup, sender=self)
| true | true |
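For RadioConfig.ready() above to run and hook post_migrate, Django has to load that AppConfig for the radio app. A sketch of the two usual wirings on Django projects of that era; the file paths are assumptions:

```python
# radio/__init__.py -- older default_app_config convention
default_app_config = 'radio.apps.RadioConfig'

# settings.py -- or point INSTALLED_APPS directly at the config class
INSTALLED_APPS = [
    # ...
    'radio.apps.RadioConfig',
]
```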
f7005624326db8bd844029d49a4f69d03cd93970 | 3,257 | py | Python | settings/ngrams.py | GU-DataLab/topic-modeling-textPrep | 8138040308f33785cc5baa5f5982c2543e26ae0f | ["MIT"] | 1 | 2022-03-19T11:33:49.000Z | 2022-03-19T11:33:49.000Z | settings/ngrams.py | GU-DataLab/topic-modeling-textPrep | 8138040308f33785cc5baa5f5982c2543e26ae0f | ["MIT"] | null | null | null | settings/ngrams.py | GU-DataLab/topic-modeling-textPrep | 8138040308f33785cc5baa5f5982c2543e26ae0f | ["MIT"] | null | null | null |
from nltk.util import ngrams
from nltk.corpus import stopwords
from collections import Counter
from .common import get_pp_pipeline
def or_list(booleans):
return True in booleans
def get_ngrams(D):
'''
Returns all ngrams (aka a token containing a dollar sign ($)) from a set of topics or documents
    :param D: a collection of tokenized documents or topics
:return:
'''
ngrams = set()
for d in D:
for w in d:
if '$' in w:
ngrams.add(w)
return list(ngrams)
def get_frequent_ngrams(text, n, stopword_list, threshold):
bigrams = ngrams(text, n)
bigram_freq = Counter(bigrams)
frequent_bigrams = []
for bigram, freq in bigram_freq.most_common():
if not (or_list([i in stopword_list for i in bigram])):
if freq > threshold:
frequent_bigrams.append('{}${}'.format(bigram[0], bigram[1]))
else:
break
return frequent_bigrams
def ngrammize_text(text, ngrams):
bigrammized_text = []
i = 0
while i < len(text):
term = text[i]
if i == len(text)-1:
bigrammized_text.append(term)
else:
next_term = text[i+1]
test_bigram = '{}${}'.format(term, next_term)
if test_bigram in ngrams:
bigrammized_text.append(test_bigram)
i += 1
else:
bigrammized_text.append(term)
i += 1
return bigrammized_text
def get_dataset_ngrams(docs, min_freq=1000, sw=None, extra_bigrams=None, extra_ngrams=None):
if not sw:
sw = stopwords.words('english')
sw_pp = get_pp_pipeline(remove_stopwords=False)
sw = sw_pp.clean_document(sw)
full_text = []
for doc in docs:
full_text.extend(doc)
frequent_bigrams = get_frequent_ngrams(full_text, 2, sw, min_freq)
if extra_bigrams:
frequent_bigrams.extend(extra_bigrams)
bigrammized_text = ngrammize_text(full_text, frequent_bigrams)
frequent_ngrams = get_frequent_ngrams(bigrammized_text, 2, sw, min_freq)
if extra_ngrams:
frequent_ngrams.extend(extra_ngrams)
return frequent_bigrams, frequent_ngrams
def insert_ngrams_flat_from_lists(docs, frequent_bigrams, frequent_ngrams):
for i in range(0, len(docs)):
doc = docs[i]
doc = ngrammize_text(doc, frequent_bigrams)
doc = ngrammize_text(doc, frequent_ngrams)
docs[i] = doc
return docs
def insert_ngrams_flat(docs, min_freq=1000, sw=None, extra_bigrams=None, extra_ngrams=None):
fb, fn = get_dataset_ngrams(docs, min_freq, sw, extra_bigrams, extra_ngrams)
return insert_ngrams_flat_from_lists(docs, fb, fn)
def insert_ngrams_from_lists(date_doc_tuples, frequent_bigrams, frequent_ngrams):
for i in range(0, len(date_doc_tuples)):
date, doc = date_doc_tuples[i]
doc = ngrammize_text(doc, frequent_bigrams)
doc = ngrammize_text(doc, frequent_ngrams)
date_doc_tuples[i] = (date, doc)
return date_doc_tuples
def insert_ngrams(date_docs, min_freq=1000, sw=None, extra_bigrams=None, extra_ngrams=None):
fb, fn = get_dataset_ngrams([x[1] for x in date_docs], min_freq, sw, extra_bigrams, extra_ngrams)
return insert_ngrams_from_lists(date_docs, fb, fn)
| 32.57 | 101 | 0.661652 |
from nltk.util import ngrams
f700567345c8f7d640f885f9ec02c826d22cb2ad | 9,755 | py | Python | core/src/autogluon/core/task/base/base_task.py | zhiqiangdon/autogluon | Apache-2.0
import collections
import copy
import logging
import time
from abc import abstractmethod
from ...scheduler import HyperbandScheduler, RLScheduler, FIFOScheduler
from ...scheduler.seq_scheduler import LocalSequentialScheduler
from ...utils import in_ipynb, try_import_mxnet
from ...utils.utils import setup_compute
__all__ = [
'BaseTask',
'compile_scheduler_options',
'compile_scheduler_options_v2',
'create_scheduler']
Results = collections.namedtuple('Results', 'model reward config time metadata')
schedulers = {
'local': LocalSequentialScheduler,
'fifo': FIFOScheduler,
'rl': RLScheduler,
'hyperband_stopping': HyperbandScheduler,
'hyperband_promotion': HyperbandScheduler,
}
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
def create_scheduler(train_fn, scheduler, scheduler_options):
if isinstance(scheduler, str):
scheduler_cls = schedulers[scheduler.lower()]
else:
assert callable(scheduler)
scheduler_cls = scheduler
scheduler_options = copy.copy(scheduler_options)
return scheduler_cls(train_fn, **scheduler_options)
class BaseTask(object):
"""BaseTask for AutoGluon applications
"""
@property
@staticmethod
def Dataset():
try_import_mxnet()
from autogluon.mxnet.utils.dataset import BaseDataset
return BaseDataset
@classmethod
def run_fit(cls, train_fn, search_strategy, scheduler_options,
plot_results=False):
start_time = time.time()
# create scheduler and schedule tasks
scheduler = create_scheduler(train_fn, search_strategy, scheduler_options)
scheduler.run()
scheduler.join_jobs()
# gather the best configuration
best_reward = scheduler.get_best_reward()
best_config = scheduler.get_best_config()
args = train_fn.args
args.final_fit = True
if hasattr(args, 'epochs') and hasattr(args, 'final_fit_epochs'):
args.epochs = args.final_fit_epochs
train_fn.args.update({'final_fit':True})
train_fn.kwvars.update({'final_fit':True})
scheduler_final = create_scheduler(train_fn, search_strategy, scheduler_options)
results = scheduler_final.run_with_config(best_config)
total_time = time.time() - start_time
if plot_results or in_ipynb():
plot_training_curves = scheduler_options['checkpoint'].replace('exp1.ag', 'plot_training_curves.png')
scheduler.get_training_curves(filename=plot_training_curves, plot=True, use_legend=False)
record_args = copy.deepcopy(args)
if results is None:
logger.warning('No valid results obtained with best config, the result may not be useful...')
results = {}
results.update(best_reward=best_reward,
best_config=best_config,
total_time=total_time,
metadata=scheduler.metadata,
training_history=scheduler.training_history,
config_history=scheduler.config_history,
reward_attr=scheduler._reward_attr,
args=record_args)
return results
@classmethod
@abstractmethod
def fit(cls, *args, **kwargs):
pass
# These search_strategies use HyperbandScheduler, along with certain
# searchers.
searcher_for_hyperband_strategy = {
'hyperband': 'random',
'bayesopt_hyperband': 'bayesopt'}
def compile_scheduler_options(
scheduler_options, search_strategy, search_options, nthreads_per_trial,
ngpus_per_trial, checkpoint, num_trials, time_out, resume, visualizer,
time_attr, reward_attr, dist_ip_addrs, epochs=None):
"""
Updates a copy of scheduler_options (scheduler-specific options, can be
empty) with general options. The result can be passed to __init__ of the
scheduler.
Special role of epochs for HyperbandScheduler: If the search_strategy
involves HyperbandScheduler and epochs is given, then this value is
copied to scheduler_options['max_t']. Pass epochs for applications
where the time_attr is epoch, and epochs is the maximum number of
epochs.
:param scheduler_options:
:param search_strategy:
:param search_options:
:param nthreads_per_trial:
:param ngpus_per_trial:
:param checkpoint:
:param num_trials:
:param time_out:
:param resume:
:param visualizer:
:param time_attr:
:param reward_attr:
:param dist_ip_addrs:
:param kwargs:
:param epochs: See above. Optional
:return: Copy of scheduler_options with updates
"""
if scheduler_options is None:
scheduler_options = dict()
else:
assert isinstance(scheduler_options, dict)
assert isinstance(search_strategy, str)
if search_options is None:
search_options = dict()
if visualizer is None:
visualizer = 'none'
if time_attr is None:
time_attr = 'epoch'
if reward_attr is None:
reward_attr = 'accuracy'
scheduler_options = copy.copy(scheduler_options)
scheduler_options.update({
'resource': {
'num_cpus': nthreads_per_trial, 'num_gpus': ngpus_per_trial},
'searcher': search_strategy,
'search_options': search_options,
'checkpoint': checkpoint,
'resume': resume,
'num_trials': num_trials,
'time_out': time_out,
'reward_attr': reward_attr,
'time_attr': time_attr,
'visualizer': visualizer,
'dist_ip_addrs': dist_ip_addrs})
searcher = searcher_for_hyperband_strategy.get(search_strategy)
if searcher is not None:
scheduler_options['searcher'] = searcher
if epochs is not None:
scheduler_options['max_t'] = epochs
return scheduler_options
# TODO: Migrate TextPredictor to use this version, delete old version
def compile_scheduler_options_v2(
scheduler_options, nthreads_per_trial,
ngpus_per_trial, num_trials, time_out, scheduler=None, search_strategy=None, search_options=None, checkpoint=None, resume=False, visualizer=None,
time_attr=None, reward_attr=None, dist_ip_addrs=None, epochs=None):
"""
Updates a copy of scheduler_options (scheduler-specific options, can be
empty) with general options. The result can be passed to __init__ of the
scheduler.
Special role of epochs for HyperbandScheduler: If the search_strategy
involves HyperbandScheduler and epochs is given, then this value is
copied to scheduler_options['max_t']. Pass epochs for applications
where the time_attr is epoch, and epochs is the maximum number of
epochs.
:param scheduler_options:
:param scheduler:
:param search_strategy:
:param search_options:
:param nthreads_per_trial:
:param ngpus_per_trial:
:param checkpoint:
:param num_trials:
:param time_out:
:param resume:
:param visualizer:
:param time_attr:
:param reward_attr:
:param dist_ip_addrs:
:param kwargs:
:param epochs: See above. Optional
:return: Copy of scheduler_options with updates
"""
if scheduler_options is None:
scheduler_options = dict()
else:
assert isinstance(scheduler_options, dict)
scheduler_options = copy.copy(scheduler_options)
if dist_ip_addrs is None:
dist_ip_addrs = []
if search_strategy is None:
search_strategy = 'random'
if scheduler is None:
scheduler = 'local'
assert isinstance(search_strategy, str)
if search_options is None:
search_options = dict()
if visualizer is None:
visualizer = 'none'
if time_attr is None:
time_attr = 'epoch'
if reward_attr is None:
reward_attr = 'validation_performance'
scheduler_params = {
'resource': {
'num_cpus': nthreads_per_trial, 'num_gpus': ngpus_per_trial},
'scheduler': scheduler,
'searcher': search_strategy,
'search_options': search_options,
'checkpoint': checkpoint,
'resume': resume,
'num_trials': num_trials,
'time_out': time_out,
'reward_attr': reward_attr,
'time_attr': time_attr,
'visualizer': visualizer,
'dist_ip_addrs': dist_ip_addrs,
}
resource = None
if 'resource' in scheduler_options:
scheduler_params['resource'].update(scheduler_options['resource'])
resource = scheduler_params['resource'].copy()
scheduler_params.update(scheduler_options)
if resource:
scheduler_params['resource'] = resource
scheduler_params['resource']['num_cpus'], scheduler_params['resource']['num_gpus'] = setup_compute(
nthreads_per_trial=scheduler_params['resource']['num_cpus'],
ngpus_per_trial=scheduler_params['resource']['num_gpus'],
) # TODO: use 'auto' downstream
searcher = searcher_for_hyperband_strategy.get(scheduler_params['searcher'])
if searcher is not None:
scheduler_params['searcher'] = searcher
if epochs is not None:
scheduler_params['max_t'] = epochs
required_options = [
'resource',
'scheduler',
'searcher',
'search_options',
'checkpoint',
'resume',
'num_trials',
'time_out',
'reward_attr',
'time_attr',
'visualizer',
'dist_ip_addrs',
]
missing_options = []
for option in required_options:
if option not in scheduler_params:
missing_options.append(option)
if missing_options:
raise AssertionError(f'Missing required keys in scheduler_options: {missing_options}')
return scheduler_params
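# --- Usage sketch (added for illustration) ---
# A hypothetical call showing how the general options are compiled; the
# argument values are made up. With a hyperband search strategy the searcher
# is remapped to its base searcher ('random') and `epochs` is copied into
# 'max_t'. Run inside the installed package, since the relative imports at
# the top of this module require it.
if __name__ == "__main__":
    params = compile_scheduler_options_v2(
        scheduler_options=None,
        nthreads_per_trial=4,
        ngpus_per_trial=0,
        num_trials=10,
        time_out=3600,
        search_strategy='hyperband',
        epochs=20,
    )
    print(params['scheduler'], params['searcher'], params['max_t'])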
f700577848be1d3db591bde11b9367926ef378b4 | 57,165 | py | Python | CPAC/utils/create_fsl_model.py | chrisfoulon/C-PAC | BSD-3-Clause
def load_pheno_file(pheno_file):
import os
import pandas as pd
if not os.path.isfile(pheno_file):
err = "\n\n[!] CPAC says: The group-level analysis phenotype file "\
"provided does not exist!\nPath provided: %s\n\n" \
% pheno_file
raise Exception(err)
with open(os.path.abspath(pheno_file),"r") as f:
pheno_dataframe = pd.read_csv(f)
return pheno_dataframe
def load_group_participant_list(group_participant_list_file):
import os
import pandas as pd
if not os.path.isfile(group_participant_list_file):
err = "\n\n[!] CPAC says: The group-level analysis subject list "\
"provided does not exist!\nPath provided: %s\n\n" \
% group_participant_list_file
raise Exception(err)
with open(group_participant_list_file,"r") as f:
group_subs_dataframe = pd.read_csv(f)
if "participant" not in ga_sublist.columns:
err = "\n\n[!] CPAC says: Your group-level analysis subject "\
"list CSV is missing a 'participant' column.\n\n"
raise Exception(err)
return group_subs_dataframe
def process_pheno_file(pheno_file_dataframe, group_subs_dataframe, \
participant_id_label):
# drops participants from the phenotype file if they are not in the group
# analysis participant list
# also handles sessions and series appropriately for repeated measures
# input
# pheno_file_dataframe: Pandas dataframe of the phenotype file
# group_subs_dataframe: Pandas dataframe of the group analysis
# participant list
# participant_id_label: string of the name of the participant column in
# the phenotype file
# output
# pheno_file_rows: a list of dictionaries, with each dictionary being
# one of the rows from the phenotype file, with the
# format of {header: value, header: value, ..}
import os
import pandas as pd
if not isinstance(pheno_file_dataframe, pd.DataFrame):
err = "\n\n[!] CPAC says: The phenotype information input should " \
"be a Python Pandas dataframe object.\n\n"
raise Exception(err)
if not isinstance(group_subs_dataframe, pd.DataFrame):
err = "\n\n[!] CPAC says: The group analysis participant list input "\
"should be a Python Pandas dataframe object.\n\n"
raise Exception(err)
if not isinstance(participant_id_label, str):
err = "\n\n[!] CPAC says: The participant ID label input should be " \
"a string.\n\n"
pheno = pheno_file_dataframe
pheno_file_rows = []
df_rows = []
# convert from dataframe to list and make those strings just in case
subjects = list(group_subs_dataframe.participant)
subjects = [str(i) for i in subjects]
sessions = None
series = None
if "session" in group_subs_dataframe.columns:
sessions = list(group_subs_dataframe.session)
sessions = [str(i) for i in sessions]
if "series" in group_subs_dataframe.columns:
series = list(group_subs_dataframe.series)
series = [str(i) for i in series]
# use an integer for iteration because we're not sure if there will be
# sessions and/or series
for i in range(0,len(subjects)):
full_id = []
subject = subjects[i]
full_id.append(subject)
if sessions and series:
session = sessions[i]
scan = series[i]
full_id.append(session)
full_id.append(scan)
try:
row = pheno[(pheno[participant_id_label] == subject) & \
(pheno.session == session) & \
(pheno.series == scan)]
except:
row = pheno[(pheno[participant_id_label] == int(subject)) & \
(pheno.session == session) & \
(pheno.series == scan)]
elif sessions:
session = sessions[i]
full_id.append(session)
try:
row = pheno[(pheno[participant_id_label] == subject) & \
(pheno.session == session)]
except:
row = pheno[(pheno[participant_id_label] == int(subject)) & \
(pheno.session == session)]
elif series:
scan = series[i]
full_id.append(scan)
try:
row = pheno[(pheno[participant_id_label] == subject) & \
(pheno.series == scan)]
except:
row = pheno[(pheno[participant_id_label] == int(subject)) & \
(pheno.series == scan)]
else:
full_id.append(subject)
try:
row = pheno[(pheno[participant_id_label] == subject)]
except:
row = pheno[(pheno[participant_id_label] == int(subject))]
if len(row) > 1:
err = "\n\n[!] CPAC says: Multiple phenotype entries were " \
"found for these criteria:\n\n%s\n\nPlease ensure " \
"your group analysis participant list and phenotype " \
"file are configured correctly.\n\n" % str(full_id)
raise Exception(err)
elif len(row) == 1:
df_rows.append(row)
new_pheno_df = pd.concat(df_rows)
pheno_file_rows = new_pheno_df.to_dict("records")
return pheno_file_rows
def create_pheno_dict(pheno_file_rows, ev_selections, participant_id_label):
# creates the phenotype data dictionary in a format Patsy requires,
# and also demeans the continuous EVs marked for demeaning
# input
# pheno_file_rows: a list of dictionaries, with each dictionary being
# one of the rows from the phenotype file, with the
# format of {header: value, header: value, ..}
# ev_selections: a dictionary with keys for "categorical" and "demean",
# with the entries being lists of phenotype EV names
# participant_id_label: string of the name of the participant column in
# the phenotype file
# output
# pheno_data_dict: a dictionary with each key being a phenotype column,
# and each entry being a list of values, IN ORDER
# data is also in Patsy-acceptable format
import os
import csv
import numpy as np
pheno_data_dict = {}
for line in pheno_file_rows:
for val in line.values():
# if there are any blank values in the pheno row, skip this
# row. if not, continue on with the "else" clause
if val == "":
break
else:
for key in line.keys():
# if there are blank entries because of an empty row in
# the CSV (such as ",,,,,"), move on to the next entry
#if len(line[key]) == 0:
# continue
if key not in pheno_data_dict.keys():
pheno_data_dict[key] = []
# create a list within one of the dictionary values for
# that EV if it is categorical; formats this list into a
# form Patsy can understand regarding categoricals:
# example: { ADHD: ['adhd1', 'adhd1', 'adhd0'] }
# instead of just [1, 1, 0], etc.
if 'categorical' in ev_selections.keys():
if key in ev_selections['categorical']:
pheno_data_dict[key].append(key + str(line[key]))
elif (key == participant_id_label) or (key == "session") or \
(key == "series"):
pheno_data_dict[key].append(line[key])
else:
pheno_data_dict[key].append(float(line[key]))
elif (key == participant_id_label) or (key == "session") or \
(key == "series"):
pheno_data_dict[key].append(line[key])
else:
pheno_data_dict[key].append(float(line[key]))
# this needs to run after each list in each key has been fully
# populated above
for key in pheno_data_dict.keys():
# demean the EVs marked for demeaning
if 'demean' in ev_selections.keys():
if key in ev_selections['demean']:
new_demeaned_evs = []
mean_evs = 0.0
# populate a dictionary, a key for each demeanable EV, with
# the value being the sum of all the values (which need to be
# converted to float first)
for val in pheno_data_dict[key]:
mean_evs += float(val)
# calculate the mean of the current EV in this loop
mean_evs = mean_evs / len(pheno_data_dict[key])
# remove the EV's mean from each value of this EV
# (demean it!)
for val in pheno_data_dict[key]:
new_demeaned_evs.append(float(val) - mean_evs)
# replace
pheno_data_dict[key] = new_demeaned_evs
# converts non-categorical EV lists into NumPy arrays
# so that Patsy may read them in properly
if 'categorical' in ev_selections.keys():
if key not in ev_selections['categorical']:
pheno_data_dict[key] = np.array(pheno_data_dict[key])
return pheno_data_dict
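# --- Illustrative sketch (added; all rows and labels below are hypothetical) ---
# Shows how create_pheno_dict reshapes phenotype rows into the Patsy-ready
# format: 'dx' is treated as categorical (values become 'dx1'/'dx0') and
# 'age' is demeaned and converted to a NumPy array.
if __name__ == "__main__":
    _rows = [{"Participant": "sub001", "age": 20.0, "dx": 1},
             {"Participant": "sub002", "age": 30.0, "dx": 0}]
    _sel = {"categorical": ["dx"], "demean": ["age"]}
    print(create_pheno_dict(_rows, _sel, "Participant"))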
def get_measure_dict(param_file):
# load the CPAC-generated power parameters file and parse it
# input
# param_file: a full path to the CPAC-generated power parameters CSV
# output
# measure_dict: a dictionary of dictionaries in the following format
# {"MeanFD_Power": {"participant_01": 15.43,
# "participant_02": 13.22},
# "MeanFD_Jenkinson": {"participant_01": 18.55,
# "participant_02": 16.27},
# ...}
import os
import pandas as pd
if not os.path.isfile(param_file):
err = "\n\n[!] CPAC says: You've included a motion parameter in " \
"your group-level analysis model design formula, but " \
"there is no motion parameters file available.\n\n"
raise Exception(err)
with open(param_file,"r") as f:
motion_params = pd.read_csv(f, index_col=False)
measures = ['MeanFD_Power', 'MeanFD_Jenkinson', 'MeanDVARS']
measure_dict = {}
for m in measures:
measure_map = {}
if m in motion_params.columns:
part_ids = list(motion_params["Subject"])
part_ids = [str(i) for i in part_ids]
scan_ids = list(motion_params["Scan"])
scan_ids = [str(i) for i in scan_ids]
measure_vals = list(motion_params[m])
measure_vals = [float(i) for i in measure_vals]
for part_id, scan_id, measure_val in \
zip(part_ids, scan_ids, measure_vals):
measure_map[(part_id,scan_id)] = measure_val
measure_dict[m] = measure_map
return measure_dict
def get_custom_roi_info(roi_means_dict):
# check
if roi_means_dict == None:
err_string = "\n\n[!] CPAC says: The custom ROI means were not " \
"calculated properly during the group analysis " \
"model generation.\n\n"
raise Exception(err_string)
roi_num = len(roi_means_dict.values()[0])
# this will be a dictionary matching ROI regressor header labels with
# the actual ROI dictionaries
roi_dict_dict = {}
# split the roi_means_dict from { subID: [mean1,mean2,mean3,..], ..}
# to three dictionaries of { subID: mean1, .. }, { subID: mean2, .. },
# and so on
for num in range(0,roi_num):
label = "Custom_ROI_Mean_%d" % int(num+1)
temp_roi_dict = {}
for key in roi_means_dict.keys():
temp_roi_dict[key] = roi_means_dict[key][num]
roi_dict_dict[label] = temp_roi_dict
return roi_dict_dict
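# --- Tiny illustration (added; the means below are hypothetical) ---
# Splits {participant: [mean_roi1, mean_roi2]} into one dictionary per ROI
# column, keyed Custom_ROI_Mean_1, Custom_ROI_Mean_2, ...
if __name__ == "__main__":
    print(get_custom_roi_info({"sub001": [3.2, 2.1], "sub002": [1.8, 3.0]}))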
def model_group_var_separately(grouping_var, formula, pheno_data_dict, \
ev_selections, coding_scheme):
if grouping_var == None or grouping_var not in formula:
print '\n\n[!] CPAC says: Model group variances separately is ' \
'enabled, but the grouping variable set is either set to ' \
'None, or was not included in the model as one of the ' \
'EVs.\n'
print 'Design formula: ', formula
print 'Grouping variable: ', grouping_var, '\n\n'
raise Exception
# do this a little early for the grouping variable so that it doesn't
# get in the way of doing this for the other EVs once they have the
# grouping variable in their names
if 'categorical' in ev_selections.keys():
for EV_name in ev_selections['categorical']:
if EV_name == grouping_var:
if coding_scheme == 'Treatment':
formula = formula.replace(EV_name, 'C(' + EV_name + ')')
elif coding_scheme == 'Sum':
formula = formula.replace(EV_name, 'C(' + EV_name + \
', Sum)')
groupvar_levels = []
grouping_var_id_dict = {}
idx = 0
for cat_ev_value in pheno_data_dict[grouping_var]:
# here, each "cat_ev_value" will be one of the Patsy-format values
# of the categorical EV that the user has selected as the grouping
# variable, i.e. "sex1, sex1, sex0, sex1", etc..
# cat_ev_level is the level digit or label without the EV name
# ex. sex1 becomes 1
cat_ev_level = str(cat_ev_value).replace(str(grouping_var), "")
if cat_ev_level not in groupvar_levels:
groupvar_levels.append(cat_ev_level)
# groupvar_levels only keeps track of how many levels there are in
# the grouping variable
# populate this dict for creating the .grp file:
try:
grouping_var_id_dict[cat_ev_level].append(idx)
except:
grouping_var_id_dict[cat_ev_level] = [idx]
idx += 1
split_EVs = {}
for key in pheno_data_dict.keys():
# here, "key" is the name of each EV from the phenotype file, as
# they are labeled in the phenotype file (not Patsy format)
if (key in formula) and (key != grouping_var):
# for the formula edit
new_key_string = ""
for level in groupvar_levels:
# for the new split EV label
groupvar_with_level = str(grouping_var) + str(level)
new_key = key + "__" + groupvar_with_level
# for the formula edit
if new_key_string == "":
new_key_string = new_key
else:
new_key_string = new_key_string + " + " + new_key
split_EVs[new_key] = []
# for the formula as well
if key in ev_selections["categorical"]:
ev_selections["categorical"].append(new_key)
for val, groupvar_val in zip(pheno_data_dict[key], \
pheno_data_dict[grouping_var]):
if groupvar_with_level == groupvar_val:
split_EVs[new_key].append(val)
else:
split_EVs[new_key].append(0)
del pheno_data_dict[key]
if key in ev_selections["categorical"]:
ev_selections["categorical"].remove(key)
# formula edit
formula = formula.replace(key, new_key_string)
# put split EVs into pheno data dict
pheno_data_dict.update(split_EVs)
# parse through ev_selections, find the categorical names within the
# design formula and insert C(<name>, Sum) into the design formula
# this is required for Patsy to process the categorical EVs
# properly when generating the design matrix (this goes into the
# .mat file)
if 'categorical' in ev_selections.keys():
for EV_name in ev_selections['categorical']:
if EV_name != grouping_var:
if coding_scheme == 'Treatment':
formula = formula.replace(EV_name, 'C(' + EV_name + ')')
elif coding_scheme == 'Sum':
formula = formula.replace(EV_name, 'C(' + EV_name + \
', Sum)')
# remove intercept when modeling group variances separately
formula = formula + " - 1"
return pheno_data_dict, formula, grouping_var_id_dict
def check_multicollinearity(matrix):
import numpy as np
print "\nChecking for multicollinearity in the model.."
U, s, V = np.linalg.svd(matrix)
max_singular = np.max(s)
min_singular = np.min(s)
print "Max singular: ", max_singular
print "Min singular: ", min_singular
print "Rank: ", np.linalg.matrix_rank(matrix), "\n"
if min_singular == 0:
print '[!] CPAC warns: Detected multicollinearity in the ' \
'computed group-level analysis model. Please double-' \
'check your model design.\n\n'
else:
condition_number = float(max_singular)/float(min_singular)
print "Condition number: %f\n\n" % condition_number
if condition_number > 30:
print '[!] CPAC warns: Detected multicollinearity in the ' \
'computed group-level analysis model. Please double-' \
'check your model design.\n\n'
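# --- Quick illustration (added; the matrix below is hypothetical) ---
# Two identical columns drive the smallest singular value to (near) zero,
# which triggers the multicollinearity warning above.
if __name__ == "__main__":
    import numpy as np
    check_multicollinearity(np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]))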
def write_mat_file(design_matrix, output_dir, model_name, \
depatsified_EV_names, current_output=None):
import os
import numpy as np
dimx = None
dimy = None
if len(design_matrix.shape) == 1:
dimy = 1
dimx = design_matrix.shape[0]
else:
dimx, dimy = design_matrix.shape
ppstring = '/PPheights'
for i in range(0, dimy):
ppstring += '\t' + '%1.5e' %(1.0)
ppstring += '\n'
filename = model_name + ".mat"
out_file = os.path.join(output_dir, filename)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(out_file, 'wt') as f:
print >>f, '/NumWaves\t%d' %dimy
print >>f, '/NumPoints\t%d' %dimx
print >>f, ppstring
# print labels for the columns - mainly for double-checking your model
col_string = '\n'
for col in depatsified_EV_names:
col_string = col_string + col + '\t'
print >>f, col_string, '\n'
print >>f, '/Matrix'
np.savetxt(f, design_matrix, fmt='%1.5e', delimiter='\t')
def create_grp_file(design_matrix, grouping_var_id_dict, output_dir, \
model_name, current_output=None):
import os
import numpy as np
dimx = None
dimy = None
if len(design_matrix.shape) == 1:
dimy = 1
dimx = design_matrix.shape[0]
else:
dimx, dimy = design_matrix.shape
design_matrix_ones = np.ones(dimx)
if not (grouping_var_id_dict == None):
i = 1
for key in sorted(grouping_var_id_dict.keys()):
for index in grouping_var_id_dict[key]:
design_matrix_ones[index] = i
i += 1
filename = model_name + ".grp"
out_file = os.path.join(output_dir, filename)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(out_file, "wt") as f:
print >>f, '/NumWaves\t1'
print >>f, '/NumPoints\t%d\n' %dimx
print >>f, '/Matrix'
np.savetxt(f, design_matrix_ones, fmt='%d', delimiter='\t')
def create_design_matrix(pheno_file, ev_selections, formula, \
subject_id_label, sub_list=None, \
coding_scheme="Treatment", grouping_var=None, \
new_regressor_dict=None, roi_means_dict=None, \
output_dir=None, model_name="design", \
current_output=None):
# this should allow the user to easily create a FLAMEO-formatted .mat file
# and .grp file from the command line or from within CPAC
# input
# pheno_file: full path to a CSV file with the phenotypic data
#
# ev_selections: a Python dictionary of two lists denoting which EVs are
# categorical, and which should be demeaned
# format - {"categorical": ["dx_group", "sex"],
# "demean": ["age"]}
#
# formula: a string with the Patsy-format design matrix formula
# more info here:
# http://patsy.readthedocs.org/en/latest/formulas.html
#
# subject_id_label: a string denoting the header label of the subject ID
# column in the phenotype CSV file
#
# sub_list: (optional) full path to a CSV file containing the
# participant IDs, and optionally session and/or series IDs
# that you want included from the phenotypic file
# NOTE: if not provided, all rows from phenotypic are included
# in the model
#
# coding_scheme: (optional) which encoding scheme Patsy should use when
# creating the design matrix - "Treatment" (default) or
# "Sum"
#
# grouping_var: (optional) the grouping variable to use if modeling
# group variances separately
#
# new_regressor_dict: (optional) a Python dictionary containing other
# dictionaries of subject IDs matched to the values
# of each new regressor
# format - {"MeanFD": {"sub001": 0.493,
# "sub002": 0.211,},
# "Measure_Mean": {"sub001": 0.193,
# "sub002": 0.392},
# ..}
#
# roi_means_dict: (optional) a Python dictionary of lists containing the
# mean values of user-defined ROIs of the derivative for
# each subject
# format - {"sub001": [3.23, 2.11],
# "sub002": [1.79, 3.03]}
# (with the length of the lists being the number of
# ROIs specified)
#
# output_dir: (optional) where to write the .mat file
#
# model_name: (optional) name of the group analysis model
#
# current_output: (optional) name of the derivative in the analysis
#
# output
# dmatrix: a Patsy object of the design matrix
# depatsified_EV_names: a list of the column names of the design matrix
import os
import patsy
import numpy as np
# if running this script alone outside of CPAC
if output_dir == None:
output_dir = os.getcwd()
# let's process the phenotype file data and drop rows (participants) if
# they are not listed in the participant list
pheno_file_df = load_pheno_file(pheno_file)
participant_list_df = load_group_participant_list(sub_list)
pheno_file_rows = process_pheno_file(pheno_file_df, participant_list_df, \
subject_id_label)
# get number of subjects that have the derivative for this current model
# (basically, the amount of time points, which must be greater than the
# number of EVs)
num_subjects = len(participant_list_df)
# for repeated measures
if "session" in participant_list_df.columns:
ev_selections["categorical"].append("session")
if "series" in participant_list_df.columns:
ev_selections["categorical"].append("series")
# start adding additionally created EVs
if new_regressor_dict:
for measure in new_regressor_dict.keys():
if (measure in formula):
measure_dict = new_regressor_dict[measure]
for pheno_row_dict in pheno_file_rows:
participant_id = pheno_row_dict[subject_id_label]
if ("session" in pheno_row_dict.keys()) and \
("series" in pheno_row_dict.keys()):
session_id = pheno_row_dict["session"]
series_id = pheno_row_dict["series"]
participant_tuple = \
(participant_id, session_id, series_id)
elif "session" in pheno_row_dict.keys():
session_id = pheno_row_dict["session"]
participant_tuple = (participant_id, session_id)
elif "series" in pheno_row_dict.keys():
series_id = pheno_row_dict["series"]
participant_tuple = (participant_id, series_id)
else:
participant_tuple = (participant_id)
pheno_row_dict[measure] = measure_dict[participant_tuple]
ev_selections["demean"].append(measure)
if "Custom_ROI_Mean" in formula:
# include the means of the specified ROIs as regressors
if roi_means_dict == None:
err = "\n\n[!] You included 'Custom_ROI_Mean' in your model " \
"design, but there are no mean of ROI values provided." \
"\n\n"
raise Exception(err)
# roi_dict_dict is a dictionary of dictionaries, with each dictionary
# holding all of the means for one ROI, with each entry being a mean
# for a participant (the keys are the participant IDs)
# ex. {participant_01: 35.15, participant_02: 50.00}
# with the float values being all of the means of one of
# the ROIs specified
# there will be a dictionary for each ROI specified
roi_dict_dict = get_custom_roi_info(roi_means_dict)
add_formula_string = ""
for roi_column in roi_dict_dict.keys():
roi_dict = roi_dict_dict[roi_column]
for pheno_row_dict in pheno_file_rows:
participant_id = pheno_row_dict[subject_id_label]
if ("session" in pheno_row_dict.keys()) and \
("series" in pheno_row_dict.keys()):
session_id = pheno_row_dict["session"]
series_id = pheno_row_dict["series"]
participant_tuple = \
(participant_id, session_id, series_id)
elif "session" in pheno_row_dict.keys():
session_id = pheno_row_dict["session"]
participant_tuple = (participant_id, session_id)
elif "series" in pheno_row_dict.keys():
series_id = pheno_row_dict["series"]
participant_tuple = (participant_id, series_id)
else:
participant_tuple = (participant_id)
pheno_row_dict[roi_column] = roi_dict[participant_tuple]
ev_selections["demean"].append(roi_column)
# create a string of all the new custom ROI regressor column names
# to be inserted into the design formula, so that Patsy will
# accept the phenotypic data dictionary that now has these columns
if add_formula_string == "":
add_formula_string = add_formula_string + roi_column
else:
add_formula_string = add_formula_string + " + " + roi_column
# a regressor column of ROI means for each custom-specified ROI has
# now been added to the model with appropriate column labels
formula = formula.replace("Custom_ROI_Mean",add_formula_string)
# return the data from the phenotype file processed properly for Patsy
# and load it into 'pheno_data_dict'
# format: dictionary, each key is the name of an EV, and its value is
# a LIST of values in order of the subjects
# - categorical EVs are already renamed from '0,1,..' to
# 'EV0,EV1,..' with EV being the EV name
# - EVs to be demeaned are already demeaned
# - numerical EVs (non-categorical) are in a list which
# have been converted into a NumPy array
pheno_data_dict = create_pheno_dict(pheno_file_rows, ev_selections, \
subject_id_label)
# handle modeling group variances separately (if enabled), then edit the
# formula to be in Patsy language
if grouping_var != None:
pheno_data_dict, formula, grouping_var_id_dict = \
model_group_var_separately(grouping_var, \
formula, pheno_data_dict, \
ev_selections, coding_scheme)
else:
grouping_var_id_dict = None
if 'categorical' in ev_selections.keys():
for EV_name in ev_selections['categorical']:
if coding_scheme == 'Treatment':
formula = formula.replace(EV_name, 'C(' + EV_name + ')')
elif coding_scheme == 'Sum':
formula = formula.replace(EV_name, 'C(' + EV_name + \
', Sum)')
# create the Patsy design matrix!
try:
dmatrix = patsy.dmatrix(formula, pheno_data_dict, NA_action='raise')
except:
print '\n\n[!] CPAC says: Design matrix creation wasn\'t ' \
'successful - do the terms in your formula correctly ' \
'correspond to the EVs listed in your phenotype file?\n'
print 'Phenotype file provided: '
print pheno_file, '\n'
print "Phenotypic data columns (regressors): ", pheno_data_dict.keys()
print "Formula: %s\n\n" % formula
raise Exception
# check the model for multicollinearity - Patsy takes care of this, but
# just in case
check_multicollinearity(np.array(dmatrix))
# prepare for final stages
design_matrix = np.array(dmatrix, dtype=np.float16)
column_names = dmatrix.design_info.column_names
# check to make sure there are more time points than EVs!
if len(column_names) >= num_subjects:
err = "\n\n[!] CPAC says: There are more EVs than there are " \
"subjects currently included in the model for %s. There must " \
"be more subjects than EVs in the design.\n\nNumber of " \
"subjects: %d\nNumber of EVs: %d\n\nNote: An 'Intercept' " \
"column gets added to the design as an EV, so there will be " \
"one more EV than you may have specified in your design. In " \
"addition, if you specified to model group variances " \
"separately, an Intercept column will not be included, but " \
"the amount of EVs can nearly double once they are split " \
"along the grouping variable.\n\n" \
"If the number of subjects is lower than the number of " \
"subjects in your group analysis subject list, this may be " \
"because not every subject in the subject list has an output " \
"for %s in the individual-level analysis output directory.\n\n"\
% (current_output, num_subjects, len(column_names), \
current_output)
raise Exception(err)
# remove the header formatting Patsy creates for categorical variables
# because we are going to use depatsified_EV_names in the "Available EVs
# for Contrasts" list on the next page, and also to test user-made custom
# contrast files
depatsified_EV_names = []
for column in column_names:
# if using Sum encoding, a column name may look like this:
# C(adhd, Sum)[S.adhd0]
# this loop leaves it with only "adhd0" in this case, for the
# contrasts list for the next GUI page
column_string = column
string_for_removal = ''
for char in column_string:
string_for_removal = string_for_removal + char
if char == '.':
column_string = column_string.replace(string_for_removal, '')
string_for_removal = ''
column_string = column_string.replace(']', '')
depatsified_EV_names.append(column_string)
# write the .mat file finally
write_mat_file(design_matrix, output_dir, model_name, \
depatsified_EV_names, current_output)
# write the .grp file also
create_grp_file(design_matrix, grouping_var_id_dict, output_dir, \
model_name, current_output)
# return the PATSY OBJECT of dmatrix, not the Numpy array "design_matrix"
return dmatrix, depatsified_EV_names
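# --- End-to-end sketch (added for illustration) ---
# Builds a tiny, made-up phenotype CSV and participant list in a temporary
# directory and generates the .mat/.grp files. All file names, column names
# and values are hypothetical; pandas and patsy must be installed, and this
# module is written for Python 2, so run the sketch with a Python 2
# interpreter.
if __name__ == "__main__":
    import os
    import tempfile
    import pandas as pd
    _tmp = tempfile.mkdtemp()
    _pheno_csv = os.path.join(_tmp, "pheno.csv")
    _subs_csv = os.path.join(_tmp, "participants.csv")
    pd.DataFrame({"Participant": ["sub001", "sub002", "sub003", "sub004"],
                  "age": [20, 30, 40, 50],
                  "sex": [0, 1, 0, 1]}).to_csv(_pheno_csv, index=False)
    pd.DataFrame({"participant": ["sub001", "sub002", "sub003",
                                  "sub004"]}).to_csv(_subs_csv, index=False)
    _dmat, _ev_names = create_design_matrix(
        _pheno_csv, {"categorical": ["sex"], "demean": ["age"]}, "age + sex",
        "Participant", sub_list=_subs_csv, output_dir=_tmp,
        model_name="toy_model")
    print(_ev_names)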
def positive(dmat, a, coding, group_sep, grouping_var):
import numpy as np
# this is also where the "Intercept" column gets introduced into
# the contrasts columns, for when the user uses the model builder's
# contrast builder
evs = dmat.design_info.column_name_indexes
con = np.zeros(dmat.shape[1])
if group_sep == True:
if "__" in a and grouping_var in a:
ev_desc = a.split("__")
for ev in evs:
count = 0
for desc in ev_desc:
if desc in ev:
count += 1
if count == len(ev_desc):
con[evs[ev]] = 1
break
else:
# it is a dropped term so make all other terms in that
# category at -1
term = a.split('[')[0]
for ev in evs:
if ev.startswith(term):
con[evs[ev]]= -1
elif len(a.split(grouping_var)) > 2:
# this is if the current parsed contrast is the actual
# grouping variable, as the Patsified name will have the
# variable's name string in it twice
for ev in evs:
if a.split(".")[1] in ev:
con[evs[ev]] = 1
break
else:
# it is a dropped term so make all other terms in that
# category at -1
term = a.split('[')[0]
for ev in evs:
if ev.startswith(term):
con[evs[ev]]= -1
# else not modeling group variances separately
else:
if a in evs:
con[evs[a]] = 1
else:
# it is a dropped term so make all other terms in that category
# at -1
term = a.split('[')[0]
for ev in evs:
if ev.startswith(term):
con[evs[ev]]= -1
if coding == "Treatment":
# make Intercept 0
con[0] = 0
elif coding == "Sum":
# make Intercept 1
con[1] = 1
return con
def greater_than(dmat, a, b, coding, group_sep, grouping_var):
c1 = positive(dmat, a, coding, group_sep, grouping_var)
c2 = positive(dmat, b, coding, group_sep, grouping_var)
return c1-c2
def negative(dmat, a, coding, group_sep, grouping_var):
con = 0-positive(dmat, a, coding, group_sep, grouping_var)
return con
def create_dummy_string(length):
ppstring = ""
for i in range(0, length):
ppstring += '\t' + '%1.5e' %(1.0)
ppstring += '\n'
return ppstring
def create_con_file(con_dict, col_names, file_name, current_output, out_dir):
import os
print "col names: "
print col_names
with open(os.path.join(out_dir, file_name) + ".con",'w+') as f:
# write header
num = 1
for key in con_dict:
f.write("/ContrastName%s\t%s\n" %(num,key))
num += 1
f.write("/NumWaves\t%d\n" %len(con_dict[key]))
f.write("/NumContrasts\t%d\n" %len(con_dict))
f.write("/PPheights%s" %create_dummy_string(len(con_dict[key])))
f.write("/RequiredEffect%s" %create_dummy_string(len(con_dict[key])))
f.write("\n\n")
# print labels for the columns - mainly for double-checking your
# model
col_string = '\n'
for col in col_names:
col_string = col_string + col + '\t'
print >>f, col_string, '\n'
# write data
f.write("/Matrix\n")
for key in con_dict:
for v in con_dict[key]:
f.write("%1.5e\t" %v)
f.write("\n")
def create_fts_file(ftest_list, con_dict, model_name, current_output,
out_dir):
import os
import numpy as np
try:
print "\nFound f-tests in your model, writing f-tests file " \
"(.fts)..\n"
with open(os.path.join(out_dir, model_name + '.fts'), 'w') as f:
print >>f, '/NumWaves\t', len(con_dict)
print >>f, '/NumContrasts\t', len(ftest_list)
# process each f-test
ftst = []
for ftest_string in ftest_list:
ftest_vector = []
cons_in_ftest = ftest_string.split(",")
for con in con_dict.keys():
if con in cons_in_ftest:
ftest_vector.append(1)
else:
ftest_vector.append(0)
ftst.append(ftest_vector)
fts_n = np.array(ftst)
# print labels for the columns - mainly for double-checking your
# model
col_string = '\n'
for con in con_dict.keys():
col_string = col_string + con + '\t'
print >>f, col_string, '\n'
print >>f, '/Matrix'
for i in range(fts_n.shape[0]):
print >>f, ' '.join(fts_n[i].astype('str'))
except Exception as e:
filepath = os.path.join(out_dir, "model_files", current_output, \
model_name + '.fts')
errmsg = "\n\n[!] CPAC says: Could not create .fts file for " \
"FLAMEO or write it to disk.\nAttempted filepath: %s\n" \
"Error details: %s\n\n" % (filepath, e)
raise Exception(errmsg)
def create_con_ftst_file(con_file, model_name, current_output, output_dir,
column_names, coding_scheme, group_sep):
"""
Create the contrasts and fts file
"""
import os
import numpy as np
with open(con_file, "r") as f:
evs = f.readline()
evs = evs.rstrip('\r\n').split(',')
count_ftests = 0
# TODO: this needs to be re-visited, but I think this was originally added
# TODO: to counteract the fact that if someone was making a custom
# TODO: contrasts matrix CSV, they wouldn't know that the design matrix
# TODO: would have the Intercept added to it? but what if it wasn't?
# TODO: comment out for now... but test
# remove "Contrasts" label and replace it with "Intercept"
#evs[0] = "Intercept"
fTest = False
print "evs: "
print evs
for ev in evs:
if "f_test" in ev:
count_ftests += 1
if count_ftests > 0:
fTest = True
try:
data = np.genfromtxt(con_file, names=True, delimiter=',', dtype=None)
except:
print "Error: Could not successfully read in contrast file: ",con_file
raise Exception
lst = data.tolist()
ftst = []
fts_columns = []
contrasts = []
contrast_names = []
length = None
length = len(list(lst[0]))
# lst = list of tuples, "tp"
# tp = tuple in the format (contrast_name, 0, 0, 0, 0, ...)
# with the zeroes being the vector of contrasts for that contrast
for tp in lst:
contrast_names.append(tp[0])
# create a list of integers that is the vector for the contrast
# ex. [0, 1, 1, 0, ..]
con_vector = list(tp)[1:(length-count_ftests)]
fts_vector = list(tp)[(length-count_ftests):length]
fts_columns.append(fts_vector)
# TODO: see note about Intercept above
# add Intercept column
# if not group_sep:
# if coding_scheme == "Treatment":
# con_vector.insert(0, 0)
# elif coding_scheme == "Sum":
# con_vector.insert(0, 1)
contrasts.append(con_vector)
# contrast_names = list of the names of the contrasts (not regressors)
# contrasts = list of lists with the contrast vectors
num_EVs_in_con_file = len(contrasts[0])
contrasts = np.array(contrasts, dtype=np.float16)
fts_columns = np.array(fts_columns)
# if there are f-tests, create the array for them
if fTest:
if len(contrast_names) < 2:
errmsg = "\n\n[!] CPAC says: Not enough contrasts for running " \
"f-tests.\nTip: Do you have only one contrast in your " \
"contrasts file? f-tests require more than one contrast.\n"\
"Either remove the f-tests or include more contrasts.\n\n"
raise Exception(errmsg)
fts_n = fts_columns.T
if len(column_names) != (num_EVs_in_con_file):
err_string = "\n\n[!] CPAC says: The number of EVs in your model " \
"design matrix (found in the %s.mat file) does not " \
"match the number of EVs (columns) in your custom " \
"contrasts matrix CSV file.\n\nCustom contrasts matrix "\
"file: %s\n\nNumber of EVs in design matrix: %d\n" \
"Number of EVs in contrasts file: %d\n\nThe column " \
"labels in the design matrix should match those in " \
"your contrasts .CSV file.\nColumn labels in design " \
"matrix:\n%s" % (model_name, con_file, \
len(column_names), num_EVs_in_con_file, \
str(column_names))
raise Exception(err_string)
for design_mat_col, con_csv_col in zip(column_names, evs[1:]):
if con_csv_col not in design_mat_col:
errmsg = "\n\n[!] CPAC says: The names of the EVs in your " \
"custom contrasts .csv file do not match the names or " \
"order of the EVs in the design matrix. Please make " \
"sure these are consistent.\nDesign matrix EV columns: "\
"%s\nYour contrasts matrix columns: %s\n\n" \
% (column_names, evs[1:])
raise Exception(errmsg)
out_dir = os.path.join(output_dir, model_name + '.con')
with open(out_dir,"wt") as f:
idx = 1
pp_str = '/PPheights'
re_str = '/RequiredEffect'
for name in contrast_names:
print >>f, '/ContrastName%d' %idx, '\t', name
pp_str += '\t%1.5e' %(1)
re_str += '\t%1.5e' %(1)
idx += 1
print >>f, '/NumWaves\t', (contrasts.shape)[1]
print >>f, '/NumContrasts\t', (contrasts.shape)[0]
print >>f, pp_str
print >>f, re_str + '\n'
# print labels for the columns - mainly for double-checking your model
col_string = '\n'
for ev in evs:
col_string = col_string + ev + '\t'
print >>f, col_string, '\n'
print >>f, '/Matrix'
np.savetxt(f, contrasts, fmt='%1.5e', delimiter='\t')
if fTest:
print "\nFound f-tests in your model, writing f-tests file (.fts)..\n"
ftest_out_dir = os.path.join(output_dir, model_name + '.fts')
with open(ftest_out_dir,"wt") as f:
print >>f, '/NumWaves\t', (contrasts.shape)[0]
print >>f, '/NumContrasts\t', count_ftests
# print labels for the columns - mainly for double-checking your
# model
col_string = '\n'
for con in contrast_names:
col_string = col_string + con + '\t'
print >>f, col_string, '\n'
print >>f, '/Matrix'
for i in range(fts_n.shape[0]):
print >>f, ' '.join(fts_n[i].astype('str'))
def process_contrast(parsed_contrast, operator, ev_selections, group_sep, \
grouping_var, coding_scheme):
# take the contrast strings and process them appropriately
# extract the two separate contrasts (if there are two), and then
# identify which are categorical - adapting the string if so
parsed_EVs_in_contrast = []
EVs_in_contrast = parsed_contrast.split(operator)
if '' in EVs_in_contrast:
EVs_in_contrast.remove('')
for EV in EVs_in_contrast:
skip = 0
# they need to be put back into Patsy formatted header titles
# because the dmatrix gets passed into the function that writes
# out the contrast matrix
if 'categorical' in ev_selections.keys():
for cat_EV in ev_selections['categorical']:
# second half of this if clause is in case group variances
# are being modeled separately, and we don't want the EV
# that is the grouping variable (which is now present in
# other EV names) to confound this operation
if group_sep == True:
gpvar = grouping_var
else:
gpvar = "..."
if (cat_EV in EV) and not (gpvar in EV and \
"__" in EV):
# handle interactions
if ":" in EV:
temp_split_EV = EV.split(":")
for interaction_EV in temp_split_EV:
if cat_EV in interaction_EV:
current_EV = interaction_EV
else:
current_EV = EV
if coding_scheme == 'Treatment':
cat_EV_contrast = EV.replace(EV, 'C(' + cat_EV + \
')[T.' + current_EV+\
']')
elif coding_scheme == 'Sum':
cat_EV_contrast = EV.replace(EV, 'C(' + cat_EV + \
', Sum)[S.' + \
current_EV + ']')
parsed_EVs_in_contrast.append(cat_EV_contrast)
skip = 1
if skip == 0:
parsed_EVs_in_contrast.append(EV)
# handle interactions
if ":" in EV and len(parsed_EVs_in_contrast) == 2:
parsed_EVs_in_contrast = [parsed_EVs_in_contrast[0] + ":" + \
parsed_EVs_in_contrast[1]]
if ":" in EV and len(parsed_EVs_in_contrast) == 3:
parsed_EVs_in_contrast = [parsed_EVs_in_contrast[0], \
parsed_EVs_in_contrast[1] + ":" + \
parsed_EVs_in_contrast[2]]
return parsed_EVs_in_contrast
def run(group_config, current_output, param_file=None, \
derivative_means_dict=None, roi_means_dict=None, \
model_out_dir=None, CPAC_run=True):
import os
import csv
import numpy as np
# open the GROUP ANALYSIS FSL .YML CONFIG FILE, not the main pipeline
# config .yml file!
if CPAC_run:
c = group_config
else:
try:
c = Configuration(yaml.load(open(os.path.realpath(group_config), \
'r')))
except:
raise Exception("Error in reading %s configuration file" \
% group_config)
# pull in the gpa settings!
ph = c.pheno_file
sublist = c.subject_list
ev_selections = c.ev_selections
subject_id_label = c.subject_id_label
formula = c.design_formula
coding_scheme = c.coding_scheme[0]
group_sep = c.group_sep
grouping_var = c.grouping_var
contrasts = c.contrasts
f_tests = c.f_tests
custom_contrasts = c.custom_contrasts
model_name = c.model_name
output_dir = c.output_dir
# make sure the group analysis output directory exists
try:
if not os.path.exists(output_dir):
os.makedirs(output_dir)
except:
print '\n\n[!] CPAC says: Could not successfully create the group ' \
'analysis output directory:\n', output_dir, '\n\nMake sure ' \
'you have write access in this file structure.\n\n\n'
raise Exception
measure_dict = {}
# extract motion measures from CPAC-generated power params file
if param_file != None:
measure_dict = get_measure_dict(param_file)
# combine the motion measures dictionary with the measure_mean
# dictionary (if it exists)
if derivative_means_dict:
measure_dict["Measure_Mean"] = derivative_means_dict
# create the .mat and .grp files for FLAME
design_matrix, regressor_names = create_design_matrix(ph, ev_selections, \
formula, subject_id_label, \
sublist, coding_scheme, \
grouping_var, measure_dict, \
roi_means_dict, model_out_dir, \
model_name, current_output)
# Create contrasts_dict dictionary for the .con file generation later
contrasts_list = contrasts
contrasts_dict = {}
for contrast in contrasts_list:
# each 'contrast' is a string the user input of the desired contrast
# remove all spaces
parsed_contrast = contrast.replace(' ', '')
EVs_in_contrast = []
parsed_EVs_in_contrast = []
if '>' in parsed_contrast:
parsed_EVs_in_contrast = \
process_contrast(parsed_contrast, '>', ev_selections, \
group_sep, grouping_var, coding_scheme)
contrasts_dict[parsed_contrast] = \
greater_than(design_matrix, parsed_EVs_in_contrast[0], \
parsed_EVs_in_contrast[1], coding_scheme, \
group_sep, grouping_var)
elif '<' in parsed_contrast:
parsed_EVs_in_contrast = \
process_contrast(parsed_contrast, '<', ev_selections, \
group_sep, grouping_var, coding_scheme)
contrasts_dict[parsed_contrast] = \
greater_than(design_matrix, parsed_EVs_in_contrast[1], \
parsed_EVs_in_contrast[0], coding_scheme, \
group_sep, grouping_var)
else:
contrast_string = parsed_contrast.replace('+',',+,')
contrast_string = contrast_string.replace('-',',-,')
contrast_items = contrast_string.split(',')
if '' in contrast_items:
contrast_items.remove('')
if '+' in contrast_items and len(contrast_items) == 2:
parsed_EVs_in_contrast = \
process_contrast(parsed_contrast, '+', ev_selections, \
group_sep, grouping_var, coding_scheme)
contrasts_dict[parsed_contrast] = \
positive(design_matrix, parsed_EVs_in_contrast[0], \
coding_scheme, group_sep, grouping_var)
elif '-' in contrast_items and len(contrast_items) == 2:
parsed_EVs_in_contrast = \
process_contrast(parsed_contrast, '-', ev_selections, \
group_sep, grouping_var, coding_scheme)
contrasts_dict[parsed_contrast] = \
negative(design_matrix, parsed_EVs_in_contrast[0], \
coding_scheme, group_sep, grouping_var)
if len(contrast_items) > 2:
idx = 0
for item in contrast_items:
# they need to be put back into Patsy formatted header
# titles because the dmatrix gets passed into the function
# that writes out the contrast matrix
if 'categorical' in ev_selections.keys():
for cat_EV in ev_selections['categorical']:
if cat_EV in item:
if coding_scheme == 'Treatment':
item = item.replace(item, \
'C(' + cat_EV + ')[T.' + item + ']')
elif coding_scheme == 'Sum':
item = item.replace(item, \
'C(' + cat_EV + ', Sum)[S.' + item + ']')
if idx == 0:
if item != '+' and item != '-':
                        contrast_vector = positive(design_matrix, item, \
                                               coding_scheme, group_sep, \
                                               grouping_var)
if parsed_contrast not in contrasts_dict.keys():
contrasts_dict[parsed_contrast] = contrast_vector
else:
contrasts_dict[parsed_contrast] += contrast_vector
elif idx != 0:
if item != '+' and item != '-':
if contrast_items[idx-1] == '+':
                            contrast_vector = positive(design_matrix, item, \
coding_scheme, group_sep,\
grouping_var)
if parsed_contrast not in contrasts_dict.keys():
contrasts_dict[parsed_contrast] = contrast_vector
else:
contrasts_dict[parsed_contrast] += contrast_vector
if contrast_items[idx-1] == '-':
                            contrast_vector = negative(design_matrix, item, \
coding_scheme, group_sep,\
grouping_var)
if parsed_contrast not in contrasts_dict.keys():
contrasts_dict[parsed_contrast] = contrast_vector
else:
contrasts_dict[parsed_contrast] += contrast_vector
idx += 1
# finally write out the .con file and .fts file (if f-tests)
if (custom_contrasts == None) or (custom_contrasts == '') or \
("None" in custom_contrasts):
print "Writing contrasts file (.con) based on contrasts provided " \
"using the group analysis model builder's contrasts editor.."
create_con_file(contrasts_dict, regressor_names, model_name, \
current_output, model_out_dir)
if f_tests:
create_fts_file(f_tests, contrasts_dict, model_name, \
current_output, model_out_dir)
else:
print "\nWriting contrasts file (.con) based on contrasts provided " \
"with a custom contrasts matrix CSV file..\n"
create_con_ftst_file(custom_contrasts, model_name, current_output, \
model_out_dir, regressor_names, \
coding_scheme, group_sep)
| 34.62447 | 86 | 0.544966 |
def load_pheno_file(pheno_file):
import os
import pandas as pd
if not os.path.isfile(pheno_file):
err = "\n\n[!] CPAC says: The group-level analysis phenotype file "\
"provided does not exist!\nPath provided: %s\n\n" \
% pheno_file
raise Exception(err)
with open(os.path.abspath(pheno_file),"r") as f:
pheno_dataframe = pd.read_csv(f)
return pheno_dataframe
def load_group_participant_list(group_participant_list_file):
import os
import pandas as pd
if not os.path.isfile(group_participant_list_file):
err = "\n\n[!] CPAC says: The group-level analysis subject list "\
"provided does not exist!\nPath provided: %s\n\n" \
              % group_participant_list_file
raise Exception(err)
with open(group_participant_list_file,"r") as f:
group_subs_dataframe = pd.read_csv(f)
if "participant" not in ga_sublist.columns:
err = "\n\n[!] CPAC says: Your group-level analysis subject "\
"list CSV is missing a 'participant' column.\n\n"
raise Exception(err)
return group_subs_dataframe
def process_pheno_file(pheno_file_dataframe, group_subs_dataframe, \
participant_id_label):
import os
import pandas as pd
if not isinstance(pheno_file_dataframe, pd.DataFrame):
err = "\n\n[!] CPAC says: The phenotype information input should " \
"be a Python Pandas dataframe object.\n\n"
raise Exception(err)
if not isinstance(group_subs_dataframe, pd.DataFrame):
err = "\n\n[!] CPAC says: The group analysis participant list input "\
"should be a Python Pandas dataframe object.\n\n"
raise Exception(err)
if not isinstance(participant_id_label, str):
err = "\n\n[!] CPAC says: The participant ID label input should be " \
"a string.\n\n"
pheno = pheno_file_dataframe
pheno_file_rows = []
df_rows = []
subjects = list(group_subs_dataframe.participant)
subjects = [str(i) for i in subjects]
sessions = None
series = None
if "session" in group_subs_dataframe.columns:
sessions = list(group_subs_dataframe.session)
sessions = [str(i) for i in sessions]
if "series" in group_subs_dataframe.columns:
series = list(group_subs_dataframe.series)
series = [str(i) for i in series]
# sessions and/or series
for i in range(0,len(subjects)):
full_id = []
subject = subjects[i]
full_id.append(subject)
if sessions and series:
session = sessions[i]
scan = series[i]
full_id.append(session)
full_id.append(scan)
try:
row = pheno[(pheno[participant_id_label] == subject) & \
(pheno.session == session) & \
(pheno.series == scan)]
except:
row = pheno[(pheno[participant_id_label] == int(subject)) & \
(pheno.session == session) & \
(pheno.series == scan)]
elif sessions:
session = sessions[i]
full_id.append(session)
try:
row = pheno[(pheno[participant_id_label] == subject) & \
(pheno.session == session)]
except:
row = pheno[(pheno[participant_id_label] == int(subject)) & \
(pheno.session == session)]
elif series:
scan = series[i]
full_id.append(scan)
try:
row = pheno[(pheno[participant_id_label] == subject) & \
(pheno.series == scan)]
except:
row = pheno[(pheno[participant_id_label] == int(subject)) & \
(pheno.series == scan)]
else:
full_id.append(subject)
try:
row = pheno[(pheno[participant_id_label] == subject)]
except:
row = pheno[(pheno[participant_id_label] == int(subject))]
if len(row) > 1:
err = "\n\n[!] CPAC says: Multiple phenotype entries were " \
"found for these criteria:\n\n%s\n\nPlease ensure " \
"your group analysis participant list and phenotype " \
"file are configured correctly.\n\n" % str(full_id)
raise Exception(err)
elif len(row) == 1:
df_rows.append(row)
new_pheno_df = pd.concat(df_rows)
pheno_file_rows = new_pheno_df.to_dict("records")
return pheno_file_rows
def create_pheno_dict(pheno_file_rows, ev_selections, participant_id_label):
# creates the phenotype data dictionary in a format Patsy requires,
# and also demeans the continuous EVs marked for demeaning
# input
# pheno_file_rows: a list of dictionaries, with each dictionary being
# one of the rows from the phenotype file, with the
# format of {header: value, header: value, ..}
# ev_selections: a dictionary with keys for "categorical" and "demean",
# with the entries being lists of phenotype EV names
# participant_id_label: string of the name of the participant column in
# the phenotype file
# output
# pheno_data_dict: a dictionary with each key being a phenotype column,
# and each entry being a list of values, IN ORDER
# data is also in Patsy-acceptable format
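    # illustrative sketch with hypothetical values (not from any real data),
    # assuming 'ADHD' was marked categorical and 'age' marked for demeaning:
    #   pheno_file_rows = [{"Participant": "sub01", "ADHD": 1, "age": 21.0},
    #                      {"Participant": "sub02", "ADHD": 0, "age": 25.0}]
    #   -> pheno_data_dict = {"Participant": ["sub01", "sub02"],
    #                         "ADHD": ["ADHD1", "ADHD0"],
    #                         "age": np.array([-2.0, 2.0])}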
import os
import csv
import numpy as np
pheno_data_dict = {}
for line in pheno_file_rows:
for val in line.values():
# if there are any blank values in the pheno row, skip this
# row. if not, continue on with the "else" clause
if val == "":
break
else:
for key in line.keys():
# if there are blank entries because of an empty row in
# the CSV (such as ",,,,,"), move on to the next entry
#if len(line[key]) == 0:
# continue
if key not in pheno_data_dict.keys():
pheno_data_dict[key] = []
# create a list within one of the dictionary values for
# that EV if it is categorical; formats this list into a
# form Patsy can understand regarding categoricals:
# example: { ADHD: ['adhd1', 'adhd1', 'adhd0'] }
# instead of just [1, 1, 0], etc.
if 'categorical' in ev_selections.keys():
if key in ev_selections['categorical']:
pheno_data_dict[key].append(key + str(line[key]))
                    elif (key == participant_id_label) or (key == "session") or \
(key == "series"):
pheno_data_dict[key].append(line[key])
else:
pheno_data_dict[key].append(float(line[key]))
                elif (key == participant_id_label) or (key == "session") or \
(key == "series"):
pheno_data_dict[key].append(line[key])
else:
pheno_data_dict[key].append(float(line[key]))
# this needs to run after each list in each key has been fully
# populated above
for key in pheno_data_dict.keys():
# demean the EVs marked for demeaning
if 'demean' in ev_selections.keys():
if key in ev_selections['demean']:
new_demeaned_evs = []
mean_evs = 0.0
# populate a dictionary, a key for each demeanable EV, with
# the value being the sum of all the values (which need to be
# converted to float first)
for val in pheno_data_dict[key]:
mean_evs += float(val)
# calculate the mean of the current EV in this loop
mean_evs = mean_evs / len(pheno_data_dict[key])
# remove the EV's mean from each value of this EV
for val in pheno_data_dict[key]:
new_demeaned_evs.append(float(val) - mean_evs)
pheno_data_dict[key] = new_demeaned_evs
if 'categorical' in ev_selections.keys():
if key not in ev_selections['categorical']:
pheno_data_dict[key] = np.array(pheno_data_dict[key])
return pheno_data_dict
def get_measure_dict(param_file):
import os
import pandas as pd
if not os.path.isfile(param_file):
err = "\n\n[!] CPAC says: You've included a motion parameter in " \
"your group-level analysis model design formula, but " \
"there is no motion parameters file available.\n\n"
raise Exception(err)
with open(param_file,"r") as f:
motion_params = pd.read_csv(f, index_col=False)
measures = ['MeanFD_Power', 'MeanFD_Jenkinson', 'MeanDVARS']
measure_dict = {}
for m in measures:
measure_map = {}
if m in motion_params.columns:
part_ids = list(motion_params["Subject"])
part_ids = [str(i) for i in part_ids]
scan_ids = list(motion_params["Scan"])
scan_ids = [str(i) for i in scan_ids]
measure_vals = list(motion_params[m])
measure_vals = [float(i) for i in measure_vals]
for part_id, scan_id, measure_val in \
zip(part_ids, scan_ids, measure_vals):
measure_map[(part_id,scan_id)] = measure_val
measure_dict[m] = measure_map
return measure_dict
def get_custom_roi_info(roi_means_dict):
# check
if roi_means_dict == None:
err_string = "\n\n[!] CPAC says: The custom ROI means were not " \
"calculated properly during the group analysis " \
"model generation.\n\n"
raise Exception(err_string)
roi_num = len(roi_means_dict.values()[0])
# this will be a dictionary matching ROI regressor header labels with
# the actual ROI dictionaries
roi_dict_dict = {}
# split the roi_means_dict from { subID: [mean1,mean2,mean3,..], ..}
# to three dictionaries of { subID: mean1, .. }, { subID: mean2, .. },
# and so on
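    # illustrative sketch with hypothetical values:
    #   roi_means_dict = {"sub01": [2.1, 3.4], "sub02": [2.5, 3.0]}
    #   -> {"Custom_ROI_Mean_1": {"sub01": 2.1, "sub02": 2.5},
    #       "Custom_ROI_Mean_2": {"sub01": 3.4, "sub02": 3.0}}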
for num in range(0,roi_num):
label = "Custom_ROI_Mean_%d" % int(num+1)
temp_roi_dict = {}
for key in roi_means_dict.keys():
            temp_roi_dict[key] = roi_means_dict[key][num]
roi_dict_dict[label] = temp_roi_dict
return roi_dict_dict
def model_group_var_separately(grouping_var, formula, pheno_data_dict, \
ev_selections, coding_scheme):
if grouping_var == None or grouping_var not in formula:
print '\n\n[!] CPAC says: Model group variances separately is ' \
'enabled, but the grouping variable set is either set to ' \
'None, or was not included in the model as one of the ' \
'EVs.\n'
print 'Design formula: ', formula
print 'Grouping variable: ', grouping_var, '\n\n'
raise Exception
    # do this a little early for the grouping variable so that it doesn't
    # get wrapped again when the remaining categorical EVs are converted
    # further below
if 'categorical' in ev_selections.keys():
for EV_name in ev_selections['categorical']:
if EV_name == grouping_var:
if coding_scheme == 'Treatment':
formula = formula.replace(EV_name, 'C(' + EV_name + ')')
elif coding_scheme == 'Sum':
formula = formula.replace(EV_name, 'C(' + EV_name + \
', Sum)')
groupvar_levels = []
grouping_var_id_dict = {}
idx = 0
for cat_ev_value in pheno_data_dict[grouping_var]:
cat_ev_level = str(cat_ev_value).replace(str(grouping_var), "")
if cat_ev_level not in groupvar_levels:
groupvar_levels.append(cat_ev_level)
try:
grouping_var_id_dict[cat_ev_level].append(idx)
except:
grouping_var_id_dict[cat_ev_level] = [idx]
idx += 1
split_EVs = {}
for key in pheno_data_dict.keys():
if (key in formula) and (key != grouping_var):
new_key_string = ""
for level in groupvar_levels:
groupvar_with_level = str(grouping_var) + str(level)
new_key = key + "__" + groupvar_with_level
if new_key_string == "":
new_key_string = new_key
else:
new_key_string = new_key_string + " + " + new_key
split_EVs[new_key] = []
if key in ev_selections["categorical"]:
ev_selections["categorical"].append(new_key)
for val, groupvar_val in zip(pheno_data_dict[key], \
pheno_data_dict[grouping_var]):
if groupvar_with_level == groupvar_val:
split_EVs[new_key].append(val)
else:
split_EVs[new_key].append(0)
del pheno_data_dict[key]
if key in ev_selections["categorical"]:
ev_selections["categorical"].remove(key)
formula = formula.replace(key, new_key_string)
pheno_data_dict.update(split_EVs)
if 'categorical' in ev_selections.keys():
for EV_name in ev_selections['categorical']:
if EV_name != grouping_var:
if coding_scheme == 'Treatment':
formula = formula.replace(EV_name, 'C(' + EV_name + ')')
elif coding_scheme == 'Sum':
formula = formula.replace(EV_name, 'C(' + EV_name + \
', Sum)')
formula = formula + " - 1"
return pheno_data_dict, formula, grouping_var_id_dict
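# illustrative sketch of the split above, with hypothetical names and
# Treatment coding: for grouping_var = "Sex" and formula = "age + Sex",
# the "age" column is replaced by "age__Sex0" and "age__Sex1" (each holding
# the participant's age where Sex matches that level, and 0 elsewhere), and
# the formula becomes "age__Sex0 + age__Sex1 + C(Sex) - 1"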
def check_multicollinearity(matrix):
import numpy as np
print "\nChecking for multicollinearity in the model.."
U, s, V = np.linalg.svd(matrix)
max_singular = np.max(s)
min_singular = np.min(s)
print "Max singular: ", max_singular
print "Min singular: ", min_singular
print "Rank: ", np.linalg.matrix_rank(matrix), "\n"
if min_singular == 0:
print '[!] CPAC warns: Detected multicollinearity in the ' \
'computed group-level analysis model. Please double-' \
'check your model design.\n\n'
else:
condition_number = float(max_singular)/float(min_singular)
print "Condition number: %f\n\n" % condition_number
if condition_number > 30:
print '[!] CPAC warns: Detected multicollinearity in the ' \
'computed group-level analysis model. Please double-' \
'check your model design.\n\n'
def write_mat_file(design_matrix, output_dir, model_name, \
depatsified_EV_names, current_output=None):
import os
import numpy as np
dimx = None
dimy = None
if len(design_matrix.shape) == 1:
dimy = 1
dimx = design_matrix.shape[0]
else:
dimx, dimy = design_matrix.shape
ppstring = '/PPheights'
for i in range(0, dimy):
ppstring += '\t' + '%1.5e' %(1.0)
ppstring += '\n'
filename = model_name + ".mat"
out_file = os.path.join(output_dir, filename)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(out_file, 'wt') as f:
print >>f, '/NumWaves\t%d' %dimy
print >>f, '/NumPoints\t%d' %dimx
print >>f, ppstring
col_string = '\n'
for col in depatsified_EV_names:
col_string = col_string + col + '\t'
print >>f, col_string, '\n'
print >>f, '/Matrix'
np.savetxt(f, design_matrix, fmt='%1.5e', delimiter='\t')
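# the resulting FSL .mat file looks roughly like this (illustrative):
#   /NumWaves    <number of EVs>
#   /NumPoints   <number of participants>
#   /PPheights   1.00000e+00 ...
#   <tab-separated EV column names>
#   /Matrix
#   <one tab-separated row of EV values per participant>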
def create_grp_file(design_matrix, grouping_var_id_dict, output_dir, \
model_name, current_output=None):
import os
import numpy as np
dimx = None
dimy = None
if len(design_matrix.shape) == 1:
dimy = 1
dimx = design_matrix.shape[0]
else:
dimx, dimy = design_matrix.shape
design_matrix_ones = np.ones(dimx)
if not (grouping_var_id_dict == None):
i = 1
for key in sorted(grouping_var_id_dict.keys()):
for index in grouping_var_id_dict[key]:
design_matrix_ones[index] = i
i += 1
filename = model_name + ".grp"
out_file = os.path.join(output_dir, filename)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(out_file, "wt") as f:
print >>f, '/NumWaves\t1'
print >>f, '/NumPoints\t%d\n' %dimx
print >>f, '/Matrix'
np.savetxt(f, design_matrix_ones, fmt='%d', delimiter='\t')
def create_design_matrix(pheno_file, ev_selections, formula, \
subject_id_label, sub_list=None, \
coding_scheme="Treatment", grouping_var=None, \
new_regressor_dict=None, roi_means_dict=None, \
output_dir=None, model_name="design", \
current_output=None):
import os
import patsy
import numpy as np
if output_dir == None:
output_dir = os.getcwd()
    # load the phenotype file and the group participant list, and drop
    # phenotype rows for participants if they are not listed in the
    # participant list
pheno_file_df = load_pheno_file(pheno_file)
participant_list_df = load_group_participant_list(sub_list)
pheno_file_rows = process_pheno_file(pheno_file_df, participant_list_df, \
subject_id_label)
# get number of subjects that have the derivative for this current model
# (basically, the amount of time points, which must be greater than the
# number of EVs)
num_subjects = len(participant_list_df)
# for repeated measures
if "session" in participant_list_df.columns:
ev_selections["categorical"].append("session")
if "series" in participant_list_df.columns:
ev_selections["categorical"].append("series")
# start adding additionally created EVs
if new_regressor_dict:
for measure in new_regressor_dict.keys():
if (measure in formula):
measure_dict = new_regressor_dict[measure]
for pheno_row_dict in pheno_file_rows:
participant_id = pheno_row_dict[subject_id_label]
if ("session" in pheno_row_dict.keys()) and \
("series" in pheno_row_dict.keys()):
session_id = pheno_row_dict["session"]
series_id = pheno_row_dict["series"]
participant_tuple = \
(participant_id, session_id, series_id)
elif "session" in pheno_row_dict.keys():
session_id = pheno_row_dict["session"]
participant_tuple = (participant_id, session_id)
elif "series" in pheno_row_dict.keys():
series_id = pheno_row_dict["series"]
participant_tuple = (participant_id, series_id)
else:
participant_tuple = (participant_id)
pheno_row_dict[measure] = measure_dict[participant_tuple]
ev_selections["demean"].append(measure)
if "Custom_ROI_Mean" in formula:
# include the means of the specified ROIs as regressors
if roi_means_dict == None:
err = "\n\n[!] You included 'Custom_ROI_Mean' in your model " \
"design, but there are no mean of ROI values provided." \
"\n\n"
raise Exception(err)
# roi_dict_dict is a dictionary of dictionaries, with each dictionary
# holding all of the means for one ROI, with each entry being a mean
# for a participant (the keys are the participant IDs)
# ex. {participant_01: 35.15, participant_02: 50.00}
# with the float values being all of the means of one of
# the ROIs specified
# there will be a dictionary for each ROI specified
roi_dict_dict = get_custom_roi_info(roi_means_dict)
add_formula_string = ""
for roi_column in roi_dict_dict.keys():
roi_dict = roi_dict_dict[roi_column]
for pheno_row_dict in pheno_file_rows:
participant_id = pheno_row_dict[subject_id_label]
if ("session" in pheno_row_dict.keys()) and \
("series" in pheno_row_dict.keys()):
session_id = pheno_row_dict["session"]
series_id = pheno_row_dict["series"]
participant_tuple = \
(participant_id, session_id, series_id)
elif "session" in pheno_row_dict.keys():
session_id = pheno_row_dict["session"]
participant_tuple = (participant_id, session_id)
elif "series" in pheno_row_dict.keys():
series_id = pheno_row_dict["series"]
participant_tuple = (participant_id, series_id)
else:
participant_tuple = (participant_id)
pheno_row_dict[roi_column] = roi_dict[participant_tuple]
ev_selections["demean"].append(roi_column)
# create a string of all the new custom ROI regressor column names
# to be inserted into the design formula, so that Patsy will
# accept the phenotypic data dictionary that now has these columns
if add_formula_string == "":
add_formula_string = add_formula_string + roi_column
else:
add_formula_string = add_formula_string + " + " + roi_column
# a regressor column of ROI means for each custom-specified ROI has
# now been added to the model with appropriate column labels
formula = formula.replace("Custom_ROI_Mean",add_formula_string)
# return the data from the phenotype file processed properly for Patsy
# and load it into 'pheno_data_dict'
# format: dictionary, each key is the name of an EV, and its value is
# a LIST of values in order of the subjects
# - categorical EVs are already renamed from '0,1,..' to
# 'EV0,EV1,..' with EV being the EV name
# - EVs to be demeaned are already demeaned
# - numerical EVs (non-categorical) are in a list which
# have been converted into a NumPy array
pheno_data_dict = create_pheno_dict(pheno_file_rows, ev_selections, \
subject_id_label)
# handle modeling group variances separately (if enabled), then edit the
# formula to be in Patsy language
if grouping_var != None:
pheno_data_dict, formula, grouping_var_id_dict = \
model_group_var_separately(grouping_var, \
formula, pheno_data_dict, \
ev_selections, coding_scheme)
else:
grouping_var_id_dict = None
if 'categorical' in ev_selections.keys():
for EV_name in ev_selections['categorical']:
if coding_scheme == 'Treatment':
formula = formula.replace(EV_name, 'C(' + EV_name + ')')
elif coding_scheme == 'Sum':
formula = formula.replace(EV_name, 'C(' + EV_name + \
', Sum)')
# create the Patsy design matrix!
try:
dmatrix = patsy.dmatrix(formula, pheno_data_dict, NA_action='raise')
except:
print '\n\n[!] CPAC says: Design matrix creation wasn\'t ' \
'successful - do the terms in your formula correctly ' \
'correspond to the EVs listed in your phenotype file?\n'
print 'Phenotype file provided: '
print pheno_file, '\n'
print "Phenotypic data columns (regressors): ", pheno_data_dict.keys()
print "Formula: %s\n\n" % formula
raise Exception
check_multicollinearity(np.array(dmatrix))
design_matrix = np.array(dmatrix, dtype=np.float16)
column_names = dmatrix.design_info.column_names
if len(column_names) >= num_subjects:
err = "\n\n[!] CPAC says: There are more EVs than there are " \
"subjects currently included in the model for %s. There must " \
"be more subjects than EVs in the design.\n\nNumber of " \
"subjects: %d\nNumber of EVs: %d\n\nNote: An 'Intercept' " \
"column gets added to the design as an EV, so there will be " \
"one more EV than you may have specified in your design. In " \
"addition, if you specified to model group variances " \
"separately, an Intercept column will not be included, but " \
"the amount of EVs can nearly double once they are split " \
"along the grouping variable.\n\n" \
"If the number of subjects is lower than the number of " \
"subjects in your group analysis subject list, this may be " \
"because not every subject in the subject list has an output " \
"for %s in the individual-level analysis output directory.\n\n"\
% (current_output, num_subjects, len(column_names), \
current_output)
raise Exception(err)
# for Contrasts" list on the next page, and also to test user-made custom
depatsified_EV_names = []
for column in column_names:
column_string = column
string_for_removal = ''
for char in column_string:
string_for_removal = string_for_removal + char
if char == '.':
column_string = column_string.replace(string_for_removal, '')
string_for_removal = ''
column_string = column_string.replace(']', '')
depatsified_EV_names.append(column_string)
write_mat_file(design_matrix, output_dir, model_name, \
depatsified_EV_names, current_output)
create_grp_file(design_matrix, grouping_var_id_dict, output_dir, \
model_name, current_output)
return dmatrix, depatsified_EV_names
def positive(dmat, a, coding, group_sep, grouping_var):
import numpy as np
# contrast builder
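    # illustrative sketch with a hypothetical design: for columns
    #   ['Intercept', 'C(Diagnosis)[T.Diagnosis1]', 'age'], a = 'age',
    #   Treatment coding and no group separation, the returned vector
    #   is [0, 0, 1]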
evs = dmat.design_info.column_name_indexes
con = np.zeros(dmat.shape[1])
if group_sep == True:
if "__" in a and grouping_var in a:
ev_desc = a.split("__")
for ev in evs:
count = 0
for desc in ev_desc:
if desc in ev:
count += 1
if count == len(ev_desc):
con[evs[ev]] = 1
break
else:
# it is a dropped term so make all other terms in that
# category at -1
term = a.split('[')[0]
for ev in evs:
if ev.startswith(term):
con[evs[ev]]= -1
elif len(a.split(grouping_var)) > 2:
# this is if the current parsed contrast is the actual
# grouping variable, as the Patsified name will have the
# variable's name string in it twice
for ev in evs:
if a.split(".")[1] in ev:
con[evs[ev]] = 1
break
else:
term = a.split('[')[0]
for ev in evs:
if ev.startswith(term):
con[evs[ev]]= -1
else:
if a in evs:
con[evs[a]] = 1
else:
term = a.split('[')[0]
for ev in evs:
if ev.startswith(term):
con[evs[ev]]= -1
if coding == "Treatment":
con[0] = 0
elif coding == "Sum":
con[1] = 1
return con
def greater_than(dmat, a, b, coding, group_sep, grouping_var):
c1 = positive(dmat, a, coding, group_sep, grouping_var)
c2 = positive(dmat, b, coding, group_sep, grouping_var)
return c1-c2
def negative(dmat, a, coding, group_sep, grouping_var):
con = 0-positive(dmat, a, coding, group_sep, grouping_var)
return con
def create_dummy_string(length):
ppstring = ""
for i in range(0, length):
ppstring += '\t' + '%1.5e' %(1.0)
ppstring += '\n'
return ppstring
def create_con_file(con_dict, col_names, file_name, current_output, out_dir):
import os
print "col names: "
print col_names
with open(os.path.join(out_dir, file_name) + ".con",'w+') as f:
num = 1
for key in con_dict:
f.write("/ContrastName%s\t%s\n" %(num,key))
num += 1
f.write("/NumWaves\t%d\n" %len(con_dict[key]))
f.write("/NumContrasts\t%d\n" %len(con_dict))
f.write("/PPheights%s" %create_dummy_string(len(con_dict[key])))
f.write("/RequiredEffect%s" %create_dummy_string(len(con_dict[key])))
f.write("\n\n")
col_string = '\n'
for col in col_names:
col_string = col_string + col + '\t'
print >>f, col_string, '\n'
f.write("/Matrix\n")
for key in con_dict:
for v in con_dict[key]:
f.write("%1.5e\t" %v)
f.write("\n")
def create_fts_file(ftest_list, con_dict, model_name, current_output,
out_dir):
import os
import numpy as np
try:
print "\nFound f-tests in your model, writing f-tests file " \
"(.fts)..\n"
with open(os.path.join(out_dir, model_name + '.fts'), 'w') as f:
print >>f, '/NumWaves\t', len(con_dict)
print >>f, '/NumContrasts\t', len(ftest_list)
ftst = []
for ftest_string in ftest_list:
ftest_vector = []
cons_in_ftest = ftest_string.split(",")
for con in con_dict.keys():
if con in cons_in_ftest:
ftest_vector.append(1)
else:
ftest_vector.append(0)
ftst.append(ftest_vector)
fts_n = np.array(ftst)
col_string = '\n'
for con in con_dict.keys():
col_string = col_string + con + '\t'
print >>f, col_string, '\n'
print >>f, '/Matrix'
for i in range(fts_n.shape[0]):
print >>f, ' '.join(fts_n[i].astype('str'))
except Exception as e:
filepath = os.path.join(out_dir, "model_files", current_output, \
model_name + '.fts')
errmsg = "\n\n[!] CPAC says: Could not create .fts file for " \
"FLAMEO or write it to disk.\nAttempted filepath: %s\n" \
"Error details: %s\n\n" % (filepath, e)
raise Exception(errmsg)
def create_con_ftst_file(con_file, model_name, current_output, output_dir,
column_names, coding_scheme, group_sep):
"""
Create the contrasts and fts file
"""
import os
import numpy as np
with open(con_file, "r") as f:
evs = f.readline()
evs = evs.rstrip('\r\n').split(',')
count_ftests = 0
# TODO: would have the Intercept added to it? but what if it wasn't?
fTest = False
print "evs: "
print evs
for ev in evs:
if "f_test" in ev:
count_ftests += 1
if count_ftests > 0:
fTest = True
try:
data = np.genfromtxt(con_file, names=True, delimiter=',', dtype=None)
except:
print "Error: Could not successfully read in contrast file: ",con_file
raise Exception
lst = data.tolist()
ftst = []
fts_columns = []
contrasts = []
contrast_names = []
length = None
length = len(list(lst[0]))
for tp in lst:
contrast_names.append(tp[0])
con_vector = list(tp)[1:(length-count_ftests)]
fts_vector = list(tp)[(length-count_ftests):length]
fts_columns.append(fts_vector)
contrasts.append(con_vector)
num_EVs_in_con_file = len(contrasts[0])
contrasts = np.array(contrasts, dtype=np.float16)
fts_columns = np.array(fts_columns)
if fTest:
if len(contrast_names) < 2:
errmsg = "\n\n[!] CPAC says: Not enough contrasts for running " \
"f-tests.\nTip: Do you have only one contrast in your " \
"contrasts file? f-tests require more than one contrast.\n"\
"Either remove the f-tests or include more contrasts.\n\n"
raise Exception(errmsg)
fts_n = fts_columns.T
if len(column_names) != (num_EVs_in_con_file):
err_string = "\n\n[!] CPAC says: The number of EVs in your model " \
"design matrix (found in the %s.mat file) does not " \
"match the number of EVs (columns) in your custom " \
"contrasts matrix CSV file.\n\nCustom contrasts matrix "\
"file: %s\n\nNumber of EVs in design matrix: %d\n" \
"Number of EVs in contrasts file: %d\n\nThe column " \
"labels in the design matrix should match those in " \
"your contrasts .CSV file.\nColumn labels in design " \
"matrix:\n%s" % (model_name, con_file, \
len(column_names), num_EVs_in_con_file, \
str(column_names))
raise Exception(err_string)
for design_mat_col, con_csv_col in zip(column_names, evs[1:]):
if con_csv_col not in design_mat_col:
errmsg = "\n\n[!] CPAC says: The names of the EVs in your " \
"custom contrasts .csv file do not match the names or " \
"order of the EVs in the design matrix. Please make " \
"sure these are consistent.\nDesign matrix EV columns: "\
"%s\nYour contrasts matrix columns: %s\n\n" \
% (column_names, evs[1:])
raise Exception(errmsg)
out_dir = os.path.join(output_dir, model_name + '.con')
with open(out_dir,"wt") as f:
idx = 1
pp_str = '/PPheights'
re_str = '/RequiredEffect'
for name in contrast_names:
print >>f, '/ContrastName%d' %idx, '\t', name
pp_str += '\t%1.5e' %(1)
re_str += '\t%1.5e' %(1)
idx += 1
print >>f, '/NumWaves\t', (contrasts.shape)[1]
print >>f, '/NumContrasts\t', (contrasts.shape)[0]
print >>f, pp_str
print >>f, re_str + '\n'
col_string = '\n'
for ev in evs:
col_string = col_string + ev + '\t'
print >>f, col_string, '\n'
print >>f, '/Matrix'
np.savetxt(f, contrasts, fmt='%1.5e', delimiter='\t')
if fTest:
print "\nFound f-tests in your model, writing f-tests file (.fts)..\n"
ftest_out_dir = os.path.join(output_dir, model_name + '.fts')
with open(ftest_out_dir,"wt") as f:
print >>f, '/NumWaves\t', (contrasts.shape)[0]
print >>f, '/NumContrasts\t', count_ftests
col_string = '\n'
for con in contrast_names:
col_string = col_string + con + '\t'
print >>f, col_string, '\n'
print >>f, '/Matrix'
for i in range(fts_n.shape[0]):
print >>f, ' '.join(fts_n[i].astype('str'))
def process_contrast(parsed_contrast, operator, ev_selections, group_sep, \
grouping_var, coding_scheme):
parsed_EVs_in_contrast = []
EVs_in_contrast = parsed_contrast.split(operator)
if '' in EVs_in_contrast:
EVs_in_contrast.remove('')
for EV in EVs_in_contrast:
skip = 0
if 'categorical' in ev_selections.keys():
for cat_EV in ev_selections['categorical']:
                # do not let the grouping variable (which is now present in
                # other EV names) confound this operation
if group_sep == True:
gpvar = grouping_var
else:
gpvar = "..."
if (cat_EV in EV) and not (gpvar in EV and \
"__" in EV):
# handle interactions
if ":" in EV:
temp_split_EV = EV.split(":")
for interaction_EV in temp_split_EV:
if cat_EV in interaction_EV:
current_EV = interaction_EV
else:
current_EV = EV
if coding_scheme == 'Treatment':
cat_EV_contrast = EV.replace(EV, 'C(' + cat_EV + \
')[T.' + current_EV+\
']')
elif coding_scheme == 'Sum':
cat_EV_contrast = EV.replace(EV, 'C(' + cat_EV + \
', Sum)[S.' + \
current_EV + ']')
parsed_EVs_in_contrast.append(cat_EV_contrast)
skip = 1
if skip == 0:
parsed_EVs_in_contrast.append(EV)
# handle interactions
if ":" in EV and len(parsed_EVs_in_contrast) == 2:
parsed_EVs_in_contrast = [parsed_EVs_in_contrast[0] + ":" + \
parsed_EVs_in_contrast[1]]
if ":" in EV and len(parsed_EVs_in_contrast) == 3:
parsed_EVs_in_contrast = [parsed_EVs_in_contrast[0], \
parsed_EVs_in_contrast[1] + ":" + \
parsed_EVs_in_contrast[2]]
return parsed_EVs_in_contrast
def run(group_config, current_output, param_file=None, \
derivative_means_dict=None, roi_means_dict=None, \
model_out_dir=None, CPAC_run=True):
import os
import csv
import numpy as np
# open the GROUP ANALYSIS FSL .YML CONFIG FILE, not the main pipeline
# config .yml file!
if CPAC_run:
c = group_config
else:
try:
c = Configuration(yaml.load(open(os.path.realpath(group_config), \
'r')))
except:
raise Exception("Error in reading %s configuration file" \
% group_config)
# pull in the gpa settings!
ph = c.pheno_file
sublist = c.subject_list
ev_selections = c.ev_selections
subject_id_label = c.subject_id_label
formula = c.design_formula
coding_scheme = c.coding_scheme[0]
group_sep = c.group_sep
grouping_var = c.grouping_var
contrasts = c.contrasts
f_tests = c.f_tests
custom_contrasts = c.custom_contrasts
model_name = c.model_name
output_dir = c.output_dir
# make sure the group analysis output directory exists
try:
if not os.path.exists(output_dir):
os.makedirs(output_dir)
except:
print '\n\n[!] CPAC says: Could not successfully create the group ' \
'analysis output directory:\n', output_dir, '\n\nMake sure ' \
'you have write access in this file structure.\n\n\n'
raise Exception
measure_dict = {}
# extract motion measures from CPAC-generated power params file
if param_file != None:
measure_dict = get_measure_dict(param_file)
# combine the motion measures dictionary with the measure_mean
# dictionary (if it exists)
if derivative_means_dict:
measure_dict["Measure_Mean"] = derivative_means_dict
# create the .mat and .grp files for FLAME
    design_matrix, regressor_names = create_design_matrix(ph, ev_selections, \
                                   formula, subject_id_label, \
                                   sublist, coding_scheme, \
                                   grouping_var, measure_dict, \
                                   roi_means_dict, model_out_dir, \
                                   model_name, current_output)
# Create contrasts_dict dictionary for the .con file generation later
contrasts_list = contrasts
contrasts_dict = {}
for contrast in contrasts_list:
# each 'contrast' is a string the user input of the desired contrast
# remove all spaces
parsed_contrast = contrast.replace(' ', '')
EVs_in_contrast = []
parsed_EVs_in_contrast = []
if '>' in parsed_contrast:
parsed_EVs_in_contrast = \
process_contrast(parsed_contrast, '>', ev_selections, \
group_sep, grouping_var, coding_scheme)
contrasts_dict[parsed_contrast] = \
greater_than(design_matrix, parsed_EVs_in_contrast[0], \
parsed_EVs_in_contrast[1], coding_scheme, \
group_sep, grouping_var)
elif '<' in parsed_contrast:
parsed_EVs_in_contrast = \
process_contrast(parsed_contrast, '<', ev_selections, \
group_sep, grouping_var, coding_scheme)
contrasts_dict[parsed_contrast] = \
greater_than(design_matrix, parsed_EVs_in_contrast[1], \
parsed_EVs_in_contrast[0], coding_scheme, \
group_sep, grouping_var)
else:
contrast_string = parsed_contrast.replace('+',',+,')
contrast_string = contrast_string.replace('-',',-,')
contrast_items = contrast_string.split(',')
if '' in contrast_items:
contrast_items.remove('')
if '+' in contrast_items and len(contrast_items) == 2:
parsed_EVs_in_contrast = \
process_contrast(parsed_contrast, '+', ev_selections, \
group_sep, grouping_var, coding_scheme)
contrasts_dict[parsed_contrast] = \
positive(design_matrix, parsed_EVs_in_contrast[0], \
coding_scheme, group_sep, grouping_var)
elif '-' in contrast_items and len(contrast_items) == 2:
parsed_EVs_in_contrast = \
process_contrast(parsed_contrast, '-', ev_selections, \
group_sep, grouping_var, coding_scheme)
contrasts_dict[parsed_contrast] = \
negative(design_matrix, parsed_EVs_in_contrast[0], \
coding_scheme, group_sep, grouping_var)
if len(contrast_items) > 2:
idx = 0
for item in contrast_items:
# they need to be put back into Patsy formatted header
# titles because the dmatrix gets passed into the function
# that writes out the contrast matrix
if 'categorical' in ev_selections.keys():
for cat_EV in ev_selections['categorical']:
if cat_EV in item:
if coding_scheme == 'Treatment':
item = item.replace(item, \
'C(' + cat_EV + ')[T.' + item + ']')
elif coding_scheme == 'Sum':
item = item.replace(item, \
'C(' + cat_EV + ', Sum)[S.' + item + ']')
if idx == 0:
if item != '+' and item != '-':
                        contrast_vector = positive(design_matrix, item, \
                                               coding_scheme, group_sep, \
                                               grouping_var)
if parsed_contrast not in contrasts_dict.keys():
contrasts_dict[parsed_contrast] = contrast_vector
else:
contrasts_dict[parsed_contrast] += contrast_vector
elif idx != 0:
if item != '+' and item != '-':
if contrast_items[idx-1] == '+':
                            contrast_vector = positive(design_matrix, item, \
coding_scheme, group_sep,\
grouping_var)
if parsed_contrast not in contrasts_dict.keys():
contrasts_dict[parsed_contrast] = contrast_vector
else:
contrasts_dict[parsed_contrast] += contrast_vector
if contrast_items[idx-1] == '-':
                            contrast_vector = negative(design_matrix, item, \
coding_scheme, group_sep,\
grouping_var)
if parsed_contrast not in contrasts_dict.keys():
contrasts_dict[parsed_contrast] = contrast_vector
else:
contrasts_dict[parsed_contrast] += contrast_vector
idx += 1
# finally write out the .con file and .fts file (if f-tests)
if (custom_contrasts == None) or (custom_contrasts == '') or \
("None" in custom_contrasts):
print "Writing contrasts file (.con) based on contrasts provided " \
"using the group analysis model builder's contrasts editor.."
create_con_file(contrasts_dict, regressor_names, model_name, \
current_output, model_out_dir)
if f_tests:
create_fts_file(f_tests, contrasts_dict, model_name, \
current_output, model_out_dir)
else:
print "\nWriting contrasts file (.con) based on contrasts provided " \
"with a custom contrasts matrix CSV file..\n"
create_con_ftst_file(custom_contrasts, model_name, current_output, \
model_out_dir, regressor_names, \
coding_scheme, group_sep)
| false | true |
f700578bbd38004bdff93a11c4a048931a765100 | 977 | py | Python | office365/runtime/compat.py | rikeshtailor/Office365-REST-Python-Client | ca7bfa1b22212137bb4e984c0457632163e89a43 | [
"MIT"
] | null | null | null | office365/runtime/compat.py | rikeshtailor/Office365-REST-Python-Client | ca7bfa1b22212137bb4e984c0457632163e89a43 | [
"MIT"
] | null | null | null | office365/runtime/compat.py | rikeshtailor/Office365-REST-Python-Client | ca7bfa1b22212137bb4e984c0457632163e89a43 | [
"MIT"
] | null | null | null | import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
if is_py2:
from urlparse import urlparse
from urllib import quote
from urlparse import urljoin
import pytz as timezone
from email import message_from_string as message_from_bytes_or_string
from __builtin__ import xrange as range_or_xrange
elif is_py3:
from urllib.parse import urlparse
from urllib.parse import quote
from urllib.parse import urljoin
from datetime import timezone
from email import message_from_bytes as message_from_bytes_or_string
from builtins import range as range_or_xrange
def message_as_bytes_or_string(message):
if is_py2:
return message.as_string()
else:
return message.as_bytes()
def is_string_type(value):
if is_py2:
return isinstance(value, basestring)
else:
return type(value) is str
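# usage sketch: callers import the version-agnostic names from this module,
# e.g. urlparse("https://example.com/a?b=1").netloc behaves the same on
# Python 2 and Python 3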
| 22.204545 | 73 | 0.708291 | import sys
_ver = sys.version_info
is_py2 = (_ver[0] == 2)
is_py3 = (_ver[0] == 3)
if is_py2:
from urlparse import urlparse
from urllib import quote
from urlparse import urljoin
import pytz as timezone
from email import message_from_string as message_from_bytes_or_string
from __builtin__ import xrange as range_or_xrange
elif is_py3:
from urllib.parse import urlparse
from urllib.parse import quote
from urllib.parse import urljoin
from datetime import timezone
from email import message_from_bytes as message_from_bytes_or_string
from builtins import range as range_or_xrange
def message_as_bytes_or_string(message):
if is_py2:
return message.as_string()
else:
return message.as_bytes()
def is_string_type(value):
if is_py2:
return isinstance(value, basestring)
else:
return type(value) is str
| true | true |
f70058779cdd3ba0324c41a9eaa37779092a6ad0 | 715 | py | Python | motiv/sync/mixin.py | SaadTalaat/motiv | 5d41fd100ece50b28b137447d33ed00a4050b3da | [
"Apache-2.0"
] | null | null | null | motiv/sync/mixin.py | SaadTalaat/motiv | 5d41fd100ece50b28b137447d33ed00a4050b3da | [
"Apache-2.0"
] | 9 | 2019-03-29T15:01:22.000Z | 2020-09-18T12:52:45.000Z | motiv/sync/mixin.py | SaadTalaat/motiv | 5d41fd100ece50b28b137447d33ed00a4050b3da | [
"Apache-2.0"
] | null | null | null | """motiv synchronization primitives
Module:
Using a uniform interface to define synchronization
primitives helps us use multiple execution frameworks
without changing any of the code written.
for example, multiprocessing vs threading.
"""
import abc
class SystemEvent(abc.ABC):
"""Event abstract class"""
@abc.abstractmethod
def is_set(self):
"""checks if the event is set."""
@abc.abstractmethod
def set(self):
"""sets the event"""
@abc.abstractmethod
def clear(self):
"""clears the event"""
@abc.abstractmethod
def wait(self, *args, **kwargs):
"""waits till event is set"""
__all__ = [
'SystemEvent',
]
| 19.861111 | 57 | 0.636364 |
import abc
class SystemEvent(abc.ABC):
@abc.abstractmethod
def is_set(self):
@abc.abstractmethod
def set(self):
@abc.abstractmethod
def clear(self):
@abc.abstractmethod
def wait(self, *args, **kwargs):
__all__ = [
'SystemEvent',
]
| true | true |
f700596493274b2208efa335ed2a2ced29634349 | 1,595 | py | Python | profiles_api/models.py | nafis-badar/profiles-rest-api | 9188a8dbdad3b7fdf462d38d664f1756b788e2c3 | [
"MIT"
] | null | null | null | profiles_api/models.py | nafis-badar/profiles-rest-api | 9188a8dbdad3b7fdf462d38d664f1756b788e2c3 | [
"MIT"
] | null | null | null | profiles_api/models.py | nafis-badar/profiles-rest-api | 9188a8dbdad3b7fdf462d38d664f1756b788e2c3 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin,BaseUserManager
# Create your models here.
class UserProfileManager(BaseUserManager):
"""Manager for user profiles """
def create_user(self,email,name,password=None):
""" Create a New user profile"""
if not email:
raise ValueError("User must gave an email address")
email=self.normalize_email(email)
user=self.model(email=email,name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self,email,name,password):
"""Create and save a new superuser with given details """
        user=self.create_user(email,name,password)
user.is_superuser=True
user.is_staff=True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser,PermissionsMixin):
""" Database model for users in the system """
email=models.EmailField(max_length=100,unique=True)
name=models.CharField(max_length=255)
is_active=models.BooleanField(default=True)
is_staff=models.BooleanField(default=False)
objects=UserProfileManager()
USERNAME_FIELD='email'
REQUIRED_FIELDS=['name']
def get_full_name(self):
""" Retrieve full name of user """
return self.name
def get_short_name(self):
""" Retrieve short name of User """
return self.name
def __str__(self):
"""Return String representation """
return self.email
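# usage sketch (hypothetical values; assumes the project's settings point
# AUTH_USER_MODEL at this model):
#   user = UserProfile.objects.create_user(
#       email='user@example.com', name='Example User', password='secret')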
| 29.537037 | 71 | 0.684639 | from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin,BaseUserManager
class UserProfileManager(BaseUserManager):
def create_user(self,email,name,password=None):
if not email:
raise ValueError("User must gave an email address")
email=self.normalize_email(email)
user=self.model(email=email,name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self,email,name,password):
        user=self.create_user(email,name,password)
user.is_superuser=True
user.is_staff=True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser,PermissionsMixin):
email=models.EmailField(max_length=100,unique=True)
name=models.CharField(max_length=255)
is_active=models.BooleanField(default=True)
is_staff=models.BooleanField(default=False)
objects=UserProfileManager()
USERNAME_FIELD='email'
REQUIRED_FIELDS=['name']
def get_full_name(self):
return self.name
def get_short_name(self):
return self.name
def __str__(self):
return self.email
| true | true |
f7005979eaa3f5c9c2b34b11e8e9c3da9e1602fc | 2,997 | bzl | Python | tensorflow_decision_forests/keras/wrapper/wrapper.bzl | xsSilva/decision-forests | 5eb3524f4d86d1fa177dae7caba5c6e4516d7030 | [
"Apache-2.0"
] | null | null | null | tensorflow_decision_forests/keras/wrapper/wrapper.bzl | xsSilva/decision-forests | 5eb3524f4d86d1fa177dae7caba5c6e4516d7030 | [
"Apache-2.0"
] | null | null | null | tensorflow_decision_forests/keras/wrapper/wrapper.bzl | xsSilva/decision-forests | 5eb3524f4d86d1fa177dae7caba5c6e4516d7030 | [
"Apache-2.0"
] | null | null | null | """Rule generation utilities."""
load("@org_tensorflow//tensorflow:tensorflow.bzl", "if_not_windows", "tf_binary_additional_srcs", "tf_cc_binary", "tf_copts")
load("//tensorflow_decision_forests/tensorflow:utils.bzl", "rpath_linkopts_to_tensorflow")
def py_wrap_yggdrasil_learners(
name = None,
learner_deps = []):
"""Creates Keras wrappers around Yggdrasil Decision Forest (YDF) learners.
Creates a py_library called "{name}" and containing the file "{name}.py".
This library introduces a TensorFlow Decision Forests (TFDF) Keras class
wrapping for each YDF learner defined in "learner_deps". The constructor of
these classes contains a argument for the learner generic hyper-parameter.
For example, if "learner_deps" contains a c++ dependency that register a
learner with a key equal to "RANDOM_FOREST", the wrapper will create a
python class called "RandomForestModel" deriving the base TFDF model class.
Args:
name: Name of the rule.
learner_deps: List of dependencies linking Yggdrasil Decision Forest
learners.
"""
# Absolute path to the wrapper generator directory.
wrapper_package = "//tensorflow_decision_forests/keras/wrapper"
# Filename of the wrapper generator source code in the user package.
local_cc_main = name + "_wrapper_main.cc"
# Target name of the wrapper generator binary.
wrapper_name = name + "_wrapper_main"
# Target name of the command running the wrapper generator.
run_wrapper_name = name + "_run_wrapper"
# Copy the wrapper main source code to the user package.
native.genrule(
name = name + "_copy_cc_main",
outs = [local_cc_main],
srcs = [wrapper_package + ":wrapper_main.cc"],
cmd = "cp $< $@",
)
# Compiles the wrapper binary.
tf_cc_binary(
name = wrapper_name,
copts = tf_copts(),
linkopts = if_not_windows(["-lm", "-Wl,-ldl"]) + rpath_linkopts_to_tensorflow(wrapper_name),
srcs = [":" + local_cc_main],
deps = [
wrapper_package + ":wrapper",
] + learner_deps,
linkstatic = 1,
)
# Runs the wrapper binary and generate the wrapper .py source code.
native.genrule(
name = run_wrapper_name,
srcs = [],
outs = [name + ".py"],
cmd = "$(location " + wrapper_name + ") > \"$@\"",
tools = [":" + wrapper_name] + tf_binary_additional_srcs(),
)
# Python library around the generated .py source code.
native.py_library(
name = name,
srcs = [name + ".py"],
srcs_version = "PY3",
deps = [
"//tensorflow_decision_forests/keras:core",
"@org_tensorflow//tensorflow/python",
"@ydf//yggdrasil_decision_forests/model:abstract_model_py_proto",
"@ydf//yggdrasil_decision_forests/learner:abstract_learner_py_proto",
],
data = [":" + run_wrapper_name, ":" + wrapper_name],
)
| 37.4625 | 125 | 0.651652 |
load("@org_tensorflow//tensorflow:tensorflow.bzl", "if_not_windows", "tf_binary_additional_srcs", "tf_cc_binary", "tf_copts")
load("//tensorflow_decision_forests/tensorflow:utils.bzl", "rpath_linkopts_to_tensorflow")
def py_wrap_yggdrasil_learners(
name = None,
learner_deps = []):
wrapper_package = "//tensorflow_decision_forests/keras/wrapper"
local_cc_main = name + "_wrapper_main.cc"
wrapper_name = name + "_wrapper_main"
run_wrapper_name = name + "_run_wrapper"
native.genrule(
name = name + "_copy_cc_main",
outs = [local_cc_main],
srcs = [wrapper_package + ":wrapper_main.cc"],
cmd = "cp $< $@",
)
tf_cc_binary(
name = wrapper_name,
copts = tf_copts(),
linkopts = if_not_windows(["-lm", "-Wl,-ldl"]) + rpath_linkopts_to_tensorflow(wrapper_name),
srcs = [":" + local_cc_main],
deps = [
wrapper_package + ":wrapper",
] + learner_deps,
linkstatic = 1,
)
native.genrule(
name = run_wrapper_name,
srcs = [],
outs = [name + ".py"],
cmd = "$(location " + wrapper_name + ") > \"$@\"",
tools = [":" + wrapper_name] + tf_binary_additional_srcs(),
)
native.py_library(
name = name,
srcs = [name + ".py"],
srcs_version = "PY3",
deps = [
"//tensorflow_decision_forests/keras:core",
"@org_tensorflow//tensorflow/python",
"@ydf//yggdrasil_decision_forests/model:abstract_model_py_proto",
"@ydf//yggdrasil_decision_forests/learner:abstract_learner_py_proto",
],
data = [":" + run_wrapper_name, ":" + wrapper_name],
)
| true | true |
f7005b28d042d57735c533e59720388a5a80e44f | 4,801 | py | Python | tools/poor-mans-video-editor.py | PansoK/slp | e2c478b00f8f054b24eebb257e18a57451471c79 | [
"MIT"
] | null | null | null | tools/poor-mans-video-editor.py | PansoK/slp | e2c478b00f8f054b24eebb257e18a57451471c79 | [
"MIT"
] | null | null | null | tools/poor-mans-video-editor.py | PansoK/slp | e2c478b00f8f054b24eebb257e18a57451471c79 | [
"MIT"
] | null | null | null | """
Input: tsv file in the form
Input Video filename | topic | subtopic | title greek | title english | start time | end time | delete segments
input.mp4 | 1 | 1 | έξοδος | output | 00:10:05 | 00:30:10 | 00:11:15-00:12:30,00:20:35-00:22:10
"""
import os
import subprocess
import sys
import yaml
def run_cmd(command: str):
"""run_cmd Run given shell command
Args:
command (str): Shell command to run
Returns:
(int, str): Status code, stdout of shell command
Examples:
>>> run_cmd("ls /")
(0, 'bin\nboot\ndev\netc\nhome\ninit\nlib\nlib32\nlib64\nlibx32\nlost+found\nmedia\nmnt\nopt\nproc\nroot\nrun\nsbin\nsnap\nsrv\nsys\ntmp\nusr\nvar\n')
"""
command = f'{os.getenv("SHELL")} -c "{command}"'
pipe = subprocess.Popen(
command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
stdout = ""
if pipe.stdout is not None:
stdout = "".join(
[line.decode("utf-8") for line in iter(pipe.stdout.readline, b"")]
)
pipe.stdout.close()
returncode = pipe.wait()
print(stdout)
return returncode, stdout
def out_video(segment, greek=True):
title_idx = 3 if greek else 4
title, topic, subtopic = segment[title_idx], segment[1], segment[2]
name = f"{title}_{topic}-{subtopic}.mp4"
return name
def input_video(segment):
return segment[0]
def manage_timestamps(segment):
try:
st, et = segment[5], segment[6]
except:
st = segment[5]
return [st]
try:
delete_timestamps = segment[7]
except:
return [st, et]
if not delete_timestamps:
return [st, et]
else:
return (
[st]
+ [
t
for s in delete_timestamps.split(",")
for t in (s.split("-")[0], s.split("-")[1])
]
+ [et]
)
def to_cut_fmt(timestamp):
out = ""
labels = ["h", "m", "s"]
lb_idx = 0
for c in timestamp:
if c == ":":
out += labels[lb_idx]
lb_idx += 1
else:
out += c
return out
def to_cut_yaml(inmp4, outmp4, ymlname, timestamps):
def pairwise(iterable):
"s -> (s0, s1), (s2, s3), (s4, s5), ..."
a = iter(iterable)
return list(zip(a, a))
timestamps = [to_cut_fmt(t) for t in timestamps]
timeframe = []
if len(timestamps) == 1:
timeframe = [{"from": "start", "to": timestamps[0]}]
else:
for s, e in pairwise(["start"] + timestamps + ["end"]):
timeframe += [{"from": s, "to": e}]
out = {
"input": inmp4,
"output": outmp4,
"cut_method": "delete",
"timeframe": timeframe,
}
with open(ymlname, "w") as fd:
yaml.dump(out, fd, default_flow_style=False, sort_keys=False)
def format_timestamp_args(timestamps):
if len(timestamps) == 1:
return [f"-ss {timestamps[0]} "]
def pairwise(iterable):
"s -> (s0, s1), (s2, s3), (s4, s5), ..."
a = iter(iterable)
return list(zip(a, a))
cmds = [f"-ss {s} -to {e}" for s, e in pairwise(timestamps)]
return cmds
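# illustrative sketch with hypothetical timestamps: the keep-boundaries from
# manage_timestamps() are paired into ffmpeg arguments, e.g.
#   ["00:10:05", "00:11:15", "00:12:30", "00:30:10"]
#   -> ["-ss 00:10:05 -to 00:11:15", "-ss 00:12:30 -to 00:30:10"]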
def ffmpeg(inp, out, timestamps_args):
if len(timestamps_args) == 1:
run_cmd(f"ffmpeg -y -i '{inp}' " + timestamps_args[0] + f" -c:v h265_nvenc -crf 24 -preset fast -c:a copy '{out}'")
return
mp4s = []
for i, arg in enumerate(timestamps_args):
mp4s.append(f"{i}.mp4")
cmd = f"ffmpeg -i '{inp}' " + arg + f" -c:v h265_nvenc -crf 24 -preset fast -c:a copy '{i}.mp4'"
print(cmd)
run_cmd(cmd)
tmp = ".tmp_files.txt"
with open(tmp, "w") as fd:
for f in mp4s:
fd.write(f"file '{f}'\n")
run_cmd(f"ffmpeg -y -f concat -i .tmp_files.txt '{out}'")
run_cmd(f"rm {tmp} " + " ".join(mp4s))
def read_split_tsv(timestamp_file):
with open(timestamp_file) as f:
segments = [ln.strip().split("\t") for ln in f]
return segments
def main():
timestamp_file = sys.argv[1]
segments = read_split_tsv(timestamp_file)
for segment in segments:
inmp4 = input_video(segment)
outmp4 = "out/" + out_video(segment, greek=True)
timestamps = manage_timestamps(segment)
timestamp_args = format_timestamp_args(timestamps)
ffmpeg(inmp4, outmp4, timestamp_args)
def main1():
timestamp_file = sys.argv[1]
segments = read_split_tsv(timestamp_file)
for i, segment in enumerate(segments):
inmp4 = input_video(segment)
outmp4 = out_video(segment, greek=True)
timestamps = manage_timestamps(segment)
to_cut_yaml(inmp4, outmp4, f"{i}.yml", timestamps)
if __name__ == "__main__":
main()
| 25.005208 | 158 | 0.565715 |
import os
import subprocess
import sys
import yaml
def run_cmd(command: str):
command = f'{os.getenv("SHELL")} -c "{command}"'
pipe = subprocess.Popen(
command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
stdout = ""
if pipe.stdout is not None:
stdout = "".join(
[line.decode("utf-8") for line in iter(pipe.stdout.readline, b"")]
)
pipe.stdout.close()
returncode = pipe.wait()
print(stdout)
return returncode, stdout
def out_video(segment, greek=True):
title_idx = 3 if greek else 4
title, topic, subtopic = segment[title_idx], segment[1], segment[2]
name = f"{title}_{topic}-{subtopic}.mp4"
return name
def input_video(segment):
return segment[0]
def manage_timestamps(segment):
try:
st, et = segment[5], segment[6]
except:
st = segment[5]
return [st]
try:
delete_timestamps = segment[7]
except:
return [st, et]
if not delete_timestamps:
return [st, et]
else:
return (
[st]
+ [
t
for s in delete_timestamps.split(",")
for t in (s.split("-")[0], s.split("-")[1])
]
+ [et]
)
def to_cut_fmt(timestamp):
out = ""
labels = ["h", "m", "s"]
lb_idx = 0
for c in timestamp:
if c == ":":
out += labels[lb_idx]
lb_idx += 1
else:
out += c
return out
def to_cut_yaml(inmp4, outmp4, ymlname, timestamps):
def pairwise(iterable):
a = iter(iterable)
return list(zip(a, a))
timestamps = [to_cut_fmt(t) for t in timestamps]
timeframe = []
if len(timestamps) == 1:
timeframe = [{"from": "start", "to": timestamps[0]}]
else:
for s, e in pairwise(["start"] + timestamps + ["end"]):
timeframe += [{"from": s, "to": e}]
out = {
"input": inmp4,
"output": outmp4,
"cut_method": "delete",
"timeframe": timeframe,
}
with open(ymlname, "w") as fd:
yaml.dump(out, fd, default_flow_style=False, sort_keys=False)
def format_timestamp_args(timestamps):
if len(timestamps) == 1:
return [f"-ss {timestamps[0]} "]
def pairwise(iterable):
a = iter(iterable)
return list(zip(a, a))
cmds = [f"-ss {s} -to {e}" for s, e in pairwise(timestamps)]
return cmds
def ffmpeg(inp, out, timestamps_args):
if len(timestamps_args) == 1:
run_cmd(f"ffmpeg -y -i '{inp}' " + timestamps_args[0] + f" -c:v h265_nvenc -crf 24 -preset fast -c:a copy '{out}'")
return
mp4s = []
for i, arg in enumerate(timestamps_args):
mp4s.append(f"{i}.mp4")
cmd = f"ffmpeg -i '{inp}' " + arg + f" -c:v h265_nvenc -crf 24 -preset fast -c:a copy '{i}.mp4'"
print(cmd)
run_cmd(cmd)
tmp = ".tmp_files.txt"
with open(tmp, "w") as fd:
for f in mp4s:
fd.write(f"file '{f}'\n")
run_cmd(f"ffmpeg -y -f concat -i .tmp_files.txt '{out}'")
run_cmd(f"rm {tmp} " + " ".join(mp4s))
def read_split_tsv(timestamp_file):
with open(timestamp_file) as f:
segments = [ln.strip().split("\t") for ln in f]
return segments
def main():
timestamp_file = sys.argv[1]
segments = read_split_tsv(timestamp_file)
for segment in segments:
inmp4 = input_video(segment)
outmp4 = "out/" + out_video(segment, greek=True)
timestamps = manage_timestamps(segment)
timestamp_args = format_timestamp_args(timestamps)
ffmpeg(inmp4, outmp4, timestamp_args)
def main1():
timestamp_file = sys.argv[1]
segments = read_split_tsv(timestamp_file)
for i, segment in enumerate(segments):
inmp4 = input_video(segment)
outmp4 = out_video(segment, greek=True)
timestamps = manage_timestamps(segment)
to_cut_yaml(inmp4, outmp4, f"{i}.yml", timestamps)
if __name__ == "__main__":
main()
| true | true |
f7005b4ebd505da9c9784f4e6d7f8617d47934f7 | 590 | py | Python | pyqldb/__init__.py | awslabs/amazon-qldb-driver-python | 05ed5463189a7126983052fe0e3bc3486b9136ff | [
"Apache-2.0"
] | 39 | 2019-10-30T09:01:54.000Z | 2022-02-06T18:30:27.000Z | pyqldb/__init__.py | awslabs/amazon-qldb-driver-python | 05ed5463189a7126983052fe0e3bc3486b9136ff | [
"Apache-2.0"
] | 30 | 2020-03-05T23:52:57.000Z | 2022-03-24T16:04:06.000Z | pyqldb/__init__.py | awslabs/amazon-qldb-driver-python | 05ed5463189a7126983052fe0e3bc3486b9136ff | [
"Apache-2.0"
] | 28 | 2019-12-09T17:21:59.000Z | 2022-01-25T11:57:51.000Z | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
# the License. A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
__version__ = '3.2.2'
| 45.384615 | 118 | 0.759322 |
__version__ = '3.2.2'
| true | true |
f7005b6cc167114b14a57d483e597b70ad7731ba | 5,077 | py | Python | init.py | shackspace/one_button | a8b23ffdd0bd39497d465c6d7af2af4464063a48 | [
"WTFPL"
] | 1 | 2015-04-03T21:19:30.000Z | 2015-04-03T21:19:30.000Z | init.py | shackspace/one_button | a8b23ffdd0bd39497d465c6d7af2af4464063a48 | [
"WTFPL"
] | null | null | null | init.py | shackspace/one_button | a8b23ffdd0bd39497d465c6d7af2af4464063a48 | [
"WTFPL"
] | null | null | null | #!/usr/bin/env python
from threading import Timer,Thread
import RPIO
from RPIO import PWM
import paramiko
import json
import sys
from time import time, sleep
from relaxxapi.relaxxapi import relaxx
r = None
sftp_base_path = "/home/shack/music"
button = 4
loud1 = 21
loud2 = 22
state = 0
def init_state():
state = 0
RPIO.setup(loud1, RPIO.OUT)
RPIO.setup(loud2, RPIO.OUT)
t1_2 = 1
timer=None
t2_4 = 1
t4_5 = 3
def time3_trans():
global state
if state is 4:
state = 5
stop_sirene1()
stop_sirene2()
disable_all_timers()
delete_current_music()
state = 0
else:
print("State is not 4, will do nothing")
def time2_trans():
global state
global timer
if state is 2:
state = 4
start_sirene2()
timer= Timer(t4_5,time3_trans).start()
else:
print("State is not 2, will do nothing")
def time1_trans():
global state
global timer
if state is 1:
state = 2
start_sirene1()
timer=Timer(t2_4,time2_trans).start()
else:
print("State is not 1, will do nothing")
def btn_trans(a,edge):
global state
global timer
print("Button: %s , edge: %s, state: %d" % (str(a), str(edge),state))
if edge and state is 0:
state = 1
timer=Timer(t1_2,time1_trans).start()
# stopped pressing the button but the timeout is not over
elif not edge and (state is 1 or state is 4 or state is 2):
state = 0
disable_all_timers()
stop_sirene1()
stop_sirene2()
try:
play_next()
except:
tell_gobbelz("Cannot play next song. Sorry:(")
tell_gobbelz("Bailing out")
sys.exit(1)
elif not edge and state is 5:
print("button released while removing music, all fine")
else:
print("this should never happen")
def disable_all_timers():
print("disabling all the timers")
global timer
try:
timer.cancel()
print("timer canceled")
except: pass
def start_sirene1():
print("start Sirene 1")
RPIO.output(loud1, True)
def start_sirene2():
print("starting Sirene 2")
RPIO.output(loud2, True)
def stop_sirene1():
print("stopping Sirene 1")
RPIO.output(loud1, False)
def stop_sirene2():
print("stopping Sirene 2")
RPIO.output(loud2, False)
def play_radio():
#TODO play radio
if r.get_current().get("file", "") == "http://ice.somafm.com/groovesalad":
print("will not skip own sender")
return
print("playing radio")
tell_gobbelz("Starting Radio Stream")
r.add_song("http://ice.somafm.com/groovesalad")
r.play_last()
def play_next():
print ("playing next song")
try:
#sanity
if is_last_song():
raise Exception("Last song in playlist")
r.next_song()
except:
print("no next song, starting radio")
play_radio()
def is_last_song():
return r.get_current()["Pos"] == r.get_last()["Pos"]
def delete_current_music():
print("delete current music")
current = r.get_current()
if not current:
print("Nothing is running, bailing out")
return
delete_remote_file(current)
play_next()
def delete_remote_file(current):
try:
sftp_delete_remote_file(current["file"])
say_song_killed(current.get("Title", "Unbekannter Title"),
current.get("Artist", "Unbekannter Kuenstler"))
except Exception as e:
print("Cannot delete remote file! ( %s ) " %str(e))
def sftp_delete_remote_file(f):
host = "mpd.shack"
port = 22
transport = paramiko.Transport((host, port))
username = 'shack'
passwd = 'shackit'
transport.connect(username=username, password=passwd)
sftp = paramiko.SFTPClient.from_transport(transport)
#print(sftp.stat('%s/%s'%(base_path,f)))
print(sftp.unlink('%s/%s' % (sftp_base_path, f)))
sftp.close()
transport.close()
def say_song_killed(name, author):
tell_gobbelz('%s von %s wurde vernichtet!' % (name, author) )
def tell_gobbelz(text):
import requests
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
data = {'text': text}
# curl -i -H "content-type: application/json"
# -X POST -d "{\"text\" : \"Hallo shackspace\"}" kiosk.shack:8080/say/
requests.post("http://kiosk.shack:8080/say/",
data=json.dumps(data), headers=headers)
if __name__ == "__main__":
from time import sleep
init_state()
print("initializing relaxxapi")
try:
r = relaxx(relaxxurl="http://lounge.mpd.shack/")
except:
tell_gobbelz("EM PE DE unreachable!")
tell_gobbelz("Bailing out")
sys.exit(1)
print("adding interrupt")
RPIO.add_interrupt_callback(button,callback=btn_trans,pull_up_down=RPIO.PUD_DOWN) #,debounce_timeout_ms=1
print ("Start Interrupt handler")
RPIO.wait_for_interrupts()
#Thread(target=start_hal,args=(hal_speed,)).start()
| 24.887255 | 109 | 0.621233 | from threading import Timer,Thread
import RPIO
from RPIO import PWM
import paramiko
import json
import sys
from time import time, sleep
from relaxxapi.relaxxapi import relaxx
r = None
sftp_base_path = "/home/shack/music"
button = 4
loud1 = 21
loud2 = 22
state = 0
def init_state():
state = 0
RPIO.setup(loud1, RPIO.OUT)
RPIO.setup(loud2, RPIO.OUT)
t1_2 = 1
timer=None
t2_4 = 1
t4_5 = 3
def time3_trans():
global state
if state is 4:
state = 5
stop_sirene1()
stop_sirene2()
disable_all_timers()
delete_current_music()
state = 0
else:
print("State is not 4, will do nothing")
def time2_trans():
global state
global timer
if state is 2:
state = 4
start_sirene2()
timer= Timer(t4_5,time3_trans).start()
else:
print("State is not 2, will do nothing")
def time1_trans():
global state
global timer
if state is 1:
state = 2
start_sirene1()
timer=Timer(t2_4,time2_trans).start()
else:
print("State is not 1, will do nothing")
def btn_trans(a,edge):
global state
global timer
print("Button: %s , edge: %s, state: %d" % (str(a), str(edge),state))
if edge and state is 0:
state = 1
timer=Timer(t1_2,time1_trans).start()
elif not edge and (state is 1 or state is 4 or state is 2):
state = 0
disable_all_timers()
stop_sirene1()
stop_sirene2()
try:
play_next()
except:
tell_gobbelz("Cannot play next song. Sorry:(")
tell_gobbelz("Bailing out")
sys.exit(1)
elif not edge and state is 5:
print("button released while removing music, all fine")
else:
print("this should never happen")
def disable_all_timers():
print("disabling all the timers")
global timer
try:
timer.cancel()
print("timer canceled")
except: pass
def start_sirene1():
print("start Sirene 1")
RPIO.output(loud1, True)
def start_sirene2():
print("starting Sirene 2")
RPIO.output(loud2, True)
def stop_sirene1():
print("stopping Sirene 1")
RPIO.output(loud1, False)
def stop_sirene2():
print("stopping Sirene 2")
RPIO.output(loud2, False)
def play_radio():
if r.get_current().get("file", "") == "http://ice.somafm.com/groovesalad":
print("will not skip own sender")
return
print("playing radio")
tell_gobbelz("Starting Radio Stream")
r.add_song("http://ice.somafm.com/groovesalad")
r.play_last()
def play_next():
print ("playing next song")
try:
if is_last_song():
raise Exception("Last song in playlist")
r.next_song()
except:
print("no next song, starting radio")
play_radio()
def is_last_song():
return r.get_current()["Pos"] == r.get_last()["Pos"]
def delete_current_music():
print("delete current music")
current = r.get_current()
if not current:
print("Nothing is running, bailing out")
return
delete_remote_file(current)
play_next()
def delete_remote_file(current):
try:
sftp_delete_remote_file(current["file"])
say_song_killed(current.get("Title", "Unbekannter Title"),
current.get("Artist", "Unbekannter Kuenstler"))
except Exception as e:
print("Cannot delete remote file! ( %s ) " %str(e))
def sftp_delete_remote_file(f):
host = "mpd.shack"
port = 22
transport = paramiko.Transport((host, port))
username = 'shack'
passwd = 'shackit'
transport.connect(username=username, password=passwd)
sftp = paramiko.SFTPClient.from_transport(transport)
print(sftp.unlink('%s/%s' % (sftp_base_path, f)))
sftp.close()
transport.close()
def say_song_killed(name, author):
tell_gobbelz('%s von %s wurde vernichtet!' % (name, author) )
def tell_gobbelz(text):
import requests
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
data = {'text': text}
requests.post("http://kiosk.shack:8080/say/",
data=json.dumps(data), headers=headers)
if __name__ == "__main__":
from time import sleep
init_state()
print("initializing relaxxapi")
try:
r = relaxx(relaxxurl="http://lounge.mpd.shack/")
except:
tell_gobbelz("EM PE DE unreachable!")
tell_gobbelz("Bailing out")
sys.exit(1)
print("adding interrupt")
    RPIO.add_interrupt_callback(button,callback=btn_trans,pull_up_down=RPIO.PUD_DOWN)
    print ("Start Interrupt handler")
RPIO.wait_for_interrupts()
| true | true |
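The button handler above escalates through states with chained threading.Timer objects while the button is held and cancels them on release. A stripped-down sketch of that hold-to-confirm pattern without the RPIO/MPD dependencies (class and function names here are illustrative, not taken from the original file):

import threading

class HoldButton:
    def __init__(self, hold_seconds, on_confirm):
        self.hold_seconds = hold_seconds
        self.on_confirm = on_confirm   # runs only if the press lasts long enough
        self._timer = None

    def pressed(self):
        # arm a timer; it fires only if released() does not cancel it first
        self._timer = threading.Timer(self.hold_seconds, self.on_confirm)
        self._timer.start()

    def released(self):
        # releasing early cancels the pending action
        if self._timer is not None:
            self._timer.cancel()
            self._timer = None

btn = HoldButton(3.0, lambda: print("confirmed: long press"))
btn.pressed()
btn.released()   # released before 3 s, so nothing is printed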
f7005bd1aad9ac2334d62b543f0e7ac8f6381776 | 10,551 | py | Python | napari/_qt/layer_controls/qt_vectors_controls.py | Mishrasubha/napari | c4d1038fc3ed30dc228949cbdedf12826ec2efc2 | [
"BSD-3-Clause"
] | null | null | null | napari/_qt/layer_controls/qt_vectors_controls.py | Mishrasubha/napari | c4d1038fc3ed30dc228949cbdedf12826ec2efc2 | [
"BSD-3-Clause"
] | 3 | 2020-11-14T08:35:18.000Z | 2021-07-26T10:06:32.000Z | napari/_qt/layer_controls/qt_vectors_controls.py | Mishrasubha/napari | c4d1038fc3ed30dc228949cbdedf12826ec2efc2 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QComboBox, QDoubleSpinBox, QLabel
from ...layers.utils._color_manager_constants import ColorMode
from ...utils.translations import trans
from ..utils import qt_signals_blocked
from ..widgets.qt_color_swatch import QColorSwatchEdit
from .qt_layer_controls_base import QtLayerControls
class QtVectorsControls(QtLayerControls):
"""Qt view and controls for the napari Vectors layer.
Parameters
----------
layer : napari.layers.Vectors
An instance of a napari Vectors layer.
Attributes
----------
edge_color_label : qtpy.QtWidgets.QLabel
        Label for edgeColorEdit
    edgeColorEdit : QColorSwatchEdit
        Color swatch widget used to set the display color of vectors.
color_mode_comboBox : qtpy.QtWidgets.QComboBox
Dropdown widget to select edge_color_mode for the vectors.
color_prop_box : qtpy.QtWidgets.QComboBox
Dropdown widget to select _edge_color_property for the vectors.
edge_prop_label : qtpy.QtWidgets.QLabel
Label for color_prop_box
grid_layout : qtpy.QtWidgets.QGridLayout
Layout of Qt widget controls for the layer.
layer : napari.layers.Vectors
An instance of a napari Vectors layer.
lengthSpinBox : qtpy.QtWidgets.QDoubleSpinBox
Spin box widget controlling line length of vectors.
Multiplicative factor on projections for length of all vectors.
widthSpinBox : qtpy.QtWidgets.QDoubleSpinBox
Spin box widget controlling edge line width of vectors.
"""
def __init__(self, layer):
super().__init__(layer)
self.layer.events.edge_width.connect(self._on_edge_width_change)
self.layer.events.length.connect(self._on_length_change)
self.layer.events.edge_color_mode.connect(
self._on_edge_color_mode_change
)
self.layer.events.edge_color.connect(self._on_edge_color_change)
# dropdown to select the property for mapping edge_color
color_properties = self._get_property_values()
color_prop_box = QComboBox(self)
color_prop_box.activated[str].connect(self.change_edge_color_property)
color_prop_box.addItems(color_properties)
self.color_prop_box = color_prop_box
self.edge_prop_label = QLabel(trans._('edge property:'))
# vector direct color mode adjustment and widget
self.edgeColorEdit = QColorSwatchEdit(
initial_color=self.layer.edge_color,
tooltip=trans._(
'click to set current edge color',
),
)
self.edgeColorEdit.color_changed.connect(self.change_edge_color_direct)
self.edge_color_label = QLabel(trans._('edge color:'))
self._on_edge_color_change()
# dropdown to select the edge color mode
colorModeComboBox = QComboBox(self)
color_modes = [e.value for e in ColorMode]
colorModeComboBox.addItems(color_modes)
colorModeComboBox.activated[str].connect(self.change_edge_color_mode)
self.color_mode_comboBox = colorModeComboBox
self._on_edge_color_mode_change()
# line width in pixels
self.widthSpinBox = QDoubleSpinBox()
self.widthSpinBox.setKeyboardTracking(False)
self.widthSpinBox.setSingleStep(0.1)
self.widthSpinBox.setMinimum(0.1)
self.widthSpinBox.setMaximum(np.inf)
self.widthSpinBox.setValue(self.layer.edge_width)
self.widthSpinBox.valueChanged.connect(self.change_width)
# line length
self.lengthSpinBox = QDoubleSpinBox()
self.lengthSpinBox.setKeyboardTracking(False)
self.lengthSpinBox.setSingleStep(0.1)
self.lengthSpinBox.setValue(self.layer.length)
self.lengthSpinBox.setMinimum(0.1)
self.lengthSpinBox.setMaximum(np.inf)
self.lengthSpinBox.valueChanged.connect(self.change_length)
# grid_layout created in QtLayerControls
# addWidget(widget, row, column, [row_span, column_span])
self.grid_layout.addWidget(QLabel(trans._('opacity:')), 0, 0)
self.grid_layout.addWidget(self.opacitySlider, 0, 1, 1, 2)
self.grid_layout.addWidget(QLabel(trans._('width:')), 1, 0)
self.grid_layout.addWidget(self.widthSpinBox, 1, 1, 1, 2)
self.grid_layout.addWidget(QLabel(trans._('length:')), 2, 0)
self.grid_layout.addWidget(self.lengthSpinBox, 2, 1, 1, 2)
self.grid_layout.addWidget(QLabel(trans._('blending:')), 3, 0)
self.grid_layout.addWidget(self.blendComboBox, 3, 1, 1, 2)
self.grid_layout.addWidget(QLabel(trans._('edge color mode:')), 4, 0)
self.grid_layout.addWidget(self.color_mode_comboBox, 4, 1, 1, 2)
self.grid_layout.addWidget(self.edge_color_label, 5, 0)
self.grid_layout.addWidget(self.edgeColorEdit, 5, 1, 1, 2)
self.grid_layout.addWidget(self.edge_prop_label, 6, 0)
self.grid_layout.addWidget(self.color_prop_box, 6, 1, 1, 2)
self.grid_layout.setRowStretch(7, 1)
self.grid_layout.setColumnStretch(1, 1)
self.grid_layout.setSpacing(4)
def change_edge_color_property(self, property: str):
"""Change edge_color_property of vectors on the layer model.
This property is the property the edge color is mapped to.
Parameters
----------
property : str
property to map the edge color to
"""
mode = self.layer.edge_color_mode
try:
self.layer.edge_color = property
self.layer.edge_color_mode = mode
except TypeError:
# if the selected property is the wrong type for the current color mode
# the color mode will be changed to the appropriate type, so we must update
self._on_edge_color_mode_change()
raise
def change_edge_color_mode(self, mode: str):
"""Change edge color mode of vectors on the layer model.
Parameters
----------
mode : str
Edge color for vectors. Must be: 'direct', 'cycle', or 'colormap'
"""
old_mode = self.layer.edge_color_mode
with self.layer.events.edge_color_mode.blocker():
try:
self.layer.edge_color_mode = mode
self._update_edge_color_gui(mode)
except ValueError:
# if the color mode was invalid, revert to the old mode
self.layer.edge_color_mode = old_mode
raise
def change_edge_color_direct(self, color: np.ndarray):
"""Change edge color of vectors on the layer model.
Parameters
----------
color : np.ndarray
Edge color for vectors, in an RGBA array
"""
self.layer.edge_color = color
def change_width(self, value):
"""Change edge line width of vectors on the layer model.
Parameters
----------
value : float
Line width of vectors.
"""
self.layer.edge_width = value
self.widthSpinBox.clearFocus()
self.setFocus()
def change_length(self, value):
"""Change length of vectors on the layer model.
Multiplicative factor on projections for length of all vectors.
Parameters
----------
value : float
Length of vectors.
"""
self.layer.length = value
self.lengthSpinBox.clearFocus()
self.setFocus()
def _update_edge_color_gui(self, mode: str):
"""Update the GUI element associated with edge_color.
This is typically used when edge_color_mode changes
Parameters
----------
mode : str
The new edge_color mode the GUI needs to be updated for.
Should be: 'direct', 'cycle', 'colormap'
"""
if mode in ('cycle', 'colormap'):
self.edgeColorEdit.setHidden(True)
self.edge_color_label.setHidden(True)
self.color_prop_box.setHidden(False)
self.edge_prop_label.setHidden(False)
elif mode == 'direct':
self.edgeColorEdit.setHidden(False)
self.edge_color_label.setHidden(False)
self.color_prop_box.setHidden(True)
self.edge_prop_label.setHidden(True)
def _get_property_values(self):
"""Get the current property values from the Vectors layer
Returns
-------
property_values : np.ndarray
array of all of the union of the property names (keys)
in Vectors.properties and Vectors._property_choices
"""
property_choices = [*self.layer._property_choices]
properties = [*self.layer.properties]
property_values = np.union1d(property_choices, properties)
return property_values
def _on_length_change(self):
"""Change length of vectors."""
with self.layer.events.length.blocker():
self.lengthSpinBox.setValue(self.layer.length)
def _on_edge_width_change(self):
"""Receive layer model width change event and update width spinbox."""
with self.layer.events.edge_width.blocker():
self.widthSpinBox.setValue(self.layer.edge_width)
def _on_edge_color_mode_change(self):
"""Receive layer model edge color mode change event & update dropdown."""
with qt_signals_blocked(self.color_mode_comboBox):
mode = self.layer._edge.color_mode
index = self.color_mode_comboBox.findText(
mode, Qt.MatchFixedString
)
self.color_mode_comboBox.setCurrentIndex(index)
self._update_edge_color_gui(mode)
def _on_edge_color_change(self):
"""Receive layer model edge color change event & update dropdown."""
if (
self.layer._edge.color_mode == ColorMode.DIRECT
and len(self.layer.data) > 0
):
with qt_signals_blocked(self.edgeColorEdit):
self.edgeColorEdit.setColor(self.layer.edge_color[0])
elif self.layer._edge.color_mode in (
ColorMode.CYCLE,
ColorMode.COLORMAP,
):
with qt_signals_blocked(self.color_prop_box):
prop = self.layer._edge.color_properties.name
index = self.color_prop_box.findText(prop, Qt.MatchFixedString)
self.color_prop_box.setCurrentIndex(index)
| 39.223048 | 87 | 0.65444 | import numpy as np
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QComboBox, QDoubleSpinBox, QLabel
from ...layers.utils._color_manager_constants import ColorMode
from ...utils.translations import trans
from ..utils import qt_signals_blocked
from ..widgets.qt_color_swatch import QColorSwatchEdit
from .qt_layer_controls_base import QtLayerControls
class QtVectorsControls(QtLayerControls):
def __init__(self, layer):
super().__init__(layer)
self.layer.events.edge_width.connect(self._on_edge_width_change)
self.layer.events.length.connect(self._on_length_change)
self.layer.events.edge_color_mode.connect(
self._on_edge_color_mode_change
)
self.layer.events.edge_color.connect(self._on_edge_color_change)
color_properties = self._get_property_values()
color_prop_box = QComboBox(self)
color_prop_box.activated[str].connect(self.change_edge_color_property)
color_prop_box.addItems(color_properties)
self.color_prop_box = color_prop_box
self.edge_prop_label = QLabel(trans._('edge property:'))
self.edgeColorEdit = QColorSwatchEdit(
initial_color=self.layer.edge_color,
tooltip=trans._(
'click to set current edge color',
),
)
self.edgeColorEdit.color_changed.connect(self.change_edge_color_direct)
self.edge_color_label = QLabel(trans._('edge color:'))
self._on_edge_color_change()
colorModeComboBox = QComboBox(self)
color_modes = [e.value for e in ColorMode]
colorModeComboBox.addItems(color_modes)
colorModeComboBox.activated[str].connect(self.change_edge_color_mode)
self.color_mode_comboBox = colorModeComboBox
self._on_edge_color_mode_change()
self.widthSpinBox = QDoubleSpinBox()
self.widthSpinBox.setKeyboardTracking(False)
self.widthSpinBox.setSingleStep(0.1)
self.widthSpinBox.setMinimum(0.1)
self.widthSpinBox.setMaximum(np.inf)
self.widthSpinBox.setValue(self.layer.edge_width)
self.widthSpinBox.valueChanged.connect(self.change_width)
self.lengthSpinBox = QDoubleSpinBox()
self.lengthSpinBox.setKeyboardTracking(False)
self.lengthSpinBox.setSingleStep(0.1)
self.lengthSpinBox.setValue(self.layer.length)
self.lengthSpinBox.setMinimum(0.1)
self.lengthSpinBox.setMaximum(np.inf)
self.lengthSpinBox.valueChanged.connect(self.change_length)
self.grid_layout.addWidget(QLabel(trans._('opacity:')), 0, 0)
self.grid_layout.addWidget(self.opacitySlider, 0, 1, 1, 2)
self.grid_layout.addWidget(QLabel(trans._('width:')), 1, 0)
self.grid_layout.addWidget(self.widthSpinBox, 1, 1, 1, 2)
self.grid_layout.addWidget(QLabel(trans._('length:')), 2, 0)
self.grid_layout.addWidget(self.lengthSpinBox, 2, 1, 1, 2)
self.grid_layout.addWidget(QLabel(trans._('blending:')), 3, 0)
self.grid_layout.addWidget(self.blendComboBox, 3, 1, 1, 2)
self.grid_layout.addWidget(QLabel(trans._('edge color mode:')), 4, 0)
self.grid_layout.addWidget(self.color_mode_comboBox, 4, 1, 1, 2)
self.grid_layout.addWidget(self.edge_color_label, 5, 0)
self.grid_layout.addWidget(self.edgeColorEdit, 5, 1, 1, 2)
self.grid_layout.addWidget(self.edge_prop_label, 6, 0)
self.grid_layout.addWidget(self.color_prop_box, 6, 1, 1, 2)
self.grid_layout.setRowStretch(7, 1)
self.grid_layout.setColumnStretch(1, 1)
self.grid_layout.setSpacing(4)
def change_edge_color_property(self, property: str):
mode = self.layer.edge_color_mode
try:
self.layer.edge_color = property
self.layer.edge_color_mode = mode
except TypeError:
self._on_edge_color_mode_change()
raise
def change_edge_color_mode(self, mode: str):
old_mode = self.layer.edge_color_mode
with self.layer.events.edge_color_mode.blocker():
try:
self.layer.edge_color_mode = mode
self._update_edge_color_gui(mode)
except ValueError:
self.layer.edge_color_mode = old_mode
raise
def change_edge_color_direct(self, color: np.ndarray):
self.layer.edge_color = color
def change_width(self, value):
self.layer.edge_width = value
self.widthSpinBox.clearFocus()
self.setFocus()
def change_length(self, value):
self.layer.length = value
self.lengthSpinBox.clearFocus()
self.setFocus()
def _update_edge_color_gui(self, mode: str):
if mode in ('cycle', 'colormap'):
self.edgeColorEdit.setHidden(True)
self.edge_color_label.setHidden(True)
self.color_prop_box.setHidden(False)
self.edge_prop_label.setHidden(False)
elif mode == 'direct':
self.edgeColorEdit.setHidden(False)
self.edge_color_label.setHidden(False)
self.color_prop_box.setHidden(True)
self.edge_prop_label.setHidden(True)
def _get_property_values(self):
property_choices = [*self.layer._property_choices]
properties = [*self.layer.properties]
property_values = np.union1d(property_choices, properties)
return property_values
def _on_length_change(self):
with self.layer.events.length.blocker():
self.lengthSpinBox.setValue(self.layer.length)
def _on_edge_width_change(self):
with self.layer.events.edge_width.blocker():
self.widthSpinBox.setValue(self.layer.edge_width)
def _on_edge_color_mode_change(self):
with qt_signals_blocked(self.color_mode_comboBox):
mode = self.layer._edge.color_mode
index = self.color_mode_comboBox.findText(
mode, Qt.MatchFixedString
)
self.color_mode_comboBox.setCurrentIndex(index)
self._update_edge_color_gui(mode)
def _on_edge_color_change(self):
if (
self.layer._edge.color_mode == ColorMode.DIRECT
and len(self.layer.data) > 0
):
with qt_signals_blocked(self.edgeColorEdit):
self.edgeColorEdit.setColor(self.layer.edge_color[0])
elif self.layer._edge.color_mode in (
ColorMode.CYCLE,
ColorMode.COLORMAP,
):
with qt_signals_blocked(self.color_prop_box):
prop = self.layer._edge.color_properties.name
index = self.color_prop_box.findText(prop, Qt.MatchFixedString)
self.color_prop_box.setCurrentIndex(index)
| true | true |
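The controls above wrap every model-to-GUI sync in qt_signals_blocked(...) so that setting a widget value programmatically does not re-emit its signal and loop back into the layer. A rough sketch of that idea as a context manager built on QObject.blockSignals (an illustrative stand-in, not napari's actual helper):

from contextlib import contextmanager

@contextmanager
def signals_blocked(widget):
    # blockSignals returns the previous blocking state, so nesting stays safe
    previous = widget.blockSignals(True)
    try:
        yield widget
    finally:
        widget.blockSignals(previous)

# usage with any QWidget, e.g. a QComboBox named combo:
# with signals_blocked(combo):
#     combo.setCurrentIndex(2)   # no currentIndexChanged signal is emitted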
f7005d9399a9f18e174ddc2c6c5409f9ec3370f1 | 944 | py | Python | isi_sdk_8_0_1/test/test_hardware_tapes_devices.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_0_1/test/test_hardware_tapes_devices.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_0_1/test/test_hardware_tapes_devices.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 4
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_0_1
from isi_sdk_8_0_1.models.hardware_tapes_devices import HardwareTapesDevices # noqa: E501
from isi_sdk_8_0_1.rest import ApiException
class TestHardwareTapesDevices(unittest.TestCase):
"""HardwareTapesDevices unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testHardwareTapesDevices(self):
"""Test HardwareTapesDevices"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_0_1.models.hardware_tapes_devices.HardwareTapesDevices() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 23.02439 | 98 | 0.71822 |
from __future__ import absolute_import
import unittest
import isi_sdk_8_0_1
from isi_sdk_8_0_1.models.hardware_tapes_devices import HardwareTapesDevices from isi_sdk_8_0_1.rest import ApiException
class TestHardwareTapesDevices(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testHardwareTapesDevices(self):
pass
if __name__ == '__main__':
unittest.main()
| true | true |
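The generated stub above leaves the FIXME in place because the model's mandatory attributes are not listed here. If the attributes are all optional (the usual swagger-codegen default, which would need to be confirmed against the real model), a minimal filled-in test could look like this sketch:

    def testHardwareTapesDevices(self):
        # hypothetical: assumes the generated model accepts no-argument construction
        model = isi_sdk_8_0_1.models.hardware_tapes_devices.HardwareTapesDevices()
        self.assertIsInstance(model, HardwareTapesDevices)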
f7005e840555e823b72518c963e171385bf675ec | 1,423 | py | Python | run.py | xinetzone/self-driving-dash | 66860074099c90e620e21bc30b26efd366cc8f1a | [
"Apache-2.0"
] | null | null | null | run.py | xinetzone/self-driving-dash | 66860074099c90e620e21bc30b26efd366cc8f1a | [
"Apache-2.0"
] | null | null | null | run.py | xinetzone/self-driving-dash | 66860074099c90e620e21bc30b26efd366cc8f1a | [
"Apache-2.0"
] | null | null | null | from dash import dcc, html
from dash.dependencies import Input, Output
from app import app
from layouts import index, record, watch, replay, about
# from examples.run import callback_example
from callbacks.record import *
from callbacks.watch import *
from callbacks.replay import *
layout = html.Article([
    dcc.Location(id='url', refresh=False),  # tracks the current address-bar path
    html.Section(id='page-content'),  # placeholder filled by the page-routing callback
])
@app.callback(Output('page-content', 'children'),
Input('url', 'pathname'))
def display_page(pathname):
if pathname == '/':
return index.layout
if pathname == '/record':
return record.layout
if pathname == '/watch':
return watch.layout
if pathname == '/replay':
return replay.layout
if pathname == '/about':
return about.layout
# elif pathname.startswith('/examples/'):
# return callback_example(pathname)
# else:
# return '404'
app.config.suppress_callback_exceptions = True  # needed to support the multi-page app
if __name__ == '__main__':
import asyncio
from dash_xinet.server import run_server
port = 7777
# app.run_server(debug=True, port=5555, threaded=True)
# app.run_server(app, debug=True, port=5555, threaded=True)
run = run_server(app, layout,
port=port, debug=True
)
asyncio.run(run)
else:
app.layout = layout
    server = app.server  # exposed for Dash server deployment
| 27.365385 | 63 | 0.647927 | from dash import dcc, html
from dash.dependencies import Input, Output
from app import app
from layouts import index, record, watch, replay, about
from callbacks.record import *
from callbacks.watch import *
from callbacks.replay import *
layout = html.Article([
    dcc.Location(id='url', refresh=False),
    html.Section(id='page-content'),
])
@app.callback(Output('page-content', 'children'),
Input('url', 'pathname'))
def display_page(pathname):
if pathname == '/':
return index.layout
if pathname == '/record':
return record.layout
if pathname == '/watch':
return watch.layout
if pathname == '/replay':
return replay.layout
if pathname == '/about':
return about.layout
app.config.suppress_callback_exceptions = True
if __name__ == '__main__':
import asyncio
from dash_xinet.server import run_server
port = 7777
run = run_server(app, layout,
port=port, debug=True
)
asyncio.run(run)
else:
app.layout = layout
server = app.server | true | true |
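display_page in the file above falls through and returns None for unknown paths because the 404 branch is commented out. A small sketch of the same routing callback with an explicit fallback (illustrative only; it reuses the layout modules and imports already present in the file):

@app.callback(Output('page-content', 'children'),
              Input('url', 'pathname'))
def display_page(pathname):
    routes = {'/': index.layout, '/record': record.layout,
              '/watch': watch.layout, '/replay': replay.layout,
              '/about': about.layout}
    # fall back to a simple 404 message instead of returning None
    return routes.get(pathname, html.H1('404 - page not found'))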
f700608f35098a7965b60e53b106f0704bc73300 | 28,769 | py | Python | neorl/tune/runners/estune.py | XuboGU/neorl | 066cdbd9e9cdbfe371278dba3ece116f25edab2d | [
"MIT"
] | 1 | 2021-07-06T20:31:38.000Z | 2021-07-06T20:31:38.000Z | neorl/tune/runners/estune.py | XuboGU/neorl | 066cdbd9e9cdbfe371278dba3ece116f25edab2d | [
"MIT"
] | null | null | null | neorl/tune/runners/estune.py | XuboGU/neorl | 066cdbd9e9cdbfe371278dba3ece116f25edab2d | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import os
import random
import math
from itertools import repeat
import itertools
import sys, copy, shutil
import subprocess
from multiprocessing.dummy import Pool
from collections import defaultdict
import copy
import random
import matplotlib.pyplot as plt
try:
from collections.abc import Sequence
except ImportError:
from collections import Sequence
class ESTUNE:
"""
A class to parse neorl input template and construct cases for evolution strategy (ES) hyperparameter optimisation
inputs:
The template input file
Class object from PARSER.py, featuring user input for TUNE
neorl logo
"""
def __init__(self, tuneclass, inputfile, tuneblock, logo):
self.logo=logo
self.inputfile=inputfile
self.tuneblock=tuneblock
self.n_last_episodes=int(self.tuneblock["n_last_episodes"])
self.ncores=int(self.tuneblock["ncores"])
self.ncases=int(self.tuneblock["ncases"])
#---------------------------------------
# define genetic algorithm parameters
#---------------------------------------
self.popsize=10
if self.ncases < self.popsize:
self.ngens=1
else:
self.ngens=int(self.ncases/self.popsize)
self.MU=5
if tuneclass == 'gatune': # ES/GA tune
print("Performing semi-GA Tune")
self.INDPB=0.1
elif tuneclass == 'estune': # ES tune
print("Performing ES Tune")
self.INDPB=1.0
else: # default setting is ES tune
print("Performing ES Tune")
self.INDPB=1.0
self.CXPB=0.5
self.MUTPB=0.2
self.ETA=0.6
self.SMAX=0.5
self.paramvals=dict()
self.paraminds=dict()
self.datatypes=[]
#-------------------------------
# construct results directory
#-------------------------------
if os.path.exists('./tunecases/'):
shutil.rmtree('./tunecases/')
os.makedirs('./tunecases/', exist_ok=True)
else:
os.makedirs('./tunecases/', exist_ok=True)
self.csvlogger='tune.csv'
self.tunesummary='tunesummary.txt'
#---------------------------------
# parse the input template
#---------------------------------
with open (self.inputfile, 'r') as input_file_text:
self.template=input_file_text.readlines()
first=0; last=0
for i in range(len(self.template)):
if ('READ TUNE' in self.template[i]):
first=i
if ('END TUNE' in self.template[i]):
last=i
if first == 0 and last ==0:
raise ('TUNE card cannot be found')
del self.template[first: last+1]
self.template="".join(self.template)
def tune_count(self):
"""
        1- This function uses self.tuneblock, parses it, and infers all parameters to be tuned and their distributions
        2- This function creates the GA engine and instantiates the initial population for the evolution algorithm
"""
self.param_dict={}
for item in self.tuneblock:
if '{' in item and '}' in item and item[0] != '#':
#-----------------------------------------------------
# check the existence of the name in the template
#-----------------------------------------------------
if item not in self.template:
raise ValueError('parameter {} in TUNE block cannot be found in any other block, e.g. DQN, GA, PPO, etc.'.format(item))
item_lst=self.tuneblock[item].split(",")
item_lst=[item.strip() for item in item_lst] # get rid of white spaces in the splitted values
#-------------------------------------------------------
# check if a uniform distribution of floats is identified
#-------------------------------------------------------
try:
if "float" in item_lst:
item_lst[0]=float(item_lst[0])
item_lst[1]=float(item_lst[1])
self.datatypes.append("float")
print ('-- debug: parameter {} has uniform distribution of type --float-- between {} and {}'.format(item,item_lst[0],item_lst[1]))
elif "u" in item_lst:
item_lst[0]=float(item_lst[0])
item_lst[1]=float(item_lst[1])
self.datatypes.append("float")
print ('-- debug: parameter {} has uniform distribution of type --float-- between {} and {}'.format(item,item_lst[0],item_lst[1]))
except:
raise Exception ('--error: TUNE cannot construct the user-given uniform distribution of --floats-- for {} according to (low, high, u) syntax'.format(item))
#---------------------------------------------------
# check if a random integer distribution is identified
#---------------------------------------------------
try:
if "int" in item_lst:
item_lst[0]=int(item_lst[0])
item_lst[1]=int(item_lst[1])
self.datatypes.append("int")
print ('-- debug: parameter {} has uniform distribution of type --int-- between {} and {}'.format(item,item_lst[0],item_lst[1]))
elif "randint" in item_lst:
item_lst[0]=int(item_lst[0])
item_lst[1]=int(item_lst[1])
self.datatypes.append("int")
print ('-- debug: parameter {} has uniform distribution of type --int-- between {} and {}'.format(item,item_lst[0],item_lst[1]))
except:
raise Exception ('--error: TUNE cannot construct the user-given uniform distribution of --int-- for {} according to (low, high, u) syntax'.format(item))
#-----------------------------------------------------
# check if a grid is identified
#-----------------------------------------------------
try:
if "grid" in item_lst:
element_lst=[]
for element in item_lst:
# check if it is an integer
not_int=0
try:
element_lst.append(int(element.strip()))
except Exception:
not_int=1
                            # else check if the element is a float
if not_int:
try:
element_lst.append(float(element.strip()))
# else consider it a string
except Exception:
element_lst.append(str(element.strip()))
item_lst=element_lst
self.datatypes.append("grid")
print ('-- debug: parameter {} has grid type with values {}'.format(item,item_lst))
except:
                    raise Exception ('--error: TUNE cannot construct the user-given grid for {} according to the comma-separated syntax'.format(item))
self.param_dict[item]=item_lst # Save the final parsed list for parameter {XXX}
#-----------------------------------------------------
# infer the bounds for strategy vector
#-----------------------------------------------------
if len(self.param_dict.keys()) <= 10:
self.SMIN=0.1
else:
self.SMIN=1/(len(self.param_dict.keys()))
def gen_cases(self, x=0):
"""
This function infers neorl.py path
"""
self.tune_count()
self.param_names=list(self.param_dict.keys())
#-----------------------
# Infer neorl.py path
#-----------------------
# Find neorl path
#self.here=os.path.dirname(os.path.abspath(__file__))
#self.neorl_path=self.here.replace('src/tune','neorl.py') #try to infer neorl.py internally to call neorl inside or neorl
#self.python_path=self.here.replace('neorl/src/tune','anaconda3/bin/python3') #try to infer python3 path to call neorl inside or neorl
self.neorl_path=sys.argv[0]
self.python_path=sys.executable
print('--debug: NEORLPATH=', self.neorl_path)
print('--debug: PYTHONPATH=', self.python_path)
def GenES(self):
"""
Individual generator:
1- This function uses self.param_dict to obtain bounds for individual parameters
Returns:
            -ind (list): an individual vector with values sampled from the inferred distributions
-strategy (list): the strategy vector with values between smin and smax
"""
size=len(self.param_dict.keys()) # size of individual
content=[]
self.LOW=[] # Lower bounds for the parameters to be tuned
self.UP=[] # Upper bounds for parameters to be tuned
for key in list(self.param_dict.keys()):
if 'int' in self.param_dict[key]:
content.append(random.randint(self.param_dict[key][0], self.param_dict[key][1]))
elif 'randint' in self.param_dict[key]:
content.append(random.randint(self.param_dict[key][0], self.param_dict[key][1]))
elif 'float' in self.param_dict[key]:
content.append(random.uniform(self.param_dict[key][0], self.param_dict[key][1]))
elif 'u' in self.param_dict[key]:
content.append(random.uniform(self.param_dict[key][0], self.param_dict[key][1]))
elif 'grid' in self.param_dict[key]:
self.real_grid=list(self.param_dict[key])
self.real_grid.remove('grid') # get rid of the 'grid' to avoid sampling it
self.paramvals[key]=self.real_grid
content.append(random.sample(self.real_grid, 1)[0])
self.paraminds[len(content)-1]=key
else:
raise Exception('unknown data type is given, either int/randint, float/u, or grid are allowed for parameter distribution types')
self.LOW.append(self.param_dict[key][0])
self.UP.append(self.param_dict[key][1])
ind=list(content)
size = len(list(self.param_dict.keys()))
strategy= [random.uniform(self.SMIN, self.SMAX) for _ in range(size)]
return ind, strategy
def init_pop(self):
"""
Population initializer
Returns:
-pop (dict): initial population in a dictionary form
"""
# initialize the population and strategy and run them in parallel (these samples will be used to initialize the memory)
pop=defaultdict(list)
for i in range(self.popsize):
#caseid='es_gen{}_ind{}'.format(0,i+1)
data=self.GenES()
pop[i].append(data[0])
pop[i].append(data[1])
if self.ncores > 1: # evaluate warmup in parallel
core_list=[]
for key in pop:
caseid='ind{}'.format(key+1)
core_list.append([pop[key][0], caseid])
p=Pool(self.ncores)
fitness=p.map(self.gen_object, core_list)
p.close(); p.join()
[pop[ind].append(fitness[ind]) for ind in range(len(pop))]
else: # evaluate warmup in series
for key in pop:
caseid='ind{}'.format(key+1)
fitness=self.fit(pop[key][0], caseid)
pop[key].append(fitness)
return pop # return final pop dictionary with ind, strategy, and fitness
def fit(self, ind, caseid):
"""
This function evaluates an individual's fitness
Inputs:
-ind (list): an individual whose fitness to evaluate
-caseid (str): a string that specifies the given individual
Returns:
-mean_reward (float): fitness value
"""
try:
#---------------------------------------------
# Prepares directories and files for one case
# --------------------------------------------
self.param_names=list(self.param_dict.keys())
i = caseid[3:]
os.makedirs('./tunecases/case{}'.format(i), exist_ok=True)
self.new_template=copy.deepcopy(self.template)
for j in range (len(self.param_names)):
self.new_template=self.new_template.replace(str(self.param_names[j]), str(ind[j]))
filename='./tunecases/case{}/case{}.inp'.format(i, i)
with open (filename, 'w') as fout:
fout.writelines(self.new_template)
# copy external files into the new directory, if extfiles card exists
if 'extfiles' in self.tuneblock.keys():
if self.tuneblock['extfiles']:
print('--debug: external files are identified, copying them into each case directory')
for item in self.tuneblock['extfiles']:
os.system('cp -r {} ./tunecases/case{}/'.format(item, i))
casenum = caseid[3:]
print('--------------------------------------------------')
print('Running TUNE Case {}/{}: {}'.format(casenum, self.ncases, ind))
            subprocess.call([self.python_path, self.neorl_path, '-i', 'case{}.inp'.format(casenum)], cwd='./tunecases/case{}/'.format(casenum))  # this executes neorl for this case.inp
print('--------------------------------------------------')
#--------------------------------------------------------------------------------------------------------------
# Try to infer the _out.csv file in the directory since only one method is allowed
csvfile=[f for f in os.listdir('./tunecases/case{}/case{}_log/'.format(casenum, casenum)) if f.endswith('_out.csv')]
if len(csvfile) > 1:
raise Exception ('multiple *_out.csv files can be found in the logger of TUNE, only one is allowed')
#--------------------------------------------------------------------------------------------------------------
reward_lst=pd.read_csv('./tunecases/case{}/case{}_log/{}'.format(casenum,casenum, csvfile[0]), usecols=['reward']).values
mean_reward=np.mean(reward_lst[-self.n_last_episodes:])
max_reward=np.max(reward_lst)
with open (self.csvlogger, 'a') as fout:
fout.write(str(casenum) +',')
[fout.write(str(item) + ',') for item in ind]
fout.write(str(mean_reward) + ',' + str(max_reward) + '\n')
return mean_reward
except:
print('--error: case{}.inp failed during execution'.format(casenum))
return 'case{}.inp:failed'.format(casenum)
def gen_object(self, inp):
"""
This is a worker for the multiprocess Pool
Inputs:
-inp (list of lists): contains data for each core [[ind1, caseid1], ..., [indN, caseidN]]
Returns:
-fitness value (float)
"""
return self.fit(inp[0], inp[1])
def select(self, pop):
"""
Selection function sorts the population from max to min based on fitness and selects the k best
Inputs:
-pop (dict): population in dictionary structure
-k (int): top k individuals are selected
Returns:
            -best_dict (dict): the new ordered dictionary with the top k individuals selected
"""
k=self.MU
pop=list(pop.items())
pop.sort(key=lambda e: e[1][2], reverse=True)
sorted_dict=dict(pop[:k])
# This block creates a new dict where keys are reset to 0 ... k in order to avoid unordered keys after sort
best_dict=defaultdict(list)
index=0
for key in sorted_dict:
best_dict[index].append(sorted_dict[key][0])
best_dict[index].append(sorted_dict[key][1])
best_dict[index].append(sorted_dict[key][2])
index+=1
sorted_dict.clear()
return best_dict
def cx(self, ind1, ind2, strat1, strat2):
"""
Executes a classical two points crossover on both the individuals and their strategy.
The individuals/strategies should be a list. The crossover points for the individual and the
strategy are the same.
Inputs:
-ind1 (list): The first individual participating in the crossover.
-ind2 (list): The second individual participating in the crossover.
-strat1 (list): The first evolution strategy participating in the crossover.
            -strat2 (list): The second evolution strategy participating in the crossover.
Returns:
- The new ind1, ind2, strat1, strat2, after crossover in list form
"""
#for item in ind1:
# print('individual 1', type(item))
#for item in ind2:
# print('individual 2', type(item))
#for item in strat1:
# print('strategy 1', type(item))
#for item in strat2:
# print('strategy 2', type(item))
size = min(len(ind1), len(ind2))
pt1 = random.randint(1, size)
pt2 = random.randint(1, size-1)
if pt2 >= pt1:
pt2 +=1
else:
pt1, pt2 = pt2, pt1
ind1[pt1:pt2], ind2[pt1:pt2] = ind2[pt1:pt2], ind1[pt1:pt2]
strat1[pt1:pt2], strat2[pt1:pt2] = strat2[pt1:pt2], strat1[pt1:pt2]
return ind1, ind2, strat1, strat2
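    # Illustration: with two 4-parameter parents and cut points pt1=1, pt2=3,
    # cx swaps the middle slice of both the individuals and their strategies:
    #   ind1=[a1,a2,a3,a4], ind2=[b1,b2,b3,b4]  ->  [a1,b2,b3,a4], [b1,a2,a3,b4]
    # so each swapped parameter keeps travelling with its own step size.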
def mutES(self, ind, strat):
"""
Mutate an evolution strategy according to mixed Discrete/Continuous mutation rules
Input:
-ind (list): individual to be mutated
-strat (list): individual strategy to be mutated
Returns:
-ind (list): new individual after mutation
-strat (list): individual strategy after mutation
"""
size=len(ind)
tau=1/np.sqrt(2*size)
tau_prime=1/np.sqrt(2*np.sqrt(size))
for i in range(size):
# Grid distribution received
if self.datatypes[i] == "grid":
#if i in self.paraminds.keys():
norm=random.gauss(0,1)
# modify the ind strategy
strat[i] = 1/(1+(1-strat[i])/strat[i]*np.exp(-tau*norm-tau_prime*random.gauss(0,1)))
# make a transformation of strategy to ensure it is between smin, smax
y=(strat[i]-self.SMIN)/(self.SMAX-self.SMIN)
if np.floor(y) % 2 == 0:
y_prime=np.abs(y-np.floor(y))
else:
y_prime=1-np.abs(y-np.floor(y))
strat[i] = self.SMIN + (self.SMAX-self.SMIN)*y_prime
# check if this attribute is mutated based on the updated strategy
if random.random() < strat[i]:
# make a list of possibilities after excluding the current value to enforce mutation
paramname=self.paraminds[i]
ind[i]=random.sample(self.paramvals[paramname], 1)[0]
# Random integer distribution received
elif self.datatypes[i] == "int":
norm=random.gauss(0,1)
# modify the ind strategy
strat[i] = 1/(1+(1-strat[i])/strat[i]*np.exp(-tau*norm-tau_prime*random.gauss(0,1)))
# make a transformation of strategy to ensure it is between smin, smax
y=(strat[i]-self.SMIN)/(self.SMAX-self.SMIN)
if np.floor(y) % 2 == 0:
y_prime=np.abs(y-np.floor(y))
else:
y_prime=1-np.abs(y-np.floor(y))
strat[i] = self.SMIN + (self.SMAX-self.SMIN)*y_prime
# check if this attribute is mutated based on the updated strategy
#if random.random() < strat[i]:
# make a list of possibilities after excluding the current value to enforce mutation
choices=list(range(self.LOW[i], self.UP[i]+1))
choices.remove(ind[i])
ind[i] = random.choice(choices)
# Uniform float distribution received
elif self.datatypes[i] == "float":
norm=random.gauss(0,1)
if random.random() < self.INDPB: # this indicates whether ind/strategy to be mutated or not for this float variable
strat[i] *= np.exp(tau*norm + tau_prime * random.gauss(0,1)) # normal mutation strategy
ind[i] += strat[i] * random.gauss(0,1) # update the individual position
                # check if the new individual falls within lower/upper boundaries
if ind[i] < self.LOW[i]:
ind[i] = self.LOW[i]
if ind[i] > self.UP[i]:
ind[i] = self.UP[i]
else:
raise Exception('ES mutation strategy works with int, float, or grid distributions, the type provided cannot be interpreted')
return ind, strat
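    # Note: the y/y_prime mapping above is a reflection that folds the updated
    # strategy back into [SMIN, SMAX] instead of clipping it. Example with
    # SMIN=0.1, SMAX=0.5: a raw strat of 0.62 gives y=(0.62-0.1)/0.4=1.3,
    # floor(y)=1 is odd, so y_prime=1-|1.3-1|=0.7 and the final strat is
    # 0.1+0.4*0.7=0.38, which stays inside the allowed band.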
def GenOffspring(self, pop):
"""
This function generates the offspring by applying crossover, mutation, OR reproduction.
Inputs:
-pop (dict): population in dictionary structure
Returns:
-offspring (dict): new modified population in dictionary structure
"""
pop_indices=list(range(0,len(pop)))
offspring=defaultdict(list)
for i in range(self.popsize):
alpha=random.random()
#----------------------
# Crossover
#----------------------
if alpha < self.CXPB:
index1, index2=random.sample(pop_indices,2)
ind1, ind2, strat1, strat2=self.cx(ind1=list(pop[index1][0]), ind2=list(pop[index2][0]),
strat1=list(pop[index1][1]), strat2=list(pop[index2][1]))
offspring[i].append(ind1)
offspring[i].append(strat1)
#print('crossover is done for sample {} between {} and {}'.format(i,index1,index2))
#----------------------
# Mutation
#----------------------
elif alpha < self.CXPB + self.MUTPB: # Apply mutation
index = random.choice(pop_indices)
ind, strat=self.mutES(ind=list(pop[index][0]), strat=list(pop[index][1]))
offspring[i].append(ind)
offspring[i].append(strat)
#print('mutation is done for sample {} based on {}'.format(i,index))
#------------------------------
# Reproduction from population
#------------------------------
else:
index=random.choice(pop_indices)
offspring[i].append(pop[index][0])
offspring[i].append(pop[index][1])
#print('reproduction is done for sample {} based on {}'.format(i,index))
return offspring
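    # Note: with the defaults CXPB=0.5 and MUTPB=0.2 set in __init__, each
    # offspring slot is filled by crossover with probability 0.5, by mutation
    # with probability 0.2 (alpha in [0.5, 0.7)), and by plain reproduction of
    # an existing member otherwise (probability 0.3).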
def run_cases(self):
"""
        This function runs the evolutionary algorithm over self.ngens generations.
"""
#------------------------------
# Begin the evolution process
#------------------------------
with open (self.csvlogger, 'w') as fout:
fout.write('caseid, ')
[fout.write(item + ',') for item in self.param_names]
fout.write('mean_reward,max_reward\n')
#print('PARAM dict', self.param_dict)
#print('PARAM types', self.datatypes)
self.population=self.init_pop()
case_idx=0
self.currentcase=self.popsize+1
for gen in range(1, self.ngens):
case_idx=0
caseids=['ind{}'.format(ind) for ind in range(self.currentcase, self.currentcase+self.popsize+1)]
# Vary the population and generate new offspring
offspring=self.GenOffspring(pop=self.population)
# Evaluate the individuals with invalid fitness using multiprocessing Pool
if self.ncores > 1:
core_list=[]
for key in offspring:
core_list.append([offspring[key][0], caseids[case_idx]])
case_idx+=1
# initialize a pool
p=Pool(self.ncores)
fitness=p.map(self.gen_object, core_list)
p.close(); p.join()
[offspring[ind].append(fitness[ind]) for ind in range(len(offspring))]
else:
for ind in range(len(offspring)):
fitness=self.fit(offspring[ind][0], caseids[case_idx])
case_idx+=1
offspring[ind].append(fitness)
self.currentcase+=self.popsize
# Select the next generation population
self.population = copy.deepcopy(self.select(pop=offspring))
csvdata=pd.read_csv('tune.csv')
asc_data=csvdata.sort_values(by=['caseid'],ascending=True)
des_data=csvdata.sort_values(by=['mean_reward'],ascending=False)
des_data2=csvdata.sort_values(by=['max_reward'],ascending=False)
asc_data.to_csv('tune.csv', index=False)
mean = np.mean(des_data.iloc[:,4:5])
totalmean=mean.tolist()[0]
try:
failed_cases=len([print ('failed') for item in self.population if isinstance(item, str)])
except:
failed_cases='NA'
print ('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print('Mean Rewards for all cases=', totalmean)
print ('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print ('All TUNE CASES ARE COMPLETED')
print ('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print('--debug: Check tunesummary.txt file for best hyperparameters found')
print('--debug: Check tune.csv file for complete csv logger of all cases results')
print('--debug: Check tunecases directory for case-by-case detailed results')
with open ('tunesummary.txt', 'w') as fout:
fout.write(self.logo)
fout.write('*****************************************************\n')
fout.write('Summary for the TUNE case \n')
fout.write('*****************************************************\n')
fout.write('Number of cases evaluated: {} \n'.format(self.ncases))
fout.write('Number of failed cases: {} \n'.format(failed_cases))
fout.write('Parameter names: {} \n'.format(self.param_names))
fout.write('Parameter values: {} \n '.format(self.param_dict))
fout.write ('--------------------------------------------------------------------------------------\n')
if des_data.shape[0] < 20:
top=des_data.shape[0]
fout.write ('Top {} hyperparameter configurations ranked according to MEAN reward \n'.format(top))
fout.write(des_data.iloc[:top].to_string(index=False))
else:
top=20
fout.write ('Top {} hyperparameter configurations ranked according to MEAN reward \n'.format(top))
fout.write(des_data.iloc[:top].to_string(index=False))
fout.write ('\n')
fout.write ('--------------------------------------------------------------------------------------\n')
if des_data2.shape[0] < 20:
top=des_data2.shape[0]
fout.write ('Top {} hyperparameter configurations ranked according to MAX reward \n'.format(top))
fout.write(des_data2.iloc[:top].to_string(index=False))
else:
top=20
fout.write ('Top {} hyperparameter configurations ranked according to MAX reward \n'.format(top))
fout.write(des_data2.iloc[:top].to_string(index=False)) | 46.326892 | 184 | 0.509437 | import numpy as np
import pandas as pd
import os
import random
import math
from itertools import repeat
import itertools
import sys, copy, shutil
import subprocess
from multiprocessing.dummy import Pool
from collections import defaultdict
import copy
import random
import matplotlib.pyplot as plt
try:
from collections.abc import Sequence
except ImportError:
from collections import Sequence
class ESTUNE:
def __init__(self, tuneclass, inputfile, tuneblock, logo):
self.logo=logo
self.inputfile=inputfile
self.tuneblock=tuneblock
self.n_last_episodes=int(self.tuneblock["n_last_episodes"])
self.ncores=int(self.tuneblock["ncores"])
self.ncases=int(self.tuneblock["ncases"])
self.popsize=10
if self.ncases < self.popsize:
self.ngens=1
else:
self.ngens=int(self.ncases/self.popsize)
self.MU=5
        if tuneclass == 'gatune':
            print("Performing semi-GA Tune")
            self.INDPB=0.1
        elif tuneclass == 'estune':
            print("Performing ES Tune")
            self.INDPB=1.0
        else:
            print("Performing ES Tune")
            self.INDPB=1.0
self.CXPB=0.5
self.MUTPB=0.2
self.ETA=0.6
self.SMAX=0.5
self.paramvals=dict()
self.paraminds=dict()
self.datatypes=[]
if os.path.exists('./tunecases/'):
shutil.rmtree('./tunecases/')
os.makedirs('./tunecases/', exist_ok=True)
else:
os.makedirs('./tunecases/', exist_ok=True)
self.csvlogger='tune.csv'
self.tunesummary='tunesummary.txt'
with open (self.inputfile, 'r') as input_file_text:
self.template=input_file_text.readlines()
first=0; last=0
for i in range(len(self.template)):
if ('READ TUNE' in self.template[i]):
first=i
if ('END TUNE' in self.template[i]):
last=i
if first == 0 and last ==0:
raise ('TUNE card cannot be found')
del self.template[first: last+1]
self.template="".join(self.template)
def tune_count(self):
self.param_dict={}
for item in self.tuneblock:
if '{' in item and '}' in item and item[0] != '#':
if item not in self.template:
raise ValueError('parameter {} in TUNE block cannot be found in any other block, e.g. DQN, GA, PPO, etc.'.format(item))
item_lst=self.tuneblock[item].split(",")
                item_lst=[item.strip() for item in item_lst]
                try:
if "float" in item_lst:
item_lst[0]=float(item_lst[0])
item_lst[1]=float(item_lst[1])
self.datatypes.append("float")
print ('-- debug: parameter {} has uniform distribution of type --float-- between {} and {}'.format(item,item_lst[0],item_lst[1]))
elif "u" in item_lst:
item_lst[0]=float(item_lst[0])
item_lst[1]=float(item_lst[1])
self.datatypes.append("float")
print ('-- debug: parameter {} has uniform distribution of type --float-- between {} and {}'.format(item,item_lst[0],item_lst[1]))
except:
raise Exception ('--error: TUNE cannot construct the user-given uniform distribution of --floats-- for {} according to (low, high, u) syntax'.format(item))
try:
if "int" in item_lst:
item_lst[0]=int(item_lst[0])
item_lst[1]=int(item_lst[1])
self.datatypes.append("int")
print ('-- debug: parameter {} has uniform distribution of type --int-- between {} and {}'.format(item,item_lst[0],item_lst[1]))
elif "randint" in item_lst:
item_lst[0]=int(item_lst[0])
item_lst[1]=int(item_lst[1])
self.datatypes.append("int")
print ('-- debug: parameter {} has uniform distribution of type --int-- between {} and {}'.format(item,item_lst[0],item_lst[1]))
except:
raise Exception ('--error: TUNE cannot construct the user-given uniform distribution of --int-- for {} according to (low, high, u) syntax'.format(item))
try:
if "grid" in item_lst:
element_lst=[]
for element in item_lst:
not_int=0
try:
element_lst.append(int(element.strip()))
except Exception:
not_int=1
if not_int:
try:
element_lst.append(float(element.strip()))
except Exception:
element_lst.append(str(element.strip()))
item_lst=element_lst
self.datatypes.append("grid")
print ('-- debug: parameter {} has grid type with values {}'.format(item,item_lst))
except:
raise Exception ('--error: TUNE cannot construct the user-given grid for {} according to the comma-seperated syntax'.format(item))
self.param_dict[item]=item_lst
if len(self.param_dict.keys()) <= 10:
self.SMIN=0.1
else:
self.SMIN=1/(len(self.param_dict.keys()))
def gen_cases(self, x=0):
self.tune_count()
self.param_names=list(self.param_dict.keys())
self.neorl_path=sys.argv[0]
self.python_path=sys.executable
print('--debug: NEORLPATH=', self.neorl_path)
print('--debug: PYTHONPATH=', self.python_path)
def GenES(self):
        size=len(self.param_dict.keys())
        content=[]
        self.LOW=[]
        self.UP=[]
        for key in list(self.param_dict.keys()):
if 'int' in self.param_dict[key]:
content.append(random.randint(self.param_dict[key][0], self.param_dict[key][1]))
elif 'randint' in self.param_dict[key]:
content.append(random.randint(self.param_dict[key][0], self.param_dict[key][1]))
elif 'float' in self.param_dict[key]:
content.append(random.uniform(self.param_dict[key][0], self.param_dict[key][1]))
elif 'u' in self.param_dict[key]:
content.append(random.uniform(self.param_dict[key][0], self.param_dict[key][1]))
elif 'grid' in self.param_dict[key]:
self.real_grid=list(self.param_dict[key])
                self.real_grid.remove('grid')
                self.paramvals[key]=self.real_grid
content.append(random.sample(self.real_grid, 1)[0])
self.paraminds[len(content)-1]=key
else:
raise Exception('unknown data type is given, either int/randint, float/u, or grid are allowed for parameter distribution types')
self.LOW.append(self.param_dict[key][0])
self.UP.append(self.param_dict[key][1])
ind=list(content)
size = len(list(self.param_dict.keys()))
strategy= [random.uniform(self.SMIN, self.SMAX) for _ in range(size)]
return ind, strategy
def init_pop(self):
pop=defaultdict(list)
for i in range(self.popsize):
data=self.GenES()
pop[i].append(data[0])
pop[i].append(data[1])
        if self.ncores > 1:
            core_list=[]
for key in pop:
caseid='ind{}'.format(key+1)
core_list.append([pop[key][0], caseid])
p=Pool(self.ncores)
fitness=p.map(self.gen_object, core_list)
p.close(); p.join()
[pop[ind].append(fitness[ind]) for ind in range(len(pop))]
        else:
            for key in pop:
caseid='ind{}'.format(key+1)
fitness=self.fit(pop[key][0], caseid)
pop[key].append(fitness)
return pop
def fit(self, ind, caseid):
try:
self.param_names=list(self.param_dict.keys())
i = caseid[3:]
os.makedirs('./tunecases/case{}'.format(i), exist_ok=True)
self.new_template=copy.deepcopy(self.template)
for j in range (len(self.param_names)):
self.new_template=self.new_template.replace(str(self.param_names[j]), str(ind[j]))
filename='./tunecases/case{}/case{}.inp'.format(i, i)
with open (filename, 'w') as fout:
fout.writelines(self.new_template)
if 'extfiles' in self.tuneblock.keys():
if self.tuneblock['extfiles']:
print('--debug: external files are identified, copying them into each case directory')
for item in self.tuneblock['extfiles']:
os.system('cp -r {} ./tunecases/case{}/'.format(item, i))
casenum = caseid[3:]
print('--------------------------------------------------')
print('Running TUNE Case {}/{}: {}'.format(casenum, self.ncases, ind))
            subprocess.call([self.python_path, self.neorl_path, '-i', 'case{}.inp'.format(casenum)], cwd='./tunecases/case{}/'.format(casenum))
            print('--------------------------------------------------')
csvfile=[f for f in os.listdir('./tunecases/case{}/case{}_log/'.format(casenum, casenum)) if f.endswith('_out.csv')]
if len(csvfile) > 1:
raise Exception ('multiple *_out.csv files can be found in the logger of TUNE, only one is allowed')
reward_lst=pd.read_csv('./tunecases/case{}/case{}_log/{}'.format(casenum,casenum, csvfile[0]), usecols=['reward']).values
mean_reward=np.mean(reward_lst[-self.n_last_episodes:])
max_reward=np.max(reward_lst)
with open (self.csvlogger, 'a') as fout:
fout.write(str(casenum) +',')
[fout.write(str(item) + ',') for item in ind]
fout.write(str(mean_reward) + ',' + str(max_reward) + '\n')
return mean_reward
        except:
            # derive casenum from caseid here: casenum may not be assigned yet
            # if the failure happened before the case directory was prepared
            casenum = caseid[3:]
            print('--error: case{}.inp failed during execution'.format(casenum))
            return 'case{}.inp:failed'.format(casenum)
def gen_object(self, inp):
return self.fit(inp[0], inp[1])
def select(self, pop):
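        # Survivor selection: keep only the best MU individuals, ranked by fitness.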
k=self.MU
pop=list(pop.items())
pop.sort(key=lambda e: e[1][2], reverse=True)
sorted_dict=dict(pop[:k])
best_dict=defaultdict(list)
index=0
for key in sorted_dict:
best_dict[index].append(sorted_dict[key][0])
best_dict[index].append(sorted_dict[key][1])
best_dict[index].append(sorted_dict[key][2])
index+=1
sorted_dict.clear()
return best_dict
def cx(self, ind1, ind2, strat1, strat2):
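        # Two-point crossover applied to both the parameter vectors and their
        # strategy (step-size) vectors.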
size = min(len(ind1), len(ind2))
pt1 = random.randint(1, size)
pt2 = random.randint(1, size-1)
if pt2 >= pt1:
pt2 +=1
else:
pt1, pt2 = pt2, pt1
ind1[pt1:pt2], ind2[pt1:pt2] = ind2[pt1:pt2], ind1[pt1:pt2]
strat1[pt1:pt2], strat2[pt1:pt2] = strat2[pt1:pt2], strat1[pt1:pt2]
return ind1, ind2, strat1, strat2
def mutES(self, ind, strat):
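        # Self-adaptive ES mutation: the per-gene strategy (step-size) values are
        # updated first (learning rates tau and tau_prime), then each gene is
        # perturbed according to its data type (grid, int, or float).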
size=len(ind)
tau=1/np.sqrt(2*size)
tau_prime=1/np.sqrt(2*np.sqrt(size))
for i in range(size):
if self.datatypes[i] == "grid":
norm=random.gauss(0,1)
strat[i] = 1/(1+(1-strat[i])/strat[i]*np.exp(-tau*norm-tau_prime*random.gauss(0,1)))
y=(strat[i]-self.SMIN)/(self.SMAX-self.SMIN)
if np.floor(y) % 2 == 0:
y_prime=np.abs(y-np.floor(y))
else:
y_prime=1-np.abs(y-np.floor(y))
strat[i] = self.SMIN + (self.SMAX-self.SMIN)*y_prime
if random.random() < strat[i]:
paramname=self.paraminds[i]
ind[i]=random.sample(self.paramvals[paramname], 1)[0]
elif self.datatypes[i] == "int":
norm=random.gauss(0,1)
strat[i] = 1/(1+(1-strat[i])/strat[i]*np.exp(-tau*norm-tau_prime*random.gauss(0,1)))
y=(strat[i]-self.SMIN)/(self.SMAX-self.SMIN)
if np.floor(y) % 2 == 0:
y_prime=np.abs(y-np.floor(y))
else:
y_prime=1-np.abs(y-np.floor(y))
strat[i] = self.SMIN + (self.SMAX-self.SMIN)*y_prime
choices=list(range(self.LOW[i], self.UP[i]+1))
choices.remove(ind[i])
ind[i] = random.choice(choices)
elif self.datatypes[i] == "float":
norm=random.gauss(0,1)
                if random.random() < self.INDPB:
                    strat[i] *= np.exp(tau*norm + tau_prime * random.gauss(0,1))
                    ind[i] += strat[i] * random.gauss(0,1)
if ind[i] < self.LOW[i]:
ind[i] = self.LOW[i]
if ind[i] > self.UP[i]:
ind[i] = self.UP[i]
else:
raise Exception('ES mutation strategy works with int, float, or grid distributions, the type provided cannot be interpreted')
return ind, strat
def GenOffspring(self, pop):
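        # Create the offspring population: with probability CXPB apply crossover,
        # with probability MUTPB apply mutation, otherwise reproduce a parent unchanged.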
pop_indices=list(range(0,len(pop)))
offspring=defaultdict(list)
for i in range(self.popsize):
alpha=random.random()
if alpha < self.CXPB:
index1, index2=random.sample(pop_indices,2)
ind1, ind2, strat1, strat2=self.cx(ind1=list(pop[index1][0]), ind2=list(pop[index2][0]),
strat1=list(pop[index1][1]), strat2=list(pop[index2][1]))
offspring[i].append(ind1)
offspring[i].append(strat1)
            elif alpha < self.CXPB + self.MUTPB:
                index = random.choice(pop_indices)
ind, strat=self.mutES(ind=list(pop[index][0]), strat=list(pop[index][1]))
offspring[i].append(ind)
offspring[i].append(strat)
else:
index=random.choice(pop_indices)
offspring[i].append(pop[index][0])
offspring[i].append(pop[index][1])
return offspring
def run_cases(self):
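        # Main tuning loop: evaluate the initial population, evolve it for ngens
        # generations, and write the ranked results to tune.csv and tunesummary.txt.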
with open (self.csvlogger, 'w') as fout:
fout.write('caseid, ')
[fout.write(item + ',') for item in self.param_names]
fout.write('mean_reward,max_reward\n')
self.population=self.init_pop()
case_idx=0
self.currentcase=self.popsize+1
for gen in range(1, self.ngens):
case_idx=0
caseids=['ind{}'.format(ind) for ind in range(self.currentcase, self.currentcase+self.popsize+1)]
offspring=self.GenOffspring(pop=self.population)
if self.ncores > 1:
core_list=[]
for key in offspring:
core_list.append([offspring[key][0], caseids[case_idx]])
case_idx+=1
p=Pool(self.ncores)
fitness=p.map(self.gen_object, core_list)
p.close(); p.join()
[offspring[ind].append(fitness[ind]) for ind in range(len(offspring))]
else:
for ind in range(len(offspring)):
fitness=self.fit(offspring[ind][0], caseids[case_idx])
case_idx+=1
offspring[ind].append(fitness)
self.currentcase+=self.popsize
self.population = copy.deepcopy(self.select(pop=offspring))
csvdata=pd.read_csv('tune.csv')
asc_data=csvdata.sort_values(by=['caseid'],ascending=True)
des_data=csvdata.sort_values(by=['mean_reward'],ascending=False)
des_data2=csvdata.sort_values(by=['max_reward'],ascending=False)
asc_data.to_csv('tune.csv', index=False)
mean = np.mean(des_data.iloc[:,4:5])
totalmean=mean.tolist()[0]
try:
failed_cases=len([print ('failed') for item in self.population if isinstance(item, str)])
except:
failed_cases='NA'
print ('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print('Mean Rewards for all cases=', totalmean)
print ('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print ('All TUNE CASES ARE COMPLETED')
print ('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print('--debug: Check tunesummary.txt file for best hyperparameters found')
print('--debug: Check tune.csv file for complete csv logger of all cases results')
print('--debug: Check tunecases directory for case-by-case detailed results')
with open ('tunesummary.txt', 'w') as fout:
fout.write(self.logo)
fout.write('*****************************************************\n')
fout.write('Summary for the TUNE case \n')
fout.write('*****************************************************\n')
fout.write('Number of cases evaluated: {} \n'.format(self.ncases))
fout.write('Number of failed cases: {} \n'.format(failed_cases))
fout.write('Parameter names: {} \n'.format(self.param_names))
fout.write('Parameter values: {} \n '.format(self.param_dict))
fout.write ('--------------------------------------------------------------------------------------\n')
if des_data.shape[0] < 20:
top=des_data.shape[0]
fout.write ('Top {} hyperparameter configurations ranked according to MEAN reward \n'.format(top))
fout.write(des_data.iloc[:top].to_string(index=False))
else:
top=20
fout.write ('Top {} hyperparameter configurations ranked according to MEAN reward \n'.format(top))
fout.write(des_data.iloc[:top].to_string(index=False))
fout.write ('\n')
fout.write ('--------------------------------------------------------------------------------------\n')
if des_data2.shape[0] < 20:
top=des_data2.shape[0]
fout.write ('Top {} hyperparameter configurations ranked according to MAX reward \n'.format(top))
fout.write(des_data2.iloc[:top].to_string(index=False))
else:
top=20
fout.write ('Top {} hyperparameter configurations ranked according to MAX reward \n'.format(top))
fout.write(des_data2.iloc[:top].to_string(index=False)) | true | true |
f70060aa3fd6b00edb6202ecf166cc9464082bba | 12,370 | py | Python | test_net.py | zhuriheng/faster-rcnn.pytorch | 7536b0f5eee254350fb4dce5c4a077ac6d29db16 | [
"MIT"
] | null | null | null | test_net.py | zhuriheng/faster-rcnn.pytorch | 7536b0f5eee254350fb4dce5c4a077ac6d29db16 | [
"MIT"
] | null | null | null | test_net.py | zhuriheng/faster-rcnn.pytorch | 7536b0f5eee254350fb4dce5c4a077ac6d29db16 | [
"MIT"
] | null | null | null | # --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import pdb
import time
import cv2
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import pickle
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.rpn.bbox_transform import clip_boxes
from model.nms.nms_wrapper import nms
from model.rpn.bbox_transform import bbox_transform_inv
from model.utils.net_utils import save_net, load_net, vis_detections
from model.faster_rcnn.vgg16 import vgg16
from model.faster_rcnn.resnet import resnet
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--dataset', dest='dataset',
help='training dataset',
default='pascal_voc', type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default='cfgs/vgg16.yml', type=str)
parser.add_argument('--net', dest='net',
help='vgg16, res50, res101, res152',
default='res101', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--load_dir', dest='load_dir',
help='directory to load models', default="models",
type=str)
parser.add_argument('--cuda', dest='cuda',
help='whether use CUDA',
action='store_true')
parser.add_argument('--ls', dest='large_scale',
help='whether use large imag scale',
action='store_true')
parser.add_argument('--mGPUs', dest='mGPUs',
help='whether use multiple GPUs',
action='store_true')
parser.add_argument('--cag', dest='class_agnostic',
help='whether perform class_agnostic bbox regression',
action='store_true')
parser.add_argument('--parallel_type', dest='parallel_type',
help='which part of model to parallel, 0: all, 1: model before roi pooling',
default=0, type=int)
parser.add_argument('--checksession', dest='checksession',
help='checksession to load model',
default=1, type=int)
parser.add_argument('--checkepoch', dest='checkepoch',
help='checkepoch to load network',
default=1, type=int)
parser.add_argument('--checkpoint', dest='checkpoint',
help='checkpoint to load network',
default=10021, type=int)
parser.add_argument('--vis', dest='vis',
help='visualization mode',
action='store_true')
parser.add_argument('--input_dir', dest='input_dir',
help='directory to save models',
type=str)
args = parser.parse_args()
return args
lr = cfg.TRAIN.LEARNING_RATE
momentum = cfg.TRAIN.MOMENTUM
weight_decay = cfg.TRAIN.WEIGHT_DECAY
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if torch.cuda.is_available() and not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
np.random.seed(cfg.RNG_SEED)
if args.dataset == "pascal_voc":
args.imdb_name = "voc_2007_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "pascal_voc_0712":
args.imdb_name = "voc_2007_trainval+voc_2012_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "coco":
args.imdb_name = "coco_2014_train+coco_2014_valminusminival"
args.imdbval_name = "coco_2014_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "imagenet":
args.imdb_name = "imagenet_train"
args.imdbval_name = "imagenet_val"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "vg":
args.imdb_name = "vg_150-50-50_minitrain"
args.imdbval_name = "vg_150-50-50_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
args.cfg_file = "cfgs/{}/{}_ls.yml".format(args.dataset, args.net) if args.large_scale else "cfgs/{}/{}.yml".format(
args.dataset, args.net)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
print('Using config:')
pprint.pprint(cfg)
cfg.TRAIN.USE_FLIPPED = False
imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdbval_name, False)
imdb.competition_mode(on=True)
print('{:d} roidb entries'.format(len(roidb)))
input_dir = args.input_dir
if not os.path.exists(input_dir):
raise Exception('There is no input directory for loading network from ' + input_dir)
load_name = os.path.join(input_dir,
'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
  # initialize the network here.
if args.net == 'vgg16':
fasterRCNN = vgg16(imdb.classes, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res101':
fasterRCNN = resnet(imdb.classes, 101, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res50':
fasterRCNN = resnet(imdb.classes, 50, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res152':
fasterRCNN = resnet(imdb.classes, 152, pretrained=False, class_agnostic=args.class_agnostic)
else:
print("network is not defined")
pdb.set_trace()
fasterRCNN.create_architecture()
print("load checkpoint %s" % (load_name))
checkpoint = torch.load(load_name)
fasterRCNN.load_state_dict(checkpoint['model'])
if 'pooling_mode' in checkpoint.keys():
cfg.POOLING_MODE = checkpoint['pooling_mode']
print('load model successfully!')
  # initialize the tensor holder here.
im_data = torch.FloatTensor(1)
im_info = torch.FloatTensor(1)
num_boxes = torch.LongTensor(1)
gt_boxes = torch.FloatTensor(1)
# ship to cuda
if args.cuda:
im_data = im_data.cuda()
im_info = im_info.cuda()
num_boxes = num_boxes.cuda()
gt_boxes = gt_boxes.cuda()
# make variable
im_data = Variable(im_data)
im_info = Variable(im_info)
num_boxes = Variable(num_boxes)
gt_boxes = Variable(gt_boxes)
if args.cuda:
cfg.CUDA = True
if args.cuda:
fasterRCNN.cuda()
start = time.time()
max_per_image = 100
vis = args.vis
if vis:
thresh = 0.05
else:
thresh = 0.0
save_name = 'faster_rcnn_10'
num_images = len(imdb.image_index)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
output_dir = get_output_dir(imdb, save_name)
dataset = roibatchLoader(roidb, ratio_list, ratio_index, 1, \
imdb.num_classes, training=False, normalize = False)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
shuffle=False, num_workers=0,
pin_memory=True)
data_iter = iter(dataloader)
_t = {'im_detect': time.time(), 'misc': time.time()}
det_file = os.path.join(output_dir, 'detections.pkl')
fasterRCNN.eval()
empty_array = np.transpose(np.array([[],[],[],[],[]]), (1,0))
for i in range(num_images):
data = next(data_iter)
im_data.data.resize_(data[0].size()).copy_(data[0])
im_info.data.resize_(data[1].size()).copy_(data[1])
gt_boxes.data.resize_(data[2].size()).copy_(data[2])
num_boxes.data.resize_(data[3].size()).copy_(data[3])
det_tic = time.time()
rois, cls_prob, bbox_pred, \
rpn_loss_cls, rpn_loss_box, \
RCNN_loss_cls, RCNN_loss_bbox, \
rois_label = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)
scores = cls_prob.data
boxes = rois.data[:, :, 1:5]
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = bbox_pred.data
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
# Optionally normalize targets by a precomputed mean and stdev
if args.class_agnostic:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
box_deltas = box_deltas.view(1, -1, 4)
else:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
box_deltas = box_deltas.view(1, -1, 4 * len(imdb.classes))
pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)
pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)
else:
# Simply repeat the boxes, once for each class
_ = torch.from_numpy(np.tile(boxes, (1, scores.shape[1])))
pred_boxes = _.cuda() if args.cuda > 0 else _
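      # rescale boxes from the network input resolution back to the original
      # image size using the im_info scale factor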
pred_boxes /= data[1][0][2].item()
scores = scores.squeeze()
pred_boxes = pred_boxes.squeeze()
det_toc = time.time()
detect_time = det_toc - det_tic
misc_tic = time.time()
if vis:
im = cv2.imread(imdb.image_path_at(i))
im2show = np.copy(im)
for j in xrange(1, imdb.num_classes):
inds = torch.nonzero(scores[:,j]>thresh).view(-1)
# if there is det
if inds.numel() > 0:
cls_scores = scores[:,j][inds]
_, order = torch.sort(cls_scores, 0, True)
if args.class_agnostic:
cls_boxes = pred_boxes[inds, :]
else:
cls_boxes = pred_boxes[inds][:, j * 4:(j + 1) * 4]
cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)
# cls_dets = torch.cat((cls_boxes, cls_scores), 1)
cls_dets = cls_dets[order]
keep = nms(cls_dets, cfg.TEST.NMS)
cls_dets = cls_dets[keep.view(-1).long()]
if vis:
im2show = vis_detections(im2show, imdb.classes[j], cls_dets.cpu().numpy(), 0.3)
all_boxes[j][i] = cls_dets.cpu().numpy()
else:
all_boxes[j][i] = empty_array
# Limit to max_per_image detections *over all classes*
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1]
for j in xrange(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in xrange(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
misc_toc = time.time()
nms_time = misc_toc - misc_tic
sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s \r' \
.format(i + 1, num_images, detect_time, nms_time))
sys.stdout.flush()
if vis:
cv2.imwrite('result.png', im2show)
pdb.set_trace()
#cv2.imshow('test', im2show)
#cv2.waitKey(0)
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
print('Evaluating detections')
imdb.evaluate_detections(all_boxes, output_dir)
end = time.time()
print("test time: %0.4fs" % (end - start))
| 37.147147 | 118 | 0.621746 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import pdb
import time
import cv2
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import pickle
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.rpn.bbox_transform import clip_boxes
from model.nms.nms_wrapper import nms
from model.rpn.bbox_transform import bbox_transform_inv
from model.utils.net_utils import save_net, load_net, vis_detections
from model.faster_rcnn.vgg16 import vgg16
from model.faster_rcnn.resnet import resnet
try:
    xrange
except NameError:
xrange = range
def parse_args():
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--dataset', dest='dataset',
help='training dataset',
default='pascal_voc', type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default='cfgs/vgg16.yml', type=str)
parser.add_argument('--net', dest='net',
help='vgg16, res50, res101, res152',
default='res101', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--load_dir', dest='load_dir',
help='directory to load models', default="models",
type=str)
parser.add_argument('--cuda', dest='cuda',
help='whether use CUDA',
action='store_true')
parser.add_argument('--ls', dest='large_scale',
help='whether use large imag scale',
action='store_true')
parser.add_argument('--mGPUs', dest='mGPUs',
help='whether use multiple GPUs',
action='store_true')
parser.add_argument('--cag', dest='class_agnostic',
help='whether perform class_agnostic bbox regression',
action='store_true')
parser.add_argument('--parallel_type', dest='parallel_type',
help='which part of model to parallel, 0: all, 1: model before roi pooling',
default=0, type=int)
parser.add_argument('--checksession', dest='checksession',
help='checksession to load model',
default=1, type=int)
parser.add_argument('--checkepoch', dest='checkepoch',
help='checkepoch to load network',
default=1, type=int)
parser.add_argument('--checkpoint', dest='checkpoint',
help='checkpoint to load network',
default=10021, type=int)
parser.add_argument('--vis', dest='vis',
help='visualization mode',
action='store_true')
parser.add_argument('--input_dir', dest='input_dir',
help='directory to save models',
type=str)
args = parser.parse_args()
return args
lr = cfg.TRAIN.LEARNING_RATE
momentum = cfg.TRAIN.MOMENTUM
weight_decay = cfg.TRAIN.WEIGHT_DECAY
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if torch.cuda.is_available() and not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
np.random.seed(cfg.RNG_SEED)
if args.dataset == "pascal_voc":
args.imdb_name = "voc_2007_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "pascal_voc_0712":
args.imdb_name = "voc_2007_trainval+voc_2012_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "coco":
args.imdb_name = "coco_2014_train+coco_2014_valminusminival"
args.imdbval_name = "coco_2014_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "imagenet":
args.imdb_name = "imagenet_train"
args.imdbval_name = "imagenet_val"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "vg":
args.imdb_name = "vg_150-50-50_minitrain"
args.imdbval_name = "vg_150-50-50_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
args.cfg_file = "cfgs/{}/{}_ls.yml".format(args.dataset, args.net) if args.large_scale else "cfgs/{}/{}.yml".format(
args.dataset, args.net)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
print('Using config:')
pprint.pprint(cfg)
cfg.TRAIN.USE_FLIPPED = False
imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdbval_name, False)
imdb.competition_mode(on=True)
print('{:d} roidb entries'.format(len(roidb)))
input_dir = args.input_dir
if not os.path.exists(input_dir):
raise Exception('There is no input directory for loading network from ' + input_dir)
load_name = os.path.join(input_dir,
'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
if args.net == 'vgg16':
fasterRCNN = vgg16(imdb.classes, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res101':
fasterRCNN = resnet(imdb.classes, 101, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res50':
fasterRCNN = resnet(imdb.classes, 50, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res152':
fasterRCNN = resnet(imdb.classes, 152, pretrained=False, class_agnostic=args.class_agnostic)
else:
print("network is not defined")
pdb.set_trace()
fasterRCNN.create_architecture()
print("load checkpoint %s" % (load_name))
checkpoint = torch.load(load_name)
fasterRCNN.load_state_dict(checkpoint['model'])
if 'pooling_mode' in checkpoint.keys():
cfg.POOLING_MODE = checkpoint['pooling_mode']
print('load model successfully!')
im_data = torch.FloatTensor(1)
im_info = torch.FloatTensor(1)
num_boxes = torch.LongTensor(1)
gt_boxes = torch.FloatTensor(1)
if args.cuda:
im_data = im_data.cuda()
im_info = im_info.cuda()
num_boxes = num_boxes.cuda()
gt_boxes = gt_boxes.cuda()
im_data = Variable(im_data)
im_info = Variable(im_info)
num_boxes = Variable(num_boxes)
gt_boxes = Variable(gt_boxes)
if args.cuda:
cfg.CUDA = True
if args.cuda:
fasterRCNN.cuda()
start = time.time()
max_per_image = 100
vis = args.vis
if vis:
thresh = 0.05
else:
thresh = 0.0
save_name = 'faster_rcnn_10'
num_images = len(imdb.image_index)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
output_dir = get_output_dir(imdb, save_name)
dataset = roibatchLoader(roidb, ratio_list, ratio_index, 1, \
imdb.num_classes, training=False, normalize = False)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
shuffle=False, num_workers=0,
pin_memory=True)
data_iter = iter(dataloader)
_t = {'im_detect': time.time(), 'misc': time.time()}
det_file = os.path.join(output_dir, 'detections.pkl')
fasterRCNN.eval()
empty_array = np.transpose(np.array([[],[],[],[],[]]), (1,0))
for i in range(num_images):
data = next(data_iter)
im_data.data.resize_(data[0].size()).copy_(data[0])
im_info.data.resize_(data[1].size()).copy_(data[1])
gt_boxes.data.resize_(data[2].size()).copy_(data[2])
num_boxes.data.resize_(data[3].size()).copy_(data[3])
det_tic = time.time()
rois, cls_prob, bbox_pred, \
rpn_loss_cls, rpn_loss_box, \
RCNN_loss_cls, RCNN_loss_bbox, \
rois_label = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)
scores = cls_prob.data
boxes = rois.data[:, :, 1:5]
if cfg.TEST.BBOX_REG:
box_deltas = bbox_pred.data
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
if args.class_agnostic:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
box_deltas = box_deltas.view(1, -1, 4)
else:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
box_deltas = box_deltas.view(1, -1, 4 * len(imdb.classes))
pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)
pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)
else:
_ = torch.from_numpy(np.tile(boxes, (1, scores.shape[1])))
pred_boxes = _.cuda() if args.cuda > 0 else _
pred_boxes /= data[1][0][2].item()
scores = scores.squeeze()
pred_boxes = pred_boxes.squeeze()
det_toc = time.time()
detect_time = det_toc - det_tic
misc_tic = time.time()
if vis:
im = cv2.imread(imdb.image_path_at(i))
im2show = np.copy(im)
for j in xrange(1, imdb.num_classes):
inds = torch.nonzero(scores[:,j]>thresh).view(-1)
if inds.numel() > 0:
cls_scores = scores[:,j][inds]
_, order = torch.sort(cls_scores, 0, True)
if args.class_agnostic:
cls_boxes = pred_boxes[inds, :]
else:
cls_boxes = pred_boxes[inds][:, j * 4:(j + 1) * 4]
cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)
cls_dets = cls_dets[order]
keep = nms(cls_dets, cfg.TEST.NMS)
cls_dets = cls_dets[keep.view(-1).long()]
if vis:
im2show = vis_detections(im2show, imdb.classes[j], cls_dets.cpu().numpy(), 0.3)
all_boxes[j][i] = cls_dets.cpu().numpy()
else:
all_boxes[j][i] = empty_array
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1]
for j in xrange(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in xrange(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
misc_toc = time.time()
nms_time = misc_toc - misc_tic
sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s \r' \
.format(i + 1, num_images, detect_time, nms_time))
sys.stdout.flush()
if vis:
cv2.imwrite('result.png', im2show)
pdb.set_trace()
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
print('Evaluating detections')
imdb.evaluate_detections(all_boxes, output_dir)
end = time.time()
print("test time: %0.4fs" % (end - start))
| true | true |
f7006109577a06b46b567649a683de1311a12be8 | 270 | py | Python | .github/templates/flask/module/__main__.py | mondbev1/python-project-template | 74e8c903643cca2971c09cb3fff08287dfa74bcc | [
"Unlicense"
] | null | null | null | .github/templates/flask/module/__main__.py | mondbev1/python-project-template | 74e8c903643cca2971c09cb3fff08287dfa74bcc | [
"Unlicense"
] | null | null | null | .github/templates/flask/module/__main__.py | mondbev1/python-project-template | 74e8c903643cca2971c09cb3fff08287dfa74bcc | [
"Unlicense"
] | null | null | null | import click
from flask.cli import FlaskGroup
from . import create_app
@click.group(cls=FlaskGroup, create_app=create_app)
def main():
"""Management script for the python_project_template application."""
if __name__ == "__main__": # pragma: no cover
main()
| 20.769231 | 72 | 0.740741 | import click
from flask.cli import FlaskGroup
from . import create_app
@click.group(cls=FlaskGroup, create_app=create_app)
def main():
if __name__ == "__main__": main()
| true | true |
f700615e2a905b6e5d941c75f337b6670c36b49b | 3,149 | py | Python | system/system.py | hirune924/kaggle-HuBMAP | e4c2008378eb773db551cee52380bfccdf3a10fa | [
"Apache-2.0"
] | null | null | null | system/system.py | hirune924/kaggle-HuBMAP | e4c2008378eb773db551cee52380bfccdf3a10fa | [
"Apache-2.0"
] | null | null | null | system/system.py | hirune924/kaggle-HuBMAP | e4c2008378eb773db551cee52380bfccdf3a10fa | [
"Apache-2.0"
] | null | null | null | import pytorch_lightning as pl
from loss.loss import get_loss
from optimizer.optimizer import get_optimizer
from scheduler.scheduler import get_scheduler
import torch
import numpy as np
from pytorch_lightning.metrics import Accuracy
import segmentation_models_pytorch as smp
from utils.utils import load_obj
import albumentations as A
from utils.preprocessing import *
import shutil
class LitClassifier(pl.LightningModule):
def __init__(self, hparams, model):
super().__init__()
self.save_hyperparameters(hparams)
self.model = model
self.criteria = get_loss(hparams.training.loss)
#self.accuracy = Accuracy()
self.dice = smp.utils.losses.DiceLoss(activation='sigmoid')
def forward(self, x):
# use forward for inference/predictions
return self.model(x)
def configure_optimizers(self):
optimizer = get_optimizer(self.model.parameters(), self.hparams.training.optimizer)
scheduler = get_scheduler(optimizer, self.hparams.training.scheduler)
return [optimizer], [scheduler]
def training_step(self, batch, batch_idx):
x, y = batch
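        # Mixup augmentation: blend the two halves of the batch with a single
        # Beta(0.2, 0.2) weight; the loss below is blended with the same weight.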
if self.hparams.dataset.mixup:
num_batch = self.hparams.dataset.batch_size
alpha = 0.2
#rnd = torch.from_numpy(np.random.beta(alpha,alpha,int(num_batch/2))).type_as(x)
#rnd = rnd.reshape(int(num_batch/2), 1, 1, 1)
#x = x[:int(num_batch/2)]*rnd + x[int(num_batch/2):]*(1-rnd)
#y = y[:int(num_batch/2)]*rnd + y[int(num_batch/2):]*(1-rnd)
rnd = torch.from_numpy(np.random.beta(alpha,alpha,1)).type_as(x)
x = x[:int(num_batch/2)]*rnd + x[int(num_batch/2):]*(1-rnd)
y_hat = self.model(x)
if self.hparams.dataset.mixup:
loss = self.criteria(y_hat, y[:int(num_batch/2)])*rnd + self.criteria(y_hat, y[int(num_batch/2):])*(1-rnd)
else:
loss = self.criteria(y_hat, y)
self.log('train_loss', loss, on_epoch=True)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat = self.model(x)
loss = self.criteria(y_hat, y)
dice = 1-self.dice(y_hat, y)
#self.log('val_loss', loss)
#self.log('val_dice', dice)
return {
"val_loss": loss,
"val_dice": dice
}
def validation_epoch_end(self, outputs):
avg_val_loss = torch.stack([x["val_loss"] for x in outputs]).mean()
avg_val_dice = torch.stack([x["val_dice"] for x in outputs]).mean()
self.log('val_loss', avg_val_loss)
self.log('val_dice', avg_val_dice)
#y = torch.cat([x["y"] for x in outputs]).cpu()
#y_hat = torch.cat([x["y_hat"] for x in outputs]).cpu()
#preds = np.argmax(y_hat, axis=1)
#val_accuracy = self.accuracy(y, preds)
#self.log('avg_val_loss', avg_val_loss)
#self.log('val_acc', val_accuracy)
def test_step(self, batch, batch_idx):
x, y = batch
y_hat = self.model(x)
loss = self.criteria(y_hat, y)
self.log('test_loss', loss)
| 33.5 | 118 | 0.621467 | import pytorch_lightning as pl
from loss.loss import get_loss
from optimizer.optimizer import get_optimizer
from scheduler.scheduler import get_scheduler
import torch
import numpy as np
from pytorch_lightning.metrics import Accuracy
import segmentation_models_pytorch as smp
from utils.utils import load_obj
import albumentations as A
from utils.preprocessing import *
import shutil
class LitClassifier(pl.LightningModule):
def __init__(self, hparams, model):
super().__init__()
self.save_hyperparameters(hparams)
self.model = model
self.criteria = get_loss(hparams.training.loss)
self.dice = smp.utils.losses.DiceLoss(activation='sigmoid')
def forward(self, x):
return self.model(x)
def configure_optimizers(self):
optimizer = get_optimizer(self.model.parameters(), self.hparams.training.optimizer)
scheduler = get_scheduler(optimizer, self.hparams.training.scheduler)
return [optimizer], [scheduler]
def training_step(self, batch, batch_idx):
x, y = batch
if self.hparams.dataset.mixup:
num_batch = self.hparams.dataset.batch_size
alpha = 0.2
rnd = torch.from_numpy(np.random.beta(alpha,alpha,1)).type_as(x)
x = x[:int(num_batch/2)]*rnd + x[int(num_batch/2):]*(1-rnd)
y_hat = self.model(x)
if self.hparams.dataset.mixup:
loss = self.criteria(y_hat, y[:int(num_batch/2)])*rnd + self.criteria(y_hat, y[int(num_batch/2):])*(1-rnd)
else:
loss = self.criteria(y_hat, y)
self.log('train_loss', loss, on_epoch=True)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat = self.model(x)
loss = self.criteria(y_hat, y)
dice = 1-self.dice(y_hat, y)
return {
"val_loss": loss,
"val_dice": dice
}
def validation_epoch_end(self, outputs):
avg_val_loss = torch.stack([x["val_loss"] for x in outputs]).mean()
avg_val_dice = torch.stack([x["val_dice"] for x in outputs]).mean()
self.log('val_loss', avg_val_loss)
self.log('val_dice', avg_val_dice)
def test_step(self, batch, batch_idx):
x, y = batch
y_hat = self.model(x)
loss = self.criteria(y_hat, y)
self.log('test_loss', loss)
| true | true |
f70061697e12c46d39594f2bf2f9bb8e344f31c7 | 9,929 | py | Python | coronatest_analyze_csv.py | han-kwang/coronatest-scandata | 98fd49f4fdcda10561bce41e769bbbb70ecfe94e | [
"MIT"
] | null | null | null | coronatest_analyze_csv.py | han-kwang/coronatest-scandata | 98fd49f4fdcda10561bce41e769bbbb70ecfe94e | [
"MIT"
] | null | null | null | coronatest_analyze_csv.py | han-kwang/coronatest-scandata | 98fd49f4fdcda10561bce41e769bbbb70ecfe94e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Analyze CSV file into scores.
Created on Sat Feb 12 22:15:29 2022 // @hk_nien
"""
from pathlib import Path
import os
import re
import sys
import pandas as pd
import numpy as np
PCODES = dict([
# Regio Noord
(1011, 'Amsterdam'),
(1625, 'Hoorn|Zwaag'),
(1811, 'Alkmaar'),
(7471, 'Goor'),
(7556, 'Hengelo'),
(7903, 'Hoogeveen'),
(7942, 'Meppel'),
(8011, 'Zwolle'),
(8232, 'Lelystad'),
(8442, 'Heerenveen'),
(8911, 'Leeuwarden'),
(9291, 'Kollum'),
(9501, 'Stadskanaal'),
(9726, 'Groningen'),
# Regio Midden
(2406, 'Alphen a/d Rijn'),
(2515, 'Den Haag'),
(3013, 'Rotterdam'),
(3511, 'Utrecht'),
(3901, 'Veenendaal'),
((7137, 7131), 'Lichtenvoorde|Groenlo'),
(7311, 'Apeldoorn'),
# Regio Zuid
(4325, 'Renesse'),
(4462, 'Goes'),
(4701, 'Roosendaal'),
(5038, 'Tilburg'),
(5401, 'Uden'),
(5611, 'Eindhoven'),
(5801, 'Oostrum'),
(6101, 'Echt'),
(6229, 'Maastricht'),
(6541, 'Nijmegen'),
])
def get_bad_scan_times():
"""Return list of Timestamps with bad scan times, from CSV data."""
df = pd.read_csv('data-ggd/ggd_bad_scans.txt', comment='#')
tstamps = pd.to_datetime(df['Timestamp']).to_list()
return tstamps
def _mean_time(ts_list):
"""Return mean timestamp value from list of timestamps."""
ts0 = ts_list[0]
delta_sum = pd.Timedelta(0)
for ts in ts_list:
delta_sum += (ts -ts0)
ts_mean = ts0 + delta_sum / len(ts_list)
return ts_mean
def _delta_time_hhmm(hm):
"""Convert 'hh:mm' string to TimeDelta."""
return pd.Timedelta(f'{hm}:00')
def _summary_to_scores(summary):
"""Convert summary from _read_log to scores dict and effective timestamp.
Parameters:
- summary: dict with int(pc4) -> [(query_time, appt_time), ...]
Return:
- scores dict: int(pc4) -> score (int or float or '?')
- timestamp: middle query timestamp of this run.
"""
# Convert to number codes.
scores = {k: '?' for k in PCODES}
multi_pcs = {} # pc4 -> (pc4[0], pc4[1], ...)
for pc in PCODES:
if isinstance(pc, tuple):
for pc1 in pc:
multi_pcs[pc1] = pc
qtms = []
dhm = _delta_time_hhmm
for pc4, vlist in summary.items():
pc4 = int(pc4)
if pc4 not in scores:
if pc4 in multi_pcs:
pc4_key = multi_pcs[pc4]
else:
print(f'{pc4} not in list...')
continue
else:
pc4_key = pc4
if len(vlist) == 0:
scores[pc4_key] = 7
continue
qtm = _mean_time([v[0] for v in vlist]) # query time
qtms.append(qtm)
atm = min(v[1] for v in vlist) # earliest appointment time
qtm_00 = pd.Timestamp(qtm.strftime('%Y-%m-%dT00:00'))
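        # (score, deadline) pairs: the earliest appointment time is compared
        # against each deadline in order; a lower score means an earlier slot.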
thresholds = [
(3, qtm_00 + dhm('23:59')),
(4, qtm + dhm('24:00')),
(5, qtm_00 + dhm('48:00')),
(6, qtm + dhm('48:00')),
(6.3, qtm_00 + dhm('72:00')),
(6.7, qtm + dhm('72:00')),
(7, atm)
]
if qtm.hour < 9:
thresholds.insert(0, (1, qtm_00 + dhm('13:00')))
elif qtm.hour < 13:
thresholds.insert(0, (1, qtm + dhm('4:00')))
elif qtm.hour < 17:
thresholds.insert(0, (1, qtm_00 + dhm('24:00')))
thresholds.insert(1, (2, qtm + dhm('20:00')))
else:
thresholds.insert(0, (1, qtm_00 + dhm('24:00')))
thresholds.insert(1, (2, qtm_00 + dhm('37:00')))
for s, tm in thresholds:
if atm < tm:
scores[pc4_key] = s
break
if len(qtms) == 0:
qtm_mid = pd.Timestamp(None)
else:
qtm_min = min(qtms)
qtm_mid = qtm_min + (max(qtms) - qtm_min)/2
return scores, qtm_mid
def _get_min_wait(summary):
"""Return minimum and median wait Timedelta between scan time and appointment.
summary is dict of pc4 -> list of timestamps
No data -> 999 h.
For the median, NaT is counted as infinite.
"""
wtimes = []
for _, vlist in summary.items():
wtimes_this = [atm - qtm for qtm, atm in vlist]
wtimes.append(
min(wtimes_this) if wtimes_this else pd.Timedelta(99, 'h')
)
minwait = min(wtimes) if wtimes else 999
medwait = pd.Timedelta(np.median(wtimes))
return minwait, medwait
def load_csv(csv_fname):
"""Return DataFrame and list of start times (+1)."""
df = pd.read_csv(csv_fname, comment='#')
df['req_pc4'] = df['req_pc4'].astype(int)
for c in df.columns:
if c.endswith('_time') or c.endswith('_date'):
df[c] = pd.to_datetime(df[c])
else:
df.loc[df[c].isna(), c] = None
# start_tms: list of scan start times (plus one extra at the end)
start_tms = df.loc[df['scan_time'].diff() > pd.Timedelta('10 min'), 'scan_time']
start_tms = [df.iloc[0]['scan_time']] + list(start_tms)
start_tms += [df.iloc[-1]['scan_time'] + pd.Timedelta('1 min')]
return df, start_tms
def load_multi_csvs(csv_fnames):
"""Return DataFrame and list of start times (+1)"""
dfs = []
start_tms = []
for f in csv_fnames:
df, st = load_csv(f)
dfs.append(df)
start_tms.extend(st[:-1])
df = pd.concat(dfs).reset_index()
start_tms.append(df.iloc[-1]['scan_time'] + pd.Timedelta('1 min'))
return df, start_tms
def get_scan_scores(df, tm_range):
"""Get scan scores as pc4 -> score dict.
Parameters:
- df: DataFrame with scan_time, req_date, req_pc4, opt0_short_addr,
opt0_time, opt0_loc_id, etc.
- tm_range: (tm_start, tm_stop) timestamps.
Return:
- tstamp: timestamp of the scan (mid-point)
- scores: dict of pc4->score
- min_wait: Timedelta of minimum wait time from scan to appointment
"""
mask = (df['scan_time'] >= tm_range[0]) & (df['scan_time'] < tm_range[1])
df1 = df.loc[mask]
summary = {}
for pc4, city_re in PCODES.items():
pc4_tup = (pc4,) if isinstance(pc4, int) else pc4
options = []
req_pc4 = None
for _, row in df1.loc[df1['req_pc4'].isin(pc4_tup)].iterrows():
req_pc4 = int(row['req_pc4'])
for i in range(3):
addr = row[f'opt{i}_short_addr']
if addr and re.match(f'{city_re}$', addr[5:]):
options.append((row['scan_time'], row[f'opt{i}_time']))
if req_pc4 is not None:
summary[req_pc4] = options
scores, tstamp = _summary_to_scores(summary)
if pd.isna(tstamp):
tstamp = df1.iloc[len(df1)//2]['scan_time']
minwait, medwait = _get_min_wait(summary)
if medwait == 999:
medwait = pd.Timedelta(None)
return tstamp, scores, minwait, medwait
def get_scan_scores_df(df, tm_ranges, decimal_comma=True):
"""Get scan scores as dataframe, from csv dataframe.
Blacklisted scan times are dropped.
Parameters:
- df: DataFrame with scan_time, req_date, req_pc4, opt0_short_addr,
opt0_time, opt0_loc_id, etc.
- tm_ranges: list of timestamps (+one at the end) with boundaries
of timestamp ranges.
- decimal_comma: True to have string values 6,3 rather than float 6.3.
Return:
- Dataframe with scores, date_str, time_str, pc4, min_wait, med_wait as columns.
"""
n = len(tm_ranges)
records = []
index = []
minwait_hs = []
medwait_hs = []
bad_stimes = get_bad_scan_times()
for i in range(n-1):
tm_ra = tm_ranges[i:i+2]
is_ok = True
for tm in bad_stimes:
if tm_ra[0] <= tm < tm_ra[1]:
is_ok = False
break
if not is_ok:
print(f'Dropped scan at {tm_ra[0].strftime("%Y-%m-%d %H:%M")}')
continue
tm, scores, minwait, medwait = get_scan_scores(df, tm_ra)
records.append(scores)
index.append(tm)
minwait_hs.append(minwait.total_seconds() / 3600)
medwait_hs.append(medwait.total_seconds() / 3600)
dates = [t.strftime('%Y-%m-%d') for t in index]
times = [t.strftime('%H:%M') for t in index]
sdf = pd.DataFrame.from_records(records)
sdf.insert(0, 'Time', times)
sdf.insert(0, 'Date', dates)
sdf['min_wait_h'] = np.around(minwait_hs, 2)
sdf['med_wait_h'] = np.around(medwait_hs, 2)
sdf.loc[sdf['min_wait_h'].isna(), 'min_wait_h'] = 999
sdf.columns = [
('/'.join([str(x) for x in c]) if isinstance(c, tuple) else c)
for c in sdf.columns
]
if decimal_comma:
for c in sdf.columns[2:]:
sdf[c] = sdf[c].astype(str)
sdf[c] = sdf[c].str.replace('.', ',', regex=False)
sdf[c] = sdf[c].str.replace(',0$', '', regex=False)
sdf[c] = sdf[c].str.replace('?', '', regex=False)
return sdf
if __name__ == '__main__':
in_spyder = ('SPYDER_ARGS' in os.environ)
csv_fnames = sorted(Path('data-ggd').glob('ggd_scan-????-W??.csv'))
do_all = ('--all' in sys.argv)
do_all = do_all or in_spyder and input('(A)ll or latest?').lower() == 'a'
if do_all:
df, start_tms = load_multi_csvs(csv_fnames)
sdf = get_scan_scores_df(df, start_tms).iloc[::-1]
else:
df, start_tms = load_csv(csv_fnames[-1])
sdf = get_scan_scores_df(df, start_tms[-2:])
print(sdf)
if len(sdf) > 1:
sdf.to_clipboard(index=False)
print('Copied to clipboard including headers')
elif len(sdf) == 1:
sdf.iloc[[0], 2:].to_clipboard(header=False, index=False)
print('Copied to clipboard, scores only.')
else:
print('No output.')
if not in_spyder:
# Note: in Spyder, copy/paste will stall while input is blocked.
input('Press Enter to quit and clear clipboard.')
| 30.835404 | 84 | 0.570551 | from pathlib import Path
import os
import re
import sys
import pandas as pd
import numpy as np
PCODES = dict([
(1011, 'Amsterdam'),
(1625, 'Hoorn|Zwaag'),
(1811, 'Alkmaar'),
(7471, 'Goor'),
(7556, 'Hengelo'),
(7903, 'Hoogeveen'),
(7942, 'Meppel'),
(8011, 'Zwolle'),
(8232, 'Lelystad'),
(8442, 'Heerenveen'),
(8911, 'Leeuwarden'),
(9291, 'Kollum'),
(9501, 'Stadskanaal'),
(9726, 'Groningen'),
(2406, 'Alphen a/d Rijn'),
(2515, 'Den Haag'),
(3013, 'Rotterdam'),
(3511, 'Utrecht'),
(3901, 'Veenendaal'),
((7137, 7131), 'Lichtenvoorde|Groenlo'),
(7311, 'Apeldoorn'),
(4325, 'Renesse'),
(4462, 'Goes'),
(4701, 'Roosendaal'),
(5038, 'Tilburg'),
(5401, 'Uden'),
(5611, 'Eindhoven'),
(5801, 'Oostrum'),
(6101, 'Echt'),
(6229, 'Maastricht'),
(6541, 'Nijmegen'),
])
def get_bad_scan_times():
df = pd.read_csv('data-ggd/ggd_bad_scans.txt', comment='#')
tstamps = pd.to_datetime(df['Timestamp']).to_list()
return tstamps
def _mean_time(ts_list):
ts0 = ts_list[0]
delta_sum = pd.Timedelta(0)
for ts in ts_list:
delta_sum += (ts -ts0)
ts_mean = ts0 + delta_sum / len(ts_list)
return ts_mean
def _delta_time_hhmm(hm):
return pd.Timedelta(f'{hm}:00')
def _summary_to_scores(summary):
scores = {k: '?' for k in PCODES}
    multi_pcs = {}
    for pc in PCODES:
if isinstance(pc, tuple):
for pc1 in pc:
multi_pcs[pc1] = pc
qtms = []
dhm = _delta_time_hhmm
for pc4, vlist in summary.items():
pc4 = int(pc4)
if pc4 not in scores:
if pc4 in multi_pcs:
pc4_key = multi_pcs[pc4]
else:
print(f'{pc4} not in list...')
continue
else:
pc4_key = pc4
if len(vlist) == 0:
scores[pc4_key] = 7
continue
        qtm = _mean_time([v[0] for v in vlist])
        qtms.append(qtm)
        atm = min(v[1] for v in vlist)
        qtm_00 = pd.Timestamp(qtm.strftime('%Y-%m-%dT00:00'))
thresholds = [
(3, qtm_00 + dhm('23:59')),
(4, qtm + dhm('24:00')),
(5, qtm_00 + dhm('48:00')),
(6, qtm + dhm('48:00')),
(6.3, qtm_00 + dhm('72:00')),
(6.7, qtm + dhm('72:00')),
(7, atm)
]
if qtm.hour < 9:
thresholds.insert(0, (1, qtm_00 + dhm('13:00')))
elif qtm.hour < 13:
thresholds.insert(0, (1, qtm + dhm('4:00')))
elif qtm.hour < 17:
thresholds.insert(0, (1, qtm_00 + dhm('24:00')))
thresholds.insert(1, (2, qtm + dhm('20:00')))
else:
thresholds.insert(0, (1, qtm_00 + dhm('24:00')))
thresholds.insert(1, (2, qtm_00 + dhm('37:00')))
for s, tm in thresholds:
if atm < tm:
scores[pc4_key] = s
break
if len(qtms) == 0:
qtm_mid = pd.Timestamp(None)
else:
qtm_min = min(qtms)
qtm_mid = qtm_min + (max(qtms) - qtm_min)/2
return scores, qtm_mid
def _get_min_wait(summary):
wtimes = []
for _, vlist in summary.items():
wtimes_this = [atm - qtm for qtm, atm in vlist]
wtimes.append(
min(wtimes_this) if wtimes_this else pd.Timedelta(99, 'h')
)
minwait = min(wtimes) if wtimes else 999
medwait = pd.Timedelta(np.median(wtimes))
return minwait, medwait
def load_csv(csv_fname):
df = pd.read_csv(csv_fname, comment='#')
df['req_pc4'] = df['req_pc4'].astype(int)
for c in df.columns:
if c.endswith('_time') or c.endswith('_date'):
df[c] = pd.to_datetime(df[c])
else:
df.loc[df[c].isna(), c] = None
start_tms = df.loc[df['scan_time'].diff() > pd.Timedelta('10 min'), 'scan_time']
start_tms = [df.iloc[0]['scan_time']] + list(start_tms)
start_tms += [df.iloc[-1]['scan_time'] + pd.Timedelta('1 min')]
return df, start_tms
def load_multi_csvs(csv_fnames):
dfs = []
start_tms = []
for f in csv_fnames:
df, st = load_csv(f)
dfs.append(df)
start_tms.extend(st[:-1])
df = pd.concat(dfs).reset_index()
start_tms.append(df.iloc[-1]['scan_time'] + pd.Timedelta('1 min'))
return df, start_tms
def get_scan_scores(df, tm_range):
mask = (df['scan_time'] >= tm_range[0]) & (df['scan_time'] < tm_range[1])
df1 = df.loc[mask]
summary = {}
for pc4, city_re in PCODES.items():
pc4_tup = (pc4,) if isinstance(pc4, int) else pc4
options = []
req_pc4 = None
for _, row in df1.loc[df1['req_pc4'].isin(pc4_tup)].iterrows():
req_pc4 = int(row['req_pc4'])
for i in range(3):
addr = row[f'opt{i}_short_addr']
if addr and re.match(f'{city_re}$', addr[5:]):
options.append((row['scan_time'], row[f'opt{i}_time']))
if req_pc4 is not None:
summary[req_pc4] = options
scores, tstamp = _summary_to_scores(summary)
if pd.isna(tstamp):
tstamp = df1.iloc[len(df1)//2]['scan_time']
minwait, medwait = _get_min_wait(summary)
if medwait == 999:
medwait = pd.Timedelta(None)
return tstamp, scores, minwait, medwait
def get_scan_scores_df(df, tm_ranges, decimal_comma=True):
n = len(tm_ranges)
records = []
index = []
minwait_hs = []
medwait_hs = []
bad_stimes = get_bad_scan_times()
for i in range(n-1):
tm_ra = tm_ranges[i:i+2]
is_ok = True
for tm in bad_stimes:
if tm_ra[0] <= tm < tm_ra[1]:
is_ok = False
break
if not is_ok:
print(f'Dropped scan at {tm_ra[0].strftime("%Y-%m-%d %H:%M")}')
continue
tm, scores, minwait, medwait = get_scan_scores(df, tm_ra)
records.append(scores)
index.append(tm)
minwait_hs.append(minwait.total_seconds() / 3600)
medwait_hs.append(medwait.total_seconds() / 3600)
dates = [t.strftime('%Y-%m-%d') for t in index]
times = [t.strftime('%H:%M') for t in index]
sdf = pd.DataFrame.from_records(records)
sdf.insert(0, 'Time', times)
sdf.insert(0, 'Date', dates)
sdf['min_wait_h'] = np.around(minwait_hs, 2)
sdf['med_wait_h'] = np.around(medwait_hs, 2)
sdf.loc[sdf['min_wait_h'].isna(), 'min_wait_h'] = 999
sdf.columns = [
('/'.join([str(x) for x in c]) if isinstance(c, tuple) else c)
for c in sdf.columns
]
if decimal_comma:
for c in sdf.columns[2:]:
sdf[c] = sdf[c].astype(str)
sdf[c] = sdf[c].str.replace('.', ',', regex=False)
sdf[c] = sdf[c].str.replace(',0$', '', regex=False)
sdf[c] = sdf[c].str.replace('?', '', regex=False)
return sdf
if __name__ == '__main__':
in_spyder = ('SPYDER_ARGS' in os.environ)
csv_fnames = sorted(Path('data-ggd').glob('ggd_scan-????-W??.csv'))
do_all = ('--all' in sys.argv)
do_all = do_all or in_spyder and input('(A)ll or latest?').lower() == 'a'
if do_all:
df, start_tms = load_multi_csvs(csv_fnames)
sdf = get_scan_scores_df(df, start_tms).iloc[::-1]
else:
df, start_tms = load_csv(csv_fnames[-1])
sdf = get_scan_scores_df(df, start_tms[-2:])
print(sdf)
if len(sdf) > 1:
sdf.to_clipboard(index=False)
print('Copied to clipboard including headers')
elif len(sdf) == 1:
sdf.iloc[[0], 2:].to_clipboard(header=False, index=False)
print('Copied to clipboard, scores only.')
else:
print('No output.')
if not in_spyder:
input('Press Enter to quit and clear clipboard.')
| true | true |
f700617c620c0aa66f529697d1f5e651108eedf8 | 4,485 | py | Python | Starter_Code 2/qualifier/app.py | RonakAgarwal/FinTech_M2_CH | 78bcd41bf1944106106bb6e31bd4a8d92d974565 | [
"MIT"
] | null | null | null | Starter_Code 2/qualifier/app.py | RonakAgarwal/FinTech_M2_CH | 78bcd41bf1944106106bb6e31bd4a8d92d974565 | [
"MIT"
] | null | null | null | Starter_Code 2/qualifier/app.py | RonakAgarwal/FinTech_M2_CH | 78bcd41bf1944106106bb6e31bd4a8d92d974565 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Loan Qualifier Application.
This is a command line application to match applicants with qualifying loans.
Example:
$ python app.py
"""
import sys
import fire
import questionary
from pathlib import Path
import csv
from qualifier.utils.fileio import (
load_csv,
save_csv,
)
from qualifier.utils.calculators import (
calculate_monthly_debt_ratio,
calculate_loan_to_value_ratio,
)
from qualifier.filters.max_loan_size import filter_max_loan_size
from qualifier.filters.credit_score import filter_credit_score
from qualifier.filters.debt_to_income import filter_debt_to_income
from qualifier.filters.loan_to_value import filter_loan_to_value
def load_bank_data():
"""Ask for the file path to the latest banking data and load the CSV file.
Returns:
The bank data from the data rate sheet CSV file.
"""
csvpath = questionary.text("Enter a file path to a rate-sheet (.csv):").ask()
csvpath = Path(csvpath)
if not csvpath.exists():
sys.exit(f"Oops! Can't find this path: {csvpath}")
return load_csv(csvpath)
def get_applicant_info():
"""Prompt dialog to get the applicant's financial information.
Returns:
Returns the applicant's financial information.
"""
credit_score = questionary.text("What's your credit score?").ask()
debt = questionary.text("What's your current amount of monthly debt?").ask()
income = questionary.text("What's your total monthly income?").ask()
loan_amount = questionary.text("What's your desired loan amount?").ask()
home_value = questionary.text("What's your home value?").ask()
credit_score = int(credit_score)
debt = float(debt)
income = float(income)
loan_amount = float(loan_amount)
home_value = float(home_value)
return credit_score, debt, income, loan_amount, home_value
def find_qualifying_loans(bank_data, credit_score, debt, income, loan, home_value):
"""Determine which loans the user qualifies for.
Loan qualification criteria is based on:
- Credit Score
- Loan Size
- Debit to Income ratio (calculated)
- Loan to Value ratio (calculated)
Args:
bank_data (list): A list of bank data.
credit_score (int): The applicant's current credit score.
debt (float): The applicant's total monthly debt payments.
income (float): The applicant's total monthly income.
loan (float): The total loan amount applied for.
home_value (float): The estimated home value.
Returns:
A list of the banks willing to underwrite the loan.
"""
# Calculate the monthly debt ratio
monthly_debt_ratio = calculate_monthly_debt_ratio(debt, income)
print(f"The monthly debt to income ratio is {monthly_debt_ratio:.02f}")
# Calculate loan to value ratio
loan_to_value_ratio = calculate_loan_to_value_ratio(loan, home_value)
print(f"The loan to value ratio is {loan_to_value_ratio:.02f}.")
# Run qualification filters
bank_data_filtered = filter_max_loan_size(loan, bank_data)
bank_data_filtered = filter_credit_score(credit_score, bank_data_filtered)
bank_data_filtered = filter_debt_to_income(monthly_debt_ratio, bank_data_filtered)
bank_data_filtered = filter_loan_to_value(loan_to_value_ratio, bank_data_filtered)
print(f"Found {len(bank_data_filtered)} qualifying loans")
return bank_data_filtered
def save_qualifying_loans(qualifying_loans):
"""Saves the qualifying loans to a CSV file.
Args:
qualifying_loans (list of lists): The qualifying bank loans.
"""
    # @TODO: Complete the usability dialog for saving the CSV files.
# YOUR CODE HERE!
choice = questionary.confirm ("Would you like to save the qualifying loans?").ask()
    if choice:
filepath = questionary.text ("Please enter the file path").ask()
save_csv(qualifying_loans, filepath)
def run():
"""The main function for running the script."""
# Load the latest Bank data
bank_data = load_bank_data()
# Get the applicant's information
credit_score, debt, income, loan_amount, home_value = get_applicant_info()
# Find qualifying loans
qualifying_loans = find_qualifying_loans(
bank_data, credit_score, debt, income, loan_amount, home_value
)
# Save qualifying loans
save_qualifying_loans(qualifying_loans)
if __name__ == "__main__":
fire.Fire(run)
| 30.719178 | 87 | 0.716611 | from re import T
import sys
import fire
import questionary
from pathlib import Path
import csv
from qualifier.utils.fileio import (
load_csv,
save_csv,
)
from qualifier.utils.calculators import (
calculate_monthly_debt_ratio,
calculate_loan_to_value_ratio,
)
from qualifier.filters.max_loan_size import filter_max_loan_size
from qualifier.filters.credit_score import filter_credit_score
from qualifier.filters.debt_to_income import filter_debt_to_income
from qualifier.filters.loan_to_value import filter_loan_to_value
def load_bank_data():
csvpath = questionary.text("Enter a file path to a rate-sheet (.csv):").ask()
csvpath = Path(csvpath)
if not csvpath.exists():
sys.exit(f"Oops! Can't find this path: {csvpath}")
return load_csv(csvpath)
def get_applicant_info():
credit_score = questionary.text("What's your credit score?").ask()
debt = questionary.text("What's your current amount of monthly debt?").ask()
income = questionary.text("What's your total monthly income?").ask()
loan_amount = questionary.text("What's your desired loan amount?").ask()
home_value = questionary.text("What's your home value?").ask()
credit_score = int(credit_score)
debt = float(debt)
income = float(income)
loan_amount = float(loan_amount)
home_value = float(home_value)
return credit_score, debt, income, loan_amount, home_value
def find_qualifying_loans(bank_data, credit_score, debt, income, loan, home_value):
monthly_debt_ratio = calculate_monthly_debt_ratio(debt, income)
print(f"The monthly debt to income ratio is {monthly_debt_ratio:.02f}")
loan_to_value_ratio = calculate_loan_to_value_ratio(loan, home_value)
print(f"The loan to value ratio is {loan_to_value_ratio:.02f}.")
bank_data_filtered = filter_max_loan_size(loan, bank_data)
bank_data_filtered = filter_credit_score(credit_score, bank_data_filtered)
bank_data_filtered = filter_debt_to_income(monthly_debt_ratio, bank_data_filtered)
bank_data_filtered = filter_loan_to_value(loan_to_value_ratio, bank_data_filtered)
print(f"Found {len(bank_data_filtered)} qualifying loans")
return bank_data_filtered
def save_qualifying_loans(qualifying_loans):
choice = questionary.confirm ("Would you like to save the qualifying loans?").ask()
    if choice:
filepath = questionary.text ("Please enter the file path").ask()
save_csv(qualifying_loans, filepath)
def run():
bank_data = load_bank_data()
credit_score, debt, income, loan_amount, home_value = get_applicant_info()
# Find qualifying loans
qualifying_loans = find_qualifying_loans(
bank_data, credit_score, debt, income, loan_amount, home_value
)
# Save qualifying loans
save_qualifying_loans(qualifying_loans)
if __name__ == "__main__":
fire.Fire(run)
| true | true |
f70061f5cb80071c33ec1a33d265c8b68682e42c | 26,852 | py | Python | vnpy/app/portfolio_strategy/backtesting.py | hardywu/vnpy | 81ab73dc57d12a3ff7c74c73665513b46fc0f668 | [
"MIT"
] | 1 | 2021-05-14T12:57:08.000Z | 2021-05-14T12:57:08.000Z | vnpy/app/portfolio_strategy/backtesting.py | hardywu/vnpy | 81ab73dc57d12a3ff7c74c73665513b46fc0f668 | [
"MIT"
] | null | null | null | vnpy/app/portfolio_strategy/backtesting.py | hardywu/vnpy | 81ab73dc57d12a3ff7c74c73665513b46fc0f668 | [
"MIT"
] | 1 | 2021-06-14T13:26:41.000Z | 2021-06-14T13:26:41.000Z | from collections import defaultdict
from datetime import date, datetime, timedelta
from typing import Dict, List, Set, Tuple
from functools import lru_cache
from copy import copy
import traceback
import numpy as np
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from pandas import DataFrame
from vnpy.trader.constant import Direction, Offset, Interval, Status
from vnpy.trader.database import database_manager
from vnpy.trader.object import OrderData, TradeData, BarData
from vnpy.trader.utility import round_to, extract_vt_symbol
from .template import StrategyTemplate
INTERVAL_DELTA_MAP = {
Interval.MINUTE: timedelta(minutes=1),
Interval.HOUR: timedelta(hours=1),
Interval.DAILY: timedelta(days=1),
}
class BacktestingEngine:
""""""
gateway_name = "BACKTESTING"
def __init__(self):
""""""
self.vt_symbols: List[str] = []
self.start: datetime = None
self.end: datetime = None
self.rates: Dict[str, float] = 0
self.slippages: Dict[str, float] = 0
self.sizes: Dict[str, float] = 1
self.priceticks: Dict[str, float] = 0
self.capital: float = 1_000_000
self.risk_free: float = 0.02
self.strategy: StrategyTemplate = None
self.bars: Dict[str, BarData] = {}
self.datetime: datetime = None
self.interval: Interval = None
self.days: int = 0
self.history_data: Dict[Tuple, BarData] = {}
self.dts: Set[datetime] = set()
self.limit_order_count = 0
self.limit_orders = {}
self.active_limit_orders = {}
self.trade_count = 0
self.trades = {}
self.logs = []
self.daily_results = {}
self.daily_df = None
def clear_data(self) -> None:
"""
Clear all data of last backtesting.
"""
self.strategy = None
self.bars = {}
self.datetime = None
self.limit_order_count = 0
self.limit_orders.clear()
self.active_limit_orders.clear()
self.trade_count = 0
self.trades.clear()
self.logs.clear()
self.daily_results.clear()
self.daily_df = None
def set_parameters(
self,
vt_symbols: List[str],
interval: Interval,
start: datetime,
rates: Dict[str, float],
slippages: Dict[str, float],
sizes: Dict[str, float],
priceticks: Dict[str, float],
capital: int = 0,
end: datetime = None,
risk_free: float = 0
) -> None:
""""""
self.vt_symbols = vt_symbols
self.interval = interval
self.rates = rates
self.slippages = slippages
self.sizes = sizes
self.priceticks = priceticks
self.start = start
self.end = end
self.capital = capital
self.risk_free = risk_free
def add_strategy(self, strategy_class: type, setting: dict) -> None:
""""""
self.strategy = strategy_class(
self, strategy_class.__name__, copy(self.vt_symbols), setting
)
def load_data(self) -> None:
""""""
self.output("开始加载历史数据")
if not self.end:
self.end = datetime.now()
if self.start >= self.end:
self.output("起始日期必须小于结束日期")
return
# Clear previously loaded history data
self.history_data.clear()
self.dts.clear()
# Load 30 days of data each time and allow for progress update
progress_delta = timedelta(days=30)
total_delta = self.end - self.start
interval_delta = INTERVAL_DELTA_MAP[self.interval]
for vt_symbol in self.vt_symbols:
start = self.start
end = self.start + progress_delta
progress = 0
data_count = 0
while start < self.end:
end = min(end, self.end) # Make sure end time stays within set range
data = load_bar_data(
vt_symbol,
self.interval,
start,
end
)
for bar in data:
self.dts.add(bar.datetime)
self.history_data[(bar.datetime, vt_symbol)] = bar
data_count += 1
progress += progress_delta / total_delta
progress = min(progress, 1)
progress_bar = "#" * int(progress * 10)
self.output(f"{vt_symbol}加载进度:{progress_bar} [{progress:.0%}]")
start = end + interval_delta
end += (progress_delta + interval_delta)
self.output(f"{vt_symbol}历史数据加载完成,数据量:{data_count}")
self.output("所有历史数据加载完成")
def run_backtesting(self) -> None:
""""""
self.strategy.on_init()
# Generate sorted datetime list
dts = list(self.dts)
dts.sort()
# Use the first [days] of history data for initializing strategy
day_count = 0
ix = 0
for ix, dt in enumerate(dts):
if self.datetime and dt.day != self.datetime.day:
day_count += 1
if day_count >= self.days:
break
try:
self.new_bars(dt)
except Exception:
self.output("触发异常,回测终止")
self.output(traceback.format_exc())
return
self.strategy.inited = True
self.output("策略初始化完成")
self.strategy.on_start()
self.strategy.trading = True
self.output("开始回放历史数据")
# Use the rest of history data for running backtesting
for dt in dts[ix:]:
try:
self.new_bars(dt)
except Exception:
self.output("触发异常,回测终止")
self.output(traceback.format_exc())
return
self.output("历史数据回放结束")
def calculate_result(self) -> None:
""""""
self.output("开始计算逐日盯市盈亏")
if not self.trades:
self.output("成交记录为空,无法计算")
return
        # Add trade data into daily result.
for trade in self.trades.values():
d = trade.datetime.date()
daily_result = self.daily_results[d]
daily_result.add_trade(trade)
# Calculate daily result by iteration.
pre_closes = {}
start_poses = {}
for daily_result in self.daily_results.values():
daily_result.calculate_pnl(
pre_closes,
start_poses,
self.sizes,
self.rates,
self.slippages,
)
pre_closes = daily_result.close_prices
start_poses = daily_result.end_poses
# Generate dataframe
results = defaultdict(list)
for daily_result in self.daily_results.values():
fields = [
"date", "trade_count", "turnover",
"commission", "slippage", "trading_pnl",
"holding_pnl", "total_pnl", "net_pnl"
]
for key in fields:
value = getattr(daily_result, key)
results[key].append(value)
self.daily_df = DataFrame.from_dict(results).set_index("date")
self.output("逐日盯市盈亏计算完成")
return self.daily_df
def calculate_statistics(self, df: DataFrame = None, output=True) -> None:
""""""
self.output("开始计算策略统计指标")
        # Use the externally supplied DataFrame if given, otherwise fall back to the stored daily results
if df is None:
df = self.daily_df
        # Nothing to compute if there are no daily results yet
if df is None:
# Set all statistics to 0 if no trade.
start_date = ""
end_date = ""
total_days = 0
profit_days = 0
loss_days = 0
end_balance = 0
max_drawdown = 0
max_ddpercent = 0
max_drawdown_duration = 0
total_net_pnl = 0
daily_net_pnl = 0
total_commission = 0
daily_commission = 0
total_slippage = 0
daily_slippage = 0
total_turnover = 0
daily_turnover = 0
total_trade_count = 0
daily_trade_count = 0
total_return = 0
annual_return = 0
daily_return = 0
return_std = 0
sharpe_ratio = 0
return_drawdown_ratio = 0
else:
# Calculate balance related time series data
df["balance"] = df["net_pnl"].cumsum() + self.capital
df["return"] = np.log(df["balance"] / df["balance"].shift(1)).fillna(0)
df["highlevel"] = (
df["balance"].rolling(
min_periods=1, window=len(df), center=False).max()
)
df["drawdown"] = df["balance"] - df["highlevel"]
df["ddpercent"] = df["drawdown"] / df["highlevel"] * 100
# Calculate statistics value
start_date = df.index[0]
end_date = df.index[-1]
total_days = len(df)
profit_days = len(df[df["net_pnl"] > 0])
loss_days = len(df[df["net_pnl"] < 0])
end_balance = df["balance"].iloc[-1]
max_drawdown = df["drawdown"].min()
max_ddpercent = df["ddpercent"].min()
max_drawdown_end = df["drawdown"].idxmin()
if isinstance(max_drawdown_end, date):
max_drawdown_start = df["balance"][:max_drawdown_end].idxmax()
max_drawdown_duration = (max_drawdown_end - max_drawdown_start).days
else:
max_drawdown_duration = 0
total_net_pnl = df["net_pnl"].sum()
daily_net_pnl = total_net_pnl / total_days
total_commission = df["commission"].sum()
daily_commission = total_commission / total_days
total_slippage = df["slippage"].sum()
daily_slippage = total_slippage / total_days
total_turnover = df["turnover"].sum()
daily_turnover = total_turnover / total_days
total_trade_count = df["trade_count"].sum()
daily_trade_count = total_trade_count / total_days
total_return = (end_balance / self.capital - 1) * 100
annual_return = total_return / total_days * 240
daily_return = df["return"].mean() * 100
return_std = df["return"].std() * 100
if return_std:
daily_risk_free = self.risk_free / np.sqrt(240)
sharpe_ratio = (daily_return - daily_risk_free) / return_std * np.sqrt(240)
else:
sharpe_ratio = 0
return_drawdown_ratio = -total_net_pnl / max_drawdown
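            # Note: daily_return and return_std above are expressed in percent;
            # the Sharpe ratio subtracts a per-day risk-free rate of
            # risk_free / sqrt(240) and annualises by sqrt(240) trading days.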
# Output
if output:
self.output("-" * 30)
self.output(f"首个交易日:\t{start_date}")
self.output(f"最后交易日:\t{end_date}")
self.output(f"总交易日:\t{total_days}")
self.output(f"盈利交易日:\t{profit_days}")
self.output(f"亏损交易日:\t{loss_days}")
self.output(f"起始资金:\t{self.capital:,.2f}")
self.output(f"结束资金:\t{end_balance:,.2f}")
self.output(f"总收益率:\t{total_return:,.2f}%")
self.output(f"年化收益:\t{annual_return:,.2f}%")
self.output(f"最大回撤: \t{max_drawdown:,.2f}")
self.output(f"百分比最大回撤: {max_ddpercent:,.2f}%")
self.output(f"最长回撤天数: \t{max_drawdown_duration}")
self.output(f"总盈亏:\t{total_net_pnl:,.2f}")
self.output(f"总手续费:\t{total_commission:,.2f}")
self.output(f"总滑点:\t{total_slippage:,.2f}")
self.output(f"总成交金额:\t{total_turnover:,.2f}")
self.output(f"总成交笔数:\t{total_trade_count}")
self.output(f"日均盈亏:\t{daily_net_pnl:,.2f}")
self.output(f"日均手续费:\t{daily_commission:,.2f}")
self.output(f"日均滑点:\t{daily_slippage:,.2f}")
self.output(f"日均成交金额:\t{daily_turnover:,.2f}")
self.output(f"日均成交笔数:\t{daily_trade_count}")
self.output(f"日均收益率:\t{daily_return:,.2f}%")
self.output(f"收益标准差:\t{return_std:,.2f}%")
self.output(f"Sharpe Ratio:\t{sharpe_ratio:,.2f}")
self.output(f"收益回撤比:\t{return_drawdown_ratio:,.2f}")
statistics = {
"start_date": start_date,
"end_date": end_date,
"total_days": total_days,
"profit_days": profit_days,
"loss_days": loss_days,
"capital": self.capital,
"end_balance": end_balance,
"max_drawdown": max_drawdown,
"max_ddpercent": max_ddpercent,
"max_drawdown_duration": max_drawdown_duration,
"total_net_pnl": total_net_pnl,
"daily_net_pnl": daily_net_pnl,
"total_commission": total_commission,
"daily_commission": daily_commission,
"total_slippage": total_slippage,
"daily_slippage": daily_slippage,
"total_turnover": total_turnover,
"daily_turnover": daily_turnover,
"total_trade_count": total_trade_count,
"daily_trade_count": daily_trade_count,
"total_return": total_return,
"annual_return": annual_return,
"daily_return": daily_return,
"return_std": return_std,
"sharpe_ratio": sharpe_ratio,
"return_drawdown_ratio": return_drawdown_ratio,
}
        # Replace potentially erroneous infinite or NaN values with 0
for key, value in statistics.items():
if value in (np.inf, -np.inf):
value = 0
statistics[key] = np.nan_to_num(value)
self.output("策略统计指标计算完成")
return statistics
def show_chart(self, df: DataFrame = None) -> None:
""""""
        # Use the externally supplied DataFrame if given, otherwise fall back to the stored daily results
if df is None:
df = self.daily_df
        # Nothing to plot if there are no daily results yet
if df is None:
return
fig = make_subplots(
rows=4,
cols=1,
subplot_titles=["Balance", "Drawdown", "Daily Pnl", "Pnl Distribution"],
vertical_spacing=0.06
)
balance_line = go.Scatter(
x=df.index,
y=df["balance"],
mode="lines",
name="Balance"
)
drawdown_scatter = go.Scatter(
x=df.index,
y=df["drawdown"],
fillcolor="red",
fill='tozeroy',
mode="lines",
name="Drawdown"
)
pnl_bar = go.Bar(y=df["net_pnl"], name="Daily Pnl")
pnl_histogram = go.Histogram(x=df["net_pnl"], nbinsx=100, name="Days")
fig.add_trace(balance_line, row=1, col=1)
fig.add_trace(drawdown_scatter, row=2, col=1)
fig.add_trace(pnl_bar, row=3, col=1)
fig.add_trace(pnl_histogram, row=4, col=1)
fig.update_layout(height=1000, width=1000)
fig.show()
def update_daily_close(self, bars: Dict[str, BarData], dt: datetime) -> None:
""""""
d = dt.date()
close_prices = {}
for bar in bars.values():
close_prices[bar.vt_symbol] = bar.close_price
daily_result = self.daily_results.get(d, None)
if daily_result:
daily_result.update_close_prices(close_prices)
else:
self.daily_results[d] = PortfolioDailyResult(d, close_prices)
def new_bars(self, dt: datetime) -> None:
""""""
self.datetime = dt
bars: Dict[str, BarData] = {}
for vt_symbol in self.vt_symbols:
bar = self.history_data.get((dt, vt_symbol), None)
# If bar data of vt_symbol at dt exists
if bar:
# Update bar data for crossing order
self.bars[vt_symbol] = bar
# Put bar into dict for strategy.on_bars update
bars[vt_symbol] = bar
# Otherwise, use previous close to backfill
elif vt_symbol in self.bars:
old_bar = self.bars[vt_symbol]
bar = BarData(
symbol=old_bar.symbol,
exchange=old_bar.exchange,
datetime=dt,
open_price=old_bar.close_price,
high_price=old_bar.close_price,
low_price=old_bar.close_price,
close_price=old_bar.close_price,
gateway_name=old_bar.gateway_name
)
self.bars[vt_symbol] = bar
self.cross_limit_order()
self.strategy.on_bars(bars)
self.update_daily_close(self.bars, dt)
def cross_limit_order(self) -> None:
"""
Cross limit order with last bar/tick data.
"""
for order in list(self.active_limit_orders.values()):
bar = self.bars[order.vt_symbol]
long_cross_price = bar.low_price
short_cross_price = bar.high_price
long_best_price = bar.open_price
short_best_price = bar.open_price
# Push order update with status "not traded" (pending).
if order.status == Status.SUBMITTING:
order.status = Status.NOTTRADED
self.strategy.update_order(order)
# Check whether limit orders can be filled.
long_cross = (
order.direction == Direction.LONG
and order.price >= long_cross_price
and long_cross_price > 0
)
short_cross = (
order.direction == Direction.SHORT
and order.price <= short_cross_price
and short_cross_price > 0
)
if not long_cross and not short_cross:
continue
# Push order update with status "all traded" (filled).
order.traded = order.volume
order.status = Status.ALLTRADED
self.strategy.update_order(order)
self.active_limit_orders.pop(order.vt_orderid)
# Push trade update
self.trade_count += 1
if long_cross:
trade_price = min(order.price, long_best_price)
else:
trade_price = max(order.price, short_best_price)
trade = TradeData(
symbol=order.symbol,
exchange=order.exchange,
orderid=order.orderid,
tradeid=str(self.trade_count),
direction=order.direction,
offset=order.offset,
price=trade_price,
volume=order.volume,
datetime=self.datetime,
gateway_name=self.gateway_name,
)
self.strategy.update_trade(trade)
self.trades[trade.vt_tradeid] = trade
def load_bars(
self,
strategy: StrategyTemplate,
days: int,
interval: Interval
) -> None:
""""""
self.days = days
def send_order(
self,
strategy: StrategyTemplate,
vt_symbol: str,
direction: Direction,
offset: Offset,
price: float,
volume: float,
lock: bool,
net: bool
) -> List[str]:
""""""
price = round_to(price, self.priceticks[vt_symbol])
symbol, exchange = extract_vt_symbol(vt_symbol)
self.limit_order_count += 1
order = OrderData(
symbol=symbol,
exchange=exchange,
orderid=str(self.limit_order_count),
direction=direction,
offset=offset,
price=price,
volume=volume,
status=Status.SUBMITTING,
datetime=self.datetime,
gateway_name=self.gateway_name,
)
self.active_limit_orders[order.vt_orderid] = order
self.limit_orders[order.vt_orderid] = order
return [order.vt_orderid]
def cancel_order(self, strategy: StrategyTemplate, vt_orderid: str) -> None:
"""
Cancel order by vt_orderid.
"""
if vt_orderid not in self.active_limit_orders:
return
order = self.active_limit_orders.pop(vt_orderid)
order.status = Status.CANCELLED
self.strategy.update_order(order)
def write_log(self, msg: str, strategy: StrategyTemplate = None) -> None:
"""
Write log message.
"""
msg = f"{self.datetime}\t{msg}"
self.logs.append(msg)
def send_email(self, msg: str, strategy: StrategyTemplate = None) -> None:
"""
Send email to default receiver.
"""
pass
def sync_strategy_data(self, strategy: StrategyTemplate) -> None:
"""
Sync strategy data into json file.
"""
pass
def put_strategy_event(self, strategy: StrategyTemplate) -> None:
"""
Put an event to update strategy status.
"""
pass
def output(self, msg) -> None:
"""
Output message of backtesting engine.
"""
print(f"{datetime.now()}\t{msg}")
def get_all_trades(self) -> List[TradeData]:
"""
Return all trade data of current backtesting result.
"""
return list(self.trades.values())
def get_all_orders(self) -> List[OrderData]:
"""
Return all limit order data of current backtesting result.
"""
return list(self.limit_orders.values())
def get_all_daily_results(self) -> List["PortfolioDailyResult"]:
"""
Return all daily result data.
"""
return list(self.daily_results.values())
class ContractDailyResult:
""""""
def __init__(self, result_date: date, close_price: float):
""""""
self.date: date = result_date
self.close_price: float = close_price
self.pre_close: float = 0
self.trades: List[TradeData] = []
self.trade_count: int = 0
self.start_pos: float = 0
self.end_pos: float = 0
self.turnover: float = 0
self.commission: float = 0
self.slippage: float = 0
self.trading_pnl: float = 0
self.holding_pnl: float = 0
self.total_pnl: float = 0
self.net_pnl: float = 0
def add_trade(self, trade: TradeData) -> None:
""""""
self.trades.append(trade)
def calculate_pnl(
self,
pre_close: float,
start_pos: float,
size: int,
rate: float,
slippage: float
) -> None:
""""""
# If no pre_close provided on the first day,
# use value 1 to avoid zero division error
if pre_close:
self.pre_close = pre_close
else:
self.pre_close = 1
# Holding pnl is the pnl from holding position at day start
self.start_pos = start_pos
self.end_pos = start_pos
self.holding_pnl = self.start_pos * (self.close_price - self.pre_close) * size
# Trading pnl is the pnl from new trade during the day
self.trade_count = len(self.trades)
for trade in self.trades:
if trade.direction == Direction.LONG:
pos_change = trade.volume
else:
pos_change = -trade.volume
self.end_pos += pos_change
turnover = trade.volume * size * trade.price
self.trading_pnl += pos_change * (self.close_price - trade.price) * size
self.slippage += trade.volume * size * slippage
self.turnover += turnover
self.commission += turnover * rate
# Net pnl takes account of commission and slippage cost
self.total_pnl = self.trading_pnl + self.holding_pnl
self.net_pnl = self.total_pnl - self.commission - self.slippage
def update_close_price(self, close_price: float) -> None:
""""""
self.close_price = close_price
class PortfolioDailyResult:
""""""
def __init__(self, result_date: date, close_prices: Dict[str, float]):
""""""
self.date: date = result_date
self.close_prices: Dict[str, float] = close_prices
self.pre_closes: Dict[str, float] = {}
self.start_poses: Dict[str, float] = {}
self.end_poses: Dict[str, float] = {}
self.contract_results: Dict[str, ContractDailyResult] = {}
for vt_symbol, close_price in close_prices.items():
self.contract_results[vt_symbol] = ContractDailyResult(result_date, close_price)
self.trade_count: int = 0
self.turnover: float = 0
self.commission: float = 0
self.slippage: float = 0
self.trading_pnl: float = 0
self.holding_pnl: float = 0
self.total_pnl: float = 0
self.net_pnl: float = 0
def add_trade(self, trade: TradeData) -> None:
""""""
contract_result = self.contract_results[trade.vt_symbol]
contract_result.add_trade(trade)
def calculate_pnl(
self,
pre_closes: Dict[str, float],
start_poses: Dict[str, float],
sizes: Dict[str, float],
rates: Dict[str, float],
slippages: Dict[str, float],
) -> None:
""""""
self.pre_closes = pre_closes
for vt_symbol, contract_result in self.contract_results.items():
contract_result.calculate_pnl(
pre_closes.get(vt_symbol, 0),
start_poses.get(vt_symbol, 0),
sizes[vt_symbol],
rates[vt_symbol],
slippages[vt_symbol]
)
self.trade_count += contract_result.trade_count
self.turnover += contract_result.turnover
self.commission += contract_result.commission
self.slippage += contract_result.slippage
self.trading_pnl += contract_result.trading_pnl
self.holding_pnl += contract_result.holding_pnl
self.total_pnl += contract_result.total_pnl
self.net_pnl += contract_result.net_pnl
self.end_poses[vt_symbol] = contract_result.end_pos
def update_close_prices(self, close_prices: Dict[str, float]) -> None:
""""""
self.close_prices = close_prices
for vt_symbol, close_price in close_prices.items():
contract_result = self.contract_results.get(vt_symbol, None)
if contract_result:
contract_result.update_close_price(close_price)
@lru_cache(maxsize=999)
def load_bar_data(
vt_symbol: str,
interval: Interval,
start: datetime,
end: datetime
):
""""""
symbol, exchange = extract_vt_symbol(vt_symbol)
return database_manager.load_bar_data(
symbol, exchange, interval, start, end
)
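# --- Usage sketch (illustrative only; the strategy class and contract details
# below are placeholders, not part of this module) ---
#
#     engine = BacktestingEngine()
#     engine.set_parameters(
#         vt_symbols=["IF888.CFFEX", "IC888.CFFEX"],
#         interval=Interval.MINUTE,
#         start=datetime(2020, 1, 1),
#         end=datetime(2020, 6, 30),
#         rates={"IF888.CFFEX": 0.3 / 10000, "IC888.CFFEX": 0.3 / 10000},
#         slippages={"IF888.CFFEX": 0.2, "IC888.CFFEX": 0.2},
#         sizes={"IF888.CFFEX": 300, "IC888.CFFEX": 200},
#         priceticks={"IF888.CFFEX": 0.2, "IC888.CFFEX": 0.2},
#         capital=1_000_000,
#     )
#     engine.add_strategy(MyPortfolioStrategy, {})
#     engine.load_data()
#     engine.run_backtesting()
#     engine.calculate_result()
#     engine.calculate_statistics()
#     engine.show_chart()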
| 31.405848 | 92 | 0.559437 | from collections import defaultdict
from datetime import date, datetime, timedelta
from typing import Dict, List, Set, Tuple
from functools import lru_cache
from copy import copy
import traceback
import numpy as np
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from pandas import DataFrame
from vnpy.trader.constant import Direction, Offset, Interval, Status
from vnpy.trader.database import database_manager
from vnpy.trader.object import OrderData, TradeData, BarData
from vnpy.trader.utility import round_to, extract_vt_symbol
from .template import StrategyTemplate
INTERVAL_DELTA_MAP = {
Interval.MINUTE: timedelta(minutes=1),
Interval.HOUR: timedelta(hours=1),
Interval.DAILY: timedelta(days=1),
}
class BacktestingEngine:
gateway_name = "BACKTESTING"
def __init__(self):
self.vt_symbols: List[str] = []
self.start: datetime = None
self.end: datetime = None
self.rates: Dict[str, float] = 0
self.slippages: Dict[str, float] = 0
self.sizes: Dict[str, float] = 1
self.priceticks: Dict[str, float] = 0
self.capital: float = 1_000_000
self.risk_free: float = 0.02
self.strategy: StrategyTemplate = None
self.bars: Dict[str, BarData] = {}
self.datetime: datetime = None
self.interval: Interval = None
self.days: int = 0
self.history_data: Dict[Tuple, BarData] = {}
self.dts: Set[datetime] = set()
self.limit_order_count = 0
self.limit_orders = {}
self.active_limit_orders = {}
self.trade_count = 0
self.trades = {}
self.logs = []
self.daily_results = {}
self.daily_df = None
def clear_data(self) -> None:
self.strategy = None
self.bars = {}
self.datetime = None
self.limit_order_count = 0
self.limit_orders.clear()
self.active_limit_orders.clear()
self.trade_count = 0
self.trades.clear()
self.logs.clear()
self.daily_results.clear()
self.daily_df = None
def set_parameters(
self,
vt_symbols: List[str],
interval: Interval,
start: datetime,
rates: Dict[str, float],
slippages: Dict[str, float],
sizes: Dict[str, float],
priceticks: Dict[str, float],
capital: int = 0,
end: datetime = None,
risk_free: float = 0
) -> None:
self.vt_symbols = vt_symbols
self.interval = interval
self.rates = rates
self.slippages = slippages
self.sizes = sizes
self.priceticks = priceticks
self.start = start
self.end = end
self.capital = capital
self.risk_free = risk_free
def add_strategy(self, strategy_class: type, setting: dict) -> None:
self.strategy = strategy_class(
self, strategy_class.__name__, copy(self.vt_symbols), setting
)
def load_data(self) -> None:
self.output("开始加载历史数据")
if not self.end:
self.end = datetime.now()
if self.start >= self.end:
self.output("起始日期必须小于结束日期")
return
self.history_data.clear()
self.dts.clear()
progress_delta = timedelta(days=30)
total_delta = self.end - self.start
interval_delta = INTERVAL_DELTA_MAP[self.interval]
for vt_symbol in self.vt_symbols:
start = self.start
end = self.start + progress_delta
progress = 0
data_count = 0
while start < self.end:
end = min(end, self.end)
data = load_bar_data(
vt_symbol,
self.interval,
start,
end
)
for bar in data:
self.dts.add(bar.datetime)
self.history_data[(bar.datetime, vt_symbol)] = bar
data_count += 1
progress += progress_delta / total_delta
progress = min(progress, 1)
progress_bar = "#" * int(progress * 10)
self.output(f"{vt_symbol}加载进度:{progress_bar} [{progress:.0%}]")
start = end + interval_delta
end += (progress_delta + interval_delta)
self.output(f"{vt_symbol}历史数据加载完成,数据量:{data_count}")
self.output("所有历史数据加载完成")
def run_backtesting(self) -> None:
self.strategy.on_init()
dts = list(self.dts)
dts.sort()
day_count = 0
ix = 0
for ix, dt in enumerate(dts):
if self.datetime and dt.day != self.datetime.day:
day_count += 1
if day_count >= self.days:
break
try:
self.new_bars(dt)
except Exception:
self.output("触发异常,回测终止")
self.output(traceback.format_exc())
return
self.strategy.inited = True
self.output("策略初始化完成")
self.strategy.on_start()
self.strategy.trading = True
self.output("开始回放历史数据")
for dt in dts[ix:]:
try:
self.new_bars(dt)
except Exception:
self.output("触发异常,回测终止")
self.output(traceback.format_exc())
return
self.output("历史数据回放结束")
def calculate_result(self) -> None:
self.output("开始计算逐日盯市盈亏")
if not self.trades:
self.output("成交记录为空,无法计算")
return
for trade in self.trades.values():
d = trade.datetime.date()
daily_result = self.daily_results[d]
daily_result.add_trade(trade)
pre_closes = {}
start_poses = {}
for daily_result in self.daily_results.values():
daily_result.calculate_pnl(
pre_closes,
start_poses,
self.sizes,
self.rates,
self.slippages,
)
pre_closes = daily_result.close_prices
start_poses = daily_result.end_poses
results = defaultdict(list)
for daily_result in self.daily_results.values():
fields = [
"date", "trade_count", "turnover",
"commission", "slippage", "trading_pnl",
"holding_pnl", "total_pnl", "net_pnl"
]
for key in fields:
value = getattr(daily_result, key)
results[key].append(value)
self.daily_df = DataFrame.from_dict(results).set_index("date")
self.output("逐日盯市盈亏计算完成")
return self.daily_df
def calculate_statistics(self, df: DataFrame = None, output=True) -> None:
self.output("开始计算策略统计指标")
if df is None:
df = self.daily_df
if df is None:
start_date = ""
end_date = ""
total_days = 0
profit_days = 0
loss_days = 0
end_balance = 0
max_drawdown = 0
max_ddpercent = 0
max_drawdown_duration = 0
total_net_pnl = 0
daily_net_pnl = 0
total_commission = 0
daily_commission = 0
total_slippage = 0
daily_slippage = 0
total_turnover = 0
daily_turnover = 0
total_trade_count = 0
daily_trade_count = 0
total_return = 0
annual_return = 0
daily_return = 0
return_std = 0
sharpe_ratio = 0
return_drawdown_ratio = 0
else:
df["balance"] = df["net_pnl"].cumsum() + self.capital
df["return"] = np.log(df["balance"] / df["balance"].shift(1)).fillna(0)
df["highlevel"] = (
df["balance"].rolling(
min_periods=1, window=len(df), center=False).max()
)
df["drawdown"] = df["balance"] - df["highlevel"]
df["ddpercent"] = df["drawdown"] / df["highlevel"] * 100
start_date = df.index[0]
end_date = df.index[-1]
total_days = len(df)
profit_days = len(df[df["net_pnl"] > 0])
loss_days = len(df[df["net_pnl"] < 0])
end_balance = df["balance"].iloc[-1]
max_drawdown = df["drawdown"].min()
max_ddpercent = df["ddpercent"].min()
max_drawdown_end = df["drawdown"].idxmin()
if isinstance(max_drawdown_end, date):
max_drawdown_start = df["balance"][:max_drawdown_end].idxmax()
max_drawdown_duration = (max_drawdown_end - max_drawdown_start).days
else:
max_drawdown_duration = 0
total_net_pnl = df["net_pnl"].sum()
daily_net_pnl = total_net_pnl / total_days
total_commission = df["commission"].sum()
daily_commission = total_commission / total_days
total_slippage = df["slippage"].sum()
daily_slippage = total_slippage / total_days
total_turnover = df["turnover"].sum()
daily_turnover = total_turnover / total_days
total_trade_count = df["trade_count"].sum()
daily_trade_count = total_trade_count / total_days
total_return = (end_balance / self.capital - 1) * 100
annual_return = total_return / total_days * 240
daily_return = df["return"].mean() * 100
return_std = df["return"].std() * 100
if return_std:
daily_risk_free = self.risk_free / np.sqrt(240)
sharpe_ratio = (daily_return - daily_risk_free) / return_std * np.sqrt(240)
else:
sharpe_ratio = 0
return_drawdown_ratio = -total_net_pnl / max_drawdown
if output:
self.output("-" * 30)
self.output(f"首个交易日:\t{start_date}")
self.output(f"最后交易日:\t{end_date}")
self.output(f"总交易日:\t{total_days}")
self.output(f"盈利交易日:\t{profit_days}")
self.output(f"亏损交易日:\t{loss_days}")
self.output(f"起始资金:\t{self.capital:,.2f}")
self.output(f"结束资金:\t{end_balance:,.2f}")
self.output(f"总收益率:\t{total_return:,.2f}%")
self.output(f"年化收益:\t{annual_return:,.2f}%")
self.output(f"最大回撤: \t{max_drawdown:,.2f}")
self.output(f"百分比最大回撤: {max_ddpercent:,.2f}%")
self.output(f"最长回撤天数: \t{max_drawdown_duration}")
self.output(f"总盈亏:\t{total_net_pnl:,.2f}")
self.output(f"总手续费:\t{total_commission:,.2f}")
self.output(f"总滑点:\t{total_slippage:,.2f}")
self.output(f"总成交金额:\t{total_turnover:,.2f}")
self.output(f"总成交笔数:\t{total_trade_count}")
self.output(f"日均盈亏:\t{daily_net_pnl:,.2f}")
self.output(f"日均手续费:\t{daily_commission:,.2f}")
self.output(f"日均滑点:\t{daily_slippage:,.2f}")
self.output(f"日均成交金额:\t{daily_turnover:,.2f}")
self.output(f"日均成交笔数:\t{daily_trade_count}")
self.output(f"日均收益率:\t{daily_return:,.2f}%")
self.output(f"收益标准差:\t{return_std:,.2f}%")
self.output(f"Sharpe Ratio:\t{sharpe_ratio:,.2f}")
self.output(f"收益回撤比:\t{return_drawdown_ratio:,.2f}")
statistics = {
"start_date": start_date,
"end_date": end_date,
"total_days": total_days,
"profit_days": profit_days,
"loss_days": loss_days,
"capital": self.capital,
"end_balance": end_balance,
"max_drawdown": max_drawdown,
"max_ddpercent": max_ddpercent,
"max_drawdown_duration": max_drawdown_duration,
"total_net_pnl": total_net_pnl,
"daily_net_pnl": daily_net_pnl,
"total_commission": total_commission,
"daily_commission": daily_commission,
"total_slippage": total_slippage,
"daily_slippage": daily_slippage,
"total_turnover": total_turnover,
"daily_turnover": daily_turnover,
"total_trade_count": total_trade_count,
"daily_trade_count": daily_trade_count,
"total_return": total_return,
"annual_return": annual_return,
"daily_return": daily_return,
"return_std": return_std,
"sharpe_ratio": sharpe_ratio,
"return_drawdown_ratio": return_drawdown_ratio,
}
for key, value in statistics.items():
if value in (np.inf, -np.inf):
value = 0
statistics[key] = np.nan_to_num(value)
self.output("策略统计指标计算完成")
return statistics
def show_chart(self, df: DataFrame = None) -> None:
if df is None:
df = self.daily_df
if df is None:
return
fig = make_subplots(
rows=4,
cols=1,
subplot_titles=["Balance", "Drawdown", "Daily Pnl", "Pnl Distribution"],
vertical_spacing=0.06
)
balance_line = go.Scatter(
x=df.index,
y=df["balance"],
mode="lines",
name="Balance"
)
drawdown_scatter = go.Scatter(
x=df.index,
y=df["drawdown"],
fillcolor="red",
fill='tozeroy',
mode="lines",
name="Drawdown"
)
pnl_bar = go.Bar(y=df["net_pnl"], name="Daily Pnl")
pnl_histogram = go.Histogram(x=df["net_pnl"], nbinsx=100, name="Days")
fig.add_trace(balance_line, row=1, col=1)
fig.add_trace(drawdown_scatter, row=2, col=1)
fig.add_trace(pnl_bar, row=3, col=1)
fig.add_trace(pnl_histogram, row=4, col=1)
fig.update_layout(height=1000, width=1000)
fig.show()
def update_daily_close(self, bars: Dict[str, BarData], dt: datetime) -> None:
d = dt.date()
close_prices = {}
for bar in bars.values():
close_prices[bar.vt_symbol] = bar.close_price
daily_result = self.daily_results.get(d, None)
if daily_result:
daily_result.update_close_prices(close_prices)
else:
self.daily_results[d] = PortfolioDailyResult(d, close_prices)
def new_bars(self, dt: datetime) -> None:
self.datetime = dt
bars: Dict[str, BarData] = {}
for vt_symbol in self.vt_symbols:
bar = self.history_data.get((dt, vt_symbol), None)
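            # When no bar exists for this symbol at dt, a flat bar is
            # synthesised below from the previous close so the position keeps
            # being marked to market.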
if bar:
self.bars[vt_symbol] = bar
bars[vt_symbol] = bar
elif vt_symbol in self.bars:
old_bar = self.bars[vt_symbol]
bar = BarData(
symbol=old_bar.symbol,
exchange=old_bar.exchange,
datetime=dt,
open_price=old_bar.close_price,
high_price=old_bar.close_price,
low_price=old_bar.close_price,
close_price=old_bar.close_price,
gateway_name=old_bar.gateway_name
)
self.bars[vt_symbol] = bar
self.cross_limit_order()
self.strategy.on_bars(bars)
self.update_daily_close(self.bars, dt)
def cross_limit_order(self) -> None:
for order in list(self.active_limit_orders.values()):
bar = self.bars[order.vt_symbol]
long_cross_price = bar.low_price
short_cross_price = bar.high_price
long_best_price = bar.open_price
short_best_price = bar.open_price
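            # A resting long limit order fills once the bar has traded at or
            # below its price; a short limit order fills once it has traded at
            # or above its price.  The fill price is bounded by the bar open.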
if order.status == Status.SUBMITTING:
order.status = Status.NOTTRADED
self.strategy.update_order(order)
long_cross = (
order.direction == Direction.LONG
and order.price >= long_cross_price
and long_cross_price > 0
)
short_cross = (
order.direction == Direction.SHORT
and order.price <= short_cross_price
and short_cross_price > 0
)
if not long_cross and not short_cross:
continue
order.traded = order.volume
order.status = Status.ALLTRADED
self.strategy.update_order(order)
self.active_limit_orders.pop(order.vt_orderid)
self.trade_count += 1
if long_cross:
trade_price = min(order.price, long_best_price)
else:
trade_price = max(order.price, short_best_price)
trade = TradeData(
symbol=order.symbol,
exchange=order.exchange,
orderid=order.orderid,
tradeid=str(self.trade_count),
direction=order.direction,
offset=order.offset,
price=trade_price,
volume=order.volume,
datetime=self.datetime,
gateway_name=self.gateway_name,
)
self.strategy.update_trade(trade)
self.trades[trade.vt_tradeid] = trade
def load_bars(
self,
strategy: StrategyTemplate,
days: int,
interval: Interval
) -> None:
self.days = days
def send_order(
self,
strategy: StrategyTemplate,
vt_symbol: str,
direction: Direction,
offset: Offset,
price: float,
volume: float,
lock: bool,
net: bool
) -> List[str]:
price = round_to(price, self.priceticks[vt_symbol])
symbol, exchange = extract_vt_symbol(vt_symbol)
self.limit_order_count += 1
order = OrderData(
symbol=symbol,
exchange=exchange,
orderid=str(self.limit_order_count),
direction=direction,
offset=offset,
price=price,
volume=volume,
status=Status.SUBMITTING,
datetime=self.datetime,
gateway_name=self.gateway_name,
)
self.active_limit_orders[order.vt_orderid] = order
self.limit_orders[order.vt_orderid] = order
return [order.vt_orderid]
def cancel_order(self, strategy: StrategyTemplate, vt_orderid: str) -> None:
if vt_orderid not in self.active_limit_orders:
return
order = self.active_limit_orders.pop(vt_orderid)
order.status = Status.CANCELLED
self.strategy.update_order(order)
def write_log(self, msg: str, strategy: StrategyTemplate = None) -> None:
msg = f"{self.datetime}\t{msg}"
self.logs.append(msg)
def send_email(self, msg: str, strategy: StrategyTemplate = None) -> None:
pass
def sync_strategy_data(self, strategy: StrategyTemplate) -> None:
pass
def put_strategy_event(self, strategy: StrategyTemplate) -> None:
pass
def output(self, msg) -> None:
print(f"{datetime.now()}\t{msg}")
def get_all_trades(self) -> List[TradeData]:
return list(self.trades.values())
def get_all_orders(self) -> List[OrderData]:
return list(self.limit_orders.values())
def get_all_daily_results(self) -> List["PortfolioDailyResult"]:
return list(self.daily_results.values())
class ContractDailyResult:
def __init__(self, result_date: date, close_price: float):
self.date: date = result_date
self.close_price: float = close_price
self.pre_close: float = 0
self.trades: List[TradeData] = []
self.trade_count: int = 0
self.start_pos: float = 0
self.end_pos: float = 0
self.turnover: float = 0
self.commission: float = 0
self.slippage: float = 0
self.trading_pnl: float = 0
self.holding_pnl: float = 0
self.total_pnl: float = 0
self.net_pnl: float = 0
def add_trade(self, trade: TradeData) -> None:
self.trades.append(trade)
def calculate_pnl(
self,
pre_close: float,
start_pos: float,
size: int,
rate: float,
slippage: float
) -> None:
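        # Holding pnl marks the day-start position from pre_close to close,
        # trading pnl covers the volume traded during the day, and net pnl
        # then subtracts commission and slippage.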
if pre_close:
self.pre_close = pre_close
else:
self.pre_close = 1
self.start_pos = start_pos
self.end_pos = start_pos
self.holding_pnl = self.start_pos * (self.close_price - self.pre_close) * size
self.trade_count = len(self.trades)
for trade in self.trades:
if trade.direction == Direction.LONG:
pos_change = trade.volume
else:
pos_change = -trade.volume
self.end_pos += pos_change
turnover = trade.volume * size * trade.price
self.trading_pnl += pos_change * (self.close_price - trade.price) * size
self.slippage += trade.volume * size * slippage
self.turnover += turnover
self.commission += turnover * rate
self.total_pnl = self.trading_pnl + self.holding_pnl
self.net_pnl = self.total_pnl - self.commission - self.slippage
def update_close_price(self, close_price: float) -> None:
self.close_price = close_price
class PortfolioDailyResult:
def __init__(self, result_date: date, close_prices: Dict[str, float]):
self.date: date = result_date
self.close_prices: Dict[str, float] = close_prices
self.pre_closes: Dict[str, float] = {}
self.start_poses: Dict[str, float] = {}
self.end_poses: Dict[str, float] = {}
self.contract_results: Dict[str, ContractDailyResult] = {}
for vt_symbol, close_price in close_prices.items():
self.contract_results[vt_symbol] = ContractDailyResult(result_date, close_price)
self.trade_count: int = 0
self.turnover: float = 0
self.commission: float = 0
self.slippage: float = 0
self.trading_pnl: float = 0
self.holding_pnl: float = 0
self.total_pnl: float = 0
self.net_pnl: float = 0
def add_trade(self, trade: TradeData) -> None:
contract_result = self.contract_results[trade.vt_symbol]
contract_result.add_trade(trade)
def calculate_pnl(
self,
pre_closes: Dict[str, float],
start_poses: Dict[str, float],
sizes: Dict[str, float],
rates: Dict[str, float],
slippages: Dict[str, float],
) -> None:
self.pre_closes = pre_closes
for vt_symbol, contract_result in self.contract_results.items():
contract_result.calculate_pnl(
pre_closes.get(vt_symbol, 0),
start_poses.get(vt_symbol, 0),
sizes[vt_symbol],
rates[vt_symbol],
slippages[vt_symbol]
)
self.trade_count += contract_result.trade_count
self.turnover += contract_result.turnover
self.commission += contract_result.commission
self.slippage += contract_result.slippage
self.trading_pnl += contract_result.trading_pnl
self.holding_pnl += contract_result.holding_pnl
self.total_pnl += contract_result.total_pnl
self.net_pnl += contract_result.net_pnl
self.end_poses[vt_symbol] = contract_result.end_pos
def update_close_prices(self, close_prices: Dict[str, float]) -> None:
self.close_prices = close_prices
for vt_symbol, close_price in close_prices.items():
contract_result = self.contract_results.get(vt_symbol, None)
if contract_result:
contract_result.update_close_price(close_price)
@lru_cache(maxsize=999)
def load_bar_data(
vt_symbol: str,
interval: Interval,
start: datetime,
end: datetime
):
symbol, exchange = extract_vt_symbol(vt_symbol)
return database_manager.load_bar_data(
symbol, exchange, interval, start, end
)
| true | true |
f700642b6d0a80da9a02e1d4c273cd2930c77980 | 2,659 | py | Python | src/python/pants/backend/codegen/export_codegen_goal_test.py | rcuza/pants | 0429258b181986eed856ae45af93b776727774a0 | [
"Apache-2.0"
] | 1 | 2021-02-22T18:11:26.000Z | 2021-02-22T18:11:26.000Z | src/python/pants/backend/codegen/export_codegen_goal_test.py | rcuza/pants | 0429258b181986eed856ae45af93b776727774a0 | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/codegen/export_codegen_goal_test.py | rcuza/pants | 0429258b181986eed856ae45af93b776727774a0 | [
"Apache-2.0"
] | 2 | 2021-05-11T07:51:26.000Z | 2021-05-19T10:14:46.000Z | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pathlib import Path
import pytest
from pants.backend.codegen.export_codegen_goal import ExportCodegen
from pants.backend.codegen.export_codegen_goal import rules as write_codegen_rules
from pants.core.target_types import FilesSources, ResourcesSources
from pants.core.util_rules import distdir
from pants.engine.fs import CreateDigest, FileContent, Snapshot
from pants.engine.rules import Get, rule
from pants.engine.target import GeneratedSources, GenerateSourcesRequest, Sources, Target
from pants.engine.unions import UnionRule
from pants.testutil.rule_runner import RuleRunner
class Gen1Sources(Sources):
pass
class Gen2Sources(Sources):
pass
class Gen1Target(Target):
alias = "gen1"
core_fields = (Gen1Sources,)
class Gen2Target(Target):
alias = "gen2"
core_fields = (Gen2Sources,)
class Gen1Request(GenerateSourcesRequest):
input = Gen1Sources
output = FilesSources
class Gen2Request(GenerateSourcesRequest):
input = Gen2Sources
output = ResourcesSources
@rule
async def gen1(_: Gen1Request) -> GeneratedSources:
result = await Get(Snapshot, CreateDigest([FileContent("assets/README.md", b"Hello!")]))
return GeneratedSources(result)
@rule
async def gen2(_: Gen2Request) -> GeneratedSources:
result = await Get(Snapshot, CreateDigest([FileContent("src/haskell/app.hs", b"10 * 4")]))
return GeneratedSources(result)
@pytest.fixture
def rule_runner() -> RuleRunner:
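    # Wires the two toy codegen implementations above into the rule graph so
    # that the export-codegen goal has generators to exercise in the tests.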
return RuleRunner(
rules=[
*write_codegen_rules(),
gen1,
gen2,
UnionRule(GenerateSourcesRequest, Gen1Request),
UnionRule(GenerateSourcesRequest, Gen2Request),
*distdir.rules(),
],
target_types=[Gen1Target, Gen2Target],
)
def test_no_codegen_targets(rule_runner: RuleRunner, caplog) -> None:
result = rule_runner.run_goal_rule(ExportCodegen)
assert result.exit_code == 0
assert len(caplog.records) == 1
assert "No codegen files/targets matched. All codegen target types: gen1, gen2" in caplog.text
def test_export_codegen(rule_runner: RuleRunner) -> None:
rule_runner.add_to_build_file("", "gen1(name='gen1')\ngen2(name='gen2')\n")
result = rule_runner.run_goal_rule(ExportCodegen, args=["::"])
assert result.exit_code == 0
parent_dir = Path(rule_runner.build_root, "dist", "codegen")
assert (parent_dir / "assets" / "README.md").read_text() == "Hello!"
assert (parent_dir / "src" / "haskell" / "app.hs").read_text() == "10 * 4"
| 30.215909 | 98 | 0.723956 |
from pathlib import Path
import pytest
from pants.backend.codegen.export_codegen_goal import ExportCodegen
from pants.backend.codegen.export_codegen_goal import rules as write_codegen_rules
from pants.core.target_types import FilesSources, ResourcesSources
from pants.core.util_rules import distdir
from pants.engine.fs import CreateDigest, FileContent, Snapshot
from pants.engine.rules import Get, rule
from pants.engine.target import GeneratedSources, GenerateSourcesRequest, Sources, Target
from pants.engine.unions import UnionRule
from pants.testutil.rule_runner import RuleRunner
class Gen1Sources(Sources):
pass
class Gen2Sources(Sources):
pass
class Gen1Target(Target):
alias = "gen1"
core_fields = (Gen1Sources,)
class Gen2Target(Target):
alias = "gen2"
core_fields = (Gen2Sources,)
class Gen1Request(GenerateSourcesRequest):
input = Gen1Sources
output = FilesSources
class Gen2Request(GenerateSourcesRequest):
input = Gen2Sources
output = ResourcesSources
@rule
async def gen1(_: Gen1Request) -> GeneratedSources:
result = await Get(Snapshot, CreateDigest([FileContent("assets/README.md", b"Hello!")]))
return GeneratedSources(result)
@rule
async def gen2(_: Gen2Request) -> GeneratedSources:
result = await Get(Snapshot, CreateDigest([FileContent("src/haskell/app.hs", b"10 * 4")]))
return GeneratedSources(result)
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
*write_codegen_rules(),
gen1,
gen2,
UnionRule(GenerateSourcesRequest, Gen1Request),
UnionRule(GenerateSourcesRequest, Gen2Request),
*distdir.rules(),
],
target_types=[Gen1Target, Gen2Target],
)
def test_no_codegen_targets(rule_runner: RuleRunner, caplog) -> None:
result = rule_runner.run_goal_rule(ExportCodegen)
assert result.exit_code == 0
assert len(caplog.records) == 1
assert "No codegen files/targets matched. All codegen target types: gen1, gen2" in caplog.text
def test_export_codegen(rule_runner: RuleRunner) -> None:
rule_runner.add_to_build_file("", "gen1(name='gen1')\ngen2(name='gen2')\n")
result = rule_runner.run_goal_rule(ExportCodegen, args=["::"])
assert result.exit_code == 0
parent_dir = Path(rule_runner.build_root, "dist", "codegen")
assert (parent_dir / "assets" / "README.md").read_text() == "Hello!"
assert (parent_dir / "src" / "haskell" / "app.hs").read_text() == "10 * 4"
| true | true |
f7006506115787b6ab648322e288f899a2ea56b5 | 3,002 | py | Python | movies_recommender/RecommenderSVD.py | mateuszrusin/filmweb-rekomendacje | 06bdff3825f4c9e7b80fb5778d1a40388d2313d9 | [
"MIT"
] | 3 | 2019-12-10T10:20:07.000Z | 2020-12-03T17:37:24.000Z | movies_recommender/RecommenderSVD.py | mateuszrusin/filmweb-rekomendacje | 06bdff3825f4c9e7b80fb5778d1a40388d2313d9 | [
"MIT"
] | 2 | 2021-03-31T19:31:04.000Z | 2021-12-13T20:32:18.000Z | movies_recommender/RecommenderSVD.py | mateuszrusin/filmweb-rekomendacje | 06bdff3825f4c9e7b80fb5778d1a40388d2313d9 | [
"MIT"
] | 4 | 2019-11-21T23:49:39.000Z | 2020-12-03T17:37:26.000Z | from collections import defaultdict
from operator import itemgetter
# python -m movies_recommender.RecommenderSVD
from movies_analyzer.Movies import Movies
from movies_analyzer.RecommendationDataset import RecommendationDataSet
from movies_recommender.Recommender import Recommender
from surprise import SVD, KNNBasic
from movies_recommender.utils import get_top_n
class RecommenderSVD(Recommender):
def __init__(self, recommendation_dataset: RecommendationDataSet):
super(RecommenderSVD, self).__init__(recommendation_dataset.movies)
self.algorithm = SVD()
self.recommendation_dataset = recommendation_dataset
def fit(self, dataset):
return self.algorithm.fit(dataset)
def test(self, test_set):
return self.algorithm.test(test_set)
def get_recommendation(self, watched, k=20):
# get dataset
new_user_id, full_dataset = self.recommendation_dataset.get_dataset_with_extended_user(watched)
inner_user_id = full_dataset.to_inner_uid(new_user_id)
        # After extending the dataset with the new user, the model has to be
        # re-fitted on the full dataset before making predictions.
self.algorithm.fit(full_dataset)
        # Map the user's watched movies to the dataset's inner item ids
watched = {full_dataset.to_inner_iid(key): value for key,value in watched.items()}
        # Predict a rating for every movie the new user has not watched yet
test_items = [
self.algorithm.predict(new_user_id, full_dataset.to_raw_iid(i))
for i in range(0, full_dataset.n_items)
if i not in watched
]
topn_items = [i[0] for i in get_top_n(test_items, n=k, minimum_rating=1.0)[new_user_id]]
return self.movies.get_movie_by_movie_ids(topn_items)
if __name__ == '__main__':
from movies_recommender.Recommender import test_recommendation
from movies_recommender.RecommenderSVD import RecommenderSVD
from movies_analyzer.RecommendationDataset import RecommendationDataSet
from movies_analyzer.Movies import Movies
movies = Movies()
recommendation_dataset = RecommendationDataSet(movies=movies)
recommender = RecommenderSVD(recommendation_dataset)
assert recommender.__module__[:len('movies_recommender.')] == 'movies_recommender.'
test_recommendation(recommender, recommendation_dataset,
example_items=['arek','mateusz'], anti_test=True)
""" For test only
%load_ext autoreload
%autoreload 2
from filmweb_integrator.fwimdbmerge.filmweb import Filmweb
from filmweb_integrator.fwimdbmerge.merger import Merger, get_json_df
from movies_recommender.Recommender import get_moviescore_df, get_watched
recommender.fit(recommendation_dataset.full_dataset)
self = recommender
# get recommendation for one user
merger = Merger(filmweb=Filmweb(), imdb=movies.imdb)
watched = get_watched(get_moviescore_df(merger, recommender.movies,'arek'))
k = 20
k_inner_item = 20
self.get_recommendation(watched)
"""
| 37.061728 | 103 | 0.741506 | from collections import defaultdict
from operator import itemgetter
from movies_analyzer.Movies import Movies
from movies_analyzer.RecommendationDataset import RecommendationDataSet
from movies_recommender.Recommender import Recommender
from surprise import SVD, KNNBasic
from movies_recommender.utils import get_top_n
class RecommenderSVD(Recommender):
def __init__(self, recommendation_dataset: RecommendationDataSet):
super(RecommenderSVD, self).__init__(recommendation_dataset.movies)
self.algorithm = SVD()
self.recommendation_dataset = recommendation_dataset
def fit(self, dataset):
return self.algorithm.fit(dataset)
def test(self, test_set):
return self.algorithm.test(test_set)
def get_recommendation(self, watched, k=20):
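        # Fold the new user's ratings into the dataset, re-fit SVD on the
        # extended data, then rank every unwatched movie by predicted rating.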
new_user_id, full_dataset = self.recommendation_dataset.get_dataset_with_extended_user(watched)
inner_user_id = full_dataset.to_inner_uid(new_user_id)
self.algorithm.fit(full_dataset)
watched = {full_dataset.to_inner_iid(key): value for key,value in watched.items()}
test_items = [
self.algorithm.predict(new_user_id, full_dataset.to_raw_iid(i))
for i in range(0, full_dataset.n_items)
if i not in watched
]
topn_items = [i[0] for i in get_top_n(test_items, n=k, minimum_rating=1.0)[new_user_id]]
return self.movies.get_movie_by_movie_ids(topn_items)
if __name__ == '__main__':
from movies_recommender.Recommender import test_recommendation
from movies_recommender.RecommenderSVD import RecommenderSVD
from movies_analyzer.RecommendationDataset import RecommendationDataSet
from movies_analyzer.Movies import Movies
movies = Movies()
recommendation_dataset = RecommendationDataSet(movies=movies)
recommender = RecommenderSVD(recommendation_dataset)
assert recommender.__module__[:len('movies_recommender.')] == 'movies_recommender.'
test_recommendation(recommender, recommendation_dataset,
example_items=['arek','mateusz'], anti_test=True)
| true | true |
f700658fdb8362dbb88d94c948cdb708d1fac1ec | 1,320 | py | Python | lib/iana/ripe/objects/mntner.py | sschwetz/network_tech | fc65166e71bfdb5a0e99ca7e7ce9f7814b92869b | [
"Apache-2.0"
] | 73 | 2017-05-04T06:35:20.000Z | 2022-02-03T13:57:00.000Z | lib/iana/ripe/objects/mntner.py | sschwetz/network_tech | fc65166e71bfdb5a0e99ca7e7ce9f7814b92869b | [
"Apache-2.0"
] | 35 | 2017-11-09T16:28:48.000Z | 2022-01-12T08:15:48.000Z | lib/iana/ripe/objects/mntner.py | sschwetz/network_tech | fc65166e71bfdb5a0e99ca7e7ce9f7814b92869b | [
"Apache-2.0"
] | 20 | 2017-11-08T05:07:59.000Z | 2021-12-09T17:41:06.000Z | """
Copyright 2019 Glen Harmon
MNTNER Object Description
https://www.ripe.net/manage-ips-and-asns/db/support/documentation/ripe-database-documentation/rpsl-object-types/4-3-descriptions-of-secondary-objects/4-3-4-description-of-the-mntner-object
"""
from .rpsl import Rpsl
class Maintainer(Rpsl):
def __init__(self):
self.handle = None
self.description = list()
self.update_to = list()
self.maintainer_notify = list()
self.authentication = list()
super().__init__()
def html(self, heading_level=1):
return super().html(
title='Maintainer',
attributes=[
(None, self.handle),
('Description', self.description),
('Update To', self.update_to),
('Maintainer Notify', self.maintainer_notify),
('Authentication', self.authentication),
('Organisation', self.organisation),
('Admin Contact', self.admin_contact),
('Technical Contact', self.technical_contact),
('Remarks', self.remarks),
('Notify', self.notify),
('Maintained By', self.maintained_by),
('Modified', self.modified),
('Type', self.type_),
]
)
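# Rough usage sketch (field values are invented; the organisation, contact and
# other attributes referenced by ``html()`` come from the ``Rpsl`` base class):
#
#     maintainer = Maintainer()
#     maintainer.handle = "EXAMPLE-MNT"
#     maintainer.description.append("Example maintainer object")
#     maintainer.authentication.append("PGPKEY-XXXXXXXX")
#     rendered = maintainer.html(heading_level=2)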
| 32.195122 | 188 | 0.567424 |
from .rpsl import Rpsl
class Maintainer(Rpsl):
def __init__(self):
self.handle = None
self.description = list()
self.update_to = list()
self.maintainer_notify = list()
self.authentication = list()
super().__init__()
def html(self, heading_level=1):
return super().html(
title='Maintainer',
attributes=[
(None, self.handle),
('Description', self.description),
('Update To', self.update_to),
('Maintainer Notify', self.maintainer_notify),
('Authentication', self.authentication),
('Organisation', self.organisation),
('Admin Contact', self.admin_contact),
('Technical Contact', self.technical_contact),
('Remarks', self.remarks),
('Notify', self.notify),
('Maintained By', self.maintained_by),
('Modified', self.modified),
('Type', self.type_),
]
)
| true | true |
f700666f3ddc127976f134add98c0159870b03e1 | 8,280 | py | Python | safedelete/queryset.py | MarcelForArt/django-safedelete | 0902e3db957bee0fa9bf9058e8f0a0d2e0e027d7 | [
"BSD-3-Clause"
] | null | null | null | safedelete/queryset.py | MarcelForArt/django-safedelete | 0902e3db957bee0fa9bf9058e8f0a0d2e0e027d7 | [
"BSD-3-Clause"
] | null | null | null | safedelete/queryset.py | MarcelForArt/django-safedelete | 0902e3db957bee0fa9bf9058e8f0a0d2e0e027d7 | [
"BSD-3-Clause"
] | null | null | null | from django.db.models import query
from .query import SafeDeleteQuery
from functools import partial, reduce
from django.db.models.constants import LOOKUP_SEP
from django.db.models import Max, Min, F
from django.utils.module_loading import import_string
def get_lookup_value(obj, field):
return reduce(lambda i, f: getattr(i, f), field.split(LOOKUP_SEP), obj)
class SafeDeleteQueryset(query.QuerySet):
"""Default queryset for the SafeDeleteManager.
Takes care of "lazily evaluating" safedelete QuerySets. QuerySets passed
within the ``SafeDeleteQueryset`` will have all of the models available.
The deleted policy is evaluated at the very end of the chain when the
QuerySet itself is evaluated.
"""
def __init__(self, model=None, query=None, using=None, hints=None):
super(SafeDeleteQueryset, self).__init__(model=model, query=query, using=using, hints=hints)
self.query = query or SafeDeleteQuery(self.model)
def delete(self, force_policy=None):
"""Overrides bulk delete behaviour.
.. note::
The current implementation loses performance on bulk deletes in order
to safely delete objects according to the deletion policies set.
.. seealso::
:py:func:`safedelete.models.SafeDeleteModel.delete`
"""
assert self.query.can_filter(), "Cannot use 'limit' or 'offset' with delete."
# TODO: Replace this by bulk update if we can
for obj in self.all():
obj.delete(force_policy=force_policy)
self._result_cache = None
delete.alters_data = True
def undelete(self, force_policy=None):
"""Undelete all soft deleted models.
.. note::
The current implementation loses performance on bulk undeletes in
order to call the pre/post-save signals.
.. seealso::
:py:func:`safedelete.models.SafeDeleteModel.undelete`
"""
assert self.query.can_filter(), "Cannot use 'limit' or 'offset' with undelete."
# TODO: Replace this by bulk update if we can (need to call pre/post-save signal)
for obj in self.all():
obj.undelete(force_policy=force_policy)
self._result_cache = None
undelete.alters_data = True
def all(self, force_visibility=None):
"""Override so related managers can also see the deleted models.
A model's m2m field does not easily have access to `all_objects` and
so setting `force_visibility` to True is a way of getting all of the
models. It is not recommended to use `force_visibility` outside of related
models because it will create a new queryset.
Args:
force_visibility: Force a deletion visibility. (default: {None})
"""
if force_visibility is not None:
self.query._safedelete_force_visibility = force_visibility
return super(SafeDeleteQueryset, self).all()
def filter(self, *args, **kwargs):
# Return a copy, see #131
queryset = self._clone()
queryset.query.check_field_filter(**kwargs)
return super(SafeDeleteQueryset, queryset).filter(*args, **kwargs)
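# Rough usage sketch (the ``Article`` model and its managers are hypothetical;
# any SafeDeleteModel whose managers return this queryset class behaves the
# same way):
#
#     Article.objects.filter(published=False).delete()        # soft-delete, honouring the policy
#     Article.all_objects.filter(published=False).undelete()   # restore the soft-deleted rows
#     Article.objects.all(force_visibility=True)               # let related managers see deleted rows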
class OrderedSafeDeleteQueryset(SafeDeleteQueryset):
"""
# ADDED BY LEE
This extends SafeDeleteQueryset with methods from OrderedModelQuerySet
of the django-ordered-model package, so that we can have both proper ordering and
safe-deletion
"""
def _get_order_field_name(self):
return self.model.order_field_name
def _get_order_field_lookup(self, lookup):
order_field_name = self._get_order_field_name()
return LOOKUP_SEP.join([order_field_name, lookup])
def _get_order_with_respect_to(self):
model = self.model
order_with_respect_to = model.order_with_respect_to
if isinstance(order_with_respect_to, str):
order_with_respect_to = (order_with_respect_to,)
if order_with_respect_to is None:
raise AssertionError(
(
'ordered model admin "{0}" has not specified "order_with_respect_to"; note that this '
"should go in the model body, and is not to be confused with the Meta property of the same name, "
"which is independent Django functionality"
).format(model)
)
return order_with_respect_to
def get_max_order(self):
order_field_name = self._get_order_field_name()
return self.aggregate(Max(order_field_name)).get(
self._get_order_field_lookup("max")
)
def get_min_order(self):
order_field_name = self._get_order_field_name()
return self.aggregate(Min(order_field_name)).get(
self._get_order_field_lookup("min")
)
def get_next_order(self):
order = self.get_max_order()
return order + 1 if order is not None else 0
def above(self, order, inclusive=False):
"""Filter items above order."""
lookup = "gte" if inclusive else "gt"
return self.filter(**{self._get_order_field_lookup(lookup): order})
def above_instance(self, ref, inclusive=False):
"""Filter items above ref's order."""
order_field_name = self._get_order_field_name()
order = getattr(ref, order_field_name)
return self.above(order, inclusive=inclusive)
def below(self, order, inclusive=False):
"""Filter items below order."""
lookup = "lte" if inclusive else "lt"
return self.filter(**{self._get_order_field_lookup(lookup): order})
def below_instance(self, ref, inclusive=False):
"""Filter items below ref's order."""
order_field_name = self._get_order_field_name()
order = getattr(ref, order_field_name)
return self.below(order, inclusive=inclusive)
def decrease_order(self, **extra_kwargs):
"""Decrease `order_field_name` value by 1."""
order_field_name = self._get_order_field_name()
update_kwargs = {order_field_name: F(order_field_name) - 1}
if extra_kwargs:
update_kwargs.update(extra_kwargs)
return self.update(**update_kwargs)
def increase_order(self, **extra_kwargs):
"""Increase `order_field_name` value by 1."""
order_field_name = self._get_order_field_name()
update_kwargs = {order_field_name: F(order_field_name) + 1}
if extra_kwargs:
update_kwargs.update(extra_kwargs)
return self.update(**update_kwargs)
def bulk_create(self, objs, batch_size=None):
order_field_name = self._get_order_field_name()
order_with_respect_to = self.model.order_with_respect_to
objs = list(objs)
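        # With order_with_respect_to, keep a running next-order counter per
        # lookup key; otherwise number the new objects sequentially from the
        # queryset-wide next order.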
if order_with_respect_to:
order_with_respect_to_mapping = {}
order_with_respect_to = self._get_order_with_respect_to()
for obj in objs:
key = tuple(
get_lookup_value(obj, field) for field in order_with_respect_to
)
if key in order_with_respect_to_mapping:
order_with_respect_to_mapping[key] += 1
else:
order_with_respect_to_mapping[
key
] = self.filter_by_order_with_respect_to(obj).get_next_order()
setattr(obj, order_field_name, order_with_respect_to_mapping[key])
else:
for order, obj in enumerate(objs, self.get_next_order()):
setattr(obj, order_field_name, order)
return super().bulk_create(objs, batch_size=batch_size)
def _get_order_with_respect_to_filter_kwargs(self, ref):
order_with_respect_to = self._get_order_with_respect_to()
_get_lookup_value = partial(get_lookup_value, ref)
return {field: _get_lookup_value(field) for field in order_with_respect_to}
_get_order_with_respect_to_filter_kwargs.queryset_only = False
def filter_by_order_with_respect_to(self, ref):
order_with_respect_to = self.model.order_with_respect_to
if order_with_respect_to:
filter_kwargs = self._get_order_with_respect_to_filter_kwargs(ref)
return self.filter(**filter_kwargs)
return self
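# --- Editor's note: illustrative usage sketch, not part of the original module. ---
# One plausible way to attach this queryset to a model; the model and field names
# below are hypothetical, and SafeDeleteModel/SafeDeleteManager come from the
# installed django-safedelete package (django-ordered-model conventions for the
# order field are assumed).
#
#     from django.db import models
#     from safedelete.managers import SafeDeleteManager
#     from safedelete.models import SafeDeleteModel
#
#     class Step(SafeDeleteModel):
#         order = models.PositiveIntegerField(default=0, db_index=True)
#         order_field_name = "order"
#         order_with_respect_to = None
#         objects = SafeDeleteManager.from_queryset(OrderedSafeDeleteQueryset)()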
| 40.390244 | 118 | 0.666304 | from django.db.models import query
from .query import SafeDeleteQuery
from functools import partial, reduce
from django.db.models.constants import LOOKUP_SEP
from django.db.models import Max, Min, F
from django.utils.module_loading import import_string
def get_lookup_value(obj, field):
return reduce(lambda i, f: getattr(i, f), field.split(LOOKUP_SEP), obj)
class SafeDeleteQueryset(query.QuerySet):
def __init__(self, model=None, query=None, using=None, hints=None):
super(SafeDeleteQueryset, self).__init__(model=model, query=query, using=using, hints=hints)
self.query = query or SafeDeleteQuery(self.model)
def delete(self, force_policy=None):
assert self.query.can_filter(), "Cannot use 'limit' or 'offset' with delete."
for obj in self.all():
obj.delete(force_policy=force_policy)
self._result_cache = None
delete.alters_data = True
def undelete(self, force_policy=None):
assert self.query.can_filter(), "Cannot use 'limit' or 'offset' with undelete."
for obj in self.all():
obj.undelete(force_policy=force_policy)
self._result_cache = None
undelete.alters_data = True
def all(self, force_visibility=None):
if force_visibility is not None:
self.query._safedelete_force_visibility = force_visibility
return super(SafeDeleteQueryset, self).all()
def filter(self, *args, **kwargs):
queryset = self._clone()
queryset.query.check_field_filter(**kwargs)
return super(SafeDeleteQueryset, queryset).filter(*args, **kwargs)
class OrderedSafeDeleteQueryset(SafeDeleteQueryset):
def _get_order_field_name(self):
return self.model.order_field_name
def _get_order_field_lookup(self, lookup):
order_field_name = self._get_order_field_name()
return LOOKUP_SEP.join([order_field_name, lookup])
def _get_order_with_respect_to(self):
model = self.model
order_with_respect_to = model.order_with_respect_to
if isinstance(order_with_respect_to, str):
order_with_respect_to = (order_with_respect_to,)
if order_with_respect_to is None:
raise AssertionError(
(
'ordered model admin "{0}" has not specified "order_with_respect_to"; note that this '
"should go in the model body, and is not to be confused with the Meta property of the same name, "
"which is independent Django functionality"
).format(model)
)
return order_with_respect_to
def get_max_order(self):
order_field_name = self._get_order_field_name()
return self.aggregate(Max(order_field_name)).get(
self._get_order_field_lookup("max")
)
def get_min_order(self):
order_field_name = self._get_order_field_name()
return self.aggregate(Min(order_field_name)).get(
self._get_order_field_lookup("min")
)
def get_next_order(self):
order = self.get_max_order()
return order + 1 if order is not None else 0
def above(self, order, inclusive=False):
lookup = "gte" if inclusive else "gt"
return self.filter(**{self._get_order_field_lookup(lookup): order})
def above_instance(self, ref, inclusive=False):
order_field_name = self._get_order_field_name()
order = getattr(ref, order_field_name)
return self.above(order, inclusive=inclusive)
def below(self, order, inclusive=False):
lookup = "lte" if inclusive else "lt"
return self.filter(**{self._get_order_field_lookup(lookup): order})
def below_instance(self, ref, inclusive=False):
order_field_name = self._get_order_field_name()
order = getattr(ref, order_field_name)
return self.below(order, inclusive=inclusive)
def decrease_order(self, **extra_kwargs):
order_field_name = self._get_order_field_name()
update_kwargs = {order_field_name: F(order_field_name) - 1}
if extra_kwargs:
update_kwargs.update(extra_kwargs)
return self.update(**update_kwargs)
def increase_order(self, **extra_kwargs):
order_field_name = self._get_order_field_name()
update_kwargs = {order_field_name: F(order_field_name) + 1}
if extra_kwargs:
update_kwargs.update(extra_kwargs)
return self.update(**update_kwargs)
def bulk_create(self, objs, batch_size=None):
order_field_name = self._get_order_field_name()
order_with_respect_to = self.model.order_with_respect_to
objs = list(objs)
if order_with_respect_to:
order_with_respect_to_mapping = {}
order_with_respect_to = self._get_order_with_respect_to()
for obj in objs:
key = tuple(
get_lookup_value(obj, field) for field in order_with_respect_to
)
if key in order_with_respect_to_mapping:
order_with_respect_to_mapping[key] += 1
else:
order_with_respect_to_mapping[
key
] = self.filter_by_order_with_respect_to(obj).get_next_order()
setattr(obj, order_field_name, order_with_respect_to_mapping[key])
else:
for order, obj in enumerate(objs, self.get_next_order()):
setattr(obj, order_field_name, order)
return super().bulk_create(objs, batch_size=batch_size)
def _get_order_with_respect_to_filter_kwargs(self, ref):
order_with_respect_to = self._get_order_with_respect_to()
_get_lookup_value = partial(get_lookup_value, ref)
return {field: _get_lookup_value(field) for field in order_with_respect_to}
_get_order_with_respect_to_filter_kwargs.queryset_only = False
def filter_by_order_with_respect_to(self, ref):
order_with_respect_to = self.model.order_with_respect_to
if order_with_respect_to:
filter_kwargs = self._get_order_with_respect_to_filter_kwargs(ref)
return self.filter(**filter_kwargs)
return self
| true | true |
f70067377c11a437d33b5c947e65a2cb6d20e8e6 | 2,056 | py | Python | markovGames/learning/bruteSearch.py | rohit-konda/markovGames | d6dd1b8a11f1c95658a468f9e471aecfcf0e6839 | [
"MIT"
] | null | null | null | markovGames/learning/bruteSearch.py | rohit-konda/markovGames | d6dd1b8a11f1c95658a468f9e471aecfcf0e6839 | [
"MIT"
] | null | null | null | markovGames/learning/bruteSearch.py | rohit-konda/markovGames | d6dd1b8a11f1c95658a468f9e471aecfcf0e6839 | [
"MIT"
] | null | null | null | import numpy as np
from itertools import product
from markovGames.gameDefs.mdpDefs import Policy
def getAllDetPol(numStates, numActions):
detProbs = [np.array([1 if j == i else 0 for j in range(numActions)]) for i in range(numActions)]
return product(detProbs, repeat=numStates)
def getPolList(states, acSet):
# list of possible deterministic policies
numStates = len(states)
numActions = len(acSet)
detPol = getAllDetPol(numStates, numActions)
return [Policy(states, pol, acSet) for pol in detPol]
def prodPolList(states, listActions):
# get policies for each action Set
polList = [getPolList(states, ac) for ac in listActions]
return polList
def getPayoff(utilMap, listAcSet):
# utilMap: maps list of agent policies to real numbers,
    # listAcSet: for each agent i, the list of its possible policies
def utilInd(index):
jointAc = [listAcSet[j][ind] for j, ind in enumerate(index)]
val = utilMap(jointAc)
return val
numPL = [len(pL) for pL in listAcSet]
payoff = np.zeros(numPL)
for ind in product(*[range(nI) for nI in numPL]):
payoff[ind] = utilInd(ind)
return payoff
def getArgOpt(tensor):
return np.unravel_index(np.argmax(tensor), tensor.shape)
def bruteFindNash(payoffList):
TOLERANCE = 1e-7
cpnes = list(np.argwhere(payoffList[0] > np.amax(payoffList[0], 0) - TOLERANCE))
cpnes = [tuple(cpne) for cpne in cpnes]
N = len(payoffList)
for i in range(1, N):
pMat = payoffList[i]
for cpne in cpnes[:]:
ind = cpne[:i] + (slice(None),) + cpne[i + 1:]
if pMat[cpne] < np.max(pMat[ind]) - TOLERANCE:
cpnes.pop(cpnes.index(cpne))
return cpnes
def getEfficiency(cpnes, welfareMat):
# welfareMat - matrix form of welfare
pneWelf = [welfareMat[cpne] for cpne in cpnes]
opt = np.max(welfareMat)
priceRatios = [float(pne) / opt for pne in pneWelf]
return priceRatios
def getPoA(cpnes, welfareMat):
return min(getEfficiency(cpnes, welfareMat))
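# --- Editor's note: illustrative, self-contained usage sketch (not in the original file). ---
# A toy 2x2 coordination game in which both agents share one utility; the action labels
# and the choice of payoffs[0] as the welfare matrix are assumptions made for the example.
if __name__ == '__main__':
    acSets = [['a', 'b'], ['a', 'b']]
    coord = lambda joint: 1.0 if joint[0] == joint[1] else 0.0
    payoffs = [getPayoff(coord, acSets), getPayoff(coord, acSets)]
    pnes = bruteFindNash(payoffs)            # [(0, 0), (1, 1)] for this game
    print(pnes, getPoA(pnes, payoffs[0]))    # price of anarchy is 1.0 here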
| 29.797101 | 101 | 0.668774 | import numpy as np
from itertools import product
from markovGames.gameDefs.mdpDefs import Policy
def getAllDetPol(numStates, numActions):
detProbs = [np.array([1 if j == i else 0 for j in range(numActions)]) for i in range(numActions)]
return product(detProbs, repeat=numStates)
def getPolList(states, acSet):
numStates = len(states)
numActions = len(acSet)
detPol = getAllDetPol(numStates, numActions)
return [Policy(states, pol, acSet) for pol in detPol]
def prodPolList(states, listActions):
polList = [getPolList(states, ac) for ac in listActions]
return polList
def getPayoff(utilMap, listAcSet):
def utilInd(index):
jointAc = [listAcSet[j][ind] for j, ind in enumerate(index)]
val = utilMap(jointAc)
return val
numPL = [len(pL) for pL in listAcSet]
payoff = np.zeros(numPL)
for ind in product(*[range(nI) for nI in numPL]):
payoff[ind] = utilInd(ind)
return payoff
def getArgOpt(tensor):
return np.unravel_index(np.argmax(tensor), tensor.shape)
def bruteFindNash(payoffList):
TOLERANCE = 1e-7
cpnes = list(np.argwhere(payoffList[0] > np.amax(payoffList[0], 0) - TOLERANCE))
cpnes = [tuple(cpne) for cpne in cpnes]
N = len(payoffList)
for i in range(1, N):
pMat = payoffList[i]
for cpne in cpnes[:]:
ind = cpne[:i] + (slice(None),) + cpne[i + 1:]
if pMat[cpne] < np.max(pMat[ind]) - TOLERANCE:
cpnes.pop(cpnes.index(cpne))
return cpnes
def getEfficiency(cpnes, welfareMat):
pneWelf = [welfareMat[cpne] for cpne in cpnes]
opt = np.max(welfareMat)
priceRatios = [float(pne) / opt for pne in pneWelf]
return priceRatios
def getPoA(cpnes, welfareMat):
return min(getEfficiency(cpnes, welfareMat))
| true | true |
f70068b2a70ca8e179b30626bfb6b8c33328a45e | 2,694 | py | Python | cloudmesh-john/setup.py | cybertraining-dsc/fa19-516-163 | d6ba96bd08c8f37c50ac86bf83df7a01d1f31b98 | [
"Apache-2.0"
] | null | null | null | cloudmesh-john/setup.py | cybertraining-dsc/fa19-516-163 | d6ba96bd08c8f37c50ac86bf83df7a01d1f31b98 | [
"Apache-2.0"
] | 1 | 2019-09-24T13:59:25.000Z | 2019-09-27T00:16:20.000Z | cloudmesh-john/setup.py | cybertraining-dsc/fa19-516-163 | d6ba96bd08c8f37c50ac86bf83df7a01d1f31b98 | [
"Apache-2.0"
] | 1 | 2019-09-25T00:21:42.000Z | 2019-09-25T00:21:42.000Z | #!/usr/bin/env python
# ----------------------------------------------------------------------- #
# Copyright 2017, Gregor von Laszewski, Indiana University #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.#
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ------------------------------------------------------------------------#
from setuptools import find_packages, setup
import io
def readfile(filename):
with io.open(filename, encoding="utf-8") as stream:
return stream.read().split()
#requiers = readfile ('requirements.txt')
#
# add minimum requirements here
#
requiers = """
psutil
pygments
""".split("\n")
# dependency_links = ['http://github.com/nicolaiarocci/eve.git@develop']
version = readfile("VERSION")[0].strip()
with open('README.md') as f:
long_description = f.read()
NAME = "cloudmesh-john"
DESCRIPTION = "A command called john and foo for the cloudmesh shell"
AUTHOR = "Gregor von Laszewski"
AUTHOR_EMAIL = "[email protected]"
URL = "https://github.com/cloudmesh/cloudmesh-john"
setup(
name=NAME,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
description=DESCRIPTION,
long_description=long_description,
long_description_content_type="text/markdown",
version=version,
license="Apache 2.0",
url=URL,
packages=find_packages(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
],
install_requires=requiers,
tests_require=[
"flake8",
"coverage",
],
zip_safe=False,
namespace_packages=['cloudmesh'],
)
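# Editor's note (assumption, not from the repository docs): this setup script is
# typically exercised from the package root with `pip install -e .` or
# `python setup.py sdist`, and it expects the VERSION and README.md files read
# above to be present next to it.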
| 33.675 | 75 | 0.554937 |
from setuptools import find_packages, setup
import io
def readfile(filename):
with io.open(filename, encoding="utf-8") as stream:
return stream.read().split()
requiers = """
psutil
pygments
""".split("\n")
version = readfile("VERSION")[0].strip()
with open('README.md') as f:
long_description = f.read()
NAME = "cloudmesh-john"
DESCRIPTION = "A command called john and foo for the cloudmesh shell"
AUTHOR = "Gregor von Laszewski"
AUTHOR_EMAIL = "[email protected]"
URL = "https://github.com/cloudmesh/cloudmesh-john"
setup(
name=NAME,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
description=DESCRIPTION,
long_description=long_description,
long_description_content_type="text/markdown",
version=version,
license="Apache 2.0",
url=URL,
packages=find_packages(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
],
install_requires=requiers,
tests_require=[
"flake8",
"coverage",
],
zip_safe=False,
namespace_packages=['cloudmesh'],
)
| true | true |
f7006aafd3946f2f78f841d508a5f60194382134 | 6,057 | py | Python | ctd_processing/_old_dialog.py | sharksmhi/ctd_processing | 616df4cd7ed626b678622448a08a0356086a8a3f | [
"MIT"
] | null | null | null | ctd_processing/_old_dialog.py | sharksmhi/ctd_processing | 616df4cd7ed626b678622448a08a0356086a8a3f | [
"MIT"
] | null | null | null | ctd_processing/_old_dialog.py | sharksmhi/ctd_processing | 616df4cd7ed626b678622448a08a0356086a8a3f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 06 11:19:04 2012
Program to open a dialog box and get a file, check the file name and send it
along with the serienummer
@author: a001109
Updated: 191010 MHan, Updated for Svea profiles and new CTD.
"""
def checkCtdFileName(ctd=None, confile='.XMLCON'):
import Tkinter, tkFileDialog
import os
import time
import sys
import codecs
root = Tkinter.Tk()
root.withdraw() #hiding tkinter window
if ctd == None:
sys.exit("No CTD given, when calling checkCtdFileName() add a CTD number like checkCtdFileName(ctd='1044')")
file_path = tkFileDialog.askopenfilename(title="Open file", initialdir="C:\\ctd\\temp",
filetypes=[("hdr files",".hdr")])
# if '/' in file_path:
# file_path = file_path.replace('/','\\')
fname = os.path.basename(file_path).upper()
print fname
path = os.path.dirname(file_path)
#Ctd from Dana
if fname[:4] == '26DA':
        #Dana filenames usually look like this: 26DA.2016.10.1.hdr
#Merge Cruisenr and activity number 1001. Add zeros.
#If serienummer is over 100 add this to next cruise nr 1000
if int(fname.split('.')[3].zfill(2)) > 99:
serienummer = str(int(fname.split('.')[2])+1).zfill(2) + fname.split('.')[3][1:]
else:
serienummer = fname.split('.')[2].zfill(2) + fname.split('.')[3].zfill(2)
#CTD from Aranda or Meri or Aura
elif fname[:2] == 'AR' or fname[:2] == 'ME' or fname[:2] == 'AU':
#sv19d0003.hex
#serienummer = fname[3:7]
if len(fname) == 12:
serienummer = fname[5:8]
            # pad to 4 digits
serienummer = serienummer.zfill(4)
else: # if series is 4 digits
serienummer = fname[5:8]
serienummer = serienummer.zfill(4)
elif fname[:2] == 'SV': # Svea
serienummer = fname[5:9]
#CTD in processed format: 'SBE09_0745_20161010_1139_34_01_0544.hdr'
#to be reprocessed
elif fname[:5] == 'SBE09':
serienummer = fname.split('_')[-1][:4]
else:
sys.exit('could not get "serienummer" from file name %s, stops!' % fname)
print 'serienummer: ',serienummer,
# Open the header file:
with codecs.open(file_path,'r', encoding = 'cp1252') as f:
allHeaderInfo = f.readlines()
f.closed
stationname = ''
for rows in allHeaderInfo:
print rows
if '* System UpLoad Time' in rows:
datestring = rows[23:40]
if '** Station:' in rows:
stationname = rows[13:].strip('\r\n')
print 'Stationsnamn:',stationname
#Feb 28 2012 16:13 -> 20120228_1613
c = time.strptime(datestring,"%b %d %Y %H:%M")
datum = time.strftime("%Y%m%d_%H%M",c)
if fname[:2] == 'AR':
new_fname = 'SBE09_' + ctd + '_' + datum + '_34_01_' + serienummer
elif fname[:2] == 'SV':
new_fname = 'SBE09_' + ctd + '_' + datum + '_77_10_' + serienummer
elif fname[:2] == 'ME':
new_fname = 'SBE09_' + ctd + '_' + datum + '_34_02_' + serienummer
elif fname[:2] == 'AU':
new_fname = 'SBE09_' + ctd + '_' + datum + '_34_07_' + serienummer
elif fname[:4] == '26DA':
new_fname = 'SBE09_' + ctd + '_' + datum + '_26_01_' + serienummer
elif fname[:5] == 'SBE09':
new_fname = fname.split('.')[0]
else:
sys.exit('Fel format serienummer!')
print new_fname
#fname = 'SBE19_0745_20110409_0522_77_28_0053.hex'
#fname = os.path.basename(file_path)
sub_str = new_fname.split('_')
#sub_str = fname.split('_')
    #Check the file name length:
#SBE19_6164_20110801_1550_77_01_0053.hex
counter = 0
for part in sub_str:
counter = counter + 1
if counter == 1:
            #check the file name; we may need to add a better check here eventually
print counter
if part != 'SBE09':
sys.exit('Fel instrumentnamn!')
if counter == 2:
print counter
if part not in ['0745','1044','0817','0403','0827','1387']:
sys.exit('Fel intrument serienummer!')
if counter == 3:
print counter
if len(part) != 8:
sys.exit('Fel datumformat!')
if counter == 4:
print counter
if len(part) != 4:
sys.exit('Fel tidsformat!')
if counter == 5:
print counter
if part == 34:
sys.exit('Fel landkod!')
if counter == 6:
print counter
if part == 01:
sys.exit('Fel fartygskod!')
if counter == 7:
print counter
serieNo = part.split('.')[0]
print serieNo
if len(part.split('.')[0]) != 4:
sys.exit('Fel format serienummer!')
# if part.split('.')[1] != 'hex':
# sys.exit('Fel filformat skall vara *.hex!')
    #Rename the files, but only if the names differ.
if fname.split('.')[0] != new_fname:
os.rename(file_path, path + '\\' + new_fname + '.hdr')
print path + '\\' + fname.rsplit('.',1)[0] + confile
print path + '\\' + new_fname + confile
os.rename(path + '\\' + fname.rsplit('.',1)[0] + confile, path + '\\' + new_fname + confile)
os.rename(path + '\\' + fname.rsplit('.',1)[0] + '.hex', path + '\\' + new_fname + '.hex')
os.rename(path + '\\' + fname.rsplit('.',1)[0] + '.bl', path + '\\' + new_fname + '.bl')
return new_fname, serieNo, stationname
    #Correct name format: check the name
#SBE19plus_01906164_2011_08_03_0001.hex
#SBE19_6164_20110801_1550_77_01_0053
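#Editor's note: illustrative call, not in the original file. The CTD serial number is
#hypothetical; the function opens a Tkinter dialog to pick the .hdr file and returns
#the renamed base name, the series number and the station name:
#    new_fname, serieNo, stationname = checkCtdFileName(ctd='1044')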
| 37.85625 | 116 | 0.527819 | """
Created on Mon Feb 06 11:19:04 2012
Program to open a dialog box and get a file, check the file name and send it
along with the serienummer
@author: a001109
Updated: 191010 MHan, Updated for Svea profiles and new CTD.
"""
def checkCtdFileName(ctd=None, confile='.XMLCON'):
import Tkinter, tkFileDialog
import os
import time
import sys
import codecs
root = Tkinter.Tk()
root.withdraw()
if ctd == None:
sys.exit("No CTD given, when calling checkCtdFileName() add a CTD number like checkCtdFileName(ctd='1044')")
file_path = tkFileDialog.askopenfilename(title="Open file", initialdir="C:\\ctd\\temp",
filetypes=[("hdr files",".hdr")])
fname = os.path.basename(file_path).upper()
print fname
path = os.path.dirname(file_path)
if fname[:4] == '26DA':
if int(fname.split('.')[3].zfill(2)) > 99:
serienummer = str(int(fname.split('.')[2])+1).zfill(2) + fname.split('.')[3][1:]
else:
serienummer = fname.split('.')[2].zfill(2) + fname.split('.')[3].zfill(2)
elif fname[:2] == 'AR' or fname[:2] == 'ME' or fname[:2] == 'AU':
if len(fname) == 12:
serienummer = fname[5:8]
serienummer = serienummer.zfill(4)
else: serienummer = fname[5:8]
serienummer = serienummer.zfill(4)
elif fname[:2] == 'SV': serienummer = fname[5:9]
elif fname[:5] == 'SBE09':
serienummer = fname.split('_')[-1][:4]
else:
sys.exit('could not get "serienummer" from file name %s, stops!' % fname)
print 'serienummer: ',serienummer,
with codecs.open(file_path,'r', encoding = 'cp1252') as f:
allHeaderInfo = f.readlines()
f.closed
stationname = ''
for rows in allHeaderInfo:
print rows
if '* System UpLoad Time' in rows:
datestring = rows[23:40]
if '** Station:' in rows:
stationname = rows[13:].strip('\r\n')
print 'Stationsnamn:',stationname
c = time.strptime(datestring,"%b %d %Y %H:%M")
datum = time.strftime("%Y%m%d_%H%M",c)
if fname[:2] == 'AR':
new_fname = 'SBE09_' + ctd + '_' + datum + '_34_01_' + serienummer
elif fname[:2] == 'SV':
new_fname = 'SBE09_' + ctd + '_' + datum + '_77_10_' + serienummer
elif fname[:2] == 'ME':
new_fname = 'SBE09_' + ctd + '_' + datum + '_34_02_' + serienummer
elif fname[:2] == 'AU':
new_fname = 'SBE09_' + ctd + '_' + datum + '_34_07_' + serienummer
elif fname[:4] == '26DA':
new_fname = 'SBE09_' + ctd + '_' + datum + '_26_01_' + serienummer
elif fname[:5] == 'SBE09':
new_fname = fname.split('.')[0]
else:
sys.exit('Fel format serienummer!')
print new_fname
sub_str = new_fname.split('_')
counter = 0
for part in sub_str:
counter = counter + 1
if counter == 1:
print counter
if part != 'SBE09':
sys.exit('Fel instrumentnamn!')
if counter == 2:
print counter
if part not in ['0745','1044','0817','0403','0827','1387']:
sys.exit('Fel intrument serienummer!')
if counter == 3:
print counter
if len(part) != 8:
sys.exit('Fel datumformat!')
if counter == 4:
print counter
if len(part) != 4:
sys.exit('Fel tidsformat!')
if counter == 5:
print counter
if part == 34:
sys.exit('Fel landkod!')
if counter == 6:
print counter
if part == 01:
sys.exit('Fel fartygskod!')
if counter == 7:
print counter
serieNo = part.split('.')[0]
print serieNo
if len(part.split('.')[0]) != 4:
sys.exit('Fel format serienummer!')
if fname.split('.')[0] != new_fname:
os.rename(file_path, path + '\\' + new_fname + '.hdr')
print path + '\\' + fname.rsplit('.',1)[0] + confile
print path + '\\' + new_fname + confile
os.rename(path + '\\' + fname.rsplit('.',1)[0] + confile, path + '\\' + new_fname + confile)
os.rename(path + '\\' + fname.rsplit('.',1)[0] + '.hex', path + '\\' + new_fname + '.hex')
os.rename(path + '\\' + fname.rsplit('.',1)[0] + '.bl', path + '\\' + new_fname + '.bl')
return new_fname, serieNo, stationname
| false | true |
f7006be989e72bdce78c68bad744121539b63e0a | 8,142 | py | Python | framework/TSA/Wavelet.py | FlanFlanagan/raven | bd7fca18af94376a28e2144ba1da72c01c8d343c | [
"Apache-2.0"
] | 159 | 2017-03-24T21:07:06.000Z | 2022-03-20T13:44:40.000Z | framework/TSA/Wavelet.py | FlanFlanagan/raven | bd7fca18af94376a28e2144ba1da72c01c8d343c | [
"Apache-2.0"
] | 1,667 | 2017-03-27T14:41:22.000Z | 2022-03-31T19:50:06.000Z | framework/TSA/Wavelet.py | wanghy-anl/raven | ef1372364a2776385931763f2b28fdf2930c77b9 | [
"Apache-2.0"
] | 95 | 2017-03-24T21:05:03.000Z | 2022-03-08T17:30:22.000Z | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Discrete Wavelet Transformation for Characterization of Time-Series Data.
"""
import numpy as np
from utils import InputData, InputTypes, xmlUtils
from .TimeSeriesAnalyzer import TimeSeriesGenerator, TimeSeriesCharacterizer
# utility methods
class Wavelet(TimeSeriesGenerator, TimeSeriesCharacterizer):
"""
Perform Discrete Wavelet Transformation on time-dependent data.
"""
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for class cls.
@ Out, specs, InputData.ParameterInput, class to use for specifying input of cls.
"""
specs = super(Wavelet, cls).getInputSpecification()
specs.name = 'wavelet'
specs.description = r"""Discrete Wavelet TimeSeriesAnalysis algorithm. Performs a discrete wavelet transform
on time-dependent data. Note: This TSA module requires pywavelets to be installed within your
python environment."""
specs.addSub(InputData.parameterInputFactory(
'family',
contentType=InputTypes.StringType,
descr=r"""The type of wavelet to use for the transformation.
There are several possible families to choose from, and most families contain
more than one variation. For more information regarding the wavelet families,
refer to the Pywavelets documentation located at:
https://pywavelets.readthedocs.io/en/latest/ref/wavelets.html (wavelet-families)
\\
Possible values are:
\begin{itemize}
\item \textbf{haar family}: haar
\item \textbf{db family}: db1, db2, db3, db4, db5, db6, db7, db8, db9, db10, db11,
db12, db13, db14, db15, db16, db17, db18, db19, db20, db21, db22, db23,
db24, db25, db26, db27, db28, db29, db30, db31, db32, db33, db34, db35,
db36, db37, db38
\item \textbf{sym family}: sym2, sym3, sym4, sym5, sym6, sym7, sym8, sym9, sym10,
sym11, sym12, sym13, sym14, sym15, sym16, sym17, sym18, sym19, sym20
\item \textbf{coif family}: coif1, coif2, coif3, coif4, coif5, coif6, coif7, coif8,
coif9, coif10, coif11, coif12, coif13, coif14, coif15, coif16, coif17
\item \textbf{bior family}: bior1.1, bior1.3, bior1.5, bior2.2, bior2.4, bior2.6,
bior2.8, bior3.1, bior3.3, bior3.5, bior3.7, bior3.9, bior4.4, bior5.5,
bior6.8
\item \textbf{rbio family}: rbio1.1, rbio1.3, rbio1.5, rbio2.2, rbio2.4, rbio2.6,
rbio2.8, rbio3.1, rbio3.3, rbio3.5, rbio3.7, rbio3.9, rbio4.4, rbio5.5,
rbio6.8
\item \textbf{dmey family}: dmey
\item \textbf{gaus family}: gaus1, gaus2, gaus3, gaus4, gaus5, gaus6, gaus7, gaus8
\item \textbf{mexh family}: mexh
\item \textbf{morl family}: morl
\item \textbf{cgau family}: cgau1, cgau2, cgau3, cgau4, cgau5, cgau6, cgau7, cgau8
\item \textbf{shan family}: shan
\item \textbf{fbsp family}: fbsp
\item \textbf{cmor family}: cmor
\end{itemize}"""))
return specs
def __init__(self, *args, **kwargs):
"""
A constructor that will appropriately intialize a time-series analysis object
@ In, args, list, an arbitrary list of positional values
@ In, kwargs, dict, an arbitrary dictionary of keywords and values
@ Out, None
"""
# general infrastructure
super().__init__(*args, **kwargs)
def handleInput(self, spec):
"""
Reads user inputs into this object.
@ In, spec, InputData.InputParams, input specifications
@ Out, settings, dict, initialization settings for this algorithm
"""
settings = super().handleInput(spec)
settings['family'] = spec.findFirst('family').value
return settings
def characterize(self, signal, pivot, targets, settings):
"""
This function utilizes the Discrete Wavelet Transform to
characterize a time-dependent series of data.
@ In, signal, np.ndarray, time series with dims [time, target]
@ In, pivot, np.1darray, time-like parameter values
@ In, targets, list(str), names of targets in same order as signal
@ In, settings, dict, additional settings specific to this algorithm
@ Out, params, dict, characteristic parameters
"""
# TODO extend to continuous wavelet transform
try:
import pywt
except ModuleNotFoundError:
print("This RAVEN TSA Module requires the PYWAVELETS library to be installed in the current python environment")
raise ModuleNotFoundError
## The pivot input parameter isn't used explicity in the
## transformation as it assumed/required that each element in the
## time-dependent series is independent, uniquely indexed and
## sorted in time.
family = settings['family']
params = {target: {'results': {}} for target in targets}
for i, target in enumerate(targets):
results = params[target]['results']
results['coeff_a'], results['coeff_d'] = pywt.dwt(signal[:, i], family)
return params
def getParamNames(self, settings):
"""
Return list of expected variable names based on the parameters
@ In, settings, dict, training parameters for this algorithm
@ Out, names, list, string list of names
"""
# FIXME we don't know a priori how many entries will be in the decomp, so we can't register it yet!
raise NotImplementedError('Cannot predict variables for Wavelet!')
names = []
for target in settings['target']:
base = f'{self.name}__{target}'
def getParamsAsVars(self, params):
"""
Map characterization parameters into flattened variable format
@ In, params, dict, trained parameters (as from characterize)
@ Out, rlz, dict, realization-style response
"""
# FIXME we don't know a priori how many entries will be in the decomp, so we can't register it yet!
raise NotImplementedError('Cannot predict variables for Wavelet!')
rlz = {}
for target, info in params.items():
base = f'{self.name}__{target}'
for name, values in info['results'].items():
for v, val in enumerate(values):
rlz[f'{base}__{name}__{v}'] = val
return rlz
def generate(self, params, pivot, settings):
"""
Generates a synthetic history from fitted parameters.
@ In, params, dict, characterization such as otained from self.characterize()
@ In, pivot, np.array(float), pivot parameter values
@ In, settings, dict, additional settings specific to algorithm
@ Out, synthetic, np.array(float), synthetic ARMA signal
"""
try:
import pywt
except ModuleNotFoundError:
print("This RAVEN TSA Module requires the PYWAVELETS library to be installed in the current python environment")
raise ModuleNotFoundError
synthetic = np.zeros((len(pivot), len(params)))
family = settings['family']
for t, (target, _) in enumerate(params.items()):
results = params[target]['results']
cA = results['coeff_a']
cD = results['coeff_d']
synthetic[:, t] = pywt.idwt(cA, cD, family)
return synthetic
def writeXML(self, writeTo, params):
"""
Allows the engine to put whatever it wants into an XML to print to file.
@ In, writeTo, xmlUtils.StaticXmlElement, entity to write to
@ In, params, dict, trained parameters as from self.characterize
@ Out, None
"""
for target, info in params.items():
base = xmlUtils.newNode(target)
writeTo.append(base)
for name, value in info['results'].items():
base.append(xmlUtils.newNode(name, text=','.join([str(v) for v in value])))
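# --- Editor's note: minimal sketch of the underlying transform, not part of this module. ---
# characterize()/generate() above amount to a single-level pywt round trip; the wavelet
# family 'db2' and the random test signal are arbitrary choices for illustration.
#
#     import numpy as np, pywt
#     signal = np.random.random(64)
#     cA, cD = pywt.dwt(signal, 'db2')      # approximation / detail coefficients
#     recon = pywt.idwt(cA, cD, 'db2')      # reconstructs the original signal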
| 41.753846 | 118 | 0.681774 | import numpy as np
from utils import InputData, InputTypes, xmlUtils
from .TimeSeriesAnalyzer import TimeSeriesGenerator, TimeSeriesCharacterizer
class Wavelet(TimeSeriesGenerator, TimeSeriesCharacterizer):
@classmethod
def getInputSpecification(cls):
specs = super(Wavelet, cls).getInputSpecification()
specs.name = 'wavelet'
specs.description = r"""Discrete Wavelet TimeSeriesAnalysis algorithm. Performs a discrete wavelet transform
on time-dependent data. Note: This TSA module requires pywavelets to be installed within your
python environment."""
specs.addSub(InputData.parameterInputFactory(
'family',
contentType=InputTypes.StringType,
descr=r"""The type of wavelet to use for the transformation.
There are several possible families to choose from, and most families contain
more than one variation. For more information regarding the wavelet families,
refer to the Pywavelets documentation located at:
https://pywavelets.readthedocs.io/en/latest/ref/wavelets.html (wavelet-families)
\\
Possible values are:
\begin{itemize}
\item \textbf{haar family}: haar
\item \textbf{db family}: db1, db2, db3, db4, db5, db6, db7, db8, db9, db10, db11,
db12, db13, db14, db15, db16, db17, db18, db19, db20, db21, db22, db23,
db24, db25, db26, db27, db28, db29, db30, db31, db32, db33, db34, db35,
db36, db37, db38
\item \textbf{sym family}: sym2, sym3, sym4, sym5, sym6, sym7, sym8, sym9, sym10,
sym11, sym12, sym13, sym14, sym15, sym16, sym17, sym18, sym19, sym20
\item \textbf{coif family}: coif1, coif2, coif3, coif4, coif5, coif6, coif7, coif8,
coif9, coif10, coif11, coif12, coif13, coif14, coif15, coif16, coif17
\item \textbf{bior family}: bior1.1, bior1.3, bior1.5, bior2.2, bior2.4, bior2.6,
bior2.8, bior3.1, bior3.3, bior3.5, bior3.7, bior3.9, bior4.4, bior5.5,
bior6.8
\item \textbf{rbio family}: rbio1.1, rbio1.3, rbio1.5, rbio2.2, rbio2.4, rbio2.6,
rbio2.8, rbio3.1, rbio3.3, rbio3.5, rbio3.7, rbio3.9, rbio4.4, rbio5.5,
rbio6.8
\item \textbf{dmey family}: dmey
\item \textbf{gaus family}: gaus1, gaus2, gaus3, gaus4, gaus5, gaus6, gaus7, gaus8
\item \textbf{mexh family}: mexh
\item \textbf{morl family}: morl
\item \textbf{cgau family}: cgau1, cgau2, cgau3, cgau4, cgau5, cgau6, cgau7, cgau8
\item \textbf{shan family}: shan
\item \textbf{fbsp family}: fbsp
\item \textbf{cmor family}: cmor
\end{itemize}"""))
return specs
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def handleInput(self, spec):
settings = super().handleInput(spec)
settings['family'] = spec.findFirst('family').value
return settings
def characterize(self, signal, pivot, targets, settings):
try:
import pywt
except ModuleNotFoundError:
print("This RAVEN TSA Module requires the PYWAVELETS library to be installed in the current python environment")
raise ModuleNotFoundError
## transformation as it assumed/required that each element in the
## time-dependent series is independent, uniquely indexed and
## sorted in time.
family = settings['family']
params = {target: {'results': {}} for target in targets}
for i, target in enumerate(targets):
results = params[target]['results']
results['coeff_a'], results['coeff_d'] = pywt.dwt(signal[:, i], family)
return params
def getParamNames(self, settings):
# FIXME we don't know a priori how many entries will be in the decomp, so we can't register it yet!
raise NotImplementedError('Cannot predict variables for Wavelet!')
names = []
for target in settings['target']:
base = f'{self.name}__{target}'
def getParamsAsVars(self, params):
# FIXME we don't know a priori how many entries will be in the decomp, so we can't register it yet!
raise NotImplementedError('Cannot predict variables for Wavelet!')
rlz = {}
for target, info in params.items():
base = f'{self.name}__{target}'
for name, values in info['results'].items():
for v, val in enumerate(values):
rlz[f'{base}__{name}__{v}'] = val
return rlz
def generate(self, params, pivot, settings):
try:
import pywt
except ModuleNotFoundError:
print("This RAVEN TSA Module requires the PYWAVELETS library to be installed in the current python environment")
raise ModuleNotFoundError
synthetic = np.zeros((len(pivot), len(params)))
family = settings['family']
for t, (target, _) in enumerate(params.items()):
results = params[target]['results']
cA = results['coeff_a']
cD = results['coeff_d']
synthetic[:, t] = pywt.idwt(cA, cD, family)
return synthetic
def writeXML(self, writeTo, params):
for target, info in params.items():
base = xmlUtils.newNode(target)
writeTo.append(base)
for name, value in info['results'].items():
base.append(xmlUtils.newNode(name, text=','.join([str(v) for v in value])))
| true | true |
f7006c01dfe517f7ba15beb6a2ddbc1643eb7d09 | 12,557 | py | Python | openstack_dashboard/test/integration_tests/pages/project/network/networkspage.py | Mirantis/mos-horizon | d2444220d959c8b921436bd75459c2face0e71d2 | [
"Apache-2.0"
] | 9 | 2016-06-03T03:53:24.000Z | 2017-05-20T16:53:23.000Z | openstack_dashboard/test/integration_tests/pages/project/network/networkspage.py | Mirantis/mos-horizon | d2444220d959c8b921436bd75459c2face0e71d2 | [
"Apache-2.0"
] | 1 | 2016-09-08T10:57:46.000Z | 2016-09-08T10:59:06.000Z | openstack_dashboard/test/integration_tests/pages/project/network/networkspage.py | Mirantis/mos-horizon | d2444220d959c8b921436bd75459c2face0e71d2 | [
"Apache-2.0"
] | 4 | 2016-08-01T10:50:15.000Z | 2017-02-22T12:11:19.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests.pages import basepage
from openstack_dashboard.test.integration_tests.regions import forms
from openstack_dashboard.test.integration_tests.regions import tables
from selenium.common import exceptions
from selenium.webdriver.common import by
class ElementTable(tables.TableRegion):
name = "element"
CREATE_FORM_FIELDS = ()
EDIT_FORM_FIELDS = ()
@tables.bind_table_action('create')
def create(self, create_button):
create_button.click()
return forms.FormRegion(self.driver, self.conf,
field_mappings=self.CREATE_FORM_FIELDS)
@tables.bind_table_action('delete')
def delete(self, delete_button):
delete_button.click()
return forms.BaseFormRegion(self.driver, self.conf)
@tables.bind_row_action('edit', primary=True)
def edit(self, edit_button, row):
edit_button.click()
return forms.FormRegion(self.driver, self.conf,
field_mappings=self.EDIT_FORM_FIELDS)
class SubnetsTable(ElementTable):
name = "subnets"
CREATE_FORM_FIELDS = (("subnet_name", "cidr", "ip_version",
"gateway_ip", "no_gateway"),
("enable_dhcp", "allocation_pools",
"dns_nameservers", "host_routes"))
EDIT_FORM_FIELDS = CREATE_FORM_FIELDS
@tables.bind_table_action('create')
def create(self, create_button):
create_button.click()
return forms.TabbedFormRegion(self.driver, self.conf,
self.CREATE_FORM_FIELDS)
@tables.bind_row_action('edit')
def edit(self, edit_button):
edit_button.click()
return forms.TabbedFormRegion(self.driver, self.conf,
self.EDIT_FORM_FIELDS)
class NetworksTable(ElementTable):
name = "networks"
CREATE_FORM_FIELDS = (("net_name", "admin_state", "shared",
"with_subnet"),
("subnet_name", "cidr", "ip_version",
"gateway_ip", "no_gateway"),
("enable_dhcp", "allocation_pools",
"dns_nameservers", "host_routes"))
EDIT_FORM_FIELDS = ("name", "network_id", "admin_state",
"shared")
ADD_SUBNET_FORM_FIELDS = (("subnet_name", "cidr", "ip_version",
"gateway_ip", "no_gateway"),
("enable_dhcp", "allocation_pools",
"dns_nameservers", "host_routes"))
@tables.bind_table_action('create')
def create(self, create_button):
create_button.click()
return forms.TabbedFormRegion(self.driver, self.conf,
self.CREATE_FORM_FIELDS)
@tables.bind_row_action('subnet')
def edit_add_subnet(self, edit_button, row):
edit_button.click()
return forms.TabbedFormRegion(self.driver, self.conf,
self.ADD_SUBNET_FORM_FIELDS)
@tables.bind_row_action('delete')
def edit_delete_network(self, delete_button, row):
delete_button.click()
return forms.BaseFormRegion(self.driver, self.conf)
class NetworksPage(basepage.BaseNavigationPage):
DEFAULT_ADMIN_STATE = 'True'
DEFAULT_CREATE_SUBNET = True
DEFAULT_IP_VERSION = '4'
DEFAULT_DISABLE_GATEWAY = False
DEFAULT_ENABLE_DHCP = True
NETWORKS_TABLE_NAME_COLUMN = 'name'
NETWORKS_TABLE_STATUS_COLUMN = 'status'
SUBNET_TAB_INDEX = 1
DETAILS_TAB_INDEX = 2
def __init__(self, driver, conf):
super(NetworksPage, self).__init__(driver, conf)
self._page_title = "Networks"
def _get_row_with_network_name(self, name):
return self.networks_table.get_row(
self.NETWORKS_TABLE_NAME_COLUMN, name)
@property
def networks_table(self):
return NetworksTable(self.driver, self.conf)
def create_network(self, network_name, subnet_name,
admin_state=DEFAULT_ADMIN_STATE,
create_subnet=DEFAULT_CREATE_SUBNET,
network_address=None, ip_version=DEFAULT_IP_VERSION,
gateway_ip=None,
disable_gateway=DEFAULT_DISABLE_GATEWAY,
enable_dhcp=DEFAULT_ENABLE_DHCP, allocation_pools=None,
dns_name_servers=None, host_routes=None):
create_network_form = self.networks_table.create()
create_network_form.net_name.text = network_name
create_network_form.admin_state.value = admin_state
if not create_subnet:
create_network_form.with_subnet.unmark()
else:
create_network_form.switch_to(self.SUBNET_TAB_INDEX)
create_network_form.subnet_name.text = subnet_name
if network_address is None:
network_address = self.conf.network.network_cidr
create_network_form.cidr.text = network_address
create_network_form.ip_version.value = ip_version
if gateway_ip is not None:
create_network_form.gateway_ip.text = gateway_ip
if disable_gateway:
create_network_form.disable_gateway.mark()
create_network_form.switch_to(self.DETAILS_TAB_INDEX)
if not enable_dhcp:
create_network_form.enable_dhcp.unmark()
if allocation_pools is not None:
create_network_form.allocation_pools.text = allocation_pools
if dns_name_servers is not None:
create_network_form.dns_nameservers.text = dns_name_servers
if host_routes is not None:
create_network_form.host_routes.text = host_routes
create_network_form.submit()
def delete_network(self, name):
row = self._get_row_with_network_name(name)
confirm_delete_networks_form = \
self.networks_table.edit_delete_network(row)
confirm_delete_networks_form.submit()
def is_network_present(self, name):
return bool(self._get_row_with_network_name(name))
def is_network_active(self, name):
def cell_getter():
row = self._get_row_with_network_name(name)
return row and row.cells[self.NETWORKS_TABLE_STATUS_COLUMN]
return bool(self.networks_table.wait_cell_status(cell_getter,
'Active'))
def add_subnet(self, net_name, subnet_name,
network_address=None, ip_version=DEFAULT_IP_VERSION,
gateway_ip=None,
disable_gateway=DEFAULT_DISABLE_GATEWAY,
enable_dhcp=DEFAULT_ENABLE_DHCP, allocation_pools=None,
dns_name_servers=None, host_routes=None):
row = self._get_row_with_network_name(net_name)
add_subnet_form = self.networks_table.edit_add_subnet(row)
add_subnet_form.subnet_name.text = subnet_name
if network_address is None:
network_address = self.conf.network.network_cidr
add_subnet_form.cidr.text = network_address
add_subnet_form.ip_version.value = ip_version
if gateway_ip is not None:
add_subnet_form.gateway_ip.text = gateway_ip
if disable_gateway:
add_subnet_form.disable_gateway.mark()
add_subnet_form.switch_to(self.SUBNET_TAB_INDEX)
if not enable_dhcp:
add_subnet_form.enable_dhcp.unmark()
if allocation_pools is not None:
add_subnet_form.allocation_pools.text = allocation_pools
if dns_name_servers is not None:
add_subnet_form.dns_nameservers.text = dns_name_servers
if host_routes is not None:
add_subnet_form.host_routes.text = host_routes
add_subnet_form.submit()
return NetworkOverviewPage(self.driver, self.conf, net_name)
def go_to_overview(self, name):
_network_items_locator = (by.By.CSS_SELECTOR, 'a[href$="/detail"]')
net_items = self._get_elements(*_network_items_locator)
for item in net_items:
if item.text == name:
item.click()
break
else:
raise exceptions.NoSuchElementException(
"Not found element with text: %s" % name)
return NetworkOverviewPage(self.driver, self.conf, name)
class NetworkOverviewPage(basepage.BaseNavigationPage):
DEFAULT_ADMIN_STATE = 'True'
DEFAULT_IP_VERSION = '4'
DEFAULT_DISABLE_GATEWAY = False
DEFAULT_ENABLE_DHCP = True
DETAILS_TAB_INDEX = 1
TABLE_NAME_COLUMN = 'name'
_edit_network_locator = (
by.By.CSS_SELECTOR,
'form.actions_column > .btn-group > a.btn:nth-child(1)')
_dropdown_open_locator = (
by.By.CSS_SELECTOR,
'form.actions_column > .btn-group > a.btn:nth-child(2)')
_dropdown_menu_locator = (
by.By.CSS_SELECTOR,
'form.actions_column > .btn-group > ul.row_actions > li > *')
def __init__(self, driver, conf, network_name):
super(NetworkOverviewPage, self).__init__(driver, conf)
self._page_title = "Network Details: {}".format(network_name)
@property
def subnets_table(self):
return SubnetsTable(self.driver, self.conf)
def _get_row_with_name(self, name, table):
return table.get_row(self.TABLE_NAME_COLUMN, name)
def _get_row_action(self, action_name):
open_dropdown_elem = self._get_element(*self._dropdown_open_locator)
open_dropdown_elem.click()
for action in self._get_elements(*self._dropdown_menu_locator):
pattern = "__action_%s" % action_name
if action.get_attribute('id').endswith(pattern):
action_element = action
break
return action_element
def delete_network(self):
delete_elem = self._get_row_action('delete')
delete_elem.click()
confirm_delete_network_form = forms.BaseFormRegion(self.driver,
self.conf)
confirm_delete_network_form.submit()
return NetworksPage(self.driver, self.conf)
def create_subnet(self, subnet_name,
network_address=None, ip_version=DEFAULT_IP_VERSION,
gateway_ip=None,
disable_gateway=DEFAULT_DISABLE_GATEWAY,
enable_dhcp=DEFAULT_ENABLE_DHCP, allocation_pools=None,
dns_name_servers=None, host_routes=None):
create_subnet_form = self.subnets_table.create()
create_subnet_form.subnet_name.text = subnet_name
if network_address is None:
network_address = self.conf.network.network_cidr
create_subnet_form.cidr.text = network_address
create_subnet_form.ip_version.value = ip_version
if gateway_ip is not None:
create_subnet_form.gateway_ip.text = gateway_ip
if disable_gateway:
create_subnet_form.disable_gateway.mark()
create_subnet_form.tabs.switch_to(self.DETAILS_TAB_INDEX)
if not enable_dhcp:
create_subnet_form.enable_dhcp.unmark()
if allocation_pools is not None:
create_subnet_form.allocation_pools.text = allocation_pools
if dns_name_servers is not None:
create_subnet_form.dns_nameservers.text = dns_name_servers
if host_routes is not None:
create_subnet_form.host_routes.text = host_routes
create_subnet_form.submit()
def delete_subnet(self, name):
row = self._get_row_with_name(name, self.subnets_table)
row.mark()
confirm_delete_subnet_form = self.subnets_table.delete()
confirm_delete_subnet_form.submit()
def is_subnet_present(self, name):
return bool(self._get_row_with_name(name, self.subnets_table))
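# --- Editor's note: illustrative page-object usage, not part of the original module. ---
# The driver/conf objects and the network and subnet names are assumptions; in the
# integration tests they normally come from the test harness fixtures.
#
#     page = NetworksPage(driver, conf)
#     page.create_network('net-demo', 'subnet-demo')
#     assert page.is_network_active('net-demo')
#     page.delete_network('net-demo')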
| 41.305921 | 78 | 0.649359 |
from openstack_dashboard.test.integration_tests.pages import basepage
from openstack_dashboard.test.integration_tests.regions import forms
from openstack_dashboard.test.integration_tests.regions import tables
from selenium.common import exceptions
from selenium.webdriver.common import by
class ElementTable(tables.TableRegion):
name = "element"
CREATE_FORM_FIELDS = ()
EDIT_FORM_FIELDS = ()
@tables.bind_table_action('create')
def create(self, create_button):
create_button.click()
return forms.FormRegion(self.driver, self.conf,
field_mappings=self.CREATE_FORM_FIELDS)
@tables.bind_table_action('delete')
def delete(self, delete_button):
delete_button.click()
return forms.BaseFormRegion(self.driver, self.conf)
@tables.bind_row_action('edit', primary=True)
def edit(self, edit_button, row):
edit_button.click()
return forms.FormRegion(self.driver, self.conf,
field_mappings=self.EDIT_FORM_FIELDS)
class SubnetsTable(ElementTable):
name = "subnets"
CREATE_FORM_FIELDS = (("subnet_name", "cidr", "ip_version",
"gateway_ip", "no_gateway"),
("enable_dhcp", "allocation_pools",
"dns_nameservers", "host_routes"))
EDIT_FORM_FIELDS = CREATE_FORM_FIELDS
@tables.bind_table_action('create')
def create(self, create_button):
create_button.click()
return forms.TabbedFormRegion(self.driver, self.conf,
self.CREATE_FORM_FIELDS)
@tables.bind_row_action('edit')
def edit(self, edit_button):
edit_button.click()
return forms.TabbedFormRegion(self.driver, self.conf,
self.EDIT_FORM_FIELDS)
class NetworksTable(ElementTable):
name = "networks"
CREATE_FORM_FIELDS = (("net_name", "admin_state", "shared",
"with_subnet"),
("subnet_name", "cidr", "ip_version",
"gateway_ip", "no_gateway"),
("enable_dhcp", "allocation_pools",
"dns_nameservers", "host_routes"))
EDIT_FORM_FIELDS = ("name", "network_id", "admin_state",
"shared")
ADD_SUBNET_FORM_FIELDS = (("subnet_name", "cidr", "ip_version",
"gateway_ip", "no_gateway"),
("enable_dhcp", "allocation_pools",
"dns_nameservers", "host_routes"))
@tables.bind_table_action('create')
def create(self, create_button):
create_button.click()
return forms.TabbedFormRegion(self.driver, self.conf,
self.CREATE_FORM_FIELDS)
@tables.bind_row_action('subnet')
def edit_add_subnet(self, edit_button, row):
edit_button.click()
return forms.TabbedFormRegion(self.driver, self.conf,
self.ADD_SUBNET_FORM_FIELDS)
@tables.bind_row_action('delete')
def edit_delete_network(self, delete_button, row):
delete_button.click()
return forms.BaseFormRegion(self.driver, self.conf)
class NetworksPage(basepage.BaseNavigationPage):
DEFAULT_ADMIN_STATE = 'True'
DEFAULT_CREATE_SUBNET = True
DEFAULT_IP_VERSION = '4'
DEFAULT_DISABLE_GATEWAY = False
DEFAULT_ENABLE_DHCP = True
NETWORKS_TABLE_NAME_COLUMN = 'name'
NETWORKS_TABLE_STATUS_COLUMN = 'status'
SUBNET_TAB_INDEX = 1
DETAILS_TAB_INDEX = 2
def __init__(self, driver, conf):
super(NetworksPage, self).__init__(driver, conf)
self._page_title = "Networks"
def _get_row_with_network_name(self, name):
return self.networks_table.get_row(
self.NETWORKS_TABLE_NAME_COLUMN, name)
@property
def networks_table(self):
return NetworksTable(self.driver, self.conf)
def create_network(self, network_name, subnet_name,
admin_state=DEFAULT_ADMIN_STATE,
create_subnet=DEFAULT_CREATE_SUBNET,
network_address=None, ip_version=DEFAULT_IP_VERSION,
gateway_ip=None,
disable_gateway=DEFAULT_DISABLE_GATEWAY,
enable_dhcp=DEFAULT_ENABLE_DHCP, allocation_pools=None,
dns_name_servers=None, host_routes=None):
create_network_form = self.networks_table.create()
create_network_form.net_name.text = network_name
create_network_form.admin_state.value = admin_state
if not create_subnet:
create_network_form.with_subnet.unmark()
else:
create_network_form.switch_to(self.SUBNET_TAB_INDEX)
create_network_form.subnet_name.text = subnet_name
if network_address is None:
network_address = self.conf.network.network_cidr
create_network_form.cidr.text = network_address
create_network_form.ip_version.value = ip_version
if gateway_ip is not None:
create_network_form.gateway_ip.text = gateway_ip
if disable_gateway:
create_network_form.disable_gateway.mark()
create_network_form.switch_to(self.DETAILS_TAB_INDEX)
if not enable_dhcp:
create_network_form.enable_dhcp.unmark()
if allocation_pools is not None:
create_network_form.allocation_pools.text = allocation_pools
if dns_name_servers is not None:
create_network_form.dns_nameservers.text = dns_name_servers
if host_routes is not None:
create_network_form.host_routes.text = host_routes
create_network_form.submit()
def delete_network(self, name):
row = self._get_row_with_network_name(name)
confirm_delete_networks_form = \
self.networks_table.edit_delete_network(row)
confirm_delete_networks_form.submit()
def is_network_present(self, name):
return bool(self._get_row_with_network_name(name))
def is_network_active(self, name):
def cell_getter():
row = self._get_row_with_network_name(name)
return row and row.cells[self.NETWORKS_TABLE_STATUS_COLUMN]
return bool(self.networks_table.wait_cell_status(cell_getter,
'Active'))
def add_subnet(self, net_name, subnet_name,
network_address=None, ip_version=DEFAULT_IP_VERSION,
gateway_ip=None,
disable_gateway=DEFAULT_DISABLE_GATEWAY,
enable_dhcp=DEFAULT_ENABLE_DHCP, allocation_pools=None,
dns_name_servers=None, host_routes=None):
row = self._get_row_with_network_name(net_name)
add_subnet_form = self.networks_table.edit_add_subnet(row)
add_subnet_form.subnet_name.text = subnet_name
if network_address is None:
network_address = self.conf.network.network_cidr
add_subnet_form.cidr.text = network_address
add_subnet_form.ip_version.value = ip_version
if gateway_ip is not None:
add_subnet_form.gateway_ip.text = gateway_ip
if disable_gateway:
add_subnet_form.disable_gateway.mark()
add_subnet_form.switch_to(self.SUBNET_TAB_INDEX)
if not enable_dhcp:
add_subnet_form.enable_dhcp.unmark()
if allocation_pools is not None:
add_subnet_form.allocation_pools.text = allocation_pools
if dns_name_servers is not None:
add_subnet_form.dns_nameservers.text = dns_name_servers
if host_routes is not None:
add_subnet_form.host_routes.text = host_routes
add_subnet_form.submit()
return NetworkOverviewPage(self.driver, self.conf, net_name)
def go_to_overview(self, name):
_network_items_locator = (by.By.CSS_SELECTOR, 'a[href$="/detail"]')
net_items = self._get_elements(*_network_items_locator)
for item in net_items:
if item.text == name:
item.click()
break
else:
raise exceptions.NoSuchElementException(
"Not found element with text: %s" % name)
return NetworkOverviewPage(self.driver, self.conf, name)
class NetworkOverviewPage(basepage.BaseNavigationPage):
DEFAULT_ADMIN_STATE = 'True'
DEFAULT_IP_VERSION = '4'
DEFAULT_DISABLE_GATEWAY = False
DEFAULT_ENABLE_DHCP = True
DETAILS_TAB_INDEX = 1
TABLE_NAME_COLUMN = 'name'
_edit_network_locator = (
by.By.CSS_SELECTOR,
'form.actions_column > .btn-group > a.btn:nth-child(1)')
_dropdown_open_locator = (
by.By.CSS_SELECTOR,
'form.actions_column > .btn-group > a.btn:nth-child(2)')
_dropdown_menu_locator = (
by.By.CSS_SELECTOR,
'form.actions_column > .btn-group > ul.row_actions > li > *')
def __init__(self, driver, conf, network_name):
super(NetworkOverviewPage, self).__init__(driver, conf)
self._page_title = "Network Details: {}".format(network_name)
@property
def subnets_table(self):
return SubnetsTable(self.driver, self.conf)
def _get_row_with_name(self, name, table):
return table.get_row(self.TABLE_NAME_COLUMN, name)
def _get_row_action(self, action_name):
open_dropdown_elem = self._get_element(*self._dropdown_open_locator)
open_dropdown_elem.click()
for action in self._get_elements(*self._dropdown_menu_locator):
pattern = "__action_%s" % action_name
if action.get_attribute('id').endswith(pattern):
action_element = action
break
return action_element
def delete_network(self):
delete_elem = self._get_row_action('delete')
delete_elem.click()
confirm_delete_network_form = forms.BaseFormRegion(self.driver,
self.conf)
confirm_delete_network_form.submit()
return NetworksPage(self.driver, self.conf)
def create_subnet(self, subnet_name,
network_address=None, ip_version=DEFAULT_IP_VERSION,
gateway_ip=None,
disable_gateway=DEFAULT_DISABLE_GATEWAY,
enable_dhcp=DEFAULT_ENABLE_DHCP, allocation_pools=None,
dns_name_servers=None, host_routes=None):
create_subnet_form = self.subnets_table.create()
create_subnet_form.subnet_name.text = subnet_name
if network_address is None:
network_address = self.conf.network.network_cidr
create_subnet_form.cidr.text = network_address
create_subnet_form.ip_version.value = ip_version
if gateway_ip is not None:
create_subnet_form.gateway_ip.text = gateway_ip
if disable_gateway:
create_subnet_form.disable_gateway.mark()
create_subnet_form.tabs.switch_to(self.DETAILS_TAB_INDEX)
if not enable_dhcp:
create_subnet_form.enable_dhcp.unmark()
if allocation_pools is not None:
create_subnet_form.allocation_pools.text = allocation_pools
if dns_name_servers is not None:
create_subnet_form.dns_nameservers.text = dns_name_servers
if host_routes is not None:
create_subnet_form.host_routes.text = host_routes
create_subnet_form.submit()
def delete_subnet(self, name):
row = self._get_row_with_name(name, self.subnets_table)
row.mark()
confirm_delete_subnet_form = self.subnets_table.delete()
confirm_delete_subnet_form.submit()
def is_subnet_present(self, name):
return bool(self._get_row_with_name(name, self.subnets_table))
| true | true |
f7006e06b03a87904485aac3fcf8e274655056ff | 226 | py | Python | part/pg/fixed_percent/part.py | fasiondog/hikyuu_house | 0f0b609f17d94ba60da193de9c753eae54693817 | [
"MIT"
] | null | null | null | part/pg/fixed_percent/part.py | fasiondog/hikyuu_house | 0f0b609f17d94ba60da193de9c753eae54693817 | [
"MIT"
] | null | null | null | part/pg/fixed_percent/part.py | fasiondog/hikyuu_house | 0f0b609f17d94ba60da193de9c753eae54693817 | [
"MIT"
] | 1 | 2021-12-22T06:31:26.000Z | 2021-12-22T06:31:26.000Z | from hikyuu import PG_FixedPercent
# 部件作者
author = "fasiondog"
# 版本
version = '20200825'
def part(p=0.2):
return PG_FixedPercent(p)
part.__doc__ = PG_FixedPercent.__doc__
if __name__ == '__main__':
print(part()) | 13.294118 | 38 | 0.70354 | from hikyuu import PG_FixedPercent
author = "fasiondog"
version = '20200825'
def part(p=0.2):
return PG_FixedPercent(p)
part.__doc__ = PG_FixedPercent.__doc__
if __name__ == '__main__':
print(part()) | true | true |
f7006fac7a55e33300dd99dba56d7fa113cff7fb | 980 | py | Python | kubernetes/test/test_v1_flex_volume_source.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | 1 | 2018-10-20T19:37:57.000Z | 2018-10-20T19:37:57.000Z | kubernetes/test/test_v1_flex_volume_source.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_flex_volume_source.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | 2 | 2018-07-27T19:39:34.000Z | 2020-12-25T02:48:27.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_flex_volume_source import V1FlexVolumeSource
class TestV1FlexVolumeSource(unittest.TestCase):
""" V1FlexVolumeSource unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1FlexVolumeSource(self):
"""
Test V1FlexVolumeSource
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_flex_volume_source.V1FlexVolumeSource()
pass
if __name__ == '__main__':
unittest.main()
| 21.777778 | 105 | 0.714286 |
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_flex_volume_source import V1FlexVolumeSource
class TestV1FlexVolumeSource(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testV1FlexVolumeSource(self):
pass
if __name__ == '__main__':
unittest.main()
| true | true |
f700701e51582a6f314450ea9547949094b4db62 | 3,429 | py | Python | fineract/objects/group.py | mobidevke/py-fineract | 712b0c20686accd7d7e0a2356ccaf59c5fe4f7dd | [
"Apache-2.0"
] | 7 | 2019-03-11T16:17:33.000Z | 2020-10-22T21:57:51.000Z | fineract/objects/group.py | mobidevke/py-fineract | 712b0c20686accd7d7e0a2356ccaf59c5fe4f7dd | [
"Apache-2.0"
] | 3 | 2019-11-05T20:22:16.000Z | 2019-12-11T17:09:04.000Z | fineract/objects/group.py | mobidevke/py-fineract | 712b0c20686accd7d7e0a2356ccaf59c5fe4f7dd | [
"Apache-2.0"
] | 2 | 2020-11-19T16:00:36.000Z | 2021-11-19T09:36:13.000Z | from fineract.objects.fineract_object import DataFineractObject
from fineract.objects.types import Type
class Group(DataFineractObject):
"""
This class represents a Group.
"""
def __repr__(self):
return self.get__repr__({'group_id': self.id})
def _init_attributes(self):
self.id = None
self.account_no = None
self.external_id = None
self.name = None
self.status = None
self.active = None
self.activation_date = None
self.office_id = None
self.office_name = None
self.hierarchy = None
def _use_attributes(self, attributes):
self.id = attributes.get('id', None)
self.account_no = attributes.get('accountNo', None)
self.external_id = attributes.get('externalId', None)
self.name = attributes.get('name', None)
self.status = self._make_fineract_object(GroupStatus, attributes.get('status', None))
self.active = attributes.get('active', None)
self.activation_date = self._make_date_object(attributes.get('activationDate', None))
self.office_id = attributes.get('officeId', None)
self.office_name = attributes.get('officeName', None)
self.hierarchy = attributes.get('hierarchy', None)
def add_members(self, members_list):
params = {
'clientMembers': members_list
}
data = self.request_handler.make_request(
'POST',
'/groups/{}?command=associateClients'.format(self.id),
json=params
)
return data['groupId'] == self.id
def remove_members(self, members_list):
params = {
'clientMembers': members_list
}
data = self.request_handler.make_request(
'POST',
'/groups/{}?command=disassociateClients'.format(self.id),
json=params
)
return data['groupId'] == self.id
@classmethod
def create(cls, request_handler, name, office_id, active=True, activation_date=None):
"""Create a group
:param request_handler:
:param name:
:param office_id:
:param active:
:param activation_date:
:rtype: :class:`fineract.objects.group.Group`
"""
data = {
'name': name,
'officeId': office_id,
'active': active,
'activationDate': activation_date or cls._get_current_date()
}
res = request_handler.make_request(
'POST',
'/groups',
json=data
)
group_id = res['groupId']
return cls(request_handler,
request_handler.make_request(
'GET',
'/groups/{}'.format(group_id)
), False)
@classmethod
def get_group_by_name(cls, request_handler, name):
"""Get a group by name
:param request_handler:
:param name:
:rtype: :class:`fineract.objects.group.Group`
"""
data = request_handler.make_request(
'GET',
'/groups'
)
if data:
for item in data:
if item['name'] == name:
print(item)
return cls(request_handler, item, False)
return None
class GroupStatus(Type):
"""
This class represents a Group status.
"""
pass
| 29.307692 | 93 | 0.567221 | from fineract.objects.fineract_object import DataFineractObject
from fineract.objects.types import Type
class Group(DataFineractObject):
def __repr__(self):
return self.get__repr__({'group_id': self.id})
def _init_attributes(self):
self.id = None
self.account_no = None
self.external_id = None
self.name = None
self.status = None
self.active = None
self.activation_date = None
self.office_id = None
self.office_name = None
self.hierarchy = None
def _use_attributes(self, attributes):
self.id = attributes.get('id', None)
self.account_no = attributes.get('accountNo', None)
self.external_id = attributes.get('externalId', None)
self.name = attributes.get('name', None)
self.status = self._make_fineract_object(GroupStatus, attributes.get('status', None))
self.active = attributes.get('active', None)
self.activation_date = self._make_date_object(attributes.get('activationDate', None))
self.office_id = attributes.get('officeId', None)
self.office_name = attributes.get('officeName', None)
self.hierarchy = attributes.get('hierarchy', None)
def add_members(self, members_list):
params = {
'clientMembers': members_list
}
data = self.request_handler.make_request(
'POST',
'/groups/{}?command=associateClients'.format(self.id),
json=params
)
return data['groupId'] == self.id
def remove_members(self, members_list):
params = {
'clientMembers': members_list
}
data = self.request_handler.make_request(
'POST',
'/groups/{}?command=disassociateClients'.format(self.id),
json=params
)
return data['groupId'] == self.id
@classmethod
def create(cls, request_handler, name, office_id, active=True, activation_date=None):
data = {
'name': name,
'officeId': office_id,
'active': active,
'activationDate': activation_date or cls._get_current_date()
}
res = request_handler.make_request(
'POST',
'/groups',
json=data
)
group_id = res['groupId']
return cls(request_handler,
request_handler.make_request(
'GET',
'/groups/{}'.format(group_id)
), False)
@classmethod
def get_group_by_name(cls, request_handler, name):
data = request_handler.make_request(
'GET',
'/groups'
)
if data:
for item in data:
if item['name'] == name:
print(item)
return cls(request_handler, item, False)
return None
class GroupStatus(Type):
pass
| true | true |
f700704654c1bdfa793688460f5e7faa103312e6 | 734 | py | Python | magic_notifier/sms_clients/cgsms_client.py | jefcolbi/django-magic-notifier | f0b035027a165c7dbbd166cd0dfbf52d9b0a11c6 | [
"MIT"
] | 13 | 2021-07-25T19:03:43.000Z | 2022-01-30T23:53:03.000Z | magic_notifier/sms_clients/cgsms_client.py | jefcolbi/django-magic-notifier | f0b035027a165c7dbbd166cd0dfbf52d9b0a11c6 | [
"MIT"
] | null | null | null | magic_notifier/sms_clients/cgsms_client.py | jefcolbi/django-magic-notifier | f0b035027a165c7dbbd166cd0dfbf52d9b0a11c6 | [
"MIT"
] | null | null | null | import logging
import requests
from django.conf import settings
from .base import BaseSmsClient
logger = logging.getLogger("notifier")
class CGSmsClient(BaseSmsClient):
@classmethod
def send(cls, number: str, text: str, **kwargs):
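        """Send ``text`` to ``number`` through the CheapGlobalSMS HTTP API.

        Credentials are read from the ``NOTIFIER['SMS']['GATEWAYS']['CGS']``
        Django settings; the raw ``requests`` response is returned.
        """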
sub_account = settings.NOTIFIER["SMS"]["GATEWAYS"]["CGS"]["SUB_ACCOUNT"]
sub_account_pass = settings.NOTIFIER["SMS"]["GATEWAYS"]["CGS"]["SUB_ACCOUNT_PASSWORD"]
params = {
"sub_account": sub_account,
"sub_account_pass": sub_account_pass,
"action": "send_sms",
"message": text,
"recipients": number,
}
res = requests.get("http://cheapglobalsms.com/api_v1", params=params)
return res
| 28.230769 | 94 | 0.633515 | import logging
import requests
from django.conf import settings
from .base import BaseSmsClient
logger = logging.getLogger("notifier")
class CGSmsClient(BaseSmsClient):
@classmethod
def send(cls, number: str, text: str, **kwargs):
sub_account = settings.NOTIFIER["SMS"]["GATEWAYS"]["CGS"]["SUB_ACCOUNT"]
sub_account_pass = settings.NOTIFIER["SMS"]["GATEWAYS"]["CGS"]["SUB_ACCOUNT_PASSWORD"]
params = {
"sub_account": sub_account,
"sub_account_pass": sub_account_pass,
"action": "send_sms",
"message": text,
"recipients": number,
}
res = requests.get("http://cheapglobalsms.com/api_v1", params=params)
return res
| true | true |
f700708d0b9048b76b77d12bc9318376b5a488e6 | 114,661 | py | Python | CppHeaderParser-2.7/CppHeaderParser/CppHeaderParser.py | lamondlab/sipify | 0e2f345ddc9514666d067fc32851882ffb22944a | [
"Apache-2.0"
] | 4 | 2018-03-02T10:53:04.000Z | 2019-01-25T07:56:13.000Z | CppHeaderParser-2.7/CppHeaderParser/CppHeaderParser.py | lamondlab/sipify | 0e2f345ddc9514666d067fc32851882ffb22944a | [
"Apache-2.0"
] | 1 | 2018-05-08T11:12:14.000Z | 2021-07-13T09:29:12.000Z | CppHeaderParser-2.7/CppHeaderParser/CppHeaderParser.py | lamondlab/sipify | 0e2f345ddc9514666d067fc32851882ffb22944a | [
"Apache-2.0"
] | 1 | 2018-05-11T05:14:00.000Z | 2018-05-11T05:14:00.000Z | #!/usr/bin/python
#
# Author: Jashua R. Cloutier (contact via https://bitbucket.org/senex)
# Project: http://senexcanis.com/open-source/cppheaderparser/
#
# Copyright (C) 2011, Jashua R. Cloutier
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Jashua R. Cloutier nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission. Stories,
# blog entries etc making reference to this project may mention the
# name Jashua R. Cloutier in terms of project originator/creator etc.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#
# The CppHeaderParser.py script is written in Python 2.4 and released to
# the open source community for continuous improvements under the BSD
# 2.0 new license, which can be found at:
#
# http://www.opensource.org/licenses/bsd-license.php
#
"""Parse C++ header files and generate a data structure
representing the class
"""
import ply.lex as lex
import os
import sys
import re
import inspect
def lineno():
"""Returns the current line number in our program."""
return inspect.currentframe().f_back.f_lineno
version = __version__ = "2.7"
tokens = [
'NUMBER',
'FLOAT_NUMBER',
'TEMPLATE_NAME',
'NAME',
'OPEN_PAREN',
'CLOSE_PAREN',
'OPEN_BRACE',
'CLOSE_BRACE',
'OPEN_SQUARE_BRACKET',
'CLOSE_SQUARE_BRACKET',
'COLON',
'SEMI_COLON',
'COMMA',
'TAB',
'BACKSLASH',
'PIPE',
'PERCENT',
'EXCLAMATION',
'CARET',
'COMMENT_SINGLELINE',
'COMMENT_MULTILINE',
'PRECOMP_MACRO',
'PRECOMP_MACRO_CONT',
'ASTERISK',
'AMPERSTAND',
'EQUALS',
'MINUS',
'PLUS',
'DIVIDE',
'CHAR_LITERAL',
'STRING_LITERAL',
'NEW_LINE',
'SQUOTE',
]
t_ignore = " \r.?@\f"
t_NUMBER = r'[0-9][0-9XxA-Fa-f]*'
t_FLOAT_NUMBER = r'[-+]?[0-9]*\.[0-9]+([eE][-+]?[0-9]+)?'
t_TEMPLATE_NAME = r'CppHeaderParser_template_[0-9]+'
t_NAME = r'[<>A-Za-z_~][A-Za-z0-9_]*'
t_OPEN_PAREN = r'\('
t_CLOSE_PAREN = r'\)'
t_OPEN_BRACE = r'{'
t_CLOSE_BRACE = r'}'
t_OPEN_SQUARE_BRACKET = r'\['
t_CLOSE_SQUARE_BRACKET = r'\]'
t_SEMI_COLON = r';'
t_COLON = r':'
t_COMMA = r','
t_TAB = r'\t'
t_BACKSLASH = r'\\'
t_PIPE = r'\|'
t_PERCENT = r'%'
t_CARET = r'\^'
t_EXCLAMATION = r'!'
t_PRECOMP_MACRO = r'\#.*'
t_PRECOMP_MACRO_CONT = r'.*\\\n'
def t_COMMENT_SINGLELINE(t):
r'\/\/.*\n'
global doxygenCommentCache
if t.value.startswith("///") or t.value.startswith("//!"):
if doxygenCommentCache:
doxygenCommentCache += "\n"
if t.value.endswith("\n"):
doxygenCommentCache += t.value[:-1]
else:
doxygenCommentCache += t.value
t.lexer.lineno += len([a for a in t.value if a=="\n"])
t_ASTERISK = r'\*'
t_MINUS = r'\-'
t_PLUS = r'\+'
t_DIVIDE = r'/(?!/)'
t_AMPERSTAND = r'&'
t_EQUALS = r'='
t_CHAR_LITERAL = "'.'"
t_SQUOTE = "'"
#found at http://wordaligned.org/articles/string-literals-and-regular-expressions
#TODO: This does not work with the string "bla \" bla"
t_STRING_LITERAL = r'"([^"\\]|\\.)*"'
#Found at http://ostermiller.org/findcomment.html
def t_COMMENT_MULTILINE(t):
r'/\*([^*]|[\r\n]|(\*+([^*/]|[\r\n])))*\*+/'
global doxygenCommentCache
if t.value.startswith("/**") or t.value.startswith("/*!"):
#not sure why, but get double new lines
v = t.value.replace("\n\n", "\n")
#strip prefixing whitespace
v = re.sub("\n[\s]+\*", "\n*", v)
doxygenCommentCache += v
t.lexer.lineno += len([a for a in t.value if a=="\n"])
def t_NEWLINE(t):
r'\n+'
t.lexer.lineno += len(t.value)
def t_error(v):
print(( "Lex error: ", v ))
lex.lex()
# Controls error_print
print_errors = 1
# Controls warning_print
print_warnings = 1
# Controls debug_print
debug = 0
# Controls trace_print
debug_trace = 0
def error_print(arg):
if print_errors: print(("[%4d] %s"%(inspect.currentframe().f_back.f_lineno, arg)))
def warning_print(arg):
if print_warnings: print(("[%4d] %s"%(inspect.currentframe().f_back.f_lineno, arg)))
def debug_print(arg):
global debug
if debug: print(("[%4d] %s"%(inspect.currentframe().f_back.f_lineno, arg)))
def trace_print(*arg):
global debug_trace
if debug_trace:
sys.stdout.write("[%s] "%(inspect.currentframe().f_back.f_lineno))
for a in arg: sys.stdout.write("%s "%a)
sys.stdout.write("\n")
supportedAccessSpecifier = [
'public',
'protected',
'private',
'public slots',
'protected slots',
'private slots',
'public Q_SLOTS',
'protected Q_SLOTS',
'private Q_SLOTS',
'signals',
'Q_SIGNALS',
]
#Symbols to ignore, usually special macros
ignoreSymbols = [
'Q_OBJECT',
'Q_PROPERTY()',
'Q_DECLARE_FLAGS()',
'Q_INVOKABLE',
]
doxygenCommentCache = ""
#Track what was added in what order and at what depth
parseHistory = []
def is_namespace(nameStack):
"""Determines if a namespace is being specified"""
if len(nameStack) == 0:
return False
if nameStack[0] == "namespace":
return True
return False
def is_enum_namestack(nameStack):
"""Determines if a namestack is an enum namestack"""
if len(nameStack) == 0:
return False
if nameStack[0] == "enum":
return True
if len(nameStack) > 1 and nameStack[0] == "typedef" and nameStack[1] == "enum":
return True
return False
def is_fundamental(s):
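    """Returns True when every token in the type string is a fundamental C/C++ type keyword (or 'struct'/'union'/'*')."""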
for a in s.split():
if a not in ["size_t", "struct", "union", "unsigned", "signed", "bool", "char", "short", "int", "float", "double", "long", "void", "*"]: return False
return True
def is_function_pointer_stack(stack):
"""Count how many non-nested paranthesis are in the stack. Useful for determining if a stack is a function pointer"""
paren_depth = 0
paren_count = 0
star_after_first_paren = False
last_e = None
for e in stack:
if e == "(":
paren_depth += 1
elif e == ")" and paren_depth > 0:
paren_depth -= 1
if paren_depth == 0:
paren_count += 1
elif e == "*" and last_e == "(" and paren_count == 0 and paren_depth == 1:
star_after_first_paren = True
last_e = e
if star_after_first_paren and paren_count == 2:
return True
else:
return False
def is_method_namestack(stack):
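    """Heuristically decides whether a name stack represents a method/function declaration; typedefs, function pointers and parenthesized property initializers are rejected."""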
r = False
if '(' not in stack: r = False
elif stack[0] == 'typedef': r = False # TODO deal with typedef function prototypes
#elif '=' in stack and stack.index('=') < stack.index('(') and stack[stack.index('=')-1] != 'operator': r = False #disabled July6th - allow all operators
elif 'operator' in stack: r = True # allow all operators
elif '{' in stack and stack.index('{') < stack.index('('): r = False # struct that looks like a method/class
elif '(' in stack and ')' in stack:
if '{' in stack and '}' in stack: r = True
elif stack[-1] == ';':
if is_function_pointer_stack(stack):
r = False
else:
r = True
elif '{' in stack: r = True # ideally we catch both braces... TODO
else: r = False
#Test for case of property set to something with parens such as "static const int CONST_A = (1 << 7) - 1;"
if r and "(" in stack and "=" in stack and 'operator' not in stack:
if stack.index("=") < stack.index("("): r = False
return r
def is_property_namestack(nameStack):
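    """Heuristically decides whether a name stack represents a property/variable declaration, treating function pointers as properties."""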
r = False
if '(' not in nameStack and ')' not in nameStack: r = True
elif "(" in nameStack and "=" in nameStack and nameStack.index("=") < nameStack.index("("): r = True
#See if we are a function pointer
if not r and is_function_pointer_stack(nameStack): r = True
return r
def detect_lineno(s):
"""Detect the line number for a given token string"""
try:
rtn = s.lineno()
if rtn != -1:
return rtn
except: pass
global curLine
return curLine
def filter_out_attribute_keyword(stack):
"""Strips __attribute__ and its parenthetical expression from the stack"""
if "__attribute__" not in stack: return stack
try:
debug_print("Stripping __attribute__ from %s"% stack)
attr_index = stack.index("__attribute__")
attr_end = attr_index + 1 #Assuming not followed by parenthetical expression which wont happen
#Find final paren
if stack[attr_index + 1] == '(':
paren_count = 1
for i in range(attr_index + 2, len(stack)):
elm = stack[i]
if elm == '(':
paren_count += 1
elif elm == ')':
paren_count -= 1
if paren_count == 0:
attr_end = i + 1
break
new_stack = stack[0:attr_index] + stack[attr_end:]
debug_print("stripped stack is %s"% new_stack)
return new_stack
except:
return stack
class TagStr(str):
"""Wrapper for a string that allows us to store the line number associated with it"""
lineno_reg = {}
def __new__(cls,*args,**kw):
new_obj = str.__new__(cls,*args)
if "lineno" in kw:
TagStr.lineno_reg[id(new_obj)] = kw["lineno"]
return new_obj
def __del__(self):
try:
del TagStr.lineno_reg[id(self)]
except: pass
def lineno(self):
return TagStr.lineno_reg.get(id(self), -1)
class CppParseError(Exception): pass
class CppClass(dict):
"""Takes a name stack and turns it into a class
Contains the following Keys:
self['name'] - Name of the class
self['doxygen'] - Doxygen comments associated with the class if they exist
self['inherits'] - List of Classes that this one inherits where the values
are of the form {"access": Anything in supportedAccessSpecifier
"class": Name of the class
self['methods'] - Dictionary where keys are from supportedAccessSpecifier
and values are a lists of CppMethod's
self['properties'] - Dictionary where keys are from supportedAccessSpecifier
and values are lists of CppVariable's
self['enums'] - Dictionary where keys are from supportedAccessSpecifier and
values are lists of CppEnum's
self['structs'] - Dictionary where keys are from supportedAccessSpecifier and
values are lists of nested Struct's
An example of how this could look is as follows:
#self =
{
'name': ""
'inherits':[]
'methods':
{
'public':[],
'protected':[],
'private':[]
},
'properties':
{
'public':[],
'protected':[],
'private':[]
},
'enums':
{
'public':[],
'protected':[],
'private':[]
}
}
"""
def get_all_methods(self):
r = []
for typ in supportedAccessSpecifier: r += self['methods'][typ]
return r
def get_all_method_names( self ):
r = []
for typ in supportedAccessSpecifier: r += self.get_method_names(typ) # returns list
return r
def get_all_pure_virtual_methods( self ):
r = {}
for typ in supportedAccessSpecifier: r.update(self.get_pure_virtual_methods(typ)) # returns dict
return r
def get_method_names( self, type='public' ): return [ meth['name'] for meth in self['methods'][ type ] ]
def get_pure_virtual_methods( self, type='public' ):
r = {}
for meth in self['methods'][ type ]:
if meth['pure_virtual']: r[ meth['name'] ] = meth
return r
def __init__(self, nameStack, curTemplate):
self['nested_classes'] = []
self['parent'] = None
self['abstract'] = False
self._public_enums = {}
self._public_structs = {}
self._public_typedefs = {}
self._public_forward_declares = []
self['namespace'] = ""
debug_print( "Class: %s"%nameStack )
debug_print( "Template: %s"%curTemplate)
if (len(nameStack) < 2):
nameStack.insert(1, "")#anonymous struct
global doxygenCommentCache
if len(doxygenCommentCache):
self["doxygen"] = doxygenCommentCache
doxygenCommentCache = ""
if "::" in "".join(nameStack):
            #Re-Join class paths (ex ['class', 'Bar', ':', ':', 'Foo'] -> ['class', 'Bar::Foo'])
try:
new_nameStack = []
for name in nameStack:
if len(new_nameStack) == 0:
new_nameStack.append(name)
elif name == ":" and new_nameStack[-1].endswith(":"):
new_nameStack[-1] += name
elif new_nameStack[-1].endswith("::"):
new_nameStack[-2] += new_nameStack[-1] + name
del new_nameStack[-1]
else:
new_nameStack.append(name)
trace_print("Convert from namestack\n %s\nto\n%s"%(nameStack, new_nameStack))
nameStack = new_nameStack
except: pass
# Handle final specifier
self["final"] = False
try:
final_index = nameStack.index("final")
            # Don't trip up the rest of the logic
del nameStack[final_index]
self["final"] = True
trace_print("final")
except: pass
self["name"] = nameStack[1]
self["line_number"] = detect_lineno(nameStack[0])
#Handle template classes
if len(nameStack) > 3 and nameStack[2].startswith("<"):
open_template_count = 0
param_separator = 0
found_first = False
i = 0
for elm in nameStack:
if '<' in elm :
open_template_count += 1
found_first = True
elif '>' in elm:
open_template_count -= 1
if found_first and open_template_count == 0:
self["name"] = "".join(nameStack[1:i + 1])
break;
i += 1
elif ":" in nameStack:
self['name'] = nameStack[ nameStack.index(':') - 1 ]
inheritList = []
if nameStack.count(':') == 1:
nameStack = nameStack[nameStack.index(":") + 1:]
while len(nameStack):
tmpStack = []
tmpInheritClass = {"access":"private", "virtual": False}
if "," in nameStack:
tmpStack = nameStack[:nameStack.index(",")]
nameStack = nameStack[nameStack.index(",") + 1:]
else:
tmpStack = nameStack
nameStack = []
# Convert template classes to one name in the last index
for i in range(0, len(tmpStack)):
if '<' in tmpStack[i]:
tmpStack2 = tmpStack[:i-1]
tmpStack2.append("".join(tmpStack[i-1:]))
tmpStack = tmpStack2
break
if len(tmpStack) == 0:
break;
elif len(tmpStack) == 1:
tmpInheritClass["class"] = tmpStack[0]
elif len(tmpStack) == 2:
tmpInheritClass["access"] = tmpStack[0]
tmpInheritClass["class"] = tmpStack[1]
elif len(tmpStack) == 3 and "virtual" in tmpStack:
tmpInheritClass["access"] = tmpStack[1] if tmpStack[1] != "virtual" else tmpStack[0]
tmpInheritClass["class"] = tmpStack[2]
tmpInheritClass["virtual"] = True
else:
warning_print( "Warning: can not parse inheriting class %s"%(" ".join(tmpStack)))
if '>' in tmpStack: pass # allow skip templates for now
                    else: raise NotImplementedError
if 'class' in tmpInheritClass: inheritList.append(tmpInheritClass)
elif nameStack.count(':') == 2: self['parent'] = self['name']; self['name'] = nameStack[-1]
elif nameStack.count(':') > 2 and nameStack[0] in ("class", "struct"):
tmpStack = nameStack[nameStack.index(":") + 1:]
superTmpStack = [[]]
for tok in tmpStack:
if tok == ',':
superTmpStack.append([])
else:
superTmpStack[-1].append(tok)
for tmpStack in superTmpStack:
tmpInheritClass = {"access":"private"}
if len(tmpStack) and tmpStack[0] in supportedAccessSpecifier:
tmpInheritClass["access"] = tmpStack[0]
tmpStack = tmpStack[1:]
inheritNSStack = []
while len(tmpStack) > 3:
                        if tmpStack[0] == ':': break
                        if tmpStack[1] != ':': break
                        if tmpStack[2] != ':': break
inheritNSStack.append(tmpStack[0])
tmpStack = tmpStack[3:]
if len(tmpStack) == 1 and tmpStack[0] != ':':
inheritNSStack.append(tmpStack[0])
tmpInheritClass["class"] = "::".join(inheritNSStack)
inheritList.append(tmpInheritClass)
self['inherits'] = inheritList
if curTemplate:
self["template"] = curTemplate
trace_print("Setting template to '%s'"%self["template"])
methodAccessSpecificList = {}
propertyAccessSpecificList = {}
enumAccessSpecificList = {}
structAccessSpecificList = {}
typedefAccessSpecificList = {}
forwardAccessSpecificList = {}
for accessSpecifier in supportedAccessSpecifier:
methodAccessSpecificList[accessSpecifier] = []
propertyAccessSpecificList[accessSpecifier] = []
enumAccessSpecificList[accessSpecifier] = []
structAccessSpecificList[accessSpecifier] = []
typedefAccessSpecificList[accessSpecifier] = []
forwardAccessSpecificList[accessSpecifier] = []
self['methods'] = methodAccessSpecificList
self['properties'] = propertyAccessSpecificList
self['enums'] = enumAccessSpecificList
self['structs'] = structAccessSpecificList
self['typedefs'] = typedefAccessSpecificList
self['forward_declares'] = forwardAccessSpecificList
def show(self):
"""Convert class to a string"""
namespace_prefix = ""
if self["namespace"]: namespace_prefix = self["namespace"] + "::"
rtn = "%s %s"%(self["declaration_method"], namespace_prefix + self["name"])
if self["final"]: rtn += " final"
if self['abstract']: rtn += ' (abstract)\n'
else: rtn += '\n'
if 'doxygen' in list(self.keys()): rtn += self["doxygen"] + '\n'
if 'parent' in list(self.keys()) and self['parent']: rtn += 'parent class: ' + self['parent'] + '\n'
if "inherits" in list(self.keys()):
rtn += " Inherits: "
for inheritClass in self["inherits"]:
if inheritClass["virtual"]: rtn += "virtual "
rtn += "%s %s, "%(inheritClass["access"], inheritClass["class"])
rtn += "\n"
rtn += " {\n"
for accessSpecifier in supportedAccessSpecifier:
rtn += " %s\n"%(accessSpecifier)
#Enums
if (len(self["enums"][accessSpecifier])):
rtn += " <Enums>\n"
for enum in self["enums"][accessSpecifier]:
rtn += " %s\n"%(repr(enum))
#Properties
if (len(self["properties"][accessSpecifier])):
rtn += " <Properties>\n"
for property in self["properties"][accessSpecifier]:
rtn += " %s\n"%(repr(property))
#Methods
if (len(self["methods"][accessSpecifier])):
rtn += " <Methods>\n"
for method in self["methods"][accessSpecifier]:
rtn += "\t\t" + method.show() + '\n'
rtn += " }\n"
print(rtn)
def __str__(self):
"""Convert class to a string"""
namespace_prefix = ""
if self["namespace"]: namespace_prefix = self["namespace"] + "::"
rtn = "%s %s"%(self["declaration_method"], namespace_prefix + self["name"])
if self["final"]: rtn += " final"
if self['abstract']: rtn += ' (abstract)\n'
else: rtn += '\n'
if 'doxygen' in list(self.keys()): rtn += self["doxygen"] + '\n'
if 'parent' in list(self.keys()) and self['parent']: rtn += 'parent class: ' + self['parent'] + '\n'
if "inherits" in list(self.keys()) and len(self["inherits"]):
rtn += "Inherits: "
for inheritClass in self["inherits"]:
if inheritClass.get("virtual", False): rtn += "virtual "
rtn += "%s %s, "%(inheritClass["access"], inheritClass["class"])
rtn += "\n"
rtn += "{\n"
for accessSpecifier in supportedAccessSpecifier:
rtn += "%s\n"%(accessSpecifier)
#Enums
if (len(self["enums"][accessSpecifier])):
rtn += " // Enums\n"
for enum in self["enums"][accessSpecifier]:
rtn += " %s\n"%(repr(enum))
#Properties
if (len(self["properties"][accessSpecifier])):
rtn += " // Properties\n"
for property in self["properties"][accessSpecifier]:
rtn += " %s\n"%(repr(property))
#Methods
if (len(self["methods"][accessSpecifier])):
rtn += " // Methods\n"
for method in self["methods"][accessSpecifier]:
rtn += " %s\n"%(repr(method))
rtn += "}\n"
return rtn
class CppUnion( CppClass ):
"""Takes a name stack and turns it into a union
Contains the following Keys:
self['name'] - Name of the union
self['doxygen'] - Doxygen comments associated with the union if they exist
self['members'] - List of members the union has
An example of how this could look is as follows:
#self =
{
'name': ""
'members': []
}
"""
def __init__(self, nameStack):
CppClass.__init__(self, nameStack, None)
self["name"] = "union " + self["name"]
self["members"] = self["properties"]["public"]
def transform_to_union_keys(self):
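        """Drops the class-only keys (inherits, methods, ...) that have no meaning for a union."""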
print("union keys: %s"%list(self.keys()))
for key in ['inherits', 'parent', 'abstract', 'namespace', 'typedefs', 'methods']:
del self[key]
def show(self):
"""Convert class to a string"""
print(self)
def __str__(self):
"""Convert class to a string"""
namespace_prefix = ""
if self["namespace"]: namespace_prefix = self["namespace"] + "::"
rtn = "%s %s"%(self["declaration_method"], namespace_prefix + self["name"])
if self['abstract']: rtn += ' (abstract)\n'
else: rtn += '\n'
if 'doxygen' in list(self.keys()): rtn += self["doxygen"] + '\n'
if 'parent' in list(self.keys()) and self['parent']: rtn += 'parent class: ' + self['parent'] + '\n'
rtn += "{\n"
for member in self["members"]:
rtn += " %s\n"%(repr(member))
rtn += "}\n"
return rtn
class _CppMethod( dict ):
def _params_helper1( self, stack ):
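        """Returns the raw parameter tokens of a method name stack: strips throw specs and
        __attribute__(...) blocks, detects constructor initializer syntax, and re-joins
        parenthesized default values (e.g. someclass(0,0,0)) into single tokens."""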
# deal with "throw" keyword
if 'throw' in stack: stack = stack[ : stack.index('throw') ]
## remove GCC keyword __attribute__(...) and preserve returns ##
cleaned = []
hit = False; hitOpen = 0; hitClose = 0
for a in stack:
if a == '__attribute__': hit = True
if hit:
if a == '(': hitOpen += 1
elif a == ')': hitClose += 1
if a==')' and hitOpen == hitClose:
hit = False
else:
cleaned.append( a )
stack = cleaned
# also deal with attribute((const)) function prefix #
# TODO this needs to be better #
if len(stack) > 5:
a = ''.join(stack)
if a.startswith('((__const__))'): stack = stack[ 5 : ]
elif a.startswith('__attribute__((__const__))'): stack = stack[ 6 : ]
stack = stack[stack.index('(') + 1: ]
if not stack: return []
if len(stack)>=3 and stack[0]==')' and stack[1]==':': # is this always a constructor?
self['constructor'] = True
return []
stack.reverse(); _end_ = stack.index(')'); stack.reverse()
stack = stack[ : len(stack)-(_end_+1) ]
if '(' not in stack: return stack # safe to return, no defaults that init a class
# transforms ['someclass', '(', '0', '0', '0', ')'] into "someclass(0,0,0)'"
r = []; hit=False
for a in stack:
if a == '(': hit=True
elif a == ')': hit=False
if hit or a == ')': r[-1] = r[-1] + a
else: r.append( a )
return r
def _params_helper2( self, params ):
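        """Saves a back-reference to this method on each parameter and fills in namespace
        information for the parameter types."""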
for p in params:
p['method'] = self # save reference in variable to parent method
if '::' in p['type']:
ns = p['type'].split('::')[0]
if ns not in Resolver.NAMESPACES and ns in Resolver.CLASSES:
p['type'] = self['namespace'] + p['type']
else: p['namespace'] = self[ 'namespace' ]
class CppMethod( _CppMethod ):
"""Takes a name stack and turns it into a method
Contains the following Keys:
self['rtnType'] - Return type of the method (ex. "int")
self['name'] - Name of the method (ex. "getSize")
self['doxygen'] - Doxygen comments associated with the method if they exist
self['parameters'] - List of CppVariables
"""
def show(self):
r = ['method name: %s (%s)' %(self['name'],self['debug']) ]
if self['returns']: r.append( 'returns: %s'%self['returns'] )
if self['parameters']: r.append( 'number arguments: %s' %len(self['parameters']))
if self['pure_virtual']: r.append( 'pure virtual: %s'%self['pure_virtual'] )
if self['constructor']: r.append( 'constructor' )
if self['destructor']: r.append( 'destructor' )
return '\n\t\t '.join( r )
def __init__(self, nameStack, curClass, methinfo, curTemplate):
debug_print( "Method: %s"%nameStack )
debug_print( "Template: %s"%curTemplate )
global doxygenCommentCache
if len(doxygenCommentCache):
self["doxygen"] = doxygenCommentCache
doxygenCommentCache = ""
if "operator" in nameStack:
self["rtnType"] = " ".join(nameStack[:nameStack.index('operator')])
self["name"] = "".join(nameStack[nameStack.index('operator'):nameStack.index('(')])
else:
self["rtnType"] = " ".join(nameStack[:nameStack.index('(') - 1])
self["name"] = " ".join(nameStack[nameStack.index('(') - 1:nameStack.index('(')])
if self["rtnType"].startswith("virtual"):
self["rtnType"] = self["rtnType"][len("virtual"):].strip()
if len(self["rtnType"]) == 0 or self["name"] == curClass:
self["rtnType"] = "void"
self["rtnType"] = self["rtnType"].replace(' : : ', '::' )
self["rtnType"] = self["rtnType"].replace(" <","<")
self["rtnType"] = self["rtnType"].replace(" >",">").replace(">>", "> >").replace(">>", "> >")
self["rtnType"] = self["rtnType"].replace(" ,",",")
for spec in ["const", "final", "override"]:
self[spec] = False
for i in reversed(nameStack):
if i == spec:
self[spec] = True
break
elif i == ")":
break
self.update( methinfo )
self["line_number"] = detect_lineno(nameStack[0])
#Filter out initializer lists used in constructors
try:
paren_depth_counter = 0
for i in range(0, len(nameStack)):
elm = nameStack[i]
if elm == "(":
paren_depth_counter += 1
if elm == ")":
paren_depth_counter -=1
if paren_depth_counter == 0 and nameStack[i+1] == ':':
debug_print("Stripping out initializer list")
nameStack = nameStack[:i+1]
break
except: pass
paramsStack = self._params_helper1( nameStack )
debug_print( "curTemplate: %s"%curTemplate)
if curTemplate:
self["template"] = curTemplate
debug_print( "SET self['template'] to `%s`"%self["template"])
params = []
#See if there is a doxygen comment for the variable
doxyVarDesc = {}
if "doxygen" in self:
doxyLines = self["doxygen"].split("\n")
lastParamDesc = ""
for doxyLine in doxyLines:
if " @param " in doxyLine or " \param " in doxyLine:
try:
#Strip out the param
doxyLine = doxyLine[doxyLine.find("param ") + 6:]
(var, desc) = doxyLine.split(" ", 1)
doxyVarDesc[var] = desc.strip()
lastParamDesc = var
except: pass
elif " @return " in doxyLine or " \return " in doxyLine:
lastParamDesc = ""
# not handled for now
elif lastParamDesc:
try:
doxyLine = doxyLine.strip()
if " " not in doxyLine:
lastParamDesc = ""
continue
doxyLine = doxyLine[doxyLine.find(" ") + 1:]
doxyVarDesc[lastParamDesc] += " " + doxyLine
except: pass
#Create the variable now
while (len(paramsStack)):
            # Find commas that are not nested in <>'s like template types
open_template_count = 0
param_separator = 0
i = 0
for elm in paramsStack:
if '<' in elm :
open_template_count += 1
elif '>' in elm:
open_template_count -= 1
elif elm == ',' and open_template_count == 0:
param_separator = i
break
i += 1
if param_separator:
param = CppVariable(paramsStack[0:param_separator], doxyVarDesc=doxyVarDesc)
if len(list(param.keys())): params.append(param)
paramsStack = paramsStack[param_separator + 1:]
else:
param = CppVariable(paramsStack, doxyVarDesc=doxyVarDesc)
if len(list(param.keys())): params.append(param)
break
self["parameters"] = params
#self._params_helper2( params ) # mods params inplace
def __str__(self):
filter_keys = ("parent", "defined", "operator", "returns_reference")
cpy = dict((k,v) for (k,v) in list(self.items()) if k not in filter_keys)
return "%s"%cpy
class _CppVariable(dict):
def _name_stack_helper( self, stack ):
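        """Normalizes a variable name stack when it carries no default value: records trailing
        digits as self['array'], strips a trailing ':' from the name and drops empty tokens."""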
stack = list(stack)
if '=' not in stack: # TODO refactor me
# check for array[n] and deal with funny array syntax: "int myvar:99"
array = []
while stack and stack[-1].isdigit(): array.append( stack.pop() )
if array: array.reverse(); self['array'] = int(''.join(array))
if stack and stack[-1].endswith(':'): stack[-1] = stack[-1][:-1]
while stack and not stack[-1]: stack.pop() # can be empty
return stack
def init(self):
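        """Initializes resolver bookkeeping (aliases, parent, typedef and resolution counters)
        and normalizes '__const__' to 'const' inside the type string."""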
#assert self['name'] # allow unnamed variables, methods like this: "void func(void);"
a = []
self['aliases'] = []; self['parent'] = None; self['typedef'] = None
for key in 'constant reference pointer static typedefs class fundamental unresolved'.split():
self[ key ] = 0
for b in self['type'].split():
if b == '__const__': b = 'const'
a.append( b )
self['type'] = ' '.join( a )
class CppVariable( _CppVariable ):
"""Takes a name stack and turns it into a method
Contains the following Keys:
self['type'] - Type for the variable (ex. "const string &")
self['name'] - Name of the variable (ex. "numItems")
self['namespace'] - Namespace containing the enum
self['desc'] - Description of the variable if part of a method (optional)
self['doxygen'] - Doxygen comments associated with the method if they exist
self['defaultValue'] - Default value of the variable, this key will only
exist if there is a default value
self['extern'] - True if its an extern, false if not
"""
Vars = []
def __init__(self, nameStack, **kwargs):
debug_print("trace %s"%nameStack)
if len(nameStack) and nameStack[0] == "extern":
self['extern'] = True
del nameStack[0]
else:
self['extern'] = False
_stack_ = nameStack
if "[" in nameStack: #strip off array informatin
arrayStack = nameStack[nameStack.index("["):]
if nameStack.count("[") > 1:
debug_print("Multi dimensional array")
debug_print("arrayStack=%s"%arrayStack)
                nums = list(filter(lambda x: x.isdigit(), arrayStack)) # list() so the dimensions survive both the size loop and the join below
# Calculate size by multiplying all dimensions
p = 1
for n in nums:
p *= int(n)
#Multi dimensional array
self["array_size"] = p
self["multi_dimensional_array"] = 1
self["multi_dimensional_array_size"] = "x".join(nums)
else:
debug_print("Array")
if len(arrayStack) == 3:
self["array_size"] = arrayStack[1]
nameStack = nameStack[:nameStack.index("[")]
self["array"] = 1
else:
self["array"] = 0
nameStack = self._name_stack_helper( nameStack )
global doxygenCommentCache
if len(doxygenCommentCache):
self["doxygen"] = doxygenCommentCache
doxygenCommentCache = ""
debug_print( "Variable: %s"%nameStack )
self["line_number"] = detect_lineno(nameStack[0])
self["function_pointer"] = 0
if (len(nameStack) < 2): # +++
if len(nameStack) == 1: self['type'] = nameStack[0]; self['name'] = ''
else: error_print(_stack_); assert 0
elif is_function_pointer_stack(nameStack): #function pointer
self["type"] = " ".join(nameStack[:nameStack.index("(") + 2] + nameStack[nameStack.index(")") :])
self["name"] = " ".join(nameStack[nameStack.index("(") + 2 : nameStack.index(")")])
self["function_pointer"] = 1
elif ("=" in nameStack):
self["type"] = " ".join(nameStack[:nameStack.index("=") - 1])
self["name"] = nameStack[nameStack.index("=") - 1]
self["defaultValue"] = " ".join(nameStack[nameStack.index("=") + 1:]) # deprecate camelCase in dicts
self['default'] = " ".join(nameStack[nameStack.index("=") + 1:])
elif is_fundamental(nameStack[-1]) or nameStack[-1] in ['>', '<' , ':', '.']:
#Un named parameter
self["type"] = " ".join(nameStack)
self["name"] = ""
else: # common case
self["type"] = " ".join(nameStack[:-1])
self["name"] = nameStack[-1]
self["type"] = self["type"].replace(" :",":")
self["type"] = self["type"].replace(": ",":")
self["type"] = self["type"].replace(" <","<")
self["type"] = self["type"].replace(" >",">").replace(">>", "> >").replace(">>", "> >")
self["type"] = self["type"].replace(" ,",",")
#Optional doxygen description
try:
self["desc"] = kwargs["doxyVarDesc"][self["name"]]
except: pass
self.init()
CppVariable.Vars.append( self ) # save and resolve later
def __str__(self):
keys_white_list = ['constant','name','reference','type','static','pointer','desc', 'line_number', 'extern']
cpy = dict((k,v) for (k,v) in list(self.items()) if k in keys_white_list)
if "array_size" in self: cpy["array_size"] = self["array_size"]
return "%s"%cpy
class _CppEnum(dict):
def resolve_enum_values( self, values ):
"""Evaluates the values list of dictionaries passed in and figures out what the enum value
for each enum is editing in place:
Example:
From: [{'name': 'ORANGE'},
{'name': 'RED'},
{'name': 'GREEN', 'value': '8'}]
To: [{'name': 'ORANGE', 'value': 0},
{'name': 'RED', 'value': 1},
{'name': 'GREEN', 'value': 8}]
"""
t = int; i = 0
names = [ v['name'] for v in values ]
for v in values:
if 'value' in v:
a = v['value'].strip()
                # Remove single quotes from single-quoted chars (unless part of some expression)
if len(a) == 3 and a[0] == "'" and a[2] == "'":
a = v['value'] = a[1]
if a.lower().startswith("0x"):
try:
i = a = int(a , 16)
except:pass
elif a.isdigit():
i = a = int( a )
elif a in names:
for other in values:
if other['name'] == a:
v['value'] = other['value']
break
elif '"' in a or "'" in a: t = str # only if there are quotes it this a string enum
else:
try:
a = i = ord(a)
except: pass
#Allow access of what is in the file pre-convert if converted
if v['value'] != str(a):
v['raw_value'] = v['value']
v['value'] = a
else: v['value'] = i
try:
                v['value'] = v['value'].replace(" < < ", " << ").replace(" > > ", " >> ")
except: pass
i += 1
return t
class CppEnum(_CppEnum):
"""Takes a name stack and turns it into an Enum
Contains the following Keys:
self['name'] - Name of the enum (ex. "ItemState")
self['namespace'] - Namespace containing the enum
self['values'] - List of values where the values are a dictionary of the
form {"name": name of the key (ex. "PARSING_HEADER"),
"value": Specified value of the enum, this key will only exist
if a value for a given enum value was defined
}
"""
def __init__(self, nameStack):
global doxygenCommentCache
if len(doxygenCommentCache):
self["doxygen"] = doxygenCommentCache
doxygenCommentCache = ""
if len(nameStack) == 3 and nameStack[0] == "enum":
debug_print("Created enum as just name/value")
self["name"] = nameStack[1]
self["instances"]=[nameStack[2]]
if len(nameStack) < 4 or "{" not in nameStack or "}" not in nameStack:
#Not enough stuff for an enum
debug_print("Bad enum")
return
valueList = []
self["line_number"] = detect_lineno(nameStack[0])
#Figure out what values it has
valueStack = nameStack[nameStack.index('{') + 1: nameStack.index('}')]
while len(valueStack):
tmpStack = []
if "," in valueStack:
tmpStack = valueStack[:valueStack.index(",")]
valueStack = valueStack[valueStack.index(",") + 1:]
else:
tmpStack = valueStack
valueStack = []
d = {}
if len(tmpStack) == 1: d["name"] = tmpStack[0]
elif len(tmpStack) >= 3 and tmpStack[1] == "=":
d["name"] = tmpStack[0]; d["value"] = " ".join(tmpStack[2:])
elif len(tmpStack) == 2 and tmpStack[1] == "=":
debug_print( "WARN-enum: parser missed value for %s"%tmpStack[0] )
d["name"] = tmpStack[0]
if d: valueList.append( d )
if len(valueList):
self['type'] = self.resolve_enum_values( valueList ) # returns int for standard enum
self["values"] = valueList
else:
warning_print( 'WARN-enum: empty enum %s'%nameStack )
return
#Figure out if it has a name
preBraceStack = nameStack[:nameStack.index("{")]
postBraceStack = nameStack[nameStack.index("}") + 1:]
self["typedef"] = False
if (len(preBraceStack) == 2 and "typedef" not in nameStack):
self["name"] = preBraceStack[1]
elif len(postBraceStack) and "typedef" in nameStack:
self["name"] = " ".join(postBraceStack)
self["typedef"] = True
else: warning_print( 'WARN-enum: nameless enum %s'%nameStack )
#See if there are instances of this
if "typedef" not in nameStack and len(postBraceStack):
self["instances"] = []
for var in postBraceStack:
if "," in var:
continue
self["instances"].append(var)
self["namespace"] = ""
class CppStruct(dict):
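    """Minimal representation of a struct: its 'type' (name), collected 'fields' and the
    line number where it was declared."""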
Structs = []
def __init__(self, nameStack):
if len(nameStack) >= 2: self['type'] = nameStack[1]
else: self['type'] = None
self['fields'] = []
self.Structs.append( self )
global curLine
self["line_number"] = curLine
C99_NONSTANDARD = {
'int8' : 'signed char',
'int16' : 'short int',
'int32' : 'int',
'int64' : 'int64_t', # this can be: long int (64bit), or long long int (32bit)
'uint' : 'unsigned int',
'uint8' : 'unsigned char',
'uint16' : 'unsigned short int',
'uint32' : 'unsigned int',
'uint64' : 'uint64_t', # depends on host bits
}
def standardize_fundamental( s ):
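    """Maps C99-style aliases (int8, uint32, ...) onto their standard C equivalents."""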
if s in C99_NONSTANDARD: return C99_NONSTANDARD[ s ]
else: return s
class Resolver(object):
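    """Shared machinery for the second pass: tracks typedefs, classes, structs and
    namespaces and resolves variable types into concrete and ctypes types."""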
C_FUNDAMENTAL = 'size_t unsigned signed bool char wchar short int float double long void'.split()
C_FUNDAMENTAL += 'struct union enum'.split()
SubTypedefs = {} # TODO deprecate?
NAMESPACES = []
CLASSES = {}
STRUCTS = {}
def initextra(self):
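        """Sets up per-header bookkeeping used while resolving types: typedef tables,
        declaration orderings, brace-level maps and template typename tracking."""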
self.typedefs = {}
self.typedefs_order = []
self.classes_order = []
self.structs = Resolver.STRUCTS
self.structs_order = []
self.namespaces = Resolver.NAMESPACES # save all namespaces
self.curStruct = None
self.stack = [] # full name stack, good idea to keep both stacks? (simple stack and full stack)
self._classes_brace_level = {} # class name : level
self._structs_brace_level = {} # struct type : level
self._method_body = None
self._forward_decls = []
self._template_typenames = [] # template<typename XXX>
def current_namespace(self): return self.cur_namespace(True)
def cur_namespace(self, add_double_colon=False):
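        """Joins the active namespace stack with '::' (adding a trailing '::' when
        add_double_colon is True), e.g. 'Outer::Inner::'."""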
rtn = ""
i = 0
while i < len(self.nameSpaces):
rtn += self.nameSpaces[i]
if add_double_colon or i < len(self.nameSpaces) - 1: rtn += "::"
i+=1
return rtn
def guess_ctypes_type( self, string ):
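        """Best-effort mapping of a C/C++ type string to a ctypes type expression,
        wrapping it in ctypes.POINTER() once for every '*' in the type."""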
pointers = string.count('*')
string = string.replace('*','')
a = string.split()
if 'unsigned' in a: u = 'u'
else: u = ''
if 'long' in a and 'double' in a: b = 'longdouble' # there is no ctypes.c_ulongdouble (this is a 64bit float?)
elif a.count('long') == 2 and 'int' in a: b = '%sint64' %u
elif a.count('long') == 2: b = '%slonglong' %u
elif 'long' in a: b = '%slong' %u
elif 'double' in a: b = 'double' # no udouble in ctypes
elif 'short' in a: b = '%sshort' %u
elif 'char' in a: b = '%schar' %u
elif 'wchar' in a: b = 'wchar'
elif 'bool' in a: b = 'bool'
elif 'float' in a: b = 'float'
elif 'int' in a: b = '%sint' %u
elif 'int8' in a: b = 'int8'
elif 'int16' in a: b = 'int16'
elif 'int32' in a: b = 'int32'
elif 'int64' in a: b = 'int64'
elif 'uint' in a: b = 'uint'
elif 'uint8' in a: b = 'uint8'
elif 'uint16' in a: b = 'uint16'
elif 'uint32' in a: b = 'uint32'
elif 'uint64' in a: b = 'uint64'
elif 'size_t' in a: b = 'size_t'
elif 'void' in a: b = 'void_p'
elif string in 'struct union'.split(): b = 'void_p' # what should be done here? don't trust struct, it could be a class, no need to expose via ctypes
else: b = 'void_p'
if not pointers: return 'ctypes.c_%s' %b
else:
x = ''
for i in range(pointers): x += 'ctypes.POINTER('
x += 'ctypes.c_%s' %b
x += ')' * pointers
return x
def resolve_type( self, string, result ): # recursive
'''
keeps track of useful things like: how many pointers, number of typedefs, is fundamental or a class, etc...
'''
## be careful with templates, what is inside <something*> can be a pointer but the overall type is not a pointer
## these come before a template
s = string.split('<')[0]
result[ 'constant' ] += s.split().count('const')
result[ 'static' ] += s.split().count('static')
result[ 'mutable' ] = 'mutable' in s.split()
## these come after a template
s = string.split('>')[-1]
result[ 'pointer' ] += s.count('*')
result[ 'reference' ] += s.count('&')
x = string; alias = False
for a in '* & const static mutable'.split(): x = x.replace(a,'')
for y in x.split():
if y not in self.C_FUNDAMENTAL: alias = y; break
#if alias == 'class':
# result['class'] = result['name'] # forward decl of class
# result['forward_decl'] = True
if alias == '__extension__': result['fundamental_extension'] = True
elif alias:
result['aliases'].append( alias )
if alias in C99_NONSTANDARD:
result['type'] = C99_NONSTANDARD[ alias ]
result['typedef'] = alias
result['typedefs'] += 1
elif alias in self.typedefs:
result['typedefs'] += 1
result['typedef'] = alias
self.resolve_type( self.typedefs[alias], result )
elif alias in self.classes:
klass = self.classes[alias]; result['fundamental'] = False
result['class'] = klass
result['unresolved'] = False
else: result['unresolved'] = True
else:
result['fundamental'] = True
result['unresolved'] = False
def finalize_vars(self):
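        """Resolves every collected CppVariable (fundamental/class/enum/typedef status and a
        guessed ctypes type), builds the stripped 'raw_type', and finally files the buffered
        preprocessor macros into defines/conditionals/pragmas/includes."""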
for s in CppStruct.Structs: # vars within structs can be ignored if they do not resolve
for var in s['fields']: var['parent'] = s['type']
#for c in self.classes.values():
# for var in c.get_all_properties(): var['parent'] = c['name']
## RESOLVE ##
for var in CppVariable.Vars:
self.resolve_type( var['type'], var )
#if 'method' in var and var['method']['name'] == '_notifyCurrentCamera': print(var); assert 0
# then find concrete type and best guess ctypes type #
for var in CppVariable.Vars:
if not var['aliases']: #var['fundamental']:
var['ctypes_type'] = self.guess_ctypes_type( var['type'] )
else:
var['unresolved'] = False # below may test to True
if var['class']:
var['ctypes_type'] = 'ctypes.c_void_p'
else:
assert var['aliases']
tag = var['aliases'][0]
klass = None
nestedEnum = None
nestedStruct = None
nestedTypedef = None
if 'method' in var and 'parent' in list(var['method'].keys()):
klass = var['method']['parent']
if tag in var['method']['parent']._public_enums:
nestedEnum = var['method']['parent']._public_enums[ tag ]
elif tag in var['method']['parent']._public_structs:
nestedStruct = var['method']['parent']._public_structs[ tag ]
elif tag in var['method']['parent']._public_typedefs:
nestedTypedef = var['method']['parent']._public_typedefs[ tag ]
if '<' in tag: # should also contain '>'
var['template'] = tag # do not resolve templates
var['ctypes_type'] = 'ctypes.c_void_p'
var['unresolved'] = True
elif nestedEnum:
enum = nestedEnum
if enum['type'] is int:
var['ctypes_type'] = 'ctypes.c_int'
var['raw_type'] = 'int'
elif enum['type'] is str:
var['ctypes_type'] = 'ctypes.c_char_p'
var['raw_type'] = 'char*'
var['enum'] = var['method']['path'] + '::' + enum['name']
var['fundamental'] = True
elif nestedStruct:
var['ctypes_type'] = 'ctypes.c_void_p'
var['raw_type'] = var['method']['path'] + '::' + nestedStruct['type']
var['fundamental'] = False
elif nestedTypedef:
var['fundamental'] = is_fundamental( nestedTypedef )
if not var['fundamental']:
var['raw_type'] = var['method']['path'] + '::' + tag
else:
_tag = tag
if '::' in tag and tag.split('::')[0] in self.namespaces: tag = tag.split('::')[-1]
con = self.concrete_typedef( _tag )
if con:
var['concrete_type'] = con
var['ctypes_type'] = self.guess_ctypes_type( var['concrete_type'] )
elif tag in self.structs:
trace_print( 'STRUCT', var )
var['struct'] = tag
var['ctypes_type'] = 'ctypes.c_void_p'
var['raw_type'] = self.structs[tag]['namespace'] + '::' + tag
elif tag in self._forward_decls:
var['forward_declared'] = tag
var['ctypes_type'] = 'ctypes.c_void_p'
elif tag in self.global_enums:
enum = self.global_enums[ tag ]
if enum['type'] is int:
var['ctypes_type'] = 'ctypes.c_int'
var['raw_type'] = 'int'
elif enum['type'] is str:
var['ctypes_type'] = 'ctypes.c_char_p'
var['raw_type'] = 'char*'
var['enum'] = enum['namespace'] + enum['name']
var['fundamental'] = True
elif var['parent']:
warning_print( 'WARN unresolved %s'%_tag)
var['ctypes_type'] = 'ctypes.c_void_p'
var['unresolved'] = True
elif tag.count('::')==1:
trace_print( 'trying to find nested something in', tag )
a = tag.split('::')[0]
b = tag.split('::')[-1]
if a in self.classes: # a::b is most likely something nested in a class
klass = self.classes[ a ]
if b in klass._public_enums:
trace_print( '...found nested enum', b )
enum = klass._public_enums[ b ]
if enum['type'] is int:
var['ctypes_type'] = 'ctypes.c_int'
var['raw_type'] = 'int'
elif enum['type'] is str:
var['ctypes_type'] = 'ctypes.c_char_p'
var['raw_type'] = 'char*'
try:
if 'method' in var: var['enum'] = var['method']['path'] + '::' + enum['name']
else: # class property
var['unresolved'] = True
except:
var['unresolved'] = True
var['fundamental'] = True
else: var['unresolved'] = True # TODO klass._public_xxx
elif a in self.namespaces: # a::b can also be a nested namespace
if b in self.global_enums:
enum = self.global_enums[ b ]
trace_print(enum)
trace_print(var)
assert 0
                    elif b in self.global_enums: # falling back, this is a bit ugly
enum = self.global_enums[ b ]
assert a in enum['namespace'].split('::')
if enum['type'] is int:
var['ctypes_type'] = 'ctypes.c_int'
var['raw_type'] = 'int'
elif enum['type'] is str:
var['ctypes_type'] = 'ctypes.c_char_p'
var['raw_type'] = 'char*'
var['fundamental'] = True
else: # boost::gets::crazy
trace_print('NAMESPACES', self.namespaces)
trace_print( a, b )
trace_print( '---- boost gets crazy ----' )
var['ctypes_type'] = 'ctypes.c_void_p'
var['unresolved'] = True
elif 'namespace' in var and self.concrete_typedef(var['namespace']+tag):
#print( 'TRYING WITH NS', var['namespace'] )
con = self.concrete_typedef( var['namespace']+tag )
if con:
var['typedef'] = var['namespace']+tag
var['type'] = con
if 'struct' in con.split():
var['raw_type'] = var['typedef']
var['ctypes_type'] = 'ctypes.c_void_p'
else:
self.resolve_type( var['type'], var )
var['ctypes_type'] = self.guess_ctypes_type( var['type'] )
elif '::' in var:
var['ctypes_type'] = 'ctypes.c_void_p'
var['unresolved'] = True
elif tag in self.SubTypedefs: # TODO remove SubTypedefs
if 'property_of_class' in var or 'property_of_struct' in var:
trace_print( 'class:', self.SubTypedefs[ tag ], 'tag:', tag )
var['typedef'] = self.SubTypedefs[ tag ] # class name
var['ctypes_type'] = 'ctypes.c_void_p'
else:
trace_print( "WARN-this should almost never happen!" )
trace_print( var ); trace_print('-'*80)
var['unresolved'] = True
elif tag in self._template_typenames:
var['typename'] = tag
var['ctypes_type'] = 'ctypes.c_void_p'
var['unresolved'] = True # TODO, how to deal with templates?
elif tag.startswith('_'): # assume starting with underscore is not important for wrapping
warning_print( 'WARN unresolved %s'%_tag)
var['ctypes_type'] = 'ctypes.c_void_p'
var['unresolved'] = True
else:
trace_print( 'WARN: unknown type', var )
                    assert 'property_of_class' in var or 'property_of_struct' in var # only allow this case
var['unresolved'] = True
## if not resolved and is a method param, not going to wrap these methods ##
if var['unresolved'] and 'method' in var: var['method']['unresolved_parameters'] = True
# create stripped raw_type #
p = '* & const static mutable'.split() # +++ new July7: "mutable"
for var in CppVariable.Vars:
if 'raw_type' not in var:
raw = []
for x in var['type'].split():
if x not in p: raw.append( x )
var['raw_type'] = ' '.join( raw )
#if 'AutoConstantEntry' in var['raw_type']: print(var); assert 0
if var['class']:
if '::' not in var['raw_type']:
if not var['class']['parent']:
var['raw_type'] = var['class']['namespace'] + '::' + var['raw_type']
elif var['class']['parent'] in self.classes:
parent = self.classes[ var['class']['parent'] ]
var['raw_type'] = parent['namespace'] + '::' + var['class']['name'] + '::' + var['raw_type']
else:
var['unresolved'] = True
elif '::' in var['raw_type'] and var['raw_type'].split('::')[0] not in self.namespaces:
var['raw_type'] = var['class']['namespace'] + '::' + var['raw_type']
else:
var['unresolved'] = True
elif 'forward_declared' in var and 'namespace' in var:
if '::' not in var['raw_type']:
var['raw_type'] = var['namespace'] + var['raw_type']
elif '::' in var['raw_type'] and var['raw_type'].split('::')[0] in self.namespaces:
pass
                else: trace_print('-'*80); trace_print(var); raise NotImplementedError
## need full name space for classes in raw type ##
if var['raw_type'].startswith( '::' ):
#print(var)
#print('NAMESPACE', var['class']['namespace'])
#print( 'PARENT NS', var['class']['parent']['namespace'] )
#assert 0
var['unresolved'] = True
if 'method' in var: var['method']['unresolved_parameters'] = True
#var['raw_type'] = var['raw_type'][2:]
# Take care of #defines and #pragmas etc
trace_print("Processing precomp_macro_buf: %s"%self._precomp_macro_buf)
for m in self._precomp_macro_buf:
macro = m.replace("<CppHeaderParser_newline_temp_replacement>\\n", "\n")
try:
if macro.lower().startswith("#define"):
trace_print("Adding #define %s"%macro)
self.defines.append(macro.split(" ", 1)[1].strip())
elif macro.lower().startswith("#if") or macro.lower().startswith("#endif") or macro.lower().startswith("#else"):
self.conditionals.append(macro)
elif macro.lower().startswith("#pragma"):
trace_print("Adding #pragma %s"%macro)
self.pragmas.append(macro.split(" ", 1)[1].strip())
elif macro.lower().startswith("#include"):
trace_print("Adding #include %s"%macro)
self.includes.append(macro.split(" ", 1)[1].strip())
else:
debug_print("Cant detect what to do with precomp macro '%s'"%macro)
except: pass
self._precomp_macro_buf = None
def concrete_typedef( self, key ):
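        """Follows a typedef chain to its concrete type; stops early at templates and std::
        types and returns None when key is not a known typedef."""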
if key not in self.typedefs:
#print( 'FAILED typedef', key )
return None
while key in self.typedefs:
prev = key
key = self.typedefs[ key ]
if '<' in key or '>' in key: return prev # stop at template
if key.startswith('std::'): return key # stop at std lib
return key
class _CppHeader( Resolver ):
def finalize(self):
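        """Post-parse pass: resolves variables, fixes up method return types (typedefs,
        enums, nested and namespaced types) and marks classes abstract when an inherited
        pure virtual method is left unimplemented."""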
self.finalize_vars()
# finalize classes and method returns types
for cls in list(self.classes.values()):
for meth in cls.get_all_methods():
if meth['pure_virtual']: cls['abstract'] = True
if not meth['returns_fundamental'] and meth['returns'] in C99_NONSTANDARD:
meth['returns'] = C99_NONSTANDARD[meth['returns']]
meth['returns_fundamental'] = True
elif not meth['returns_fundamental']: # describe the return type
con = None
if cls['namespace'] and '::' not in meth['returns']:
con = self.concrete_typedef( cls['namespace'] + '::' + meth['returns'] )
else: con = self.concrete_typedef( meth['returns'] )
if con:
meth['returns_concrete'] = con
meth['returns_fundamental'] = is_fundamental( con )
elif meth['returns'] in self.classes:
trace_print( 'meth returns class:', meth['returns'] )
meth['returns_class'] = True
elif meth['returns'] in self.SubTypedefs:
meth['returns_class'] = True
meth['returns_nested'] = self.SubTypedefs[ meth['returns'] ]
elif meth['returns'] in cls._public_enums:
enum = cls._public_enums[ meth['returns'] ]
meth['returns_enum'] = enum['type']
meth['returns_fundamental'] = True
if enum['type'] == int: meth['returns'] = 'int'
else: meth['returns'] = 'char*'
elif meth['returns'] in self.global_enums:
enum = self.global_enums[ meth['returns'] ]
meth['returns_enum'] = enum['type']
meth['returns_fundamental'] = True
if enum['type'] == int: meth['returns'] = 'int'
else: meth['returns'] = 'char*'
elif meth['returns'].count('::')==1:
trace_print( meth )
a,b = meth['returns'].split('::')
if a in self.namespaces:
if b in self.classes:
klass = self.classes[ b ]
meth['returns_class'] = a + '::' + b
elif '<' in b and '>' in b:
warning_print( 'WARN-can not return template: %s'%b )
meth['returns_unknown'] = True
elif b in self.global_enums:
enum = self.global_enums[ b ]
meth['returns_enum'] = enum['type']
meth['returns_fundamental'] = True
if enum['type'] == int: meth['returns'] = 'int'
else: meth['returns'] = 'char*'
else: trace_print( a, b); trace_print( meth); meth['returns_unknown'] = True # +++
elif a in self.classes:
klass = self.classes[ a ]
if b in klass._public_enums:
trace_print( '...found nested enum', b )
enum = klass._public_enums[ b ]
meth['returns_enum'] = enum['type']
meth['returns_fundamental'] = True
if enum['type'] == int: meth['returns'] = 'int'
else: meth['returns'] = 'char*'
elif b in klass._public_forward_declares:
meth['returns_class'] = True
elif b in klass._public_typedefs:
typedef = klass._public_typedefs[ b ]
meth['returns_fundamental'] = is_fundamental( typedef )
else:
trace_print( meth ) # should be a nested class, TODO fix me.
meth['returns_unknown'] = True
elif '::' in meth['returns']:
trace_print('TODO namespace or extra nested return:', meth)
meth['returns_unknown'] = True
else:
trace_print( 'WARN: UNKNOWN RETURN', meth['name'], meth['returns'])
meth['returns_unknown'] = True
if meth["returns"].startswith(": : "):
meth["returns"] = meth["returns"].replace(": : ", "::")
for cls in list(self.classes.values()):
methnames = cls.get_all_method_names()
pvm = cls.get_all_pure_virtual_methods()
for d in cls['inherits']:
c = d['class']
a = d['access'] # do not depend on this to be 'public'
trace_print( 'PARENT CLASS:', c )
if c not in self.classes: trace_print('WARN: parent class not found')
if c in self.classes and self.classes[c]['abstract']:
p = self.classes[ c ]
for meth in p.get_all_methods(): #p["methods"]["public"]:
trace_print( '\t\tmeth', meth['name'], 'pure virtual', meth['pure_virtual'] )
if meth['pure_virtual'] and meth['name'] not in methnames: cls['abstract'] = True; break
def evaluate_struct_stack(self):
"""Create a Struct out of the name stack (but not its parts)"""
#print( 'eval struct stack', self.nameStack )
#if self.braceDepth != len(self.nameSpaces): return
struct = CppStruct(self.nameStack)
struct["namespace"] = self.cur_namespace()
self.structs[ struct['type'] ] = struct
self.structs_order.append( struct )
if self.curClass:
struct['parent'] = self.curClass
klass = self.classes[ self.curClass ]
klass['structs'][self.curAccessSpecifier].append( struct )
if self.curAccessSpecifier == 'public': klass._public_structs[ struct['type'] ] = struct
self.curStruct = struct
self._structs_brace_level[ struct['type'] ] = self.braceDepth
def parse_method_type( self, stack ):
trace_print( 'meth type info', stack )
if stack[0] in ':;' and stack[1] != ':': stack = stack[1:]
info = {
'debug': ' '.join(stack).replace(' : : ', '::' ).replace(' < ', '<' ).replace(' > ', '> ' ).replace(" >",">").replace(">>", "> >").replace(">>", "> >"),
'class':None,
'namespace':self.cur_namespace(add_double_colon=True),
}
for tag in 'defined pure_virtual operator constructor destructor extern template virtual static explicit inline friend returns returns_pointer returns_fundamental returns_class'.split(): info[tag]=False
header = stack[ : stack.index('(') ]
header = ' '.join( header )
header = header.replace(' : : ', '::' )
header = header.replace(' < ', '<' )
header = header.replace(' > ', '> ' )
header = header.strip()
if '{' in stack:
info['defined'] = True
self._method_body = self.braceDepth + 1
trace_print( 'NEW METHOD WITH BODY', self.braceDepth )
elif stack[-1] == ';':
info['defined'] = False
self._method_body = None # not a great idea to be clearing here
else: assert 0
if len(stack) > 3 and stack[-1] == ';' and stack[-2] == '0' and stack[-3] == '=':
info['pure_virtual'] = True
r = header.split()
name = None
if 'operator' in stack: # rare case op overload defined outside of class
op = stack[ stack.index('operator')+1 : stack.index('(') ]
op = ''.join(op)
if not op:
if " ".join(['operator', '(', ')', '(']) in " ".join(stack):
op = "()"
else:
trace_print( 'Error parsing operator')
return None
info['operator'] = op
name = 'operator' + op
a = stack[ : stack.index('operator') ]
elif r:
name = r[-1]
a = r[ : -1 ] # strip name
if name is None: return None
#if name.startswith('~'): name = name[1:]
while a and a[0] == '}': # strip - can have multiple } }
a = a[1:]
if '::' in name:
#klass,name = name.split('::') # methods can be defined outside of class
klass = name[ : name.rindex('::') ]
name = name.split('::')[-1]
info['class'] = klass
if klass in self.classes and not self.curClass:
#Class function defined outside the class
return None
# info['name'] = name
#else: info['name'] = name
if name.startswith('~'):
info['destructor'] = True
name = name[1:]
elif not a or (name == self.curClass and len(self.curClass)):
info['constructor'] = True
info['name'] = name
for tag in 'extern virtual static explicit inline friend'.split():
if tag in a: info[ tag ] = True; a.remove( tag ) # inplace
if 'template' in a:
a.remove('template')
b = ' '.join( a )
if '>' in b:
info['template'] = b[ : b.index('>')+1 ]
info['returns'] = b[ b.index('>')+1 : ] # find return type, could be incorrect... TODO
if '<typename' in info['template'].split():
typname = info['template'].split()[-1]
typname = typname[ : -1 ] # strip '>'
if typname not in self._template_typenames: self._template_typenames.append( typname )
else: info['returns'] = ' '.join( a )
else: info['returns'] = ' '.join( a )
info['returns'] = info['returns'].replace(' <', '<').strip()
## be careful with templates, do not count pointers inside template
info['returns_pointer'] = info['returns'].split('>')[-1].count('*')
if info['returns_pointer']: info['returns'] = info['returns'].replace('*','').strip()
info['returns_reference'] = '&' in info['returns']
if info['returns']: info['returns'] = info['returns'].replace('&','').strip()
a = []
for b in info['returns'].split():
if b == '__const__': info['returns_const'] = True
elif b == 'const': info['returns_const'] = True
else: a.append( b )
info['returns'] = ' '.join( a )
info['returns_fundamental'] = is_fundamental( info['returns'] )
return info
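# Illustrative sketch (hypothetical token stack): for "virtual int getValue();" the
# incoming stack resembles ['virtual', 'int', 'getValue', '(', ')', ';'] and the
# returned info carries name='getValue', returns='int', virtual=True,
# defined=False (no body) and pure_virtual=False.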
def evaluate_method_stack(self):
"""Create a method out of the name stack"""
if self.curStruct:
trace_print( 'WARN - struct contains methods - skipping' )
trace_print( self.stack )
assert 0
info = self.parse_method_type( self.stack )
if info:
if info[ 'class' ] and info['class'] in self.classes: # case where methods are defined outside of class
newMethod = CppMethod(self.nameStack, info['name'], info, self.curTemplate)
klass = self.classes[ info['class'] ]
klass[ 'methods' ][ 'public' ].append( newMethod )
newMethod['parent'] = klass
if klass['namespace']: newMethod['path'] = klass['namespace'] + '::' + klass['name']
else: newMethod['path'] = klass['name']
elif self.curClass: # normal case
newMethod = CppMethod(self.nameStack, self.curClass, info, self.curTemplate)
klass = self.classes[self.curClass]
klass['methods'][self.curAccessSpecifier].append(newMethod)
newMethod['parent'] = klass
if klass['namespace']: newMethod['path'] = klass['namespace'] + '::' + klass['name']
else: newMethod['path'] = klass['name']
else: #non class functions
debug_print("FREE FUNCTION")
newMethod = CppMethod(self.nameStack, None, info, self.curTemplate)
self.functions.append(newMethod)
global parseHistory
parseHistory.append({"braceDepth": self.braceDepth, "item_type": "method", "item": newMethod})
else:
trace_print( 'free function?', self.nameStack )
self.stack = []
def _parse_typedef( self, stack, namespace='' ):
if not stack or 'typedef' not in stack: return
stack = list( stack ) # copy just to be safe
if stack[-1] == ';': stack.pop()
while stack and stack[-1].isdigit(): stack.pop() # throw away array size for now
idx = stack.index('typedef')
if stack[-1] == "]":
try:
name = namespace + "".join(stack[-4:])
# Strip off the array part so the rest of the parsing is better
stack = stack[:-3]
except:
name = namespace + stack[-1]
else:
name = namespace + stack[-1]
s = ''
for a in stack[idx+1:-1]:
if a == '{': break
if not s or s[-1] in ':<>' or a in ':<>': s += a # keep compact
else: s += ' ' + a # spacing
r = {'name':name, 'raw':s, 'type':s}
if not is_fundamental(s):
if 'struct' in s.split(): pass # TODO is this right? "struct ns::something"
elif '::' not in s: s = namespace + s # only add the current name space if no namespace given
r['type'] = s
if s: return r
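# Illustrative sketch (hypothetical input): for "typedef unsigned int uint32;" the stack
# ['typedef', 'unsigned', 'int', 'uint32', ';'] yields
# {'name': 'uint32', 'raw': 'unsigned int', 'type': 'unsigned int'}; because the type is
# fundamental, no namespace prefix is prepended to 'type'.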
def evaluate_typedef(self):
ns = self.cur_namespace(add_double_colon=True)
res = self._parse_typedef( self.stack, ns )
if res:
name = res['name']
self.typedefs[ name ] = res['type']
if name not in self.typedefs_order: self.typedefs_order.append( name )
def evaluate_property_stack(self):
"""Create a Property out of the name stack"""
global parseHistory
assert self.stack[-1] == ';'
debug_print( "trace" )
if self.nameStack[0] == 'typedef':
if self.curClass:
typedef = self._parse_typedef( self.stack )
name = typedef['name']
klass = self.classes[ self.curClass ]
klass[ 'typedefs' ][ self.curAccessSpecifier ].append( name )
if self.curAccessSpecifier == 'public': klass._public_typedefs[ name ] = typedef['type']
Resolver.SubTypedefs[ name ] = self.curClass
else: assert 0
elif self.curStruct or self.curClass:
if len(self.nameStack) == 1:
#See if we can de-anonymize the type
filteredParseHistory = [h for h in parseHistory if h["braceDepth"] == self.braceDepth]
if len(filteredParseHistory) and filteredParseHistory[-1]["item_type"] == "class":
self.nameStack.insert(0, filteredParseHistory[-1]["item"]["name"])
debug_print("DEANONYMOIZING %s to type '%s'"%(self.nameStack[1], self.nameStack[0]))
if "," in self.nameStack: #Maybe we have a variable list
#Figure out which part is the variable separator, keeping templates and function pointers in mind
#First find the left-most comma outside of any '>' or ')'
leftMostComma = 0;
for i in range(0, len(self.nameStack)):
name = self.nameStack[i]
if name in (">", ")"): leftMostComma = 0
if leftMostComma == 0 and name == ",": leftMostComma = i
# Is it really a list of variables?
if leftMostComma != 0:
trace_print("Multiple variables for namestack in %s. Separating processing"%self.nameStack)
orig_nameStack = self.nameStack[:]
orig_stack = self.stack[:]
type_nameStack = orig_nameStack[:leftMostComma-1]
for name in orig_nameStack[leftMostComma - 1::2]:
self.nameStack = type_nameStack + [name]
self.stack = orig_stack[:] # Not maintained for mucking, but it doesn't matter on this path
self.evaluate_property_stack()
return
newVar = CppVariable(self.nameStack)
newVar['namespace'] = self.current_namespace()
if self.curStruct:
self.curStruct[ 'fields' ].append( newVar )
newVar['property_of_struct'] = self.curStruct
elif self.curClass:
klass = self.classes[self.curClass]
klass["properties"][self.curAccessSpecifier].append(newVar)
newVar['property_of_class'] = klass['name']
parseHistory.append({"braceDepth": self.braceDepth, "item_type": "variable", "item": newVar})
else:
debug_print( "Found Global variable" )
newVar = CppVariable(self.nameStack)
self.variables.append(newVar)
self.stack = [] # CLEAR STACK
def evaluate_class_stack(self):
"""Create a Class out of the name stack (but not its parts)"""
#don't support subclasses today
#print( 'eval class stack', self.nameStack )
parent = self.curClass
if self.braceDepth > len( self.nameSpaces) and parent:
trace_print( 'HIT NESTED SUBCLASS' )
self.accessSpecifierStack.append(self.curAccessSpecifier)
elif self.braceDepth != len(self.nameSpaces):
error_print( 'ERROR: WRONG BRACE DEPTH' )
return
# When dealing with typedefed structs, get rid of typedef keyword to handle later on
if self.nameStack[0] == "typedef":
del self.nameStack[0]
if len(self.nameStack) == 1:
self.anon_struct_counter += 1
# We can't handle more than 1 anonymous struct, so name them uniquely
self.nameStack.append("<anon-struct-%d>"%self.anon_struct_counter)
if self.nameStack[0] == "class":
self.curAccessSpecifier = 'private'
else:#struct
self.curAccessSpecifier = 'public'
debug_print("curAccessSpecifier changed/defaulted to %s"%self.curAccessSpecifier)
if self.nameStack[0] == "union":
newClass = CppUnion(self.nameStack)
self.anon_union_counter = [self.braceDepth, 2]
trace_print( 'NEW UNION', newClass['name'] )
else:
newClass = CppClass(self.nameStack, self.curTemplate)
trace_print( 'NEW CLASS', newClass['name'] )
newClass["declaration_method"] = self.nameStack[0]
self.classes_order.append( newClass ) # good idea to save ordering
self.stack = [] # fixes if class declared with ';' in closing brace
if parent:
newClass["namespace"] = self.classes[ parent ]['namespace'] + '::' + parent
newClass['parent'] = parent
self.classes[ parent ]['nested_classes'].append( newClass )
## supports nested classes with the same name ##
self.curClass = key = parent+'::'+newClass['name']
self._classes_brace_level[ key ] = self.braceDepth
elif newClass['parent']: # nested class defined outside of parent. A::B {...}
parent = newClass['parent']
newClass["namespace"] = self.classes[ parent ]['namespace'] + '::' + parent
self.classes[ parent ]['nested_classes'].append( newClass )
## supports nested classes with the same name ##
self.curClass = key = parent+'::'+newClass['name']
self._classes_brace_level[ key ] = self.braceDepth
else:
newClass["namespace"] = self.cur_namespace()
key = newClass['name']
self.curClass = newClass["name"]
self._classes_brace_level[ newClass['name'] ] = self.braceDepth
if not key.endswith("::") and not key.endswith(" ") and len(key) != 0:
if key in self.classes:
trace_print( 'ERROR name collision:', key )
self.classes[key].show()
trace_print('-'*80)
newClass.show()
assert key not in self.classes # namespace collision
self.classes[ key ] = newClass
global parseHistory
parseHistory.append({"braceDepth": self.braceDepth, "item_type": "class", "item": newClass})
def evalute_forward_decl(self):
trace_print( 'FORWARD DECL', self.nameStack )
assert self.nameStack[0] in ('class', 'struct')
name = self.nameStack[-1]
if self.curClass:
klass = self.classes[ self.curClass ]
klass['forward_declares'][self.curAccessSpecifier].append( name )
if self.curAccessSpecifier == 'public': klass._public_forward_declares.append( name )
else: self._forward_decls.append( name )
class CppHeader( _CppHeader ):
"""Parsed C++ class header
Variables produced:
self.classes - Dictionary of classes found in a given header file where the
key is the name of the class
"""
IGNORE_NAMES = '__extension__'.split()
def show(self):
for className in list(self.classes.keys()):self.classes[className].show()
def __init__(self, headerFileName, argType="file", **kwargs):
"""Create the parsed C++ header file parse tree
headerFileName - Name of the file to parse OR actual file contents (depends on argType)
argType - Indicates how to interpret headerFileName as a file string or file name
kwargs - Supports the following keywords
"""
## reset global state ##
global doxygenCommentCache
doxygenCommentCache = ""
CppVariable.Vars = []
CppStruct.Structs = []
if (argType == "file"):
self.headerFileName = os.path.expandvars(headerFileName)
self.mainClass = os.path.split(self.headerFileName)[1][:-2]
headerFileStr = ""
elif argType == "string":
self.headerFileName = ""
self.mainClass = "???"
headerFileStr = headerFileName
else:
raise Exception("Arg type must be either file or string")
self.curClass = ""
# nested classes have parent::nested, but no extra namespace,
# this keeps the API compatible, TODO proper namespace for everything.
Resolver.CLASSES = {}
self.classes = Resolver.CLASSES
#Functions that are not part of a class
self.functions = []
self.pragmas = []
self.defines = []
self.includes = []
self.conditionals = []
self._precomp_macro_buf = [] #for internal purposes, will end up filling out pragmas and defines at the end
self.enums = []
self.variables = []
self.global_enums = {}
self.nameStack = []
self.nameSpaces = []
self.curAccessSpecifier = 'private' # private is default
self.curTemplate = None
self.accessSpecifierStack = []
self.accessSpecifierScratch = []
debug_print("curAccessSpecifier changed/defaulted to %s"%self.curAccessSpecifier)
self.initextra()
# Old namestacks for a given level
self.nameStackHistory = []
self.anon_struct_counter = 0
self.anon_union_counter = [-1, 0]
self.templateRegistry = []
if (len(self.headerFileName)):
fd = open(self.headerFileName)
headerFileStr = "".join(fd.readlines())
fd.close()
# Make sure supportedAccessSpecifier are sane
for i in range(0, len(supportedAccessSpecifier)):
if " " not in supportedAccessSpecifier[i]: continue
supportedAccessSpecifier[i] = re.sub("[ ]+", " ", supportedAccessSpecifier[i]).strip()
# Strip out template declarations
templateSectionsToSliceOut = []
try:
for m in re.finditer("template[\t ]*<[^>]*>", headerFileStr):
start = m.start()
# Search for the final '>' which may or may not be caught in the case of nested <>'s
for i in range(start, len(headerFileStr)):
if headerFileStr[i] == '<':
firstBracket = i
break
ltgtStackCount = 1
#Now look for the final '>'
for i in range(firstBracket + 1, len(headerFileStr)):
if headerFileStr[i] == '<':
ltgtStackCount += 1
elif headerFileStr[i] == '>':
ltgtStackCount -= 1
if ltgtStackCount == 0:
end = i
break
templateSectionsToSliceOut.append((start, end))
# Now strip out all instances of the template
templateSectionsToSliceOut.reverse()
for tslice in templateSectionsToSliceOut:
# Replace the template symbol with a single symbol
template_symbol="CppHeaderParser_template_%d"%len(self.templateRegistry)
self.templateRegistry.append(headerFileStr[tslice[0]: tslice[1]+1])
newlines = headerFileStr[tslice[0]: tslice[1]].count("\n") * "\n" #Keep line numbers the same
headerFileStr = headerFileStr[:tslice[0]] + newlines + " " + template_symbol + " " + headerFileStr[tslice[1] + 1:]
except:
pass
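# Illustrative sketch: after this pass a declaration such as
#   template<typename T> class Foo {};
# reads roughly " CppHeaderParser_template_0  class Foo {};", while the original
# "template<typename T>" text is kept in self.templateRegistry[0] and recovered later
# through the TEMPLATE_NAME token.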
# Change multi-line #defines and expressions to single lines while maintaining line numbers
# Based from http://stackoverflow.com/questions/2424458/regular-expression-to-match-cs-multiline-preprocessor-statements
matches = re.findall(r'(?m)^(?:.*\\\r?\n)+.*$', headerFileStr)
is_define = re.compile(r'[ \t\v]*#[Dd][Ee][Ff][Ii][Nn][Ee]')
for m in matches:
#Keep the newlines so that the line count doesn't break
num_newlines = len([a for a in m if a=="\n"])
if is_define.match(m):
new_m = m.replace("\n", "<CppHeaderParser_newline_temp_replacement>\\n")
else:
# Just expression taking up multiple lines, make it take 1 line for easier parsing
new_m = m.replace("\\\n", " ")
if (num_newlines > 0):
new_m += "\n"*(num_newlines)
headerFileStr = headerFileStr.replace(m, new_m)
#Filter out Extern "C" statements. These are order dependent
matches = re.findall(re.compile(r'extern[\t ]+"[Cc]"[\t \n\r]*{', re.DOTALL), headerFileStr)
for m in matches:
#Keep the newlines so that the line count doesn't break
num_newlines = len([a for a in m if a=="\n"])
headerFileStr = headerFileStr.replace(m, "\n" * num_newlines)
headerFileStr = re.sub(r'extern[ ]+"[Cc]"[ ]*', "", headerFileStr)
#Filter out any ignore symbols that end with "()" to account for #define magic functions
for ignore in ignoreSymbols:
if not ignore.endswith("()"): continue
while True:
locStart = headerFileStr.find(ignore[:-1])
if locStart == -1:
break;
locEnd = None
#Now walk till we find the last paren and account for sub parens
parenCount = 1
inQuotes = False
for i in range(locStart + len(ignore) - 1, len(headerFileStr)):
c = headerFileStr[i]
if not inQuotes:
if c == "(":
parenCount += 1
elif c == ")":
parenCount -= 1
elif c == '"':
inQuotes = True
if parenCount == 0:
locEnd = i + 1
break;
else:
if c == '"' and headerFileStr[i-1] != '\\':
inQuotes = False
if locEnd:
#Strip it out but keep the linecount the same so line numbers are right
match_str = headerFileStr[locStart:locEnd]
debug_print("Striping out '%s'"%match_str)
num_newlines = len([a for a in match_str if a=="\n"])
headerFileStr = headerFileStr.replace(headerFileStr[locStart:locEnd], "\n"*num_newlines)
self.braceDepth = 0
lex.lex()
lex.input(headerFileStr)
global curLine
global curChar
curLine = 0
curChar = 0
try:
while True:
tok = lex.token()
if not tok: break
if self.anon_union_counter[0] == self.braceDepth and self.anon_union_counter[1]:
self.anon_union_counter[1] -= 1
tok.value = TagStr(tok.value, lineno=tok.lineno)
#debug_print("TOK: %s"%tok)
if tok.type == 'NAME' and tok.value in self.IGNORE_NAMES: continue
if tok.type != 'TEMPLATE_NAME':
self.stack.append( tok.value )
curLine = tok.lineno
curChar = tok.lexpos
if (tok.type in ('PRECOMP_MACRO', 'PRECOMP_MACRO_CONT')):
debug_print("PRECOMP: %s"%tok)
self._precomp_macro_buf.append(tok.value)
self.stack = []
self.nameStack = []
continue
if tok.type == 'TEMPLATE_NAME':
try:
templateId = int(tok.value.replace("CppHeaderParser_template_",""))
self.curTemplate = self.templateRegistry[templateId]
except: pass
if (tok.type == 'OPEN_BRACE'):
if len(self.nameStack) >= 2 and is_namespace(self.nameStack): # namespace {} with no name used in boost, this sets default?
if self.nameStack[1] == "__IGNORED_NAMESPACE__CppHeaderParser__":#Used in filtering extern "C"
self.nameStack[1] = ""
self.nameSpaces.append(self.nameStack[1])
ns = self.cur_namespace(); self.stack = []
if ns not in self.namespaces: self.namespaces.append( ns )
# Detect special condition of macro magic before class declaration so we
# can filter it out
if 'class' in self.nameStack and self.nameStack[0] != 'class':
classLocationNS = self.nameStack.index("class")
classLocationS = self.stack.index("class")
if "(" not in self.nameStack[classLocationNS:]:
debug_print("keyword 'class' found in unexpected location in nameStack, must be following #define magic. Process that before moving on")
origNameStack = self.nameStack
origStack = self.stack
#Process first part of stack which is probably #define macro magic and may cause issues
self.nameStack = self.nameStack[:classLocationNS]
self.stack = self.stack[:classLocationS]
try:
self.evaluate_stack()
except:
debug_print("Error processing #define magic... Oh well")
#Process rest of stack
self.nameStack = origNameStack[classLocationNS:]
self.stack = origStack[classLocationS:]
if len(self.nameStack) and not is_enum_namestack(self.nameStack):
self.evaluate_stack()
else:
self.nameStack.append(tok.value)
if self.stack and self.stack[0] == 'class': self.stack = []
self.braceDepth += 1
elif (tok.type == 'CLOSE_BRACE'):
if self.braceDepth == 0:
continue
if (self.braceDepth == len(self.nameSpaces)):
tmp = self.nameSpaces.pop()
self.stack = [] # clear stack when namespace ends?
if len(self.nameStack) and is_enum_namestack(self.nameStack):
self.nameStack.append(tok.value)
elif self.braceDepth < 10:
self.evaluate_stack()
else:
self.nameStack = []
self.braceDepth -= 1
#self.stack = []; print 'BRACE DEPTH', self.braceDepth, 'NS', len(self.nameSpaces)
if self.curClass: debug_print( 'CURBD %s'%self._classes_brace_level[ self.curClass ] )
if (self.braceDepth == 0) or (self.curClass and self._classes_brace_level[self.curClass]==self.braceDepth):
trace_print( 'END OF CLASS DEF' )
if self.accessSpecifierStack:
self.curAccessSpecifier = self.accessSpecifierStack[-1]
self.accessSpecifierStack = self.accessSpecifierStack[:-1]
if self.curClass and self.classes[ self.curClass ]['parent']: self.curClass = self.classes[ self.curClass ]['parent']
else: self.curClass = ""; #self.curStruct = None
self.stack = []
#if self.curStruct: self.curStruct = None
if self.braceDepth == 0 or (self.curStruct and self._structs_brace_level[self.curStruct['type']]==self.braceDepth):
trace_print( 'END OF STRUCT DEF' )
self.curStruct = None
if self._method_body and (self.braceDepth + 1) <= self._method_body:
self._method_body = None; self.stack = []; self.nameStack = []; trace_print( 'FORCE CLEAR METHBODY' )
if (tok.type == 'OPEN_PAREN'):
self.nameStack.append(tok.value)
elif (tok.type == 'CLOSE_PAREN'):
self.nameStack.append(tok.value)
elif (tok.type == 'OPEN_SQUARE_BRACKET'):
self.nameStack.append(tok.value)
elif (tok.type == 'CLOSE_SQUARE_BRACKET'):
self.nameStack.append(tok.value)
elif (tok.type == 'TAB'): pass
elif (tok.type == 'EQUALS'):
self.nameStack.append(tok.value)
elif (tok.type == 'COMMA'):
self.nameStack.append(tok.value)
elif (tok.type == 'BACKSLASH'):
self.nameStack.append(tok.value)
elif (tok.type == 'DIVIDE'):
self.nameStack.append(tok.value)
elif (tok.type == 'PIPE'):
self.nameStack.append(tok.value)
elif (tok.type == 'PERCENT'):
self.nameStack.append(tok.value)
elif (tok.type == 'CARET'):
self.nameStack.append(tok.value)
elif (tok.type == 'EXCLAMATION'):
self.nameStack.append(tok.value)
elif (tok.type == 'SQUOTE'): pass
elif (tok.type == 'NUMBER' or tok.type == 'FLOAT_NUMBER'):
self.nameStack.append(tok.value)
elif (tok.type == 'MINUS'):
self.nameStack.append(tok.value)
elif (tok.type == 'PLUS'):
self.nameStack.append(tok.value)
elif (tok.type == 'STRING_LITERAL'):
self.nameStack.append(tok.value)
elif (tok.type == 'NAME' or tok.type == 'AMPERSTAND' or tok.type == 'ASTERISK' or tok.type == 'CHAR_LITERAL'):
if tok.value in ignoreSymbols:
debug_print("Ignore symbol %s"%tok.value)
elif (tok.value == 'class'):
self.nameStack.append(tok.value)
elif tok.value in supportedAccessSpecifier:
if len(self.nameStack) and self.nameStack[0] in ("class", "struct", "union"):
self.nameStack.append(tok.value)
elif self.braceDepth == len(self.nameSpaces) + 1 or self.braceDepth == (len(self.nameSpaces) + len(self.curClass.split("::"))):
self.curAccessSpecifier = tok.value;
self.accessSpecifierScratch.append(tok.value)
debug_print("curAccessSpecifier updated to %s"%self.curAccessSpecifier)
self.stack = []
else:
self.nameStack.append(tok.value)
if self.anon_union_counter[0] == self.braceDepth:
self.anon_union_counter = [-1, 0]
elif (tok.type == 'COLON'):
#Don't want a colon to be first in the stack
if len(self.nameStack) == 0:
self.accessSpecifierScratch = []
continue
# Handle situation where access specifiers can be multi words such as "public slots"
jns = " ".join(self.accessSpecifierScratch + self.nameStack)
if jns in supportedAccessSpecifier:
self.curAccessSpecifier = jns;
debug_print("curAccessSpecifier updated to %s"%self.curAccessSpecifier)
self.stack = []
self.nameStack = []
else:
self.nameStack.append(tok.value)
self.accessSpecifierScratch = []
elif (tok.type == 'SEMI_COLON'):
if self.anon_union_counter[0] == self.braceDepth and self.anon_union_counter[1]:
debug_print("Creating anonymous union")
#Force the processing of an anonymous union
saved_namestack = self.nameStack[:]
saved_stack = self.stack[:]
self.nameStack = [""]
self.stack = self.nameStack + [";"]
self.nameStack = self.nameStack[0:1]
debug_print("pre eval anon stack")
self.evaluate_stack( tok.type )
debug_print("post eval anon stack")
self.nameStack = saved_namestack
self.stack = saved_stack
self.anon_union_counter = [-1, 0];
if (self.braceDepth < 10): self.evaluate_stack( tok.type )
self.stack = []
self.nameStack = []
except:
if (debug): raise
raise CppParseError("Not able to parse %s on line %d evaluating \"%s\"\nError around: %s"
% (self.headerFileName, tok.lineno, tok.value, " ".join(self.nameStack)))
self.finalize()
global parseHistory
parseHistory = []
# Delete some temporary variables
for key in ["_precomp_macro_buf", "nameStack", "nameSpaces", "curAccessSpecifier", "accessSpecifierStack",
"accessSpecifierScratch", "nameStackHistory", "anon_struct_counter", "anon_union_counter",
"_classes_brace_level", "_forward_decls", "stack", "mainClass", "curStruct", "_template_typenames",
"_method_body", "braceDepth", "_structs_brace_level", "typedefs_order", "curTemplate", "templateRegistry"]:
del self.__dict__[key]
def evaluate_stack(self, token=None):
"""Evaluates the current name stack"""
global doxygenCommentCache
self.nameStack = filter_out_attribute_keyword(self.nameStack)
self.stack = filter_out_attribute_keyword(self.stack)
nameStackCopy = self.nameStack[:]
debug_print( "Evaluating stack %s\n BraceDepth: %s (called from %d)" %(self.nameStack,self.braceDepth, inspect.currentframe().f_back.f_lineno))
#Handle special case of overloading operator ()
if "operator()(" in "".join(self.nameStack):
operator_index = self.nameStack.index("operator")
self.nameStack.pop(operator_index + 2)
self.nameStack.pop(operator_index + 1)
self.nameStack[operator_index] = "operator()"
if (len(self.curClass)):
debug_print( "%s (%s) "%(self.curClass, self.curAccessSpecifier))
else:
debug_print( "<anonymous> (%s) "%self.curAccessSpecifier)
#Filter special case of array with casting in it
try:
bracePos = self.nameStack.index("[")
parenPos = self.nameStack.index("(")
if bracePos == parenPos - 1:
endParen = self.nameStack.index(")")
self.nameStack = self.nameStack[:bracePos + 1] + self.nameStack[endParen + 1:]
debug_print("Filtered namestack to=%s"%self.nameStack)
except: pass
#if 'typedef' in self.nameStack: self.evaluate_typedef() # allows nested typedefs, probably a bad idea
if (not self.curClass and 'typedef' in self.nameStack and
(('struct' not in self.nameStack and 'union' not in self.nameStack) or self.stack[-1] == ";") and
not is_enum_namestack(self.nameStack)):
trace_print('STACK', self.stack)
self.evaluate_typedef()
return
elif (len(self.nameStack) == 0):
debug_print( "trace" )
debug_print( "(Empty Stack)" )
return
elif (self.nameStack[0] == "namespace"):
#Taken care of outside of here
pass
elif len(self.nameStack) == 2 and self.nameStack[0] == "friend":#friend class declaration
pass
elif len(self.nameStack) >= 2 and self.nameStack[0] == 'using' and self.nameStack[1] == 'namespace': pass # TODO
elif is_enum_namestack(self.nameStack):
debug_print( "trace" )
self.evaluate_enum_stack()
elif self._method_body and (self.braceDepth + 1) > self._method_body: trace_print( 'INSIDE METHOD DEF' )
elif is_method_namestack(self.stack) and not self.curStruct and '(' in self.nameStack:
debug_print( "trace" )
if self.braceDepth > 0:
if "{" in self.stack and self.stack[0] != '{' and self.stack[-1] == ';' and self.braceDepth == 1:
#Special case of a method defined outside a class that has a body
pass
else:
self.evaluate_method_stack()
else:
#Free function
self.evaluate_method_stack()
elif (len(self.nameStack) == 1 and len(self.nameStackHistory) > self.braceDepth
and (self.nameStackHistory[self.braceDepth][0][0:2] == ["typedef", "struct"] or
self.nameStackHistory[self.braceDepth][0][0:2] == ["typedef", "union"])):
# Look for the name of a typedef struct ("typedef struct {...} StructName;") or union so it can be renamed
debug_print("found the naming of a union")
type_name_to_rename = self.nameStackHistory[self.braceDepth][1]
new_name = self.nameStack[0]
type_to_rename = self.classes[type_name_to_rename]
type_to_rename["name"] = self.nameStack[0]
#Now re install it in its new location
self.classes[new_name] = type_to_rename
del self.classes[type_name_to_rename]
elif is_property_namestack(self.nameStack) and self.stack[-1] == ';':
debug_print( "trace" )
if self.nameStack[0] in ('class', 'struct') and len(self.stack) == 3: self.evalute_forward_decl()
elif len(self.nameStack) >= 2 and (self.nameStack[0]=='friend' and self.nameStack[1]=='class'): pass
else: self.evaluate_property_stack() # catches class props and structs in a namespace
elif self.nameStack[0] in ("class", "struct", "union") or self.nameStack[0] == 'typedef' and self.nameStack[1] in ('struct', 'union'):
#Parsing a union can reuse much of the class parsing
debug_print( "trace" )
self.evaluate_class_stack()
elif not self.curClass:
debug_print( "trace" )
if is_enum_namestack(self.nameStack): self.evaluate_enum_stack()
elif self.curStruct and self.stack[-1] == ';': self.evaluate_property_stack() # this catches fields of global structs
self.nameStack = []
doxygenCommentCache = ""
elif (self.braceDepth < 1):
debug_print( "trace" )
#Ignore global stuff for now
debug_print( "Global stuff: %s"%self.nameStack )
self.nameStack = []
doxygenCommentCache = ""
elif (self.braceDepth > len(self.nameSpaces) + 1):
debug_print( "trace" )
self.nameStack = []
doxygenCommentCache = ""
try:
self.nameStackHistory[self.braceDepth] = (nameStackCopy, self.curClass)
except:
self.nameStackHistory.append((nameStackCopy, self.curClass))
self.nameStack = [] # it's a little confusing to have some if/else branches above return and others not, and then clearing the nameStack down here
doxygenCommentCache = ""
self.curTemplate = None
def evaluate_enum_stack(self):
"""Create an Enum out of the name stack"""
debug_print( "evaluating enum" )
newEnum = CppEnum(self.nameStack)
if len(list(newEnum.keys())):
if len(self.curClass):
newEnum["namespace"] = self.cur_namespace(False)
klass = self.classes[self.curClass]
klass["enums"][self.curAccessSpecifier].append(newEnum)
if self.curAccessSpecifier == 'public' and 'name' in newEnum: klass._public_enums[ newEnum['name'] ] = newEnum
else:
newEnum["namespace"] = self.cur_namespace(True)
self.enums.append(newEnum)
if 'name' in newEnum and newEnum['name']: self.global_enums[ newEnum['name'] ] = newEnum
#This enum has instances, turn them into properties
if "instances" in newEnum:
instanceType = "enum"
if "name" in newEnum:
instanceType = newEnum["name"]
for instance in newEnum["instances"]:
self.nameStack = [instanceType, instance]
self.evaluate_property_stack()
del newEnum["instances"]
def strip_parent_keys(self):
"""Strip all parent keys to prevent loops"""
obj_queue = [self]
while len(obj_queue):
obj = obj_queue.pop()
trace_print("pop %s type %s"%(obj, type(obj)))
try:
if "parent" in obj.keys():
del obj["parent"]
trace_print("Stripped parent from %s"%obj.keys())
except: pass
# Figure out what sub types are one of ours
try:
if not hasattr(obj, 'keys'):
obj = obj.__dict__
for k in obj.keys():
trace_print("-Try key %s"%(k))
trace_print("-type %s"%(type(obj[k])))
if k in ["nameStackHistory", "parent", "_public_typedefs"]: continue
if type(obj[k]) == list:
for i in obj[k]:
trace_print("push l %s"%i)
obj_queue.append(i)
elif type(obj[k]) == dict:
if len(obj):
trace_print("push d %s"%obj[k])
obj_queue.append(obj[k])
elif type(obj[k]) == type(type(0)):
if type(obj[k]) == int:
obj[k] = "int"
elif type(obj[k]) == str:
obj[k] = "string"
else:
obj[k] = "???"
trace_print("next key\n")
except:
trace_print("Exception")
def toJSON(self, indent=4):
"""Converts a parsed structure to JSON"""
import json
self.strip_parent_keys()
try:
del self.__dict__["classes_order"]
except: pass
return json.dumps(self.__dict__, indent=indent)
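# Usage sketch (hypothetical header name):
#   json_str = CppHeader("SampleClass.h").toJSON()
# Note that toJSON() calls strip_parent_keys(), which mutates the parsed object by
# removing the parent back-references.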
def __repr__(self):
rtn = {
"classes": self.classes,
"functions": self.functions,
"enums": self.enums,
"variables": self.variables,
}
return repr(rtn)
def __str__(self):
rtn = ""
for className in list(self.classes.keys()):
rtn += "%s\n"%self.classes[className]
if self.functions:
rtn += "// functions\n"
for f in self.functions:
rtn += "%s\n"%f
if self.variables:
rtn += "// variables\n"
for f in self.variables:
rtn += "%s\n"%f
if self.enums:
rtn += "// enums\n"
for f in self.enums:
rtn += "%s\n"%f
return rtn
| 43.697027 | 210 | 0.51306 |
import ply.lex as lex
import os
import sys
import re
import inspect
def lineno():
return inspect.currentframe().f_back.f_lineno
version = __version__ = "2.7"
tokens = [
'NUMBER',
'FLOAT_NUMBER',
'TEMPLATE_NAME',
'NAME',
'OPEN_PAREN',
'CLOSE_PAREN',
'OPEN_BRACE',
'CLOSE_BRACE',
'OPEN_SQUARE_BRACKET',
'CLOSE_SQUARE_BRACKET',
'COLON',
'SEMI_COLON',
'COMMA',
'TAB',
'BACKSLASH',
'PIPE',
'PERCENT',
'EXCLAMATION',
'CARET',
'COMMENT_SINGLELINE',
'COMMENT_MULTILINE',
'PRECOMP_MACRO',
'PRECOMP_MACRO_CONT',
'ASTERISK',
'AMPERSTAND',
'EQUALS',
'MINUS',
'PLUS',
'DIVIDE',
'CHAR_LITERAL',
'STRING_LITERAL',
'NEW_LINE',
'SQUOTE',
]
t_ignore = " \r.?@\f"
t_NUMBER = r'[0-9][0-9XxA-Fa-f]*'
t_FLOAT_NUMBER = r'[-+]?[0-9]*\.[0-9]+([eE][-+]?[0-9]+)?'
t_TEMPLATE_NAME = r'CppHeaderParser_template_[0-9]+'
t_NAME = r'[<>A-Za-z_~][A-Za-z0-9_]*'
t_OPEN_PAREN = r'\('
t_CLOSE_PAREN = r'\)'
t_OPEN_BRACE = r'{'
t_CLOSE_BRACE = r'}'
t_OPEN_SQUARE_BRACKET = r'\['
t_CLOSE_SQUARE_BRACKET = r'\]'
t_SEMI_COLON = r';'
t_COLON = r':'
t_COMMA = r','
t_TAB = r'\t'
t_BACKSLASH = r'\\'
t_PIPE = r'\|'
t_PERCENT = r'%'
t_CARET = r'\^'
t_EXCLAMATION = r'!'
t_PRECOMP_MACRO = r'\#.*'
t_PRECOMP_MACRO_CONT = r'.*\\\n'
def t_COMMENT_SINGLELINE(t):
    r'\/\/.*\n'   # PLY takes the token regex from the docstring
global doxygenCommentCache
if t.value.startswith("///") or t.value.startswith("//!"):
if doxygenCommentCache:
doxygenCommentCache += "\n"
if t.value.endswith("\n"):
doxygenCommentCache += t.value[:-1]
else:
doxygenCommentCache += t.value
t.lexer.lineno += len([a for a in t.value if a=="\n"])
t_ASTERISK = r'\*'
t_MINUS = r'\-'
t_PLUS = r'\+'
t_DIVIDE = r'/(?!/)'
t_AMPERSTAND = r'&'
t_EQUALS = r'='
t_CHAR_LITERAL = "'.'"
t_SQUOTE = "'"
#found at http://wordaligned.org/articles/string-literals-and-regular-expressions
#TODO: This does not work with the string "bla \" bla"
t_STRING_LITERAL = r'"([^"\\]|\\.)*"'
#Found at http://ostermiller.org/findcomment.html
def t_COMMENT_MULTILINE(t):
    r'/\*([^*]|[\r\n]|(\*+([^*/]|[\r\n])))*\*+/'   # PLY takes the token regex from the docstring
global doxygenCommentCache
if t.value.startswith("/**") or t.value.startswith("/*!"):
#not sure why, but get double new lines
v = t.value.replace("\n\n", "\n")
#strip prefixing whitespace
v = re.sub("\n[\s]+\*", "\n*", v)
doxygenCommentCache += v
t.lexer.lineno += len([a for a in t.value if a=="\n"])
def t_NEWLINE(t):
    r'\n+'   # PLY takes the token regex from the docstring
t.lexer.lineno += len(t.value)
def t_error(v):
print(( "Lex error: ", v ))
lex.lex()
# Controls error_print
print_errors = 1
# Controls warning_print
print_warnings = 1
# Controls debug_print
debug = 0
# Controls trace_print
debug_trace = 0
def error_print(arg):
if print_errors: print(("[%4d] %s"%(inspect.currentframe().f_back.f_lineno, arg)))
def warning_print(arg):
if print_warnings: print(("[%4d] %s"%(inspect.currentframe().f_back.f_lineno, arg)))
def debug_print(arg):
global debug
if debug: print(("[%4d] %s"%(inspect.currentframe().f_back.f_lineno, arg)))
def trace_print(*arg):
global debug_trace
if debug_trace:
sys.stdout.write("[%s] "%(inspect.currentframe().f_back.f_lineno))
for a in arg: sys.stdout.write("%s "%a)
sys.stdout.write("\n")
supportedAccessSpecifier = [
'public',
'protected',
'private',
'public slots',
'protected slots',
'private slots',
'public Q_SLOTS',
'protected Q_SLOTS',
'private Q_SLOTS',
'signals',
'Q_SIGNALS',
]
#Symbols to ignore, usually special macros
ignoreSymbols = [
'Q_OBJECT',
'Q_PROPERTY()',
'Q_DECLARE_FLAGS()',
'Q_INVOKABLE',
]
doxygenCommentCache = ""
#Track what was added in what order and at what depth
parseHistory = []
def is_namespace(nameStack):
if len(nameStack) == 0:
return False
if nameStack[0] == "namespace":
return True
return False
def is_enum_namestack(nameStack):
if len(nameStack) == 0:
return False
if nameStack[0] == "enum":
return True
if len(nameStack) > 1 and nameStack[0] == "typedef" and nameStack[1] == "enum":
return True
return False
def is_fundamental(s):
for a in s.split():
if a not in ["size_t", "struct", "union", "unsigned", "signed", "bool", "char", "short", "int", "float", "double", "long", "void", "*"]: return False
return True
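# Illustrative sketch: is_fundamental("unsigned int") -> True (every token is a builtin
# keyword), while is_fundamental("std::string") -> False, so class and typedef types are
# resolved elsewhere.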
def is_function_pointer_stack(stack):
paren_depth = 0
paren_count = 0
star_after_first_paren = False
last_e = None
for e in stack:
if e == "(":
paren_depth += 1
elif e == ")" and paren_depth > 0:
paren_depth -= 1
if paren_depth == 0:
paren_count += 1
elif e == "*" and last_e == "(" and paren_count == 0 and paren_depth == 1:
star_after_first_paren = True
last_e = e
if star_after_first_paren and paren_count == 2:
return True
else:
return False
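# Illustrative sketch (hypothetical token stack): for "int (*callback)(float);" the stack
# ['int', '(', '*', 'callback', ')', '(', 'float', ')', ';'] has a '*' right after the
# first '(' and exactly two closed paren groups, so this returns True; a plain
# declaration like ['void', 'foo', '(', ')', ';'] returns False.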
def is_method_namestack(stack):
r = False
if '(' not in stack: r = False
elif stack[0] == 'typedef': r = False # TODO deal with typedef function prototypes
#elif '=' in stack and stack.index('=') < stack.index('(') and stack[stack.index('=')-1] != 'operator': r = False #disabled July6th - allow all operators
elif 'operator' in stack: r = True # allow all operators
elif '{' in stack and stack.index('{') < stack.index('('): r = False # struct that looks like a method/class
elif '(' in stack and ')' in stack:
if '{' in stack and '}' in stack: r = True
elif stack[-1] == ';':
if is_function_pointer_stack(stack):
r = False
else:
r = True
elif '{' in stack: r = True # ideally we catch both braces... TODO
else: r = False
#Test for case of property set to something with parens such as "static const int CONST_A = (1 << 7) - 1;"
if r and "(" in stack and "=" in stack and 'operator' not in stack:
if stack.index("=") < stack.index("("): r = False
return r
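# Illustrative sketch (hypothetical stacks): ['void', 'foo', '(', 'int', 'x', ')', ';']
# is accepted as a method/function stack (True), while a property initialized with
# parens such as ['static', 'const', 'int', 'A', '=', '(', '1', ')', ';'] is rejected
# because '=' appears before '(' (False).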
def is_property_namestack(nameStack):
r = False
if '(' not in nameStack and ')' not in nameStack: r = True
elif "(" in nameStack and "=" in nameStack and nameStack.index("=") < nameStack.index("("): r = True
#See if we are a function pointer
if not r and is_function_pointer_stack(nameStack): r = True
return r
def detect_lineno(s):
try:
rtn = s.lineno()
if rtn != -1:
return rtn
except: pass
global curLine
return curLine
def filter_out_attribute_keyword(stack):
if "__attribute__" not in stack: return stack
try:
debug_print("Stripping __attribute__ from %s"% stack)
attr_index = stack.index("__attribute__")
attr_end = attr_index + 1 #Assuming not followed by a parenthetical expression, which won't happen
#Find final paren
if stack[attr_index + 1] == '(':
paren_count = 1
for i in range(attr_index + 2, len(stack)):
elm = stack[i]
if elm == '(':
paren_count += 1
elif elm == ')':
paren_count -= 1
if paren_count == 0:
attr_end = i + 1
break
new_stack = stack[0:attr_index] + stack[attr_end:]
debug_print("stripped stack is %s"% new_stack)
return new_stack
except:
return stack
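# Illustrative sketch (hypothetical stack): ['int', '__attribute__', '(', '(', 'aligned',
# '(', '4', ')', ')', ')', 'x'] is reduced to ['int', 'x']; the keyword and its balanced
# parenthesized argument list are dropped so later stages never see them.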
class TagStr(str):
lineno_reg = {}
def __new__(cls,*args,**kw):
new_obj = str.__new__(cls,*args)
if "lineno" in kw:
TagStr.lineno_reg[id(new_obj)] = kw["lineno"]
return new_obj
def __del__(self):
try:
del TagStr.lineno_reg[id(self)]
except: pass
def lineno(self):
return TagStr.lineno_reg.get(id(self), -1)
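# Illustrative sketch: TagStr("foo", lineno=12) compares and concatenates like the plain
# string "foo", but TagStr("foo", lineno=12).lineno() returns 12, which is what
# detect_lineno() above uses to recover source line numbers from stack entries.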
class CppParseError(Exception): pass
class CppClass(dict):
def get_all_methods(self):
r = []
for typ in supportedAccessSpecifier: r += self['methods'][typ]
return r
def get_all_method_names( self ):
r = []
for typ in supportedAccessSpecifier: r += self.get_method_names(typ) # returns list
return r
def get_all_pure_virtual_methods( self ):
r = {}
for typ in supportedAccessSpecifier: r.update(self.get_pure_virtual_methods(typ)) # returns dict
return r
def get_method_names( self, type='public' ): return [ meth['name'] for meth in self['methods'][ type ] ]
def get_pure_virtual_methods( self, type='public' ):
r = {}
for meth in self['methods'][ type ]:
if meth['pure_virtual']: r[ meth['name'] ] = meth
return r
def __init__(self, nameStack, curTemplate):
self['nested_classes'] = []
self['parent'] = None
self['abstract'] = False
self._public_enums = {}
self._public_structs = {}
self._public_typedefs = {}
self._public_forward_declares = []
self['namespace'] = ""
debug_print( "Class: %s"%nameStack )
debug_print( "Template: %s"%curTemplate)
if (len(nameStack) < 2):
nameStack.insert(1, "")#anonymous struct
global doxygenCommentCache
if len(doxygenCommentCache):
self["doxygen"] = doxygenCommentCache
doxygenCommentCache = ""
if "::" in "".join(nameStack):
#Re-Join class paths (ex ['class', 'Bar', ':', ':', 'Foo'] -> ['class', 'Bar::Foo'])
try:
new_nameStack = []
for name in nameStack:
if len(new_nameStack) == 0:
new_nameStack.append(name)
elif name == ":" and new_nameStack[-1].endswith(":"):
new_nameStack[-1] += name
elif new_nameStack[-1].endswith("::"):
new_nameStack[-2] += new_nameStack[-1] + name
del new_nameStack[-1]
else:
new_nameStack.append(name)
trace_print("Convert from namestack\n %s\nto\n%s"%(nameStack, new_nameStack))
nameStack = new_nameStack
except: pass
# Handle final specifier
self["final"] = False
try:
final_index = nameStack.index("final")
# Don't trip up the rest of the logic
del nameStack[final_index]
self["final"] = True
trace_print("final")
except: pass
self["name"] = nameStack[1]
self["line_number"] = detect_lineno(nameStack[0])
#Handle template classes
if len(nameStack) > 3 and nameStack[2].startswith("<"):
open_template_count = 0
param_separator = 0
found_first = False
i = 0
for elm in nameStack:
if '<' in elm :
open_template_count += 1
found_first = True
elif '>' in elm:
open_template_count -= 1
if found_first and open_template_count == 0:
self["name"] = "".join(nameStack[1:i + 1])
break;
i += 1
elif ":" in nameStack:
self['name'] = nameStack[ nameStack.index(':') - 1 ]
inheritList = []
if nameStack.count(':') == 1:
nameStack = nameStack[nameStack.index(":") + 1:]
while len(nameStack):
tmpStack = []
tmpInheritClass = {"access":"private", "virtual": False}
if "," in nameStack:
tmpStack = nameStack[:nameStack.index(",")]
nameStack = nameStack[nameStack.index(",") + 1:]
else:
tmpStack = nameStack
nameStack = []
# Convert template classes to one name in the last index
for i in range(0, len(tmpStack)):
if '<' in tmpStack[i]:
tmpStack2 = tmpStack[:i-1]
tmpStack2.append("".join(tmpStack[i-1:]))
tmpStack = tmpStack2
break
if len(tmpStack) == 0:
break;
elif len(tmpStack) == 1:
tmpInheritClass["class"] = tmpStack[0]
elif len(tmpStack) == 2:
tmpInheritClass["access"] = tmpStack[0]
tmpInheritClass["class"] = tmpStack[1]
elif len(tmpStack) == 3 and "virtual" in tmpStack:
tmpInheritClass["access"] = tmpStack[1] if tmpStack[1] != "virtual" else tmpStack[0]
tmpInheritClass["class"] = tmpStack[2]
tmpInheritClass["virtual"] = True
else:
warning_print( "Warning: can not parse inheriting class %s"%(" ".join(tmpStack)))
if '>' in tmpStack: pass # allow skip templates for now
else: raise NotImplementedError
if 'class' in tmpInheritClass: inheritList.append(tmpInheritClass)
elif nameStack.count(':') == 2: self['parent'] = self['name']; self['name'] = nameStack[-1]
elif nameStack.count(':') > 2 and nameStack[0] in ("class", "struct"):
tmpStack = nameStack[nameStack.index(":") + 1:]
superTmpStack = [[]]
for tok in tmpStack:
if tok == ',':
superTmpStack.append([])
else:
superTmpStack[-1].append(tok)
for tmpStack in superTmpStack:
tmpInheritClass = {"access":"private"}
if len(tmpStack) and tmpStack[0] in supportedAccessSpecifier:
tmpInheritClass["access"] = tmpStack[0]
tmpStack = tmpStack[1:]
inheritNSStack = []
while len(tmpStack) > 3:
if tmpStack[0] == ':': break;
if tmpStack[1] != ':': break;
if tmpStack[2] != ':': break;
inheritNSStack.append(tmpStack[0])
tmpStack = tmpStack[3:]
if len(tmpStack) == 1 and tmpStack[0] != ':':
inheritNSStack.append(tmpStack[0])
tmpInheritClass["class"] = "::".join(inheritNSStack)
inheritList.append(tmpInheritClass)
self['inherits'] = inheritList
if curTemplate:
self["template"] = curTemplate
trace_print("Setting template to '%s'"%self["template"])
methodAccessSpecificList = {}
propertyAccessSpecificList = {}
enumAccessSpecificList = {}
structAccessSpecificList = {}
typedefAccessSpecificList = {}
forwardAccessSpecificList = {}
for accessSpecifier in supportedAccessSpecifier:
methodAccessSpecificList[accessSpecifier] = []
propertyAccessSpecificList[accessSpecifier] = []
enumAccessSpecificList[accessSpecifier] = []
structAccessSpecificList[accessSpecifier] = []
typedefAccessSpecificList[accessSpecifier] = []
forwardAccessSpecificList[accessSpecifier] = []
self['methods'] = methodAccessSpecificList
self['properties'] = propertyAccessSpecificList
self['enums'] = enumAccessSpecificList
self['structs'] = structAccessSpecificList
self['typedefs'] = typedefAccessSpecificList
self['forward_declares'] = forwardAccessSpecificList
def show(self):
namespace_prefix = ""
if self["namespace"]: namespace_prefix = self["namespace"] + "::"
rtn = "%s %s"%(self["declaration_method"], namespace_prefix + self["name"])
if self["final"]: rtn += " final"
if self['abstract']: rtn += ' (abstract)\n'
else: rtn += '\n'
if 'doxygen' in list(self.keys()): rtn += self["doxygen"] + '\n'
if 'parent' in list(self.keys()) and self['parent']: rtn += 'parent class: ' + self['parent'] + '\n'
if "inherits" in list(self.keys()):
rtn += " Inherits: "
for inheritClass in self["inherits"]:
if inheritClass["virtual"]: rtn += "virtual "
rtn += "%s %s, "%(inheritClass["access"], inheritClass["class"])
rtn += "\n"
rtn += " {\n"
for accessSpecifier in supportedAccessSpecifier:
rtn += " %s\n"%(accessSpecifier)
#Enums
if (len(self["enums"][accessSpecifier])):
rtn += " <Enums>\n"
for enum in self["enums"][accessSpecifier]:
rtn += " %s\n"%(repr(enum))
#Properties
if (len(self["properties"][accessSpecifier])):
rtn += " <Properties>\n"
for property in self["properties"][accessSpecifier]:
rtn += " %s\n"%(repr(property))
#Methods
if (len(self["methods"][accessSpecifier])):
rtn += " <Methods>\n"
for method in self["methods"][accessSpecifier]:
rtn += "\t\t" + method.show() + '\n'
rtn += " }\n"
print(rtn)
def __str__(self):
namespace_prefix = ""
if self["namespace"]: namespace_prefix = self["namespace"] + "::"
rtn = "%s %s"%(self["declaration_method"], namespace_prefix + self["name"])
if self["final"]: rtn += " final"
if self['abstract']: rtn += ' (abstract)\n'
else: rtn += '\n'
if 'doxygen' in list(self.keys()): rtn += self["doxygen"] + '\n'
if 'parent' in list(self.keys()) and self['parent']: rtn += 'parent class: ' + self['parent'] + '\n'
if "inherits" in list(self.keys()) and len(self["inherits"]):
rtn += "Inherits: "
for inheritClass in self["inherits"]:
if inheritClass.get("virtual", False): rtn += "virtual "
rtn += "%s %s, "%(inheritClass["access"], inheritClass["class"])
rtn += "\n"
rtn += "{\n"
for accessSpecifier in supportedAccessSpecifier:
rtn += "%s\n"%(accessSpecifier)
#Enums
if (len(self["enums"][accessSpecifier])):
rtn += " // Enums\n"
for enum in self["enums"][accessSpecifier]:
rtn += " %s\n"%(repr(enum))
#Properties
if (len(self["properties"][accessSpecifier])):
rtn += " // Properties\n"
for property in self["properties"][accessSpecifier]:
rtn += " %s\n"%(repr(property))
#Methods
if (len(self["methods"][accessSpecifier])):
rtn += " // Methods\n"
for method in self["methods"][accessSpecifier]:
rtn += " %s\n"%(repr(method))
rtn += "}\n"
return rtn
class CppUnion( CppClass ):
def __init__(self, nameStack):
CppClass.__init__(self, nameStack, None)
self["name"] = "union " + self["name"]
self["members"] = self["properties"]["public"]
def transform_to_union_keys(self):
print("union keys: %s"%list(self.keys()))
for key in ['inherits', 'parent', 'abstract', 'namespace', 'typedefs', 'methods']:
del self[key]
def show(self):
print(self)
def __str__(self):
namespace_prefix = ""
if self["namespace"]: namespace_prefix = self["namespace"] + "::"
rtn = "%s %s"%(self["declaration_method"], namespace_prefix + self["name"])
if self['abstract']: rtn += ' (abstract)\n'
else: rtn += '\n'
if 'doxygen' in list(self.keys()): rtn += self["doxygen"] + '\n'
if 'parent' in list(self.keys()) and self['parent']: rtn += 'parent class: ' + self['parent'] + '\n'
rtn += "{\n"
for member in self["members"]:
rtn += " %s\n"%(repr(member))
rtn += "}\n"
return rtn
class _CppMethod( dict ):
def _params_helper1( self, stack ):
# deal with "throw" keyword
if 'throw' in stack: stack = stack[ : stack.index('throw') ]
## remove GCC keyword __attribute__(...) and preserve returns ##
cleaned = []
hit = False; hitOpen = 0; hitClose = 0
for a in stack:
if a == '__attribute__': hit = True
if hit:
if a == '(': hitOpen += 1
elif a == ')': hitClose += 1
if a==')' and hitOpen == hitClose:
hit = False
else:
cleaned.append( a )
stack = cleaned
# also deal with attribute((const)) function prefix #
# TODO this needs to be better #
if len(stack) > 5:
a = ''.join(stack)
if a.startswith('((__const__))'): stack = stack[ 5 : ]
elif a.startswith('__attribute__((__const__))'): stack = stack[ 6 : ]
stack = stack[stack.index('(') + 1: ]
if not stack: return []
if len(stack)>=3 and stack[0]==')' and stack[1]==':': # is this always a constructor?
self['constructor'] = True
return []
stack.reverse(); _end_ = stack.index(')'); stack.reverse()
stack = stack[ : len(stack)-(_end_+1) ]
if '(' not in stack: return stack # safe to return, no defaults that init a class
# transforms ['someclass', '(', '0', '0', '0', ')'] into "someclass(0,0,0)"
r = []; hit=False
for a in stack:
if a == '(': hit=True
elif a == ')': hit=False
if hit or a == ')': r[-1] = r[-1] + a
else: r.append( a )
return r
def _params_helper2( self, params ):
for p in params:
p['method'] = self
if '::' in p['type']:
ns = p['type'].split('::')[0]
if ns not in Resolver.NAMESPACES and ns in Resolver.CLASSES:
p['type'] = self['namespace'] + p['type']
else: p['namespace'] = self[ 'namespace' ]
class CppMethod( _CppMethod ):
def show(self):
r = ['method name: %s (%s)' %(self['name'],self['debug']) ]
if self['returns']: r.append( 'returns: %s'%self['returns'] )
if self['parameters']: r.append( 'number arguments: %s' %len(self['parameters']))
if self['pure_virtual']: r.append( 'pure virtual: %s'%self['pure_virtual'] )
if self['constructor']: r.append( 'constructor' )
if self['destructor']: r.append( 'destructor' )
return '\n\t\t '.join( r )
def __init__(self, nameStack, curClass, methinfo, curTemplate):
debug_print( "Method: %s"%nameStack )
debug_print( "Template: %s"%curTemplate )
global doxygenCommentCache
if len(doxygenCommentCache):
self["doxygen"] = doxygenCommentCache
doxygenCommentCache = ""
if "operator" in nameStack:
self["rtnType"] = " ".join(nameStack[:nameStack.index('operator')])
self["name"] = "".join(nameStack[nameStack.index('operator'):nameStack.index('(')])
else:
self["rtnType"] = " ".join(nameStack[:nameStack.index('(') - 1])
self["name"] = " ".join(nameStack[nameStack.index('(') - 1:nameStack.index('(')])
if self["rtnType"].startswith("virtual"):
self["rtnType"] = self["rtnType"][len("virtual"):].strip()
if len(self["rtnType"]) == 0 or self["name"] == curClass:
self["rtnType"] = "void"
self["rtnType"] = self["rtnType"].replace(' : : ', '::' )
self["rtnType"] = self["rtnType"].replace(" <","<")
self["rtnType"] = self["rtnType"].replace(" >",">").replace(">>", "> >").replace(">>", "> >")
self["rtnType"] = self["rtnType"].replace(" ,",",")
for spec in ["const", "final", "override"]:
self[spec] = False
for i in reversed(nameStack):
if i == spec:
self[spec] = True
break
elif i == ")":
break
self.update( methinfo )
self["line_number"] = detect_lineno(nameStack[0])
try:
paren_depth_counter = 0
for i in range(0, len(nameStack)):
elm = nameStack[i]
if elm == "(":
paren_depth_counter += 1
if elm == ")":
paren_depth_counter -=1
if paren_depth_counter == 0 and nameStack[i+1] == ':':
debug_print("Stripping out initializer list")
nameStack = nameStack[:i+1]
break
except: pass
paramsStack = self._params_helper1( nameStack )
debug_print( "curTemplate: %s"%curTemplate)
if curTemplate:
self["template"] = curTemplate
debug_print( "SET self['template'] to `%s`"%self["template"])
params = []
doxyVarDesc = {}
if "doxygen" in self:
doxyLines = self["doxygen"].split("\n")
lastParamDesc = ""
for doxyLine in doxyLines:
if " @param " in doxyLine or " \param " in doxyLine:
try:
doxyLine = doxyLine[doxyLine.find("param ") + 6:]
(var, desc) = doxyLine.split(" ", 1)
doxyVarDesc[var] = desc.strip()
lastParamDesc = var
except: pass
elif " @return " in doxyLine or " \return " in doxyLine:
lastParamDesc = ""
elif lastParamDesc:
try:
doxyLine = doxyLine.strip()
if " " not in doxyLine:
lastParamDesc = ""
continue
doxyLine = doxyLine[doxyLine.find(" ") + 1:]
doxyVarDesc[lastParamDesc] += " " + doxyLine
except: pass
while (len(paramsStack)):
open_template_count = 0
param_separator = 0
i = 0
for elm in paramsStack:
if '<' in elm :
open_template_count += 1
elif '>' in elm:
open_template_count -= 1
elif elm == ',' and open_template_count == 0:
param_separator = i
break
i += 1
if param_separator:
param = CppVariable(paramsStack[0:param_separator], doxyVarDesc=doxyVarDesc)
if len(list(param.keys())): params.append(param)
paramsStack = paramsStack[param_separator + 1:]
else:
param = CppVariable(paramsStack, doxyVarDesc=doxyVarDesc)
if len(list(param.keys())): params.append(param)
break
self["parameters"] = params
#self._params_helper2( params ) # mods params inplace
def __str__(self):
filter_keys = ("parent", "defined", "operator", "returns_reference")
cpy = dict((k,v) for (k,v) in list(self.items()) if k not in filter_keys)
return "%s"%cpy
class _CppVariable(dict):
def _name_stack_helper( self, stack ):
stack = list(stack)
if '=' not in stack: # TODO refactor me
# check for array[n] and deal with funny array syntax: "int myvar:99"
array = []
while stack and stack[-1].isdigit(): array.append( stack.pop() )
if array: array.reverse(); self['array'] = int(''.join(array))
if stack and stack[-1].endswith(':'): stack[-1] = stack[-1][:-1]
while stack and not stack[-1]: stack.pop() # can be empty
return stack
def init(self):
#assert self['name'] # allow unnamed variables, methods like this: "void func(void);"
a = []
self['aliases'] = []; self['parent'] = None; self['typedef'] = None
for key in 'constant reference pointer static typedefs class fundamental unresolved'.split():
self[ key ] = 0
for b in self['type'].split():
if b == '__const__': b = 'const'
a.append( b )
self['type'] = ' '.join( a )
class CppVariable( _CppVariable ):
Vars = []
def __init__(self, nameStack, **kwargs):
debug_print("trace %s"%nameStack)
if len(nameStack) and nameStack[0] == "extern":
self['extern'] = True
del nameStack[0]
else:
self['extern'] = False
_stack_ = nameStack
if "[" in nameStack: #strip off array informatin
arrayStack = nameStack[nameStack.index("["):]
if nameStack.count("[") > 1:
debug_print("Multi dimensional array")
debug_print("arrayStack=%s"%arrayStack)
nums = [x for x in arrayStack if x.isdigit()] # use a list (not a lazy filter object) so the dimensions can be iterated again when joined below
# Calculate size by multiplying all dimensions
p = 1
for n in nums:
p *= int(n)
#Multi dimensional array
self["array_size"] = p
self["multi_dimensional_array"] = 1
self["multi_dimensional_array_size"] = "x".join(nums)
else:
debug_print("Array")
if len(arrayStack) == 3:
self["array_size"] = arrayStack[1]
nameStack = nameStack[:nameStack.index("[")]
self["array"] = 1
else:
self["array"] = 0
nameStack = self._name_stack_helper( nameStack )
global doxygenCommentCache
if len(doxygenCommentCache):
self["doxygen"] = doxygenCommentCache
doxygenCommentCache = ""
debug_print( "Variable: %s"%nameStack )
self["line_number"] = detect_lineno(nameStack[0])
self["function_pointer"] = 0
if (len(nameStack) < 2): # +++
if len(nameStack) == 1: self['type'] = nameStack[0]; self['name'] = ''
else: error_print(_stack_); assert 0
elif is_function_pointer_stack(nameStack): #function pointer
self["type"] = " ".join(nameStack[:nameStack.index("(") + 2] + nameStack[nameStack.index(")") :])
self["name"] = " ".join(nameStack[nameStack.index("(") + 2 : nameStack.index(")")])
self["function_pointer"] = 1
elif ("=" in nameStack):
self["type"] = " ".join(nameStack[:nameStack.index("=") - 1])
self["name"] = nameStack[nameStack.index("=") - 1]
self["defaultValue"] = " ".join(nameStack[nameStack.index("=") + 1:]) # deprecate camelCase in dicts
self['default'] = " ".join(nameStack[nameStack.index("=") + 1:])
elif is_fundamental(nameStack[-1]) or nameStack[-1] in ['>', '<' , ':', '.']:
#Unnamed parameter
self["type"] = " ".join(nameStack)
self["name"] = ""
else: # common case
self["type"] = " ".join(nameStack[:-1])
self["name"] = nameStack[-1]
self["type"] = self["type"].replace(" :",":")
self["type"] = self["type"].replace(": ",":")
self["type"] = self["type"].replace(" <","<")
self["type"] = self["type"].replace(" >",">").replace(">>", "> >").replace(">>", "> >")
self["type"] = self["type"].replace(" ,",",")
#Optional doxygen description
try:
self["desc"] = kwargs["doxyVarDesc"][self["name"]]
except: pass
self.init()
CppVariable.Vars.append( self ) # save and resolve later
def __str__(self):
keys_white_list = ['constant','name','reference','type','static','pointer','desc', 'line_number', 'extern']
cpy = dict((k,v) for (k,v) in list(self.items()) if k in keys_white_list)
if "array_size" in self: cpy["array_size"] = self["array_size"]
return "%s"%cpy
class _CppEnum(dict):
def resolve_enum_values( self, values ):
t = int; i = 0
names = [ v['name'] for v in values ]
for v in values:
if 'value' in v:
a = v['value'].strip()
# Remove single quotes from single quoted chars (unless part of some expression)
if len(a) == 3 and a[0] == "'" and a[2] == "'":
a = v['value'] = a[1]
if a.lower().startswith("0x"):
try:
i = a = int(a , 16)
except:pass
elif a.isdigit():
i = a = int( a )
elif a in names:
for other in values:
if other['name'] == a:
v['value'] = other['value']
break
elif '"' in a or "'" in a: t = str # only if there are quotes it this a string enum
else:
try:
a = i = ord(a)
except: pass
#Allow access of what is in the file pre-convert if converted
if v['value'] != str(a):
v['raw_value'] = v['value']
v['value'] = a
else: v['value'] = i
try:
v['value'] = v['value'].replace(" < < ", " << ").replace(" >> ", " >> ")
except: pass
i += 1
return t
class CppEnum(_CppEnum):
def __init__(self, nameStack):
global doxygenCommentCache
if len(doxygenCommentCache):
self["doxygen"] = doxygenCommentCache
doxygenCommentCache = ""
if len(nameStack) == 3 and nameStack[0] == "enum":
debug_print("Created enum as just name/value")
self["name"] = nameStack[1]
self["instances"]=[nameStack[2]]
if len(nameStack) < 4 or "{" not in nameStack or "}" not in nameStack:
#Not enough stuff for an enum
debug_print("Bad enum")
return
valueList = []
self["line_number"] = detect_lineno(nameStack[0])
#Figure out what values it has
valueStack = nameStack[nameStack.index('{') + 1: nameStack.index('}')]
while len(valueStack):
tmpStack = []
if "," in valueStack:
tmpStack = valueStack[:valueStack.index(",")]
valueStack = valueStack[valueStack.index(",") + 1:]
else:
tmpStack = valueStack
valueStack = []
d = {}
if len(tmpStack) == 1: d["name"] = tmpStack[0]
elif len(tmpStack) >= 3 and tmpStack[1] == "=":
d["name"] = tmpStack[0]; d["value"] = " ".join(tmpStack[2:])
elif len(tmpStack) == 2 and tmpStack[1] == "=":
debug_print( "WARN-enum: parser missed value for %s"%tmpStack[0] )
d["name"] = tmpStack[0]
if d: valueList.append( d )
if len(valueList):
self['type'] = self.resolve_enum_values( valueList ) # returns int for standard enum
self["values"] = valueList
else:
warning_print( 'WARN-enum: empty enum %s'%nameStack )
return
#Figure out if it has a name
preBraceStack = nameStack[:nameStack.index("{")]
postBraceStack = nameStack[nameStack.index("}") + 1:]
self["typedef"] = False
if (len(preBraceStack) == 2 and "typedef" not in nameStack):
self["name"] = preBraceStack[1]
elif len(postBraceStack) and "typedef" in nameStack:
self["name"] = " ".join(postBraceStack)
self["typedef"] = True
else: warning_print( 'WARN-enum: nameless enum %s'%nameStack )
#See if there are instances of this
if "typedef" not in nameStack and len(postBraceStack):
self["instances"] = []
for var in postBraceStack:
if "," in var:
continue
self["instances"].append(var)
self["namespace"] = ""
class CppStruct(dict):
Structs = []
def __init__(self, nameStack):
if len(nameStack) >= 2: self['type'] = nameStack[1]
else: self['type'] = None
self['fields'] = []
self.Structs.append( self )
global curLine
self["line_number"] = curLine
C99_NONSTANDARD = {
'int8' : 'signed char',
'int16' : 'short int',
'int32' : 'int',
'int64' : 'int64_t', # this can be: long int (64bit), or long long int (32bit)
'uint' : 'unsigned int',
'uint8' : 'unsigned char',
'uint16' : 'unsigned short int',
'uint32' : 'unsigned int',
'uint64' : 'uint64_t', # depends on host bits
}
def standardize_fundamental( s ):
if s in C99_NONSTANDARD: return C99_NONSTANDARD[ s ]
else: return s
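# --- Illustrative sketch (added for clarity; not part of the original module) ---
# Demonstrates the intended behaviour of standardize_fundamental() on two
# hand-picked names; the inputs are assumptions chosen for illustration only.
def _example_standardize_fundamental():
    # 'uint32' is one of the nonstandard spellings above and maps to 'unsigned int'.
    assert standardize_fundamental('uint32') == 'unsigned int'
    # Names not present in C99_NONSTANDARD pass through unchanged.
    assert standardize_fundamental('double') == 'double'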
class Resolver(object):
C_FUNDAMENTAL = 'size_t unsigned signed bool char wchar short int float double long void'.split()
C_FUNDAMENTAL += 'struct union enum'.split()
SubTypedefs = {} # TODO deprecate?
NAMESPACES = []
CLASSES = {}
STRUCTS = {}
def initextra(self):
self.typedefs = {}
self.typedefs_order = []
self.classes_order = []
self.structs = Resolver.STRUCTS
self.structs_order = []
self.namespaces = Resolver.NAMESPACES # save all namespaces
self.curStruct = None
self.stack = [] # full name stack, good idea to keep both stacks? (simple stack and full stack)
self._classes_brace_level = {} # class name : level
self._structs_brace_level = {} # struct type : level
self._method_body = None
self._forward_decls = []
self._template_typenames = [] # template<typename XXX>
def current_namespace(self): return self.cur_namespace(True)
def cur_namespace(self, add_double_colon=False):
rtn = ""
i = 0
while i < len(self.nameSpaces):
rtn += self.nameSpaces[i]
if add_double_colon or i < len(self.nameSpaces) - 1: rtn += "::"
i+=1
return rtn
def guess_ctypes_type( self, string ):
pointers = string.count('*')
string = string.replace('*','')
a = string.split()
if 'unsigned' in a: u = 'u'
else: u = ''
if 'long' in a and 'double' in a: b = 'longdouble' # there is no ctypes.c_ulongdouble (this is a 64bit float?)
elif a.count('long') == 2 and 'int' in a: b = '%sint64' %u
elif a.count('long') == 2: b = '%slonglong' %u
elif 'long' in a: b = '%slong' %u
elif 'double' in a: b = 'double' # no udouble in ctypes
elif 'short' in a: b = '%sshort' %u
elif 'char' in a: b = '%schar' %u
elif 'wchar' in a: b = 'wchar'
elif 'bool' in a: b = 'bool'
elif 'float' in a: b = 'float'
elif 'int' in a: b = '%sint' %u
elif 'int8' in a: b = 'int8'
elif 'int16' in a: b = 'int16'
elif 'int32' in a: b = 'int32'
elif 'int64' in a: b = 'int64'
elif 'uint' in a: b = 'uint'
elif 'uint8' in a: b = 'uint8'
elif 'uint16' in a: b = 'uint16'
elif 'uint32' in a: b = 'uint32'
elif 'uint64' in a: b = 'uint64'
elif 'size_t' in a: b = 'size_t'
elif 'void' in a: b = 'void_p'
elif string in 'struct union'.split(): b = 'void_p' # what should be done here? don't trust struct, it could be a class, no need to expose via ctypes
else: b = 'void_p'
if not pointers: return 'ctypes.c_%s' %b
else:
x = ''
for i in range(pointers): x += 'ctypes.POINTER('
x += 'ctypes.c_%s' %b
x += ')' * pointers
return x
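# --- Illustrative note (added for clarity; not part of the original module) ---
# A couple of expected mappings for guess_ctypes_type() above, using made-up
# input strings:
#   'unsigned int *' -> 'ctypes.POINTER(ctypes.c_uint)'   (one pointer level)
#   'long long'      -> 'ctypes.c_longlong'               (two 'long' tokens)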
def resolve_type( self, string, result ): # recursive
## be careful with templates, what is inside <something*> can be a pointer but the overall type is not a pointer
## these come before a template
s = string.split('<')[0]
result[ 'constant' ] += s.split().count('const')
result[ 'static' ] += s.split().count('static')
result[ 'mutable' ] = 'mutable' in s.split()
## these come after a template
s = string.split('>')[-1]
result[ 'pointer' ] += s.count('*')
result[ 'reference' ] += s.count('&')
x = string; alias = False
for a in '* & const static mutable'.split(): x = x.replace(a,'')
for y in x.split():
if y not in self.C_FUNDAMENTAL: alias = y; break
#if alias == 'class':
# result['class'] = result['name'] # forward decl of class
# result['forward_decl'] = True
if alias == '__extension__': result['fundamental_extension'] = True
elif alias:
result['aliases'].append( alias )
if alias in C99_NONSTANDARD:
result['type'] = C99_NONSTANDARD[ alias ]
result['typedef'] = alias
result['typedefs'] += 1
elif alias in self.typedefs:
result['typedefs'] += 1
result['typedef'] = alias
self.resolve_type( self.typedefs[alias], result )
elif alias in self.classes:
klass = self.classes[alias]; result['fundamental'] = False
result['class'] = klass
result['unresolved'] = False
else: result['unresolved'] = True
else:
result['fundamental'] = True
result['unresolved'] = False
def finalize_vars(self):
for s in CppStruct.Structs: # vars within structs can be ignored if they do not resolve
for var in s['fields']: var['parent'] = s['type']
#for c in self.classes.values():
# for var in c.get_all_properties(): var['parent'] = c['name']
## RESOLVE ##
for var in CppVariable.Vars:
self.resolve_type( var['type'], var )
#if 'method' in var and var['method']['name'] == '_notifyCurrentCamera': print(var); assert 0
# then find concrete type and best guess ctypes type #
for var in CppVariable.Vars:
if not var['aliases']: #var['fundamental']:
var['ctypes_type'] = self.guess_ctypes_type( var['type'] )
else:
var['unresolved'] = False # below may test to True
if var['class']:
var['ctypes_type'] = 'ctypes.c_void_p'
else:
assert var['aliases']
tag = var['aliases'][0]
klass = None
nestedEnum = None
nestedStruct = None
nestedTypedef = None
if 'method' in var and 'parent' in list(var['method'].keys()):
klass = var['method']['parent']
if tag in var['method']['parent']._public_enums:
nestedEnum = var['method']['parent']._public_enums[ tag ]
elif tag in var['method']['parent']._public_structs:
nestedStruct = var['method']['parent']._public_structs[ tag ]
elif tag in var['method']['parent']._public_typedefs:
nestedTypedef = var['method']['parent']._public_typedefs[ tag ]
if '<' in tag: # should also contain '>'
var['template'] = tag # do not resolve templates
var['ctypes_type'] = 'ctypes.c_void_p'
var['unresolved'] = True
elif nestedEnum:
enum = nestedEnum
if enum['type'] is int:
var['ctypes_type'] = 'ctypes.c_int'
var['raw_type'] = 'int'
elif enum['type'] is str:
var['ctypes_type'] = 'ctypes.c_char_p'
var['raw_type'] = 'char*'
var['enum'] = var['method']['path'] + '::' + enum['name']
var['fundamental'] = True
elif nestedStruct:
var['ctypes_type'] = 'ctypes.c_void_p'
var['raw_type'] = var['method']['path'] + '::' + nestedStruct['type']
var['fundamental'] = False
elif nestedTypedef:
var['fundamental'] = is_fundamental( nestedTypedef )
if not var['fundamental']:
var['raw_type'] = var['method']['path'] + '::' + tag
else:
_tag = tag
if '::' in tag and tag.split('::')[0] in self.namespaces: tag = tag.split('::')[-1]
con = self.concrete_typedef( _tag )
if con:
var['concrete_type'] = con
var['ctypes_type'] = self.guess_ctypes_type( var['concrete_type'] )
elif tag in self.structs:
trace_print( 'STRUCT', var )
var['struct'] = tag
var['ctypes_type'] = 'ctypes.c_void_p'
var['raw_type'] = self.structs[tag]['namespace'] + '::' + tag
elif tag in self._forward_decls:
var['forward_declared'] = tag
var['ctypes_type'] = 'ctypes.c_void_p'
elif tag in self.global_enums:
enum = self.global_enums[ tag ]
if enum['type'] is int:
var['ctypes_type'] = 'ctypes.c_int'
var['raw_type'] = 'int'
elif enum['type'] is str:
var['ctypes_type'] = 'ctypes.c_char_p'
var['raw_type'] = 'char*'
var['enum'] = enum['namespace'] + enum['name']
var['fundamental'] = True
elif var['parent']:
warning_print( 'WARN unresolved %s'%_tag)
var['ctypes_type'] = 'ctypes.c_void_p'
var['unresolved'] = True
elif tag.count('::')==1:
trace_print( 'trying to find nested something in', tag )
a = tag.split('::')[0]
b = tag.split('::')[-1]
if a in self.classes: # a::b is most likely something nested in a class
klass = self.classes[ a ]
if b in klass._public_enums:
trace_print( '...found nested enum', b )
enum = klass._public_enums[ b ]
if enum['type'] is int:
var['ctypes_type'] = 'ctypes.c_int'
var['raw_type'] = 'int'
elif enum['type'] is str:
var['ctypes_type'] = 'ctypes.c_char_p'
var['raw_type'] = 'char*'
try:
if 'method' in var: var['enum'] = var['method']['path'] + '::' + enum['name']
else: # class property
var['unresolved'] = True
except:
var['unresolved'] = True
var['fundamental'] = True
else: var['unresolved'] = True # TODO klass._public_xxx
elif a in self.namespaces: # a::b can also be a nested namespace
if b in self.global_enums:
enum = self.global_enums[ b ]
trace_print(enum)
trace_print(var)
assert 0
elif b in self.global_enums: # falling back, this is a bit ugly
enum = self.global_enums[ b ]
assert a in enum['namespace'].split('::')
if enum['type'] is int:
var['ctypes_type'] = 'ctypes.c_int'
var['raw_type'] = 'int'
elif enum['type'] is str:
var['ctypes_type'] = 'ctypes.c_char_p'
var['raw_type'] = 'char*'
var['fundamental'] = True
else: # boost::gets::crazy
trace_print('NAMESPACES', self.namespaces)
trace_print( a, b )
trace_print( '---- boost gets crazy ----' )
var['ctypes_type'] = 'ctypes.c_void_p'
var['unresolved'] = True
elif 'namespace' in var and self.concrete_typedef(var['namespace']+tag):
#print( 'TRYING WITH NS', var['namespace'] )
con = self.concrete_typedef( var['namespace']+tag )
if con:
var['typedef'] = var['namespace']+tag
var['type'] = con
if 'struct' in con.split():
var['raw_type'] = var['typedef']
var['ctypes_type'] = 'ctypes.c_void_p'
else:
self.resolve_type( var['type'], var )
var['ctypes_type'] = self.guess_ctypes_type( var['type'] )
elif '::' in var:
var['ctypes_type'] = 'ctypes.c_void_p'
var['unresolved'] = True
elif tag in self.SubTypedefs: # TODO remove SubTypedefs
if 'property_of_class' in var or 'property_of_struct' in var:
trace_print( 'class:', self.SubTypedefs[ tag ], 'tag:', tag )
var['typedef'] = self.SubTypedefs[ tag ] # class name
var['ctypes_type'] = 'ctypes.c_void_p'
else:
trace_print( "WARN-this should almost never happen!" )
trace_print( var ); trace_print('-'*80)
var['unresolved'] = True
elif tag in self._template_typenames:
var['typename'] = tag
var['ctypes_type'] = 'ctypes.c_void_p'
var['unresolved'] = True # TODO, how to deal with templates?
elif tag.startswith('_'): # assume starting with underscore is not important for wrapping
warning_print( 'WARN unresolved %s'%_tag)
var['ctypes_type'] = 'ctypes.c_void_p'
var['unresolved'] = True
else:
trace_print( 'WARN: unknown type', var )
assert 'property_of_class' in var or 'property_of_struct' in var # only allow this case
var['unresolved'] = True
## if not resolved and is a method param, not going to wrap these methods ##
if var['unresolved'] and 'method' in var: var['method']['unresolved_parameters'] = True
# create stripped raw_type #
p = '* & const static mutable'.split() # +++ new July7: "mutable"
for var in CppVariable.Vars:
if 'raw_type' not in var:
raw = []
for x in var['type'].split():
if x not in p: raw.append( x )
var['raw_type'] = ' '.join( raw )
#if 'AutoConstantEntry' in var['raw_type']: print(var); assert 0
if var['class']:
if '::' not in var['raw_type']:
if not var['class']['parent']:
var['raw_type'] = var['class']['namespace'] + '::' + var['raw_type']
elif var['class']['parent'] in self.classes:
parent = self.classes[ var['class']['parent'] ]
var['raw_type'] = parent['namespace'] + '::' + var['class']['name'] + '::' + var['raw_type']
else:
var['unresolved'] = True
elif '::' in var['raw_type'] and var['raw_type'].split('::')[0] not in self.namespaces:
var['raw_type'] = var['class']['namespace'] + '::' + var['raw_type']
else:
var['unresolved'] = True
elif 'forward_declared' in var and 'namespace' in var:
if '::' not in var['raw_type']:
var['raw_type'] = var['namespace'] + var['raw_type']
elif '::' in var['raw_type'] and var['raw_type'].split('::')[0] in self.namespaces:
pass
else: trace_print('-'*80); trace_print(var); raise NotImplementedError
## need full name space for classes in raw type ##
if var['raw_type'].startswith( '::' ):
#print(var)
#print('NAMESPACE', var['class']['namespace'])
#print( 'PARENT NS', var['class']['parent']['namespace'] )
#assert 0
var['unresolved'] = True
if 'method' in var: var['method']['unresolved_parameters'] = True
#var['raw_type'] = var['raw_type'][2:]
# Take care of #defines and #pragmas etc
trace_print("Processing precomp_macro_buf: %s"%self._precomp_macro_buf)
for m in self._precomp_macro_buf:
macro = m.replace("<CppHeaderParser_newline_temp_replacement>\\n", "\n")
try:
if macro.lower().startswith("#define"):
trace_print("Adding #define %s"%macro)
self.defines.append(macro.split(" ", 1)[1].strip())
elif macro.lower().startswith("#if") or macro.lower().startswith("#endif") or macro.lower().startswith("#else"):
self.conditionals.append(macro)
elif macro.lower().startswith("#pragma"):
trace_print("Adding #pragma %s"%macro)
self.pragmas.append(macro.split(" ", 1)[1].strip())
elif macro.lower().startswith("#include"):
trace_print("Adding #include %s"%macro)
self.includes.append(macro.split(" ", 1)[1].strip())
else:
debug_print("Cant detect what to do with precomp macro '%s'"%macro)
except: pass
self._precomp_macro_buf = None
def concrete_typedef( self, key ):
if key not in self.typedefs:
#print( 'FAILED typedef', key )
return None
while key in self.typedefs:
prev = key
key = self.typedefs[ key ]
if '<' in key or '>' in key: return prev # stop at template
if key.startswith('std::'): return key # stop at std lib
return key
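# --- Illustrative note (added for clarity; not part of the original module) ---
# concrete_typedef() above follows typedef chains until it reaches a template,
# a std:: name, or something that is no longer a typedef.  For a hypothetical
# self.typedefs of {'Handle': 'RawHandle', 'RawHandle': 'unsigned int'},
# concrete_typedef('Handle') is expected to return 'unsigned int'; a chain that
# reaches e.g. 'std::string' stops and returns the std:: name itself.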
class _CppHeader( Resolver ):
def finalize(self):
self.finalize_vars()
# finalize classes and method returns types
for cls in list(self.classes.values()):
for meth in cls.get_all_methods():
if meth['pure_virtual']: cls['abstract'] = True
if not meth['returns_fundamental'] and meth['returns'] in C99_NONSTANDARD:
meth['returns'] = C99_NONSTANDARD[meth['returns']]
meth['returns_fundamental'] = True
elif not meth['returns_fundamental']: # describe the return type
con = None
if cls['namespace'] and '::' not in meth['returns']:
con = self.concrete_typedef( cls['namespace'] + '::' + meth['returns'] )
else: con = self.concrete_typedef( meth['returns'] )
if con:
meth['returns_concrete'] = con
meth['returns_fundamental'] = is_fundamental( con )
elif meth['returns'] in self.classes:
trace_print( 'meth returns class:', meth['returns'] )
meth['returns_class'] = True
elif meth['returns'] in self.SubTypedefs:
meth['returns_class'] = True
meth['returns_nested'] = self.SubTypedefs[ meth['returns'] ]
elif meth['returns'] in cls._public_enums:
enum = cls._public_enums[ meth['returns'] ]
meth['returns_enum'] = enum['type']
meth['returns_fundamental'] = True
if enum['type'] == int: meth['returns'] = 'int'
else: meth['returns'] = 'char*'
elif meth['returns'] in self.global_enums:
enum = self.global_enums[ meth['returns'] ]
meth['returns_enum'] = enum['type']
meth['returns_fundamental'] = True
if enum['type'] == int: meth['returns'] = 'int'
else: meth['returns'] = 'char*'
elif meth['returns'].count('::')==1:
trace_print( meth )
a,b = meth['returns'].split('::')
if a in self.namespaces:
if b in self.classes:
klass = self.classes[ b ]
meth['returns_class'] = a + '::' + b
elif '<' in b and '>' in b:
warning_print( 'WARN-can not return template: %s'%b )
meth['returns_unknown'] = True
elif b in self.global_enums:
enum = self.global_enums[ b ]
meth['returns_enum'] = enum['type']
meth['returns_fundamental'] = True
if enum['type'] == int: meth['returns'] = 'int'
else: meth['returns'] = 'char*'
else: trace_print( a, b); trace_print( meth); meth['returns_unknown'] = True # +++
elif a in self.classes:
klass = self.classes[ a ]
if b in klass._public_enums:
trace_print( '...found nested enum', b )
enum = klass._public_enums[ b ]
meth['returns_enum'] = enum['type']
meth['returns_fundamental'] = True
if enum['type'] == int: meth['returns'] = 'int'
else: meth['returns'] = 'char*'
elif b in klass._public_forward_declares:
meth['returns_class'] = True
elif b in klass._public_typedefs:
typedef = klass._public_typedefs[ b ]
meth['returns_fundamental'] = is_fundamental( typedef )
else:
trace_print( meth ) # should be a nested class, TODO fix me.
meth['returns_unknown'] = True
elif '::' in meth['returns']:
trace_print('TODO namespace or extra nested return:', meth)
meth['returns_unknown'] = True
else:
trace_print( 'WARN: UNKNOWN RETURN', meth['name'], meth['returns'])
meth['returns_unknown'] = True
if meth["returns"].startswith(": : "):
meth["returns"] = meth["returns"].replace(": : ", "::")
for cls in list(self.classes.values()):
methnames = cls.get_all_method_names()
pvm = cls.get_all_pure_virtual_methods()
for d in cls['inherits']:
c = d['class']
a = d['access'] # do not depend on this to be 'public'
trace_print( 'PARENT CLASS:', c )
if c not in self.classes: trace_print('WARN: parent class not found')
if c in self.classes and self.classes[c]['abstract']:
p = self.classes[ c ]
for meth in p.get_all_methods(): #p["methods"]["public"]:
trace_print( '\t\tmeth', meth['name'], 'pure virtual', meth['pure_virtual'] )
if meth['pure_virtual'] and meth['name'] not in methnames: cls['abstract'] = True; break
def evaluate_struct_stack(self):
#print( 'eval struct stack', self.nameStack )
#if self.braceDepth != len(self.nameSpaces): return
struct = CppStruct(self.nameStack)
struct["namespace"] = self.cur_namespace()
self.structs[ struct['type'] ] = struct
self.structs_order.append( struct )
if self.curClass:
struct['parent'] = self.curClass
klass = self.classes[ self.curClass ]
klass['structs'][self.curAccessSpecifier].append( struct )
if self.curAccessSpecifier == 'public': klass._public_structs[ struct['type'] ] = struct
self.curStruct = struct
self._structs_brace_level[ struct['type'] ] = self.braceDepth
def parse_method_type( self, stack ):
trace_print( 'meth type info', stack )
if stack[0] in ':;' and stack[1] != ':': stack = stack[1:]
info = {
'debug': ' '.join(stack).replace(' : : ', '::' ).replace(' < ', '<' ).replace(' > ', '> ' ).replace(" >",">").replace(">>", "> >").replace(">>", "> >"),
'class':None,
'namespace':self.cur_namespace(add_double_colon=True),
}
for tag in 'defined pure_virtual operator constructor destructor extern template virtual static explicit inline friend returns returns_pointer returns_fundamental returns_class'.split(): info[tag]=False
header = stack[ : stack.index('(') ]
header = ' '.join( header )
header = header.replace(' : : ', '::' )
header = header.replace(' < ', '<' )
header = header.replace(' > ', '> ' )
header = header.strip()
if '{' in stack:
info['defined'] = True
self._method_body = self.braceDepth + 1
trace_print( 'NEW METHOD WITH BODY', self.braceDepth )
elif stack[-1] == ';':
info['defined'] = False
self._method_body = None # not a great idea to be clearing here
else: assert 0
if len(stack) > 3 and stack[-1] == ';' and stack[-2] == '0' and stack[-3] == '=':
info['pure_virtual'] = True
r = header.split()
name = None
if 'operator' in stack: # rare case op overload defined outside of class
op = stack[ stack.index('operator')+1 : stack.index('(') ]
op = ''.join(op)
if not op:
if " ".join(['operator', '(', ')', '(']) in " ".join(stack):
op = "()"
else:
trace_print( 'Error parsing operator')
return None
info['operator'] = op
name = 'operator' + op
a = stack[ : stack.index('operator') ]
elif r:
name = r[-1]
a = r[ : -1 ] # strip name
if name is None: return None
#if name.startswith('~'): name = name[1:]
while a and a[0] == '}': # strip - can have multiple } }
a = a[1:]
if '::' in name:
#klass,name = name.split('::') # methods can be defined outside of class
klass = name[ : name.rindex('::') ]
name = name.split('::')[-1]
info['class'] = klass
if klass in self.classes and not self.curClass:
#Class function defined outside the class
return None
# info['name'] = name
#else: info['name'] = name
if name.startswith('~'):
info['destructor'] = True
name = name[1:]
elif not a or (name == self.curClass and len(self.curClass)):
info['constructor'] = True
info['name'] = name
for tag in 'extern virtual static explicit inline friend'.split():
if tag in a: info[ tag ] = True; a.remove( tag ) # inplace
if 'template' in a:
a.remove('template')
b = ' '.join( a )
if '>' in b:
info['template'] = b[ : b.index('>')+1 ]
info['returns'] = b[ b.index('>')+1 : ] # find return type, could be incorrect... TODO
if '<typename' in info['template'].split():
typname = info['template'].split()[-1]
typname = typname[ : -1 ] # strip '>'
if typname not in self._template_typenames: self._template_typenames.append( typname )
else: info['returns'] = ' '.join( a )
else: info['returns'] = ' '.join( a )
info['returns'] = info['returns'].replace(' <', '<').strip()
## be careful with templates, do not count pointers inside template
info['returns_pointer'] = info['returns'].split('>')[-1].count('*')
if info['returns_pointer']: info['returns'] = info['returns'].replace('*','').strip()
info['returns_reference'] = '&' in info['returns']
if info['returns']: info['returns'] = info['returns'].replace('&','').strip()
a = []
for b in info['returns'].split():
if b == '__const__': info['returns_const'] = True
elif b == 'const': info['returns_const'] = True
else: a.append( b )
info['returns'] = ' '.join( a )
info['returns_fundamental'] = is_fundamental( info['returns'] )
return info
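# --- Illustrative note (added for clarity; not part of the original module) ---
# For a made-up declaration stack such as
#   ['virtual', 'int', 'getCount', '(', ')', 'const', ';']
# parse_method_type() above is expected to report name 'getCount', virtual True,
# returns 'int', and defined False (the stack ends in ';' rather than a body).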
def evaluate_method_stack(self):
if self.curStruct:
trace_print( 'WARN - struct contains methods - skipping' )
trace_print( self.stack )
assert 0
info = self.parse_method_type( self.stack )
if info:
if info[ 'class' ] and info['class'] in self.classes: # case where methods are defined outside of class
newMethod = CppMethod(self.nameStack, info['name'], info, self.curTemplate)
klass = self.classes[ info['class'] ]
klass[ 'methods' ][ 'public' ].append( newMethod )
newMethod['parent'] = klass
if klass['namespace']: newMethod['path'] = klass['namespace'] + '::' + klass['name']
else: newMethod['path'] = klass['name']
elif self.curClass: # normal case
newMethod = CppMethod(self.nameStack, self.curClass, info, self.curTemplate)
klass = self.classes[self.curClass]
klass['methods'][self.curAccessSpecifier].append(newMethod)
newMethod['parent'] = klass
if klass['namespace']: newMethod['path'] = klass['namespace'] + '::' + klass['name']
else: newMethod['path'] = klass['name']
else: #non class functions
debug_print("FREE FUNCTION")
newMethod = CppMethod(self.nameStack, None, info, self.curTemplate)
self.functions.append(newMethod)
global parseHistory
parseHistory.append({"braceDepth": self.braceDepth, "item_type": "method", "item": newMethod})
else:
trace_print( 'free function?', self.nameStack )
self.stack = []
def _parse_typedef( self, stack, namespace='' ):
if not stack or 'typedef' not in stack: return
stack = list( stack ) # copy just to be safe
if stack[-1] == ';': stack.pop()
while stack and stack[-1].isdigit(): stack.pop() # throw away array size for now
idx = stack.index('typedef')
if stack[-1] == "]":
try:
name = namespace + "".join(stack[-4:])
# Strip off the array part so the rest of the parsing is better
stack = stack[:-3]
except:
name = namespace + stack[-1]
else:
name = namespace + stack[-1]
s = ''
for a in stack[idx+1:-1]:
if a == '{': break
if not s or s[-1] in ':<>' or a in ':<>': s += a # keep compact
else: s += ' ' + a # spacing
r = {'name':name, 'raw':s, 'type':s}
if not is_fundamental(s):
if 'struct' in s.split(): pass # TODO is this right? "struct ns::something"
elif '::' not in s: s = namespace + s # only add the current name space if no namespace given
r['type'] = s
if s: return r
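# --- Illustrative note (added for clarity; not part of the original module) ---
# For a made-up stack ['typedef', 'unsigned', 'int', 'uint32_cpp', ';'] and an
# empty namespace, _parse_typedef() above is expected to return
#   {'name': 'uint32_cpp', 'raw': 'unsigned int', 'type': 'unsigned int'}.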
def evaluate_typedef(self):
ns = self.cur_namespace(add_double_colon=True)
res = self._parse_typedef( self.stack, ns )
if res:
name = res['name']
self.typedefs[ name ] = res['type']
if name not in self.typedefs_order: self.typedefs_order.append( name )
def evaluate_property_stack(self):
global parseHistory
assert self.stack[-1] == ';'
debug_print( "trace" )
if self.nameStack[0] == 'typedef':
if self.curClass:
typedef = self._parse_typedef( self.stack )
name = typedef['name']
klass = self.classes[ self.curClass ]
klass[ 'typedefs' ][ self.curAccessSpecifier ].append( name )
if self.curAccessSpecifier == 'public': klass._public_typedefs[ name ] = typedef['type']
Resolver.SubTypedefs[ name ] = self.curClass
else: assert 0
elif self.curStruct or self.curClass:
if len(self.nameStack) == 1:
#See if we can de-anonymize the type
filteredParseHistory = [h for h in parseHistory if h["braceDepth"] == self.braceDepth]
if len(filteredParseHistory) and filteredParseHistory[-1]["item_type"] == "class":
self.nameStack.insert(0, filteredParseHistory[-1]["item"]["name"])
debug_print("DEANONYMOIZING %s to type '%s'"%(self.nameStack[1], self.nameStack[0]))
if "," in self.nameStack: #Maybe we have a variable list
#Figure out which part is the variable separator, keeping in mind templates and function pointers
#First find left most comma outside of a > and )
leftMostComma = 0;
for i in range(0, len(self.nameStack)):
name = self.nameStack[i]
if name in (">", ")"): leftMostComma = 0
if leftMostComma == 0 and name == ",": leftMostComma = i
# Is it really a list of variables?
if leftMostComma != 0:
trace_print("Multiple variables for namestack in %s. Separating processing"%self.nameStack)
orig_nameStack = self.nameStack[:]
orig_stack = self.stack[:]
type_nameStack = orig_nameStack[:leftMostComma-1]
for name in orig_nameStack[leftMostComma - 1::2]:
self.nameStack = type_nameStack + [name]
self.stack = orig_stack[:] # Not maintained for mucking, but on this path it doesn't matter
self.evaluate_property_stack()
return
newVar = CppVariable(self.nameStack)
newVar['namespace'] = self.current_namespace()
if self.curStruct:
self.curStruct[ 'fields' ].append( newVar )
newVar['property_of_struct'] = self.curStruct
elif self.curClass:
klass = self.classes[self.curClass]
klass["properties"][self.curAccessSpecifier].append(newVar)
newVar['property_of_class'] = klass['name']
parseHistory.append({"braceDepth": self.braceDepth, "item_type": "variable", "item": newVar})
else:
debug_print( "Found Global variable" )
newVar = CppVariable(self.nameStack)
self.variables.append(newVar)
self.stack = [] # CLEAR STACK
def evaluate_class_stack(self):
#don't support subclasses today
#print( 'eval class stack', self.nameStack )
parent = self.curClass
if self.braceDepth > len( self.nameSpaces) and parent:
trace_print( 'HIT NESTED SUBCLASS' )
self.accessSpecifierStack.append(self.curAccessSpecifier)
elif self.braceDepth != len(self.nameSpaces):
error_print( 'ERROR: WRONG BRACE DEPTH' )
return
# When dealing with typedefed structs, get rid of typedef keyword to handle later on
if self.nameStack[0] == "typedef":
del self.nameStack[0]
if len(self.nameStack) == 1:
self.anon_struct_counter += 1
# We can't handle more than 1 anonymous struct, so name them uniquely
self.nameStack.append("<anon-struct-%d>"%self.anon_struct_counter)
if self.nameStack[0] == "class":
self.curAccessSpecifier = 'private'
else:#struct
self.curAccessSpecifier = 'public'
debug_print("curAccessSpecifier changed/defaulted to %s"%self.curAccessSpecifier)
if self.nameStack[0] == "union":
newClass = CppUnion(self.nameStack)
self.anon_union_counter = [self.braceDepth, 2]
trace_print( 'NEW UNION', newClass['name'] )
else:
newClass = CppClass(self.nameStack, self.curTemplate)
trace_print( 'NEW CLASS', newClass['name'] )
newClass["declaration_method"] = self.nameStack[0]
self.classes_order.append( newClass ) # good idea to save ordering
self.stack = [] # fixes if class declared with ';' in closing brace
if parent:
newClass["namespace"] = self.classes[ parent ]['namespace'] + '::' + parent
newClass['parent'] = parent
self.classes[ parent ]['nested_classes'].append( newClass )
## supports nested classes with the same name ##
self.curClass = key = parent+'::'+newClass['name']
self._classes_brace_level[ key ] = self.braceDepth
elif newClass['parent']: # nested class defined outside of parent. A::B {...}
parent = newClass['parent']
newClass["namespace"] = self.classes[ parent ]['namespace'] + '::' + parent
self.classes[ parent ]['nested_classes'].append( newClass )
## supports nested classes with the same name ##
self.curClass = key = parent+'::'+newClass['name']
self._classes_brace_level[ key ] = self.braceDepth
else:
newClass["namespace"] = self.cur_namespace()
key = newClass['name']
self.curClass = newClass["name"]
self._classes_brace_level[ newClass['name'] ] = self.braceDepth
if not key.endswith("::") and not key.endswith(" ") and len(key) != 0:
if key in self.classes:
trace_print( 'ERROR name collision:', key )
self.classes[key].show()
trace_print('-'*80)
newClass.show()
assert key not in self.classes # namespace collision
self.classes[ key ] = newClass
global parseHistory
parseHistory.append({"braceDepth": self.braceDepth, "item_type": "class", "item": newClass})
def evalute_forward_decl(self):
trace_print( 'FORWARD DECL', self.nameStack )
assert self.nameStack[0] in ('class', 'struct')
name = self.nameStack[-1]
if self.curClass:
klass = self.classes[ self.curClass ]
klass['forward_declares'][self.curAccessSpecifier].append( name )
if self.curAccessSpecifier == 'public': klass._public_forward_declares.append( name )
else: self._forward_decls.append( name )
class CppHeader( _CppHeader ):
IGNORE_NAMES = '__extension__'.split()
def show(self):
for className in list(self.classes.keys()):self.classes[className].show()
def __init__(self, headerFileName, argType="file", **kwargs):
## reset global state ##
global doxygenCommentCache
doxygenCommentCache = ""
CppVariable.Vars = []
CppStruct.Structs = []
if (argType == "file"):
self.headerFileName = os.path.expandvars(headerFileName)
self.mainClass = os.path.split(self.headerFileName)[1][:-2]
headerFileStr = ""
elif argType == "string":
self.headerFileName = ""
self.mainClass = "???"
headerFileStr = headerFileName
else:
raise Exception("Arg type must be either file or string")
self.curClass = ""
# nested classes have parent::nested, but no extra namespace,
# this keeps the API compatible, TODO proper namespace for everything.
Resolver.CLASSES = {}
self.classes = Resolver.CLASSES
#Functions that are not part of a class
self.functions = []
self.pragmas = []
self.defines = []
self.includes = []
self.conditionals = []
self._precomp_macro_buf = [] #for internal purposes, will end up filling out pragmas and defines at the end
self.enums = []
self.variables = []
self.global_enums = {}
self.nameStack = []
self.nameSpaces = []
self.curAccessSpecifier = 'private' # private is default
self.curTemplate = None
self.accessSpecifierStack = []
self.accessSpecifierScratch = []
debug_print("curAccessSpecifier changed/defaulted to %s"%self.curAccessSpecifier)
self.initextra()
# Old namestacks for a given level
self.nameStackHistory = []
self.anon_struct_counter = 0
self.anon_union_counter = [-1, 0]
self.templateRegistry = []
if (len(self.headerFileName)):
fd = open(self.headerFileName)
headerFileStr = "".join(fd.readlines())
fd.close()
# Make sure supportedAccessSpecifier are sane
for i in range(0, len(supportedAccessSpecifier)):
if " " not in supportedAccessSpecifier[i]: continue
supportedAccessSpecifier[i] = re.sub("[ ]+", " ", supportedAccessSpecifier[i]).strip()
# Strip out template declarations
templateSectionsToSliceOut = []
try:
for m in re.finditer("template[\t ]*<[^>]*>", headerFileStr):
start = m.start()
# Search for the final '>' which may or may not be caught in the case of nested <>'s
for i in range(start, len(headerFileStr)):
if headerFileStr[i] == '<':
firstBracket = i
break
ltgtStackCount = 1
#Now look for the final '>'
for i in range(firstBracket + 1, len(headerFileStr)):
if headerFileStr[i] == '<':
ltgtStackCount += 1
elif headerFileStr[i] == '>':
ltgtStackCount -= 1
if ltgtStackCount == 0:
end = i
break
templateSectionsToSliceOut.append((start, end))
# Now strip out all instances of the template
templateSectionsToSliceOut.reverse()
for tslice in templateSectionsToSliceOut:
# Replace the template symbol with a single symbol
template_symbol="CppHeaderParser_template_%d"%len(self.templateRegistry)
self.templateRegistry.append(headerFileStr[tslice[0]: tslice[1]+1])
newlines = headerFileStr[tslice[0]: tslice[1]].count("\n") * "\n" #Keep line numbers the same
headerFileStr = headerFileStr[:tslice[0]] + newlines + " " + template_symbol + " " + headerFileStr[tslice[1] + 1:]
except:
pass
# Change multi-line #defines and expressions to single lines, maintaining line numbers
# Based from http://stackoverflow.com/questions/2424458/regular-expression-to-match-cs-multiline-preprocessor-statements
matches = re.findall(r'(?m)^(?:.*\\\r?\n)+.*$', headerFileStr)
is_define = re.compile(r'[ \t\v]*#[Dd][Ee][Ff][Ii][Nn][Ee]')
for m in matches:
#Keep the newlines so that the line count doesn't break
num_newlines = len([a for a in m if a=="\n"])
if is_define.match(m):
new_m = m.replace("\n", "<CppHeaderParser_newline_temp_replacement>\\n")
else:
# Just expression taking up multiple lines, make it take 1 line for easier parsing
new_m = m.replace("\\\n", " ")
if (num_newlines > 0):
new_m += "\n"*(num_newlines)
headerFileStr = headerFileStr.replace(m, new_m)
#Filter out Extern "C" statements. These are order dependent
matches = re.findall(re.compile(r'extern[\t ]+"[Cc]"[\t \n\r]*{', re.DOTALL), headerFileStr)
for m in matches:
#Keep the newlines so that the line count doesn't break
num_newlines = len([a for a in m if a=="\n"])
headerFileStr = headerFileStr.replace(m, "\n" * num_newlines)
headerFileStr = re.sub(r'extern[ ]+"[Cc]"[ ]*', "", headerFileStr)
#Filter out any ignore symbols that end with "()" to account for #define magic functions
for ignore in ignoreSymbols:
if not ignore.endswith("()"): continue
while True:
locStart = headerFileStr.find(ignore[:-1])
if locStart == -1:
break;
locEnd = None
#Now walk till we find the last paren and account for sub parens
parenCount = 1
inQuotes = False
for i in range(locStart + len(ignore) - 1, len(headerFileStr)):
c = headerFileStr[i]
if not inQuotes:
if c == "(":
parenCount += 1
elif c == ")":
parenCount -= 1
elif c == '"':
inQuotes = True
if parenCount == 0:
locEnd = i + 1
break;
else:
if c == '"' and headerFileStr[i-1] != '\\':
inQuotes = False
if locEnd:
#Strip it out but keep the linecount the same so line numbers are right
match_str = headerFileStr[locStart:locEnd]
debug_print("Striping out '%s'"%match_str)
num_newlines = len([a for a in match_str if a=="\n"])
headerFileStr = headerFileStr.replace(headerFileStr[locStart:locEnd], "\n"*num_newlines)
self.braceDepth = 0
lex.lex()
lex.input(headerFileStr)
global curLine
global curChar
curLine = 0
curChar = 0
try:
while True:
tok = lex.token()
if not tok: break
if self.anon_union_counter[0] == self.braceDepth and self.anon_union_counter[1]:
self.anon_union_counter[1] -= 1
tok.value = TagStr(tok.value, lineno=tok.lineno)
#debug_print("TOK: %s"%tok)
if tok.type == 'NAME' and tok.value in self.IGNORE_NAMES: continue
if tok.type != 'TEMPLATE_NAME':
self.stack.append( tok.value )
curLine = tok.lineno
curChar = tok.lexpos
if (tok.type in ('PRECOMP_MACRO', 'PRECOMP_MACRO_CONT')):
debug_print("PRECOMP: %s"%tok)
self._precomp_macro_buf.append(tok.value)
self.stack = []
self.nameStack = []
continue
if tok.type == 'TEMPLATE_NAME':
try:
templateId = int(tok.value.replace("CppHeaderParser_template_",""))
self.curTemplate = self.templateRegistry[templateId]
except: pass
if (tok.type == 'OPEN_BRACE'):
if len(self.nameStack) >= 2 and is_namespace(self.nameStack): # namespace {} with no name used in boost, this sets default?
if self.nameStack[1] == "__IGNORED_NAMESPACE__CppHeaderParser__":#Used in filtering extern "C"
self.nameStack[1] = ""
self.nameSpaces.append(self.nameStack[1])
ns = self.cur_namespace(); self.stack = []
if ns not in self.namespaces: self.namespaces.append( ns )
# Detect special condition of macro magic before class declaration so we
# can filter it out
if 'class' in self.nameStack and self.nameStack[0] != 'class':
classLocationNS = self.nameStack.index("class")
classLocationS = self.stack.index("class")
if "(" not in self.nameStack[classLocationNS:]:
debug_print("keyword 'class' found in unexpected location in nameStack, must be following origNameStack = self.nameStack
origStack = self.stack
#Process first part of stack which is probably #define macro magic and may cause issues
self.nameStack = self.nameStack[:classLocationNS]
self.stack = self.stack[:classLocationS]
try:
self.evaluate_stack()
except:
debug_print("Error processing #Process rest of stack
self.nameStack = origNameStack[classLocationNS:]
self.stack = origStack[classLocationS:]
if len(self.nameStack) and not is_enum_namestack(self.nameStack):
self.evaluate_stack()
else:
self.nameStack.append(tok.value)
if self.stack and self.stack[0] == 'class': self.stack = []
self.braceDepth += 1
elif (tok.type == 'CLOSE_BRACE'):
if self.braceDepth == 0:
continue
if (self.braceDepth == len(self.nameSpaces)):
tmp = self.nameSpaces.pop()
self.stack = [] # clear stack when namespace ends?
if len(self.nameStack) and is_enum_namestack(self.nameStack):
self.nameStack.append(tok.value)
elif self.braceDepth < 10:
self.evaluate_stack()
else:
self.nameStack = []
self.braceDepth -= 1
#self.stack = []; print 'BRACE DEPTH', self.braceDepth, 'NS', len(self.nameSpaces)
if self.curClass: debug_print( 'CURBD %s'%self._classes_brace_level[ self.curClass ] )
if (self.braceDepth == 0) or (self.curClass and self._classes_brace_level[self.curClass]==self.braceDepth):
trace_print( 'END OF CLASS DEF' )
if self.accessSpecifierStack:
self.curAccessSpecifier = self.accessSpecifierStack[-1]
self.accessSpecifierStack = self.accessSpecifierStack[:-1]
if self.curClass and self.classes[ self.curClass ]['parent']: self.curClass = self.classes[ self.curClass ]['parent']
else: self.curClass = ""; #self.curStruct = None
self.stack = []
#if self.curStruct: self.curStruct = None
if self.braceDepth == 0 or (self.curStruct and self._structs_brace_level[self.curStruct['type']]==self.braceDepth):
trace_print( 'END OF STRUCT DEF' )
self.curStruct = None
if self._method_body and (self.braceDepth + 1) <= self._method_body:
self._method_body = None; self.stack = []; self.nameStack = []; trace_print( 'FORCE CLEAR METHBODY' )
if (tok.type == 'OPEN_PAREN'):
self.nameStack.append(tok.value)
elif (tok.type == 'CLOSE_PAREN'):
self.nameStack.append(tok.value)
elif (tok.type == 'OPEN_SQUARE_BRACKET'):
self.nameStack.append(tok.value)
elif (tok.type == 'CLOSE_SQUARE_BRACKET'):
self.nameStack.append(tok.value)
elif (tok.type == 'TAB'): pass
elif (tok.type == 'EQUALS'):
self.nameStack.append(tok.value)
elif (tok.type == 'COMMA'):
self.nameStack.append(tok.value)
elif (tok.type == 'BACKSLASH'):
self.nameStack.append(tok.value)
elif (tok.type == 'DIVIDE'):
self.nameStack.append(tok.value)
elif (tok.type == 'PIPE'):
self.nameStack.append(tok.value)
elif (tok.type == 'PERCENT'):
self.nameStack.append(tok.value)
elif (tok.type == 'CARET'):
self.nameStack.append(tok.value)
elif (tok.type == 'EXCLAMATION'):
self.nameStack.append(tok.value)
elif (tok.type == 'SQUOTE'): pass
elif (tok.type == 'NUMBER' or tok.type == 'FLOAT_NUMBER'):
self.nameStack.append(tok.value)
elif (tok.type == 'MINUS'):
self.nameStack.append(tok.value)
elif (tok.type == 'PLUS'):
self.nameStack.append(tok.value)
elif (tok.type == 'STRING_LITERAL'):
self.nameStack.append(tok.value)
elif (tok.type == 'NAME' or tok.type == 'AMPERSTAND' or tok.type == 'ASTERISK' or tok.type == 'CHAR_LITERAL'):
if tok.value in ignoreSymbols:
debug_print("Ignore symbol %s"%tok.value)
elif (tok.value == 'class'):
self.nameStack.append(tok.value)
elif tok.value in supportedAccessSpecifier:
if len(self.nameStack) and self.nameStack[0] in ("class", "struct", "union"):
self.nameStack.append(tok.value)
elif self.braceDepth == len(self.nameSpaces) + 1 or self.braceDepth == (len(self.nameSpaces) + len(self.curClass.split("::"))):
self.curAccessSpecifier = tok.value;
self.accessSpecifierScratch.append(tok.value)
debug_print("curAccessSpecifier updated to %s"%self.curAccessSpecifier)
self.stack = []
else:
self.nameStack.append(tok.value)
if self.anon_union_counter[0] == self.braceDepth:
self.anon_union_counter = [-1, 0]
elif (tok.type == 'COLON'):
#Don't want colon to be first in stack
if len(self.nameStack) == 0:
self.accessSpecifierScratch = []
continue
# Handle situation where access specifiers can be multi words such as "public slots"
jns = " ".join(self.accessSpecifierScratch + self.nameStack)
if jns in supportedAccessSpecifier:
self.curAccessSpecifier = jns;
debug_print("curAccessSpecifier updated to %s"%self.curAccessSpecifier)
self.stack = []
self.nameStack = []
else:
self.nameStack.append(tok.value)
self.accessSpecifierScratch = []
elif (tok.type == 'SEMI_COLON'):
if self.anon_union_counter[0] == self.braceDepth and self.anon_union_counter[1]:
debug_print("Creating anonymous union")
#Force the processing of an anonymous union
saved_namestack = self.nameStack[:]
saved_stack = self.stack[:]
self.nameStack = [""]
self.stack = self.nameStack + [";"]
self.nameStack = self.nameStack[0:1]
debug_print("pre eval anon stack")
self.evaluate_stack( tok.type )
debug_print("post eval anon stack")
self.nameStack = saved_namestack
self.stack = saved_stack
self.anon_union_counter = [-1, 0];
if (self.braceDepth < 10): self.evaluate_stack( tok.type )
self.stack = []
self.nameStack = []
except:
if (debug): raise
raise CppParseError("Not able to parse %s on line %d evaluating \"%s\"\nError around: %s"
% (self.headerFileName, tok.lineno, tok.value, " ".join(self.nameStack)))
self.finalize()
global parseHistory
parseHistory = []
# Delete some temporary variables
for key in ["_precomp_macro_buf", "nameStack", "nameSpaces", "curAccessSpecifier", "accessSpecifierStack",
"accessSpecifierScratch", "nameStackHistory", "anon_struct_counter", "anon_union_counter",
"_classes_brace_level", "_forward_decls", "stack", "mainClass", "curStruct", "_template_typenames",
"_method_body", "braceDepth", "_structs_brace_level", "typedefs_order", "curTemplate", "templateRegistry"]:
del self.__dict__[key]
def evaluate_stack(self, token=None):
global doxygenCommentCache
self.nameStack = filter_out_attribute_keyword(self.nameStack)
self.stack = filter_out_attribute_keyword(self.stack)
nameStackCopy = self.nameStack[:]
debug_print( "Evaluating stack %s\n BraceDepth: %s (called from %d)" %(self.nameStack,self.braceDepth, inspect.currentframe().f_back.f_lineno))
#Handle special case of overloading operator ()
if "operator()(" in "".join(self.nameStack):
operator_index = self.nameStack.index("operator")
self.nameStack.pop(operator_index + 2)
self.nameStack.pop(operator_index + 1)
self.nameStack[operator_index] = "operator()"
if (len(self.curClass)):
debug_print( "%s (%s) "%(self.curClass, self.curAccessSpecifier))
else:
debug_print( "<anonymous> (%s) "%self.curAccessSpecifier)
#Filter special case of array with casting in it
try:
bracePos = self.nameStack.index("[")
parenPos = self.nameStack.index("(")
if bracePos == parenPos - 1:
endParen = self.nameStack.index(")")
self.nameStack = self.nameStack[:bracePos + 1] + self.nameStack[endParen + 1:]
debug_print("Filtered namestack to=%s"%self.nameStack)
except: pass
#if 'typedef' in self.nameStack: self.evaluate_typedef() # allows nested typedefs, probably a bad idea
if (not self.curClass and 'typedef' in self.nameStack and
(('struct' not in self.nameStack and 'union' not in self.nameStack) or self.stack[-1] == ";") and
not is_enum_namestack(self.nameStack)):
trace_print('STACK', self.stack)
self.evaluate_typedef()
return
elif (len(self.nameStack) == 0):
debug_print( "trace" )
debug_print( "(Empty Stack)" )
return
elif (self.nameStack[0] == "namespace"):
#Taken care of outside of here
pass
elif len(self.nameStack) == 2 and self.nameStack[0] == "friend":#friend class declaration
pass
elif len(self.nameStack) >= 2 and self.nameStack[0] == 'using' and self.nameStack[1] == 'namespace': pass # TODO
elif is_enum_namestack(self.nameStack):
debug_print( "trace" )
self.evaluate_enum_stack()
elif self._method_body and (self.braceDepth + 1) > self._method_body: trace_print( 'INSIDE METHOD DEF' )
elif is_method_namestack(self.stack) and not self.curStruct and '(' in self.nameStack:
debug_print( "trace" )
if self.braceDepth > 0:
if "{" in self.stack and self.stack[0] != '{' and self.stack[-1] == ';' and self.braceDepth == 1:
#Special case of a method defined outside a class that has a body
pass
else:
self.evaluate_method_stack()
else:
#Free function
self.evaluate_method_stack()
elif (len(self.nameStack) == 1 and len(self.nameStackHistory) > self.braceDepth
and (self.nameStackHistory[self.braceDepth][0][0:2] == ["typedef", "struct"] or
self.nameStackHistory[self.braceDepth][0][0:2] == ["typedef", "union"])):
# Look for the name of a typedef struct: typedef struct {...} StructName; (or a union) so it can be renamed
debug_print("found the naming of a union")
type_name_to_rename = self.nameStackHistory[self.braceDepth][1]
new_name = self.nameStack[0]
type_to_rename = self.classes[type_name_to_rename]
type_to_rename["name"] = self.nameStack[0]
#Now re install it in its new location
self.classes[new_name] = type_to_rename
del self.classes[type_name_to_rename]
elif is_property_namestack(self.nameStack) and self.stack[-1] == ';':
debug_print( "trace" )
if self.nameStack[0] in ('class', 'struct') and len(self.stack) == 3: self.evalute_forward_decl()
elif len(self.nameStack) >= 2 and (self.nameStack[0]=='friend' and self.nameStack[1]=='class'): pass
else: self.evaluate_property_stack() # catches class props and structs in a namespace
elif self.nameStack[0] in ("class", "struct", "union") or self.nameStack[0] == 'typedef' and self.nameStack[1] in ('struct', 'union'):
#Parsing a union can reuse much of the class parsing
debug_print( "trace" )
self.evaluate_class_stack()
elif not self.curClass:
debug_print( "trace" )
if is_enum_namestack(self.nameStack): self.evaluate_enum_stack()
elif self.curStruct and self.stack[-1] == ';': self.evaluate_property_stack() # this catches fields of global structs
self.nameStack = []
doxygenCommentCache = ""
elif (self.braceDepth < 1):
debug_print( "trace" )
#Ignore global stuff for now
debug_print( "Global stuff: %s"%self.nameStack )
self.nameStack = []
doxygenCommentCache = ""
elif (self.braceDepth > len(self.nameSpaces) + 1):
debug_print( "trace" )
self.nameStack = []
doxygenCommentCache = ""
try:
self.nameStackHistory[self.braceDepth] = (nameStackCopy, self.curClass)
except:
self.nameStackHistory.append((nameStackCopy, self.curClass))
self.nameStack = [] # it's a little confusing to have some if/else branches above return and others not, and then clearing the nameStack down here
doxygenCommentCache = ""
self.curTemplate = None
def evaluate_enum_stack(self):
debug_print( "evaluating enum" )
newEnum = CppEnum(self.nameStack)
if len(list(newEnum.keys())):
if len(self.curClass):
newEnum["namespace"] = self.cur_namespace(False)
klass = self.classes[self.curClass]
klass["enums"][self.curAccessSpecifier].append(newEnum)
if self.curAccessSpecifier == 'public' and 'name' in newEnum: klass._public_enums[ newEnum['name'] ] = newEnum
else:
newEnum["namespace"] = self.cur_namespace(True)
self.enums.append(newEnum)
if 'name' in newEnum and newEnum['name']: self.global_enums[ newEnum['name'] ] = newEnum
#This enum has instances, turn them into properties
if "instances" in newEnum:
instanceType = "enum"
if "name" in newEnum:
instanceType = newEnum["name"]
for instance in newEnum["instances"]:
self.nameStack = [instanceType, instance]
self.evaluate_property_stack()
del newEnum["instances"]
def strip_parent_keys(self):
obj_queue = [self]
while len(obj_queue):
obj = obj_queue.pop()
trace_print("pop %s type %s"%(obj, type(obj)))
try:
if "parent" in obj.keys():
del obj["parent"]
trace_print("Stripped parent from %s"%obj.keys())
except: pass
# Figure out what sub types are one of ours
try:
if not hasattr(obj, 'keys'):
obj = obj.__dict__
for k in obj.keys():
trace_print("-Try key %s"%(k))
trace_print("-type %s"%(type(obj[k])))
if k in ["nameStackHistory", "parent", "_public_typedefs"]: continue
if type(obj[k]) == list:
for i in obj[k]:
trace_print("push l %s"%i)
obj_queue.append(i)
elif type(obj[k]) == dict:
if len(obj):
trace_print("push d %s"%obj[k])
obj_queue.append(obj[k])
elif type(obj[k]) == type(type(0)):
if type(obj[k]) == int:
obj[k] = "int"
elif type(obj[k]) == str:
obj[k] = "string"
else:
obj[k] = "???"
trace_print("next key\n")
except:
trace_print("Exception")
def toJSON(self, indent=4):
import json
self.strip_parent_keys()
try:
del self.__dict__["classes_order"]
except: pass
return json.dumps(self.__dict__, indent=indent)
def __repr__(self):
rtn = {
"classes": self.classes,
"functions": self.functions,
"enums": self.enums,
"variables": self.variables,
}
return repr(rtn)
def __str__(self):
rtn = ""
for className in list(self.classes.keys()):
rtn += "%s\n"%self.classes[className]
if self.functions:
rtn += "// functions\n"
for f in self.functions:
rtn += "%s\n"%f
if self.variables:
rtn += "// variables\n"
for f in self.variables:
rtn += "%s\n"%f
if self.enums:
rtn += "// enums\n"
for f in self.enums:
rtn += "%s\n"%f
return rtn
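# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Minimal example of driving the parser defined above from an in-memory string;
# the header text below is made up for illustration.
def _example_parse_header_from_string():
    src = "class Point { public: int x; int y; void move(int dx, int dy); };"
    hdr = CppHeader(src, argType="string")
    point = hdr.classes["Point"]
    # Properties and methods are grouped by access specifier.
    public_fields = [p["name"] for p in point["properties"]["public"]]
    public_methods = [m["name"] for m in point["methods"]["public"]]
    return public_fields, public_methods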
| true | true |
f7007173943c99d08791c125b906d4befe6387ea | 24,923 | py | Python | tensorflow/contrib/rnn/python/ops/lstm_ops.py | M155K4R4/Tensorflow | e5e03ef3148303b3dfed89a1492dedf92b45be25 | [
"Apache-2.0"
] | 5 | 2019-05-23T02:59:21.000Z | 2020-02-05T08:20:23.000Z | tensorflow/contrib/rnn/python/ops/lstm_ops.py | M155K4R4/Tensorflow | e5e03ef3148303b3dfed89a1492dedf92b45be25 | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/rnn/python/ops/lstm_ops.py | M155K4R4/Tensorflow | e5e03ef3148303b3dfed89a1492dedf92b45be25 | [
"Apache-2.0"
] | 2 | 2019-07-04T00:47:02.000Z | 2019-07-08T08:47:05.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LSTM Block Cell ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.contrib.rnn.ops import gen_lstm_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.platform import resource_loader
_lstm_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_lstm_ops.so"))
LayerRNNCell = rnn_cell_impl.LayerRNNCell # pylint: disable=invalid-name
# pylint: disable=invalid-name
def _lstm_block_cell(x,
cs_prev,
h_prev,
w,
b,
wci=None,
wcf=None,
wco=None,
forget_bias=None,
cell_clip=None,
use_peephole=None,
name=None):
r"""Computes the LSTM cell forward propagation for 1 time step.
This implementation uses 1 weight matrix and 1 bias vector, and there's an
optional peephole connection.
This kernel op implements the following mathematical equations:
```python
xh = [x, h_prev]
[i, ci, f, o] = xh * w + b
f = f + forget_bias
if not use_peephole:
wci = wcf = wco = 0
i = sigmoid(cs_prev * wci + i)
f = sigmoid(cs_prev * wcf + f)
ci = tanh(ci)
cs = ci .* i + cs_prev .* f
cs = clip(cs, cell_clip)
o = sigmoid(cs * wco + o)
co = tanh(cs)
h = co .* o
```
Args:
x: A `Tensor`. Must be one of the following types: `float32`.
The input to the LSTM cell, shape (batch_size, num_inputs).
cs_prev: A `Tensor`. Must have the same type as `x`.
Value of the cell state at previous time step.
h_prev: A `Tensor`. Must have the same type as `x`.
Output of the previous cell at previous time step.
w: A `Tensor`. Must have the same type as `x`. The weight matrix.
b: A `Tensor`. Must have the same type as `x`. The bias vector.
wci: A `Tensor`. Must have the same type as `x`.
The weight matrix for input gate peephole connection.
wcf: A `Tensor`. Must have the same type as `x`.
The weight matrix for forget gate peephole connection.
wco: A `Tensor`. Must have the same type as `x`.
The weight matrix for output gate peephole connection.
forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
cell_clip: An optional `float`. Defaults to `-1` (no clipping).
Value to clip the 'cs' value to. Disable by setting to negative value.
use_peephole: An optional `bool`. Defaults to `False`.
Whether to use peephole weights.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A `Tensor`. Has the same type as `x`. The input gate.
cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh.
f: A `Tensor`. Has the same type as `x`. The forget gate.
o: A `Tensor`. Has the same type as `x`. The output gate.
ci: A `Tensor`. Has the same type as `x`. The cell input.
co: A `Tensor`. Has the same type as `x`. The cell after the tanh.
h: A `Tensor`. Has the same type as `x`. The output h vector.
Raises:
ValueError: If cell_size is None.
"""
if wci is None:
cell_size = cs_prev.get_shape().with_rank(2)[1].value
if cell_size is None:
raise ValueError("cell_size from `cs_prev` should not be None.")
wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
wcf = wci
wco = wci
# pylint: disable=protected-access
return gen_lstm_ops.lstm_block_cell(
x=x,
cs_prev=cs_prev,
h_prev=h_prev,
w=w,
wci=wci,
wcf=wcf,
wco=wco,
b=b,
forget_bias=forget_bias,
cell_clip=cell_clip if cell_clip is not None else -1,
use_peephole=use_peephole,
name=name)
# pylint: enable=protected-access
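# Editorial sketch (not part of the original module): an unfused restatement
# of the equations documented above, assuming use_peephole=False and no cell
# clipping. The fused gen_lstm_ops.lstm_block_cell kernel remains the
# authoritative implementation.
def _lstm_block_cell_reference(x, cs_prev, h_prev, w, b, forget_bias=1.0):
  """Illustrative pure-TensorFlow version of the LSTM block cell equations."""
  xh = array_ops.concat([x, h_prev], 1)
  # [i, ci, f, o] = xh * w + b, split along the feature dimension.
  i, ci, f, o = array_ops.split(
      nn_ops.bias_add(math_ops.matmul(xh, w), b), 4, axis=1)
  i = math_ops.sigmoid(i)
  f = math_ops.sigmoid(f + forget_bias)
  ci = math_ops.tanh(ci)
  cs = ci * i + cs_prev * f
  o = math_ops.sigmoid(o)
  co = math_ops.tanh(cs)
  h = co * o
  return i, cs, f, o, ci, co, h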
def _block_lstm(seq_len_max,
x,
w,
b,
cs_prev=None,
h_prev=None,
wci=None,
wcf=None,
wco=None,
forget_bias=None,
cell_clip=None,
use_peephole=None,
name=None):
r"""TODO(williamchan): add doc.
Args:
seq_len_max: A `Tensor` of type `int64`.
x: A list of at least 1 `Tensor` objects of the same type in: `float32`.
w: A `Tensor`. Must have the same type as `x`.
b: A `Tensor`. Must have the same type as `x`.
cs_prev: A `Tensor`. Must have the same type as `x`.
h_prev: A `Tensor`. Must have the same type as `x`.
wci: A `Tensor`. Must have the same type as `x`.
wcf: A `Tensor`. Must have the same type as `x`.
wco: A `Tensor`. Must have the same type as `x`.
forget_bias: An optional `float`. Defaults to `1`.
cell_clip: An optional `float`. Defaults to `-1` (no clipping).
use_peephole: An optional `bool`. Defaults to `False`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
cs: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
f: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
o: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
ci: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
co: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
h: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
Raises:
ValueError: If `b` does not have a valid shape.
"""
batch_size = x[0].get_shape().with_rank(2)[0].value
cell_size4 = b.get_shape().with_rank(1)[0].value
if cell_size4 is None:
raise ValueError("`b` shape must not be None.")
cell_size = cell_size4 / 4
zero_state = None
if cs_prev is None or h_prev is None:
zero_state = array_ops.constant(
0, dtype=dtypes.float32, shape=[batch_size, cell_size])
if cs_prev is None:
cs_prev = zero_state
if h_prev is None:
h_prev = zero_state
if wci is None:
wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
wcf = wci
wco = wci
# pylint: disable=protected-access
i, cs, f, o, ci, co, h = gen_lstm_ops.block_lstm(
seq_len_max=seq_len_max,
x=array_ops.stack(x),
cs_prev=cs_prev,
h_prev=h_prev,
w=w,
wci=wci,
wcf=wcf,
wco=wco,
b=b,
forget_bias=forget_bias,
cell_clip=cell_clip if cell_clip is not None else -1,
name=name,
use_peephole=use_peephole)
return array_ops.unstack(i), array_ops.unstack(cs), array_ops.unstack(
f), array_ops.unstack(o), array_ops.unstack(ci), array_ops.unstack(
co), array_ops.unstack(h)
# pylint: enable=protected-access
# pylint: enable=invalid-name
_lstm_block_cell_grad_outputs = ["cs_prev_grad", "dicfo"]
@ops.RegisterGradient("LSTMBlockCell")
def _LSTMBlockCellGrad(op, *grad):
"""Gradient for LSTMBlockCell."""
(x, cs_prev, h_prev, w, wci, wcf, wco, b) = op.inputs
(i, cs, f, o, ci, co, _) = op.outputs
(_, cs_grad, _, _, _, _, h_grad) = grad
batch_size = x.get_shape().with_rank(2)[0].value
if batch_size is None:
batch_size = -1
input_size = x.get_shape().with_rank(2)[1].value
if input_size is None:
raise ValueError("input_size from `x` should not be None.")
cell_size = cs_prev.get_shape().with_rank(2)[1].value
if cell_size is None:
raise ValueError("cell_size from `cs_prev` should not be None.")
(cs_prev_grad, dicfo, wci_grad, wcf_grad,
wco_grad) = gen_lstm_ops.lstm_block_cell_grad(
x,
cs_prev,
h_prev,
w,
wci,
wcf,
wco,
b,
i,
cs,
f,
o,
ci,
co,
cs_grad,
h_grad,
use_peephole=op.get_attr("use_peephole"))
# Backprop from dicfo to xh.
xh_grad = math_ops.matmul(dicfo, w, transpose_b=True)
x_grad = array_ops.slice(xh_grad, (0, 0), (batch_size, input_size))
x_grad.get_shape().merge_with(x.get_shape())
h_prev_grad = array_ops.slice(xh_grad, (0, input_size),
(batch_size, cell_size))
h_prev_grad.get_shape().merge_with(h_prev.get_shape())
# Backprop from dicfo to w.
xh = array_ops.concat([x, h_prev], 1)
w_grad = math_ops.matmul(xh, dicfo, transpose_a=True)
w_grad.get_shape().merge_with(w.get_shape())
# Backprop from dicfo to b.
b_grad = nn_ops.bias_add_grad(dicfo)
b_grad.get_shape().merge_with(b.get_shape())
return (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,
wco_grad, b_grad)
@ops.RegisterGradient("BlockLSTM")
def _BlockLSTMGrad(op, *grad):
"""Gradient for BlockLSTM."""
seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b = op.inputs
i, cs, f, o, ci, co, h = op.outputs
cs_grad = grad[1]
h_grad = grad[6]
(x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad, wco_grad,
b_grad) = gen_lstm_ops.block_lstm_grad(
seq_len_max,
x,
cs_prev,
h_prev,
w,
wci,
wcf,
wco,
b,
i,
cs,
f,
o,
ci,
co,
h,
cs_grad,
h_grad,
use_peephole=op.get_attr("use_peephole"))
return [
None, x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,
wco_grad, b_grad
]
class LSTMBlockCell(LayerRNNCell):
"""Basic LSTM recurrent network cell.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add `forget_bias` (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
Unlike `rnn_cell_impl.LSTMCell`, this is a monolithic op and should be much
faster. The weight and bias matrices should be compatible as long as the
variable scope matches.
"""
def __init__(self,
num_units,
forget_bias=1.0,
cell_clip=None,
use_peephole=False,
reuse=None,
name="lstm_cell"):
"""Initialize the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
cell_clip: An optional `float`. Defaults to `-1` (no clipping).
use_peephole: Whether to use peephole connections or not.
reuse: (optional) boolean describing whether to reuse variables in an
existing scope. If not `True`, and the existing scope already has the
given variables, an error is raised.
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such
cases. By default this is "lstm_cell", for variable-name compatibility
with `tf.nn.rnn_cell.LSTMCell`.
When restoring from CudnnLSTM-trained checkpoints, must use
CudnnCompatibleLSTMBlockCell instead.
"""
super(LSTMBlockCell, self).__init__(_reuse=reuse, name=name)
self._num_units = num_units
self._forget_bias = forget_bias
self._use_peephole = use_peephole
self._cell_clip = cell_clip if cell_clip is not None else -1
self._names = {
"W": "kernel",
"b": "bias",
"wci": "w_i_diag",
"wcf": "w_f_diag",
"wco": "w_o_diag",
"scope": "lstm_cell"
}
# Inputs must be 2-dimensional.
self.input_spec = base_layer.InputSpec(ndim=2)
@property
def state_size(self):
return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)
@property
def output_size(self):
return self._num_units
def build(self, inputs_shape):
if not inputs_shape[1].value:
raise ValueError(
"Expecting inputs_shape[1] to be set: %s" % str(inputs_shape))
input_size = inputs_shape[1].value
self._kernel = self.add_variable(
self._names["W"], [input_size + self._num_units, self._num_units * 4])
self._bias = self.add_variable(
self._names["b"], [self._num_units * 4],
initializer=init_ops.constant_initializer(0.0))
if self._use_peephole:
self._w_i_diag = self.add_variable(self._names["wci"], [self._num_units])
self._w_f_diag = self.add_variable(self._names["wcf"], [self._num_units])
self._w_o_diag = self.add_variable(self._names["wco"], [self._num_units])
self.built = True
def call(self, inputs, state):
"""Long short-term memory cell (LSTM)."""
if len(state) != 2:
raise ValueError("Expecting state to be a tuple with length 2.")
if self._use_peephole:
wci = self._w_i_diag
wcf = self._w_f_diag
wco = self._w_o_diag
else:
wci = wcf = wco = array_ops.zeros([self._num_units])
(cs_prev, h_prev) = state
(_, cs, _, _, _, _, h) = _lstm_block_cell(
inputs,
cs_prev,
h_prev,
self._kernel,
self._bias,
wci=wci,
wcf=wcf,
wco=wco,
forget_bias=self._forget_bias,
cell_clip=self._cell_clip,
use_peephole=self._use_peephole)
new_state = rnn_cell_impl.LSTMStateTuple(cs, h)
return h, new_state
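  # Usage sketch (editorial note, not part of the original module): the cell
  # is meant as a faster drop-in for `tf.nn.rnn_cell.LSTMCell`, e.g. assuming
  # `tf` is the top-level TensorFlow module and `inputs` is a
  # [batch, time, depth] float tensor:
  #   cell = LSTMBlockCell(num_units=128)
  #   outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)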
class LSTMBlockWrapper(base_layer.Layer):
"""This is a helper class that provides housekeeping for LSTM cells.
This may be useful for alternative LSTM and similar type of cells.
The subclasses must implement `_call_cell` method and `num_units` property.
"""
@abc.abstractproperty
def num_units(self):
"""Number of units in this cell (output dimension)."""
pass
@abc.abstractmethod
def _call_cell(self, inputs, initial_cell_state, initial_output, dtype,
sequence_length):
"""Run this LSTM on inputs, starting from the given state.
This method must be implemented by subclasses and does the actual work
of calling the cell.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
initial_cell_state: initial value for cell state, shape `[batch_size,
self._num_units]`
initial_output: initial value of cell output, shape `[batch_size,
self._num_units]`
dtype: The data type for the initial state and expected output.
sequence_length: Specifies the length of each sequence in inputs. An int32
or int64 vector (tensor) size [batch_size], values in [0, time_len) or
None.
Returns:
A pair containing:
- State: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
- Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
"""
pass
def call(self, inputs, initial_state=None, dtype=None, sequence_length=None):
"""Run this LSTM on inputs, starting from the given state.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
or a list of `time_len` tensors of shape `[batch_size, input_size]`.
initial_state: a tuple `(initial_cell_state, initial_output)` with tensors
of shape `[batch_size, self._num_units]`. If this is not provided, the
cell is expected to create a zero initial state of type `dtype`.
dtype: The data type for the initial state and expected output. Required
if `initial_state` is not provided or RNN state has a heterogeneous
dtype.
sequence_length: Specifies the length of each sequence in inputs. An
`int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
time_len).`
Defaults to `time_len` for each element.
Returns:
A pair containing:
- Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
or a list of time_len tensors of shape `[batch_size, output_size]`,
to match the type of the `inputs`.
- Final state: a tuple `(cell_state, output)` matching `initial_state`.
Raises:
ValueError: in case of shape mismatches
"""
is_list = isinstance(inputs, list)
if is_list:
inputs = array_ops.stack(inputs)
inputs_shape = inputs.get_shape().with_rank(3)
if not inputs_shape[2]:
raise ValueError("Expecting inputs_shape[2] to be set: %s" % inputs_shape)
batch_size = inputs_shape[1].value
if batch_size is None:
batch_size = array_ops.shape(inputs)[1]
time_len = inputs_shape[0].value
if time_len is None:
time_len = array_ops.shape(inputs)[0]
# Provide default values for initial_state and dtype
if initial_state is None:
if dtype is None:
raise ValueError("Either initial_state or dtype needs to be specified")
z = array_ops.zeros(
array_ops.stack([batch_size, self.num_units]), dtype=dtype)
initial_state = z, z
else:
if len(initial_state) != 2:
raise ValueError(
"Expecting initial_state to be a tuple with length 2 or None")
if dtype is None:
dtype = initial_state[0].dtype
# create the actual cell
if sequence_length is not None:
sequence_length = ops.convert_to_tensor(sequence_length)
initial_cell_state, initial_output = initial_state # pylint: disable=unpacking-non-sequence
cell_states, outputs = self._call_cell(
inputs, initial_cell_state, initial_output, dtype, sequence_length)
if sequence_length is not None:
# Mask out the part beyond sequence_length
mask = array_ops.transpose(
array_ops.sequence_mask(sequence_length, time_len, dtype=dtype),
[1, 0])
mask = array_ops.tile(
array_ops.expand_dims(mask, [-1]), [1, 1, self.num_units])
outputs *= mask
# Prepend initial states to cell_states and outputs for indexing to work
# correctly,since we want to access the last valid state at
# sequence_length - 1, which can even be -1, corresponding to the
# initial state.
mod_cell_states = array_ops.concat(
[array_ops.expand_dims(initial_cell_state, [0]), cell_states], 0)
mod_outputs = array_ops.concat(
[array_ops.expand_dims(initial_output, [0]), outputs], 0)
final_cell_state = self._gather_states(mod_cell_states, sequence_length,
batch_size)
final_output = self._gather_states(mod_outputs, sequence_length,
batch_size)
else:
# No sequence_lengths used: final state is the last state
final_cell_state = cell_states[-1]
final_output = outputs[-1]
if is_list:
# Input was a list, so return a list
outputs = array_ops.unstack(outputs)
final_state = rnn_cell_impl.LSTMStateTuple(final_cell_state, final_output)
return outputs, final_state
def _gather_states(self, data, indices, batch_size):
"""Produce `out`, s.t. out(i, j) = data(indices(i), i, j)."""
mod_indices = indices * batch_size + math_ops.range(batch_size)
return array_ops.gather(
array_ops.reshape(data, [-1, self.num_units]), mod_indices)
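  # Worked example (editorial note): with batch_size=2 and
  # sequence_length=[3, 5], mod_indices = [3*2 + 0, 5*2 + 1] = [6, 11],
  # i.e. rows (t=3, b=0) and (t=5, b=1) of the time-major `data` after it is
  # reshaped to [-1, num_units]. Because `call` prepends the initial state,
  # index sequence_length[b] is the state just after batch element b's last
  # valid step.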
class LSTMBlockFusedCell(LSTMBlockWrapper):
"""FusedRNNCell implementation of LSTM.
This is an extremely efficient LSTM implementation, that uses a single TF op
for the entire LSTM. It should be both faster and more memory-efficient than
LSTMBlockCell defined above.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
The variable naming is consistent with `rnn_cell_impl.LSTMCell`.
"""
def __init__(self,
num_units,
forget_bias=1.0,
cell_clip=None,
use_peephole=False,
reuse=None,
name="lstm_fused_cell"):
"""Initialize the LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
cell_clip: clip the cell to this value. Default is no cell clipping.
use_peephole: Whether to use peephole connections or not.
reuse: (optional) boolean describing whether to reuse variables in an
existing scope. If not `True`, and the existing scope already has the
given variables, an error is raised.
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such
cases. By default this is "lstm_cell", for variable-name compatibility
with `tf.nn.rnn_cell.LSTMCell`.
"""
super(LSTMBlockFusedCell, self).__init__(_reuse=reuse, name=name)
self._num_units = num_units
self._forget_bias = forget_bias
self._cell_clip = cell_clip if cell_clip is not None else -1
self._use_peephole = use_peephole
# Inputs must be 3-dimensional.
self.input_spec = base_layer.InputSpec(ndim=3)
@property
def num_units(self):
"""Number of units in this cell (output dimension)."""
return self._num_units
def build(self, input_shape):
input_size = input_shape[2].value
self._kernel = self.add_variable(
"kernel", [input_size + self._num_units, self._num_units * 4])
self._bias = self.add_variable(
"bias", [self._num_units * 4],
initializer=init_ops.constant_initializer(0.0))
if self._use_peephole:
self._w_i_diag = self.add_variable("w_i_diag", [self._num_units])
self._w_f_diag = self.add_variable("w_f_diag", [self._num_units])
self._w_o_diag = self.add_variable("w_o_diag", [self._num_units])
self.built = True
def _call_cell(self,
inputs,
initial_cell_state=None,
initial_output=None,
dtype=None,
sequence_length=None):
"""Run this LSTM on inputs, starting from the given state.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
initial_cell_state: initial value for cell state, shape `[batch_size,
self._num_units]`
initial_output: initial value of cell output, shape `[batch_size,
self._num_units]`
dtype: The data type for the initial state and expected output.
sequence_length: Specifies the length of each sequence in inputs. An
`int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
time_len)` or None.
Returns:
A pair containing:
- Cell state (cs): A `3-D` tensor of shape `[time_len, batch_size,
output_size]`
- Output (h): A `3-D` tensor of shape `[time_len, batch_size,
output_size]`
"""
inputs_shape = inputs.get_shape().with_rank(3)
time_len = inputs_shape[0].value
if time_len is None:
time_len = array_ops.shape(inputs)[0]
if self._use_peephole:
wci = self._w_i_diag
wco = self._w_o_diag
wcf = self._w_f_diag
else:
wci = wcf = wco = array_ops.zeros([self._num_units], dtype=dtype)
if sequence_length is None:
max_seq_len = math_ops.to_int64(time_len)
else:
max_seq_len = math_ops.to_int64(math_ops.reduce_max(sequence_length))
_, cs, _, _, _, _, h = gen_lstm_ops.block_lstm(
seq_len_max=max_seq_len,
x=inputs,
cs_prev=initial_cell_state,
h_prev=initial_output,
w=self._kernel,
wci=wci,
wcf=wcf,
wco=wco,
b=self._bias,
forget_bias=self._forget_bias,
cell_clip=self._cell_clip,
use_peephole=self._use_peephole)
return cs, h
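# Usage sketch (editorial note, not part of the original module): the fused
# cell consumes time-major input, e.g. assuming `inputs` has shape
# [time_len, batch_size, input_size]:
#   cell = LSTMBlockFusedCell(num_units=128)
#   outputs, (final_c, final_h) = cell(inputs, dtype=dtypes.float32)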
| 35.401989 | 96 | 0.650564 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.contrib.rnn.ops import gen_lstm_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.platform import resource_loader
_lstm_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_lstm_ops.so"))
LayerRNNCell = rnn_cell_impl.LayerRNNCell
def _lstm_block_cell(x,
cs_prev,
h_prev,
w,
b,
wci=None,
wcf=None,
wco=None,
forget_bias=None,
cell_clip=None,
use_peephole=None,
name=None):
if wci is None:
cell_size = cs_prev.get_shape().with_rank(2)[1].value
if cell_size is None:
raise ValueError("cell_size from `cs_prev` should not be None.")
wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
wcf = wci
wco = wci
return gen_lstm_ops.lstm_block_cell(
x=x,
cs_prev=cs_prev,
h_prev=h_prev,
w=w,
wci=wci,
wcf=wcf,
wco=wco,
b=b,
forget_bias=forget_bias,
cell_clip=cell_clip if cell_clip is not None else -1,
use_peephole=use_peephole,
name=name)
def _block_lstm(seq_len_max,
x,
w,
b,
cs_prev=None,
h_prev=None,
wci=None,
wcf=None,
wco=None,
forget_bias=None,
cell_clip=None,
use_peephole=None,
name=None):
batch_size = x[0].get_shape().with_rank(2)[0].value
cell_size4 = b.get_shape().with_rank(1)[0].value
if cell_size4 is None:
raise ValueError("`b` shape must not be None.")
cell_size = cell_size4 / 4
zero_state = None
if cs_prev is None or h_prev is None:
zero_state = array_ops.constant(
0, dtype=dtypes.float32, shape=[batch_size, cell_size])
if cs_prev is None:
cs_prev = zero_state
if h_prev is None:
h_prev = zero_state
if wci is None:
wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
wcf = wci
wco = wci
i, cs, f, o, ci, co, h = gen_lstm_ops.block_lstm(
seq_len_max=seq_len_max,
x=array_ops.stack(x),
cs_prev=cs_prev,
h_prev=h_prev,
w=w,
wci=wci,
wcf=wcf,
wco=wco,
b=b,
forget_bias=forget_bias,
cell_clip=cell_clip if cell_clip is not None else -1,
name=name,
use_peephole=use_peephole)
return array_ops.unstack(i), array_ops.unstack(cs), array_ops.unstack(
f), array_ops.unstack(o), array_ops.unstack(ci), array_ops.unstack(
co), array_ops.unstack(h)
_lstm_block_cell_grad_outputs = ["cs_prev_grad", "dicfo"]
@ops.RegisterGradient("LSTMBlockCell")
def _LSTMBlockCellGrad(op, *grad):
(x, cs_prev, h_prev, w, wci, wcf, wco, b) = op.inputs
(i, cs, f, o, ci, co, _) = op.outputs
(_, cs_grad, _, _, _, _, h_grad) = grad
batch_size = x.get_shape().with_rank(2)[0].value
if batch_size is None:
batch_size = -1
input_size = x.get_shape().with_rank(2)[1].value
if input_size is None:
raise ValueError("input_size from `x` should not be None.")
cell_size = cs_prev.get_shape().with_rank(2)[1].value
if cell_size is None:
raise ValueError("cell_size from `cs_prev` should not be None.")
(cs_prev_grad, dicfo, wci_grad, wcf_grad,
wco_grad) = gen_lstm_ops.lstm_block_cell_grad(
x,
cs_prev,
h_prev,
w,
wci,
wcf,
wco,
b,
i,
cs,
f,
o,
ci,
co,
cs_grad,
h_grad,
use_peephole=op.get_attr("use_peephole"))
xh_grad = math_ops.matmul(dicfo, w, transpose_b=True)
x_grad = array_ops.slice(xh_grad, (0, 0), (batch_size, input_size))
x_grad.get_shape().merge_with(x.get_shape())
h_prev_grad = array_ops.slice(xh_grad, (0, input_size),
(batch_size, cell_size))
h_prev_grad.get_shape().merge_with(h_prev.get_shape())
xh = array_ops.concat([x, h_prev], 1)
w_grad = math_ops.matmul(xh, dicfo, transpose_a=True)
w_grad.get_shape().merge_with(w.get_shape())
b_grad = nn_ops.bias_add_grad(dicfo)
b_grad.get_shape().merge_with(b.get_shape())
return (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,
wco_grad, b_grad)
@ops.RegisterGradient("BlockLSTM")
def _BlockLSTMGrad(op, *grad):
seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b = op.inputs
i, cs, f, o, ci, co, h = op.outputs
cs_grad = grad[1]
h_grad = grad[6]
(x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad, wco_grad,
b_grad) = gen_lstm_ops.block_lstm_grad(
seq_len_max,
x,
cs_prev,
h_prev,
w,
wci,
wcf,
wco,
b,
i,
cs,
f,
o,
ci,
co,
h,
cs_grad,
h_grad,
use_peephole=op.get_attr("use_peephole"))
return [
None, x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,
wco_grad, b_grad
]
class LSTMBlockCell(LayerRNNCell):
def __init__(self,
num_units,
forget_bias=1.0,
cell_clip=None,
use_peephole=False,
reuse=None,
name="lstm_cell"):
super(LSTMBlockCell, self).__init__(_reuse=reuse, name=name)
self._num_units = num_units
self._forget_bias = forget_bias
self._use_peephole = use_peephole
self._cell_clip = cell_clip if cell_clip is not None else -1
self._names = {
"W": "kernel",
"b": "bias",
"wci": "w_i_diag",
"wcf": "w_f_diag",
"wco": "w_o_diag",
"scope": "lstm_cell"
}
self.input_spec = base_layer.InputSpec(ndim=2)
@property
def state_size(self):
return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)
@property
def output_size(self):
return self._num_units
def build(self, inputs_shape):
if not inputs_shape[1].value:
raise ValueError(
"Expecting inputs_shape[1] to be set: %s" % str(inputs_shape))
input_size = inputs_shape[1].value
self._kernel = self.add_variable(
self._names["W"], [input_size + self._num_units, self._num_units * 4])
self._bias = self.add_variable(
self._names["b"], [self._num_units * 4],
initializer=init_ops.constant_initializer(0.0))
if self._use_peephole:
self._w_i_diag = self.add_variable(self._names["wci"], [self._num_units])
self._w_f_diag = self.add_variable(self._names["wcf"], [self._num_units])
self._w_o_diag = self.add_variable(self._names["wco"], [self._num_units])
self.built = True
def call(self, inputs, state):
if len(state) != 2:
raise ValueError("Expecting state to be a tuple with length 2.")
if self._use_peephole:
wci = self._w_i_diag
wcf = self._w_f_diag
wco = self._w_o_diag
else:
wci = wcf = wco = array_ops.zeros([self._num_units])
(cs_prev, h_prev) = state
(_, cs, _, _, _, _, h) = _lstm_block_cell(
inputs,
cs_prev,
h_prev,
self._kernel,
self._bias,
wci=wci,
wcf=wcf,
wco=wco,
forget_bias=self._forget_bias,
cell_clip=self._cell_clip,
use_peephole=self._use_peephole)
new_state = rnn_cell_impl.LSTMStateTuple(cs, h)
return h, new_state
class LSTMBlockWrapper(base_layer.Layer):
@abc.abstractproperty
def num_units(self):
pass
@abc.abstractmethod
def _call_cell(self, inputs, initial_cell_state, initial_output, dtype,
sequence_length):
pass
def call(self, inputs, initial_state=None, dtype=None, sequence_length=None):
is_list = isinstance(inputs, list)
if is_list:
inputs = array_ops.stack(inputs)
inputs_shape = inputs.get_shape().with_rank(3)
if not inputs_shape[2]:
raise ValueError("Expecting inputs_shape[2] to be set: %s" % inputs_shape)
batch_size = inputs_shape[1].value
if batch_size is None:
batch_size = array_ops.shape(inputs)[1]
time_len = inputs_shape[0].value
if time_len is None:
time_len = array_ops.shape(inputs)[0]
if initial_state is None:
if dtype is None:
raise ValueError("Either initial_state or dtype needs to be specified")
z = array_ops.zeros(
array_ops.stack([batch_size, self.num_units]), dtype=dtype)
initial_state = z, z
else:
if len(initial_state) != 2:
raise ValueError(
"Expecting initial_state to be a tuple with length 2 or None")
if dtype is None:
dtype = initial_state[0].dtype
if sequence_length is not None:
sequence_length = ops.convert_to_tensor(sequence_length)
    initial_cell_state, initial_output = initial_state
    cell_states, outputs = self._call_cell(
inputs, initial_cell_state, initial_output, dtype, sequence_length)
if sequence_length is not None:
mask = array_ops.transpose(
array_ops.sequence_mask(sequence_length, time_len, dtype=dtype),
[1, 0])
mask = array_ops.tile(
array_ops.expand_dims(mask, [-1]), [1, 1, self.num_units])
outputs *= mask
mod_cell_states = array_ops.concat(
[array_ops.expand_dims(initial_cell_state, [0]), cell_states], 0)
mod_outputs = array_ops.concat(
[array_ops.expand_dims(initial_output, [0]), outputs], 0)
final_cell_state = self._gather_states(mod_cell_states, sequence_length,
batch_size)
final_output = self._gather_states(mod_outputs, sequence_length,
batch_size)
else:
final_cell_state = cell_states[-1]
final_output = outputs[-1]
if is_list:
outputs = array_ops.unstack(outputs)
final_state = rnn_cell_impl.LSTMStateTuple(final_cell_state, final_output)
return outputs, final_state
def _gather_states(self, data, indices, batch_size):
mod_indices = indices * batch_size + math_ops.range(batch_size)
return array_ops.gather(
array_ops.reshape(data, [-1, self.num_units]), mod_indices)
class LSTMBlockFusedCell(LSTMBlockWrapper):
def __init__(self,
num_units,
forget_bias=1.0,
cell_clip=None,
use_peephole=False,
reuse=None,
name="lstm_fused_cell"):
super(LSTMBlockFusedCell, self).__init__(_reuse=reuse, name=name)
self._num_units = num_units
self._forget_bias = forget_bias
self._cell_clip = cell_clip if cell_clip is not None else -1
self._use_peephole = use_peephole
self.input_spec = base_layer.InputSpec(ndim=3)
@property
def num_units(self):
return self._num_units
def build(self, input_shape):
input_size = input_shape[2].value
self._kernel = self.add_variable(
"kernel", [input_size + self._num_units, self._num_units * 4])
self._bias = self.add_variable(
"bias", [self._num_units * 4],
initializer=init_ops.constant_initializer(0.0))
if self._use_peephole:
self._w_i_diag = self.add_variable("w_i_diag", [self._num_units])
self._w_f_diag = self.add_variable("w_f_diag", [self._num_units])
self._w_o_diag = self.add_variable("w_o_diag", [self._num_units])
self.built = True
def _call_cell(self,
inputs,
initial_cell_state=None,
initial_output=None,
dtype=None,
sequence_length=None):
inputs_shape = inputs.get_shape().with_rank(3)
time_len = inputs_shape[0].value
if time_len is None:
time_len = array_ops.shape(inputs)[0]
if self._use_peephole:
wci = self._w_i_diag
wco = self._w_o_diag
wcf = self._w_f_diag
else:
wci = wcf = wco = array_ops.zeros([self._num_units], dtype=dtype)
if sequence_length is None:
max_seq_len = math_ops.to_int64(time_len)
else:
max_seq_len = math_ops.to_int64(math_ops.reduce_max(sequence_length))
_, cs, _, _, _, _, h = gen_lstm_ops.block_lstm(
seq_len_max=max_seq_len,
x=inputs,
cs_prev=initial_cell_state,
h_prev=initial_output,
w=self._kernel,
wci=wci,
wcf=wcf,
wco=wco,
b=self._bias,
forget_bias=self._forget_bias,
cell_clip=self._cell_clip,
use_peephole=self._use_peephole)
return cs, h
| true | true |
f7007281123ef0b43ad24fd2ed13f4b9a7f55812 | 867 | py | Python | common/clean_svt_transcript.py | phsmit/kaldi-recipes | b2e2e098cc344767a9ac6cc15942afea5685a7e8 | [
"Apache-2.0"
] | null | null | null | common/clean_svt_transcript.py | phsmit/kaldi-recipes | b2e2e098cc344767a9ac6cc15942afea5685a7e8 | [
"Apache-2.0"
] | null | null | null | common/clean_svt_transcript.py | phsmit/kaldi-recipes | b2e2e098cc344767a9ac6cc15942afea5685a7e8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import argparse
import codecs
import sys
def transform(i,o):
for line in i:
if len(line.strip()) == 0:
continue
key, trans = line.strip().split(None, 1)
ntrans = []
for t in trans.split():
if t.startswith("<"):
continue
ntrans.append(t.lower())
print("{} {}".format(key, " ".join(ntrans)), file=o)
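# Example (editorial note): an input line such as
#   utt1 <noise> Hello WORLD
# is written out as
#   utt1 hello world
# The utterance key is kept as-is, tokens starting with "<" are dropped,
# and the remaining tokens are lowercased.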
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='')
parser.add_argument('infile', nargs='?', type=argparse.FileType('r', encoding='utf-8'), default=codecs.getreader('utf-8')(sys.stdin.buffer))
parser.add_argument('outfile', nargs='?', type=argparse.FileType('w', encoding='utf-8'), default=codecs.getwriter('utf-8')(sys.stdout.buffer))
args = parser.parse_args()
transform(args.infile, args.outfile) | 30.964286 | 146 | 0.608997 | import argparse
import codecs
import sys
def transform(i,o):
for line in i:
if len(line.strip()) == 0:
continue
key, trans = line.strip().split(None, 1)
ntrans = []
for t in trans.split():
if t.startswith("<"):
continue
ntrans.append(t.lower())
print("{} {}".format(key, " ".join(ntrans)), file=o)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='')
parser.add_argument('infile', nargs='?', type=argparse.FileType('r', encoding='utf-8'), default=codecs.getreader('utf-8')(sys.stdin.buffer))
parser.add_argument('outfile', nargs='?', type=argparse.FileType('w', encoding='utf-8'), default=codecs.getwriter('utf-8')(sys.stdout.buffer))
args = parser.parse_args()
transform(args.infile, args.outfile) | true | true |
f70072e664f2a4c0803afed709115398c03808ab | 11,745 | py | Python | PSN.py | jeevangelista/PSN | b47e95e8e3c205c0aa29438a025644ce1d25f93e | [
"MIT"
] | null | null | null | PSN.py | jeevangelista/PSN | b47e95e8e3c205c0aa29438a025644ce1d25f93e | [
"MIT"
] | null | null | null | PSN.py | jeevangelista/PSN | b47e95e8e3c205c0aa29438a025644ce1d25f93e | [
"MIT"
] | null | null | null | #!/Users/erol/Code/2.7/bin/python
#Change this before running
import sys, getopt, glob, csv
from math import sqrt
from Bio import Struct
from Bio.Struct.Geometry import center_of_mass
from Bio.PDB import *
import numpy as np
def string_to_float(val):
try:
return float(val)
except ValueError as e:
raise
# Returns the number of atoms per residue pair that have a
# euclidean distance below a threshold
# Inputs:
# model: Structure object
# threshold: Threshold
# excluded: Excluded atoms
# Returns: |R| x |R| matrix |R| = # of residues, Rij = # of connected atom bet Ri and Rj
def compute_atom_contact(model, threshold, excluded=[]):
res_count = 0
# Get residue counts on all chain
for chain in model:
res_count += len(chain)
# generate mod contact matrix
mod_contact_matrix = np.zeros((res_count,res_count))
last_i = 1
for chain in model:
i = last_i
chaindex_i = 0
for res_i in chain.get_residues():
index_i = chaindex_i + i
last_i = index_i # we note the last value of i for multiple chain
chaindex_j = 0
for res_j in chain.get_residues():
index_j = chaindex_j + index_i
# don't recompute
if index_j <= len(chain):
if index_j > index_i:
for at_i in res_i:
if at_i.get_name() not in excluded:
for at_j in res_j:
if at_j.get_name() not in excluded:
euclidean = np.linalg.norm(at_i.get_coord()-at_j.get_coord())
if euclidean <= threshold:
# add one interacting node to both residues
mod_contact_matrix[index_i-1][index_j-1] += 1
mod_contact_matrix[index_j-1][index_i-1] += 1
chaindex_j += 1
chaindex_i += 1
# # Assume for now na single chain lang
# for res_i in range(len(chain)):
# index_i = res_i + i # add i to consider multiple chains
# last_i = index_i # we note the last value of i for multiple chain
# for res_j in range(len(chain)-res_i):
# index_j = res_j + index_i
# if index_i < index_j: # Wag na magrecompute
# # for ptm
# try:
# for at_i in chain[index_i]:
# if at_i not in excluded:
# for at_j in chain[index_j]:
# if at_j not in excluded:
# euclidean = np.linalg.norm(at_i.get_coord()-at_j.get_coord())
# if euclidean <= threshold:
# # add one interacting node to both residues
# mod_contact_matrix[index_i-1][index_j-1] += 1
# mod_contact_matrix[index_j-1][index_i-1] += 1
# except KeyError as e:
# break
return mod_contact_matrix
# Returns the ave number of atoms per residue pair that have a
# euclidean distance below a threshold
# Inputs:
# structure: Structure object
# threshold: Threshold
# excluded: Excluded atoms
# Returns: |R| x |R| matrix |R| = # of residues
def compute_ave_atom_contact(structure, threshold, excluded=[]):
res_count = 0
# Get residue counts on all chain
for chain in structure[0]:
res_count += len(chain)
struct_contact_matrix = np.zeros((res_count,res_count))
for model in structure:
mod_contact_matrix = compute_atom_contact(model, threshold, excluded)
struct_contact_matrix += mod_contact_matrix
ave_struct_contact_matrix = struct_contact_matrix / len(structure)
return ave_struct_contact_matrix
# returns a dictionary that contains the max contact per residue
def compute_max_residue(structure, ave_struct_contact_matrix):
res_dict = {}
i = 0
for chain in structure[0]:
for res in chain:
contact_atoms = np.sum(ave_struct_contact_matrix[i])
i+=1
if res.get_resname() not in res_dict:
res_dict[res.get_resname()] = contact_atoms
elif res_dict[res.get_resname()] < contact_atoms:
res_dict[res.get_resname()] = contact_atoms
return res_dict
# compute normalization factor per residue
def compute_normalization_factor(pdb_dir, threshold, output):
parser = PDBParser()
tot_files = 0.0
res_dir_dict = {}
for filename in glob.iglob(pdb_dir + "/*.pdb"):
tot_files += 1
label = filename.split('/')[-1].split('.')[0]
print label
structure = parser.get_structure(label,filename)
ave_struct_contact_matrix = compute_ave_atom_contact(structure, 2)
res_dict = compute_max_residue(structure, ave_struct_contact_matrix)
for key in res_dict:
if key not in res_dir_dict:
res_dir_dict[key] = res_dict[key]
else:
res_dir_dict[key] += res_dict[key]
for key in res_dir_dict:
res_dir_dict[key] = res_dir_dict[key]/tot_files
with open(output, "w") as outfile:
csvfile = csv.writer(outfile)
for key in res_dir_dict:
csvfile.writerow([key,res_dir_dict[key]])
print res_dir_dict
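# Example invocation for this mode (editorial sketch; paths are placeholders):
#   python PSN.py -n -p native_pdbs/ -c 4.5 -o norm_factors.csv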
# read normalization file and returns it in a dictionary
def read_normalization(norm_fact):
norm_dict = {}
with open(norm_fact) as normfile:
csvfile = csv.reader(normfile)
for row in csvfile:
norm_dict[row[0]] = float(row[1])
return norm_dict
def compute_interaction(res_1, res_2, inter_atoms, norm_dict):
# for amber
res_1 = 'HIS' if res_1 in ['HIE', 'HID', 'HIP'] else res_1
res_2 = 'HIS' if res_2 in ['HIE', 'HID', 'HIP'] else res_2
norm_1 = norm_dict[res_1]
norm_2 = norm_dict[res_2]
# remove tom
if norm_1 == 0 or norm_2 == 0:
return 0
return (inter_atoms*100.0)/sqrt(norm_1*norm_2)
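# Worked example (editorial note, with assumed normalization values): for
# 5.0 interacting atom pairs between residues whose normalization factors are
# 10.0 and 8.0, the interaction strength is
#   5.0 * 100 / sqrt(10.0 * 8.0) = 500 / 8.94, about 55.9
# well above the default interaction_thresh of 1 used in main().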
# Returns the number of atoms per residue pair that have a
# euclidean distance below a threshold
# Inputs:
# model: Structure object
# threshold: Threshold
# excluded: Excluded atoms
# Returns: |R| x |R| matrix |R| = # of residues, Rij = interaction strength
def compute_atom_interaction(model,
contact_thresh,
interaction_thresh,
norm_dict,
residues,
excluded=[]):
model_contact = compute_atom_contact(model, contact_thresh, excluded)
model_interact = np.zeros(model_contact.shape)
for i in range(len(residues)):
for j in range(len(residues)-i):
index_j = j + i
index_i = i
# if equal zero lang
if index_i < index_j:
int_str = compute_interaction(residues[index_i],
residues[index_j],
model_contact[index_i][index_j],
norm_dict)
        # print int_str
if int_str >= interaction_thresh:
model_interact[index_i][index_j] = 1
model_interact[index_j][index_i] = 1
return model_interact
def compute_interaction_matrix(structure,
contact_thresh,
interaction_thresh,
edge_prct,
norm_dict,
outdir,
excluded=[]):
residues = []
for res in structure[0].get_residues():
residues.append(res.get_resname())
struct_interact = np.zeros((len(residues),len(residues)))
PSN = np.zeros((len(residues),len(residues)))
for model in structure:
model_interact = compute_atom_interaction(model,
contact_thresh,
interaction_thresh,
norm_dict,
residues,
excluded=[])
struct_interact = struct_interact + model_interact
edge_thresh = edge_prct * len(structure)
for i in range(len(residues)):
for j in range(len(residues)-i):
index_j = j + i
index_i = i
# if equal zero lang
if index_i < index_j:
if struct_interact[index_i][index_j]>= edge_thresh:
PSN[index_i][index_j] = 1
PSN[index_j][index_i] = 1
filename = "%s/%s_%.2f_%.2f_%.2f" % (outdir,structure.get_id(),contact_thresh,interaction_thresh,edge_prct)
np.savetxt(filename,PSN,fmt='%d')
def get_graph_rep(pdb_dir,
contact_thresh,
interaction_thresh,
edge_prct,
norm_dict,
outdir,
excluded=[]):
parser = PDBParser()
for filename in glob.iglob(pdb_dir + "/*.pdb"):
label = filename.split('/')[-1].split('.')[0]
print label
structure = parser.get_structure(label,filename)
for int_thresh in interaction_thresh:
compute_interaction_matrix(structure,
contact_thresh,
int_thresh,
edge_prct,
norm_dict,
outdir,
excluded=[])
def main(argv):
pdb_dir = ''
norm_fact = ''
outdir = ''
norm = 1
interaction_thresh = 1
contact_thresh = 4.5
normalize = False
edge_prct = 0.9
bb = ['N','CA','C','O']
try:
opts, args = getopt.getopt(argv,"hnp:f:c:i:e:o:",["pdb_dir=",
"norm_fact=",
"interaction_thresh=",
"contact_thresh=",
"edge_prct=",
"normalize",
"outdir="])
except getopt.GetoptError:
    print 'usage: PSN.py -p <pdb dir> -f <norm factor file> -c <contact thresh> -i <interaction threshs> -e <edge prct> -o <output> [-n]'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'Computes a network given a threshold or computes normalization factor'
print '''PSN.py
-p --pdb_dir\t\tpdb directory
-f --norm_fact\t\tnormalization factor file
-i --interaction_thresh\tcomma separated interaction strengths
-c --contact_thresh\tcontact threshold
-n --normalize\t\tComputes normalization factor of input pdbs
-o --output\t\tName of output'''
sys.exit()
elif opt in ("-p", "--pdb_dir"):
pdb_dir = arg
elif opt in ("-f", "--norm_fact"):
norm_fact = arg
elif opt in ("-e", "--edge_prct"):
edge_prct = string_to_float(arg)
elif opt in ("-c", "--contact_thresh"):
contact_thresh = string_to_float(arg)
elif opt in ("-i", "--interaction_thresh"):
interaction_thresh = []
for i in arg.split(","):
interaction_thresh.append(string_to_float(i))
elif opt in ("-o", "--output"):
outdir = arg
elif opt in ("-n","--normalize"):
normalize = True
# parser = PDBParser()
# structure = parser.get_structure('1akg','../native/1acw.pdb')
# model = structure[0]
# #compute_normalization_factor(pdb_dir, contact_thresh, output)
# compute_interaction_matrix(structure,
# contact_thresh,
# interaction_thresh,
# edge_prct,
# norm_dict)
if normalize:
compute_normalization_factor(pdb_dir, contact_thresh, outdir)
else:
norm_dict = read_normalization(norm_fact)
get_graph_rep(pdb_dir,
contact_thresh,
interaction_thresh,
edge_prct,
norm_dict,
outdir,
bb)
if __name__ == "__main__":
main(sys.argv[1:])
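# Example invocation for network generation (editorial sketch; paths are
# placeholders):
#   python PSN.py -p models/ -f norm_factors.csv -c 4.5 -i 1.0,2.5,5.0 -e 0.9 -o psn_out/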
| 35.27027 | 109 | 0.572073 |
import sys, getopt, glob, csv
from math import sqrt
from Bio import Struct
from Bio.Struct.Geometry import center_of_mass
from Bio.PDB import *
import numpy as np
def string_to_float(val):
try:
return float(val)
except ValueError as e:
raise
def compute_atom_contact(model, threshold, excluded=[]):
res_count = 0
for chain in model:
res_count += len(chain)
mod_contact_matrix = np.zeros((res_count,res_count))
last_i = 1
for chain in model:
i = last_i
chaindex_i = 0
for res_i in chain.get_residues():
index_i = chaindex_i + i
      last_i = index_i
      chaindex_j = 0
for res_j in chain.get_residues():
index_j = chaindex_j + index_i
if index_j <= len(chain):
if index_j > index_i:
for at_i in res_i:
if at_i.get_name() not in excluded:
for at_j in res_j:
if at_j.get_name() not in excluded:
euclidean = np.linalg.norm(at_i.get_coord()-at_j.get_coord())
if euclidean <= threshold:
# add one interacting node to both residues
mod_contact_matrix[index_i-1][index_j-1] += 1
mod_contact_matrix[index_j-1][index_i-1] += 1
chaindex_j += 1
chaindex_i += 1
# # Assume for now na single chain lang
# for res_i in range(len(chain)):
# index_i = res_i + i # add i to consider multiple chains
# last_i = index_i # we note the last value of i for multiple chain
# for res_j in range(len(chain)-res_i):
# index_j = res_j + index_i
# if index_i < index_j: # Wag na magrecompute
# # for ptm
# try:
# for at_i in chain[index_i]:
# if at_i not in excluded:
# for at_j in chain[index_j]:
# if at_j not in excluded:
# euclidean = np.linalg.norm(at_i.get_coord()-at_j.get_coord())
# if euclidean <= threshold:
# # add one interacting node to both residues
# mod_contact_matrix[index_i-1][index_j-1] += 1
# mod_contact_matrix[index_j-1][index_i-1] += 1
# except KeyError as e:
# break
return mod_contact_matrix
# Returns the ave number of atoms per residue pair that have a
# euclidean distance below a threshold
# Inputs:
# structure: Structure object
# threshold: Threshold
# excluded: Excluded atoms
# Returns: |R| x |R| matrix |R| = # of residues
def compute_ave_atom_contact(structure, threshold, excluded=[]):
res_count = 0
# Get residue counts on all chain
for chain in structure[0]:
res_count += len(chain)
struct_contact_matrix = np.zeros((res_count,res_count))
for model in structure:
mod_contact_matrix = compute_atom_contact(model, threshold, excluded)
struct_contact_matrix += mod_contact_matrix
ave_struct_contact_matrix = struct_contact_matrix / len(structure)
return ave_struct_contact_matrix
# returns a dictionary that contains the max contact per residue
def compute_max_residue(structure, ave_struct_contact_matrix):
res_dict = {}
i = 0
for chain in structure[0]:
for res in chain:
contact_atoms = np.sum(ave_struct_contact_matrix[i])
i+=1
if res.get_resname() not in res_dict:
res_dict[res.get_resname()] = contact_atoms
elif res_dict[res.get_resname()] < contact_atoms:
res_dict[res.get_resname()] = contact_atoms
return res_dict
# compute normalization factor per residue
def compute_normalization_factor(pdb_dir, threshold, output):
parser = PDBParser()
tot_files = 0.0
res_dir_dict = {}
for filename in glob.iglob(pdb_dir + "/*.pdb"):
tot_files += 1
label = filename.split('/')[-1].split('.')[0]
print label
structure = parser.get_structure(label,filename)
ave_struct_contact_matrix = compute_ave_atom_contact(structure, 2)
res_dict = compute_max_residue(structure, ave_struct_contact_matrix)
for key in res_dict:
if key not in res_dir_dict:
res_dir_dict[key] = res_dict[key]
else:
res_dir_dict[key] += res_dict[key]
for key in res_dir_dict:
res_dir_dict[key] = res_dir_dict[key]/tot_files
with open(output, "w") as outfile:
csvfile = csv.writer(outfile)
for key in res_dir_dict:
csvfile.writerow([key,res_dir_dict[key]])
print res_dir_dict
# read normalization file and returns it in a dictionary
def read_normalization(norm_fact):
norm_dict = {}
with open(norm_fact) as normfile:
csvfile = csv.reader(normfile)
for row in csvfile:
norm_dict[row[0]] = float(row[1])
return norm_dict
def compute_interaction(res_1, res_2, inter_atoms, norm_dict):
# for amber
res_1 = 'HIS' if res_1 in ['HIE', 'HID', 'HIP'] else res_1
res_2 = 'HIS' if res_2 in ['HIE', 'HID', 'HIP'] else res_2
norm_1 = norm_dict[res_1]
norm_2 = norm_dict[res_2]
# remove tom
if norm_1 == 0 or norm_2 == 0:
return 0
return (inter_atoms*100.0)/sqrt(norm_1*norm_2)
# Returns the number of atoms per residue pair that have a
# euclidean distance below a threshold
# Inputs:
# model: Structure object
# threshold: Threshold
# excluded: Excluded atoms
# Returns: |R| x |R| matrix |R| = # of residues, Rij = interaction strength
def compute_atom_interaction(model,
contact_thresh,
interaction_thresh,
norm_dict,
residues,
excluded=[]):
model_contact = compute_atom_contact(model, contact_thresh, excluded)
model_interact = np.zeros(model_contact.shape)
for i in range(len(residues)):
for j in range(len(residues)-i):
index_j = j + i
index_i = i
# if equal zero lang
if index_i < index_j:
int_str = compute_interaction(residues[index_i],
residues[index_j],
model_contact[index_i][index_j],
norm_dict)
        # print int_str
if int_str >= interaction_thresh:
model_interact[index_i][index_j] = 1
model_interact[index_j][index_i] = 1
return model_interact
def compute_interaction_matrix(structure,
contact_thresh,
interaction_thresh,
edge_prct,
norm_dict,
outdir,
excluded=[]):
residues = []
for res in structure[0].get_residues():
residues.append(res.get_resname())
struct_interact = np.zeros((len(residues),len(residues)))
PSN = np.zeros((len(residues),len(residues)))
for model in structure:
model_interact = compute_atom_interaction(model,
contact_thresh,
interaction_thresh,
norm_dict,
residues,
excluded=[])
struct_interact = struct_interact + model_interact
edge_thresh = edge_prct * len(structure)
for i in range(len(residues)):
for j in range(len(residues)-i):
index_j = j + i
index_i = i
# if equal zero lang
if index_i < index_j:
if struct_interact[index_i][index_j]>= edge_thresh:
PSN[index_i][index_j] = 1
PSN[index_j][index_i] = 1
filename = "%s/%s_%.2f_%.2f_%.2f" % (outdir,structure.get_id(),contact_thresh,interaction_thresh,edge_prct)
np.savetxt(filename,PSN,fmt='%d')
def get_graph_rep(pdb_dir,
contact_thresh,
interaction_thresh,
edge_prct,
norm_dict,
outdir,
excluded=[]):
parser = PDBParser()
for filename in glob.iglob(pdb_dir + "/*.pdb"):
label = filename.split('/')[-1].split('.')[0]
print label
structure = parser.get_structure(label,filename)
for int_thresh in interaction_thresh:
compute_interaction_matrix(structure,
contact_thresh,
int_thresh,
edge_prct,
norm_dict,
outdir,
excluded=[])
def main(argv):
pdb_dir = ''
norm_fact = ''
outdir = ''
norm = 1
interaction_thresh = 1
contact_thresh = 4.5
normalize = False
edge_prct = 0.9
bb = ['N','CA','C','O']
try:
opts, args = getopt.getopt(argv,"hnp:f:c:i:e:o:",["pdb_dir=",
"norm_fact=",
"interaction_thresh=",
"contact_thresh=",
"edge_prct=",
"normalize",
"outdir="])
except getopt.GetoptError:
    print 'usage: PSN.py -p <pdb dir> -f <norm factor file> -c <contact thresh> -i <interaction threshs> -e <edge prct> -o <output> [-n]'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'Computes a network given a threshold or computes normalization factor'
print '''PSN.py
-p --pdb_dir\t\tpdb directory
-f --norm_fact\t\tnormalization factor file
-i --interaction_thresh\tcomma separated interaction strengths
-c --contact_thresh\tcontact threshold
-n --normalize\t\tComputes normalization factor of input pdbs
-o --output\t\tName of output'''
sys.exit()
elif opt in ("-p", "--pdb_dir"):
pdb_dir = arg
elif opt in ("-f", "--norm_fact"):
norm_fact = arg
elif opt in ("-e", "--edge_prct"):
edge_prct = string_to_float(arg)
elif opt in ("-c", "--contact_thresh"):
contact_thresh = string_to_float(arg)
elif opt in ("-i", "--interaction_thresh"):
interaction_thresh = []
for i in arg.split(","):
interaction_thresh.append(string_to_float(i))
elif opt in ("-o", "--output"):
outdir = arg
elif opt in ("-n","--normalize"):
normalize = True
# parser = PDBParser()
# structure = parser.get_structure('1akg','../native/1acw.pdb')
# model = structure[0]
# #compute_normalization_factor(pdb_dir, contact_thresh, output)
# compute_interaction_matrix(structure,
# contact_thresh,
# interaction_thresh,
# edge_prct,
# norm_dict)
if normalize:
compute_normalization_factor(pdb_dir, contact_thresh, outdir)
else:
norm_dict = read_normalization(norm_fact)
get_graph_rep(pdb_dir,
contact_thresh,
interaction_thresh,
edge_prct,
norm_dict,
outdir,
bb)
if __name__ == "__main__":
main(sys.argv[1:])
| false | true |
f70073d75e19b01388a36182b6eeb036bacce511 | 2,604 | py | Python | ExeProc/red/pythoncode/addressbook_pb2.py | vitorCamargo/distributed-systems | 23a294a21b9d148a415429dcd326a978780a50d0 | [
"MIT"
] | null | null | null | ExeProc/red/pythoncode/addressbook_pb2.py | vitorCamargo/distributed-systems | 23a294a21b9d148a415429dcd326a978780a50d0 | [
"MIT"
] | null | null | null | ExeProc/red/pythoncode/addressbook_pb2.py | vitorCamargo/distributed-systems | 23a294a21b9d148a415429dcd326a978780a50d0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: addressbook.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='addressbook.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x11\x61\x64\x64ressbook.proto\"1\n\x06Person\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\x05\x12\r\n\x05\x65mail\x18\x03 \x01(\tb\x06proto3')
)
_PERSON = _descriptor.Descriptor(
name='Person',
full_name='Person',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='Person.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='Person.id', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='email', full_name='Person.email', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=21,
serialized_end=70,
)
DESCRIPTOR.message_types_by_name['Person'] = _PERSON
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Person = _reflection.GeneratedProtocolMessageType('Person', (_message.Message,), dict(
DESCRIPTOR = _PERSON,
__module__ = 'addressbook_pb2'
# @@protoc_insertion_point(class_scope:Person)
))
_sym_db.RegisterMessage(Person)
# @@protoc_insertion_point(module_scope)
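# Usage sketch (editorial note; values are placeholders):
#   import addressbook_pb2
#   person = addressbook_pb2.Person(name="Alice", id=1, email="alice@example.com")
#   payload = person.SerializeToString()
#   parsed = addressbook_pb2.Person()
#   parsed.ParseFromString(payload)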
| 30.635294 | 182 | 0.731183 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='addressbook.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x11\x61\x64\x64ressbook.proto\"1\n\x06Person\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\x05\x12\r\n\x05\x65mail\x18\x03 \x01(\tb\x06proto3')
)
_PERSON = _descriptor.Descriptor(
name='Person',
full_name='Person',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='Person.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='Person.id', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='email', full_name='Person.email', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=21,
serialized_end=70,
)
DESCRIPTOR.message_types_by_name['Person'] = _PERSON
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Person = _reflection.GeneratedProtocolMessageType('Person', (_message.Message,), dict(
DESCRIPTOR = _PERSON,
__module__ = 'addressbook_pb2'
# @@protoc_insertion_point(class_scope:Person)
))
_sym_db.RegisterMessage(Person)
# @@protoc_insertion_point(module_scope)
| true | true |
f7007426c0895ef71449b89c57011d35fd1a9bea | 2,298 | py | Python | tutorial/helpers.py | DEVESHTARASIA/big-data-tutorial | 74e2aa1241c30913c5f12b9667f9d626002b98a2 | [
"CC-BY-3.0"
] | 56 | 2015-01-01T01:43:31.000Z | 2021-10-04T17:47:40.000Z | tutorial/helpers.py | DEVESHTARASIA/big-data-tutorial | 74e2aa1241c30913c5f12b9667f9d626002b98a2 | [
"CC-BY-3.0"
] | null | null | null | tutorial/helpers.py | DEVESHTARASIA/big-data-tutorial | 74e2aa1241c30913c5f12b9667f9d626002b98a2 | [
"CC-BY-3.0"
] | 51 | 2015-01-16T15:43:06.000Z | 2021-10-06T18:29:05.000Z | """
Small helpers for code that is not shown in the notebooks
"""
from sklearn import neighbors, datasets, linear_model
import pylab as pl
import numpy as np
from matplotlib.colors import ListedColormap
# Create color maps for 3-class classification problem, as with iris
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
def plot_iris_knn():
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
knn = neighbors.KNeighborsClassifier(n_neighbors=3)
knn.fit(X, y)
x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1
y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
np.linspace(y_min, y_max, 100))
Z = knn.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure()
pl.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
pl.xlabel('sepal length (cm)')
pl.ylabel('sepal width (cm)')
pl.axis('tight')
def plot_polynomial_regression():
rng = np.random.RandomState(0)
x = 2*rng.rand(100) - 1
f = lambda t: 1.2 * t**2 + .1 * t**3 - .4 * t **5 - .5 * t ** 9
y = f(x) + .4 * rng.normal(size=100)
x_test = np.linspace(-1, 1, 100)
pl.figure()
pl.scatter(x, y, s=4)
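    # Polynomial design matrices: columns are x**0 .. x**4, i.e. a 4th-order fit.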
X = np.array([x**i for i in range(5)]).T
X_test = np.array([x_test**i for i in range(5)]).T
regr = linear_model.LinearRegression()
regr.fit(X, y)
pl.plot(x_test, regr.predict(X_test), label='4th order')
X = np.array([x**i for i in range(10)]).T
X_test = np.array([x_test**i for i in range(10)]).T
regr = linear_model.LinearRegression()
regr.fit(X, y)
pl.plot(x_test, regr.predict(X_test), label='9th order')
pl.legend(loc='best')
pl.axis('tight')
pl.title('Fitting a 4th and a 9th order polynomial')
pl.figure()
pl.scatter(x, y, s=4)
pl.plot(x_test, f(x_test), label="truth")
pl.axis('tight')
pl.title('Ground truth (9th order polynomial)') | 31.479452 | 76 | 0.608355 |
from sklearn import neighbors, datasets, linear_model
import pylab as pl
import numpy as np
from matplotlib.colors import ListedColormap
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
def plot_iris_knn():
iris = datasets.load_iris()
    X = iris.data[:, :2]
    y = iris.target
knn = neighbors.KNeighborsClassifier(n_neighbors=3)
knn.fit(X, y)
x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1
y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
np.linspace(y_min, y_max, 100))
Z = knn.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
pl.figure()
pl.pcolormesh(xx, yy, Z, cmap=cmap_light)
pl.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
pl.xlabel('sepal length (cm)')
pl.ylabel('sepal width (cm)')
pl.axis('tight')
def plot_polynomial_regression():
rng = np.random.RandomState(0)
x = 2*rng.rand(100) - 1
f = lambda t: 1.2 * t**2 + .1 * t**3 - .4 * t **5 - .5 * t ** 9
y = f(x) + .4 * rng.normal(size=100)
x_test = np.linspace(-1, 1, 100)
pl.figure()
pl.scatter(x, y, s=4)
X = np.array([x**i for i in range(5)]).T
X_test = np.array([x_test**i for i in range(5)]).T
regr = linear_model.LinearRegression()
regr.fit(X, y)
pl.plot(x_test, regr.predict(X_test), label='4th order')
X = np.array([x**i for i in range(10)]).T
X_test = np.array([x_test**i for i in range(10)]).T
regr = linear_model.LinearRegression()
regr.fit(X, y)
pl.plot(x_test, regr.predict(X_test), label='9th order')
pl.legend(loc='best')
pl.axis('tight')
pl.title('Fitting a 4th and a 9th order polynomial')
pl.figure()
pl.scatter(x, y, s=4)
pl.plot(x_test, f(x_test), label="truth")
pl.axis('tight')
pl.title('Ground truth (9th order polynomial)') | true | true |
f7007438100c6e954cb5fc874048d000c8b4c1f5 | 3,253 | py | Python | iwata/settings.py | gingerbeardman/iwata-asks-downloader | 8b60b24bd7ad4c5a8b72e2cf9ab7557df65015d9 | [
"MIT"
] | 16 | 2020-01-05T14:55:01.000Z | 2021-06-06T09:41:08.000Z | iwata/settings.py | gingerbeardman/iwata-asks-downloader | 8b60b24bd7ad4c5a8b72e2cf9ab7557df65015d9 | [
"MIT"
] | 9 | 2020-01-05T16:42:36.000Z | 2020-01-06T15:03:56.000Z | iwata/settings.py | gingerbeardman/iwata-asks-downloader | 8b60b24bd7ad4c5a8b72e2cf9ab7557df65015d9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Scrapy settings for iwata project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'iwata'
SPIDER_MODULES = ['iwata.spiders']
NEWSPIDER_MODULE = 'iwata.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'iwata (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'iwata.middlewares.IwataSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'iwata.middlewares.IwataDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
# 'iwata.pipelines.IwataPipeline': 100,
# 'iwata.pipelines.JsonWriterPipeline': 200,
'iwata.pipelines.MarkdownWriterPipeline': 300,
'scrapy.pipelines.images.ImagesPipeline': 400,
}
IMAGES_STORE = '_images'
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 2
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 10
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
FEED_EXPORT_ENCODING = 'utf-8' | 33.885417 | 102 | 0.774055 |
BOT_NAME = 'iwata'
SPIDER_MODULES = ['iwata.spiders']
NEWSPIDER_MODULE = 'iwata.spiders'
ROBOTSTXT_OBEY = False
ITEM_PIPELINES = {
'iwata.pipelines.MarkdownWriterPipeline': 300,
'scrapy.pipelines.images.ImagesPipeline': 400,
}
IMAGES_STORE = '_images'
HTTPCACHE_ENABLED = True
FEED_EXPORT_ENCODING = 'utf-8' | true | true |
f70074f951aabc6cf1fa5cd4f6fc099ca53429b3 | 4,050 | py | Python | Examples/Host Record/update_host_record/update_host_record_form.py | kemori-bc/gateway-workflows | 5aa1e3492b0c0b4ec23a6247ca92861cc77f2187 | [
"Apache-2.0"
] | null | null | null | Examples/Host Record/update_host_record/update_host_record_form.py | kemori-bc/gateway-workflows | 5aa1e3492b0c0b4ec23a6247ca92861cc77f2187 | [
"Apache-2.0"
] | null | null | null | Examples/Host Record/update_host_record/update_host_record_form.py | kemori-bc/gateway-workflows | 5aa1e3492b0c0b4ec23a6247ca92861cc77f2187 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 BlueCat Networks (USA) Inc. and its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# By: BlueCat Networks
# Date: 2021-08-23
# Gateway Version: 20.12.1
# Description: Example Gateway workflow
"""
Update host record form
"""
from wtforms import SubmitField
from bluecat.wtform_fields import (
Configuration,
View,
Zone,
HostRecord,
CustomStringField,
PlainHTML,
CustomBooleanField,
)
from bluecat.server_endpoints import get_host_records_endpoint
from bluecat.wtform_extensions import GatewayForm
class GenericFormTemplate(GatewayForm):
"""Form to generate HTML and Javascript for the update_host_record workflow
Note:
When updating the form, remember to make the corresponding changes to the workflow pages
"""
workflow_name = "update_host_record"
workflow_permission = "update_host_record_page"
configuration = Configuration(
workflow_name=workflow_name,
permissions=workflow_permission,
label="Configuration",
required=True,
coerce=int,
clear_below_on_change=False,
is_disabled_on_start=False,
on_complete=["call_view"],
enable_dependencies={"on_complete": ["view"]},
disable_dependencies={"on_change": ["view"]},
clear_dependencies={"on_change": ["view"]},
)
view = View(
workflow_name=workflow_name,
permissions=workflow_permission,
label="View",
required=True,
one_off=True,
clear_below_on_change=False,
enable_dependencies={"on_complete": ["parent_zone"]},
disable_dependencies={"on_change": ["parent_zone"]},
clear_dependencies={"on_change": ["parent_zone"]},
should_cascade_disable_on_change=True,
should_cascade_clear_on_change=True,
)
parent_zone = Zone(
workflow_name=workflow_name,
permissions=workflow_permission,
label="Zone",
required=True,
start_initialized=True,
inputs={"zone": "parent_zone", "configuration": "configuration", "view": "view"},
clear_below_on_change=False,
enable_dependencies={"on_complete": ["host_record"]},
disable_dependencies={"on_change": ["host_record"]},
clear_dependencies={"on_change": ["host_record", "name", "ip4_address"]},
should_cascade_disable_on_change=True,
should_cascade_clear_on_change=True,
)
host_record = HostRecord(
workflow_name=workflow_name,
permissions=workflow_permission,
label="Host Record",
required=True,
inputs={
"configuration": "configuration",
"view": "view",
"parent_zone": "parent_zone",
"host_record": "host_record",
},
server_outputs={"on_complete": {"name": "name", "addresses": "ip4_address"}},
server_side_output_method=get_host_records_endpoint,
clear_below_on_change=False,
enable_dependencies={"on_complete": ["submit", "name", "ip4_address", "deploy_now"]},
disable_dependencies={"on_change": ["submit", "name", "ip4_address", "deploy_now"]},
should_cascade_disable_on_change=True,
)
separator = PlainHTML("<hr>")
name = CustomStringField(label="New Host Name", required=True)
ip4_address = CustomStringField(
label="IPv4 Address (multiple IPv4 addresses must be separated by a comma)", required=True
)
deploy_now = CustomBooleanField(label="Deploy Now")
submit = SubmitField(label="Update")
| 34.033613 | 98 | 0.682469 |
from wtforms import SubmitField
from bluecat.wtform_fields import (
Configuration,
View,
Zone,
HostRecord,
CustomStringField,
PlainHTML,
CustomBooleanField,
)
from bluecat.server_endpoints import get_host_records_endpoint
from bluecat.wtform_extensions import GatewayForm
class GenericFormTemplate(GatewayForm):
workflow_name = "update_host_record"
workflow_permission = "update_host_record_page"
configuration = Configuration(
workflow_name=workflow_name,
permissions=workflow_permission,
label="Configuration",
required=True,
coerce=int,
clear_below_on_change=False,
is_disabled_on_start=False,
on_complete=["call_view"],
enable_dependencies={"on_complete": ["view"]},
disable_dependencies={"on_change": ["view"]},
clear_dependencies={"on_change": ["view"]},
)
view = View(
workflow_name=workflow_name,
permissions=workflow_permission,
label="View",
required=True,
one_off=True,
clear_below_on_change=False,
enable_dependencies={"on_complete": ["parent_zone"]},
disable_dependencies={"on_change": ["parent_zone"]},
clear_dependencies={"on_change": ["parent_zone"]},
should_cascade_disable_on_change=True,
should_cascade_clear_on_change=True,
)
parent_zone = Zone(
workflow_name=workflow_name,
permissions=workflow_permission,
label="Zone",
required=True,
start_initialized=True,
inputs={"zone": "parent_zone", "configuration": "configuration", "view": "view"},
clear_below_on_change=False,
enable_dependencies={"on_complete": ["host_record"]},
disable_dependencies={"on_change": ["host_record"]},
clear_dependencies={"on_change": ["host_record", "name", "ip4_address"]},
should_cascade_disable_on_change=True,
should_cascade_clear_on_change=True,
)
host_record = HostRecord(
workflow_name=workflow_name,
permissions=workflow_permission,
label="Host Record",
required=True,
inputs={
"configuration": "configuration",
"view": "view",
"parent_zone": "parent_zone",
"host_record": "host_record",
},
server_outputs={"on_complete": {"name": "name", "addresses": "ip4_address"}},
server_side_output_method=get_host_records_endpoint,
clear_below_on_change=False,
enable_dependencies={"on_complete": ["submit", "name", "ip4_address", "deploy_now"]},
disable_dependencies={"on_change": ["submit", "name", "ip4_address", "deploy_now"]},
should_cascade_disable_on_change=True,
)
separator = PlainHTML("<hr>")
name = CustomStringField(label="New Host Name", required=True)
ip4_address = CustomStringField(
label="IPv4 Address (multiple IPv4 addresses must be separated by a comma)", required=True
)
deploy_now = CustomBooleanField(label="Deploy Now")
submit = SubmitField(label="Update")
| true | true |
f7007599d5407174b0644c00cd4a735463bb5b0e | 12,011 | py | Python | plugins/modules/oci_data_catalog_namespace_facts.py | LaudateCorpus1/oci-ansible-collection | 2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7 | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_data_catalog_namespace_facts.py | LaudateCorpus1/oci-ansible-collection | 2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7 | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_data_catalog_namespace_facts.py | LaudateCorpus1/oci-ansible-collection | 2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_data_catalog_namespace_facts
short_description: Fetches details about one or multiple Namespace resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple Namespace resources in Oracle Cloud Infrastructure
- Returns a list of namespaces within a data catalog.
- If I(namespace_id) is specified, the details of a single Namespace will be returned.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
catalog_id:
description:
- Unique catalog identifier.
type: str
required: true
namespace_id:
description:
- Unique namespace identifier.
- Required to get a specific namespace.
type: str
aliases: ["id"]
fields:
description:
- Specifies the fields to return in a namespace response.
type: list
elements: str
choices:
- "key"
- "displayName"
- "description"
- "lifecycleState"
- "timeCreated"
- "timeUpdated"
- "createdById"
- "updatedById"
- "properties"
display_name:
description:
- A filter to return only resources that match the entire display name given. The match is not case sensitive.
type: str
aliases: ["name"]
display_name_contains:
description:
- "A filter to return only resources that match display name pattern given. The match is not case sensitive.
For Example : /folders?displayNameContains=Cu.*
The above would match all folders with display name that starts with \\"Cu\\" or has the pattern \\"Cu\\" anywhere in between."
type: str
lifecycle_state:
description:
- A filter to return only resources that match the specified lifecycle state. The value is case insensitive.
type: str
choices:
- "CREATING"
- "ACTIVE"
- "INACTIVE"
- "UPDATING"
- "DELETING"
- "DELETED"
- "FAILED"
- "MOVING"
time_created:
description:
- Time that the resource was created. An L(RFC3339,https://tools.ietf.org/html/rfc3339) formatted datetime string.
type: str
time_updated:
description:
- Time that the resource was updated. An L(RFC3339,https://tools.ietf.org/html/rfc3339) formatted datetime string.
type: str
created_by_id:
description:
- OCID of the user who created the resource.
type: str
updated_by_id:
description:
- OCID of the user who updated the resource.
type: str
sort_by:
description:
- The field to sort by. Only one sort order may be provided. Default order for TIMECREATED is descending. Default order for DISPLAYNAME is
ascending. If no value is specified TIMECREATED is default.
type: str
choices:
- "TIMECREATED"
- "DISPLAYNAME"
sort_order:
description:
- The sort order to use, either 'asc' or 'desc'.
type: str
choices:
- "ASC"
- "DESC"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific namespace
oci_data_catalog_namespace_facts:
# required
catalog_id: "ocid1.catalog.oc1..xxxxxxEXAMPLExxxxxx"
namespace_id: "ocid1.namespace.oc1..xxxxxxEXAMPLExxxxxx"
# optional
fields: [ "key" ]
- name: List namespaces
oci_data_catalog_namespace_facts:
# required
catalog_id: "ocid1.catalog.oc1..xxxxxxEXAMPLExxxxxx"
# optional
fields: [ "key" ]
display_name: display_name_example
display_name_contains: display_name_contains_example
lifecycle_state: CREATING
time_created: 2013-10-20T19:20:30+01:00
time_updated: 2013-10-20T19:20:30+01:00
created_by_id: "ocid1.createdby.oc1..xxxxxxEXAMPLExxxxxx"
updated_by_id: "ocid1.updatedby.oc1..xxxxxxEXAMPLExxxxxx"
sort_by: TIMECREATED
sort_order: ASC
"""
RETURN = """
namespaces:
description:
- List of Namespace resources
returned: on success
type: complex
contains:
key:
description:
- Unique namespace key that is immutable.
returned: on success
type: str
sample: key_example
display_name:
description:
- Name of the Namespace
returned: on success
type: str
sample: display_name_example
description:
description:
- Description for the namespace
returned: on success
type: str
sample: description_example
is_service_defined:
description:
- If this field is defined by service or by a user
returned: on success
type: bool
sample: true
lifecycle_state:
description:
- The current state of the namespace.
returned: on success
type: str
sample: CREATING
time_created:
description:
- "The date and time the namespace was created, in the format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
Example: `2019-03-25T21:10:29.600Z`"
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_updated:
description:
- The last time that any change was made to the namespace. An L(RFC3339,https://tools.ietf.org/html/rfc3339) formatted datetime string.
- Returned for get operation
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
created_by_id:
description:
- OCID of the user who created the namespace.
- Returned for get operation
returned: on success
type: str
sample: "ocid1.createdby.oc1..xxxxxxEXAMPLExxxxxx"
updated_by_id:
description:
- OCID of the user who last modified the namespace.
- Returned for get operation
returned: on success
type: str
sample: "ocid1.updatedby.oc1..xxxxxxEXAMPLExxxxxx"
sample: [{
"key": "key_example",
"display_name": "display_name_example",
"description": "description_example",
"is_service_defined": true,
"lifecycle_state": "CREATING",
"time_created": "2013-10-20T19:20:30+01:00",
"time_updated": "2013-10-20T19:20:30+01:00",
"created_by_id": "ocid1.createdby.oc1..xxxxxxEXAMPLExxxxxx",
"updated_by_id": "ocid1.updatedby.oc1..xxxxxxEXAMPLExxxxxx"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.data_catalog import DataCatalogClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class DataCatalogNamespaceFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get, list"""
def get_required_params_for_get(self):
return [
"catalog_id",
"namespace_id",
]
def get_required_params_for_list(self):
return [
"catalog_id",
]
def get_resource(self):
optional_get_method_params = [
"fields",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_get_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.call_with_backoff(
self.client.get_namespace,
catalog_id=self.module.params.get("catalog_id"),
namespace_id=self.module.params.get("namespace_id"),
**optional_kwargs
)
def list_resources(self):
optional_list_method_params = [
"display_name",
"display_name_contains",
"lifecycle_state",
"time_created",
"time_updated",
"created_by_id",
"updated_by_id",
"sort_by",
"sort_order",
"fields",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_namespaces,
catalog_id=self.module.params.get("catalog_id"),
**optional_kwargs
)
DataCatalogNamespaceFactsHelperCustom = get_custom_class(
"DataCatalogNamespaceFactsHelperCustom"
)
class ResourceFactsHelper(
DataCatalogNamespaceFactsHelperCustom, DataCatalogNamespaceFactsHelperGen
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
catalog_id=dict(type="str", required=True),
namespace_id=dict(aliases=["id"], type="str"),
fields=dict(
type="list",
elements="str",
choices=[
"key",
"displayName",
"description",
"lifecycleState",
"timeCreated",
"timeUpdated",
"createdById",
"updatedById",
"properties",
],
),
display_name=dict(aliases=["name"], type="str"),
display_name_contains=dict(type="str"),
lifecycle_state=dict(
type="str",
choices=[
"CREATING",
"ACTIVE",
"INACTIVE",
"UPDATING",
"DELETING",
"DELETED",
"FAILED",
"MOVING",
],
),
time_created=dict(type="str"),
time_updated=dict(type="str"),
created_by_id=dict(type="str"),
updated_by_id=dict(type="str"),
sort_by=dict(type="str", choices=["TIMECREATED", "DISPLAYNAME"]),
sort_order=dict(type="str", choices=["ASC", "DESC"]),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="namespace",
service_client_class=DataCatalogClient,
namespace="data_catalog",
)
result = []
if resource_facts_helper.is_get():
result = [resource_facts_helper.get()]
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(namespaces=result)
if __name__ == "__main__":
main()
| 32.114973 | 151 | 0.589709 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_data_catalog_namespace_facts
short_description: Fetches details about one or multiple Namespace resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple Namespace resources in Oracle Cloud Infrastructure
- Returns a list of namespaces within a data catalog.
- If I(namespace_id) is specified, the details of a single Namespace will be returned.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
catalog_id:
description:
- Unique catalog identifier.
type: str
required: true
namespace_id:
description:
- Unique namespace identifier.
- Required to get a specific namespace.
type: str
aliases: ["id"]
fields:
description:
- Specifies the fields to return in a namespace response.
type: list
elements: str
choices:
- "key"
- "displayName"
- "description"
- "lifecycleState"
- "timeCreated"
- "timeUpdated"
- "createdById"
- "updatedById"
- "properties"
display_name:
description:
- A filter to return only resources that match the entire display name given. The match is not case sensitive.
type: str
aliases: ["name"]
display_name_contains:
description:
- "A filter to return only resources that match display name pattern given. The match is not case sensitive.
For Example : /folders?displayNameContains=Cu.*
The above would match all folders with display name that starts with \\"Cu\\" or has the pattern \\"Cu\\" anywhere in between."
type: str
lifecycle_state:
description:
- A filter to return only resources that match the specified lifecycle state. The value is case insensitive.
type: str
choices:
- "CREATING"
- "ACTIVE"
- "INACTIVE"
- "UPDATING"
- "DELETING"
- "DELETED"
- "FAILED"
- "MOVING"
time_created:
description:
- Time that the resource was created. An L(RFC3339,https://tools.ietf.org/html/rfc3339) formatted datetime string.
type: str
time_updated:
description:
- Time that the resource was updated. An L(RFC3339,https://tools.ietf.org/html/rfc3339) formatted datetime string.
type: str
created_by_id:
description:
- OCID of the user who created the resource.
type: str
updated_by_id:
description:
- OCID of the user who updated the resource.
type: str
sort_by:
description:
- The field to sort by. Only one sort order may be provided. Default order for TIMECREATED is descending. Default order for DISPLAYNAME is
ascending. If no value is specified TIMECREATED is default.
type: str
choices:
- "TIMECREATED"
- "DISPLAYNAME"
sort_order:
description:
- The sort order to use, either 'asc' or 'desc'.
type: str
choices:
- "ASC"
- "DESC"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific namespace
oci_data_catalog_namespace_facts:
# required
catalog_id: "ocid1.catalog.oc1..xxxxxxEXAMPLExxxxxx"
namespace_id: "ocid1.namespace.oc1..xxxxxxEXAMPLExxxxxx"
# optional
fields: [ "key" ]
- name: List namespaces
oci_data_catalog_namespace_facts:
# required
catalog_id: "ocid1.catalog.oc1..xxxxxxEXAMPLExxxxxx"
# optional
fields: [ "key" ]
display_name: display_name_example
display_name_contains: display_name_contains_example
lifecycle_state: CREATING
time_created: 2013-10-20T19:20:30+01:00
time_updated: 2013-10-20T19:20:30+01:00
created_by_id: "ocid1.createdby.oc1..xxxxxxEXAMPLExxxxxx"
updated_by_id: "ocid1.updatedby.oc1..xxxxxxEXAMPLExxxxxx"
sort_by: TIMECREATED
sort_order: ASC
"""
RETURN = """
namespaces:
description:
- List of Namespace resources
returned: on success
type: complex
contains:
key:
description:
- Unique namespace key that is immutable.
returned: on success
type: str
sample: key_example
display_name:
description:
- Name of the Namespace
returned: on success
type: str
sample: display_name_example
description:
description:
- Description for the namespace
returned: on success
type: str
sample: description_example
is_service_defined:
description:
- If this field is defined by service or by a user
returned: on success
type: bool
sample: true
lifecycle_state:
description:
- The current state of the namespace.
returned: on success
type: str
sample: CREATING
time_created:
description:
- "The date and time the namespace was created, in the format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
Example: `2019-03-25T21:10:29.600Z`"
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_updated:
description:
- The last time that any change was made to the namespace. An L(RFC3339,https://tools.ietf.org/html/rfc3339) formatted datetime string.
- Returned for get operation
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
created_by_id:
description:
- OCID of the user who created the namespace.
- Returned for get operation
returned: on success
type: str
sample: "ocid1.createdby.oc1..xxxxxxEXAMPLExxxxxx"
updated_by_id:
description:
- OCID of the user who last modified the namespace.
- Returned for get operation
returned: on success
type: str
sample: "ocid1.updatedby.oc1..xxxxxxEXAMPLExxxxxx"
sample: [{
"key": "key_example",
"display_name": "display_name_example",
"description": "description_example",
"is_service_defined": true,
"lifecycle_state": "CREATING",
"time_created": "2013-10-20T19:20:30+01:00",
"time_updated": "2013-10-20T19:20:30+01:00",
"created_by_id": "ocid1.createdby.oc1..xxxxxxEXAMPLExxxxxx",
"updated_by_id": "ocid1.updatedby.oc1..xxxxxxEXAMPLExxxxxx"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.data_catalog import DataCatalogClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class DataCatalogNamespaceFactsHelperGen(OCIResourceFactsHelperBase):
def get_required_params_for_get(self):
return [
"catalog_id",
"namespace_id",
]
def get_required_params_for_list(self):
return [
"catalog_id",
]
def get_resource(self):
optional_get_method_params = [
"fields",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_get_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.call_with_backoff(
self.client.get_namespace,
catalog_id=self.module.params.get("catalog_id"),
namespace_id=self.module.params.get("namespace_id"),
**optional_kwargs
)
def list_resources(self):
optional_list_method_params = [
"display_name",
"display_name_contains",
"lifecycle_state",
"time_created",
"time_updated",
"created_by_id",
"updated_by_id",
"sort_by",
"sort_order",
"fields",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_namespaces,
catalog_id=self.module.params.get("catalog_id"),
**optional_kwargs
)
DataCatalogNamespaceFactsHelperCustom = get_custom_class(
"DataCatalogNamespaceFactsHelperCustom"
)
class ResourceFactsHelper(
DataCatalogNamespaceFactsHelperCustom, DataCatalogNamespaceFactsHelperGen
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
catalog_id=dict(type="str", required=True),
namespace_id=dict(aliases=["id"], type="str"),
fields=dict(
type="list",
elements="str",
choices=[
"key",
"displayName",
"description",
"lifecycleState",
"timeCreated",
"timeUpdated",
"createdById",
"updatedById",
"properties",
],
),
display_name=dict(aliases=["name"], type="str"),
display_name_contains=dict(type="str"),
lifecycle_state=dict(
type="str",
choices=[
"CREATING",
"ACTIVE",
"INACTIVE",
"UPDATING",
"DELETING",
"DELETED",
"FAILED",
"MOVING",
],
),
time_created=dict(type="str"),
time_updated=dict(type="str"),
created_by_id=dict(type="str"),
updated_by_id=dict(type="str"),
sort_by=dict(type="str", choices=["TIMECREATED", "DISPLAYNAME"]),
sort_order=dict(type="str", choices=["ASC", "DESC"]),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="namespace",
service_client_class=DataCatalogClient,
namespace="data_catalog",
)
result = []
if resource_facts_helper.is_get():
result = [resource_facts_helper.get()]
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(namespaces=result)
if __name__ == "__main__":
main()
| true | true |
f700763f7e7484369e4aaaf60e64c92af4f1f9e9 | 89 | py | Python | api/run.py | clodonil/mytasks | 0355c9a37139d3cd5aeef24e804a2e3f513136b6 | [
"Apache-2.0"
] | null | null | null | api/run.py | clodonil/mytasks | 0355c9a37139d3cd5aeef24e804a2e3f513136b6 | [
"Apache-2.0"
] | null | null | null | api/run.py | clodonil/mytasks | 0355c9a37139d3cd5aeef24e804a2e3f513136b6 | [
"Apache-2.0"
] | null | null | null | from app import app
app.run(app.config['HOST'], app.config['PORT'], app.config['DEBUG'])
| 29.666667 | 68 | 0.696629 | from app import app
app.run(app.config['HOST'], app.config['PORT'], app.config['DEBUG'])
| true | true |
f70076a50a1ec77000417afebe6c4b53d762ed40 | 81 | py | Python | project/api/__init__.py | Shiqan/fortnite-replay-api | 0ef42287a559b55864244961c3ccf7a697be740f | [
"MIT"
] | 4 | 2019-05-16T12:52:40.000Z | 2020-04-21T19:24:36.000Z | project/api/__init__.py | Shiqan/fortnite-replay-api | 0ef42287a559b55864244961c3ccf7a697be740f | [
"MIT"
] | null | null | null | project/api/__init__.py | Shiqan/fortnite-replay-api | 0ef42287a559b55864244961c3ccf7a697be740f | [
"MIT"
] | 2 | 2021-03-23T13:27:02.000Z | 2022-03-06T21:30:08.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Fortnite replay api blueprint
"""
| 11.571429 | 29 | 0.592593 | true | true |
|
f70078f9bc488153bc2bdd1da52425628d5aa4e9 | 1,968 | py | Python | tests/unit_tests/modules/s3/s3gis/KMLLayer.py | PeterDaveHello/eden | 26174a9dde2f19cd3bc879694f373ad5f765b6ed | [
"MIT"
] | 1 | 2017-07-22T18:49:34.000Z | 2017-07-22T18:49:34.000Z | tests/unit_tests/modules/s3/s3gis/KMLLayer.py | PeterDaveHello/eden | 26174a9dde2f19cd3bc879694f373ad5f765b6ed | [
"MIT"
] | null | null | null | tests/unit_tests/modules/s3/s3gis/KMLLayer.py | PeterDaveHello/eden | 26174a9dde2f19cd3bc879694f373ad5f765b6ed | [
"MIT"
] | 1 | 2019-12-16T15:14:46.000Z | 2019-12-16T15:14:46.000Z |
s3gis_tests = load_module("tests.unit_tests.modules.s3.s3gis")
s3gis = s3gis_tests.s3gis
def test_KMLLayer():
current.session.s3.debug = True
current.request.utcnow = datetime.datetime.now()
s3gis_tests.layer_test(
db,
db.gis_layer_kml,
dict(
name = "Test KML",
description = "Test KML layer",
enabled = True,
created_on = datetime.datetime.now(),
modified_on = datetime.datetime.now(),
url = "test://test_KML",
),
"S3.gis.layers_kml",
[
{
"marker_height": 34,
"marker_image": u"gis_marker.image.marker_red.png",
"marker_width": 20,
"name": u"Test KML",
# this shows that caching is OK:
"url": u"/eden/default/download/gis_cache2.file.Test_20KML.kml"
}
],
session = session,
request = request,
)
def test_KMLCaching_not_possible():
import os.path
import sys
class Mock(object):
pass
mock_stderr = Mock()
buffer = []
def mock_write(error_message):
buffer.append(error_message)
mock_stderr.write = mock_write
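    # Pretend the cache directory does not exist and capture stderr, so the
    # "KML layers cannot be cached" error path can be asserted below.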
with s3gis_tests.Change(
os.path,
{
"exists": lambda *a, **kw: False
}
):
with s3gis_tests.Change(
sys,
{
"stderr": mock_stderr
}
):
with s3gis_tests.Change(
current.session.s3,
{
"debug": False
}
):
kml_layer = s3gis.KMLLayer(s3gis.GIS())
js = kml_layer.as_javascript()
assert session.error.startswith(
"GIS: KML layers cannot be cached: "
)
assert "GIS: KML layers cannot be cached:" in buffer[0]
| 27.71831 | 79 | 0.490346 |
s3gis_tests = load_module("tests.unit_tests.modules.s3.s3gis")
s3gis = s3gis_tests.s3gis
def test_KMLLayer():
current.session.s3.debug = True
current.request.utcnow = datetime.datetime.now()
s3gis_tests.layer_test(
db,
db.gis_layer_kml,
dict(
name = "Test KML",
description = "Test KML layer",
enabled = True,
created_on = datetime.datetime.now(),
modified_on = datetime.datetime.now(),
url = "test://test_KML",
),
"S3.gis.layers_kml",
[
{
"marker_height": 34,
"marker_image": u"gis_marker.image.marker_red.png",
"marker_width": 20,
"name": u"Test KML",
"url": u"/eden/default/download/gis_cache2.file.Test_20KML.kml"
}
],
session = session,
request = request,
)
def test_KMLCaching_not_possible():
import os.path
import sys
class Mock(object):
pass
mock_stderr = Mock()
buffer = []
def mock_write(error_message):
buffer.append(error_message)
mock_stderr.write = mock_write
with s3gis_tests.Change(
os.path,
{
"exists": lambda *a, **kw: False
}
):
with s3gis_tests.Change(
sys,
{
"stderr": mock_stderr
}
):
with s3gis_tests.Change(
current.session.s3,
{
"debug": False
}
):
kml_layer = s3gis.KMLLayer(s3gis.GIS())
js = kml_layer.as_javascript()
assert session.error.startswith(
"GIS: KML layers cannot be cached: "
)
assert "GIS: KML layers cannot be cached:" in buffer[0]
| true | true |
f70079abc23e2a4161a080f73867e8d2f28c4762 | 1,338 | py | Python | examples/starwars/data.py | mdornseif/graphene-gae | a223d10b7558c7e8e6d190cd1297eba54878c4c8 | [
"BSD-3-Clause"
] | 128 | 2016-05-17T05:48:13.000Z | 2021-11-08T12:45:59.000Z | examples/starwars/data.py | mdornseif/graphene-gae | a223d10b7558c7e8e6d190cd1297eba54878c4c8 | [
"BSD-3-Clause"
] | 42 | 2016-05-26T04:35:45.000Z | 2021-03-25T21:28:05.000Z | examples/starwars/data.py | mdornseif/graphene-gae | a223d10b7558c7e8e6d190cd1297eba54878c4c8 | [
"BSD-3-Clause"
] | 13 | 2016-06-04T14:03:10.000Z | 2020-10-01T06:18:43.000Z |
from .models import Character, Faction, Ship
__author__ = 'ekampf'
def initialize():
human = Character(name='Human')
human.put()
droid = Character(name='Droid')
droid.put()
rebels = Faction(id="rebels", name='Alliance to Restore the Republic', hero_key=human.key)
rebels.put()
empire = Faction(id="empire", name='Galactic Empire', hero_key=droid.key)
empire.put()
xwing = Ship(name='X-Wing', faction_key=rebels.key)
xwing.put()
ywing = Ship(name='Y-Wing', faction_key=rebels.key)
ywing.put()
awing = Ship(name='A-Wing', faction_key=rebels.key)
awing.put()
# Yeah, technically it's Corellian. But it flew in the service of the rebels,
# so for the purposes of this demo it's a rebel ship.
falcon = Ship(name='Millenium Falcon', faction_key=rebels.key)
falcon.put()
homeOne = Ship(name='Home One', faction_key=rebels.key)
homeOne.put()
tieFighter = Ship(name='TIE Fighter', faction_key=empire.key)
tieFighter.put()
tieInterceptor = Ship(name='TIE Interceptor', faction_key=empire.key)
tieInterceptor.put()
executor = Ship(name='Executor', faction_key=empire.key)
executor.put()
def create_ship(ship_name, faction_key):
new_ship = Ship(name=ship_name, faction_key=faction_key)
new_ship.put()
return new_ship
| 26.235294 | 94 | 0.68012 |
from .models import Character, Faction, Ship
__author__ = 'ekampf'
def initialize():
human = Character(name='Human')
human.put()
droid = Character(name='Droid')
droid.put()
rebels = Faction(id="rebels", name='Alliance to Restore the Republic', hero_key=human.key)
rebels.put()
empire = Faction(id="empire", name='Galactic Empire', hero_key=droid.key)
empire.put()
xwing = Ship(name='X-Wing', faction_key=rebels.key)
xwing.put()
ywing = Ship(name='Y-Wing', faction_key=rebels.key)
ywing.put()
awing = Ship(name='A-Wing', faction_key=rebels.key)
awing.put()
# so for the purposes of this demo it's a rebel ship.
falcon = Ship(name='Millenium Falcon', faction_key=rebels.key)
falcon.put()
homeOne = Ship(name='Home One', faction_key=rebels.key)
homeOne.put()
tieFighter = Ship(name='TIE Fighter', faction_key=empire.key)
tieFighter.put()
tieInterceptor = Ship(name='TIE Interceptor', faction_key=empire.key)
tieInterceptor.put()
executor = Ship(name='Executor', faction_key=empire.key)
executor.put()
def create_ship(ship_name, faction_key):
new_ship = Ship(name=ship_name, faction_key=faction_key)
new_ship.put()
return new_ship
| true | true |
f70079cc4279ef48d761d342b154ee50b32ac6d2 | 14,748 | py | Python | rssant_cli/rss.py | dumpmemory/rssant | 8bacf91bedc2d2fa35f3e8b0f6a8b8c2a712c1a6 | [
"BSD-3-Clause"
] | null | null | null | rssant_cli/rss.py | dumpmemory/rssant | 8bacf91bedc2d2fa35f3e8b0f6a8b8c2a712c1a6 | [
"BSD-3-Clause"
] | null | null | null | rssant_cli/rss.py | dumpmemory/rssant | 8bacf91bedc2d2fa35f3e8b0f6a8b8c2a712c1a6 | [
"BSD-3-Clause"
] | null | null | null | import logging
import time
import json
from collections import defaultdict
import tqdm
import click
from django.utils import timezone
from django.db import transaction, connection
from django.db.models import Q
from django.contrib.auth import get_user_model
import rssant_common.django_setup # noqa:F401
from rssant_api.models import Feed, Story, UnionFeed, UserStory, UserFeed
from rssant_api.helper import reverse_url
from rssant_common import _proxy_helper
from rssant_common.helper import format_table, pretty_format_json
from rssant_feedlib.reader import FeedResponseStatus, FeedReader
from rssant_common import unionid
from rssant_feedlib import processor
from rssant_common.actor_client import scheduler
from rssant_config import CONFIG
LOG = logging.getLogger(__name__)
@click.group()
def main():
"""RSS Commands"""
def _decode_feed_ids(option_feeds):
"""
>>> _decode_feed_ids('123,456')
[123, 456]
"""
return [int(x) for x in option_feeds.strip().split(',')]
def _decode_union_feed_ids(option_feeds):
"""
>>> _decode_union_feed_ids('014064,0140be')
[196, 366]
"""
return [unionid.decode(x)[1] for x in option_feeds.strip().split(',')]
def _get_all_feed_ids():
feed_ids = [feed.id for feed in Feed.objects.only('id').all()]
return feed_ids
def _get_feed_ids(option_feeds):
if option_feeds and option_feeds != 'all':
feed_ids = _decode_feed_ids(option_feeds)
else:
feed_ids = _get_all_feed_ids()
return feed_ids
def _get_story_ids(option_storys):
if option_storys:
story_ids = option_storys.strip().split(',')
else:
story_ids = [story.id for story in Story.objects.only('id').all()]
return story_ids
@main.command()
@click.option('--dry-run', is_flag=True)
def fix_feed_total_storys(dry_run=False):
incorrect_feeds = Story.query_feed_incorrect_total_storys()
LOG.info('total %s incorrect feeds', len(incorrect_feeds))
header = ['feed_id', 'total_storys', 'correct_total_storys']
click.echo(format_table(incorrect_feeds, header=header))
if dry_run:
return
with transaction.atomic():
num_corrected = 0
for feed_id, *__ in tqdm.tqdm(incorrect_feeds, ncols=80, ascii=True):
fixed = Story.fix_feed_total_storys(feed_id)
if fixed:
num_corrected += 1
LOG.info('correct %s feeds', num_corrected)
@main.command()
@click.option('--feeds', help="feed ids, separate by ','")
def update_feed_monthly_story_count(feeds=None):
feed_ids = _get_feed_ids(feeds)
LOG.info('total %s feeds', len(feed_ids))
for feed_id in tqdm.tqdm(feed_ids, ncols=80, ascii=True):
with transaction.atomic():
Story.refresh_feed_monthly_story_count(feed_id)
@main.command()
@click.option('--feeds', help="feed ids, separate by ','")
def update_feed_dryness(feeds=None):
feed_ids = _get_feed_ids(feeds)
LOG.info('total %s feeds', len(feed_ids))
for feed_id in tqdm.tqdm(feed_ids, ncols=80, ascii=True):
with transaction.atomic():
feed = Feed.get_by_pk(feed_id)
if feed.total_storys <= 0:
continue
cnt = feed.monthly_story_count
if not cnt:
Story.refresh_feed_monthly_story_count(feed_id)
feed.refresh_from_db()
feed.dryness = feed.monthly_story_count.dryness()
feed.save()
@main.command()
@click.option('--feeds', help="feed ids, separate by ','")
def update_feed_dt_first_story_published(feeds=None):
feed_ids = _get_feed_ids(feeds)
LOG.info('total %s feeds', len(feed_ids))
for feed_id in tqdm.tqdm(feed_ids, ncols=80, ascii=True):
with transaction.atomic():
feed = Feed.get_by_pk(feed_id)
if feed.dt_first_story_published:
continue
if feed.total_storys <= 0:
continue
try:
story = Story.get_by_offset(feed_id, 0, detail=True)
except Story.DoesNotExist:
LOG.warning(f'story feed_id={feed_id} offset=0 not exists')
continue
feed.dt_first_story_published = story.dt_published
feed.save()
@main.command()
@click.option('--storys', help="story ids, separate by ','")
def update_story_has_mathjax(storys=None):
story_ids = _get_story_ids(storys)
LOG.info('total %s storys', len(story_ids))
for story_id in tqdm.tqdm(story_ids, ncols=80, ascii=True):
with transaction.atomic():
story = Story.objects.only('id', 'content', '_version').get(pk=story_id)
if processor.story_has_mathjax(story.content):
story.has_mathjax = True
story.save()
@main.command()
def update_story_is_user_marked():
user_storys = list(
UserStory.objects
.exclude(is_watched=False, is_favorited=False)
.all()
)
LOG.info('total %s user marked storys', len(user_storys))
if not user_storys:
return
for user_story in tqdm.tqdm(user_storys, ncols=80, ascii=True):
Story.set_user_marked_by_id(user_story.story_id)
@main.command()
@click.option('--storys', help="story ids, separate by ','")
def process_story_links(storys=None):
story_ids = _get_story_ids(storys)
LOG.info('total %s storys', len(story_ids))
for story_id in tqdm.tqdm(story_ids, ncols=80, ascii=True):
with transaction.atomic():
story = Story.objects.only('id', 'content', '_version').get(pk=story_id)
content = processor.process_story_links(story.content, story.link)
if story.content != content:
story.content = content
story.save()
@main.command()
@click.option('--storys', help="story ids, separate by ','")
def update_story_images(storys=None):
story_ids = _get_story_ids(storys)
LOG.info('total %s storys', len(story_ids))
for story_id in tqdm.tqdm(story_ids, ncols=80, ascii=True):
story = Story.objects.get(pk=story_id)
scheduler.tell('harbor_rss.update_story_images', dict(
story_id=story_id,
story_url=story.link,
images=[],
))
@main.command()
@click.argument('unionid_text')
def decode_unionid(unionid_text):
numbers = unionid.decode(unionid_text)
if len(numbers) == 3:
click.echo('user_id={} feed_id={} offset={}'.format(*numbers))
elif len(numbers) == 2:
click.echo('user_id={} feed_id={}'.format(*numbers))
else:
click.echo(numbers)
@main.command()
@click.option('--days', type=int, default=1)
@click.option('--limit', type=int, default=100)
@click.option('--threshold', type=int, default=99)
def delete_invalid_feeds(days=1, limit=100, threshold=99):
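    """Find feeds whose recent fetches almost always fail, and optionally delete them."""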
sql = """
SELECT feed_id, title, link, url, status_code, count FROM (
SELECT feed_id, status_code, count(1) as count FROM rssant_api_rawfeed
WHERE dt_created >= %s and (status_code < 200 or status_code >= 400)
group by feed_id, status_code
having count(1) > 3
order by count desc
limit %s
) error_feed
join rssant_api_feed
on error_feed.feed_id = rssant_api_feed.id
order by feed_id, status_code, count;
"""
sql_ok_count = """
SELECT feed_id, count(1) as count FROM rssant_api_rawfeed
WHERE dt_created >= %s and (status_code >= 200 and status_code < 400)
AND feed_id=ANY(%s)
group by feed_id
"""
t_begin = timezone.now() - timezone.timedelta(days=days)
error_feeds = defaultdict(dict)
with connection.cursor() as cursor:
cursor.execute(sql, [t_begin, limit])
for feed_id, title, link, url, status_code, count in cursor.fetchall():
error_feeds[feed_id].update(feed_id=feed_id, title=title, link=link, url=url)
error = error_feeds[feed_id].setdefault('error', {})
error_name = FeedResponseStatus.name_of(status_code)
error[error_name] = count
error_feeds[feed_id]['error_count'] = sum(error.values())
error_feeds[feed_id].update(ok_count=0, error_percent=100)
cursor.execute(sql_ok_count, [t_begin, list(error_feeds)])
for feed_id, ok_count in cursor.fetchall():
feed = error_feeds[feed_id]
total = feed['error_count'] + ok_count
error_percent = round((feed['error_count'] / total) * 100)
feed.update(ok_count=ok_count, error_percent=error_percent)
error_feeds = list(sorted(error_feeds.values(), key=lambda x: x['error_percent'], reverse=True))
delete_feed_ids = []
for feed in error_feeds:
if feed['error_percent'] >= threshold:
delete_feed_ids.append(feed['feed_id'])
click.echo(pretty_format_json(feed))
if delete_feed_ids:
confirm_delete = click.confirm(f'Delete {len(delete_feed_ids)} feeds?')
if not confirm_delete:
click.echo('Abort!')
else:
UnionFeed.bulk_delete(delete_feed_ids)
click.echo('Done!')
return error_feeds
@main.command()
def fix_user_story_offset():
sql = """
SELECT us.id, us."offset", story."offset"
FROM rssant_api_userstory AS us
LEFT OUTER JOIN rssant_api_story AS story
ON us.story_id=story.id
WHERE us."offset" != story."offset"
"""
items = []
with connection.cursor() as cursor:
cursor.execute(sql)
for us_id, us_offset, story_offset in cursor.fetchall():
items.append((us_id, us_offset, story_offset))
click.echo(f'total {len(items)} mismatch user story offset')
if not items:
return
with transaction.atomic():
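        # Two-pass update: first write negated offsets so the second pass cannot
        # clash with offsets still held by rows that are not yet corrected,
        # then write the correct story offsets.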
for us_id, us_offset, story_offset in tqdm.tqdm(items, ncols=80, ascii=True):
UserStory.objects.filter(pk=us_id).update(offset=-us_offset)
for us_id, us_offset, story_offset in tqdm.tqdm(items, ncols=80, ascii=True):
UserStory.objects.filter(pk=us_id).update(offset=story_offset)
@main.command()
def subscribe_changelog():
changelog_url = CONFIG.root_url.rstrip('/') + '/changelog.atom'
feed = Feed.objects.get(url=changelog_url)
if not feed:
click.echo(f'not found changelog feed url={changelog_url}')
return
click.echo(f'changelog feed {feed}')
User = get_user_model()
users = list(User.objects.all())
click.echo(f'total {len(users)} users')
for user in tqdm.tqdm(users, ncols=80, ascii=True):
with transaction.atomic():
user_feed = UserFeed.objects\
.filter(user_id=user.id, feed_id=feed.id).first()
if not user_feed:
user_feed = UserFeed(
user_id=user.id,
feed_id=feed.id,
is_from_bookmark=False,
)
user_feed.save()
@main.command()
def update_feed_use_proxy():
if not CONFIG.rss_proxy_enable:
        click.echo('rss proxy not enabled!')
return
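    # Feed-title patterns excluded from the proxy check: cnblogs (博客园),
    # WeChat (微信), Sina (新浪) and per-article comment feeds.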
blacklist = [
'%博客园%',
'%微信%',
'%新浪%',
'%的评论%',
'%Comments on%',
]
sql = """
select * from rssant_api_feed
where (NOT title LIKE ANY(%s)) AND (
dt_created >= '2020-04-01' or
(total_storys <= 5 and dt_updated <= '2019-12-01')
)
"""
feeds = list(Feed.objects.raw(sql, [blacklist]))
click.echo(f'{len(feeds)} feeds need check')
reader = FeedReader(**_proxy_helper.get_proxy_options())
proxy_feeds = []
with reader:
for i, feed in enumerate(feeds):
click.echo(f'#{i} {feed}')
status = reader.read(feed.url).status
click.echo(f' #{i} status={FeedResponseStatus.name_of(status)}')
if FeedResponseStatus.is_need_proxy(status):
proxy_status = reader.read(feed.url, use_proxy=True).status
click.echo(f' #{i} proxy_status={FeedResponseStatus.name_of(proxy_status)}')
if proxy_status == 200:
proxy_feeds.append(feed)
click.echo(f'{len(proxy_feeds)} feeds need use proxy')
if proxy_feeds:
with transaction.atomic():
for feed in tqdm.tqdm(proxy_feeds, ncols=80, ascii=True):
feed.refresh_from_db()
feed.use_proxy = True
feed.save()
@main.command()
@click.argument('key')
def delete_feed(key):
try:
key = int(key)
except ValueError:
pass # ignore
if isinstance(key, int):
feed = Feed.get_by_pk(key)
else:
feed = Feed.objects.filter(
Q(url__contains=key) | Q(title__contains=key)
).first()
if not feed:
print(f'not found feed like {key}')
return
if click.confirm(f'delete {feed} ?'):
feed.delete()
@main.command()
@click.option('--feeds', help="feed ids, separate by ','")
@click.option('--union-feeds', help="union feed ids, separate by ','")
@click.option('--key', help="feed url or title keyword")
@click.option('--expire', type=int, default=1, help="expire hours")
def refresh_feed(feeds, union_feeds, key, expire=None):
feed_ids = []
if feeds:
feed_ids.extend(_get_feed_ids(feeds))
if union_feeds:
feed_ids.extend(_decode_union_feed_ids(union_feeds))
if key:
cond = Q(url__contains=key) | Q(title__contains=key)
feed_objs = Feed.objects.filter(cond).only('id').all()
feed_ids.extend(x.id for x in feed_objs)
feed_ids = list(sorted(set(feed_ids)))
expire_at = time.time() + expire * 60 * 60
for feed_id in tqdm.tqdm(feed_ids, ncols=80, ascii=True):
feed = Feed.objects.only('id', 'url', 'use_proxy').get(pk=feed_id)
scheduler.tell('worker_rss.sync_feed', dict(
feed_id=feed.id,
url=feed.url,
use_proxy=feed.use_proxy,
is_refresh=True,
), expire_at=expire_at)
@main.command()
@click.option('--feeds', required=True, help="feed ids, separate by ','")
def update_feed_reverse_url(feeds):
feed_ids = _get_feed_ids(feeds)
for feed_id in tqdm.tqdm(feed_ids, ncols=80, ascii=True):
feed = Feed.objects.get(pk=feed_id)
feed.reverse_url = reverse_url(feed.url)
feed.save()
@main.command()
@click.option('--dst', required=True, help='actor dst')
@click.option('--content', help='message content')
@click.option('--expire-seconds', type=int, help='expire time in seconds')
def tell(dst, content, expire_seconds):
if content:
content = json.loads(content)
expire_at = None
if expire_seconds:
expire_at = int(time.time()) + expire_seconds
scheduler.tell(dst, content=content, expire_at=expire_at)
if __name__ == "__main__":
main()
| 34.865248 | 100 | 0.641782 | import logging
import time
import json
from collections import defaultdict
import tqdm
import click
from django.utils import timezone
from django.db import transaction, connection
from django.db.models import Q
from django.contrib.auth import get_user_model
import rssant_common.django_setup
from rssant_api.models import Feed, Story, UnionFeed, UserStory, UserFeed
from rssant_api.helper import reverse_url
from rssant_common import _proxy_helper
from rssant_common.helper import format_table, pretty_format_json
from rssant_feedlib.reader import FeedResponseStatus, FeedReader
from rssant_common import unionid
from rssant_feedlib import processor
from rssant_common.actor_client import scheduler
from rssant_config import CONFIG
LOG = logging.getLogger(__name__)
@click.group()
def main():
def _decode_feed_ids(option_feeds):
return [int(x) for x in option_feeds.strip().split(',')]
def _decode_union_feed_ids(option_feeds):
return [unionid.decode(x)[1] for x in option_feeds.strip().split(',')]
def _get_all_feed_ids():
feed_ids = [feed.id for feed in Feed.objects.only('id').all()]
return feed_ids
def _get_feed_ids(option_feeds):
if option_feeds and option_feeds != 'all':
feed_ids = _decode_feed_ids(option_feeds)
else:
feed_ids = _get_all_feed_ids()
return feed_ids
def _get_story_ids(option_storys):
if option_storys:
story_ids = option_storys.strip().split(',')
else:
story_ids = [story.id for story in Story.objects.only('id').all()]
return story_ids
@main.command()
@click.option('--dry-run', is_flag=True)
def fix_feed_total_storys(dry_run=False):
incorrect_feeds = Story.query_feed_incorrect_total_storys()
LOG.info('total %s incorrect feeds', len(incorrect_feeds))
header = ['feed_id', 'total_storys', 'correct_total_storys']
click.echo(format_table(incorrect_feeds, header=header))
if dry_run:
return
with transaction.atomic():
num_corrected = 0
for feed_id, *__ in tqdm.tqdm(incorrect_feeds, ncols=80, ascii=True):
fixed = Story.fix_feed_total_storys(feed_id)
if fixed:
num_corrected += 1
LOG.info('correct %s feeds', num_corrected)
@main.command()
@click.option('--feeds', help="feed ids, separate by ','")
def update_feed_monthly_story_count(feeds=None):
feed_ids = _get_feed_ids(feeds)
LOG.info('total %s feeds', len(feed_ids))
for feed_id in tqdm.tqdm(feed_ids, ncols=80, ascii=True):
with transaction.atomic():
Story.refresh_feed_monthly_story_count(feed_id)
@main.command()
@click.option('--feeds', help="feed ids, separate by ','")
def update_feed_dryness(feeds=None):
feed_ids = _get_feed_ids(feeds)
LOG.info('total %s feeds', len(feed_ids))
for feed_id in tqdm.tqdm(feed_ids, ncols=80, ascii=True):
with transaction.atomic():
feed = Feed.get_by_pk(feed_id)
if feed.total_storys <= 0:
continue
cnt = feed.monthly_story_count
if not cnt:
Story.refresh_feed_monthly_story_count(feed_id)
feed.refresh_from_db()
feed.dryness = feed.monthly_story_count.dryness()
feed.save()
@main.command()
@click.option('--feeds', help="feed ids, separate by ','")
def update_feed_dt_first_story_published(feeds=None):
feed_ids = _get_feed_ids(feeds)
LOG.info('total %s feeds', len(feed_ids))
for feed_id in tqdm.tqdm(feed_ids, ncols=80, ascii=True):
with transaction.atomic():
feed = Feed.get_by_pk(feed_id)
if feed.dt_first_story_published:
continue
if feed.total_storys <= 0:
continue
try:
story = Story.get_by_offset(feed_id, 0, detail=True)
except Story.DoesNotExist:
LOG.warning(f'story feed_id={feed_id} offset=0 not exists')
continue
feed.dt_first_story_published = story.dt_published
feed.save()
@main.command()
@click.option('--storys', help="story ids, separate by ','")
def update_story_has_mathjax(storys=None):
story_ids = _get_story_ids(storys)
LOG.info('total %s storys', len(story_ids))
for story_id in tqdm.tqdm(story_ids, ncols=80, ascii=True):
with transaction.atomic():
story = Story.objects.only('id', 'content', '_version').get(pk=story_id)
if processor.story_has_mathjax(story.content):
story.has_mathjax = True
story.save()
@main.command()
def update_story_is_user_marked():
user_storys = list(
UserStory.objects
.exclude(is_watched=False, is_favorited=False)
.all()
)
LOG.info('total %s user marked storys', len(user_storys))
if not user_storys:
return
for user_story in tqdm.tqdm(user_storys, ncols=80, ascii=True):
Story.set_user_marked_by_id(user_story.story_id)
@main.command()
@click.option('--storys', help="story ids, separate by ','")
def process_story_links(storys=None):
story_ids = _get_story_ids(storys)
LOG.info('total %s storys', len(story_ids))
for story_id in tqdm.tqdm(story_ids, ncols=80, ascii=True):
with transaction.atomic():
story = Story.objects.only('id', 'content', '_version').get(pk=story_id)
content = processor.process_story_links(story.content, story.link)
if story.content != content:
story.content = content
story.save()
@main.command()
@click.option('--storys', help="story ids, separate by ','")
def update_story_images(storys=None):
story_ids = _get_story_ids(storys)
LOG.info('total %s storys', len(story_ids))
for story_id in tqdm.tqdm(story_ids, ncols=80, ascii=True):
story = Story.objects.get(pk=story_id)
scheduler.tell('harbor_rss.update_story_images', dict(
story_id=story_id,
story_url=story.link,
images=[],
))
@main.command()
@click.argument('unionid_text')
def decode_unionid(unionid_text):
numbers = unionid.decode(unionid_text)
if len(numbers) == 3:
click.echo('user_id={} feed_id={} offset={}'.format(*numbers))
elif len(numbers) == 2:
click.echo('user_id={} feed_id={}'.format(*numbers))
else:
click.echo(numbers)
@main.command()
@click.option('--days', type=int, default=1)
@click.option('--limit', type=int, default=100)
@click.option('--threshold', type=int, default=99)
def delete_invalid_feeds(days=1, limit=100, threshold=99):
sql = """
SELECT feed_id, title, link, url, status_code, count FROM (
SELECT feed_id, status_code, count(1) as count FROM rssant_api_rawfeed
WHERE dt_created >= %s and (status_code < 200 or status_code >= 400)
group by feed_id, status_code
having count(1) > 3
order by count desc
limit %s
) error_feed
join rssant_api_feed
on error_feed.feed_id = rssant_api_feed.id
order by feed_id, status_code, count;
"""
sql_ok_count = """
SELECT feed_id, count(1) as count FROM rssant_api_rawfeed
WHERE dt_created >= %s and (status_code >= 200 and status_code < 400)
AND feed_id=ANY(%s)
group by feed_id
"""
t_begin = timezone.now() - timezone.timedelta(days=days)
error_feeds = defaultdict(dict)
with connection.cursor() as cursor:
cursor.execute(sql, [t_begin, limit])
for feed_id, title, link, url, status_code, count in cursor.fetchall():
error_feeds[feed_id].update(feed_id=feed_id, title=title, link=link, url=url)
error = error_feeds[feed_id].setdefault('error', {})
error_name = FeedResponseStatus.name_of(status_code)
error[error_name] = count
error_feeds[feed_id]['error_count'] = sum(error.values())
error_feeds[feed_id].update(ok_count=0, error_percent=100)
cursor.execute(sql_ok_count, [t_begin, list(error_feeds)])
for feed_id, ok_count in cursor.fetchall():
feed = error_feeds[feed_id]
total = feed['error_count'] + ok_count
error_percent = round((feed['error_count'] / total) * 100)
feed.update(ok_count=ok_count, error_percent=error_percent)
error_feeds = list(sorted(error_feeds.values(), key=lambda x: x['error_percent'], reverse=True))
delete_feed_ids = []
for feed in error_feeds:
if feed['error_percent'] >= threshold:
delete_feed_ids.append(feed['feed_id'])
click.echo(pretty_format_json(feed))
if delete_feed_ids:
confirm_delete = click.confirm(f'Delete {len(delete_feed_ids)} feeds?')
if not confirm_delete:
click.echo('Abort!')
else:
UnionFeed.bulk_delete(delete_feed_ids)
click.echo('Done!')
return error_feeds
@main.command()
def fix_user_story_offset():
sql = """
SELECT us.id, us."offset", story."offset"
FROM rssant_api_userstory AS us
LEFT OUTER JOIN rssant_api_story AS story
ON us.story_id=story.id
WHERE us."offset" != story."offset"
"""
items = []
with connection.cursor() as cursor:
cursor.execute(sql)
for us_id, us_offset, story_offset in cursor.fetchall():
items.append((us_id, us_offset, story_offset))
click.echo(f'total {len(items)} mismatch user story offset')
if not items:
return
with transaction.atomic():
for us_id, us_offset, story_offset in tqdm.tqdm(items, ncols=80, ascii=True):
UserStory.objects.filter(pk=us_id).update(offset=-us_offset)
for us_id, us_offset, story_offset in tqdm.tqdm(items, ncols=80, ascii=True):
UserStory.objects.filter(pk=us_id).update(offset=story_offset)
@main.command()
def subscribe_changelog():
changelog_url = CONFIG.root_url.rstrip('/') + '/changelog.atom'
feed = Feed.objects.get(url=changelog_url)
if not feed:
click.echo(f'not found changelog feed url={changelog_url}')
return
click.echo(f'changelog feed {feed}')
User = get_user_model()
users = list(User.objects.all())
click.echo(f'total {len(users)} users')
for user in tqdm.tqdm(users, ncols=80, ascii=True):
with transaction.atomic():
user_feed = UserFeed.objects\
.filter(user_id=user.id, feed_id=feed.id).first()
if not user_feed:
user_feed = UserFeed(
user_id=user.id,
feed_id=feed.id,
is_from_bookmark=False,
)
user_feed.save()
@main.command()
def update_feed_use_proxy():
if not CONFIG.rss_proxy_enable:
click.echo('rss proxy not enable!')
return
blacklist = [
'%博客园%',
'%微信%',
'%新浪%',
'%的评论%',
'%Comments on%',
]
sql = """
select * from rssant_api_feed
where (NOT title LIKE ANY(%s)) AND (
dt_created >= '2020-04-01' or
(total_storys <= 5 and dt_updated <= '2019-12-01')
)
"""
feeds = list(Feed.objects.raw(sql, [blacklist]))
click.echo(f'{len(feeds)} feeds need check')
reader = FeedReader(**_proxy_helper.get_proxy_options())
proxy_feeds = []
with reader:
for i, feed in enumerate(feeds):
click.echo(f'#{i} {feed}')
status = reader.read(feed.url).status
click.echo(f' #{i} status={FeedResponseStatus.name_of(status)}')
if FeedResponseStatus.is_need_proxy(status):
proxy_status = reader.read(feed.url, use_proxy=True).status
click.echo(f' #{i} proxy_status={FeedResponseStatus.name_of(proxy_status)}')
if proxy_status == 200:
proxy_feeds.append(feed)
click.echo(f'{len(proxy_feeds)} feeds need use proxy')
if proxy_feeds:
with transaction.atomic():
for feed in tqdm.tqdm(proxy_feeds, ncols=80, ascii=True):
feed.refresh_from_db()
feed.use_proxy = True
feed.save()
@main.command()
@click.argument('key')
def delete_feed(key):
try:
key = int(key)
except ValueError:
        pass
    if isinstance(key, int):
feed = Feed.get_by_pk(key)
else:
feed = Feed.objects.filter(
Q(url__contains=key) | Q(title__contains=key)
).first()
if not feed:
print(f'not found feed like {key}')
return
if click.confirm(f'delete {feed} ?'):
feed.delete()
@main.command()
@click.option('--feeds', help="feed ids, separate by ','")
@click.option('--union-feeds', help="union feed ids, separate by ','")
@click.option('--key', help="feed url or title keyword")
@click.option('--expire', type=int, default=1, help="expire hours")
def refresh_feed(feeds, union_feeds, key, expire=None):
feed_ids = []
if feeds:
feed_ids.extend(_get_feed_ids(feeds))
if union_feeds:
feed_ids.extend(_decode_union_feed_ids(union_feeds))
if key:
cond = Q(url__contains=key) | Q(title__contains=key)
feed_objs = Feed.objects.filter(cond).only('id').all()
feed_ids.extend(x.id for x in feed_objs)
feed_ids = list(sorted(set(feed_ids)))
expire_at = time.time() + expire * 60 * 60
for feed_id in tqdm.tqdm(feed_ids, ncols=80, ascii=True):
feed = Feed.objects.only('id', 'url', 'use_proxy').get(pk=feed_id)
scheduler.tell('worker_rss.sync_feed', dict(
feed_id=feed.id,
url=feed.url,
use_proxy=feed.use_proxy,
is_refresh=True,
), expire_at=expire_at)
@main.command()
@click.option('--feeds', required=True, help="feed ids, separate by ','")
def update_feed_reverse_url(feeds):
feed_ids = _get_feed_ids(feeds)
for feed_id in tqdm.tqdm(feed_ids, ncols=80, ascii=True):
feed = Feed.objects.get(pk=feed_id)
feed.reverse_url = reverse_url(feed.url)
feed.save()
@main.command()
@click.option('--dst', required=True, help='actor dst')
@click.option('--content', help='message content')
@click.option('--expire-seconds', type=int, help='expire time in seconds')
def tell(dst, content, expire_seconds):
if content:
content = json.loads(content)
expire_at = None
if expire_seconds:
expire_at = int(time.time()) + expire_seconds
scheduler.tell(dst, content=content, expire_at=expire_at)
if __name__ == "__main__":
main()
| true | true |
f7007aa840274af20197e60983ca3a3b63feaf8b | 1,554 | py | Python | solutions/REF_11_main.py | ManoloBrn/gcloudtraining17 | 8b72f16b3868239743c7a207fb44082d055da4e3 | [
"Apache-2.0"
] | null | null | null | solutions/REF_11_main.py | ManoloBrn/gcloudtraining17 | 8b72f16b3868239743c7a207fb44082d055da4e3 | [
"Apache-2.0"
] | null | null | null | solutions/REF_11_main.py | ManoloBrn/gcloudtraining17 | 8b72f16b3868239743c7a207fb44082d055da4e3 | [
"Apache-2.0"
] | 2 | 2016-11-28T05:36:44.000Z | 2017-02-13T01:31:38.000Z | #!/usr/bin/env python
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import mail
from conference import ConferenceApi
class SetAnnouncementHandler(webapp2.RequestHandler):
def get(self):
"""Set Announcement in Memcache."""
header = self.request.headers.get('X-AppEngine-Cron', None)
if not header:
raise ValueError('attempt to access cron handler directly, '
'missing custom App Engine header')
ConferenceApi._cacheAnnouncement()
self.response.set_status(204)
class SendConfirmationEmailHandler(webapp2.RequestHandler):
def post(self):
"""Send email confirming Conference creation."""
header = self.request.headers.get('X-AppEngine-QueueName', None)
if not header:
raise ValueError('attempt to access task handler directly, '
'missing custom App Engine header')
mail.send_mail(
'noreply@%s.appspotmail.com' % (
app_identity.get_application_id()), # from
self.request.get('email'), # to
'You created a new Conference!', # subj
'Hi, you have created a following ' # body
'conference:\r\n\r\n%s' % self.request.get(
'conferenceInfo')
)
app = webapp2.WSGIApplication([
('/crons/set_announcement', SetAnnouncementHandler),
('/tasks/send_confirmation_email', SendConfirmationEmailHandler)
], debug=True)
| 37.902439 | 72 | 0.623552 |
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import mail
from conference import ConferenceApi
class SetAnnouncementHandler(webapp2.RequestHandler):
def get(self):
header = self.request.headers.get('X-AppEngine-Cron', None)
if not header:
raise ValueError('attempt to access cron handler directly, '
'missing custom App Engine header')
ConferenceApi._cacheAnnouncement()
self.response.set_status(204)
class SendConfirmationEmailHandler(webapp2.RequestHandler):
def post(self):
header = self.request.headers.get('X-AppEngine-QueueName', None)
if not header:
raise ValueError('attempt to access task handler directly, '
'missing custom App Engine header')
mail.send_mail(
'noreply@%s.appspotmail.com' % (
                app_identity.get_application_id()),
            self.request.get('email'),
            'You created a new Conference!',
            'Hi, you have created a following '
            'conference:\r\n\r\n%s' % self.request.get(
'conferenceInfo')
)
app = webapp2.WSGIApplication([
('/crons/set_announcement', SetAnnouncementHandler),
('/tasks/send_confirmation_email', SendConfirmationEmailHandler)
], debug=True)
| true | true |
f7007aeadf7f3003bdb12b053d54a12891257329 | 5,750 | py | Python | torchkit/models/vision/segmentation/unet.py | cosmic-cortex/torchkit | 9f44c8a500a4345d81feac14b6b200c5d190283a | [
"MIT"
] | null | null | null | torchkit/models/vision/segmentation/unet.py | cosmic-cortex/torchkit | 9f44c8a500a4345d81feac14b6b200c5d190283a | [
"MIT"
] | null | null | null | torchkit/models/vision/segmentation/unet.py | cosmic-cortex/torchkit | 9f44c8a500a4345d81feac14b6b200c5d190283a | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
def pad_to_shape(this, shp):
"""
Not a very safe function.
"""
return F.pad(this, (0, shp[3] - this.shape[3], 0, shp[2] - this.shape[2]))
class First(nn.Module):
def __init__(self, in_channels, middle_channels, out_channels, dropout=False):
super(First, self).__init__()
layers = [
nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv2d(middle_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
]
if dropout:
assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'
layers.append(nn.Dropout2d(p=dropout))
self.first = nn.Sequential(*layers)
def forward(self, x):
return self.first(x)
class Encoder(nn.Module):
def __init__(
self, in_channels, middle_channels, out_channels,
dropout=False, downsample_kernel=2
):
super(Encoder, self).__init__()
layers = [
nn.MaxPool2d(kernel_size=downsample_kernel),
nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv2d(middle_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
]
if dropout:
assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'
layers.append(nn.Dropout2d(p=dropout))
self.encoder = nn.Sequential(*layers)
def forward(self, x):
return self.encoder(x)
class Center(nn.Module):
def __init__(self, in_channels, middle_channels, out_channels, deconv_channels, dropout=False):
super(Center, self).__init__()
layers = [
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv2d(middle_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(out_channels, deconv_channels, kernel_size=2, stride=2)
]
if dropout:
assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'
layers.append(nn.Dropout2d(p=dropout))
self.center = nn.Sequential(*layers)
def forward(self, x):
return self.center(x)
class Decoder(nn.Module):
def __init__(self, in_channels, middle_channels, out_channels, deconv_channels, dropout=False):
super(Decoder, self).__init__()
layers = [
nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv2d(middle_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(out_channels, deconv_channels, kernel_size=2, stride=2)
]
if dropout:
assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'
layers.append(nn.Dropout2d(p=dropout))
self.decoder = nn.Sequential(*layers)
def forward(self, x):
return self.decoder(x)
class Last(nn.Module):
def __init__(self, in_channels, middle_channels, out_channels, softmax=False):
super(Last, self).__init__()
layers = [
nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv2d(middle_channels, middle_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv2d(middle_channels, out_channels, kernel_size=1),
nn.Sigmoid()
]
if softmax:
layers.append(nn.Softmax2d())
self.first = nn.Sequential(*layers)
def forward(self, x):
return self.first(x)
class UNet(nn.Module):
def __init__(self, in_channels, out_channels, softmax=False):
super(UNet, self).__init__()
self.first = First(in_channels, 64, 64)
self.encoder_1 = Encoder(64, 128, 128)
self.encoder_2 = Encoder(128, 256, 256)
self.encoder_3 = Encoder(256, 512, 512)
self.center = Center(512, 1024, 1024, 512)
self.decoder_3 = Decoder(1024, 512, 512, 256)
self.decoder_2 = Decoder(512, 256, 256, 128)
self.decoder_1 = Decoder(256, 128, 128, 64)
self.last = Last(128, 64, out_channels, softmax=softmax)
def forward(self, x):
x_first = self.first(x)
x_enc_1 = self.encoder_1(x_first)
x_enc_2 = self.encoder_2(x_enc_1)
x_enc_3 = self.encoder_3(x_enc_2)
x_cent = self.center(x_enc_3)
x_dec_3 = self.decoder_3(torch.cat([pad_to_shape(x_cent, x_enc_3.shape), x_enc_3], dim=1))
x_dec_2 = self.decoder_2(torch.cat([pad_to_shape(x_dec_3, x_enc_2.shape), x_enc_2], dim=1))
x_dec_1 = self.decoder_1(torch.cat([pad_to_shape(x_dec_2, x_enc_1.shape), x_enc_1], dim=1))
return self.last(torch.cat([pad_to_shape(x_dec_1, x_first.shape), x_first], dim=1))
if __name__ == '__main__':
pass
| 35.060976 | 100 | 0.602957 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
def pad_to_shape(this, shp):
return F.pad(this, (0, shp[3] - this.shape[3], 0, shp[2] - this.shape[2]))
class First(nn.Module):
def __init__(self, in_channels, middle_channels, out_channels, dropout=False):
super(First, self).__init__()
layers = [
nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv2d(middle_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
]
if dropout:
assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'
layers.append(nn.Dropout2d(p=dropout))
self.first = nn.Sequential(*layers)
def forward(self, x):
return self.first(x)
class Encoder(nn.Module):
def __init__(
self, in_channels, middle_channels, out_channels,
dropout=False, downsample_kernel=2
):
super(Encoder, self).__init__()
layers = [
nn.MaxPool2d(kernel_size=downsample_kernel),
nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv2d(middle_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
]
if dropout:
assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'
layers.append(nn.Dropout2d(p=dropout))
self.encoder = nn.Sequential(*layers)
def forward(self, x):
return self.encoder(x)
class Center(nn.Module):
def __init__(self, in_channels, middle_channels, out_channels, deconv_channels, dropout=False):
super(Center, self).__init__()
layers = [
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv2d(middle_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(out_channels, deconv_channels, kernel_size=2, stride=2)
]
if dropout:
assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'
layers.append(nn.Dropout2d(p=dropout))
self.center = nn.Sequential(*layers)
def forward(self, x):
return self.center(x)
class Decoder(nn.Module):
def __init__(self, in_channels, middle_channels, out_channels, deconv_channels, dropout=False):
super(Decoder, self).__init__()
layers = [
nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv2d(middle_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(out_channels, deconv_channels, kernel_size=2, stride=2)
]
if dropout:
assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'
layers.append(nn.Dropout2d(p=dropout))
self.decoder = nn.Sequential(*layers)
def forward(self, x):
return self.decoder(x)
class Last(nn.Module):
def __init__(self, in_channels, middle_channels, out_channels, softmax=False):
super(Last, self).__init__()
layers = [
nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv2d(middle_channels, middle_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv2d(middle_channels, out_channels, kernel_size=1),
nn.Sigmoid()
]
if softmax:
layers.append(nn.Softmax2d())
self.first = nn.Sequential(*layers)
def forward(self, x):
return self.first(x)
class UNet(nn.Module):
def __init__(self, in_channels, out_channels, softmax=False):
super(UNet, self).__init__()
self.first = First(in_channels, 64, 64)
self.encoder_1 = Encoder(64, 128, 128)
self.encoder_2 = Encoder(128, 256, 256)
self.encoder_3 = Encoder(256, 512, 512)
self.center = Center(512, 1024, 1024, 512)
self.decoder_3 = Decoder(1024, 512, 512, 256)
self.decoder_2 = Decoder(512, 256, 256, 128)
self.decoder_1 = Decoder(256, 128, 128, 64)
self.last = Last(128, 64, out_channels, softmax=softmax)
def forward(self, x):
x_first = self.first(x)
x_enc_1 = self.encoder_1(x_first)
x_enc_2 = self.encoder_2(x_enc_1)
x_enc_3 = self.encoder_3(x_enc_2)
x_cent = self.center(x_enc_3)
x_dec_3 = self.decoder_3(torch.cat([pad_to_shape(x_cent, x_enc_3.shape), x_enc_3], dim=1))
x_dec_2 = self.decoder_2(torch.cat([pad_to_shape(x_dec_3, x_enc_2.shape), x_enc_2], dim=1))
x_dec_1 = self.decoder_1(torch.cat([pad_to_shape(x_dec_2, x_enc_1.shape), x_enc_1], dim=1))
return self.last(torch.cat([pad_to_shape(x_dec_1, x_first.shape), x_first], dim=1))
if __name__ == '__main__':
pass
| true | true |
f7007b869cca1c346ea889953e37448001e40b10 | 5,930 | py | Python | run_decoding/run_decoding_WM_across_epochs_and_conditions.py | romquentin/decod_WM_Selection_and_maintenance | fc1bf2f21959795fbea731f642cc750c2b61bce2 | [
"BSD-3-Clause"
] | 7 | 2018-07-16T01:59:03.000Z | 2021-07-28T09:48:13.000Z | run_decoding/run_decoding_WM_across_epochs_and_conditions.py | romquentin/decod_WM_Selection_and_maintenance | fc1bf2f21959795fbea731f642cc750c2b61bce2 | [
"BSD-3-Clause"
] | 1 | 2020-03-15T00:35:45.000Z | 2020-04-17T09:54:38.000Z | run_decoding/run_decoding_WM_across_epochs_and_conditions.py | romquentin/decod_WM_Selection_and_maintenance | fc1bf2f21959795fbea731f642cc750c2b61bce2 | [
"BSD-3-Clause"
] | 4 | 2018-08-02T08:52:59.000Z | 2021-12-17T11:43:47.000Z | """Run decoding analyses in sensors space accross memory content and
visual perception for the working memory task and save decoding performance"""
# Authors: Romain Quentin <[email protected]>
# Jean-Remi King <[email protected]>
#
# License: BSD (3-clause)
import os
import os.path as op
import numpy as np
import mne
from h5io import read_hdf5
from mne.decoding import GeneralizingEstimator, LinearModel
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Ridge
from sklearn.metrics import make_scorer
from sklearn.model_selection import StratifiedKFold
from jr.gat import (AngularRegression, scorer_spearman,
scorer_angle)
from base import (complete_behavior, get_events_interactions)
from config import path_data
import sys
subject = sys.argv[1]  # read a swarm file for parallel computing on biowulf
output_folder = '/sensors_accross_epochs_and_conditions/'
# Create result folder
results_folder = op.join(path_data + 'results/' + subject + output_folder)
if not os.path.exists(results_folder):
os.makedirs(results_folder)
# read behavior
fname = op.join(path_data, subject, 'behavior_Target.hdf5')
events = read_hdf5(fname)
events = complete_behavior(events)
events = get_events_interactions(events)
# read stimulus epochs
fname = op.join(path_data, subject, 'epochs_Target.fif')
epochs_target = mne.read_epochs(fname)
epochs_target.pick_types(meg=True, ref_meg=False)
epochs_target.crop(-0.2, 0.9)
# read cue epochs
fname = op.join(path_data, subject, 'epochs_Cue.fif')
epochs_cue = mne.read_epochs(fname)
epochs_cue.pick_types(meg=True, ref_meg=False)
epochs_cue.crop(0, 1.5)
# read probe epochs
fname = op.join(path_data, subject, 'epochs_Probe.fif')
epochs_probe = mne.read_epochs(fname)
epochs_probe.pick_types(meg=True, ref_meg=False)
epochs_probe.crop(0, 0.9)
# Concatenate the data of the three epochs
X0 = epochs_target._data
X1 = epochs_cue._data
X2 = epochs_probe._data
X = np.concatenate((X0, X1, X2), axis=2)
# Define pair of analyses (train on the 2nd and test on the 1st )
paired_analyses = [['target_sfreq_cue_left_sfreq', 'left_sfreq'],
['target_sfreq_cue_right_sfreq', 'right_sfreq'],
['left_sfreq', 'target_sfreq_cue_left_sfreq'],
['right_sfreq', 'target_sfreq_cue_right_sfreq'],
['target_angle_cue_left_angle', 'left_angle'],
['target_angle_cue_right_angle', 'right_angle'],
['left_angle', 'target_angle_cue_left_angle'],
['right_angle', 'target_angle_cue_right_angle']]
# Loop across each pair of analyses
for paired_analysis in paired_analyses:
y_test = np.array(events[paired_analysis[0]])
y_train = np.array(events[paired_analysis[1]])
# Define estimators depending on the analysis
if 'angle' in paired_analysis[0][:14]:
clf = make_pipeline(StandardScaler(),
LinearModel(AngularRegression(Ridge(),
independent=False)))
scorer = scorer_angle
kwargs = dict()
gat = GeneralizingEstimator(clf, scoring=make_scorer(scorer),
n_jobs=24, **kwargs)
y_test = np.array(y_test, dtype=float)
y_train = np.array(y_train, dtype=float)
elif 'sfreq' in paired_analysis[0][:14]:
clf = make_pipeline(StandardScaler(), LinearModel(Ridge()))
scorer = scorer_spearman
kwargs = dict()
gat = GeneralizingEstimator(clf, scoring=make_scorer(scorer),
n_jobs=24, **kwargs)
y_test = np.array(y_test, dtype=float)
y_train = np.array(y_train, dtype=float)
# only consider trials with correct fixation
sel = np.where(events['is_eye_fixed'] == 1)[0]
y_train = y_train[sel]
y_test = y_test[sel]
X = np.concatenate((X0, X1, X2), axis=2)
X = X[sel]
# only consider non NaN values
    # Run decoding across conditions
cv = StratifiedKFold(7)
scores = list()
scs = list()
if np.isnan(y_train).any():
sel = np.where(~np.isnan(y_train))[0]
for train, test in cv.split(X[sel], y_train[sel]):
gat.fit(X[sel][train], y_train[sel][train])
score = gat.score(X[sel][test], y_test[sel][test])
sc = gat.score(X[sel][test], y_train[sel][test]) # test on same
scores.append(score)
scs.append(sc)
scores = np.mean(scores, axis=0)
scs = np.mean(scs, axis=0)
else:
for train, test in cv.split(X, y_train):
y_te = y_test[test]
X_te = X[test]
y_te = y_te[np.where(~np.isnan(y_te))[0]]
X_te = X_te[np.where(~np.isnan(y_te))[0]]
y_tr = y_train[train]
X_tr = X[train]
y_tr = y_tr[np.where(~np.isnan(y_tr))[0]]
X_tr = X_tr[np.where(~np.isnan(y_tr))[0]]
y_tr_te = y_train[test]
X_tr_te = X[test]
y_tr_te = y_tr_te[np.where(~np.isnan(y_tr_te))[0]]
X_tr_te = X_tr_te[np.where(~np.isnan(y_tr_te))[0]]
gat.fit(X_tr, y_tr)
score = gat.score(X_te, y_te)
sc = gat.score(X_tr_te, y_tr_te) # test on same
scores.append(score)
scs.append(sc)
scores = np.mean(scores, axis=0)
scs = np.mean(scs, axis=0)
# save cross-validated scores
fname = results_folder +\
'%s_scores_%s_cross_%s.npy' % (subject,
paired_analysis[0],
paired_analysis[1])
    np.save(fname, np.array(scores))  # save across-condition scores
fname = results_folder +\
'%s_scores_%s.npy' % (subject, paired_analysis[1])
np.save(fname, np.array(scs)) # save scores test/train on same condition
| 41.468531 | 78 | 0.641147 |
import os
import os.path as op
import numpy as np
import mne
from h5io import read_hdf5
from mne.decoding import GeneralizingEstimator, LinearModel
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Ridge
from sklearn.metrics import make_scorer
from sklearn.model_selection import StratifiedKFold
from jr.gat import (AngularRegression, scorer_spearman,
scorer_angle)
from base import (complete_behavior, get_events_interactions)
from config import path_data
import sys
subject = sys.argv[1]
output_folder = '/sensors_accross_epochs_and_conditions/'
results_folder = op.join(path_data + 'results/' + subject + output_folder)
if not os.path.exists(results_folder):
os.makedirs(results_folder)
fname = op.join(path_data, subject, 'behavior_Target.hdf5')
events = read_hdf5(fname)
events = complete_behavior(events)
events = get_events_interactions(events)
fname = op.join(path_data, subject, 'epochs_Target.fif')
epochs_target = mne.read_epochs(fname)
epochs_target.pick_types(meg=True, ref_meg=False)
epochs_target.crop(-0.2, 0.9)
fname = op.join(path_data, subject, 'epochs_Cue.fif')
epochs_cue = mne.read_epochs(fname)
epochs_cue.pick_types(meg=True, ref_meg=False)
epochs_cue.crop(0, 1.5)
fname = op.join(path_data, subject, 'epochs_Probe.fif')
epochs_probe = mne.read_epochs(fname)
epochs_probe.pick_types(meg=True, ref_meg=False)
epochs_probe.crop(0, 0.9)
X0 = epochs_target._data
X1 = epochs_cue._data
X2 = epochs_probe._data
X = np.concatenate((X0, X1, X2), axis=2)
paired_analyses = [['target_sfreq_cue_left_sfreq', 'left_sfreq'],
['target_sfreq_cue_right_sfreq', 'right_sfreq'],
['left_sfreq', 'target_sfreq_cue_left_sfreq'],
['right_sfreq', 'target_sfreq_cue_right_sfreq'],
['target_angle_cue_left_angle', 'left_angle'],
['target_angle_cue_right_angle', 'right_angle'],
['left_angle', 'target_angle_cue_left_angle'],
['right_angle', 'target_angle_cue_right_angle']]
for paired_analysis in paired_analyses:
y_test = np.array(events[paired_analysis[0]])
y_train = np.array(events[paired_analysis[1]])
if 'angle' in paired_analysis[0][:14]:
clf = make_pipeline(StandardScaler(),
LinearModel(AngularRegression(Ridge(),
independent=False)))
scorer = scorer_angle
kwargs = dict()
gat = GeneralizingEstimator(clf, scoring=make_scorer(scorer),
n_jobs=24, **kwargs)
y_test = np.array(y_test, dtype=float)
y_train = np.array(y_train, dtype=float)
elif 'sfreq' in paired_analysis[0][:14]:
clf = make_pipeline(StandardScaler(), LinearModel(Ridge()))
scorer = scorer_spearman
kwargs = dict()
gat = GeneralizingEstimator(clf, scoring=make_scorer(scorer),
n_jobs=24, **kwargs)
y_test = np.array(y_test, dtype=float)
y_train = np.array(y_train, dtype=float)
sel = np.where(events['is_eye_fixed'] == 1)[0]
y_train = y_train[sel]
y_test = y_test[sel]
X = np.concatenate((X0, X1, X2), axis=2)
X = X[sel]
cv = StratifiedKFold(7)
scores = list()
scs = list()
if np.isnan(y_train).any():
sel = np.where(~np.isnan(y_train))[0]
for train, test in cv.split(X[sel], y_train[sel]):
gat.fit(X[sel][train], y_train[sel][train])
score = gat.score(X[sel][test], y_test[sel][test])
            sc = gat.score(X[sel][test], y_train[sel][test])
            scores.append(score)
scs.append(sc)
scores = np.mean(scores, axis=0)
scs = np.mean(scs, axis=0)
else:
for train, test in cv.split(X, y_train):
y_te = y_test[test]
X_te = X[test]
y_te = y_te[np.where(~np.isnan(y_te))[0]]
X_te = X_te[np.where(~np.isnan(y_te))[0]]
y_tr = y_train[train]
X_tr = X[train]
y_tr = y_tr[np.where(~np.isnan(y_tr))[0]]
X_tr = X_tr[np.where(~np.isnan(y_tr))[0]]
y_tr_te = y_train[test]
X_tr_te = X[test]
y_tr_te = y_tr_te[np.where(~np.isnan(y_tr_te))[0]]
X_tr_te = X_tr_te[np.where(~np.isnan(y_tr_te))[0]]
gat.fit(X_tr, y_tr)
score = gat.score(X_te, y_te)
            sc = gat.score(X_tr_te, y_tr_te)
            scores.append(score)
scs.append(sc)
scores = np.mean(scores, axis=0)
scs = np.mean(scs, axis=0)
fname = results_folder +\
'%s_scores_%s_cross_%s.npy' % (subject,
paired_analysis[0],
paired_analysis[1])
    np.save(fname, np.array(scores))
    fname = results_folder +\
'%s_scores_%s.npy' % (subject, paired_analysis[1])
np.save(fname, np.array(scs)) | true | true |
f7007b8f63065271ad10c75bc6357fbca374b88c | 8,674 | py | Python | test/sagemaker_tests/pytorch/inference/conftest.py | leezu/deep-learning-containers | 52591228240ad88d1eb39f419ade93d3ca5ec695 | [
"Apache-2.0"
] | null | null | null | test/sagemaker_tests/pytorch/inference/conftest.py | leezu/deep-learning-containers | 52591228240ad88d1eb39f419ade93d3ca5ec695 | [
"Apache-2.0"
] | null | null | null | test/sagemaker_tests/pytorch/inference/conftest.py | leezu/deep-learning-containers | 52591228240ad88d1eb39f419ade93d3ca5ec695 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import boto3
import os
import logging
import platform
import pytest
import shutil
import sys
import tempfile
from sagemaker import LocalSession, Session
from sagemaker.pytorch import PyTorch
from .utils import image_utils
logger = logging.getLogger(__name__)
logging.getLogger('boto').setLevel(logging.INFO)
logging.getLogger('boto3').setLevel(logging.INFO)
logging.getLogger('botocore').setLevel(logging.INFO)
logging.getLogger('factory.py').setLevel(logging.INFO)
logging.getLogger('auth.py').setLevel(logging.INFO)
logging.getLogger('connectionpool.py').setLevel(logging.INFO)
dir_path = os.path.dirname(os.path.realpath(__file__))
NO_P2_REGIONS = ['ap-east-1', 'ap-northeast-3', 'ap-southeast-2', 'ca-central-1', 'eu-central-1', 'eu-north-1',
'eu-west-2', 'eu-west-3', 'us-west-1', 'sa-east-1', 'me-south-1']
NO_P3_REGIONS = ['ap-east-1', 'ap-northeast-3', 'ap-southeast-1', 'ap-southeast-2', 'ap-south-1', 'ca-central-1',
'eu-central-1', 'eu-north-1', 'eu-west-2', 'eu-west-3', 'sa-east-1', 'us-west-1', 'me-south-1']
def pytest_addoption(parser):
parser.addoption('--build-image', '-D', action='store_true')
parser.addoption('--build-base-image', '-B', action='store_true')
parser.addoption('--aws-id')
parser.addoption('--instance-type')
parser.addoption('--accelerator-type', default=None)
parser.addoption('--docker-base-name', default='pytorch')
parser.addoption('--region', default='us-west-2')
parser.addoption('--framework-version', default=PyTorch.LATEST_VERSION)
parser.addoption('--py-version', choices=['2', '3'], default=str(sys.version_info.major))
# Processor is still "cpu" for EIA tests
parser.addoption('--processor', choices=['gpu', 'cpu', 'eia'], default='cpu')
# If not specified, will default to {framework-version}-{processor}-py{py-version}
parser.addoption('--tag', default=None)
parser.addoption('--generate-coverage-doc', default=False, action='store_true',
help='use this option to generate test coverage doc')
def pytest_collection_modifyitems(session, config, items):
if config.getoption("--generate-coverage-doc"):
from test.test_utils.test_reporting import TestReportGenerator
report_generator = TestReportGenerator(items, is_sagemaker=True)
report_generator.generate_coverage_doc(framework="pytorch", job_type="inference")
@pytest.fixture(scope='session', name='docker_base_name')
def fixture_docker_base_name(request):
return request.config.getoption('--docker-base-name')
@pytest.fixture(scope='session', name='region')
def fixture_region(request):
return request.config.getoption('--region')
@pytest.fixture(scope='session', name='framework_version')
def fixture_framework_version(request):
return request.config.getoption('--framework-version')
@pytest.fixture(scope='session', name='py_version')
def fixture_py_version(request):
return 'py{}'.format(int(request.config.getoption('--py-version')))
@pytest.fixture(scope='session', name='processor')
def fixture_processor(request):
return request.config.getoption('--processor')
@pytest.fixture(scope='session', name='tag')
def fixture_tag(request, framework_version, processor, py_version):
provided_tag = request.config.getoption('--tag')
default_tag = '{}-{}-{}'.format(framework_version, processor, py_version)
return provided_tag if provided_tag else default_tag
@pytest.fixture(scope='session', name='docker_image')
def fixture_docker_image(docker_base_name, tag):
return '{}:{}'.format(docker_base_name, tag)
@pytest.fixture
def opt_ml():
tmp = tempfile.mkdtemp()
os.mkdir(os.path.join(tmp, 'output'))
# Docker cannot mount Mac OS /var folder properly see
# https://forums.docker.com/t/var-folders-isnt-mounted-properly/9600
opt_ml_dir = '/private{}'.format(tmp) if platform.system() == 'Darwin' else tmp
yield opt_ml_dir
shutil.rmtree(tmp, True)
@pytest.fixture(scope='session', name='use_gpu')
def fixture_use_gpu(processor):
return processor == 'gpu'
@pytest.fixture(scope='session', name='build_base_image', autouse=True)
def fixture_build_base_image(request, framework_version, py_version, processor, tag, docker_base_name):
build_base_image = request.config.getoption('--build-base-image')
if build_base_image:
return image_utils.build_base_image(framework_name=docker_base_name,
framework_version=framework_version,
py_version=py_version,
base_image_tag=tag,
processor=processor,
cwd=os.path.join(dir_path, '..'))
return tag
@pytest.fixture(scope='session', name='sagemaker_session')
def fixture_sagemaker_session(region):
return Session(boto_session=boto3.Session(region_name=region))
@pytest.fixture(scope='session', name='sagemaker_local_session')
def fixture_sagemaker_local_session(region):
return LocalSession(boto_session=boto3.Session(region_name=region))
@pytest.fixture(name='aws_id', scope='session')
def fixture_aws_id(request):
return request.config.getoption('--aws-id')
@pytest.fixture(name='instance_type', scope='session')
def fixture_instance_type(request, processor):
provided_instance_type = request.config.getoption('--instance-type')
default_instance_type = 'local' if processor == 'cpu' else 'local_gpu'
return provided_instance_type or default_instance_type
@pytest.fixture(name='accelerator_type', scope='session')
def fixture_accelerator_type(request):
return request.config.getoption('--accelerator-type')
@pytest.fixture(name='docker_registry', scope='session')
def fixture_docker_registry(aws_id, region):
return '{}.dkr.ecr.{}.amazonaws.com'.format(aws_id, region)
@pytest.fixture(name='ecr_image', scope='session')
def fixture_ecr_image(docker_registry, docker_base_name, tag):
return '{}/{}:{}'.format(docker_registry, docker_base_name, tag)
@pytest.fixture(autouse=True)
def skip_by_device_type(request, use_gpu, instance_type, accelerator_type):
is_gpu = use_gpu or instance_type[3] in ['g', 'p']
is_eia = accelerator_type is not None
# Separate out cases for clearer logic.
# When running GPU test, skip CPU test. When running CPU test, skip GPU test.
if (request.node.get_closest_marker('gpu_test') and not is_gpu) or \
(request.node.get_closest_marker('cpu_test') and is_gpu):
pytest.skip('Skipping because running on \'{}\' instance'.format(instance_type))
# When running EIA test, skip the CPU and GPU functions
elif (request.node.get_closest_marker('gpu_test') or request.node.get_closest_marker('cpu_test')) and is_eia:
pytest.skip('Skipping because running on \'{}\' instance'.format(instance_type))
# When running CPU or GPU test, skip EIA test.
elif request.node.get_closest_marker('eia_test') and not is_eia:
pytest.skip('Skipping because running on \'{}\' instance'.format(instance_type))
@pytest.fixture(autouse=True)
def skip_by_py_version(request, py_version):
if request.node.get_closest_marker('skip_py2') and py_version != 'py3':
pytest.skip('Skipping the test because Python 2 is not supported.')
@pytest.fixture(autouse=True)
def skip_gpu_instance_restricted_regions(region, instance_type):
if (region in NO_P2_REGIONS and instance_type.startswith('ml.p2')) \
or (region in NO_P3_REGIONS and instance_type.startswith('ml.p3')):
pytest.skip('Skipping GPU test in region {}'.format(region))
@pytest.fixture(autouse=True)
def skip_gpu_py2(request, use_gpu, instance_type, py_version, framework_version):
is_gpu = use_gpu or instance_type[3] in ['g', 'p']
if request.node.get_closest_marker('skip_gpu_py2') and is_gpu and py_version != 'py3' \
and framework_version == '1.4.0':
pytest.skip('Skipping the test until mms issue resolved.')
| 40.157407 | 113 | 0.715241 | from __future__ import absolute_import
import boto3
import os
import logging
import platform
import pytest
import shutil
import sys
import tempfile
from sagemaker import LocalSession, Session
from sagemaker.pytorch import PyTorch
from .utils import image_utils
logger = logging.getLogger(__name__)
logging.getLogger('boto').setLevel(logging.INFO)
logging.getLogger('boto3').setLevel(logging.INFO)
logging.getLogger('botocore').setLevel(logging.INFO)
logging.getLogger('factory.py').setLevel(logging.INFO)
logging.getLogger('auth.py').setLevel(logging.INFO)
logging.getLogger('connectionpool.py').setLevel(logging.INFO)
dir_path = os.path.dirname(os.path.realpath(__file__))
NO_P2_REGIONS = ['ap-east-1', 'ap-northeast-3', 'ap-southeast-2', 'ca-central-1', 'eu-central-1', 'eu-north-1',
'eu-west-2', 'eu-west-3', 'us-west-1', 'sa-east-1', 'me-south-1']
NO_P3_REGIONS = ['ap-east-1', 'ap-northeast-3', 'ap-southeast-1', 'ap-southeast-2', 'ap-south-1', 'ca-central-1',
'eu-central-1', 'eu-north-1', 'eu-west-2', 'eu-west-3', 'sa-east-1', 'us-west-1', 'me-south-1']
def pytest_addoption(parser):
parser.addoption('--build-image', '-D', action='store_true')
parser.addoption('--build-base-image', '-B', action='store_true')
parser.addoption('--aws-id')
parser.addoption('--instance-type')
parser.addoption('--accelerator-type', default=None)
parser.addoption('--docker-base-name', default='pytorch')
parser.addoption('--region', default='us-west-2')
parser.addoption('--framework-version', default=PyTorch.LATEST_VERSION)
parser.addoption('--py-version', choices=['2', '3'], default=str(sys.version_info.major))
parser.addoption('--processor', choices=['gpu', 'cpu', 'eia'], default='cpu')
parser.addoption('--tag', default=None)
parser.addoption('--generate-coverage-doc', default=False, action='store_true',
help='use this option to generate test coverage doc')
def pytest_collection_modifyitems(session, config, items):
if config.getoption("--generate-coverage-doc"):
from test.test_utils.test_reporting import TestReportGenerator
report_generator = TestReportGenerator(items, is_sagemaker=True)
report_generator.generate_coverage_doc(framework="pytorch", job_type="inference")
@pytest.fixture(scope='session', name='docker_base_name')
def fixture_docker_base_name(request):
return request.config.getoption('--docker-base-name')
@pytest.fixture(scope='session', name='region')
def fixture_region(request):
return request.config.getoption('--region')
@pytest.fixture(scope='session', name='framework_version')
def fixture_framework_version(request):
return request.config.getoption('--framework-version')
@pytest.fixture(scope='session', name='py_version')
def fixture_py_version(request):
return 'py{}'.format(int(request.config.getoption('--py-version')))
@pytest.fixture(scope='session', name='processor')
def fixture_processor(request):
return request.config.getoption('--processor')
@pytest.fixture(scope='session', name='tag')
def fixture_tag(request, framework_version, processor, py_version):
provided_tag = request.config.getoption('--tag')
default_tag = '{}-{}-{}'.format(framework_version, processor, py_version)
return provided_tag if provided_tag else default_tag
@pytest.fixture(scope='session', name='docker_image')
def fixture_docker_image(docker_base_name, tag):
return '{}:{}'.format(docker_base_name, tag)
@pytest.fixture
def opt_ml():
tmp = tempfile.mkdtemp()
os.mkdir(os.path.join(tmp, 'output'))
opt_ml_dir = '/private{}'.format(tmp) if platform.system() == 'Darwin' else tmp
yield opt_ml_dir
shutil.rmtree(tmp, True)
@pytest.fixture(scope='session', name='use_gpu')
def fixture_use_gpu(processor):
return processor == 'gpu'
@pytest.fixture(scope='session', name='build_base_image', autouse=True)
def fixture_build_base_image(request, framework_version, py_version, processor, tag, docker_base_name):
build_base_image = request.config.getoption('--build-base-image')
if build_base_image:
return image_utils.build_base_image(framework_name=docker_base_name,
framework_version=framework_version,
py_version=py_version,
base_image_tag=tag,
processor=processor,
cwd=os.path.join(dir_path, '..'))
return tag
@pytest.fixture(scope='session', name='sagemaker_session')
def fixture_sagemaker_session(region):
return Session(boto_session=boto3.Session(region_name=region))
@pytest.fixture(scope='session', name='sagemaker_local_session')
def fixture_sagemaker_local_session(region):
return LocalSession(boto_session=boto3.Session(region_name=region))
@pytest.fixture(name='aws_id', scope='session')
def fixture_aws_id(request):
return request.config.getoption('--aws-id')
@pytest.fixture(name='instance_type', scope='session')
def fixture_instance_type(request, processor):
provided_instance_type = request.config.getoption('--instance-type')
default_instance_type = 'local' if processor == 'cpu' else 'local_gpu'
return provided_instance_type or default_instance_type
@pytest.fixture(name='accelerator_type', scope='session')
def fixture_accelerator_type(request):
return request.config.getoption('--accelerator-type')
@pytest.fixture(name='docker_registry', scope='session')
def fixture_docker_registry(aws_id, region):
return '{}.dkr.ecr.{}.amazonaws.com'.format(aws_id, region)
@pytest.fixture(name='ecr_image', scope='session')
def fixture_ecr_image(docker_registry, docker_base_name, tag):
return '{}/{}:{}'.format(docker_registry, docker_base_name, tag)
@pytest.fixture(autouse=True)
def skip_by_device_type(request, use_gpu, instance_type, accelerator_type):
is_gpu = use_gpu or instance_type[3] in ['g', 'p']
is_eia = accelerator_type is not None
if (request.node.get_closest_marker('gpu_test') and not is_gpu) or \
(request.node.get_closest_marker('cpu_test') and is_gpu):
pytest.skip('Skipping because running on \'{}\' instance'.format(instance_type))
elif (request.node.get_closest_marker('gpu_test') or request.node.get_closest_marker('cpu_test')) and is_eia:
pytest.skip('Skipping because running on \'{}\' instance'.format(instance_type))
elif request.node.get_closest_marker('eia_test') and not is_eia:
pytest.skip('Skipping because running on \'{}\' instance'.format(instance_type))
@pytest.fixture(autouse=True)
def skip_by_py_version(request, py_version):
if request.node.get_closest_marker('skip_py2') and py_version != 'py3':
pytest.skip('Skipping the test because Python 2 is not supported.')
@pytest.fixture(autouse=True)
def skip_gpu_instance_restricted_regions(region, instance_type):
if (region in NO_P2_REGIONS and instance_type.startswith('ml.p2')) \
or (region in NO_P3_REGIONS and instance_type.startswith('ml.p3')):
pytest.skip('Skipping GPU test in region {}'.format(region))
@pytest.fixture(autouse=True)
def skip_gpu_py2(request, use_gpu, instance_type, py_version, framework_version):
is_gpu = use_gpu or instance_type[3] in ['g', 'p']
if request.node.get_closest_marker('skip_gpu_py2') and is_gpu and py_version != 'py3' \
and framework_version == '1.4.0':
pytest.skip('Skipping the test until mms issue resolved.')
| true | true |
f7007ce6b17b426ac80e33b7b3f19e6da1a19280 | 4,146 | py | Python | dsbox/ml/visualization/metrics.py | Pandinosaurus/dsbox | aea56049025ed7e6e66427f8636286f8be1b6e03 | [
"Apache-2.0"
] | 16 | 2020-05-11T09:10:15.000Z | 2021-04-13T08:43:28.000Z | dsbox/ml/visualization/metrics.py | Pandinosaurus/dsbox | aea56049025ed7e6e66427f8636286f8be1b6e03 | [
"Apache-2.0"
] | 1 | 2020-12-03T20:02:32.000Z | 2020-12-03T20:02:32.000Z | dsbox/ml/visualization/metrics.py | Pandinosaurus/dsbox | aea56049025ed7e6e66427f8636286f8be1b6e03 | [
"Apache-2.0"
] | 1 | 2020-05-11T17:22:20.000Z | 2020-05-11T17:22:20.000Z | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import roc_curve, auc
__author__ = "Aurélien Massiot"
__credits__ = "https://github.com/octo-technology/bdacore"
__license__ = "Apache 2.0"
def plot_confusion_matrix(confusion_matrix, classes_list, normalize=True, figsize=(10, 7), fontsize=14, cmap="Blues"):
"""
Display a pretty confusion matrix.
Parameters
----------
confusion_matrix : array-like
classes_list : list,
classes list of the confusion matrix
normalize : boolean,
normalize confusion matrix
figsize : tuple, optional (default=(10,7))
set the figure size
fontsize : int, optional (default=14)
set the font size
cmap : str, optional (default="Blues")
set the colormap
Returns
-------
Confusion matrix figure
Examples
--------
>>> from dsbox.ml.visualization.metrics import plot_confusion_matrix
>>> array = [[ 8458, 227, 1730], \
[ 1073, 37590, 1613], \
[ 2390, 1159, 17540]]
>>> classes_list = ["A", "B", "C"]
>>> plot_confusion_matrix(array, classes_list)
"""
confusion_matrix = np.array(confusion_matrix)
fig, ax = plt.subplots(figsize=figsize)
if normalize:
normalized_cm = np.array(confusion_matrix).astype('float') / np.array(confusion_matrix).sum(axis=1)[:,
np.newaxis]
df_cm = pd.DataFrame(
normalized_cm, index=classes_list, columns=classes_list,
)
plt.matshow(df_cm, fignum=0, cmap=cmap)
else:
df_cm = pd.DataFrame(
confusion_matrix, index=classes_list, columns=classes_list,
)
plt.matshow(df_cm, fignum=0, cmap=cmap)
ax.set_xticks(np.arange(len(classes_list)))
ax.set_yticks(np.arange(len(classes_list)))
ax.set_xticklabels(classes_list)
ax.set_yticklabels(classes_list)
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
for i in range(len(classes_list)):
for j in range(len(classes_list)):
ax.text(j, i, confusion_matrix[i, j], ha="center", va="center", color="grey", fontsize=fontsize)
plt.ylabel('True labels')
plt.xlabel('Predicted labels')
plt.show()
def plot_roc_curve(y_test, y_pred_probas, proba_step=None):
"""
Plot ROC curve with probabilities thresholds.
Parameters
----------
y_test : array-like
true labels
y_pred_probas : array-like
predicted labels
proba_step : int (optional) (default=None)
if set, give the step for each probability display. If None, nothing is displayed.
Examples
--------
>>> from dsbox.ml.visualization.metrics import plot_roc_curve
>>> from sklearn import datasets
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.ensemble import RandomForestClassifier
>>> X, y = datasets.make_moons(noise=0.3, random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5, random_state=0)
>>> clf = RandomForestClassifier(n_estimators=10, random_state=42)
>>> _ = clf.fit(X_train, y_train)
>>> y_pred_probas = clf.predict_proba(X_test)
>>> plot_roc_curve(y_test, y_pred_probas, proba_step=2)
"""
fpr, tpr, thresholds = roc_curve(y_test, y_pred_probas[:, 1])
auc_score = auc(fpr, tpr)
plt.figure()
lw = 1
plt.plot(fpr, tpr, color='darkorange', lw=lw, marker='.')
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
if proba_step is not None:
i = 0
for x, y, txt in zip(fpr, tpr, thresholds):
if i % proba_step == 0:
plt.annotate(np.round(txt, 2), (x, y - 0.04), color='darkgray', fontsize=8)
i += 1
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic (ROC) - AUC score: {}'.format(str(np.round(auc_score,3))))
plt.show()
| 31.409091 | 118 | 0.623493 | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import roc_curve, auc
__author__ = "Aurélien Massiot"
__credits__ = "https://github.com/octo-technology/bdacore"
__license__ = "Apache 2.0"
def plot_confusion_matrix(confusion_matrix, classes_list, normalize=True, figsize=(10, 7), fontsize=14, cmap="Blues"):
confusion_matrix = np.array(confusion_matrix)
fig, ax = plt.subplots(figsize=figsize)
if normalize:
normalized_cm = np.array(confusion_matrix).astype('float') / np.array(confusion_matrix).sum(axis=1)[:,
np.newaxis]
df_cm = pd.DataFrame(
normalized_cm, index=classes_list, columns=classes_list,
)
plt.matshow(df_cm, fignum=0, cmap=cmap)
else:
df_cm = pd.DataFrame(
confusion_matrix, index=classes_list, columns=classes_list,
)
plt.matshow(df_cm, fignum=0, cmap=cmap)
ax.set_xticks(np.arange(len(classes_list)))
ax.set_yticks(np.arange(len(classes_list)))
ax.set_xticklabels(classes_list)
ax.set_yticklabels(classes_list)
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
for i in range(len(classes_list)):
for j in range(len(classes_list)):
ax.text(j, i, confusion_matrix[i, j], ha="center", va="center", color="grey", fontsize=fontsize)
plt.ylabel('True labels')
plt.xlabel('Predicted labels')
plt.show()
def plot_roc_curve(y_test, y_pred_probas, proba_step=None):
fpr, tpr, thresholds = roc_curve(y_test, y_pred_probas[:, 1])
auc_score = auc(fpr, tpr)
plt.figure()
lw = 1
plt.plot(fpr, tpr, color='darkorange', lw=lw, marker='.')
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
if proba_step is not None:
i = 0
for x, y, txt in zip(fpr, tpr, thresholds):
if i % proba_step == 0:
plt.annotate(np.round(txt, 2), (x, y - 0.04), color='darkgray', fontsize=8)
i += 1
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic (ROC) - AUC score: {}'.format(str(np.round(auc_score,3))))
plt.show()
| true | true |
f7007d740bcaf7a6890bc4f3746a71b835476624 | 75 | py | Python | tests/csrf_tests/csrf_token_error_handler_urls.py | jpmallarino/django | 659d2421c7adbbcd205604002d521d82d6b0b465 | [
"BSD-3-Clause",
"0BSD"
] | 16 | 2019-08-10T12:24:06.000Z | 2020-05-21T09:11:14.000Z | tests/csrf_tests/csrf_token_error_handler_urls.py | jpmallarino/django | 659d2421c7adbbcd205604002d521d82d6b0b465 | [
"BSD-3-Clause",
"0BSD"
] | 12 | 2019-08-10T11:55:29.000Z | 2020-05-21T04:46:30.000Z | tests/csrf_tests/csrf_token_error_handler_urls.py | jpmallarino/django | 659d2421c7adbbcd205604002d521d82d6b0b465 | [
"BSD-3-Clause",
"0BSD"
] | 3 | 2019-08-20T13:29:34.000Z | 2020-01-30T22:05:10.000Z | urlpatterns = []
handler404 = "csrf_tests.views.csrf_token_error_handler"
| 18.75 | 56 | 0.8 | urlpatterns = []
handler404 = "csrf_tests.views.csrf_token_error_handler"
| true | true |
f7007dd29ef8b16e106db064eeeb8545c8e209c3 | 1,031 | py | Python | clients/python/sust/api/climate_explorer/clientgen/test/test_physical_risk_summary_indicators.py | sustglobal/dev-center | 817947b2a91500c634c4cac9063e5084efb2f8a6 | [
"Apache-2.0"
] | null | null | null | clients/python/sust/api/climate_explorer/clientgen/test/test_physical_risk_summary_indicators.py | sustglobal/dev-center | 817947b2a91500c634c4cac9063e5084efb2f8a6 | [
"Apache-2.0"
] | 6 | 2021-12-06T19:25:57.000Z | 2022-03-01T20:58:55.000Z | clients/python/sust/api/climate_explorer/clientgen/test/test_physical_risk_summary_indicators.py | sustglobal/dev-center | 817947b2a91500c634c4cac9063e5084efb2f8a6 | [
"Apache-2.0"
] | 1 | 2021-12-03T22:39:06.000Z | 2021-12-03T22:39:06.000Z | """
Sust Global Climate Explorer API
This API provides programmatic access to physical risk exposure data. For more guidance on using this API, please visit the Sust Global Dev Center: https://developers.sustglobal.com. # noqa: E501
The version of the OpenAPI document: beta
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import sust.api.climate_explorer.clientgen
from sust.api.climate_explorer.clientgen.model.physical_risk_summary_indicators import PhysicalRiskSummaryIndicators
class TestPhysicalRiskSummaryIndicators(unittest.TestCase):
"""PhysicalRiskSummaryIndicators unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPhysicalRiskSummaryIndicators(self):
"""Test PhysicalRiskSummaryIndicators"""
# FIXME: construct object with mandatory attributes with example values
# model = PhysicalRiskSummaryIndicators() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 28.638889 | 202 | 0.741028 |
import sys
import unittest
import sust.api.climate_explorer.clientgen
from sust.api.climate_explorer.clientgen.model.physical_risk_summary_indicators import PhysicalRiskSummaryIndicators
class TestPhysicalRiskSummaryIndicators(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testPhysicalRiskSummaryIndicators(self):
pass
if __name__ == '__main__':
unittest.main()
| true | true |
f7007e7d6cadbb4707818ec05e6fcbc50ba52dfb | 2,656 | py | Python | sysinv/sysinv/sysinv/sysinv/common/service.py | starlingx-staging/stx-config | ccbf0392d1941e7cad6673f6351bd905a5a5d419 | [
"Apache-2.0"
] | null | null | null | sysinv/sysinv/sysinv/sysinv/common/service.py | starlingx-staging/stx-config | ccbf0392d1941e7cad6673f6351bd905a5a5d419 | [
"Apache-2.0"
] | null | null | null | sysinv/sysinv/sysinv/sysinv/common/service.py | starlingx-staging/stx-config | ccbf0392d1941e7cad6673f6351bd905a5a5d419 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 eNovance <[email protected]>
#
# Author: Julien Danjou <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
from oslo_config import cfg
from sysinv.openstack.common import context
from sysinv.openstack.common import log
from sysinv.openstack.common import periodic_task
from sysinv.openstack.common import rpc
from sysinv.openstack.common.rpc import service as rpc_service
from oslo_service import service
cfg.CONF.register_opts([
cfg.IntOpt('periodic_interval',
default=60,
help='seconds between running periodic tasks'),
cfg.StrOpt('host',
default=socket.getfqdn(),
help='Name of this node. This can be an opaque identifier. '
'It is not necessarily a hostname, FQDN, or IP address. '
'However, the node name must be valid within '
'an AMQP key, and if using ZeroMQ, a valid '
'hostname, FQDN, or IP address'),
])
CONF = cfg.CONF
class PeriodicService(rpc_service.Service, periodic_task.PeriodicTasks):
def start(self):
super(PeriodicService, self).start()
admin_context = context.RequestContext('admin', 'admin', is_admin=True)
self.tg.add_timer(cfg.CONF.periodic_interval,
self.manager.periodic_tasks,
context=admin_context)
def prepare_service(argv=None):
if argv is None:
argv = []
rpc.set_defaults(control_exchange='sysinv')
cfg.set_defaults(log.log_opts,
default_log_levels=['amqplib=WARN',
'qpid.messaging=INFO',
'sqlalchemy=WARN',
'keystoneclient=INFO',
'stevedore=INFO',
'eventlet.wsgi.server=WARN'
])
cfg.CONF(argv[1:], project='sysinv')
log.setup('sysinv')
def process_launcher():
return service.ProcessLauncher(CONF)
| 34.947368 | 79 | 0.622364 |
import socket
from oslo_config import cfg
from sysinv.openstack.common import context
from sysinv.openstack.common import log
from sysinv.openstack.common import periodic_task
from sysinv.openstack.common import rpc
from sysinv.openstack.common.rpc import service as rpc_service
from oslo_service import service
cfg.CONF.register_opts([
cfg.IntOpt('periodic_interval',
default=60,
help='seconds between running periodic tasks'),
cfg.StrOpt('host',
default=socket.getfqdn(),
help='Name of this node. This can be an opaque identifier. '
'It is not necessarily a hostname, FQDN, or IP address. '
'However, the node name must be valid within '
'an AMQP key, and if using ZeroMQ, a valid '
'hostname, FQDN, or IP address'),
])
CONF = cfg.CONF
class PeriodicService(rpc_service.Service, periodic_task.PeriodicTasks):
def start(self):
super(PeriodicService, self).start()
admin_context = context.RequestContext('admin', 'admin', is_admin=True)
self.tg.add_timer(cfg.CONF.periodic_interval,
self.manager.periodic_tasks,
context=admin_context)
def prepare_service(argv=None):
if argv is None:
argv = []
rpc.set_defaults(control_exchange='sysinv')
cfg.set_defaults(log.log_opts,
default_log_levels=['amqplib=WARN',
'qpid.messaging=INFO',
'sqlalchemy=WARN',
'keystoneclient=INFO',
'stevedore=INFO',
'eventlet.wsgi.server=WARN'
])
cfg.CONF(argv[1:], project='sysinv')
log.setup('sysinv')
def process_launcher():
return service.ProcessLauncher(CONF)
| true | true |
f7007ecebe6e595cb0fbec944c1292dd13f2083b | 1,680 | py | Python | setup.py | sbrisard/rebin | a0abc9b6e6f82f3c80fe30129f139f1d54f78471 | [
"BSD-3-Clause"
] | null | null | null | setup.py | sbrisard/rebin | a0abc9b6e6f82f3c80fe30129f139f1d54f78471 | [
"BSD-3-Clause"
] | null | null | null | setup.py | sbrisard/rebin | a0abc9b6e6f82f3c80fe30129f139f1d54f78471 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import re
import unittest
from setuptools import setup
def my_test_suite():
"""From http://stackoverflow.com/questions/17001010/.
"""
test_loader = unittest.TestLoader()
test_suite = test_loader.discover('tests', pattern='test_*.py')
return test_suite
with open('rebin.py', 'r') as f:
lines = f.read()
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
lines, re.MULTILINE).group(1)
description = re.search(r'^u\"\"\"(.*)',
lines, re.MULTILINE).group(1)
long_description = re.search('^u\"\"\"(.*)^\"\"\"',
lines, re.MULTILINE | re.DOTALL).group(1)
author = re.search(r'^__author__\s*=\s*[\'"]([^\'"]*)[\'"]',
lines, re.MULTILINE).group(1)
print(long_description)
setup(
name='rebin',
version=version,
description=description,
long_description=long_description,
url='https://github.com/sbrisard/rebin',
author=author,
author_email='',
py_modules=['rebin'],
license='BSD-3',
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Build Tools',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering'],
test_suite='setup.my_test_suite',
install_requires=['numpy'],
)
| 32.307692 | 74 | 0.550595 | import re
import unittest
from setuptools import setup
def my_test_suite():
test_loader = unittest.TestLoader()
test_suite = test_loader.discover('tests', pattern='test_*.py')
return test_suite
with open('rebin.py', 'r') as f:
lines = f.read()
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
lines, re.MULTILINE).group(1)
description = re.search(r'^u\"\"\"(.*)',
lines, re.MULTILINE).group(1)
long_description = re.search('^u\"\"\"(.*)^\"\"\"',
lines, re.MULTILINE | re.DOTALL).group(1)
author = re.search(r'^__author__\s*=\s*[\'"]([^\'"]*)[\'"]',
lines, re.MULTILINE).group(1)
print(long_description)
setup(
name='rebin',
version=version,
description=description,
long_description=long_description,
url='https://github.com/sbrisard/rebin',
author=author,
author_email='',
py_modules=['rebin'],
license='BSD-3',
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Build Tools',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering'],
test_suite='setup.my_test_suite',
install_requires=['numpy'],
)
| true | true |
f7007ee1209e9e3b464ffcab4ffdfa1412233c4b | 91,000 | py | Python | src/metpy/calc/thermo.py | aschueth/MetPy | 5e906c0fcfadccdc8514011d15d911243130d405 | [
"BSD-3-Clause"
] | 1 | 2021-08-16T13:07:33.000Z | 2021-08-16T13:07:33.000Z | src/metpy/calc/thermo.py | aschueth/MetPy | 5e906c0fcfadccdc8514011d15d911243130d405 | [
"BSD-3-Clause"
] | null | null | null | src/metpy/calc/thermo.py | aschueth/MetPy | 5e906c0fcfadccdc8514011d15d911243130d405 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2008,2015,2016,2017,2018,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Contains a collection of thermodynamic calculations."""
import warnings
import numpy as np
import scipy.integrate as si
import scipy.optimize as so
from .tools import (_greater_or_close, _less_or_close, _remove_nans, find_bounding_indices,
find_intersections, first_derivative, get_layer)
from .. import constants as mpconsts
from ..cbook import broadcast_indices
from ..interpolate.one_dimension import interpolate_1d
from ..package_tools import Exporter
from ..units import check_units, concatenate, units
from ..xarray import preprocess_xarray
exporter = Exporter(globals())
sat_pressure_0c = 6.112 * units.millibar
@exporter.export
@preprocess_xarray
@check_units('[temperature]', '[temperature]')
def relative_humidity_from_dewpoint(temperature, dewpoint):
r"""Calculate the relative humidity.
Uses temperature and dewpoint in celsius to calculate relative
humidity using the ratio of vapor pressure to saturation vapor pressures.
Parameters
----------
temperature : `pint.Quantity`
air temperature
dewpoint : `pint.Quantity`
dewpoint temperature
Returns
-------
`pint.Quantity`
relative humidity
See Also
--------
saturation_vapor_pressure
"""
e = saturation_vapor_pressure(dewpoint)
e_s = saturation_vapor_pressure(temperature)
return (e / e_s)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[pressure]')
def exner_function(pressure, reference_pressure=mpconsts.P0):
r"""Calculate the Exner function.
.. math:: \Pi = \left( \frac{p}{p_0} \right)^\kappa
    This can be used to calculate potential temperature from temperature (and vice versa),
since
.. math:: \Pi = \frac{T}{\theta}
Parameters
----------
pressure : `pint.Quantity`
total atmospheric pressure
reference_pressure : `pint.Quantity`, optional
The reference pressure against which to calculate the Exner function, defaults to
metpy.constants.P0
Returns
-------
`pint.Quantity`
The value of the Exner function at the given pressure
See Also
--------
potential_temperature
temperature_from_potential_temperature
"""
return (pressure / reference_pressure).to('dimensionless')**mpconsts.kappa
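# Added usage sketch (illustrative only; not part of the upstream MetPy module).
# The `_example_*` helper name and the numeric values are hypothetical: multiplying a
# potential temperature by the Exner function at 850 hPa recovers the air temperature
# on that pressure surface.
def _example_exner_function():
    """Convert a hypothetical 300 K potential temperature to temperature at 850 hPa."""
    return 300. * units.kelvin * exner_function(850. * units.hPa)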
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def potential_temperature(pressure, temperature):
r"""Calculate the potential temperature.
    Uses the Poisson equation to calculate the potential temperature
given `pressure` and `temperature`.
Parameters
----------
pressure : `pint.Quantity`
total atmospheric pressure
temperature : `pint.Quantity`
air temperature
Returns
-------
`pint.Quantity`
The potential temperature corresponding to the temperature and
pressure.
See Also
--------
dry_lapse
Notes
-----
Formula:
.. math:: \Theta = T (P_0 / P)^\kappa
Examples
--------
>>> from metpy.units import units
>>> metpy.calc.potential_temperature(800. * units.mbar, 273. * units.kelvin)
<Quantity(290.9665329591884, 'kelvin')>
"""
return temperature / exner_function(pressure)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def temperature_from_potential_temperature(pressure, potential_temperature):
r"""Calculate the temperature from a given potential temperature.
Uses the inverse of the Poisson equation to calculate the temperature from a
given potential temperature at a specific pressure level.
Parameters
----------
pressure : `pint.Quantity`
total atmospheric pressure
potential_temperature : `pint.Quantity`
potential temperature
Returns
-------
`pint.Quantity`
The temperature corresponding to the potential temperature and pressure.
See Also
--------
dry_lapse
potential_temperature
Notes
-----
Formula:
.. math:: T = \Theta (P / P_0)^\kappa
Examples
--------
>>> from metpy.units import units
>>> from metpy.calc import temperature_from_potential_temperature
>>> # potential temperature
>>> theta = np.array([ 286.12859679, 288.22362587]) * units.kelvin
>>> p = 850 * units.mbar
>>> T = temperature_from_potential_temperature(p, theta)
"""
return potential_temperature * exner_function(pressure)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[pressure]')
def dry_lapse(pressure, temperature, reference_pressure=None):
r"""Calculate the temperature at a level assuming only dry processes.
This function lifts a parcel starting at `temperature`, conserving
potential temperature. The starting pressure can be given by `reference_pressure`.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest
temperature : `pint.Quantity`
The starting temperature
reference_pressure : `pint.Quantity`, optional
The reference pressure. If not given, it defaults to the first element of the
pressure array.
Returns
-------
`pint.Quantity`
The resulting parcel temperature at levels given by `pressure`
See Also
--------
moist_lapse : Calculate parcel temperature assuming liquid saturation processes
parcel_profile : Calculate complete parcel profile
potential_temperature
"""
if reference_pressure is None:
reference_pressure = pressure[0]
return temperature * (pressure / reference_pressure)**mpconsts.kappa
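# Illustrative sketch (added here, not in the upstream module): lift a hypothetical
# 293 K surface parcel dry adiabatically and read off its temperature at each level.
def _example_dry_lapse():
    """Cool a 293 K parcel from 1000 hPa to 700 hPa along a dry adiabat."""
    levels = np.array([1000., 900., 800., 700.]) * units.hPa
    return dry_lapse(levels, 293. * units.kelvin)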
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[pressure]')
def moist_lapse(pressure, temperature, reference_pressure=None):
r"""Calculate the temperature at a level assuming liquid saturation processes.
This function lifts a parcel starting at `temperature`. The starting pressure can
be given by `reference_pressure`. Essentially, this function is calculating moist
pseudo-adiabats.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest
temperature : `pint.Quantity`
The starting temperature
reference_pressure : `pint.Quantity`, optional
The reference pressure. If not given, it defaults to the first element of the
pressure array.
Returns
-------
`pint.Quantity`
The temperature corresponding to the starting temperature and
pressure levels.
See Also
--------
dry_lapse : Calculate parcel temperature assuming dry adiabatic processes
parcel_profile : Calculate complete parcel profile
Notes
-----
This function is implemented by integrating the following differential
equation:
.. math:: \frac{dT}{dP} = \frac{1}{P} \frac{R_d T + L_v r_s}
{C_{pd} + \frac{L_v^2 r_s \epsilon}{R_d T^2}}
This equation comes from [Bakhshaii2013]_.
"""
def dt(t, p):
t = units.Quantity(t, temperature.units)
p = units.Quantity(p, pressure.units)
rs = saturation_mixing_ratio(p, t)
frac = ((mpconsts.Rd * t + mpconsts.Lv * rs)
/ (mpconsts.Cp_d + (mpconsts.Lv * mpconsts.Lv * rs * mpconsts.epsilon
/ (mpconsts.Rd * t * t)))).to('kelvin')
return (frac / p).magnitude
if reference_pressure is None:
reference_pressure = pressure[0]
pressure = pressure.to('mbar')
reference_pressure = reference_pressure.to('mbar')
temperature = np.atleast_1d(temperature)
side = 'left'
pres_decreasing = (pressure[0] > pressure[-1])
if pres_decreasing:
# Everything is easier if pressures are in increasing order
pressure = pressure[::-1]
side = 'right'
ref_pres_idx = np.searchsorted(pressure.m, reference_pressure.m, side=side)
ret_temperatures = np.empty((0, temperature.shape[0]))
if reference_pressure > pressure.min():
# Integrate downward in pressure
pres_down = np.append(reference_pressure.m, pressure[(ref_pres_idx - 1)::-1].m)
trace_down = si.odeint(dt, temperature.m.squeeze(), pres_down.squeeze())
ret_temperatures = np.concatenate((ret_temperatures, trace_down[:0:-1]))
if reference_pressure < pressure.max():
# Integrate upward in pressure
pres_up = np.append(reference_pressure.m, pressure[ref_pres_idx:].m)
trace_up = si.odeint(dt, temperature.m.squeeze(), pres_up.squeeze())
ret_temperatures = np.concatenate((ret_temperatures, trace_up[1:]))
if pres_decreasing:
ret_temperatures = ret_temperatures[::-1]
return units.Quantity(ret_temperatures.T.squeeze(), temperature.units)
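# Illustrative sketch (added here, not in the upstream module): integrate the
# pseudo-adiabatic equation above for a hypothetical saturated parcel that starts
# at 1000 hPa and 293 K.
def _example_moist_lapse():
    """Follow a saturated 293 K parcel from 1000 hPa up to 600 hPa."""
    levels = np.array([1000., 900., 800., 700., 600.]) * units.hPa
    return moist_lapse(levels, 293. * units.kelvin)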
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def lcl(pressure, temperature, dewpoint, max_iters=50, eps=1e-5):
r"""Calculate the lifted condensation level (LCL) using from the starting point.
The starting state for the parcel is defined by `temperature`, `dewpoint`,
and `pressure`. If these are arrays, this function will return a LCL
for every index. This function does work with surface grids as a result.
Parameters
----------
pressure : `pint.Quantity`
The starting atmospheric pressure
temperature : `pint.Quantity`
The starting temperature
dewpoint : `pint.Quantity`
The starting dewpoint
Returns
-------
`pint.Quantity`
The LCL pressure
`pint.Quantity`
The LCL temperature
Other Parameters
----------------
max_iters : int, optional
The maximum number of iterations to use in calculation, defaults to 50.
eps : float, optional
The desired relative error in the calculated value, defaults to 1e-5.
See Also
--------
parcel_profile
Notes
-----
This function is implemented using an iterative approach to solve for the
LCL. The basic algorithm is:
1. Find the dewpoint from the LCL pressure and starting mixing ratio
2. Find the LCL pressure from the starting temperature and dewpoint
3. Iterate until convergence
The function is guaranteed to finish by virtue of the `max_iters` counter.
"""
def _lcl_iter(p, p0, w, t):
td = globals()['dewpoint'](vapor_pressure(units.Quantity(p, pressure.units), w))
return (p0 * (td / t) ** (1. / mpconsts.kappa)).m
w = mixing_ratio(saturation_vapor_pressure(dewpoint), pressure)
lcl_p = so.fixed_point(_lcl_iter, pressure.m, args=(pressure.m, w, temperature),
xtol=eps, maxiter=max_iters)
# np.isclose needed if surface is LCL due to precision error with np.log in dewpoint.
# Causes issues with parcel_profile_with_lcl if removed. Issue #1187
lcl_p = np.where(np.isclose(lcl_p, pressure.m), pressure.m, lcl_p) * pressure.units
return lcl_p, globals()['dewpoint'](vapor_pressure(lcl_p, w)).to(temperature.units)
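# Illustrative sketch (added here, not in the upstream module): the fixed-point
# iteration above converges to a single LCL pressure/temperature pair for a
# hypothetical warm, moist surface parcel.
def _example_lcl():
    """LCL of a parcel at 1000 hPa with T = 30 degC and Td = 20 degC."""
    return lcl(1000. * units.hPa, 30. * units.degC, 20. * units.degC)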
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]', '[temperature]')
def lfc(pressure, temperature, dewpoint, parcel_temperature_profile=None, dewpoint_start=None,
which='top'):
r"""Calculate the level of free convection (LFC).
This works by finding the first intersection of the ideal parcel path and
the measured parcel temperature. If this intersection occurs below the LCL,
the LFC is determined to be the same as the LCL, based upon the conditions
set forth in [USAF1990]_, pg 4-14, where a parcel must be lifted dry adiabatically
to saturation before it can freely rise.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure
temperature : `pint.Quantity`
The temperature at the levels given by `pressure`
dewpoint : `pint.Quantity`
The dewpoint at the levels given by `pressure`
parcel_temperature_profile: `pint.Quantity`, optional
The parcel temperature profile from which to calculate the LFC. Defaults to the
surface parcel profile.
dewpoint_start: `pint.Quantity`, optional
The dewpoint of the parcel for which to calculate the LFC. Defaults to the surface
dewpoint.
which: str, optional
Pick which LFC to return. Options are 'top', 'bottom', 'wide', 'most_cape', and 'all'.
'top' returns the lowest-pressure LFC, default.
'bottom' returns the highest-pressure LFC.
'wide' returns the LFC whose corresponding EL is farthest away.
'most_cape' returns the LFC that results in the most CAPE in the profile.
Returns
-------
`pint.Quantity`
The LFC pressure, or array of same if which='all'
`pint.Quantity`
The LFC temperature, or array of same if which='all'
See Also
--------
parcel_profile
"""
pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)
# Default to surface parcel if no profile or starting pressure level is given
if parcel_temperature_profile is None:
new_stuff = parcel_profile_with_lcl(pressure, temperature, dewpoint)
pressure, temperature, dewpoint, parcel_temperature_profile = new_stuff
parcel_temperature_profile = parcel_temperature_profile.to(temperature.units)
if dewpoint_start is None:
dewpoint_start = dewpoint[0]
# The parcel profile and data may have the same first data point.
# If that is the case, ignore that point to get the real first
# intersection for the LFC calculation. Use logarithmic interpolation.
if np.isclose(parcel_temperature_profile[0].to(temperature.units).m, temperature[0].m):
x, y = find_intersections(pressure[1:], parcel_temperature_profile[1:],
temperature[1:], direction='increasing', log_x=True)
else:
x, y = find_intersections(pressure, parcel_temperature_profile,
temperature, direction='increasing', log_x=True)
# Compute LCL for this parcel for future comparisons
this_lcl = lcl(pressure[0], parcel_temperature_profile[0], dewpoint_start)
# The LFC could:
# 1) Not exist
# 2) Exist but be equal to the LCL
# 3) Exist and be above the LCL
# LFC does not exist or is LCL
if len(x) == 0:
# Is there any positive area above the LCL?
mask = pressure < this_lcl[0]
if np.all(_less_or_close(parcel_temperature_profile[mask], temperature[mask])):
# LFC doesn't exist
x, y = np.nan * pressure.units, np.nan * temperature.units
else: # LFC = LCL
x, y = this_lcl
return x, y
# LFC exists. Make sure it is no lower than the LCL
else:
idx = x < this_lcl[0]
# LFC height < LCL height, so set LFC = LCL
if not any(idx):
el_pres, _ = find_intersections(pressure[1:], parcel_temperature_profile[1:],
temperature[1:], direction='decreasing',
log_x=True)
if np.min(el_pres) > this_lcl[0]:
x, y = np.nan * pressure.units, np.nan * temperature.units
else:
x, y = this_lcl
return x, y
# Otherwise, find all LFCs that exist above the LCL
# What is returned depends on which flag as described in the docstring
else:
return _multiple_el_lfc_options(x, y, idx, which, pressure,
parcel_temperature_profile, temperature,
dewpoint, intersect_type='LFC')
def _multiple_el_lfc_options(intersect_pressures, intersect_temperatures, valid_x,
which, pressure, parcel_temperature_profile, temperature,
dewpoint, intersect_type):
"""Choose which ELs and LFCs to return from a sounding."""
p_list, t_list = intersect_pressures[valid_x], intersect_temperatures[valid_x]
if which == 'all':
x, y = p_list, t_list
elif which == 'bottom':
x, y = p_list[0], t_list[0]
elif which == 'top':
x, y = p_list[-1], t_list[-1]
elif which == 'wide':
x, y = _wide_option(intersect_type, p_list, t_list, pressure,
parcel_temperature_profile, temperature)
elif which == 'most_cape':
x, y = _most_cape_option(intersect_type, p_list, t_list, pressure, temperature,
dewpoint, parcel_temperature_profile)
else:
raise ValueError('Invalid option for "which". Valid options are "top", "bottom", '
'"wide", "most_cape", and "all".')
return x, y
def _wide_option(intersect_type, p_list, t_list, pressure, parcel_temperature_profile,
temperature):
"""Calculate the LFC or EL that produces the greatest distance between these points."""
# zip the LFC and EL lists together and find greatest difference
if intersect_type == 'LFC':
# Find EL intersection pressure values
lfc_p_list = p_list
el_p_list, _ = find_intersections(pressure[1:], parcel_temperature_profile[1:],
temperature[1:], direction='decreasing',
log_x=True)
else: # intersect_type == 'EL'
el_p_list = p_list
# Find LFC intersection pressure values
lfc_p_list, _ = find_intersections(pressure, parcel_temperature_profile,
temperature, direction='increasing',
log_x=True)
diff = [lfc_p.m - el_p.m for lfc_p, el_p in zip(lfc_p_list, el_p_list)]
return (p_list[np.where(diff == np.max(diff))][0],
t_list[np.where(diff == np.max(diff))][0])
def _most_cape_option(intersect_type, p_list, t_list, pressure, temperature, dewpoint,
parcel_temperature_profile):
"""Calculate the LFC or EL that produces the most CAPE in the profile."""
# Need to loop through all possible combinations of cape, find greatest cape profile
cape_list, pair_list = [], []
for which_lfc in ['top', 'bottom']:
for which_el in ['top', 'bottom']:
cape, _ = cape_cin(pressure, temperature, dewpoint, parcel_temperature_profile,
which_lfc=which_lfc, which_el=which_el)
cape_list.append(cape.m)
pair_list.append([which_lfc, which_el])
(lfc_chosen, el_chosen) = pair_list[np.where(cape_list == np.max(cape_list))[0][0]]
if intersect_type == 'LFC':
if lfc_chosen == 'top':
x, y = p_list[-1], t_list[-1]
else: # 'bottom' is returned
x, y = p_list[0], t_list[0]
else: # EL is returned
if el_chosen == 'top':
x, y = p_list[-1], t_list[-1]
else:
x, y = p_list[0], t_list[0]
return x, y
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]', '[temperature]')
def el(pressure, temperature, dewpoint, parcel_temperature_profile=None, which='top'):
r"""Calculate the equilibrium level.
This works by finding the last intersection of the ideal parcel path and
the measured environmental temperature. If there is one or fewer intersections, there is
no equilibrium level.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure profile
temperature : `pint.Quantity`
The temperature at the levels given by `pressure`
dewpoint : `pint.Quantity`
The dewpoint at the levels given by `pressure`
parcel_temperature_profile: `pint.Quantity`, optional
The parcel temperature profile from which to calculate the EL. Defaults to the
surface parcel profile.
which: str, optional
Pick which LFC to return. Options are 'top', 'bottom', 'wide', 'most_cape', and 'all'.
'top' returns the lowest-pressure EL, default.
'bottom' returns the highest-pressure EL.
'wide' returns the EL whose corresponding LFC is farthest away.
'most_cape' returns the EL that results in the most CAPE in the profile.
Returns
-------
`pint.Quantity`
The EL pressure, or array of same if which='all'
`pint.Quantity`
The EL temperature, or array of same if which='all'
See Also
--------
parcel_profile
"""
pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)
# Default to surface parcel if no profile or starting pressure level is given
if parcel_temperature_profile is None:
new_stuff = parcel_profile_with_lcl(pressure, temperature, dewpoint)
pressure, temperature, dewpoint, parcel_temperature_profile = new_stuff
parcel_temperature_profile = parcel_temperature_profile.to(temperature.units)
# If the top of the sounding parcel is warmer than the environment, there is no EL
if parcel_temperature_profile[-1] > temperature[-1]:
return np.nan * pressure.units, np.nan * temperature.units
# Interpolate in log space to find the appropriate pressure - units have to be stripped
# and reassigned to allow np.log() to function properly.
x, y = find_intersections(pressure[1:], parcel_temperature_profile[1:], temperature[1:],
direction='decreasing', log_x=True)
lcl_p, _ = lcl(pressure[0], temperature[0], dewpoint[0])
idx = x < lcl_p
if len(x) > 0 and x[-1] < lcl_p:
return _multiple_el_lfc_options(x, y, idx, which, pressure,
parcel_temperature_profile, temperature, dewpoint,
intersect_type='EL')
else:
return np.nan * pressure.units, np.nan * temperature.units
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def parcel_profile(pressure, temperature, dewpoint):
r"""Calculate the profile a parcel takes through the atmosphere.
The parcel starts at `temperature`, and `dewpoint`, lifted up
dry adiabatically to the LCL, and then moist adiabatically from there.
`pressure` specifies the pressure levels for the profile.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest. This array must be from
high to low pressure.
temperature : `pint.Quantity`
The starting temperature
dewpoint : `pint.Quantity`
The starting dewpoint
Returns
-------
`pint.Quantity`
The parcel temperatures at the specified pressure levels.
See Also
--------
lcl, moist_lapse, dry_lapse
"""
_, _, _, t_l, _, t_u = _parcel_profile_helper(pressure, temperature, dewpoint)
return concatenate((t_l, t_u))
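# Illustrative sketch (added here, not in the upstream module): build a full parcel
# path for a hypothetical sounding, dry adiabatic below the LCL and moist adiabatic
# above it. The levels and surface values are made up.
def _example_parcel_profile():
    """Parcel temperatures for a 28 degC / 22 degC surface parcel at five levels."""
    levels = np.array([1000., 925., 850., 700., 500.]) * units.hPa
    return parcel_profile(levels, 28. * units.degC, 22. * units.degC)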
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def parcel_profile_with_lcl(pressure, temperature, dewpoint):
r"""Calculate the profile a parcel takes through the atmosphere.
The parcel starts at `temperature`, and `dewpoint`, lifted up
dry adiabatically to the LCL, and then moist adiabatically from there.
`pressure` specifies the pressure levels for the profile. This function returns
a profile that includes the LCL.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest. This array must be from
high to low pressure.
temperature : `pint.Quantity`
The atmospheric temperature at the levels in `pressure`. The first entry should be at
the same level as the first `pressure` data point.
dewpoint : `pint.Quantity`
The atmospheric dewpoint at the levels in `pressure`. The first entry should be at
the same level as the first `pressure` data point.
Returns
-------
pressure : `pint.Quantity`
The parcel profile pressures, which includes the specified levels and the LCL
ambient_temperature : `pint.Quantity`
The atmospheric temperature values, including the value interpolated to the LCL level
ambient_dew_point : `pint.Quantity`
The atmospheric dewpoint values, including the value interpolated to the LCL level
profile_temperature : `pint.Quantity`
The parcel profile temperatures at all of the levels in the returned pressures array,
including the LCL.
See Also
--------
lcl, moist_lapse, dry_lapse, parcel_profile
"""
p_l, p_lcl, p_u, t_l, t_lcl, t_u = _parcel_profile_helper(pressure, temperature[0],
dewpoint[0])
new_press = concatenate((p_l, p_lcl, p_u))
prof_temp = concatenate((t_l, t_lcl, t_u))
new_temp = _insert_lcl_level(pressure, temperature, p_lcl)
new_dewp = _insert_lcl_level(pressure, dewpoint, p_lcl)
return new_press, new_temp, new_dewp, prof_temp
def _parcel_profile_helper(pressure, temperature, dewpoint):
"""Help calculate parcel profiles.
Returns the temperature and pressure, above, below, and including the LCL. The
other calculation functions decide what to do with the pieces.
"""
# Find the LCL
press_lcl, temp_lcl = lcl(pressure[0], temperature, dewpoint)
press_lcl = press_lcl.to(pressure.units)
# Find the dry adiabatic profile, *including* the LCL. We need >= the LCL in case the
# LCL is included in the levels. It's slightly redundant in that case, but simplifies
# the logic for removing it later.
press_lower = concatenate((pressure[pressure >= press_lcl], press_lcl))
temp_lower = dry_lapse(press_lower, temperature)
# If the pressure profile doesn't make it to the lcl, we can stop here
if _greater_or_close(np.nanmin(pressure.m), press_lcl.m):
return (press_lower[:-1], press_lcl, units.Quantity(np.array([]), press_lower.units),
temp_lower[:-1], temp_lcl, units.Quantity(np.array([]), temp_lower.units))
# Find moist pseudo-adiabatic profile starting at the LCL
press_upper = concatenate((press_lcl, pressure[pressure < press_lcl]))
temp_upper = moist_lapse(press_upper, temp_lower[-1]).to(temp_lower.units)
# Return profile pieces
return (press_lower[:-1], press_lcl, press_upper[1:],
temp_lower[:-1], temp_lcl, temp_upper[1:])
def _insert_lcl_level(pressure, temperature, lcl_pressure):
"""Insert the LCL pressure into the profile."""
interp_temp = interpolate_1d(lcl_pressure, pressure, temperature)
# Pressure needs to be increasing for searchsorted, so flip it and then convert
# the index back to the original array
loc = pressure.size - pressure[::-1].searchsorted(lcl_pressure)
return np.insert(temperature.m, loc, interp_temp.m) * temperature.units
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[dimensionless]')
def vapor_pressure(pressure, mixing_ratio):
r"""Calculate water vapor (partial) pressure.
Given total `pressure` and water vapor `mixing_ratio`, calculates the
partial pressure of water vapor.
Parameters
----------
pressure : `pint.Quantity`
total atmospheric pressure
mixing_ratio : `pint.Quantity`
dimensionless mass mixing ratio
Returns
-------
`pint.Quantity`
The ambient water vapor (partial) pressure in the same units as
`pressure`.
Notes
-----
This function is a straightforward implementation of the equation given in many places,
such as [Hobbs1977]_ pg.71:
.. math:: e = p \frac{r}{r + \epsilon}
See Also
--------
saturation_vapor_pressure, dewpoint
"""
return pressure * mixing_ratio / (mpconsts.epsilon + mixing_ratio)
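# Illustrative sketch (added here, not in the upstream module): for a hypothetical
# 12 g/kg mixing ratio at 1000 hPa the partial pressure works out to roughly 19 hPa.
def _example_vapor_pressure():
    """Water vapor partial pressure for 12 g/kg of vapor in 1000 hPa air."""
    return vapor_pressure(1000. * units.hPa, 12. * units('g/kg'))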
@exporter.export
@preprocess_xarray
@check_units('[temperature]')
def saturation_vapor_pressure(temperature):
r"""Calculate the saturation water vapor (partial) pressure.
Parameters
----------
temperature : `pint.Quantity`
air temperature
Returns
-------
`pint.Quantity`
The saturation water vapor (partial) pressure
See Also
--------
vapor_pressure, dewpoint
Notes
-----
Instead of temperature, dewpoint may be used in order to calculate
the actual (ambient) water vapor (partial) pressure.
The formula used is that from [Bolton1980]_ for T in degrees Celsius:
.. math:: 6.112 e^\frac{17.67T}{T + 243.5}
"""
# Converted from original in terms of C to use kelvin. Using raw absolute values of C in
# a formula plays havoc with units support.
return sat_pressure_0c * np.exp(17.67 * (temperature - 273.15 * units.kelvin)
/ (temperature - 29.65 * units.kelvin))
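# Illustrative sketch (added here, not in the upstream module): the Bolton formula
# above gives roughly 31.7 hPa at 25 degC; the value is returned in millibars.
def _example_saturation_vapor_pressure():
    """Saturation vapor pressure of air at 25 degC."""
    return saturation_vapor_pressure(25. * units.degC)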
@exporter.export
@preprocess_xarray
@check_units('[temperature]', '[dimensionless]')
def dewpoint_from_relative_humidity(temperature, relative_humidity):
r"""Calculate the ambient dewpoint given air temperature and relative humidity.
Parameters
----------
temperature : `pint.Quantity`
air temperature
relative_humidity : `pint.Quantity`
relative humidity expressed as a ratio in the range 0 < relative_humidity <= 1
Returns
-------
`pint.Quantity`
The dewpoint temperature
See Also
--------
dewpoint, saturation_vapor_pressure
"""
if np.any(relative_humidity > 1.2):
warnings.warn('Relative humidity >120%, ensure proper units.')
return dewpoint(relative_humidity * saturation_vapor_pressure(temperature))
@exporter.export
@preprocess_xarray
@check_units('[pressure]')
def dewpoint(vapor_pressure):
r"""Calculate the ambient dewpoint given the vapor pressure.
Parameters
----------
e : `pint.Quantity`
Water vapor partial pressure
Returns
-------
`pint.Quantity`
dewpoint temperature
See Also
--------
dewpoint_from_relative_humidity, saturation_vapor_pressure, vapor_pressure
Notes
-----
This function inverts the [Bolton1980]_ formula for saturation vapor
pressure to instead calculate the temperature. This yield the following
formula for dewpoint in degrees Celsius:
.. math:: T = \frac{243.5 log(e / 6.112)}{17.67 - log(e / 6.112)}
"""
val = np.log(vapor_pressure / sat_pressure_0c)
return 0. * units.degC + 243.5 * units.delta_degC * val / (17.67 - val)
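# Illustrative sketch (added here, not in the upstream module): inverting the formula
# for a hypothetical vapor pressure of 22 hPa gives a dewpoint of roughly 19 degC.
def _example_dewpoint():
    """Dewpoint corresponding to a 22 hPa water vapor partial pressure."""
    return dewpoint(22. * units.hPa)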
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[pressure]', '[dimensionless]')
def mixing_ratio(partial_press, total_press, molecular_weight_ratio=mpconsts.epsilon):
r"""Calculate the mixing ratio of a gas.
This calculates mixing ratio given its partial pressure and the total pressure of
the air. There are no required units for the input arrays, other than that
they have the same units.
Parameters
----------
partial_press : `pint.Quantity`
Partial pressure of the constituent gas
total_press : `pint.Quantity`
Total air pressure
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air
(:math:`\epsilon\approx0.622`).
Returns
-------
`pint.Quantity`
The (mass) mixing ratio, dimensionless (e.g. Kg/Kg or g/g)
Notes
-----
This function is a straightforward implementation of the equation given in many places,
such as [Hobbs1977]_ pg.73:
.. math:: r = \epsilon \frac{e}{p - e}
See Also
--------
saturation_mixing_ratio, vapor_pressure
"""
return (molecular_weight_ratio * partial_press
/ (total_press - partial_press)).to('dimensionless')
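# Illustrative sketch (added here, not in the upstream module): 15 hPa of vapor in
# 1000 hPa of air corresponds to a mass mixing ratio of roughly 9.5 g/kg
# (about 0.0095 dimensionless).
def _example_mixing_ratio():
    """Mass mixing ratio for a 15 hPa vapor partial pressure at 1000 hPa."""
    return mixing_ratio(15. * units.hPa, 1000. * units.hPa)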
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def saturation_mixing_ratio(total_press, temperature):
r"""Calculate the saturation mixing ratio of water vapor.
This calculation is given total pressure and the temperature. The implementation
uses the formula outlined in [Hobbs1977]_ pg.73.
Parameters
----------
total_press: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
air temperature
Returns
-------
`pint.Quantity`
The saturation mixing ratio, dimensionless
"""
return mixing_ratio(saturation_vapor_pressure(temperature), total_press)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def equivalent_potential_temperature(pressure, temperature, dewpoint):
r"""Calculate equivalent potential temperature.
This calculation must be given an air parcel's pressure, temperature, and dewpoint.
The implementation uses the formula outlined in [Bolton1980]_:
First, the LCL temperature is calculated:
.. math:: T_{L}=\frac{1}{\frac{1}{T_{D}-56}+\frac{ln(T_{K}/T_{D})}{800}}+56
Which is then used to calculate the potential temperature at the LCL:
.. math:: \theta_{DL}=T_{K}\left(\frac{1000}{p-e}\right)^k
\left(\frac{T_{K}}{T_{L}}\right)^{.28r}
Both of these are used to calculate the final equivalent potential temperature:
.. math:: \theta_{E}=\theta_{DL}\exp\left[\left(\frac{3036.}{T_{L}}
-1.78\right)*r(1+.448r)\right]
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Temperature of parcel
dewpoint: `pint.Quantity`
Dewpoint of parcel
Returns
-------
`pint.Quantity`
The equivalent potential temperature of the parcel
Notes
-----
[Bolton1980]_ formula for Theta-e is used, since according to
[DaviesJones2009]_ it is the most accurate non-iterative formulation
available.
"""
t = temperature.to('kelvin').magnitude
td = dewpoint.to('kelvin').magnitude
p = pressure.to('hPa').magnitude
e = saturation_vapor_pressure(dewpoint).to('hPa').magnitude
r = saturation_mixing_ratio(pressure, dewpoint).magnitude
t_l = 56 + 1. / (1. / (td - 56) + np.log(t / td) / 800.)
th_l = t * (1000 / (p - e)) ** mpconsts.kappa * (t / t_l) ** (0.28 * r)
th_e = th_l * np.exp((3036. / t_l - 1.78) * r * (1 + 0.448 * r))
return th_e * units.kelvin
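# Illustrative sketch (added here, not in the upstream module): evaluate the Bolton
# theta-e expression for a hypothetical 850 hPa parcel with T = 20 degC and
# Td = 15 degC.
def _example_equivalent_potential_temperature():
    """Equivalent potential temperature of a warm, moist 850 hPa parcel."""
    return equivalent_potential_temperature(850. * units.hPa, 20. * units.degC,
                                            15. * units.degC)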
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def saturation_equivalent_potential_temperature(pressure, temperature):
r"""Calculate saturation equivalent potential temperature.
This calculation must be given an air parcel's pressure and temperature.
The implementation uses the formula outlined in [Bolton1980]_ for the
equivalent potential temperature, and assumes a saturated process.
First, because we assume a saturated process, the temperature at the LCL is
equivalent to the current temperature. Therefore the following equation
.. math:: T_{L}=\frac{1}{\frac{1}{T_{D}-56}+\frac{ln(T_{K}/T_{D})}{800}}+56
reduces to
.. math:: T_{L} = T_{K}
Then the potential temperature at the temperature/LCL is calculated:
.. math:: \theta_{DL}=T_{K}\left(\frac{1000}{p-e}\right)^k
\left(\frac{T_{K}}{T_{L}}\right)^{.28r}
However, because
.. math:: T_{L} = T_{K}
it follows that
.. math:: \theta_{DL}=T_{K}\left(\frac{1000}{p-e}\right)^k
Both of these are used to calculate the final equivalent potential temperature:
.. math:: \theta_{E}=\theta_{DL}\exp\left[\left(\frac{3036.}{T_{K}}
-1.78\right)*r(1+.448r)\right]
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Temperature of parcel
Returns
-------
`pint.Quantity`
The saturation equivalent potential temperature of the parcel
Notes
-----
[Bolton1980]_ formula for Theta-e is used (for saturated case), since according to
[DaviesJones2009]_ it is the most accurate non-iterative formulation
available.
"""
t = temperature.to('kelvin').magnitude
p = pressure.to('hPa').magnitude
e = saturation_vapor_pressure(temperature).to('hPa').magnitude
r = saturation_mixing_ratio(pressure, temperature).magnitude
th_l = t * (1000 / (p - e)) ** mpconsts.kappa
th_es = th_l * np.exp((3036. / t - 1.78) * r * (1 + 0.448 * r))
return th_es * units.kelvin
@exporter.export
@preprocess_xarray
@check_units('[temperature]', '[dimensionless]', '[dimensionless]')
def virtual_temperature(temperature, mixing_ratio, molecular_weight_ratio=mpconsts.epsilon):
r"""Calculate virtual temperature.
This calculation must be given an air parcel's temperature and mixing ratio.
The implementation uses the formula outlined in [Hobbs2006]_ pg.80.
Parameters
----------
temperature: `pint.Quantity`
air temperature
mixing_ratio : `pint.Quantity`
dimensionless mass mixing ratio
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\epsilon\approx0.622`).
Returns
-------
`pint.Quantity`
The corresponding virtual temperature of the parcel
Notes
-----
.. math:: T_v = T \frac{\text{w} + \epsilon}{\epsilon\,(1 + \text{w})}
"""
return temperature * ((mixing_ratio + molecular_weight_ratio)
/ (molecular_weight_ratio * (1 + mixing_ratio)))
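# Illustrative sketch (added here, not in the upstream module): a hypothetical
# 10 g/kg of vapor makes a 300 K parcel appear a little under 2 K warmer to the
# density calculation.
def _example_virtual_temperature():
    """Virtual temperature of 300 K air carrying 10 g/kg of water vapor."""
    return virtual_temperature(300. * units.kelvin, 10. * units('g/kg'))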
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[dimensionless]', '[dimensionless]')
def virtual_potential_temperature(pressure, temperature, mixing_ratio,
molecular_weight_ratio=mpconsts.epsilon):
r"""Calculate virtual potential temperature.
This calculation must be given an air parcel's pressure, temperature, and mixing ratio.
The implementation uses the formula outlined in [Markowski2010]_ pg.13.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
air temperature
mixing_ratio : `pint.Quantity`
dimensionless mass mixing ratio
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\epsilon\approx0.622`).
Returns
-------
`pint.Quantity`
The corresponding virtual potential temperature of the parcel
Notes
-----
.. math:: \Theta_v = \Theta \frac{\text{w} + \epsilon}{\epsilon\,(1 + \text{w})}
"""
pottemp = potential_temperature(pressure, temperature)
return virtual_temperature(pottemp, mixing_ratio, molecular_weight_ratio)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[dimensionless]', '[dimensionless]')
def density(pressure, temperature, mixing_ratio, molecular_weight_ratio=mpconsts.epsilon):
r"""Calculate density.
This calculation must be given an air parcel's pressure, temperature, and mixing ratio.
The implementation uses the formula outlined in [Hobbs2006]_ pg.67.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
air temperature
mixing_ratio : `pint.Quantity`
dimensionless mass mixing ratio
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\epsilon\approx0.622`).
Returns
-------
`pint.Quantity`
The corresponding density of the parcel
Notes
-----
.. math:: \rho = \frac{p}{R_dT_v}
"""
virttemp = virtual_temperature(temperature, mixing_ratio, molecular_weight_ratio)
return (pressure / (mpconsts.Rd * virttemp)).to(units.kilogram / units.meter ** 3)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def relative_humidity_wet_psychrometric(pressure, dry_bulb_temperature, web_bulb_temperature,
**kwargs):
r"""Calculate the relative humidity with wet bulb and dry bulb temperatures.
This uses a psychrometric relationship as outlined in [WMO8-2014]_, with
coefficients from [Fan1987]_.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
dry_bulb_temperature: `pint.Quantity`
Dry bulb temperature
web_bulb_temperature: `pint.Quantity`
Wet bulb temperature
Returns
-------
`pint.Quantity`
Relative humidity
Notes
-----
.. math:: relative_humidity = \frac{e}{e_s}
* :math:`relative_humidity` is relative humidity as a unitless ratio
* :math:`e` is vapor pressure from the wet psychrometric calculation
* :math:`e_s` is the saturation vapor pressure
See Also
--------
psychrometric_vapor_pressure_wet, saturation_vapor_pressure
"""
return (psychrometric_vapor_pressure_wet(pressure, dry_bulb_temperature,
web_bulb_temperature, **kwargs)
/ saturation_vapor_pressure(dry_bulb_temperature))
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def psychrometric_vapor_pressure_wet(pressure, dry_bulb_temperature, wet_bulb_temperature,
psychrometer_coefficient=6.21e-4 / units.kelvin):
r"""Calculate the vapor pressure with wet bulb and dry bulb temperatures.
This uses a psychrometric relationship as outlined in [WMO8-2014]_, with
coefficients from [Fan1987]_.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
dry_bulb_temperature: `pint.Quantity`
Dry bulb temperature
wet_bulb_temperature: `pint.Quantity`
Wet bulb temperature
psychrometer_coefficient: `pint.Quantity`, optional
Psychrometer coefficient. Defaults to 6.21e-4 K^-1.
Returns
-------
`pint.Quantity`
Vapor pressure
Notes
-----
.. math:: e' = e'_w(T_w) - A p (T - T_w)
* :math:`e'` is vapor pressure
* :math:`e'_w(T_w)` is the saturation vapor pressure with respect to water at temperature
:math:`T_w`
* :math:`p` is the pressure of the wet bulb
* :math:`T` is the temperature of the dry bulb
* :math:`T_w` is the temperature of the wet bulb
* :math:`A` is the psychrometer coefficient
Psychrometer coefficient depends on the specific instrument being used and the ventilation
of the instrument.
See Also
--------
saturation_vapor_pressure
"""
return (saturation_vapor_pressure(wet_bulb_temperature) - psychrometer_coefficient
* pressure * (dry_bulb_temperature - wet_bulb_temperature).to('kelvin'))
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[dimensionless]')
def mixing_ratio_from_relative_humidity(pressure, temperature, relative_humidity):
r"""Calculate the mixing ratio from relative humidity, temperature, and pressure.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
relative_humidity: array_like
The relative humidity expressed as a unitless ratio in the range [0, 1]. Can also pass
a percentage if proper units are attached.
Returns
-------
`pint.Quantity`
Dimensionless mixing ratio
Notes
-----
Formula adapted from [Hobbs1977]_ pg. 74.
.. math:: w = (relative_humidity)(w_s)
* :math:`w` is mixing ratio
* :math:`relative_humidity` is relative humidity as a unitless ratio
* :math:`w_s` is the saturation mixing ratio
See Also
--------
relative_humidity_from_mixing_ratio, saturation_mixing_ratio
"""
return (relative_humidity
* saturation_mixing_ratio(pressure, temperature)).to('dimensionless')
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[dimensionless]')
def relative_humidity_from_mixing_ratio(pressure, temperature, mixing_ratio):
r"""Calculate the relative humidity from mixing ratio, temperature, and pressure.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
mixing_ratio: `pint.Quantity`
Dimensionless mass mixing ratio
Returns
-------
`pint.Quantity`
Relative humidity
Notes
-----
Formula based on that from [Hobbs1977]_ pg. 74.
.. math:: relative_humidity = \frac{w}{w_s}
* :math:`relative_humidity` is relative humidity as a unitless ratio
* :math:`w` is mixing ratio
* :math:`w_s` is the saturation mixing ratio
See Also
--------
mixing_ratio_from_relative_humidity, saturation_mixing_ratio
"""
return mixing_ratio / saturation_mixing_ratio(pressure, temperature)
@exporter.export
@preprocess_xarray
@check_units('[dimensionless]')
def mixing_ratio_from_specific_humidity(specific_humidity):
r"""Calculate the mixing ratio from specific humidity.
Parameters
----------
specific_humidity: `pint.Quantity`
Specific humidity of air
Returns
-------
`pint.Quantity`
Mixing ratio
Notes
-----
Formula from [Salby1996]_ pg. 118.
.. math:: w = \frac{q}{1-q}
* :math:`w` is mixing ratio
* :math:`q` is the specific humidity
See Also
--------
mixing_ratio, specific_humidity_from_mixing_ratio
"""
try:
specific_humidity = specific_humidity.to('dimensionless')
except AttributeError:
pass
return specific_humidity / (1 - specific_humidity)
@exporter.export
@preprocess_xarray
@check_units('[dimensionless]')
def specific_humidity_from_mixing_ratio(mixing_ratio):
r"""Calculate the specific humidity from the mixing ratio.
Parameters
----------
mixing_ratio: `pint.Quantity`
mixing ratio
Returns
-------
`pint.Quantity`
Specific humidity
Notes
-----
Formula from [Salby1996]_ pg. 118.
.. math:: q = \frac{w}{1+w}
* :math:`w` is mixing ratio
* :math:`q` is the specific humidity
See Also
--------
mixing_ratio, mixing_ratio_from_specific_humidity
"""
try:
mixing_ratio = mixing_ratio.to('dimensionless')
except AttributeError:
pass
return mixing_ratio / (1 + mixing_ratio)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[dimensionless]')
def relative_humidity_from_specific_humidity(pressure, temperature, specific_humidity):
r"""Calculate the relative humidity from specific humidity, temperature, and pressure.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
specific_humidity: `pint.Quantity`
Specific humidity of air
Returns
-------
`pint.Quantity`
Relative humidity
Notes
-----
Formula based on that from [Hobbs1977]_ pg. 74. and [Salby1996]_ pg. 118.
.. math:: relative_humidity = \frac{q}{(1-q)w_s}
* :math:`relative_humidity` is relative humidity as a unitless ratio
* :math:`q` is specific humidity
* :math:`w_s` is the saturation mixing ratio
See Also
--------
relative_humidity_from_mixing_ratio
"""
return (mixing_ratio_from_specific_humidity(specific_humidity)
/ saturation_mixing_ratio(pressure, temperature))
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]', '[temperature]')
def cape_cin(pressure, temperature, dewpoint, parcel_profile, which_lfc='bottom',
which_el='top'):
r"""Calculate CAPE and CIN.
Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)
of a given upper air profile and parcel path. CIN is integrated between the surface and
LFC, CAPE is integrated between the LFC and EL (or top of sounding). Intersection points
of the measured temperature profile and parcel profile are logarithmically interpolated.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest, in order from highest to
lowest pressure.
temperature : `pint.Quantity`
The atmospheric temperature corresponding to pressure.
dewpoint : `pint.Quantity`
The atmospheric dewpoint corresponding to pressure.
parcel_profile : `pint.Quantity`
The temperature profile of the parcel.
which_lfc : str
Choose which LFC to integrate from. Valid options are 'top', 'bottom', 'wide',
and 'most_cape'. Default is 'bottom'.
which_el : str
Choose which EL to integrate to. Valid options are 'top', 'bottom', 'wide',
and 'most_cape'. Default is 'top'.
Returns
-------
`pint.Quantity`
Convective Available Potential Energy (CAPE).
`pint.Quantity`
Convective INhibition (CIN).
Notes
-----
Formula adopted from [Hobbs1977]_.
.. math:: \text{CAPE} = -R_d \int_{LFC}^{EL} (T_{parcel} - T_{env}) d\text{ln}(p)
.. math:: \text{CIN} = -R_d \int_{SFC}^{LFC} (T_{parcel} - T_{env}) d\text{ln}(p)
* :math:`CAPE` Convective available potential energy
* :math:`CIN` Convective inhibition
* :math:`LFC` Pressure of the level of free convection
* :math:`EL` Pressure of the equilibrium level
* :math:`SFC` Level of the surface or beginning of parcel path
* :math:`R_d` Gas constant
* :math:`g` Gravitational acceleration
* :math:`T_{parcel}` Parcel temperature
* :math:`T_{env}` Environment temperature
* :math:`p` Atmospheric pressure
See Also
--------
lfc, el
"""
pressure, temperature, dewpoint, parcel_profile = _remove_nans(pressure, temperature,
dewpoint, parcel_profile)
# Calculate LFC limit of integration
lfc_pressure, _ = lfc(pressure, temperature, dewpoint,
parcel_temperature_profile=parcel_profile, which=which_lfc)
# If there is no LFC, no need to proceed.
if np.isnan(lfc_pressure):
return 0 * units('J/kg'), 0 * units('J/kg')
else:
lfc_pressure = lfc_pressure.magnitude
# Calculate the EL limit of integration
el_pressure, _ = el(pressure, temperature, dewpoint,
parcel_temperature_profile=parcel_profile, which=which_el)
# No EL and we use the top reading of the sounding.
if np.isnan(el_pressure):
el_pressure = pressure[-1].magnitude
else:
el_pressure = el_pressure.magnitude
# Difference between the parcel path and measured temperature profiles
y = (parcel_profile - temperature).to(units.degK)
# Estimate zero crossings
x, y = _find_append_zero_crossings(np.copy(pressure), y)
# CAPE
# Only use data between the LFC and EL for calculation
p_mask = _less_or_close(x.m, lfc_pressure) & _greater_or_close(x.m, el_pressure)
x_clipped = x[p_mask].magnitude
y_clipped = y[p_mask].magnitude
cape = (mpconsts.Rd
* (np.trapz(y_clipped, np.log(x_clipped)) * units.degK)).to(units('J/kg'))
# CIN
# Only use data between the surface and LFC for calculation
p_mask = _greater_or_close(x.m, lfc_pressure)
x_clipped = x[p_mask].magnitude
y_clipped = y[p_mask].magnitude
cin = (mpconsts.Rd
* (np.trapz(y_clipped, np.log(x_clipped)) * units.degK)).to(units('J/kg'))
# Set CIN to 0 if it's returned as a positive value (#1190)
if cin > 0 * units('J/kg'):
cin = 0 * units('J/kg')
return cape, cin
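# Illustrative sketch (added here, not in the upstream module): integrate CAPE and
# CIN for a small, entirely hypothetical sounding; the parcel path comes from
# parcel_profile, converted to degC as in typical usage.
def _example_cape_cin():
    """CAPE and CIN for a made-up six-level sounding and its surface parcel."""
    p = np.array([1000., 900., 800., 700., 600., 500.]) * units.hPa
    t = np.array([30., 24., 17., 10., 0., -10.]) * units.degC
    td = np.array([24., 20., 14., 4., -10., -25.]) * units.degC
    prof = parcel_profile(p, t[0], td[0]).to('degC')
    return cape_cin(p, t, td, prof)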
def _find_append_zero_crossings(x, y):
r"""
Find and interpolate zero crossings.
Estimate the zero crossings of an x,y series and add estimated crossings to series,
returning a sorted array with no duplicate values.
Parameters
----------
x : `pint.Quantity`
x values of data
y : `pint.Quantity`
y values of data
Returns
-------
x : `pint.Quantity`
x values of data
y : `pint.Quantity`
y values of data
"""
crossings = find_intersections(x[1:], y[1:], np.zeros_like(y[1:]) * y.units, log_x=True)
x = concatenate((x, crossings[0]))
y = concatenate((y, crossings[1]))
# Resort so that data are in order
sort_idx = np.argsort(x)
x = x[sort_idx]
y = y[sort_idx]
# Remove duplicate data points if there are any
keep_idx = np.ediff1d(x.magnitude, to_end=[1]) > 1e-6
x = x[keep_idx]
y = y[keep_idx]
return x, y
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def most_unstable_parcel(pressure, temperature, dewpoint, height=None,
bottom=None, depth=300 * units.hPa):
"""
Determine the most unstable parcel in a layer.
Determines the most unstable parcel of air by calculating the equivalent
potential temperature and finding its maximum in the specified layer.
Parameters
----------
pressure: `pint.Quantity`
Atmospheric pressure profile
temperature: `pint.Quantity`
Atmospheric temperature profile
dewpoint: `pint.Quantity`
Atmospheric dewpoint profile
height: `pint.Quantity`, optional
Atmospheric height profile. Standard atmosphere assumed when None (the default).
bottom: `pint.Quantity`, optional
Bottom of the layer to consider for the calculation in pressure or height.
Defaults to using the bottom pressure or height.
depth: `pint.Quantity`, optional
Depth of the layer to consider for the calculation in pressure or height. Defaults
to 300 hPa.
Returns
-------
`pint.Quantity`
Pressure, temperature, and dewpoint of most unstable parcel in the profile.
integer
Index of the most unstable parcel in the given profile
See Also
--------
get_layer
"""
p_layer, t_layer, td_layer = get_layer(pressure, temperature, dewpoint, bottom=bottom,
depth=depth, height=height, interpolate=False)
theta_e = equivalent_potential_temperature(p_layer, t_layer, td_layer)
max_idx = np.argmax(theta_e)
return p_layer[max_idx], t_layer[max_idx], td_layer[max_idx], max_idx
@exporter.export
@preprocess_xarray
@check_units('[temperature]', '[pressure]', '[temperature]')
def isentropic_interpolation(levels, pressure, temperature, *args, axis=0,
temperature_out=False, max_iters=50, eps=1e-6,
bottom_up_search=True, **kwargs):
r"""Interpolate data in isobaric coordinates to isentropic coordinates.
Parameters
----------
levels : array
One-dimensional array of desired potential temperature surfaces
pressure : array
One-dimensional array of pressure levels
temperature : array
Array of temperature
axis : int, optional
The axis corresponding to the vertical in the temperature array, defaults to 0.
temperature_out : bool, optional
If true, will calculate temperature and output as the last item in the output list.
Defaults to False.
max_iters : int, optional
The maximum number of iterations to use in calculation, defaults to 50.
eps : float, optional
The desired absolute error in the calculated value, defaults to 1e-6.
bottom_up_search : bool, optional
Controls whether to search for levels bottom-up, or top-down. Defaults to
True, which is bottom-up search.
args : array, optional
Any additional variables will be interpolated to each isentropic level.
Returns
-------
list
List with pressure at each isentropic level, followed by each additional
argument interpolated to isentropic coordinates.
Notes
-----
Input variable arrays must have the same number of vertical levels as the pressure levels
array. Pressure is calculated on isentropic surfaces by assuming that temperature varies
linearly with the natural log of pressure. Linear interpolation is then used in the
vertical to find the pressure at each isentropic level. Interpolation method from
[Ziv1994]_. Any additional arguments are assumed to vary linearly with temperature and will
be linearly interpolated to the new isentropic levels.
See Also
--------
potential_temperature
"""
# iteration function to be used later
# Calculates theta from linearly interpolated temperature and solves for pressure
def _isen_iter(iter_log_p, isentlevs_nd, ka, a, b, pok):
exner = pok * np.exp(-ka * iter_log_p)
t = a * iter_log_p + b
# Newton-Raphson iteration
f = isentlevs_nd - t * exner
fp = exner * (ka * t - a)
return iter_log_p - (f / fp)
# Get dimensions in temperature
ndim = temperature.ndim
# Convert units
pres = pressure.to('hPa')
temperature = temperature.to('kelvin')
slices = [np.newaxis] * ndim
slices[axis] = slice(None)
slices = tuple(slices)
pres = np.broadcast_to(pres[slices].magnitude, temperature.shape) * pres.units
# Sort input data
sort_pres = np.argsort(pres.m, axis=axis)
sort_pres = np.swapaxes(np.swapaxes(sort_pres, 0, axis)[::-1], 0, axis)
sorter = broadcast_indices(pres, sort_pres, ndim, axis)
levs = pres[sorter]
tmpk = temperature[sorter]
levels = np.asarray(levels.m_as('kelvin')).reshape(-1)
isentlevels = levels[np.argsort(levels)]
# Make the desired isentropic levels the same shape as temperature
shape = list(temperature.shape)
shape[axis] = isentlevels.size
isentlevs_nd = np.broadcast_to(isentlevels[slices], shape)
# exponent to Poisson's Equation, which is imported above
ka = mpconsts.kappa.m_as('dimensionless')
# calculate theta for each point
pres_theta = potential_temperature(levs, tmpk)
# Raise error if input theta level is larger than pres_theta max
if np.max(pres_theta.m) < np.max(levels):
raise ValueError('Input theta level out of data bounds')
# Find log of pressure to implement assumption of linear temperature dependence on
# ln(p)
log_p = np.log(levs.m)
# Calculations for interpolation routine
pok = mpconsts.P0 ** ka
# index values for each point for the pressure level nearest to the desired theta level
above, below, good = find_bounding_indices(pres_theta.m, levels, axis,
from_below=bottom_up_search)
# calculate constants for the interpolation
a = (tmpk.m[above] - tmpk.m[below]) / (log_p[above] - log_p[below])
b = tmpk.m[above] - a * log_p[above]
# calculate first guess for interpolation
isentprs = 0.5 * (log_p[above] + log_p[below])
# Make sure we ignore any nans in the data for solving; checking a is enough since it
# combines log_p and tmpk.
good &= ~np.isnan(a)
# iterative interpolation using scipy.optimize.fixed_point and _isen_iter defined above
log_p_solved = so.fixed_point(_isen_iter, isentprs[good],
args=(isentlevs_nd[good], ka, a[good], b[good], pok.m),
xtol=eps, maxiter=max_iters)
# get back pressure from log p
isentprs[good] = np.exp(log_p_solved)
# Mask out points we know are bad as well as points that are beyond the max pressure
isentprs[~(good & _less_or_close(isentprs, np.max(pres.m)))] = np.nan
# create list for storing output data
ret = [isentprs * units.hPa]
# if temperature_out = true, calculate temperature and output as last item in list
if temperature_out:
ret.append((isentlevs_nd / ((mpconsts.P0.m / isentprs) ** ka)) * units.kelvin)
# do an interpolation for each additional argument
if args:
others = interpolate_1d(isentlevels, pres_theta.m, *(arr[sorter] for arr in args),
axis=axis, return_list_always=True)
ret.extend(others)
return ret
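# Illustrative usage sketch (editor's addition, not part of the library): a minimal 1-D
# call to isentropic_interpolation, relying on the module-level `np` and `units` imports.
# The hypothetical sounding below is chosen so the requested theta surfaces fall strictly
# inside the data's potential-temperature range.
def _example_isentropic_interpolation():  # hypothetical helper, for illustration only
    theta_levels = np.array([300., 305., 310.]) * units.kelvin
    pressure = np.array([1000., 850., 700., 500., 300.]) * units.hPa
    temperature = np.array([296., 290., 283., 263., 229.]) * units.kelvin
    rel_hum = np.array([85., 70., 60., 40., 20.]) * units.percent
    # Returns [pressure on theta surfaces, temperature (since temperature_out=True),
    # relative humidity interpolated to the theta surfaces]
    return isentropic_interpolation(theta_levels, pressure, temperature, rel_hum,
                                    temperature_out=True)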
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def surface_based_cape_cin(pressure, temperature, dewpoint):
r"""Calculate surface-based CAPE and CIN.
Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)
of a given upper air profile for a surface-based parcel. CIN is integrated
between the surface and LFC, CAPE is integrated between the LFC and EL (or top of
sounding). Intersection points of the measured temperature profile and parcel profile are
logarithmically interpolated.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile. The first entry should be the starting
(surface) observation, with the array going from high to low pressure.
temperature : `pint.Quantity`
Temperature profile corresponding to the `pressure` profile.
dewpoint : `pint.Quantity`
Dewpoint profile corresponding to the `pressure` profile.
Returns
-------
`pint.Quantity`
Surface based Convective Available Potential Energy (CAPE).
`pint.Quantity`
Surface based Convective INhibition (CIN).
See Also
--------
cape_cin, parcel_profile
"""
pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)
p, t, td, profile = parcel_profile_with_lcl(pressure, temperature, dewpoint)
return cape_cin(p, t, td, profile)
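# Illustrative usage sketch (editor's addition): a small, hypothetical sounding fed to
# surface_based_cape_cin. Pressure must be ordered surface-first (high to low), and the
# first element of each array is taken as the surface observation.
def _example_surface_based_cape_cin():  # hypothetical helper, for illustration only
    p = np.array([1000., 925., 850., 700., 500., 400., 300.]) * units.hPa
    t = np.array([28., 24., 19., 8., -12., -25., -40.]) * units.degC
    td = np.array([23., 21., 16., 2., -20., -35., -50.]) * units.degC
    cape, cin = surface_based_cape_cin(p, t, td)
    return cape, cin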
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def most_unstable_cape_cin(pressure, temperature, dewpoint, **kwargs):
r"""Calculate most unstable CAPE/CIN.
Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)
of a given upper air profile and most unstable parcel path. CIN is integrated between the
surface and LFC, CAPE is integrated between the LFC and EL (or top of sounding).
Intersection points of the measured temperature profile and parcel profile are
logarithmically interpolated.
Parameters
----------
pressure : `pint.Quantity`
Pressure profile
temperature : `pint.Quantity`
Temperature profile
dewpoint : `pint.Quantity`
Dew point profile
kwargs
Additional keyword arguments to pass to `most_unstable_parcel`
Returns
-------
`pint.Quantity`
Most unstable Convective Available Potential Energy (CAPE).
`pint.Quantity`
Most unstable Convective INhibition (CIN).
See Also
--------
cape_cin, most_unstable_parcel, parcel_profile
"""
pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)
_, _, _, parcel_idx = most_unstable_parcel(pressure, temperature, dewpoint, **kwargs)
p, t, td, mu_profile = parcel_profile_with_lcl(pressure[parcel_idx:],
temperature[parcel_idx:],
dewpoint[parcel_idx:])
return cape_cin(p, t, td, mu_profile)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def mixed_layer_cape_cin(pressure, temperature, dewpoint, **kwargs):
r"""Calculate mixed-layer CAPE and CIN.
Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)
of a given upper air profile and mixed-layer parcel path. CIN is integrated between the
surface and LFC, CAPE is integrated between the LFC and EL (or top of sounding).
Intersection points of the measured temperature profile and parcel profile are
logarithmically interpolated. Kwargs for `mixed_parcel` can be provided, such as `depth`.
Default mixed-layer depth is 100 hPa.
Parameters
----------
pressure : `pint.Quantity`
Pressure profile
temperature : `pint.Quantity`
Temperature profile
dewpoint : `pint.Quantity`
Dewpoint profile
kwargs
Additional keyword arguments to pass to `mixed_parcel`
Returns
-------
`pint.Quantity`
Mixed-layer Convective Available Potential Energy (CAPE).
`pint.Quantity`
Mixed-layer Convective INhibition (CIN).
See Also
--------
cape_cin, mixed_parcel, parcel_profile
"""
depth = kwargs.get('depth', 100 * units.hPa)
parcel_pressure, parcel_temp, parcel_dewpoint = mixed_parcel(pressure, temperature,
dewpoint, **kwargs)
# Remove values below top of mixed layer and add in the mixed layer values
pressure_prof = pressure[pressure < (pressure[0] - depth)]
temp_prof = temperature[pressure < (pressure[0] - depth)]
dew_prof = dewpoint[pressure < (pressure[0] - depth)]
pressure_prof = concatenate([parcel_pressure, pressure_prof])
temp_prof = concatenate([parcel_temp, temp_prof])
dew_prof = concatenate([parcel_dewpoint, dew_prof])
p, t, td, ml_profile = parcel_profile_with_lcl(pressure_prof, temp_prof, dew_prof)
return cape_cin(p, t, td, ml_profile)
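# Illustrative usage sketch (editor's addition): same call pattern as the other *_cape_cin
# helpers, but here the lowest 50 hPa are mixed first by forwarding `depth` to
# mixed_parcel. Sounding values are hypothetical.
def _example_mixed_layer_cape_cin():  # hypothetical helper, for illustration only
    p = np.array([1000., 950., 900., 850., 700., 500., 300.]) * units.hPa
    t = np.array([27., 24., 21., 18., 8., -12., -40.]) * units.degC
    td = np.array([22., 20., 18., 15., 2., -20., -50.]) * units.degC
    return mixed_layer_cape_cin(p, t, td, depth=50 * units.hPa)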
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def mixed_parcel(pressure, temperature, dewpoint, parcel_start_pressure=None,
height=None, bottom=None, depth=100 * units.hPa, interpolate=True):
r"""Calculate the properties of a parcel mixed from a layer.
Determines the properties of an air parcel that is the result of complete mixing of a
given atmospheric layer.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
temperature : `pint.Quantity`
Atmospheric temperature profile
dewpoint : `pint.Quantity`
Atmospheric dewpoint profile
parcel_start_pressure : `pint.Quantity`, optional
Pressure at which the mixed parcel should begin (default None)
height: `pint.Quantity`, optional
Atmospheric heights corresponding to the given pressures (default None)
bottom : `pint.Quantity`, optional
The bottom of the layer as a pressure or height above the surface pressure
(default None)
depth : `pint.Quantity`, optional
The thickness of the layer as a pressure or height above the bottom of the layer
(default 100 hPa)
interpolate : bool, optional
Interpolate the top and bottom points if they are not in the given data
Returns
-------
`pint.Quantity`
The pressure of the mixed parcel
`pint.Quantity`
The temperature of the mixed parcel
`pint.Quantity`
The dewpoint of the mixed parcel
"""
# If a parcel starting pressure is not provided, use the surface
if not parcel_start_pressure:
parcel_start_pressure = pressure[0]
# Calculate the potential temperature and mixing ratio over the layer
theta = potential_temperature(pressure, temperature)
mixing_ratio = saturation_mixing_ratio(pressure, dewpoint)
# Mix the variables over the layer
mean_theta, mean_mixing_ratio = mixed_layer(pressure, theta, mixing_ratio, bottom=bottom,
height=height, depth=depth,
interpolate=interpolate)
# Convert back to temperature
mean_temperature = mean_theta * exner_function(parcel_start_pressure)
# Convert back to dewpoint
mean_vapor_pressure = vapor_pressure(parcel_start_pressure, mean_mixing_ratio)
# Using globals() here allows us to keep the dewpoint parameter but still call the
# function of the same name.
mean_dewpoint = globals()['dewpoint'](mean_vapor_pressure)
return (parcel_start_pressure, mean_temperature.to(temperature.units),
mean_dewpoint.to(dewpoint.units))
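# Illustrative usage sketch (editor's addition): completely mixing the lowest 100 hPa of a
# hypothetical sounding and returning the resulting parcel's pressure, temperature, and
# dewpoint.
def _example_mixed_parcel():  # hypothetical helper, for illustration only
    p = np.array([1000., 975., 950., 900., 850., 800.]) * units.hPa
    t = np.array([25., 24., 22., 20., 17., 15.]) * units.degC
    td = np.array([20., 19., 18., 16., 13., 11.]) * units.degC
    return mixed_parcel(p, t, td, depth=100 * units.hPa)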
@exporter.export
@preprocess_xarray
@check_units('[pressure]')
def mixed_layer(pressure, *args, height=None, bottom=None, depth=100 * units.hPa,
interpolate=True):
r"""Mix variable(s) over a layer, yielding a mass-weighted average.
This function will integrate a data variable with respect to pressure and determine the
average value using the mean value theorem.
Parameters
----------
pressure : array-like
Atmospheric pressure profile
datavar : array-like
Atmospheric variable measured at the given pressures
height: array-like, optional
Atmospheric heights corresponding to the given pressures (default None)
bottom : `pint.Quantity`, optional
The bottom of the layer as a pressure or height above the surface pressure
(default None)
depth : `pint.Quantity`, optional
The thickness of the layer as a pressure or height above the bottom of the layer
(default 100 hPa)
interpolate : bool, optional
Interpolate the top and bottom points if they are not in the given data (default True)
Returns
-------
`pint.Quantity`
The mixed value of the data variable.
"""
layer = get_layer(pressure, *args, height=height, bottom=bottom,
depth=depth, interpolate=interpolate)
p_layer = layer[0]
datavars_layer = layer[1:]
ret = []
for datavar_layer in datavars_layer:
actual_depth = abs(p_layer[0] - p_layer[-1])
ret.append((-1. / actual_depth.m) * np.trapz(datavar_layer.m, p_layer.m)
* datavar_layer.units)
return ret
@exporter.export
@preprocess_xarray
@check_units('[length]', '[temperature]')
def dry_static_energy(height, temperature):
r"""Calculate the dry static energy of parcels.
This function will calculate the dry static energy following the first two terms of
equation 3.72 in [Hobbs2006]_.
Notes
-----
.. math:: \text{dry static energy} = c_{pd} T + g z
* :math:`T` is temperature
* :math:`z` is height
Parameters
----------
height : `pint.Quantity`
Atmospheric height
temperature : `pint.Quantity`
Air temperature
Returns
-------
`pint.Quantity`
The dry static energy
"""
return (mpconsts.g * height + mpconsts.Cp_d * temperature).to('kJ/kg')
@exporter.export
@preprocess_xarray
@check_units('[length]', '[temperature]', '[dimensionless]')
def moist_static_energy(height, temperature, specific_humidity):
r"""Calculate the moist static energy of parcels.
This function will calculate the moist static energy following
equation 3.72 in [Hobbs2006]_.
Notes
-----
.. math:: \text{moist static energy} = c_{pd} T + g z + L_v q
* :math:`T` is temperature
* :math:`z` is height
* :math:`q` is specific humidity
Parameters
----------
height : `pint.Quantity`
Atmospheric height
temperature : `pint.Quantity`
Air temperature
specific_humidity : `pint.Quantity`
Atmospheric specific humidity
Returns
-------
`pint.Quantity`
The moist static energy
"""
return (dry_static_energy(height, temperature)
+ mpconsts.Lv * specific_humidity.to('dimensionless')).to('kJ/kg')
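# Illustrative usage sketch (editor's addition): moist static energy of a single,
# hypothetical near-surface parcel (height above a reference level, temperature, and
# specific humidity); the result comes back in kJ/kg.
def _example_moist_static_energy():  # hypothetical helper, for illustration only
    return moist_static_energy(100. * units.meter, 298. * units.kelvin,
                               12. * units('g/kg'))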
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def thickness_hydrostatic(pressure, temperature, mixing_ratio=None,
molecular_weight_ratio=mpconsts.epsilon, bottom=None, depth=None):
r"""Calculate the thickness of a layer via the hypsometric equation.
This thickness calculation uses the pressure and temperature profiles (and optionally
mixing ratio) via the hypsometric equation with virtual temperature adjustment
.. math:: Z_2 - Z_1 = -\frac{R_d}{g} \int_{p_1}^{p_2} T_v d\ln p,
which is based off of Equation 3.24 in [Hobbs2006]_.
This assumes a hydrostatic atmosphere.
Layer bottom and depth specified in pressure.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
temperature : `pint.Quantity`
Atmospheric temperature profile
mixing_ratio : `pint.Quantity`, optional
Profile of dimensionless mass mixing ratio. If none is given, virtual temperature
is simply set to be the given temperature.
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\epsilon\approx0.622`).
bottom : `pint.Quantity`, optional
The bottom of the layer in pressure. Defaults to the first observation.
depth : `pint.Quantity`, optional
The depth of the layer in hPa. Defaults to the full profile if bottom is not given,
and 100 hPa if bottom is given.
Returns
-------
`pint.Quantity`
The thickness of the layer in meters.
See Also
--------
thickness_hydrostatic_from_relative_humidity, pressure_to_height_std, virtual_temperature
"""
# Get the data for the layer, conditional upon bottom/depth being specified and mixing
# ratio being given
if bottom is None and depth is None:
if mixing_ratio is None:
layer_p, layer_virttemp = pressure, temperature
else:
layer_p = pressure
layer_virttemp = virtual_temperature(temperature, mixing_ratio,
molecular_weight_ratio)
else:
if mixing_ratio is None:
layer_p, layer_virttemp = get_layer(pressure, temperature, bottom=bottom,
depth=depth)
else:
layer_p, layer_temp, layer_w = get_layer(pressure, temperature, mixing_ratio,
bottom=bottom, depth=depth)
layer_virttemp = virtual_temperature(layer_temp, layer_w, molecular_weight_ratio)
# Take the integral (with unit handling) and return the result in meters
return (- mpconsts.Rd / mpconsts.g * np.trapz(
layer_virttemp.m_as('K'), x=np.log(layer_p.m_as('hPa'))) * units.K).to('m')
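# Illustrative usage sketch (editor's addition): 1000-500 hPa thickness from a coarse,
# hypothetical temperature profile; with no mixing ratio given, no virtual-temperature
# correction is applied.
def _example_thickness_hydrostatic():  # hypothetical helper, for illustration only
    p = np.array([1000., 850., 700., 500.]) * units.hPa
    t = np.array([288., 278., 268., 251.]) * units.kelvin
    return thickness_hydrostatic(p, t)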
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def thickness_hydrostatic_from_relative_humidity(pressure, temperature, relative_humidity,
bottom=None, depth=None):
r"""Calculate the thickness of a layer given pressure, temperature and relative humidity.
Similar to ``thickness_hydrostatic``, this thickness calculation uses the pressure,
temperature, and relative humidity profiles via the hypsometric equation with virtual
temperature adjustment.
.. math:: Z_2 - Z_1 = -\frac{R_d}{g} \int_{p_1}^{p_2} T_v d\ln p,
which is based off of Equation 3.24 in [Hobbs2006]_. Virtual temperature is calculated
from the profiles of temperature and relative humidity.
This assumes a hydrostatic atmosphere.
Layer bottom and depth specified in pressure.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
temperature : `pint.Quantity`
Atmospheric temperature profile
relative_humidity : `pint.Quantity`
Atmospheric relative humidity profile. The relative humidity is expressed as a
unitless ratio in the range [0, 1]. Can also pass a percentage if proper units are
attached.
bottom : `pint.Quantity`, optional
The bottom of the layer in pressure. Defaults to the first observation.
depth : `pint.Quantity`, optional
The depth of the layer in hPa. Defaults to the full profile if bottom is not given,
and 100 hPa if bottom is given.
Returns
-------
`pint.Quantity`
The thickness of the layer in meters.
See Also
--------
thickness_hydrostatic, pressure_to_height_std, virtual_temperature,
mixing_ratio_from_relative_humidity
"""
mixing = mixing_ratio_from_relative_humidity(pressure, temperature, relative_humidity)
return thickness_hydrostatic(pressure, temperature, mixing_ratio=mixing, bottom=bottom,
depth=depth)
@exporter.export
@preprocess_xarray
@check_units('[length]', '[temperature]')
def brunt_vaisala_frequency_squared(height, potential_temperature, axis=0):
r"""Calculate the square of the Brunt-Vaisala frequency.
Brunt-Vaisala frequency squared (a measure of atmospheric stability) is given by the
formula:
.. math:: N^2 = \frac{g}{\theta} \frac{d\theta}{dz}
This formula is based off of Equations 3.75 and 3.77 in [Hobbs2006]_.
Parameters
----------
height : `pint.Quantity`
One-dimensional profile of atmospheric height
potential_temperature : `pint.Quantity`
Atmospheric potential temperature
axis : int, optional
The axis corresponding to vertical in the potential temperature array, defaults to 0.
Returns
-------
`pint.Quantity`
The square of the Brunt-Vaisala frequency.
See Also
--------
brunt_vaisala_frequency, brunt_vaisala_period, potential_temperature
"""
# Ensure validity of temperature units
potential_temperature = potential_temperature.to('K')
# Calculate and return the square of Brunt-Vaisala frequency
return mpconsts.g / potential_temperature * first_derivative(potential_temperature,
x=height, axis=axis)
@exporter.export
@preprocess_xarray
@check_units('[length]', '[temperature]')
def brunt_vaisala_frequency(height, potential_temperature, axis=0):
r"""Calculate the Brunt-Vaisala frequency.
This function will calculate the Brunt-Vaisala frequency as follows:
.. math:: N = \left( \frac{g}{\theta} \frac{d\theta}{dz} \right)^\frac{1}{2}
This formula is based off of Equations 3.75 and 3.77 in [Hobbs2006]_.
This function is a wrapper for `brunt_vaisala_frequency_squared` that filters out negative
(unstable) quantities and takes the square root.
Parameters
----------
height : `pint.Quantity`
One-dimensional profile of atmospheric height
potential_temperature : `pint.Quantity`
Atmospheric potential temperature
axis : int, optional
The axis corresponding to vertical in the potential temperature array, defaults to 0.
Returns
-------
`pint.Quantity`
Brunt-Vaisala frequency.
See Also
--------
brunt_vaisala_frequency_squared, brunt_vaisala_period, potential_temperature
"""
bv_freq_squared = brunt_vaisala_frequency_squared(height, potential_temperature,
axis=axis)
bv_freq_squared[bv_freq_squared.magnitude < 0] = np.nan
return np.sqrt(bv_freq_squared)
@exporter.export
@preprocess_xarray
@check_units('[length]', '[temperature]')
def brunt_vaisala_period(height, potential_temperature, axis=0):
r"""Calculate the Brunt-Vaisala period.
This function is a helper function for `brunt_vaisala_frequency` that calculates the
period of oscillation as in Exercise 3.13 of [Hobbs2006]_:
.. math:: \tau = \frac{2\pi}{N}
Returns `NaN` when :math:`N^2 \le 0` (i.e., for unstable or neutral stratification).
Parameters
----------
height : `pint.Quantity`
One-dimensional profile of atmospheric height
potential_temperature : `pint.Quantity`
Atmospheric potential temperature
axis : int, optional
The axis corresponding to vertical in the potential temperature array, defaults to 0.
Returns
-------
`pint.Quantity`
Brunt-Vaisala period.
See Also
--------
brunt_vaisala_frequency, brunt_vaisala_frequency_squared, potential_temperature
"""
bv_freq_squared = brunt_vaisala_frequency_squared(height, potential_temperature,
axis=axis)
bv_freq_squared[bv_freq_squared.magnitude <= 0] = np.nan
return 2 * np.pi / np.sqrt(bv_freq_squared)
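# Illustrative usage sketch (editor's addition): Brunt-Vaisala frequency and period for a
# hypothetical, statically stable potential-temperature profile (theta increasing with
# height, so N^2 > 0 everywhere and no NaNs are produced).
def _example_brunt_vaisala():  # hypothetical helper, for illustration only
    height = np.array([0., 500., 1000., 1500., 2000.]) * units.meter
    theta = np.array([290., 292., 294., 296., 298.]) * units.kelvin
    return (brunt_vaisala_frequency(height, theta),
            brunt_vaisala_period(height, theta))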
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def wet_bulb_temperature(pressure, temperature, dewpoint):
"""Calculate the wet-bulb temperature using Normand's rule.
This function calculates the wet-bulb temperature using the Normand method: the LCL is
computed, and that parcel is then brought back down to the starting pressure along a
moist adiabat. The Normand method (and others) is described and compared by [Knox2017]_.
Parameters
----------
pressure : `pint.Quantity`
Initial atmospheric pressure
temperature : `pint.Quantity`
Initial atmospheric temperature
dewpoint : `pint.Quantity`
Initial atmospheric dewpoint
Returns
-------
`pint.Quantity`
Wet-bulb temperature
See Also
--------
lcl, moist_lapse
"""
if not hasattr(pressure, 'shape'):
pressure = np.atleast_1d(pressure)
temperature = np.atleast_1d(temperature)
dewpoint = np.atleast_1d(dewpoint)
it = np.nditer([pressure, temperature, dewpoint, None],
op_dtypes=['float', 'float', 'float', 'float'],
flags=['buffered'])
for press, temp, dewp, ret in it:
press = press * pressure.units
temp = temp * temperature.units
dewp = dewp * dewpoint.units
lcl_pressure, lcl_temperature = lcl(press, temp, dewp)
moist_adiabat_temperatures = moist_lapse(concatenate([lcl_pressure, press]),
lcl_temperature)
ret[...] = moist_adiabat_temperatures[-1].magnitude
# If we started with a scalar, return a scalar
if it.operands[3].size == 1:
return it.operands[3][0] * moist_adiabat_temperatures.units
return it.operands[3] * moist_adiabat_temperatures.units
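# Illustrative usage sketch (editor's addition): scalar inputs are handled via
# np.atleast_1d above, so a single observation can be passed directly; for these
# hypothetical values the result is roughly 18 degC.
def _example_wet_bulb_temperature():  # hypothetical helper, for illustration only
    return wet_bulb_temperature(1000. * units.hPa, 25. * units.degC, 15. * units.degC)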
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def static_stability(pressure, temperature, axis=0):
r"""Calculate the static stability within a vertical profile.
.. math:: \sigma = -\frac{RT}{p} \frac{\partial \ln \theta}{\partial p}
This formula is based on equation 4.3.6 in [Bluestein1992]_.
Parameters
----------
pressure : `pint.Quantity`
Profile of atmospheric pressure
temperature : `pint.Quantity`
Profile of temperature
axis : int, optional
The axis corresponding to vertical in the pressure and temperature arrays, defaults
to 0.
Returns
-------
`pint.Quantity`
The profile of static stability.
"""
theta = potential_temperature(pressure, temperature)
return - mpconsts.Rd * temperature / pressure * first_derivative(np.log(theta.m_as('K')),
x=pressure, axis=axis)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[dimensionless]')
def dewpoint_from_specific_humidity(pressure, temperature, specific_humidity):
r"""Calculate the dewpoint from specific humidity, temperature, and pressure.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
specific_humidity: `pint.Quantity`
Specific humidity of air
Returns
-------
`pint.Quantity`
Dew point temperature
See Also
--------
relative_humidity_from_mixing_ratio, dewpoint_from_relative_humidity
"""
return dewpoint_from_relative_humidity(temperature,
relative_humidity_from_specific_humidity(
pressure, temperature, specific_humidity))
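# Illustrative usage sketch (editor's addition): dewpoint of hypothetical near-surface air
# with 10 g/kg specific humidity at 1000 hPa and 20 degC.
def _example_dewpoint_from_specific_humidity():  # hypothetical helper, illustration only
    return dewpoint_from_specific_humidity(1000. * units.hPa, 20. * units.degC,
                                           10. * units('g/kg'))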
@exporter.export
@preprocess_xarray
@check_units('[length]/[time]', '[pressure]', '[temperature]')
def vertical_velocity_pressure(w, pressure, temperature, mixing_ratio=0):
r"""Calculate omega from w assuming hydrostatic conditions.
This function converts vertical velocity with respect to height
:math:`\left(w = \frac{Dz}{Dt}\right)` to that
with respect to pressure :math:`\left(\omega = \frac{Dp}{Dt}\right)`
assuming hydrostatic conditions on the synoptic scale.
By Equation 7.33 in [Hobbs2006]_,
.. math:: \omega \simeq -\rho g w
Density (:math:`\rho`) is calculated using the :func:`density` function,
from the given pressure and temperature. If `mixing_ratio` is given, the virtual
temperature correction is used, otherwise, dry air is assumed.
Parameters
----------
w: `pint.Quantity`
Vertical velocity in terms of height
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
mixing_ratio: `pint.Quantity`, optional
Mixing ratio of air
Returns
-------
`pint.Quantity`
Vertical velocity in terms of pressure (in Pascals / second)
See Also
--------
density, vertical_velocity
"""
rho = density(pressure, temperature, mixing_ratio)
return (-mpconsts.g * rho * w).to('Pa/s')
@exporter.export
@preprocess_xarray
@check_units('[pressure]/[time]', '[pressure]', '[temperature]')
def vertical_velocity(omega, pressure, temperature, mixing_ratio=0):
r"""Calculate w from omega assuming hydrostatic conditions.
This function converts vertical velocity with respect to pressure
:math:`\left(\omega = \frac{Dp}{Dt}\right)` to that with respect to height
:math:`\left(w = \frac{Dz}{Dt}\right)` assuming hydrostatic conditions on
the synoptic scale. By Equation 7.33 in [Hobbs2006]_,
.. math:: \omega \simeq -\rho g w
so that
.. math:: w \simeq \frac{- \omega}{\rho g}
Density (:math:`\rho`) is calculated using the :func:`density` function,
from the given pressure and temperature. If `mixing_ratio` is given, the virtual
temperature correction is used, otherwise, dry air is assumed.
Parameters
----------
omega: `pint.Quantity`
Vertical velocity in terms of pressure
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
mixing_ratio: `pint.Quantity`, optional
Mixing ratio of air
Returns
-------
`pint.Quantity`
Vertical velocity in terms of height (in meters / second)
See Also
--------
density, vertical_velocity_pressure
"""
rho = density(pressure, temperature, mixing_ratio)
return (omega / (- mpconsts.g * rho)).to('m/s')
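# Illustrative usage sketch (editor's addition): converting a hypothetical synoptic-scale
# omega of -0.2 Pa/s (rising motion) to w at 500 hPa and -20 degC, assuming dry air
# (mixing_ratio defaults to 0).
def _example_vertical_velocity():  # hypothetical helper, for illustration only
    return vertical_velocity(-0.2 * units('Pa/s'), 500. * units.hPa, 253. * units.kelvin)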
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def specific_humidity_from_dewpoint(pressure, dewpoint):
r"""Calculate the specific humidity from the dewpoint temperature and pressure.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
dewpoint: `pint.Quantity`
Dewpoint temperature
Returns
-------
`pint.Quantity`
Specific humidity
See Also
--------
mixing_ratio, saturation_mixing_ratio
"""
mixing_ratio = saturation_mixing_ratio(pressure, dewpoint)
return specific_humidity_from_mixing_ratio(mixing_ratio)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def lifted_index(pressure, temperature, parcel_profile):
"""Calculate the Lifted Index from the pressure, temperature, and parcel profile.
Lifted index formula derived from [Galway1956]_ and referenced by [Doswell-Schultz2006]_:
LI = T500 - Tp500
where:
T500 is the measured temperature at 500 hPa.
Tp500 is the temperature of the lifted parcel at 500 hPa.
The lifted index is defined as the temperature difference between the observed 500 hPa
temperature and the temperature of a parcel lifted from the surface to 500 hPa.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest, in order from highest to
lowest pressure.
temperature : `pint.Quantity`
The atmospheric temperature corresponding to pressure.
parcel_profile : `pint.Quantity`
The temperature profile of the parcel.
Returns
-------
`pint.Quantity`
Lifted Index.
"""
# find the index for the 500 hPa pressure level.
idx = np.where(pressure == 500 * units.hPa)
# find the measured temperature at 500 hPa.
T500 = temperature[idx]
# find the parcel profile temperature at 500 hPa.
Tp500 = parcel_profile[idx]
# calculate the lifted index.
lifted_index = T500 - Tp500.to(units.degC)
return lifted_index
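# Illustrative usage sketch (editor's addition): the pressure array must contain 500 hPa
# exactly, because lifted_index looks the level up by equality. The hypothetical parcel
# profile is built with parcel_profile from the surface observation.
def _example_lifted_index():  # hypothetical helper, for illustration only
    p = np.array([1000., 850., 700., 500., 300.]) * units.hPa
    t = np.array([25., 15., 5., -15., -45.]) * units.degC
    td = np.array([20., 12., 0., -25., -60.]) * units.degC
    prof = parcel_profile(p, t[0], td[0]).to('degC')
    return lifted_index(p, t, prof)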
@exporter.export
@preprocess_xarray
@check_units('[length]', '[temperature]', '[speed]', '[speed]')
def gradient_richardson_number(height, potential_temperature, u, v, axis=0):
r"""Calculate the gradient (or flux) Richardson number.
.. math:: Ri = \frac{g}{\theta} \frac{\partial \theta/\partial z}{\left(\partial u/\partial z\right)^2 + \left(\partial v/\partial z\right)^2}
See [Holton2004]_ pg. 121-122. As noted by [Holton2004]_, flux Richardson
number values below 0.25 indicate turbulence.
Parameters
----------
height : `pint.Quantity`
Atmospheric height
potential_temperature : `pint.Quantity`
Atmospheric potential temperature
u : `pint.Quantity`
x component of the wind
v : `pint.Quantity`
y component of the wind
axis : int, optional
The axis corresponding to vertical, defaults to 0.
Returns
-------
`pint.Quantity`
Gradient Richardson number
"""
dthetadz = first_derivative(potential_temperature, x=height, axis=axis)
dudz = first_derivative(u, x=height, axis=axis)
dvdz = first_derivative(v, x=height, axis=axis)
return (mpconsts.g / potential_temperature) * (dthetadz / (dudz ** 2 + dvdz ** 2))
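# Illustrative usage sketch (editor's addition): gradient Richardson number for a
# hypothetical, statically stable and weakly sheared layer; values below 0.25 would
# indicate likely turbulence (here Ri stays well above that threshold).
def _example_gradient_richardson_number():  # hypothetical helper, for illustration only
    height = np.array([0., 500., 1000., 1500.]) * units.meter
    theta = np.array([295., 297., 299., 301.]) * units.kelvin
    u = np.array([2., 4., 6., 8.]) * units('m/s')
    v = np.array([0., 1., 2., 3.]) * units('m/s')
    return gradient_richardson_number(height, theta, u, v)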
| 34.249153 | 95 | 0.664571 | import warnings
import numpy as np
import scipy.integrate as si
import scipy.optimize as so
from .tools import (_greater_or_close, _less_or_close, _remove_nans, find_bounding_indices,
find_intersections, first_derivative, get_layer)
from .. import constants as mpconsts
from ..cbook import broadcast_indices
from ..interpolate.one_dimension import interpolate_1d
from ..package_tools import Exporter
from ..units import check_units, concatenate, units
from ..xarray import preprocess_xarray
exporter = Exporter(globals())
sat_pressure_0c = 6.112 * units.millibar
@exporter.export
@preprocess_xarray
@check_units('[temperature]', '[temperature]')
def relative_humidity_from_dewpoint(temperature, dewpoint):
e = saturation_vapor_pressure(dewpoint)
e_s = saturation_vapor_pressure(temperature)
return (e / e_s)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[pressure]')
def exner_function(pressure, reference_pressure=mpconsts.P0):
return (pressure / reference_pressure).to('dimensionless')**mpconsts.kappa
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def potential_temperature(pressure, temperature):
return temperature / exner_function(pressure)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def temperature_from_potential_temperature(pressure, potential_temperature):
return potential_temperature * exner_function(pressure)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[pressure]')
def dry_lapse(pressure, temperature, reference_pressure=None):
if reference_pressure is None:
reference_pressure = pressure[0]
return temperature * (pressure / reference_pressure)**mpconsts.kappa
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[pressure]')
def moist_lapse(pressure, temperature, reference_pressure=None):
def dt(t, p):
t = units.Quantity(t, temperature.units)
p = units.Quantity(p, pressure.units)
rs = saturation_mixing_ratio(p, t)
frac = ((mpconsts.Rd * t + mpconsts.Lv * rs)
/ (mpconsts.Cp_d + (mpconsts.Lv * mpconsts.Lv * rs * mpconsts.epsilon
/ (mpconsts.Rd * t * t)))).to('kelvin')
return (frac / p).magnitude
if reference_pressure is None:
reference_pressure = pressure[0]
pressure = pressure.to('mbar')
reference_pressure = reference_pressure.to('mbar')
temperature = np.atleast_1d(temperature)
side = 'left'
pres_decreasing = (pressure[0] > pressure[-1])
if pres_decreasing:
pressure = pressure[::-1]
side = 'right'
ref_pres_idx = np.searchsorted(pressure.m, reference_pressure.m, side=side)
ret_temperatures = np.empty((0, temperature.shape[0]))
if reference_pressure > pressure.min():
pres_down = np.append(reference_pressure.m, pressure[(ref_pres_idx - 1)::-1].m)
trace_down = si.odeint(dt, temperature.m.squeeze(), pres_down.squeeze())
ret_temperatures = np.concatenate((ret_temperatures, trace_down[:0:-1]))
if reference_pressure < pressure.max():
pres_up = np.append(reference_pressure.m, pressure[ref_pres_idx:].m)
trace_up = si.odeint(dt, temperature.m.squeeze(), pres_up.squeeze())
ret_temperatures = np.concatenate((ret_temperatures, trace_up[1:]))
if pres_decreasing:
ret_temperatures = ret_temperatures[::-1]
return units.Quantity(ret_temperatures.T.squeeze(), temperature.units)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def lcl(pressure, temperature, dewpoint, max_iters=50, eps=1e-5):
def _lcl_iter(p, p0, w, t):
td = globals()['dewpoint'](vapor_pressure(units.Quantity(p, pressure.units), w))
return (p0 * (td / t) ** (1. / mpconsts.kappa)).m
w = mixing_ratio(saturation_vapor_pressure(dewpoint), pressure)
lcl_p = so.fixed_point(_lcl_iter, pressure.m, args=(pressure.m, w, temperature),
xtol=eps, maxiter=max_iters)
lcl_p = np.where(np.isclose(lcl_p, pressure.m), pressure.m, lcl_p) * pressure.units
return lcl_p, globals()['dewpoint'](vapor_pressure(lcl_p, w)).to(temperature.units)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]', '[temperature]')
def lfc(pressure, temperature, dewpoint, parcel_temperature_profile=None, dewpoint_start=None,
which='top'):
pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)
if parcel_temperature_profile is None:
new_stuff = parcel_profile_with_lcl(pressure, temperature, dewpoint)
pressure, temperature, dewpoint, parcel_temperature_profile = new_stuff
parcel_temperature_profile = parcel_temperature_profile.to(temperature.units)
if dewpoint_start is None:
dewpoint_start = dewpoint[0]
if np.isclose(parcel_temperature_profile[0].to(temperature.units).m, temperature[0].m):
x, y = find_intersections(pressure[1:], parcel_temperature_profile[1:],
temperature[1:], direction='increasing', log_x=True)
else:
x, y = find_intersections(pressure, parcel_temperature_profile,
temperature, direction='increasing', log_x=True)
this_lcl = lcl(pressure[0], parcel_temperature_profile[0], dewpoint_start)
if len(x) == 0:
mask = pressure < this_lcl[0]
if np.all(_less_or_close(parcel_temperature_profile[mask], temperature[mask])):
x, y = np.nan * pressure.units, np.nan * temperature.units
else: # LFC = LCL
x, y = this_lcl
return x, y
# LFC exists. Make sure it is no lower than the LCL
else:
idx = x < this_lcl[0]
# LFC height < LCL height, so set LFC = LCL
if not any(idx):
el_pres, _ = find_intersections(pressure[1:], parcel_temperature_profile[1:],
temperature[1:], direction='decreasing',
log_x=True)
if np.min(el_pres) > this_lcl[0]:
x, y = np.nan * pressure.units, np.nan * temperature.units
else:
x, y = this_lcl
return x, y
# Otherwise, find all LFCs that exist above the LCL
# What is returned depends on which flag as described in the docstring
else:
return _multiple_el_lfc_options(x, y, idx, which, pressure,
parcel_temperature_profile, temperature,
dewpoint, intersect_type='LFC')
def _multiple_el_lfc_options(intersect_pressures, intersect_temperatures, valid_x,
which, pressure, parcel_temperature_profile, temperature,
dewpoint, intersect_type):
p_list, t_list = intersect_pressures[valid_x], intersect_temperatures[valid_x]
if which == 'all':
x, y = p_list, t_list
elif which == 'bottom':
x, y = p_list[0], t_list[0]
elif which == 'top':
x, y = p_list[-1], t_list[-1]
elif which == 'wide':
x, y = _wide_option(intersect_type, p_list, t_list, pressure,
parcel_temperature_profile, temperature)
elif which == 'most_cape':
x, y = _most_cape_option(intersect_type, p_list, t_list, pressure, temperature,
dewpoint, parcel_temperature_profile)
else:
raise ValueError('Invalid option for "which". Valid options are "top", "bottom", '
'"wide", "most_cape", and "all".')
return x, y
def _wide_option(intersect_type, p_list, t_list, pressure, parcel_temperature_profile,
temperature):
# zip the LFC and EL lists together and find greatest difference
if intersect_type == 'LFC':
# Find EL intersection pressure values
lfc_p_list = p_list
el_p_list, _ = find_intersections(pressure[1:], parcel_temperature_profile[1:],
temperature[1:], direction='decreasing',
log_x=True)
else: # intersect_type == 'EL'
el_p_list = p_list
# Find LFC intersection pressure values
lfc_p_list, _ = find_intersections(pressure, parcel_temperature_profile,
temperature, direction='increasing',
log_x=True)
diff = [lfc_p.m - el_p.m for lfc_p, el_p in zip(lfc_p_list, el_p_list)]
return (p_list[np.where(diff == np.max(diff))][0],
t_list[np.where(diff == np.max(diff))][0])
def _most_cape_option(intersect_type, p_list, t_list, pressure, temperature, dewpoint,
parcel_temperature_profile):
# Need to loop through all possible combinations of cape, find greatest cape profile
cape_list, pair_list = [], []
for which_lfc in ['top', 'bottom']:
for which_el in ['top', 'bottom']:
cape, _ = cape_cin(pressure, temperature, dewpoint, parcel_temperature_profile,
which_lfc=which_lfc, which_el=which_el)
cape_list.append(cape.m)
pair_list.append([which_lfc, which_el])
(lfc_chosen, el_chosen) = pair_list[np.where(cape_list == np.max(cape_list))[0][0]]
if intersect_type == 'LFC':
if lfc_chosen == 'top':
x, y = p_list[-1], t_list[-1]
else: # 'bottom' is returned
x, y = p_list[0], t_list[0]
else: # EL is returned
if el_chosen == 'top':
x, y = p_list[-1], t_list[-1]
else:
x, y = p_list[0], t_list[0]
return x, y
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]', '[temperature]')
def el(pressure, temperature, dewpoint, parcel_temperature_profile=None, which='top'):
pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)
# Default to surface parcel if no profile or starting pressure level is given
if parcel_temperature_profile is None:
new_stuff = parcel_profile_with_lcl(pressure, temperature, dewpoint)
pressure, temperature, dewpoint, parcel_temperature_profile = new_stuff
parcel_temperature_profile = parcel_temperature_profile.to(temperature.units)
# If the top of the sounding parcel is warmer than the environment, there is no EL
if parcel_temperature_profile[-1] > temperature[-1]:
return np.nan * pressure.units, np.nan * temperature.units
# Interpolate in log space to find the appropriate pressure - units have to be stripped
# and reassigned to allow np.log() to function properly.
x, y = find_intersections(pressure[1:], parcel_temperature_profile[1:], temperature[1:],
direction='decreasing', log_x=True)
lcl_p, _ = lcl(pressure[0], temperature[0], dewpoint[0])
idx = x < lcl_p
if len(x) > 0 and x[-1] < lcl_p:
return _multiple_el_lfc_options(x, y, idx, which, pressure,
parcel_temperature_profile, temperature, dewpoint,
intersect_type='EL')
else:
return np.nan * pressure.units, np.nan * temperature.units
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def parcel_profile(pressure, temperature, dewpoint):
_, _, _, t_l, _, t_u = _parcel_profile_helper(pressure, temperature, dewpoint)
return concatenate((t_l, t_u))
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def parcel_profile_with_lcl(pressure, temperature, dewpoint):
p_l, p_lcl, p_u, t_l, t_lcl, t_u = _parcel_profile_helper(pressure, temperature[0],
dewpoint[0])
new_press = concatenate((p_l, p_lcl, p_u))
prof_temp = concatenate((t_l, t_lcl, t_u))
new_temp = _insert_lcl_level(pressure, temperature, p_lcl)
new_dewp = _insert_lcl_level(pressure, dewpoint, p_lcl)
return new_press, new_temp, new_dewp, prof_temp
def _parcel_profile_helper(pressure, temperature, dewpoint):
# Find the LCL
press_lcl, temp_lcl = lcl(pressure[0], temperature, dewpoint)
press_lcl = press_lcl.to(pressure.units)
# Find the dry adiabatic profile, *including* the LCL. We need >= the LCL in case the
# LCL is included in the levels. It's slightly redundant in that case, but simplifies
press_lower = concatenate((pressure[pressure >= press_lcl], press_lcl))
temp_lower = dry_lapse(press_lower, temperature)
if _greater_or_close(np.nanmin(pressure.m), press_lcl.m):
return (press_lower[:-1], press_lcl, units.Quantity(np.array([]), press_lower.units),
temp_lower[:-1], temp_lcl, units.Quantity(np.array([]), temp_lower.units))
# Find moist pseudo-adiabatic profile starting at the LCL
press_upper = concatenate((press_lcl, pressure[pressure < press_lcl]))
temp_upper = moist_lapse(press_upper, temp_lower[-1]).to(temp_lower.units)
# Return profile pieces
return (press_lower[:-1], press_lcl, press_upper[1:],
temp_lower[:-1], temp_lcl, temp_upper[1:])
def _insert_lcl_level(pressure, temperature, lcl_pressure):
interp_temp = interpolate_1d(lcl_pressure, pressure, temperature)
# Pressure needs to be increasing for searchsorted, so flip it and then convert
# the index back to the original array
loc = pressure.size - pressure[::-1].searchsorted(lcl_pressure)
return np.insert(temperature.m, loc, interp_temp.m) * temperature.units
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[dimensionless]')
def vapor_pressure(pressure, mixing_ratio):
return pressure * mixing_ratio / (mpconsts.epsilon + mixing_ratio)
@exporter.export
@preprocess_xarray
@check_units('[temperature]')
def saturation_vapor_pressure(temperature):
# Converted from original in terms of C to use kelvin. Using raw absolute values of C in
# a formula plays havoc with units support.
return sat_pressure_0c * np.exp(17.67 * (temperature - 273.15 * units.kelvin)
/ (temperature - 29.65 * units.kelvin))
@exporter.export
@preprocess_xarray
@check_units('[temperature]', '[dimensionless]')
def dewpoint_from_relative_humidity(temperature, relative_humidity):
if np.any(relative_humidity > 1.2):
warnings.warn('Relative humidity >120%, ensure proper units.')
return dewpoint(relative_humidity * saturation_vapor_pressure(temperature))
@exporter.export
@preprocess_xarray
@check_units('[pressure]')
def dewpoint(vapor_pressure):
val = np.log(vapor_pressure / sat_pressure_0c)
return 0. * units.degC + 243.5 * units.delta_degC * val / (17.67 - val)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[pressure]', '[dimensionless]')
def mixing_ratio(partial_press, total_press, molecular_weight_ratio=mpconsts.epsilon):
return (molecular_weight_ratio * partial_press
/ (total_press - partial_press)).to('dimensionless')
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def saturation_mixing_ratio(total_press, temperature):
return mixing_ratio(saturation_vapor_pressure(temperature), total_press)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def equivalent_potential_temperature(pressure, temperature, dewpoint):
t = temperature.to('kelvin').magnitude
td = dewpoint.to('kelvin').magnitude
p = pressure.to('hPa').magnitude
e = saturation_vapor_pressure(dewpoint).to('hPa').magnitude
r = saturation_mixing_ratio(pressure, dewpoint).magnitude
t_l = 56 + 1. / (1. / (td - 56) + np.log(t / td) / 800.)
th_l = t * (1000 / (p - e)) ** mpconsts.kappa * (t / t_l) ** (0.28 * r)
th_e = th_l * np.exp((3036. / t_l - 1.78) * r * (1 + 0.448 * r))
return th_e * units.kelvin
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def saturation_equivalent_potential_temperature(pressure, temperature):
t = temperature.to('kelvin').magnitude
p = pressure.to('hPa').magnitude
e = saturation_vapor_pressure(temperature).to('hPa').magnitude
r = saturation_mixing_ratio(pressure, temperature).magnitude
th_l = t * (1000 / (p - e)) ** mpconsts.kappa
th_es = th_l * np.exp((3036. / t - 1.78) * r * (1 + 0.448 * r))
return th_es * units.kelvin
@exporter.export
@preprocess_xarray
@check_units('[temperature]', '[dimensionless]', '[dimensionless]')
def virtual_temperature(temperature, mixing_ratio, molecular_weight_ratio=mpconsts.epsilon):
return temperature * ((mixing_ratio + molecular_weight_ratio)
/ (molecular_weight_ratio * (1 + mixing_ratio)))
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[dimensionless]', '[dimensionless]')
def virtual_potential_temperature(pressure, temperature, mixing_ratio,
molecular_weight_ratio=mpconsts.epsilon):
pottemp = potential_temperature(pressure, temperature)
return virtual_temperature(pottemp, mixing_ratio, molecular_weight_ratio)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[dimensionless]', '[dimensionless]')
def density(pressure, temperature, mixing_ratio, molecular_weight_ratio=mpconsts.epsilon):
virttemp = virtual_temperature(temperature, mixing_ratio, molecular_weight_ratio)
return (pressure / (mpconsts.Rd * virttemp)).to(units.kilogram / units.meter ** 3)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def relative_humidity_wet_psychrometric(pressure, dry_bulb_temperature, web_bulb_temperature,
**kwargs):
return (psychrometric_vapor_pressure_wet(pressure, dry_bulb_temperature,
web_bulb_temperature, **kwargs)
/ saturation_vapor_pressure(dry_bulb_temperature))
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def psychrometric_vapor_pressure_wet(pressure, dry_bulb_temperature, wet_bulb_temperature,
psychrometer_coefficient=6.21e-4 / units.kelvin):
return (saturation_vapor_pressure(wet_bulb_temperature) - psychrometer_coefficient
* pressure * (dry_bulb_temperature - wet_bulb_temperature).to('kelvin'))
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[dimensionless]')
def mixing_ratio_from_relative_humidity(pressure, temperature, relative_humidity):
return (relative_humidity
* saturation_mixing_ratio(pressure, temperature)).to('dimensionless')
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[dimensionless]')
def relative_humidity_from_mixing_ratio(pressure, temperature, mixing_ratio):
return mixing_ratio / saturation_mixing_ratio(pressure, temperature)
@exporter.export
@preprocess_xarray
@check_units('[dimensionless]')
def mixing_ratio_from_specific_humidity(specific_humidity):
try:
specific_humidity = specific_humidity.to('dimensionless')
except AttributeError:
pass
return specific_humidity / (1 - specific_humidity)
@exporter.export
@preprocess_xarray
@check_units('[dimensionless]')
def specific_humidity_from_mixing_ratio(mixing_ratio):
try:
mixing_ratio = mixing_ratio.to('dimensionless')
except AttributeError:
pass
return mixing_ratio / (1 + mixing_ratio)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[dimensionless]')
def relative_humidity_from_specific_humidity(pressure, temperature, specific_humidity):
return (mixing_ratio_from_specific_humidity(specific_humidity)
/ saturation_mixing_ratio(pressure, temperature))
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]', '[temperature]')
def cape_cin(pressure, temperature, dewpoint, parcel_profile, which_lfc='bottom',
which_el='top'):
pressure, temperature, dewpoint, parcel_profile = _remove_nans(pressure, temperature,
dewpoint, parcel_profile)
# Calculate LFC limit of integration
lfc_pressure, _ = lfc(pressure, temperature, dewpoint,
parcel_temperature_profile=parcel_profile, which=which_lfc)
# If there is no LFC, no need to proceed.
if np.isnan(lfc_pressure):
return 0 * units('J/kg'), 0 * units('J/kg')
else:
lfc_pressure = lfc_pressure.magnitude
# Calculate the EL limit of integration
el_pressure, _ = el(pressure, temperature, dewpoint,
parcel_temperature_profile=parcel_profile, which=which_el)
# No EL and we use the top reading of the sounding.
if np.isnan(el_pressure):
el_pressure = pressure[-1].magnitude
else:
el_pressure = el_pressure.magnitude
# Difference between the parcel path and measured temperature profiles
y = (parcel_profile - temperature).to(units.degK)
# Estimate zero crossings
x, y = _find_append_zero_crossings(np.copy(pressure), y)
# CAPE
# Only use data between the LFC and EL for calculation
p_mask = _less_or_close(x.m, lfc_pressure) & _greater_or_close(x.m, el_pressure)
x_clipped = x[p_mask].magnitude
y_clipped = y[p_mask].magnitude
cape = (mpconsts.Rd
* (np.trapz(y_clipped, np.log(x_clipped)) * units.degK)).to(units('J/kg'))
# CIN
# Only use data between the surface and LFC for calculation
p_mask = _greater_or_close(x.m, lfc_pressure)
x_clipped = x[p_mask].magnitude
y_clipped = y[p_mask].magnitude
cin = (mpconsts.Rd
* (np.trapz(y_clipped, np.log(x_clipped)) * units.degK)).to(units('J/kg'))
# Set CIN to 0 if it's returned as a positive value (#1190)
if cin > 0 * units('J/kg'):
cin = 0 * units('J/kg')
return cape, cin
def _find_append_zero_crossings(x, y):
crossings = find_intersections(x[1:], y[1:], np.zeros_like(y[1:]) * y.units, log_x=True)
x = concatenate((x, crossings[0]))
y = concatenate((y, crossings[1]))
sort_idx = np.argsort(x)
x = x[sort_idx]
y = y[sort_idx]
keep_idx = np.ediff1d(x.magnitude, to_end=[1]) > 1e-6
x = x[keep_idx]
y = y[keep_idx]
return x, y
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def most_unstable_parcel(pressure, temperature, dewpoint, height=None,
bottom=None, depth=300 * units.hPa):
p_layer, t_layer, td_layer = get_layer(pressure, temperature, dewpoint, bottom=bottom,
depth=depth, height=height, interpolate=False)
theta_e = equivalent_potential_temperature(p_layer, t_layer, td_layer)
max_idx = np.argmax(theta_e)
return p_layer[max_idx], t_layer[max_idx], td_layer[max_idx], max_idx
@exporter.export
@preprocess_xarray
@check_units('[temperature]', '[pressure]', '[temperature]')
def isentropic_interpolation(levels, pressure, temperature, *args, axis=0,
temperature_out=False, max_iters=50, eps=1e-6,
bottom_up_search=True, **kwargs):
def _isen_iter(iter_log_p, isentlevs_nd, ka, a, b, pok):
exner = pok * np.exp(-ka * iter_log_p)
t = a * iter_log_p + b
f = isentlevs_nd - t * exner
fp = exner * (ka * t - a)
return iter_log_p - (f / fp)
ndim = temperature.ndim
pres = pressure.to('hPa')
temperature = temperature.to('kelvin')
slices = [np.newaxis] * ndim
slices[axis] = slice(None)
slices = tuple(slices)
pres = np.broadcast_to(pres[slices].magnitude, temperature.shape) * pres.units
sort_pres = np.argsort(pres.m, axis=axis)
sort_pres = np.swapaxes(np.swapaxes(sort_pres, 0, axis)[::-1], 0, axis)
sorter = broadcast_indices(pres, sort_pres, ndim, axis)
levs = pres[sorter]
tmpk = temperature[sorter]
levels = np.asarray(levels.m_as('kelvin')).reshape(-1)
isentlevels = levels[np.argsort(levels)]
shape = list(temperature.shape)
shape[axis] = isentlevels.size
isentlevs_nd = np.broadcast_to(isentlevels[slices], shape)
ka = mpconsts.kappa.m_as('dimensionless')
# calculate theta for each point
pres_theta = potential_temperature(levs, tmpk)
# Raise error if input theta level is larger than pres_theta max
if np.max(pres_theta.m) < np.max(levels):
raise ValueError('Input theta level out of data bounds')
# Find log of pressure to implement assumption of linear temperature dependence on
# ln(p)
log_p = np.log(levs.m)
# Calculations for interpolation routine
pok = mpconsts.P0 ** ka
# index values for each point for the pressure level nearest to the desired theta level
above, below, good = find_bounding_indices(pres_theta.m, levels, axis,
from_below=bottom_up_search)
# calculate constants for the interpolation
a = (tmpk.m[above] - tmpk.m[below]) / (log_p[above] - log_p[below])
b = tmpk.m[above] - a * log_p[above]
# calculate first guess for interpolation
isentprs = 0.5 * (log_p[above] + log_p[below])
# Make sure we ignore any nans in the data for solving; checking a is enough since it
# combines log_p and tmpk.
good &= ~np.isnan(a)
# iterative interpolation using scipy.optimize.fixed_point and _isen_iter defined above
log_p_solved = so.fixed_point(_isen_iter, isentprs[good],
args=(isentlevs_nd[good], ka, a[good], b[good], pok.m),
xtol=eps, maxiter=max_iters)
# get back pressure from log p
isentprs[good] = np.exp(log_p_solved)
# Mask out points we know are bad as well as points that are beyond the max pressure
isentprs[~(good & _less_or_close(isentprs, np.max(pres.m)))] = np.nan
# create list for storing output data
ret = [isentprs * units.hPa]
# if temperature_out = true, calculate temperature and output as last item in list
if temperature_out:
ret.append((isentlevs_nd / ((mpconsts.P0.m / isentprs) ** ka)) * units.kelvin)
# do an interpolation for each additional argument
if args:
others = interpolate_1d(isentlevels, pres_theta.m, *(arr[sorter] for arr in args),
axis=axis, return_list_always=True)
ret.extend(others)
return ret
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def surface_based_cape_cin(pressure, temperature, dewpoint):
pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)
p, t, td, profile = parcel_profile_with_lcl(pressure, temperature, dewpoint)
return cape_cin(p, t, td, profile)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def most_unstable_cape_cin(pressure, temperature, dewpoint, **kwargs):
pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)
_, _, _, parcel_idx = most_unstable_parcel(pressure, temperature, dewpoint, **kwargs)
p, t, td, mu_profile = parcel_profile_with_lcl(pressure[parcel_idx:],
temperature[parcel_idx:],
dewpoint[parcel_idx:])
return cape_cin(p, t, td, mu_profile)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def mixed_layer_cape_cin(pressure, temperature, dewpoint, **kwargs):
depth = kwargs.get('depth', 100 * units.hPa)
parcel_pressure, parcel_temp, parcel_dewpoint = mixed_parcel(pressure, temperature,
dewpoint, **kwargs)
# Remove values below top of mixed layer and add in the mixed layer values
pressure_prof = pressure[pressure < (pressure[0] - depth)]
temp_prof = temperature[pressure < (pressure[0] - depth)]
dew_prof = dewpoint[pressure < (pressure[0] - depth)]
pressure_prof = concatenate([parcel_pressure, pressure_prof])
temp_prof = concatenate([parcel_temp, temp_prof])
dew_prof = concatenate([parcel_dewpoint, dew_prof])
p, t, td, ml_profile = parcel_profile_with_lcl(pressure_prof, temp_prof, dew_prof)
return cape_cin(p, t, td, ml_profile)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def mixed_parcel(pressure, temperature, dewpoint, parcel_start_pressure=None,
height=None, bottom=None, depth=100 * units.hPa, interpolate=True):
# If a parcel starting pressure is not provided, use the surface
if not parcel_start_pressure:
parcel_start_pressure = pressure[0]
# Calculate the potential temperature and mixing ratio over the layer
theta = potential_temperature(pressure, temperature)
mixing_ratio = saturation_mixing_ratio(pressure, dewpoint)
# Mix the variables over the layer
mean_theta, mean_mixing_ratio = mixed_layer(pressure, theta, mixing_ratio, bottom=bottom,
height=height, depth=depth,
interpolate=interpolate)
# Convert back to temperature
mean_temperature = mean_theta * exner_function(parcel_start_pressure)
# Convert back to dewpoint
mean_vapor_pressure = vapor_pressure(parcel_start_pressure, mean_mixing_ratio)
# Using globals() here allows us to keep the dewpoint parameter but still call the
# function of the same name.
mean_dewpoint = globals()['dewpoint'](mean_vapor_pressure)
return (parcel_start_pressure, mean_temperature.to(temperature.units),
mean_dewpoint.to(dewpoint.units))
@exporter.export
@preprocess_xarray
@check_units('[pressure]')
def mixed_layer(pressure, *args, height=None, bottom=None, depth=100 * units.hPa,
interpolate=True):
layer = get_layer(pressure, *args, height=height, bottom=bottom,
depth=depth, interpolate=interpolate)
p_layer = layer[0]
datavars_layer = layer[1:]
ret = []
for datavar_layer in datavars_layer:
actual_depth = abs(p_layer[0] - p_layer[-1])
ret.append((-1. / actual_depth.m) * np.trapz(datavar_layer.m, p_layer.m)
* datavar_layer.units)
return ret
@exporter.export
@preprocess_xarray
@check_units('[length]', '[temperature]')
def dry_static_energy(height, temperature):
return (mpconsts.g * height + mpconsts.Cp_d * temperature).to('kJ/kg')
@exporter.export
@preprocess_xarray
@check_units('[length]', '[temperature]', '[dimensionless]')
def moist_static_energy(height, temperature, specific_humidity):
return (dry_static_energy(height, temperature)
+ mpconsts.Lv * specific_humidity.to('dimensionless')).to('kJ/kg')
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def thickness_hydrostatic(pressure, temperature, mixing_ratio=None,
molecular_weight_ratio=mpconsts.epsilon, bottom=None, depth=None):
# Get the data for the layer, conditional upon bottom/depth being specified and mixing
# ratio being given
if bottom is None and depth is None:
if mixing_ratio is None:
layer_p, layer_virttemp = pressure, temperature
else:
layer_p = pressure
layer_virttemp = virtual_temperature(temperature, mixing_ratio,
molecular_weight_ratio)
else:
if mixing_ratio is None:
layer_p, layer_virttemp = get_layer(pressure, temperature, bottom=bottom,
depth=depth)
else:
layer_p, layer_temp, layer_w = get_layer(pressure, temperature, mixing_ratio,
bottom=bottom, depth=depth)
layer_virttemp = virtual_temperature(layer_temp, layer_w, molecular_weight_ratio)
# Take the integral (with unit handling) and return the result in meters
return (- mpconsts.Rd / mpconsts.g * np.trapz(
layer_virttemp.m_as('K'), x=np.log(layer_p.m_as('hPa'))) * units.K).to('m')
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def thickness_hydrostatic_from_relative_humidity(pressure, temperature, relative_humidity,
bottom=None, depth=None):
mixing = mixing_ratio_from_relative_humidity(pressure, temperature, relative_humidity)
return thickness_hydrostatic(pressure, temperature, mixing_ratio=mixing, bottom=bottom,
depth=depth)
@exporter.export
@preprocess_xarray
@check_units('[length]', '[temperature]')
def brunt_vaisala_frequency_squared(height, potential_temperature, axis=0):
# Ensure validity of temperature units
potential_temperature = potential_temperature.to('K')
# Calculate and return the square of Brunt-Vaisala frequency
return mpconsts.g / potential_temperature * first_derivative(potential_temperature,
x=height, axis=axis)
@exporter.export
@preprocess_xarray
@check_units('[length]', '[temperature]')
def brunt_vaisala_frequency(height, potential_temperature, axis=0):
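    """Calculate the Brunt-Vaisala frequency N = sqrt(N^2), with negative N^2 mapped to NaN."""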
bv_freq_squared = brunt_vaisala_frequency_squared(height, potential_temperature,
axis=axis)
bv_freq_squared[bv_freq_squared.magnitude < 0] = np.nan
return np.sqrt(bv_freq_squared)
@exporter.export
@preprocess_xarray
@check_units('[length]', '[temperature]')
def brunt_vaisala_period(height, potential_temperature, axis=0):
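    """Calculate the Brunt-Vaisala period 2 * pi / N, undefined (NaN) where N^2 <= 0."""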
bv_freq_squared = brunt_vaisala_frequency_squared(height, potential_temperature,
axis=axis)
bv_freq_squared[bv_freq_squared.magnitude <= 0] = np.nan
return 2 * np.pi / np.sqrt(bv_freq_squared)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def wet_bulb_temperature(pressure, temperature, dewpoint):
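    """Calculate wet-bulb temperature by following a moist adiabat from the LCL back to the parcel pressure."""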
if not hasattr(pressure, 'shape'):
pressure = np.atleast_1d(pressure)
temperature = np.atleast_1d(temperature)
dewpoint = np.atleast_1d(dewpoint)
it = np.nditer([pressure, temperature, dewpoint, None],
op_dtypes=['float', 'float', 'float', 'float'],
flags=['buffered'])
for press, temp, dewp, ret in it:
press = press * pressure.units
temp = temp * temperature.units
dewp = dewp * dewpoint.units
lcl_pressure, lcl_temperature = lcl(press, temp, dewp)
moist_adiabat_temperatures = moist_lapse(concatenate([lcl_pressure, press]),
lcl_temperature)
ret[...] = moist_adiabat_temperatures[-1].magnitude
# If we started with a scalar, return a scalar
if it.operands[3].size == 1:
return it.operands[3][0] * moist_adiabat_temperatures.units
return it.operands[3] * moist_adiabat_temperatures.units
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def static_stability(pressure, temperature, axis=0):
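    """Calculate the static stability parameter: -Rd * T / p * d(ln theta)/dp."""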
theta = potential_temperature(pressure, temperature)
return - mpconsts.Rd * temperature / pressure * first_derivative(np.log(theta.m_as('K')),
x=pressure, axis=axis)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[dimensionless]')
def dewpoint_from_specific_humidity(pressure, temperature, specific_humidity):
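    """Calculate dewpoint from specific humidity via the corresponding relative humidity."""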
return dewpoint_from_relative_humidity(temperature,
relative_humidity_from_specific_humidity(
pressure, temperature, specific_humidity))
@exporter.export
@preprocess_xarray
@check_units('[length]/[time]', '[pressure]', '[temperature]')
def vertical_velocity_pressure(w, pressure, temperature, mixing_ratio=0):
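    """Convert vertical velocity w (m/s) to omega (Pa/s) assuming hydrostatic balance."""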
rho = density(pressure, temperature, mixing_ratio)
return (-mpconsts.g * rho * w).to('Pa/s')
@exporter.export
@preprocess_xarray
@check_units('[pressure]/[time]', '[pressure]', '[temperature]')
def vertical_velocity(omega, pressure, temperature, mixing_ratio=0):
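    """Convert omega (Pa/s) to vertical velocity w (m/s) assuming hydrostatic balance."""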
rho = density(pressure, temperature, mixing_ratio)
return (omega / (- mpconsts.g * rho)).to('m/s')
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def specific_humidity_from_dewpoint(pressure, dewpoint):
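    """Calculate specific humidity from the saturation mixing ratio at the dewpoint."""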
mixing_ratio = saturation_mixing_ratio(pressure, dewpoint)
return specific_humidity_from_mixing_ratio(mixing_ratio)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def lifted_index(pressure, temperature, parcel_profile):
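    """Calculate the lifted index: environmental minus parcel-profile temperature at 500 hPa."""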
# find the index for the 500 hPa pressure level.
idx = np.where(pressure == 500 * units.hPa)
# find the measured temperature at 500 hPa.
T500 = temperature[idx]
# find the parcel profile temperature at 500 hPa.
Tp500 = parcel_profile[idx]
# calculate the lifted index.
lifted_index = T500 - Tp500.to(units.degC)
return lifted_index
@exporter.export
@preprocess_xarray
@check_units('[length]', '[temperature]', '[speed]', '[speed]')
def gradient_richardson_number(height, potential_temperature, u, v, axis=0):
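    """Calculate the gradient Richardson number: (g / theta) * dtheta/dz / ((du/dz)^2 + (dv/dz)^2)."""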
dthetadz = first_derivative(potential_temperature, x=height, axis=axis)
dudz = first_derivative(u, x=height, axis=axis)
dvdz = first_derivative(v, x=height, axis=axis)
return (mpconsts.g / potential_temperature) * (dthetadz / (dudz ** 2 + dvdz ** 2))
| true | true |
f70080a8c5b6f93830a0ccc665937affad0153c6 | 33 | py | Python | privacy/util/webhook.py | Justasic/Privacy.py | 471e6669a1e22e539459bbeb24de4efd3b7a11e0 | ["BSD-3-Clause"] | 3 | 2019-11-04T16:11:36.000Z | 2021-05-24T18:56:40.000Z | privacy/util/webhook.py | Justasic/Privacy.py | 471e6669a1e22e539459bbeb24de4efd3b7a11e0 | ["BSD-3-Clause"] | 9 | 2019-11-09T01:47:58.000Z | 2022-03-30T00:19:20.000Z | privacy/util/webhook.py | Justasic/Privacy.py | 471e6669a1e22e539459bbeb24de4efd3b7a11e0 | ["BSD-3-Clause"] | 2 | 2020-08-12T03:22:29.000Z | 2020-12-10T00:08:46.000Z |
# TODO: maybe make this flexible
| 16.5 | 32 | 0.757576 | true | true |
|
f7008119dbfc3df53830c38b0b4d874106e6e295 | 1,207 | py | Python | examples/progress-bar/custom-key-bindings.py | gousaiyang/python-prompt-toolkit | 6237764658214af4c24633795d2571d2bd03375d | ["BSD-3-Clause"] | 4,028 | 2015-01-02T16:31:38.000Z | 2018-10-25T14:51:02.000Z | examples/progress-bar/custom-key-bindings.py | gousaiyang/python-prompt-toolkit | 6237764658214af4c24633795d2571d2bd03375d | ["BSD-3-Clause"] | 709 | 2018-10-25T22:36:52.000Z | 2022-03-28T18:34:35.000Z | examples/progress-bar/custom-key-bindings.py | gousaiyang/python-prompt-toolkit | 6237764658214af4c24633795d2571d2bd03375d | ["BSD-3-Clause"] | 399 | 2018-10-26T18:08:46.000Z | 2022-03-28T16:09:41.000Z |
#!/usr/bin/env python
"""
A very simple progress bar which keeps track of the progress as we consume an
iterator.
"""
import os
import signal
import time
from prompt_toolkit import HTML
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.patch_stdout import patch_stdout
from prompt_toolkit.shortcuts import ProgressBar
def main():
bottom_toolbar = HTML(
' <b>[f]</b> Print "f" <b>[q]</b> Abort <b>[x]</b> Send Control-C.'
)
# Create custom key bindings first.
kb = KeyBindings()
cancel = [False]
@kb.add("f")
def _(event):
print("You pressed `f`.")
@kb.add("q")
def _(event):
"Quit by setting cancel flag."
cancel[0] = True
@kb.add("x")
def _(event):
"Quit by sending SIGINT to the main thread."
os.kill(os.getpid(), signal.SIGINT)
# Use `patch_stdout`, to make sure that prints go above the
# application.
with patch_stdout():
with ProgressBar(key_bindings=kb, bottom_toolbar=bottom_toolbar) as pb:
for i in pb(range(800)):
time.sleep(0.01)
if cancel[0]:
break
if __name__ == "__main__":
main()
 | 23.211538 | 79 | 0.614747 |
import os
import signal
import time
from prompt_toolkit import HTML
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.patch_stdout import patch_stdout
from prompt_toolkit.shortcuts import ProgressBar
def main():
bottom_toolbar = HTML(
' <b>[f]</b> Print "f" <b>[q]</b> Abort <b>[x]</b> Send Control-C.'
)
kb = KeyBindings()
cancel = [False]
@kb.add("f")
def _(event):
print("You pressed `f`.")
@kb.add("q")
def _(event):
cancel[0] = True
@kb.add("x")
def _(event):
os.kill(os.getpid(), signal.SIGINT)
with patch_stdout():
with ProgressBar(key_bindings=kb, bottom_toolbar=bottom_toolbar) as pb:
for i in pb(range(800)):
time.sleep(0.01)
if cancel[0]:
break
if __name__ == "__main__":
main()
| true | true |
f700817b9394d6608b7a517ea91f3f68d7dd7ca4 | 15,460 | py | Python | TwitterMiner_UserAllTweets.py | BrainRift/twitterminer | eda652dbf8f69c429707fd14969ba9e70ff97351 | ["MIT"] | 4 | 2019-06-12T20:03:47.000Z | 2021-12-14T17:05:45.000Z | TwitterMiner_UserAllTweets.py | BrainRift/twitterminer | eda652dbf8f69c429707fd14969ba9e70ff97351 | ["MIT"] | 1 | 2019-06-20T01:32:06.000Z | 2019-06-20T01:33:13.000Z | TwitterMiner_UserAllTweets.py | BrainRift/twitterminer | eda652dbf8f69c429707fd14969ba9e70ff97351 | ["MIT"] | 2 | 2020-01-03T03:43:03.000Z | 2021-01-06T18:45:49.000Z |
#!/usr/bin/env python
# encoding: utf-8
import os
import sqlite3 as lite
import sys
import json
import time
import urllib.request
import tweepy
from TwitterMiner_Keys import *
from tweepy import OAuthHandler
from TwitterMiner_settings import *
import hashlib
#from Twitter_validate import validate_image
def dump_hash(twitter_dump):
data_hash = None # Ensure the value starts with nothing
dump = hashlib.sha1()
dump.update(twitter_dump)
data_hash = dump.hexdigest()
return data_hash
def file_hash(point_to_file):
hash_sha1 = hashlib.sha1()
with open(point_to_file, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_sha1.update(chunk)
print(hash_sha1.hexdigest())
return hash_sha1.hexdigest()
def extract_image_blob(tweet_id):  # dump the stored image blob for the given tweet id back out to disk
with open("test.jpg", "wb") as image_file:
c.execute("SELECT tweeted_image FROM T_Tweets WHERE Tweet_id = " + str(tweet_id))
ablob = c.fetchone()
image_file.write(ablob[0])
def create_db(table_name):
c.execute("PRAGMA journal_mode = WAL")
c.execute("CREATE TABLE IF NOT EXISTS " + table_name + "(tweet_id INTEGER NOT NULL PRIMARY KEY, date_mined TEXT, screen_name TEXT, \
user_id INTEGER, users_name TEXT, created_at_UTC TEXT, is_retweet TEXT, \
retweeted_times TEXT, text TEXT, place_name TEXT, country_code TEXT, country TEXT, \
bounding_box TEXT, source_tweeted TEXT, geo TEXT, in_reply_to_user TEXT, \
inreply_statusid TEXT, posted_image_dest TEXT, tweeted_image BLOB, image_hash TEXT, \
media_type TEXT, media_url TEXT, media_id TEXT, posted_video_dest TEXT, \
tweeted_video BLOB, video_hash TEXT, video_type TEXT, video_url TEXT, \
url_in_tweet TEXT, status BLOB, status_hash TEXT, bookmark TEXT)")
conn.commit()
def get_all_tweets(screen_name):
    #Twitter only allows access to a user's most recent 3240 tweets with this method
#initialize a list to hold all the tweepy Tweets
alltweets = []
#make initial request for most recent tweets (200 is the maximum allowed count)
try:
new_tweets = api.user_timeline(screen_name = screen_name, count=200)
except tweepy.TweepError:
print("Failed to pull tweets from %s" % screen_name)
print("User may be protected/private.")
print("Exiting...")
sys.exit()
except tweepy.RateLimitError: # I want to add code here to switch creds if a Rate limit occurs
print("Failed to pull the tweets due to a Twitter Rate Limit error.")
print("Please wait 15 min and try again...")
sys.exit()
#save most recent tweets
alltweets.extend(new_tweets)
#save the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
#keep grabbing tweets until there are no tweets left to grab
while len(new_tweets) > 0:
print("getting tweets before %s" % (oldest))
        #all subsequent requests use the max_id param to prevent duplicates
new_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest)
#save most recent tweets
alltweets.extend(new_tweets)
#update the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
print("...%s tweets downloaded so far" % (len(alltweets)))
    #iterate through the collected tweets and write each one to the database
for status in alltweets:
# Pull the pieces of the tweet and put them in a variable
Tweetid = status.id
screenname = status.user.screen_name
userid = status.user.id
usersname = status.user.name
tweettime = status.created_at
# Checks to see if status has the attribute of status.retweeted_status, then assigns is_retweet a value
if hasattr(status, 'retweeted_status'):
is_retweet = True
#Added this section on 6-19-19 due to truncated ReTweets
#This checks for populated data in the extended_tweet
#If data is populated, it pulls the entire full_text
#Thanks to Fraser Phillips for finding this issue
if hasattr(status.retweeted_status, 'extended_tweet'):
Amp_text = str(status.retweeted_status.extended_tweet['full_text'])
tweet = "RT: " + Amp_text.replace('&','&')
else:
Amp_text = status.retweeted_status.text
tweet = "RT: " + Amp_text.replace('&','&')
else:
is_retweet = False
Amp_text = status.text
tweet = Amp_text.replace('&','&')
retweeted_times = status.retweet_count
if status.place is not None:
placename = status.place.full_name
countrycode = status.place.country_code
country = status.place.country
boundingbox = str(status.place.bounding_box.coordinates)
else:
placename = None
countrycode = None
country = None
boundingbox = None
Tweet_source = status.source
geo = status.geo
if geo is not None:
geo = json.dumps(geo)
inreplytouser = status.in_reply_to_screen_name
inreply_tostatus = status.in_reply_to_status_id_str
#Checks for Media in the Tweet and downloads it
if 'media' in status.entities:
image_posted = status.entities['media'][0]['media_url']
remove_tweet_url = image_posted.split('/')[-1]
posted_image_dest = os.path.join("Case_Attachments/" + casename + "/tweets/" + screenname + "/tweeted_image/" + remove_tweet_url)
image_path = "Case_Attachments/" + casename + "/tweets/" + screenname + "/tweeted_image/"
if not os.path.exists(image_path):
os.makedirs(image_path)
try:
print("Downloading... %s" % posted_image_dest)
urllib.request.urlretrieve(image_posted, filename = posted_image_dest)
tweeted_image = open(posted_image_dest, "rb").read()
image_hash = dump_hash(tweeted_image)
except urllib.error.URLError as e:
print("Error downloading file... %s ... from TweetID: %s" % (posted_image_dest, str(Tweetid)))
posted_image_dest = "ERROR DOWNLOADING FILE"
tweeted_image = None
image_hash = None
pass
except:
print("Error downloading file... %s ... from TweetID: %s" % (posted_image_dest, str(Tweetid)))
posted_image_dest = "ERROR DOWNLOADING FILE - Unknown Error"
tweeted_image = None
image_hash = None
pass
mediatype = status.entities['media'][0]['type']
mediaurl = status.entities['media'][0]['media_url']
mediaid = status.entities['media'][0]['id']
else:
posted_image_dest = None
mediatype = None
mediaurl = None
mediaid = None
tweeted_image = None
image_hash = None
# New video Code
#Checks for Video in the tweets and downloads it
if hasattr(status, 'extended_entities'):
if 'video_info' in status.extended_entities['media'][0]:
# This section checks the number of dictionaries are in the variants
# It then looks at the bitrate of the variants and determines the highest value
# Once the highest value is determined, it extracts that video.
variant_times = len(status.extended_entities['media'][0]['video_info']['variants']) # Gets the number of variants
bit_rate = -1
for variant_count in range(0, variant_times): #iterates through all the variants in that tweets
if 'bitrate' in status.extended_entities['media'][0]['video_info']['variants'][variant_count] and \
bit_rate < status.extended_entities['media'][0]['video_info']['variants'][variant_count]['bitrate']:
bit_rate = status.extended_entities['media'][0]['video_info']['variants'][variant_count]['bitrate']
videourl = status.extended_entities['media'][0]['video_info']['variants'][variant_count]['url']
videotype = status.extended_entities['media'][0]['video_info']['variants'][variant_count]['content_type']
remove_video_url = videourl.split('/')[-1]
posted_video_dest = os.path.join("Case_Attachments/" + casename + "/tweets/" + screenname + "/tweeted_video/" + remove_video_url)
video_path = "Case_Attachments/" + casename + "/tweets/" + screenname + "/tweeted_video/"
if not os.path.exists(video_path):
os.makedirs(video_path)
try:
print("Downloading... %s" % posted_video_dest)
urllib.request.urlretrieve(videourl, filename = posted_video_dest)
tweeted_video = open(posted_video_dest, "rb").read()
video_hash = dump_hash(tweeted_video)
except urllib.error.URLError as e:
print("Error downloading file... %s ... from TweetID: %s" % (posted_video_dest, str(Tweetid)))
posted_image_dest = "ERROR DOWNLOADING FILE"
tweeted_video = None
video_hash = None
pass
except:
print("Error downloading file... %s ... from TweetID: %s" % (posted_video_dest, str(Tweetid)))
posted_image_dest = "ERROR DOWNLOADING FILE"
tweeted_video = None
video_hash = None
pass
else:
posted_video_dest = None
videotype= None
videourl= None
tweeted_video = None
video_hash = None
else:
posted_video_dest = None
videotype= None
videourl= None
tweeted_video = None
video_hash = None
# End Video Check
# End new video Code
if not status.entities['urls']:
url_in_tweet = None
else:
url_in_tweet = str(status.entities['urls'][0]['url'])
#Grab the current date and time
now = time.strftime("%c")
# Starts the raw hash process
status_dump = str(status).encode('utf-8')
status_hash = dump_hash(status_dump)
bookmark = None
# Writes the data collected in the variables to the database
try:
c.execute("INSERT INTO " + table_name + "(tweet_id, date_mined, screen_name, user_id, users_name, \
created_at_UTC, is_retweet, retweeted_times,text, place_name, \
country_code, country, bounding_box, source_tweeted, geo, \
in_reply_to_user, inreply_statusid, posted_image_dest, \
tweeted_image, image_hash, media_type, media_url, media_id, \
posted_video_dest, tweeted_video, video_hash, video_type, \
video_url, url_in_tweet, status, status_hash, bookmark) \
VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)" , \
(Tweetid,
now,
screenname,
userid,
usersname,
tweettime,
is_retweet,
retweeted_times,
tweet,
placename,
countrycode,
country,
boundingbox,
Tweet_source,
geo,
inreplytouser,
inreply_tostatus,
posted_image_dest,
tweeted_image,
image_hash,
mediatype,
mediaurl,
mediaid,
posted_video_dest,
tweeted_video,
video_hash,
videotype,
videourl,
url_in_tweet,
str(status),
status_hash,
bookmark))
conn.commit()
print(str(Tweetid), "--- Successfully added to the Database")
except lite.IntegrityError:
print(str(Tweetid), "--- Record already Exists")
if __name__ == '__main__':
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
#---------
#---------
#--------- Be sure to enter a unique case name -- This is handled in TwitterMiner_settings now
#---------
#---------
casename = CASE_NAME
dbname = casename + ".db"
conn = lite.connect(dbname)
c = conn.cursor()
screenname = USER_NAME
table_name = USER_NAME + "_Tweets"
create_db(table_name)
get_all_tweets(screenname)
print("\n Finished collecting Tweets from user --- %s" % screenname)
print("Results are stored in " + casename)
    #validate_image('T_Tweets')
 | 42.944444 | 153 | 0.504657 |
import os
import sqlite3 as lite
import sys
import json
import time
import urllib.request
import tweepy
from TwitterMiner_Keys import *
from tweepy import OAuthHandler
from TwitterMiner_settings import *
import hashlib
def dump_hash(twitter_dump):
    data_hash = None
    dump = hashlib.sha1()
dump.update(twitter_dump)
data_hash = dump.hexdigest()
return data_hash
def file_hash(point_to_file):
hash_sha1 = hashlib.sha1()
with open(point_to_file, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_sha1.update(chunk)
print(hash_sha1.hexdigest())
return hash_sha1.hexdigest()
def extract_image_blob(tweet_id):
with open("test.jpg", "wb") as image_file:
c.execute("SELECT tweeted_image FROM T_Tweets WHERE Tweet_id = " + str(tweet_id))
ablob = c.fetchone()
image_file.write(ablob[0])
def create_db(table_name):
c.execute("PRAGMA journal_mode = WAL")
c.execute("CREATE TABLE IF NOT EXISTS " + table_name + "(tweet_id INTEGER NOT NULL PRIMARY KEY, date_mined TEXT, screen_name TEXT, \
user_id INTEGER, users_name TEXT, created_at_UTC TEXT, is_retweet TEXT, \
retweeted_times TEXT, text TEXT, place_name TEXT, country_code TEXT, country TEXT, \
bounding_box TEXT, source_tweeted TEXT, geo TEXT, in_reply_to_user TEXT, \
inreply_statusid TEXT, posted_image_dest TEXT, tweeted_image BLOB, image_hash TEXT, \
media_type TEXT, media_url TEXT, media_id TEXT, posted_video_dest TEXT, \
tweeted_video BLOB, video_hash TEXT, video_type TEXT, video_url TEXT, \
url_in_tweet TEXT, status BLOB, status_hash TEXT, bookmark TEXT)")
conn.commit()
def get_all_tweets(screen_name):
alltweets = []
try:
new_tweets = api.user_timeline(screen_name = screen_name, count=200)
except tweepy.TweepError:
print("Failed to pull tweets from %s" % screen_name)
print("User may be protected/private.")
print("Exiting...")
sys.exit()
    except tweepy.RateLimitError:
        print("Failed to pull the tweets due to a Twitter Rate Limit error.")
print("Please wait 15 min and try again...")
sys.exit()
alltweets.extend(new_tweets)
oldest = alltweets[-1].id - 1
while len(new_tweets) > 0:
print("getting tweets before %s" % (oldest))
new_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest)
alltweets.extend(new_tweets)
oldest = alltweets[-1].id - 1
print("...%s tweets downloaded so far" % (len(alltweets)))
for status in alltweets:
Tweetid = status.id
screenname = status.user.screen_name
userid = status.user.id
usersname = status.user.name
tweettime = status.created_at
if hasattr(status, 'retweeted_status'):
is_retweet = True
if hasattr(status.retweeted_status, 'extended_tweet'):
Amp_text = str(status.retweeted_status.extended_tweet['full_text'])
tweet = "RT: " + Amp_text.replace('&','&')
else:
Amp_text = status.retweeted_status.text
tweet = "RT: " + Amp_text.replace('&','&')
else:
is_retweet = False
Amp_text = status.text
tweet = Amp_text.replace('&','&')
retweeted_times = status.retweet_count
if status.place is not None:
placename = status.place.full_name
countrycode = status.place.country_code
country = status.place.country
boundingbox = str(status.place.bounding_box.coordinates)
else:
placename = None
countrycode = None
country = None
boundingbox = None
Tweet_source = status.source
geo = status.geo
if geo is not None:
geo = json.dumps(geo)
inreplytouser = status.in_reply_to_screen_name
inreply_tostatus = status.in_reply_to_status_id_str
if 'media' in status.entities:
image_posted = status.entities['media'][0]['media_url']
remove_tweet_url = image_posted.split('/')[-1]
posted_image_dest = os.path.join("Case_Attachments/" + casename + "/tweets/" + screenname + "/tweeted_image/" + remove_tweet_url)
image_path = "Case_Attachments/" + casename + "/tweets/" + screenname + "/tweeted_image/"
if not os.path.exists(image_path):
os.makedirs(image_path)
try:
print("Downloading... %s" % posted_image_dest)
urllib.request.urlretrieve(image_posted, filename = posted_image_dest)
tweeted_image = open(posted_image_dest, "rb").read()
image_hash = dump_hash(tweeted_image)
except urllib.error.URLError as e:
print("Error downloading file... %s ... from TweetID: %s" % (posted_image_dest, str(Tweetid)))
posted_image_dest = "ERROR DOWNLOADING FILE"
tweeted_image = None
image_hash = None
pass
except:
print("Error downloading file... %s ... from TweetID: %s" % (posted_image_dest, str(Tweetid)))
posted_image_dest = "ERROR DOWNLOADING FILE - Unknown Error"
tweeted_image = None
image_hash = None
pass
mediatype = status.entities['media'][0]['type']
mediaurl = status.entities['media'][0]['media_url']
mediaid = status.entities['media'][0]['id']
else:
posted_image_dest = None
mediatype = None
mediaurl = None
mediaid = None
tweeted_image = None
image_hash = None
if hasattr(status, 'extended_entities'):
if 'video_info' in status.extended_entities['media'][0]:
variant_times = len(status.extended_entities['media'][0]['video_info']['variants'])
bit_rate = -1
for variant_count in range(0, variant_times):
if 'bitrate' in status.extended_entities['media'][0]['video_info']['variants'][variant_count] and \
bit_rate < status.extended_entities['media'][0]['video_info']['variants'][variant_count]['bitrate']:
bit_rate = status.extended_entities['media'][0]['video_info']['variants'][variant_count]['bitrate']
videourl = status.extended_entities['media'][0]['video_info']['variants'][variant_count]['url']
videotype = status.extended_entities['media'][0]['video_info']['variants'][variant_count]['content_type']
remove_video_url = videourl.split('/')[-1]
posted_video_dest = os.path.join("Case_Attachments/" + casename + "/tweets/" + screenname + "/tweeted_video/" + remove_video_url)
video_path = "Case_Attachments/" + casename + "/tweets/" + screenname + "/tweeted_video/"
if not os.path.exists(video_path):
os.makedirs(video_path)
try:
print("Downloading... %s" % posted_video_dest)
urllib.request.urlretrieve(videourl, filename = posted_video_dest)
tweeted_video = open(posted_video_dest, "rb").read()
video_hash = dump_hash(tweeted_video)
except urllib.error.URLError as e:
print("Error downloading file... %s ... from TweetID: %s" % (posted_video_dest, str(Tweetid)))
posted_image_dest = "ERROR DOWNLOADING FILE"
tweeted_video = None
video_hash = None
pass
except:
print("Error downloading file... %s ... from TweetID: %s" % (posted_video_dest, str(Tweetid)))
posted_image_dest = "ERROR DOWNLOADING FILE"
tweeted_video = None
video_hash = None
pass
else:
posted_video_dest = None
videotype= None
videourl= None
tweeted_video = None
video_hash = None
else:
posted_video_dest = None
videotype= None
videourl= None
tweeted_video = None
video_hash = None
if not status.entities['urls']:
url_in_tweet = None
else:
url_in_tweet = str(status.entities['urls'][0]['url'])
now = time.strftime("%c")
status_dump = str(status).encode('utf-8')
status_hash = dump_hash(status_dump)
bookmark = None
try:
c.execute("INSERT INTO " + table_name + "(tweet_id, date_mined, screen_name, user_id, users_name, \
created_at_UTC, is_retweet, retweeted_times,text, place_name, \
country_code, country, bounding_box, source_tweeted, geo, \
in_reply_to_user, inreply_statusid, posted_image_dest, \
tweeted_image, image_hash, media_type, media_url, media_id, \
posted_video_dest, tweeted_video, video_hash, video_type, \
video_url, url_in_tweet, status, status_hash, bookmark) \
VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)" , \
(Tweetid,
now,
screenname,
userid,
usersname,
tweettime,
is_retweet,
retweeted_times,
tweet,
placename,
countrycode,
country,
boundingbox,
Tweet_source,
geo,
inreplytouser,
inreply_tostatus,
posted_image_dest,
tweeted_image,
image_hash,
mediatype,
mediaurl,
mediaid,
posted_video_dest,
tweeted_video,
video_hash,
videotype,
videourl,
url_in_tweet,
str(status),
status_hash,
bookmark))
conn.commit()
print(str(Tweetid), "--- Successfully added to the Database")
except lite.IntegrityError:
print(str(Tweetid), "--- Record already Exists")
if __name__ == '__main__':
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
casename = CASE_NAME
dbname = casename + ".db"
conn = lite.connect(dbname)
c = conn.cursor()
screenname = USER_NAME
table_name = USER_NAME + "_Tweets"
create_db(table_name)
get_all_tweets(screenname)
print("\n Finished collecting Tweets from user --- %s" % screenname)
print("Results are stored in " + casename)
| true | true |
f70082354fbf968017c0cf3ef52caa845a6bb7ca | 5,156 | py | Python | onlinecourse/migrations/0001_initial.py | Safia2202/onlinecourseibm | 1818bb81b0c22cc237dcf5ee24381783b8d3f8fc | ["Apache-2.0"] | null | null | null | onlinecourse/migrations/0001_initial.py | Safia2202/onlinecourseibm | 1818bb81b0c22cc237dcf5ee24381783b8d3f8fc | ["Apache-2.0"] | null | null | null | onlinecourse/migrations/0001_initial.py | Safia2202/onlinecourseibm | 1818bb81b0c22cc237dcf5ee24381783b8d3f8fc | ["Apache-2.0"] | 1 | 2021-03-19T09:38:51.000Z | 2021-03-19T09:38:51.000Z |
# Generated by Django 3.1.3 on 2021-03-13 11:57
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=1000)),
('is_correct', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='online course', max_length=30)),
('image', models.ImageField(upload_to='course_images/')),
('description', models.CharField(max_length=1000)),
('pub_date', models.DateField(null=True)),
('total_enrollment', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Enrollment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_enrolled', models.DateField(default=django.utils.timezone.now)),
('mode', models.CharField(choices=[('audit', 'Audit'), ('honor', 'Honor'), ('BETA', 'BETA')], default='audit', max_length=5)),
('rating', models.FloatField(default=5.0)),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.course')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Submission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choices', models.ManyToManyField(to='onlinecourse.Choice')),
('enrollment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.enrollment')),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=1000)),
('grade', models.IntegerField(default=1)),
('course', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.course')),
],
),
migrations.CreateModel(
name='Lesson',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(default='title', max_length=200)),
('order', models.IntegerField(default=0)),
('content', models.TextField()),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.course')),
],
),
migrations.CreateModel(
name='Learner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('occupation', models.CharField(choices=[('student', 'Student'), ('developer', 'Developer'), ('data_scientist', 'Data Scientist'), ('dba', 'Database Admin')], default='student', max_length=20)),
('social_link', models.URLField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Instructor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('full_time', models.BooleanField(default=True)),
('total_learners', models.IntegerField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='course',
name='instructors',
field=models.ManyToManyField(to='onlinecourse.Instructor'),
),
migrations.AddField(
model_name='course',
name='users',
field=models.ManyToManyField(through='onlinecourse.Enrollment', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='choice',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.question'),
),
]
| 47.302752 | 210 | 0.585531 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=1000)),
('is_correct', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='online course', max_length=30)),
('image', models.ImageField(upload_to='course_images/')),
('description', models.CharField(max_length=1000)),
('pub_date', models.DateField(null=True)),
('total_enrollment', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Enrollment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_enrolled', models.DateField(default=django.utils.timezone.now)),
('mode', models.CharField(choices=[('audit', 'Audit'), ('honor', 'Honor'), ('BETA', 'BETA')], default='audit', max_length=5)),
('rating', models.FloatField(default=5.0)),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.course')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Submission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choices', models.ManyToManyField(to='onlinecourse.Choice')),
('enrollment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.enrollment')),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=1000)),
('grade', models.IntegerField(default=1)),
('course', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.course')),
],
),
migrations.CreateModel(
name='Lesson',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(default='title', max_length=200)),
('order', models.IntegerField(default=0)),
('content', models.TextField()),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.course')),
],
),
migrations.CreateModel(
name='Learner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('occupation', models.CharField(choices=[('student', 'Student'), ('developer', 'Developer'), ('data_scientist', 'Data Scientist'), ('dba', 'Database Admin')], default='student', max_length=20)),
('social_link', models.URLField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Instructor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('full_time', models.BooleanField(default=True)),
('total_learners', models.IntegerField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='course',
name='instructors',
field=models.ManyToManyField(to='onlinecourse.Instructor'),
),
migrations.AddField(
model_name='course',
name='users',
field=models.ManyToManyField(through='onlinecourse.Enrollment', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='choice',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.question'),
),
]
| true | true |
f700828dd6b4498dce5f1f79ff761916bbd8bed5 | 33,203 | py | Python | indico/modules/categories/controllers/display.py | bnavigator/indico | 2881b6ea69ef992396a3a147f61d8598c11f41c9 | ["MIT"] | 1 | 2019-11-03T11:34:16.000Z | 2019-11-03T11:34:16.000Z | indico/modules/categories/controllers/display.py | NP-compete/indico | 80db7ca0ef9d1f3240a16b9ff2d84bf0bf26c549 | ["MIT"] | null | null | null | indico/modules/categories/controllers/display.py | NP-compete/indico | 80db7ca0ef9d1f3240a16b9ff2d84bf0bf26c549 | ["MIT"] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from datetime import date, datetime, time, timedelta
from functools import partial
from io import BytesIO
from itertools import chain, groupby, imap
from math import ceil
from operator import attrgetter, itemgetter
from time import mktime
import dateutil
from dateutil.relativedelta import relativedelta
from flask import Response, flash, jsonify, redirect, request, session
from pytz import utc
from sqlalchemy.orm import joinedload, load_only, subqueryload, undefer, undefer_group
from werkzeug.exceptions import BadRequest, NotFound
from indico.core.db import db
from indico.core.db.sqlalchemy.colors import ColorTuple
from indico.core.db.sqlalchemy.util.queries import get_n_matching
from indico.modules.categories.controllers.base import RHDisplayCategoryBase
from indico.modules.categories.legacy import XMLCategorySerializer
from indico.modules.categories.models.categories import Category
from indico.modules.categories.serialize import (serialize_categories_ical, serialize_category, serialize_category_atom,
serialize_category_chain)
from indico.modules.categories.util import get_category_stats, get_upcoming_events, serialize_event_for_json_ld
from indico.modules.categories.views import WPCategory, WPCategoryCalendar, WPCategoryStatistics
from indico.modules.events.models.events import Event
from indico.modules.events.timetable.util import get_category_timetable
from indico.modules.events.util import get_base_ical_parameters
from indico.modules.news.util import get_recent_news
from indico.modules.users import User
from indico.modules.users.models.favorites import favorite_category_table
from indico.util.date_time import format_date, format_number, now_utc
from indico.util.decorators import classproperty
from indico.util.fs import secure_filename
from indico.util.i18n import _
from indico.util.string import to_unicode
from indico.web.flask.templating import get_template_module
from indico.web.flask.util import send_file, url_for
from indico.web.rh import RH
from indico.web.util import jsonify_data
CALENDAR_COLOR_PALETTE = [
ColorTuple('#1F1100', '#ECC495'),
ColorTuple('#0F0202', '#B9CBCA'),
ColorTuple('#0D1E1F', '#C2ECEF'),
ColorTuple('#000000', '#D0C296'),
ColorTuple('#202020', '#EFEBC2')
]
def _flat_map(func, list_):
return chain.from_iterable(imap(func, list_))
class RHCategoryIcon(RHDisplayCategoryBase):
_category_query_options = undefer('icon'),
def _check_access(self):
# Category icons are always public
pass
def _process(self):
if not self.category.has_icon:
raise NotFound
metadata = self.category.icon_metadata
return send_file(metadata['filename'], BytesIO(self.category.icon), mimetype=metadata['content_type'],
conditional=True)
class RHCategoryLogo(RHDisplayCategoryBase):
_category_query_options = undefer('logo'),
def _process(self):
if not self.category.has_logo:
raise NotFound
metadata = self.category.logo_metadata
return send_file(metadata['filename'], BytesIO(self.category.logo), mimetype=metadata['content_type'],
conditional=True)
class RHCategoryStatistics(RHDisplayCategoryBase):
def _get_stats_json(self, stats):
data = {'events': stats['events_by_year'], 'contributions': stats['contribs_by_year'],
'files': stats['attachments'], 'updated': stats['updated'].isoformat()}
if self.category.is_root:
data['users'] = self._count_users()
return jsonify(data)
def _get_stats_html(self, stats):
plots, values, updated = self._process_stats(stats, root=self.category.is_root)
return WPCategoryStatistics.render_template('category_statistics.html', self.category,
plots=plots, values=values, updated=updated, has_stats=True)
def _process(self):
stats = get_category_stats(self.category.id)
if request.accept_mimetypes.best_match(('application/json', 'text/html')) == 'application/json':
return self._get_stats_json(stats)
else:
return self._get_stats_html(stats)
def _plot_data(self, stats, tooltip=''):
years = sorted(stats.iterkeys())
min_year = now_utc().year
max_year = min_year
if years:
min_year = min(min_year, years[0]) - 1
max_year = max(max_year, years[-1])
data = {year: stats.get(year, 0) for year in xrange(min_year, max_year + 1)}
max_y = ceil(max(data.itervalues()) * 1.1) # 1.1 for padding in the graph
else:
data = {}
max_y = 0
return {'min_x': min_year, 'max_x': max_year, 'min_y': 0, 'max_y': max_y, 'values': data,
'total': sum(data.itervalues()), 'label_x': _("Years"), 'label_y': '', 'tooltip': tooltip}
def _process_stats(self, stats, root=False):
# tooltip formatting is for ease of translation
plots = [(_('Number of events'),
_('The year is the one of the start date of the event.'),
self._plot_data(stats.get('events_by_year', {}),
tooltip=_('{value} events in {year}').format(value='', year=''))),
(_('Number of contributions'),
_('The year is the one of the start date of the contribution.'),
self._plot_data(stats.get('contribs_by_year', {}),
tooltip=_('{value} contributions in {year}').format(value='', year='')))]
values = [(_('Number of attachments'), stats['attachments'])]
if root:
values.append((_('Number of users'), self._count_users()))
return plots, values, stats['updated']
def _count_users(self):
return User.find(is_deleted=False, is_pending=False).count()
class RHCategoryInfo(RHDisplayCategoryBase):
@classproperty
@classmethod
def _category_query_options(cls):
children_strategy = subqueryload('children')
children_strategy.load_only('id', 'parent_id', 'title', 'protection_mode', 'event_creation_restricted')
children_strategy.subqueryload('acl_entries')
children_strategy.undefer('deep_children_count')
children_strategy.undefer('deep_events_count')
children_strategy.undefer('has_events')
return (children_strategy,
load_only('id', 'parent_id', 'title', 'protection_mode'),
subqueryload('acl_entries'),
undefer('deep_children_count'),
undefer('deep_events_count'),
undefer('has_events'),
undefer('chain'))
def _process(self):
return jsonify_data(flash=False,
**serialize_category_chain(self.category, include_children=True, include_parents=True))
class RHReachableCategoriesInfo(RH):
def _get_reachable_categories(self, id_, excluded_ids):
cat = Category.query.filter_by(id=id_).options(joinedload('children').load_only('id')).one()
ids = ({c.id for c in cat.children} | {c.id for c in cat.parent_chain_query}) - excluded_ids
if not ids:
return []
return (Category.query
.filter(Category.id.in_(ids))
.options(*RHCategoryInfo._category_query_options)
.all())
def _process(self):
excluded_ids = set(request.json.get('exclude', set())) if request.json else set()
categories = self._get_reachable_categories(request.view_args['category_id'], excluded_ids=excluded_ids)
return jsonify_data(categories=[serialize_category_chain(c, include_children=True) for c in categories],
flash=False)
class RHCategorySearch(RH):
def _process(self):
q = request.args['q'].lower()
query = (Category.query
.filter(Category.title_matches(q))
.options(undefer('deep_children_count'), undefer('deep_events_count'), undefer('has_events'),
joinedload('acl_entries')))
if session.user:
# Prefer favorite categories
query = query.order_by(Category.favorite_of.any(favorite_category_table.c.user_id == session.user.id)
.desc())
# Prefer exact matches and matches at the beginning, then order by category title and if
# those are identical by the chain titles
query = (query
.order_by((db.func.lower(Category.title) == q).desc(),
db.func.lower(Category.title).startswith(q).desc(),
db.func.lower(Category.title),
Category.chain_titles))
total_count = query.count()
query = query.limit(10)
return jsonify_data(categories=[serialize_category(c, with_favorite=True, with_path=True) for c in query],
total_count=total_count, flash=False)
class RHSubcatInfo(RHDisplayCategoryBase):
"""Get basic information about subcategories.
This is intended to return information shown on a category display
page that is not needed immediately and is somewhat expensive to
retrieve.
"""
@classproperty
@classmethod
def _category_query_options(cls):
children_strategy = joinedload('children')
children_strategy.load_only('id')
children_strategy.undefer('deep_events_count')
return children_strategy, load_only('id', 'parent_id', 'protection_mode')
def _process(self):
event_counts = {c.id: {'value': c.deep_events_count, 'pretty': format_number(c.deep_events_count)}
for c in self.category.children}
return jsonify_data(flash=False, event_counts=event_counts)
class RHDisplayCategoryEventsBase(RHDisplayCategoryBase):
"""Base class for display pages displaying an event list"""
_category_query_options = (joinedload('children').load_only('id', 'title', 'protection_mode'),
undefer('attachment_count'), undefer('has_events'))
_event_query_options = (joinedload('person_links'), joinedload('series'), undefer_group('series'),
load_only('id', 'category_id', 'created_dt', 'start_dt', 'end_dt', 'timezone',
'protection_mode', 'title', 'type_', 'series_pos', 'series_count',
'own_address', 'own_venue_id', 'own_venue_name'))
def _process_args(self):
RHDisplayCategoryBase._process_args(self)
self.now = now_utc(exact=False).astimezone(self.category.display_tzinfo)
def format_event_date(self, event):
day_month = 'dd MMM'
tzinfo = self.category.display_tzinfo
start_dt = event.start_dt.astimezone(tzinfo)
end_dt = event.end_dt.astimezone(tzinfo)
if start_dt.year != end_dt.year:
return '{} - {}'.format(to_unicode(format_date(start_dt, timezone=tzinfo)),
to_unicode(format_date(end_dt, timezone=tzinfo)))
elif (start_dt.month != end_dt.month) or (start_dt.day != end_dt.day):
return '{} - {}'.format(to_unicode(format_date(start_dt, day_month, timezone=tzinfo)),
to_unicode(format_date(end_dt, day_month, timezone=tzinfo)))
else:
return to_unicode(format_date(start_dt, day_month, timezone=tzinfo))
def group_by_month(self, events):
def _format_tuple(x):
(year, month), events = x
return {'name': format_date(date(year, month, 1), format='MMMM yyyy'),
'events': list(events),
'is_current': year == self.now.year and month == self.now.month}
def _key(event):
start_dt = event.start_dt.astimezone(self.category.tzinfo)
return start_dt.year, start_dt.month
months = groupby(events, key=_key)
return map(_format_tuple, months)
def happening_now(self, event):
return event.start_dt <= self.now < event.end_dt
def is_recent(self, dt):
return dt > self.now - relativedelta(weeks=1)
class RHDisplayCategory(RHDisplayCategoryEventsBase):
"""Show the contents of a category (events/subcategories)"""
def _process(self):
        # Current events, which are always shown by default, are the events of this month and of the previous month.
        # If there are no events in this range, the range is extended to the nearest past and future months that contain events.
past_threshold = self.now - relativedelta(months=1, day=1, hour=0, minute=0)
future_threshold = self.now + relativedelta(months=1, day=1, hour=0, minute=0)
next_event_start_dt = (db.session.query(Event.start_dt)
.filter(Event.start_dt >= self.now, Event.category_id == self.category.id)
.order_by(Event.start_dt.asc(), Event.id.asc())
.first() or (None,))[0]
previous_event_start_dt = (db.session.query(Event.start_dt)
.filter(Event.start_dt < self.now, Event.category_id == self.category.id)
.order_by(Event.start_dt.desc(), Event.id.desc())
.first() or (None,))[0]
if next_event_start_dt is not None and next_event_start_dt > future_threshold:
future_threshold = next_event_start_dt + relativedelta(months=1, day=1, hour=0, minute=0)
if previous_event_start_dt is not None and previous_event_start_dt < past_threshold:
past_threshold = previous_event_start_dt.replace(day=1, hour=0, minute=0)
event_query = (Event.query.with_parent(self.category)
.options(*self._event_query_options)
.order_by(Event.start_dt.desc(), Event.id.desc()))
past_event_query = event_query.filter(Event.start_dt < past_threshold)
future_event_query = event_query.filter(Event.start_dt >= future_threshold)
current_event_query = event_query.filter(Event.start_dt >= past_threshold,
Event.start_dt < future_threshold)
json_ld_events = events = current_event_query.filter(Event.start_dt < future_threshold).all()
events_by_month = self.group_by_month(events)
future_event_count = future_event_query.count()
past_event_count = past_event_query.count()
if not session.user and future_event_count:
json_ld_events = json_ld_events + future_event_query.all()
show_future_events = bool(self.category.id in session.get('fetch_future_events_in', set()) or
(session.user and session.user.settings.get('show_future_events', False)))
show_past_events = bool(self.category.id in session.get('fetch_past_events_in', set()) or
(session.user and session.user.settings.get('show_past_events', False)))
managers = sorted(self.category.get_manager_list(), key=attrgetter('principal_type.name', 'name'))
threshold_format = '%Y-%m'
params = {'event_count': len(events),
'events_by_month': events_by_month,
'format_event_date': self.format_event_date,
'future_event_count': future_event_count,
'show_future_events': show_future_events,
'future_threshold': future_threshold.strftime(threshold_format),
'happening_now': self.happening_now,
'is_recent': self.is_recent,
'managers': managers,
'past_event_count': past_event_count,
'show_past_events': show_past_events,
'past_threshold': past_threshold.strftime(threshold_format),
'json_ld': map(serialize_event_for_json_ld, json_ld_events),
'atom_feed_url': url_for('.export_atom', self.category),
'atom_feed_title': _('Events of "{}"').format(self.category.title)}
params.update(get_base_ical_parameters(session.user, 'category',
'/export/categ/{0}.ics'.format(self.category.id), {'from': '-31d'}))
if not self.category.is_root:
return WPCategory.render_template('display/category.html', self.category, **params)
news = get_recent_news()
upcoming_events = get_upcoming_events()
return WPCategory.render_template('display/root_category.html', self.category, news=news,
upcoming_events=upcoming_events, **params)
class RHEventList(RHDisplayCategoryEventsBase):
"""Return the HTML for the event list before/after a specific month"""
def _parse_year_month(self, string):
try:
dt = datetime.strptime(string, '%Y-%m')
except (TypeError, ValueError):
return None
return self.category.display_tzinfo.localize(dt)
def _process_args(self):
RHDisplayCategoryEventsBase._process_args(self)
before = self._parse_year_month(request.args.get('before'))
after = self._parse_year_month(request.args.get('after'))
if before is None and after is None:
raise BadRequest('"before" or "after" parameter must be specified')
event_query = (Event.query.with_parent(self.category)
.options(*self._event_query_options)
.order_by(Event.start_dt.desc(), Event.id.desc()))
if before:
event_query = event_query.filter(Event.start_dt < before)
if after:
event_query = event_query.filter(Event.start_dt >= after)
self.events = event_query.all()
def _process(self):
events_by_month = self.group_by_month(self.events)
tpl = get_template_module('categories/display/event_list.html')
html = tpl.event_list_block(events_by_month=events_by_month, format_event_date=self.format_event_date,
is_recent=self.is_recent, happening_now=self.happening_now)
return jsonify_data(flash=False, html=html)
class RHShowEventsInCategoryBase(RHDisplayCategoryBase):
"""Set whether the events in a category are automatically displayed or not"""
session_field = ''
def _show_events(self, show_events):
category_ids = session.setdefault(self.session_field, set())
if show_events:
category_ids.add(self.category.id)
else:
category_ids.discard(self.category.id)
session.modified = True
def _process_DELETE(self):
self._show_events(False)
def _process_PUT(self):
self._show_events(True)
class RHShowFutureEventsInCategory(RHShowEventsInCategoryBase):
"""Set whether the past events in a category are automatically displayed or not"""
session_field = 'fetch_future_events_in'
class RHShowPastEventsInCategory(RHShowEventsInCategoryBase):
"""Set whether the past events in a category are automatically displayed or not"""
session_field = 'fetch_past_events_in'
class RHExportCategoryICAL(RHDisplayCategoryBase):
def _process(self):
filename = '{}-category.ics'.format(secure_filename(self.category.title, str(self.category.id)))
buf = serialize_categories_ical([self.category.id], session.user,
Event.end_dt >= (now_utc() - timedelta(weeks=4)))
return send_file(filename, buf, 'text/calendar')
class RHExportCategoryAtom(RHDisplayCategoryBase):
def _process(self):
filename = '{}-category.atom'.format(secure_filename(self.category.title, str(self.category.id)))
buf = serialize_category_atom(self.category,
url_for(request.endpoint, self.category, _external=True),
session.user,
Event.end_dt >= now_utc())
return send_file(filename, buf, 'application/atom+xml')
class RHXMLExportCategoryInfo(RH):
def _process_args(self):
try:
id_ = int(request.args['id'])
except ValueError:
raise BadRequest('Invalid Category ID')
self.category = Category.get_one(id_, is_deleted=False)
def _process(self):
category_xml_info = XMLCategorySerializer(self.category).serialize_category()
return Response(category_xml_info, mimetype='text/xml')
class RHCategoryOverview(RHDisplayCategoryBase):
"""Display the events for a particular day, week or month"""
def _process_args(self):
RHDisplayCategoryBase._process_args(self)
self.detail = request.args.get('detail', 'event')
if self.detail not in ('event', 'session', 'contribution'):
raise BadRequest('Invalid detail argument')
self.period = request.args.get('period', 'day')
if self.period not in ('day', 'month', 'week'):
raise BadRequest('Invalid period argument')
if 'date' in request.args:
try:
date = datetime.strptime(request.args['date'], '%Y-%m-%d')
except ValueError:
raise BadRequest('Invalid date argument')
else:
date = datetime.now()
date = self.category.display_tzinfo.localize(date)
date = date.replace(hour=0, minute=0, second=0, microsecond=0)
if self.period == 'day':
self.start_dt = date
self.end_dt = self.start_dt + relativedelta(days=1)
elif self.period == 'week':
self.start_dt = date - relativedelta(days=date.weekday())
self.end_dt = self.start_dt + relativedelta(days=7)
elif self.period == 'month':
self.start_dt = date + relativedelta(day=1)
self.end_dt = self.start_dt + relativedelta(months=1)
def _process(self):
info = get_category_timetable([self.category.id], self.start_dt, self.end_dt, detail_level=self.detail,
tz=self.category.display_tzinfo, from_categ=self.category, grouped=False)
events = info['events']
# Only categories with icons are listed in the sidebar
subcategory_ids = {event.category.effective_icon_data['source_id']
for event in events if event.category.has_effective_icon}
subcategories = Category.query.filter(Category.id.in_(subcategory_ids))
# Events spanning multiple days must appear on all days
events = _flat_map(partial(self._process_multiday_events, info), events)
def _event_sort_key(event):
# Ongoing events are shown after all other events on the same day and are sorted by start_date
ongoing = getattr(event, 'ongoing', False)
return (event.start_dt.date(), ongoing,
-mktime(event.first_occurence_start_dt.timetuple()) if ongoing else event.start_dt.time())
events = sorted(events, key=_event_sort_key)
params = {
'detail': self.detail,
'period': self.period,
'subcategories': subcategories,
'start_dt': self.start_dt,
'end_dt': self.end_dt - relativedelta(days=1), # Display a close-ended interval
'previous_day_url': self._other_day_url(self.start_dt - relativedelta(days=1)),
'next_day_url': self._other_day_url(self.start_dt + relativedelta(days=1)),
'previous_month_url': self._other_day_url(self.start_dt - relativedelta(months=1)),
'next_month_url': self._other_day_url(self.start_dt + relativedelta(months=1)),
'previous_year_url': self._other_day_url(self.start_dt - relativedelta(years=1)),
'next_year_url': self._other_day_url(self.start_dt + relativedelta(years=1)),
'mathjax': True
}
if self.detail != 'event':
cte = self.category.get_protection_parent_cte()
params['accessible_categories'] = {cat_id
for cat_id, prot_parent_id in db.session.query(cte)
if prot_parent_id == self.category.id}
if self.period == 'day':
return WPCategory.render_template('display/overview/day.html', self.category, events=events, **params)
elif self.period == 'week':
days = self._get_week_days()
template = 'display/overview/week.html'
params['previous_week_url'] = self._other_day_url(self.start_dt - relativedelta(days=7))
params['next_week_url'] = self._other_day_url(self.start_dt + relativedelta(days=7))
elif self.period == 'month':
days = self._get_calendar_days()
template = 'display/overview/month.html'
events_by_day = []
for day in days:
events_by_day.append((day, self._pop_head_while(lambda x: x.start_dt.date() <= day.date(), events)))
# Check whether all weekends are empty
hide_weekend = (not any(map(itemgetter(1), events_by_day[5::7])) and
not any(map(itemgetter(1), events_by_day[6::7])))
if hide_weekend:
events_by_day = [x for x in events_by_day if x[0].weekday() not in (5, 6)]
return WPCategory.render_template(template, self.category, events_by_day=events_by_day,
hide_weekend=hide_weekend, **params)
def _get_week_days(self):
# Return the days shown in the weekly overview
return self._get_days(self.start_dt, self.end_dt)
def _get_calendar_days(self):
# Return the days shown in the monthly overview
start_dt = self.start_dt - relativedelta(days=self.start_dt.weekday())
end_dt = self.end_dt + relativedelta(days=(7 - self.end_dt.weekday()) % 7)
return self._get_days(start_dt, end_dt)
@staticmethod
def _get_days(start_dt, end_dt):
# Return all days in the open-ended interval
current_dt = start_dt
tz = current_dt.tzinfo
next_day = current_dt.date() + timedelta(1)
beginning_of_next_day = tz.localize(datetime.combine(next_day, time()))
while current_dt < end_dt:
yield current_dt
current_dt = beginning_of_next_day
beginning_of_next_day = current_dt + relativedelta(days=1)
@staticmethod
def _pop_head_while(predicate, list_):
# Pop the head of the list while the predicate is true and return the popped elements
res = []
while len(list_) and predicate(list_[0]):
res.append(list_[0])
list_.pop(0)
return res
def _other_day_url(self, date):
return url_for('.overview', self.category, detail=self.detail, period=self.period,
date=format_date(date, 'yyyy-MM-dd'))
def _process_multiday_events(self, info, event):
# Add "fake" proxy events for events spanning multiple days such that there is one event per day
# Function type: Event -> List[Event]
tzinfo = self.category.display_tzinfo
# Breaks, contributions and sessions grouped by start_dt. Each EventProxy will return the relevant ones only
timetable_objects = sorted(chain(*info[event.id].values()), key=attrgetter('timetable_entry.start_dt'))
timetable_objects_by_date = {x[0]: list(x[1]) for x
in groupby(timetable_objects, key=lambda x: x.start_dt.astimezone(tzinfo).date())}
# All the days of the event shown in the overview
event_days = self._get_days(max(self.start_dt, event.start_dt.astimezone(tzinfo)),
min(self.end_dt, event.end_dt.astimezone(tzinfo)))
# Generate a proxy object with adjusted start_dt and timetable_objects for each day
return [_EventProxy(event, day, tzinfo, timetable_objects_by_date.get(day.date(), [])) for day in event_days]
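# A tiny usage sketch (hypothetical data, not used by the request handlers above): the
# _pop_head_while helper destructively consumes the matching prefix of a pre-sorted list,
# which is how the monthly overview distributes events over consecutive days.
def _example_pop_head_while():
    """Hypothetical sketch of RHCategoryOverview._pop_head_while on a plain list."""
    items = [1, 1, 2, 5]
    head = RHCategoryOverview._pop_head_while(lambda n: n < 2, items)
    return head, items  # ([1, 1], [2, 5])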
class _EventProxy(object):
def __init__(self, event, date, tzinfo, timetable_objects):
start_dt = datetime.combine(date, event.start_dt.astimezone(tzinfo).timetz())
assert date >= event.start_dt
assert date <= event.end_dt
object.__setattr__(self, '_start_dt', start_dt)
object.__setattr__(self, '_real_event', event)
object.__setattr__(self, '_event_tz_start_date', event.start_dt.astimezone(tzinfo).date())
object.__setattr__(self, '_timetable_objects', timetable_objects)
def __getattribute__(self, name):
if name == 'start_dt':
return object.__getattribute__(self, '_start_dt')
event = object.__getattribute__(self, '_real_event')
if name == 'timetable_objects':
return object.__getattribute__(self, '_timetable_objects')
if name == 'ongoing':
# the event is "ongoing" if the dates (in the tz of the category)
# of the event and the proxy (calendar entry) don't match
event_start_date = object.__getattribute__(self, '_event_tz_start_date')
return event_start_date != self.start_dt.date()
if name == 'first_occurence_start_dt':
return event.start_dt
return getattr(event, name)
def __setattr__(self, name, value):
raise AttributeError('This instance is read-only')
def __repr__(self):
return '<_EventProxy({}, {})>'.format(self.start_dt, object.__getattribute__(self, '_real_event'))
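# An illustrative sketch (the _FakeEvent class below is an assumption, not an Indico model):
# a two-day event yields one proxy per overview day, and only the proxy for a day after the
# event's first day reports ongoing=True, matching the comment in __getattribute__ above.
def _example_event_proxy_ongoing():
    """Hypothetical sketch of the per-day proxies built by _process_multiday_events."""
    from datetime import datetime
    import pytz
    class _FakeEvent(object):
        def __init__(self, start_dt, end_dt):
            self.start_dt = start_dt
            self.end_dt = end_dt
    tz = pytz.utc
    event = _FakeEvent(start_dt=tz.localize(datetime(2020, 1, 1, 9)),
                       end_dt=tz.localize(datetime(2020, 1, 2, 17)))
    first = _EventProxy(event, event.start_dt, tz, [])
    second = _EventProxy(event, tz.localize(datetime(2020, 1, 2)), tz, [])
    return first.ongoing, second.ongoing  # (False, True)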
class RHCategoryCalendarView(RHDisplayCategoryBase):
def _process(self):
if not request.is_xhr:
return WPCategoryCalendar.render_template('display/calendar.html', self.category,
start_dt=request.args.get('start_dt'))
tz = self.category.display_tzinfo
start = tz.localize(dateutil.parser.parse(request.args['start'])).astimezone(utc)
end = tz.localize(dateutil.parser.parse(request.args['end'])).astimezone(utc)
query = (Event.query
.filter(Event.starts_between(start, end),
Event.is_visible_in(self.category.id),
~Event.is_deleted)
.options(load_only('id', 'title', 'start_dt', 'end_dt', 'category_id')))
events = self._get_event_data(query)
ongoing_events = (Event.query
.filter(Event.is_visible_in(self.category.id),
Event.start_dt < start,
Event.end_dt > end)
.options(load_only('id', 'title', 'start_dt', 'end_dt', 'timezone'))
.order_by(Event.title)
.all())
return jsonify_data(flash=False, events=events, ongoing_event_count=len(ongoing_events),
ongoing_events_html=self._render_ongoing_events(ongoing_events))
def _get_event_data(self, event_query):
data = []
tz = self.category.display_tzinfo
for event in event_query:
category_id = event.category_id
event_data = {'title': event.title,
'start': event.start_dt.astimezone(tz).replace(tzinfo=None).isoformat(),
'end': event.end_dt.astimezone(tz).replace(tzinfo=None).isoformat(),
'url': event.url}
colors = CALENDAR_COLOR_PALETTE[category_id % len(CALENDAR_COLOR_PALETTE)]
event_data.update({'textColor': '#' + colors.text, 'color': '#' + colors.background})
data.append(event_data)
return data
def _render_ongoing_events(self, ongoing_events):
template = get_template_module('categories/display/_calendar_ongoing_events.html')
return template.render_ongoing_events(ongoing_events, self.category.display_tzinfo)
class RHCategoryUpcomingEvent(RHDisplayCategoryBase):
"""Redirect to the upcoming event of a category."""
def _process(self):
event = self._get_upcoming_event()
if event:
return redirect(event.url)
else:
flash(_('There are no upcoming events for this category'))
return redirect(self.category.url)
def _get_upcoming_event(self):
query = (Event.query
.filter(Event.is_visible_in(self.category.id),
Event.start_dt > now_utc(),
~Event.is_deleted)
.options(subqueryload('acl_entries'))
.order_by(Event.start_dt, Event.id))
res = get_n_matching(query, 1, lambda event: event.can_access(session.user))
if res:
return res[0]
| 47.981214 | 120 | 0.640033 |
from __future__ import unicode_literals
from datetime import date, datetime, time, timedelta
from functools import partial
from io import BytesIO
from itertools import chain, groupby, imap
from math import ceil
from operator import attrgetter, itemgetter
from time import mktime
import dateutil
from dateutil.relativedelta import relativedelta
from flask import Response, flash, jsonify, redirect, request, session
from pytz import utc
from sqlalchemy.orm import joinedload, load_only, subqueryload, undefer, undefer_group
from werkzeug.exceptions import BadRequest, NotFound
from indico.core.db import db
from indico.core.db.sqlalchemy.colors import ColorTuple
from indico.core.db.sqlalchemy.util.queries import get_n_matching
from indico.modules.categories.controllers.base import RHDisplayCategoryBase
from indico.modules.categories.legacy import XMLCategorySerializer
from indico.modules.categories.models.categories import Category
from indico.modules.categories.serialize import (serialize_categories_ical, serialize_category, serialize_category_atom,
serialize_category_chain)
from indico.modules.categories.util import get_category_stats, get_upcoming_events, serialize_event_for_json_ld
from indico.modules.categories.views import WPCategory, WPCategoryCalendar, WPCategoryStatistics
from indico.modules.events.models.events import Event
from indico.modules.events.timetable.util import get_category_timetable
from indico.modules.events.util import get_base_ical_parameters
from indico.modules.news.util import get_recent_news
from indico.modules.users import User
from indico.modules.users.models.favorites import favorite_category_table
from indico.util.date_time import format_date, format_number, now_utc
from indico.util.decorators import classproperty
from indico.util.fs import secure_filename
from indico.util.i18n import _
from indico.util.string import to_unicode
from indico.web.flask.templating import get_template_module
from indico.web.flask.util import send_file, url_for
from indico.web.rh import RH
from indico.web.util import jsonify_data
CALENDAR_COLOR_PALETTE = [
ColorTuple('#1F1100', '#ECC495'),
ColorTuple('#0F0202', '#B9CBCA'),
ColorTuple('#0D1E1F', '#C2ECEF'),
ColorTuple('#000000', '#D0C296'),
ColorTuple('#202020', '#EFEBC2')
]
def _flat_map(func, list_):
return chain.from_iterable(imap(func, list_))
class RHCategoryIcon(RHDisplayCategoryBase):
_category_query_options = undefer('icon'),
def _check_access(self):
pass
def _process(self):
if not self.category.has_icon:
raise NotFound
metadata = self.category.icon_metadata
return send_file(metadata['filename'], BytesIO(self.category.icon), mimetype=metadata['content_type'],
conditional=True)
class RHCategoryLogo(RHDisplayCategoryBase):
_category_query_options = undefer('logo'),
def _process(self):
if not self.category.has_logo:
raise NotFound
metadata = self.category.logo_metadata
return send_file(metadata['filename'], BytesIO(self.category.logo), mimetype=metadata['content_type'],
conditional=True)
class RHCategoryStatistics(RHDisplayCategoryBase):
def _get_stats_json(self, stats):
data = {'events': stats['events_by_year'], 'contributions': stats['contribs_by_year'],
'files': stats['attachments'], 'updated': stats['updated'].isoformat()}
if self.category.is_root:
data['users'] = self._count_users()
return jsonify(data)
def _get_stats_html(self, stats):
plots, values, updated = self._process_stats(stats, root=self.category.is_root)
return WPCategoryStatistics.render_template('category_statistics.html', self.category,
plots=plots, values=values, updated=updated, has_stats=True)
def _process(self):
stats = get_category_stats(self.category.id)
if request.accept_mimetypes.best_match(('application/json', 'text/html')) == 'application/json':
return self._get_stats_json(stats)
else:
return self._get_stats_html(stats)
def _plot_data(self, stats, tooltip=''):
years = sorted(stats.iterkeys())
min_year = now_utc().year
max_year = min_year
if years:
min_year = min(min_year, years[0]) - 1
max_year = max(max_year, years[-1])
data = {year: stats.get(year, 0) for year in xrange(min_year, max_year + 1)}
            max_y = ceil(max(data.itervalues()) * 1.1)
        else:
data = {}
max_y = 0
return {'min_x': min_year, 'max_x': max_year, 'min_y': 0, 'max_y': max_y, 'values': data,
'total': sum(data.itervalues()), 'label_x': _("Years"), 'label_y': '', 'tooltip': tooltip}
def _process_stats(self, stats, root=False):
plots = [(_('Number of events'),
_('The year is the one of the start date of the event.'),
self._plot_data(stats.get('events_by_year', {}),
tooltip=_('{value} events in {year}').format(value='', year=''))),
(_('Number of contributions'),
_('The year is the one of the start date of the contribution.'),
self._plot_data(stats.get('contribs_by_year', {}),
tooltip=_('{value} contributions in {year}').format(value='', year='')))]
values = [(_('Number of attachments'), stats['attachments'])]
if root:
values.append((_('Number of users'), self._count_users()))
return plots, values, stats['updated']
def _count_users(self):
return User.find(is_deleted=False, is_pending=False).count()
class RHCategoryInfo(RHDisplayCategoryBase):
@classproperty
@classmethod
def _category_query_options(cls):
children_strategy = subqueryload('children')
children_strategy.load_only('id', 'parent_id', 'title', 'protection_mode', 'event_creation_restricted')
children_strategy.subqueryload('acl_entries')
children_strategy.undefer('deep_children_count')
children_strategy.undefer('deep_events_count')
children_strategy.undefer('has_events')
return (children_strategy,
load_only('id', 'parent_id', 'title', 'protection_mode'),
subqueryload('acl_entries'),
undefer('deep_children_count'),
undefer('deep_events_count'),
undefer('has_events'),
undefer('chain'))
def _process(self):
return jsonify_data(flash=False,
**serialize_category_chain(self.category, include_children=True, include_parents=True))
class RHReachableCategoriesInfo(RH):
def _get_reachable_categories(self, id_, excluded_ids):
cat = Category.query.filter_by(id=id_).options(joinedload('children').load_only('id')).one()
ids = ({c.id for c in cat.children} | {c.id for c in cat.parent_chain_query}) - excluded_ids
if not ids:
return []
return (Category.query
.filter(Category.id.in_(ids))
.options(*RHCategoryInfo._category_query_options)
.all())
def _process(self):
excluded_ids = set(request.json.get('exclude', set())) if request.json else set()
categories = self._get_reachable_categories(request.view_args['category_id'], excluded_ids=excluded_ids)
return jsonify_data(categories=[serialize_category_chain(c, include_children=True) for c in categories],
flash=False)
class RHCategorySearch(RH):
def _process(self):
q = request.args['q'].lower()
query = (Category.query
.filter(Category.title_matches(q))
.options(undefer('deep_children_count'), undefer('deep_events_count'), undefer('has_events'),
joinedload('acl_entries')))
if session.user:
query = query.order_by(Category.favorite_of.any(favorite_category_table.c.user_id == session.user.id)
.desc())
query = (query
.order_by((db.func.lower(Category.title) == q).desc(),
db.func.lower(Category.title).startswith(q).desc(),
db.func.lower(Category.title),
Category.chain_titles))
total_count = query.count()
query = query.limit(10)
return jsonify_data(categories=[serialize_category(c, with_favorite=True, with_path=True) for c in query],
total_count=total_count, flash=False)
class RHSubcatInfo(RHDisplayCategoryBase):
@classproperty
@classmethod
def _category_query_options(cls):
children_strategy = joinedload('children')
children_strategy.load_only('id')
children_strategy.undefer('deep_events_count')
return children_strategy, load_only('id', 'parent_id', 'protection_mode')
def _process(self):
event_counts = {c.id: {'value': c.deep_events_count, 'pretty': format_number(c.deep_events_count)}
for c in self.category.children}
return jsonify_data(flash=False, event_counts=event_counts)
class RHDisplayCategoryEventsBase(RHDisplayCategoryBase):
_category_query_options = (joinedload('children').load_only('id', 'title', 'protection_mode'),
undefer('attachment_count'), undefer('has_events'))
_event_query_options = (joinedload('person_links'), joinedload('series'), undefer_group('series'),
load_only('id', 'category_id', 'created_dt', 'start_dt', 'end_dt', 'timezone',
'protection_mode', 'title', 'type_', 'series_pos', 'series_count',
'own_address', 'own_venue_id', 'own_venue_name'))
def _process_args(self):
RHDisplayCategoryBase._process_args(self)
self.now = now_utc(exact=False).astimezone(self.category.display_tzinfo)
def format_event_date(self, event):
day_month = 'dd MMM'
tzinfo = self.category.display_tzinfo
start_dt = event.start_dt.astimezone(tzinfo)
end_dt = event.end_dt.astimezone(tzinfo)
if start_dt.year != end_dt.year:
return '{} - {}'.format(to_unicode(format_date(start_dt, timezone=tzinfo)),
to_unicode(format_date(end_dt, timezone=tzinfo)))
elif (start_dt.month != end_dt.month) or (start_dt.day != end_dt.day):
return '{} - {}'.format(to_unicode(format_date(start_dt, day_month, timezone=tzinfo)),
to_unicode(format_date(end_dt, day_month, timezone=tzinfo)))
else:
return to_unicode(format_date(start_dt, day_month, timezone=tzinfo))
def group_by_month(self, events):
def _format_tuple(x):
(year, month), events = x
return {'name': format_date(date(year, month, 1), format='MMMM yyyy'),
'events': list(events),
'is_current': year == self.now.year and month == self.now.month}
def _key(event):
start_dt = event.start_dt.astimezone(self.category.tzinfo)
return start_dt.year, start_dt.month
months = groupby(events, key=_key)
return map(_format_tuple, months)
def happening_now(self, event):
return event.start_dt <= self.now < event.end_dt
def is_recent(self, dt):
return dt > self.now - relativedelta(weeks=1)
class RHDisplayCategory(RHDisplayCategoryEventsBase):
def _process(self):
past_threshold = self.now - relativedelta(months=1, day=1, hour=0, minute=0)
future_threshold = self.now + relativedelta(months=1, day=1, hour=0, minute=0)
next_event_start_dt = (db.session.query(Event.start_dt)
.filter(Event.start_dt >= self.now, Event.category_id == self.category.id)
.order_by(Event.start_dt.asc(), Event.id.asc())
.first() or (None,))[0]
previous_event_start_dt = (db.session.query(Event.start_dt)
.filter(Event.start_dt < self.now, Event.category_id == self.category.id)
.order_by(Event.start_dt.desc(), Event.id.desc())
.first() or (None,))[0]
if next_event_start_dt is not None and next_event_start_dt > future_threshold:
future_threshold = next_event_start_dt + relativedelta(months=1, day=1, hour=0, minute=0)
if previous_event_start_dt is not None and previous_event_start_dt < past_threshold:
past_threshold = previous_event_start_dt.replace(day=1, hour=0, minute=0)
event_query = (Event.query.with_parent(self.category)
.options(*self._event_query_options)
.order_by(Event.start_dt.desc(), Event.id.desc()))
past_event_query = event_query.filter(Event.start_dt < past_threshold)
future_event_query = event_query.filter(Event.start_dt >= future_threshold)
current_event_query = event_query.filter(Event.start_dt >= past_threshold,
Event.start_dt < future_threshold)
json_ld_events = events = current_event_query.filter(Event.start_dt < future_threshold).all()
events_by_month = self.group_by_month(events)
future_event_count = future_event_query.count()
past_event_count = past_event_query.count()
if not session.user and future_event_count:
json_ld_events = json_ld_events + future_event_query.all()
show_future_events = bool(self.category.id in session.get('fetch_future_events_in', set()) or
(session.user and session.user.settings.get('show_future_events', False)))
show_past_events = bool(self.category.id in session.get('fetch_past_events_in', set()) or
(session.user and session.user.settings.get('show_past_events', False)))
managers = sorted(self.category.get_manager_list(), key=attrgetter('principal_type.name', 'name'))
threshold_format = '%Y-%m'
params = {'event_count': len(events),
'events_by_month': events_by_month,
'format_event_date': self.format_event_date,
'future_event_count': future_event_count,
'show_future_events': show_future_events,
'future_threshold': future_threshold.strftime(threshold_format),
'happening_now': self.happening_now,
'is_recent': self.is_recent,
'managers': managers,
'past_event_count': past_event_count,
'show_past_events': show_past_events,
'past_threshold': past_threshold.strftime(threshold_format),
'json_ld': map(serialize_event_for_json_ld, json_ld_events),
'atom_feed_url': url_for('.export_atom', self.category),
'atom_feed_title': _('Events of "{}"').format(self.category.title)}
params.update(get_base_ical_parameters(session.user, 'category',
'/export/categ/{0}.ics'.format(self.category.id), {'from': '-31d'}))
if not self.category.is_root:
return WPCategory.render_template('display/category.html', self.category, **params)
news = get_recent_news()
upcoming_events = get_upcoming_events()
return WPCategory.render_template('display/root_category.html', self.category, news=news,
upcoming_events=upcoming_events, **params)
class RHEventList(RHDisplayCategoryEventsBase):
def _parse_year_month(self, string):
try:
dt = datetime.strptime(string, '%Y-%m')
except (TypeError, ValueError):
return None
return self.category.display_tzinfo.localize(dt)
def _process_args(self):
RHDisplayCategoryEventsBase._process_args(self)
before = self._parse_year_month(request.args.get('before'))
after = self._parse_year_month(request.args.get('after'))
if before is None and after is None:
raise BadRequest('"before" or "after" parameter must be specified')
event_query = (Event.query.with_parent(self.category)
.options(*self._event_query_options)
.order_by(Event.start_dt.desc(), Event.id.desc()))
if before:
event_query = event_query.filter(Event.start_dt < before)
if after:
event_query = event_query.filter(Event.start_dt >= after)
self.events = event_query.all()
def _process(self):
events_by_month = self.group_by_month(self.events)
tpl = get_template_module('categories/display/event_list.html')
html = tpl.event_list_block(events_by_month=events_by_month, format_event_date=self.format_event_date,
is_recent=self.is_recent, happening_now=self.happening_now)
return jsonify_data(flash=False, html=html)
class RHShowEventsInCategoryBase(RHDisplayCategoryBase):
session_field = ''
def _show_events(self, show_events):
category_ids = session.setdefault(self.session_field, set())
if show_events:
category_ids.add(self.category.id)
else:
category_ids.discard(self.category.id)
session.modified = True
def _process_DELETE(self):
self._show_events(False)
def _process_PUT(self):
self._show_events(True)
class RHShowFutureEventsInCategory(RHShowEventsInCategoryBase):
session_field = 'fetch_future_events_in'
class RHShowPastEventsInCategory(RHShowEventsInCategoryBase):
session_field = 'fetch_past_events_in'
class RHExportCategoryICAL(RHDisplayCategoryBase):
def _process(self):
filename = '{}-category.ics'.format(secure_filename(self.category.title, str(self.category.id)))
buf = serialize_categories_ical([self.category.id], session.user,
Event.end_dt >= (now_utc() - timedelta(weeks=4)))
return send_file(filename, buf, 'text/calendar')
class RHExportCategoryAtom(RHDisplayCategoryBase):
def _process(self):
filename = '{}-category.atom'.format(secure_filename(self.category.title, str(self.category.id)))
buf = serialize_category_atom(self.category,
url_for(request.endpoint, self.category, _external=True),
session.user,
Event.end_dt >= now_utc())
return send_file(filename, buf, 'application/atom+xml')
class RHXMLExportCategoryInfo(RH):
def _process_args(self):
try:
id_ = int(request.args['id'])
except ValueError:
raise BadRequest('Invalid Category ID')
self.category = Category.get_one(id_, is_deleted=False)
def _process(self):
category_xml_info = XMLCategorySerializer(self.category).serialize_category()
return Response(category_xml_info, mimetype='text/xml')
class RHCategoryOverview(RHDisplayCategoryBase):
def _process_args(self):
RHDisplayCategoryBase._process_args(self)
self.detail = request.args.get('detail', 'event')
if self.detail not in ('event', 'session', 'contribution'):
raise BadRequest('Invalid detail argument')
self.period = request.args.get('period', 'day')
if self.period not in ('day', 'month', 'week'):
raise BadRequest('Invalid period argument')
if 'date' in request.args:
try:
date = datetime.strptime(request.args['date'], '%Y-%m-%d')
except ValueError:
raise BadRequest('Invalid date argument')
else:
date = datetime.now()
date = self.category.display_tzinfo.localize(date)
date = date.replace(hour=0, minute=0, second=0, microsecond=0)
if self.period == 'day':
self.start_dt = date
self.end_dt = self.start_dt + relativedelta(days=1)
elif self.period == 'week':
self.start_dt = date - relativedelta(days=date.weekday())
self.end_dt = self.start_dt + relativedelta(days=7)
elif self.period == 'month':
self.start_dt = date + relativedelta(day=1)
self.end_dt = self.start_dt + relativedelta(months=1)
def _process(self):
info = get_category_timetable([self.category.id], self.start_dt, self.end_dt, detail_level=self.detail,
tz=self.category.display_tzinfo, from_categ=self.category, grouped=False)
events = info['events']
subcategory_ids = {event.category.effective_icon_data['source_id']
for event in events if event.category.has_effective_icon}
subcategories = Category.query.filter(Category.id.in_(subcategory_ids))
events = _flat_map(partial(self._process_multiday_events, info), events)
def _event_sort_key(event):
ongoing = getattr(event, 'ongoing', False)
return (event.start_dt.date(), ongoing,
-mktime(event.first_occurence_start_dt.timetuple()) if ongoing else event.start_dt.time())
events = sorted(events, key=_event_sort_key)
params = {
'detail': self.detail,
'period': self.period,
'subcategories': subcategories,
'start_dt': self.start_dt,
            'end_dt': self.end_dt - relativedelta(days=1),
            'previous_day_url': self._other_day_url(self.start_dt - relativedelta(days=1)),
'next_day_url': self._other_day_url(self.start_dt + relativedelta(days=1)),
'previous_month_url': self._other_day_url(self.start_dt - relativedelta(months=1)),
'next_month_url': self._other_day_url(self.start_dt + relativedelta(months=1)),
'previous_year_url': self._other_day_url(self.start_dt - relativedelta(years=1)),
'next_year_url': self._other_day_url(self.start_dt + relativedelta(years=1)),
'mathjax': True
}
if self.detail != 'event':
cte = self.category.get_protection_parent_cte()
params['accessible_categories'] = {cat_id
for cat_id, prot_parent_id in db.session.query(cte)
if prot_parent_id == self.category.id}
if self.period == 'day':
return WPCategory.render_template('display/overview/day.html', self.category, events=events, **params)
elif self.period == 'week':
days = self._get_week_days()
template = 'display/overview/week.html'
params['previous_week_url'] = self._other_day_url(self.start_dt - relativedelta(days=7))
params['next_week_url'] = self._other_day_url(self.start_dt + relativedelta(days=7))
elif self.period == 'month':
days = self._get_calendar_days()
template = 'display/overview/month.html'
events_by_day = []
for day in days:
events_by_day.append((day, self._pop_head_while(lambda x: x.start_dt.date() <= day.date(), events)))
hide_weekend = (not any(map(itemgetter(1), events_by_day[5::7])) and
not any(map(itemgetter(1), events_by_day[6::7])))
if hide_weekend:
events_by_day = [x for x in events_by_day if x[0].weekday() not in (5, 6)]
return WPCategory.render_template(template, self.category, events_by_day=events_by_day,
hide_weekend=hide_weekend, **params)
def _get_week_days(self):
return self._get_days(self.start_dt, self.end_dt)
def _get_calendar_days(self):
start_dt = self.start_dt - relativedelta(days=self.start_dt.weekday())
end_dt = self.end_dt + relativedelta(days=(7 - self.end_dt.weekday()) % 7)
return self._get_days(start_dt, end_dt)
@staticmethod
def _get_days(start_dt, end_dt):
current_dt = start_dt
tz = current_dt.tzinfo
next_day = current_dt.date() + timedelta(1)
beginning_of_next_day = tz.localize(datetime.combine(next_day, time()))
while current_dt < end_dt:
yield current_dt
current_dt = beginning_of_next_day
beginning_of_next_day = current_dt + relativedelta(days=1)
@staticmethod
def _pop_head_while(predicate, list_):
res = []
while len(list_) and predicate(list_[0]):
res.append(list_[0])
list_.pop(0)
return res
def _other_day_url(self, date):
return url_for('.overview', self.category, detail=self.detail, period=self.period,
date=format_date(date, 'yyyy-MM-dd'))
def _process_multiday_events(self, info, event):
tzinfo = self.category.display_tzinfo
timetable_objects = sorted(chain(*info[event.id].values()), key=attrgetter('timetable_entry.start_dt'))
timetable_objects_by_date = {x[0]: list(x[1]) for x
in groupby(timetable_objects, key=lambda x: x.start_dt.astimezone(tzinfo).date())}
event_days = self._get_days(max(self.start_dt, event.start_dt.astimezone(tzinfo)),
min(self.end_dt, event.end_dt.astimezone(tzinfo)))
return [_EventProxy(event, day, tzinfo, timetable_objects_by_date.get(day.date(), [])) for day in event_days]
class _EventProxy(object):
def __init__(self, event, date, tzinfo, timetable_objects):
start_dt = datetime.combine(date, event.start_dt.astimezone(tzinfo).timetz())
assert date >= event.start_dt
assert date <= event.end_dt
object.__setattr__(self, '_start_dt', start_dt)
object.__setattr__(self, '_real_event', event)
object.__setattr__(self, '_event_tz_start_date', event.start_dt.astimezone(tzinfo).date())
object.__setattr__(self, '_timetable_objects', timetable_objects)
def __getattribute__(self, name):
if name == 'start_dt':
return object.__getattribute__(self, '_start_dt')
event = object.__getattribute__(self, '_real_event')
if name == 'timetable_objects':
return object.__getattribute__(self, '_timetable_objects')
if name == 'ongoing':
event_start_date = object.__getattribute__(self, '_event_tz_start_date')
return event_start_date != self.start_dt.date()
if name == 'first_occurence_start_dt':
return event.start_dt
return getattr(event, name)
def __setattr__(self, name, value):
raise AttributeError('This instance is read-only')
def __repr__(self):
return '<_EventProxy({}, {})>'.format(self.start_dt, object.__getattribute__(self, '_real_event'))
class RHCategoryCalendarView(RHDisplayCategoryBase):
def _process(self):
if not request.is_xhr:
return WPCategoryCalendar.render_template('display/calendar.html', self.category,
start_dt=request.args.get('start_dt'))
tz = self.category.display_tzinfo
start = tz.localize(dateutil.parser.parse(request.args['start'])).astimezone(utc)
end = tz.localize(dateutil.parser.parse(request.args['end'])).astimezone(utc)
query = (Event.query
.filter(Event.starts_between(start, end),
Event.is_visible_in(self.category.id),
~Event.is_deleted)
.options(load_only('id', 'title', 'start_dt', 'end_dt', 'category_id')))
events = self._get_event_data(query)
ongoing_events = (Event.query
.filter(Event.is_visible_in(self.category.id),
Event.start_dt < start,
Event.end_dt > end)
.options(load_only('id', 'title', 'start_dt', 'end_dt', 'timezone'))
.order_by(Event.title)
.all())
return jsonify_data(flash=False, events=events, ongoing_event_count=len(ongoing_events),
ongoing_events_html=self._render_ongoing_events(ongoing_events))
def _get_event_data(self, event_query):
data = []
tz = self.category.display_tzinfo
for event in event_query:
category_id = event.category_id
event_data = {'title': event.title,
'start': event.start_dt.astimezone(tz).replace(tzinfo=None).isoformat(),
'end': event.end_dt.astimezone(tz).replace(tzinfo=None).isoformat(),
'url': event.url}
colors = CALENDAR_COLOR_PALETTE[category_id % len(CALENDAR_COLOR_PALETTE)]
            event_data.update({'textColor': '#' + colors.text, 'color': '#' + colors.background})
            data.append(event_data)
return data
def _render_ongoing_events(self, ongoing_events):
template = get_template_module('categories/display/_calendar_ongoing_events.html')
return template.render_ongoing_events(ongoing_events, self.category.display_tzinfo)
class RHCategoryUpcomingEvent(RHDisplayCategoryBase):
def _process(self):
event = self._get_upcoming_event()
if event:
return redirect(event.url)
else:
flash(_('There are no upcoming events for this category'))
return redirect(self.category.url)
def _get_upcoming_event(self):
query = (Event.query
.filter(Event.is_visible_in(self.category.id),
Event.start_dt > now_utc(),
~Event.is_deleted)
.options(subqueryload('acl_entries'))
.order_by(Event.start_dt, Event.id))
res = get_n_matching(query, 1, lambda event: event.can_access(session.user))
if res:
return res[0]
| true | true |
f70082df53201fc2c56287cc36465ce81430d5e2 | 43,776 | py | Python | python/tvm/relay/op/strategy/generic.py | xndcn/tvm | 7a20b4a490acbdcefa92c9b7c68188139f4916a3 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | python/tvm/relay/op/strategy/generic.py | xndcn/tvm | 7a20b4a490acbdcefa92c9b7c68188139f4916a3 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | python/tvm/relay/op/strategy/generic.py | xndcn/tvm | 7a20b4a490acbdcefa92c9b7c68188139f4916a3 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of generic operator strategy."""
# pylint: disable=invalid-name,unused-argument
import logging
import re
from tvm import topi, _ffi, te, ir
from tvm.topi.utils import get_const_int, get_const_float, get_const_tuple, get_float_tuple
from tvm.target import generic_func, override_native_generic_func
from .. import op as _op
logger = logging.getLogger("strategy")
def naive_schedule(_, outs, target):
"""Return the naive default schedule"""
if "gpu" in target.keys:
# For GPU, we at least need thread binding to make a valid schedule.
# So the naive schedule cannot be compiled.
raise RuntimeError(
"Cannot compile for GPU targets if no tuned schedule is found. "
"Please see the warning messages above for more information about the failed workloads."
)
return te.create_schedule(outs[-1].op)
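# A minimal, self-contained sketch (the "llvm"/"cuda" target strings and the toy compute are
# illustrative assumptions): naive_schedule falls back to a plain te schedule on CPU-style
# targets but rejects GPU targets, which need thread binding as explained above.
def _example_naive_schedule():
    """Hypothetical sketch showing when the naive fallback schedule is usable."""
    import tvm
    A = te.placeholder((8,), name="A")
    B = te.compute((8,), lambda i: A[i] * 2, name="B")
    sch = naive_schedule(None, [B], tvm.target.Target("llvm"))  # plain schedule, no tuning
    try:
        naive_schedule(None, [B], tvm.target.Target("cuda"))
    except RuntimeError:
        pass  # GPU targets are refused because no thread binding is applied
    return sch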
def wrap_topi_schedule(topi_schedule):
"""Wrap TOPI schedule which doesn't use attrs"""
def wrapper(attrs, outs, target):
with target:
return topi_schedule(outs)
return wrapper
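# A minimal usage sketch (not part of the upstream API): a TOPI-style schedule function that
# only takes the output tensors is adapted by wrap_topi_schedule to the (attrs, outs, target)
# signature expected by op strategies; the "llvm" target and toy compute are assumptions.
def _example_wrap_topi_schedule():
    """Hypothetical sketch: wrap a schedule function and invoke it as a strategy would."""
    import tvm
    A = te.placeholder((16,), name="A")
    B = te.compute((16,), lambda i: A[i] + 1, name="B")
    # The wrapper ignores attrs, enters the target context and schedules the outputs.
    fschedule = wrap_topi_schedule(lambda outs: te.create_schedule([o.op for o in outs]))
    return fschedule(None, [B], tvm.target.Target("llvm"))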
def get_conv2d_in_channels(data_shape, data_layout):
"""Get conv2d input channels"""
data_shape = get_const_tuple(data_shape)
if len(data_shape) == 4:
idx = data_layout.find("C")
assert idx >= 0, "Invalid conv2d data layout {}".format(data_layout)
return data_shape[idx]
if re.match(r"NCHW\d*c", data_layout):
# NCHW[8]c
return data_shape[1] * data_shape[4]
raise ValueError("Unknown conv2d data layout {}".format(data_layout))
def get_conv2d_out_channels(kernel_shape, kernel_layout):
"""Get conv2d output channels"""
kernel_shape = get_const_tuple(kernel_shape)
if len(kernel_shape) == 4:
idx = kernel_layout.find("O")
assert idx >= 0, "Invalid conv2d kernel layout {}".format(kernel_layout)
return kernel_shape[idx]
if re.match(r"OIHW\d*i\d*o", kernel_layout):
return kernel_shape[0] * kernel_shape[5]
if re.match(r"OIHW\d*o", kernel_layout):
return kernel_shape[0] * kernel_shape[4]
raise ValueError("Unknown conv2d kernel layout {}".format(kernel_layout))
def is_depthwise_conv2d(data_shape, data_layout, kernel_shape, kernel_layout, groups):
    """Check whether a conv2d workload is depthwise (in_channels == out_channels == groups)."""
ic = get_conv2d_in_channels(data_shape, data_layout)
oc = get_conv2d_out_channels(kernel_shape, kernel_layout)
return ic == oc == groups
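# A small worked example (hypothetical shapes, not used by the strategies below): for NCHW
# data of shape (1, 32, 56, 56) and an OIHW kernel of shape (32, 1, 3, 3) with groups=32,
# the input and output channel counts both equal the group count, which is exactly the
# condition is_depthwise_conv2d checks.
def _example_is_depthwise_conv2d():
    """Hypothetical sketch of the depthwise check on plain Python shapes."""
    data_shape, kernel_shape = (1, 32, 56, 56), (32, 1, 3, 3)
    assert get_conv2d_in_channels(data_shape, "NCHW") == 32
    assert get_conv2d_out_channels(kernel_shape, "OIHW") == 32
    assert is_depthwise_conv2d(data_shape, "NCHW", kernel_shape, "OIHW", groups=32)
    return True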
@generic_func
def schedule_injective(attrs, outs, target):
"""Schedule injective ops"""
with target:
return topi.generic.schedule_injective(outs)
@generic_func
def schedule_reduce(attrs, outs, target):
"""Schedule reduction ops"""
with target:
return topi.generic.schedule_reduce(outs)
_op._schedule_injective = schedule_injective
_op._schedule_reduce = schedule_reduce
# concatenate
@generic_func
def schedule_concatenate(attrs, outs, target):
"""Schedule concatenate op"""
with target:
return topi.generic.schedule_injective(outs)
# pool
@generic_func
def schedule_pool(attrs, outs, target):
"""Schedule pooling ops"""
with target:
return topi.generic.schedule_pool(outs, attrs.layout)
# pool_grad
@generic_func
def schedule_pool_grad(attrs, outs, target):
"""Schedule pooling gradient ops"""
with target:
return topi.generic.schedule_pool_grad(outs)
# adaptive pool
@generic_func
def schedule_adaptive_pool(attrs, outs, target):
"""Schedule adaptive pooling ops"""
with target:
return topi.generic.schedule_adaptive_pool(outs)
# softmax
def wrap_compute_softmax(topi_compute):
"""Wrap softmax topi compute"""
def _compute_softmax(attrs, inputs, out_type):
axis = attrs.get_int("axis")
return [topi_compute(inputs[0], axis)]
return _compute_softmax
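# A minimal driver sketch (the _StubAttrs class is an assumption standing in for relay's
# SoftmaxAttrs): the wrapped compute only needs the integer "axis" attribute and the input
# tensor to build the TOPI softmax expression.
def _example_wrap_compute_softmax():
    """Hypothetical sketch: call the wrapped softmax compute outside of relay."""
    class _StubAttrs(object):
        def get_int(self, name):
            return -1  # softmax over the last axis
    x = te.placeholder((4, 10), name="x")
    fcompute = wrap_compute_softmax(topi.nn.softmax)
    (out,) = fcompute(_StubAttrs(), [x], None)
    return out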
@override_native_generic_func("softmax_strategy")
def softmax_strategy(attrs, inputs, out_type, target):
"""softmax generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.softmax),
wrap_topi_schedule(topi.generic.schedule_softmax),
name="softmax.generic",
)
return strategy
# log_softmax
@generic_func
def schedule_log_softmax(attrs, outs, target):
"""Schedule log_softmax op"""
with target:
return topi.generic.schedule_softmax(outs)
# lrn
@generic_func
def schedule_lrn(attrs, outs, target):
"""Schedule LRN op"""
with target:
return topi.generic.schedule_lrn(outs)
# bitpack
@generic_func
def schedule_bitpack(attrs, outs, target):
"""Schedule bitpack"""
with target:
return topi.generic.schedule_bitpack(outs)
get_auto_scheduler_rewritten_layout = _ffi.get_global_func(
"relay.attrs.get_auto_scheduler_rewritten_layout"
)
# conv2d
def wrap_compute_conv2d(
topi_compute,
need_data_layout=False,
need_out_layout=False,
has_groups=False,
need_auto_scheduler_layout=False,
):
"""Wrap conv2d topi compute"""
def _compute_conv2d(attrs, inputs, out_type):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
data_layout = attrs.get_str("data_layout")
out_layout = attrs.get_str("out_layout")
out_dtype = attrs.out_dtype
auto_scheduler_rewritten_layout = get_auto_scheduler_rewritten_layout(attrs)
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
args = [inputs[0], inputs[1], strides, padding, dilation]
if has_groups:
args.append(attrs.groups)
if need_data_layout:
args.append(data_layout)
if need_out_layout:
args.append(out_layout)
args.append(out_dtype)
if need_auto_scheduler_layout:
args.append(auto_scheduler_rewritten_layout)
return [topi_compute(*args)]
return _compute_conv2d
@override_native_generic_func("conv2d_strategy")
def conv2d_strategy(attrs, inputs, out_type, target):
"""conv2d generic strategy"""
logger.warning("conv2d is not optimized for this platform.")
strategy = _op.OpStrategy()
data, kernel = inputs
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
(dilation_h, dilation_w) = dilation
if dilation_h < 1 or dilation_w < 1:
raise ValueError("dilation should be positive value")
if groups == 1:
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_nchw),
wrap_topi_schedule(topi.generic.schedule_conv2d_nchw),
name="conv2d_nchw.generic",
)
elif layout == "NHWC":
assert kernel_layout == "HWIO"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_conv2d_nhwc),
name="conv2d_nhwc.generic",
)
elif layout == "HWCN":
assert kernel_layout == "HWIO"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_hwcn),
wrap_topi_schedule(topi.generic.schedule_conv2d_hwcn),
name="conv2d_hwcn.generic",
)
else:
raise RuntimeError("Unsupported conv2d layout {}".format(layout))
elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups):
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nchw),
wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_nchw),
name="depthwise_conv2d_nchw.generic",
)
elif layout == "NHWC":
assert kernel_layout == "HWOI"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_nhwc),
name="depthwise_conv2d_nhwc.generic",
)
else:
raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout))
else: # group_conv2d
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.group_conv2d_nchw, has_groups=True),
wrap_topi_schedule(topi.generic.schedule_group_conv2d_nchw),
name="group_conv2d_nchw.generic",
)
elif layout == "NHWC":
assert kernel_layout == "HWIO"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.group_conv2d_nhwc, has_groups=True),
wrap_topi_schedule(topi.generic.schedule_group_conv2d_nhwc),
name="group_conv2d_nhwc.generic",
)
else:
raise RuntimeError("Unsupported group_conv2d layout {}".format(layout))
return strategy
# conv2d_NCHWc
@override_native_generic_func("conv2d_NCHWc_strategy")
def conv2d_NCHWc_strategy(attrs, inputs, out_type, target):
"""conv2d_NCHWc generic strategy"""
logger.warning("conv2d_NCHWc is not optimized for this platform.")
strategy = _op.OpStrategy()
if inputs[0].dtype == "int8" or inputs[0].dtype == "uint8":
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_NCHWc_int8, True, True),
wrap_topi_schedule(topi.generic.schedule_conv2d_NCHWc_int8),
name="conv2d_NCHWc_int8.generic",
)
else:
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_NCHWc, True, True),
wrap_topi_schedule(topi.generic.schedule_conv2d_NCHWc),
name="conv2d_NCHWc.generic",
)
return strategy
# depthwise_conv2d_NCHWc
@override_native_generic_func("depthwise_conv2d_NCHWc_strategy")
def depthwise_conv2d_NCHWc_strategy(attrs, inputs, out_type, target):
    """depthwise_conv2d_NCHWc generic strategy"""
logger.warning("depthwise_conv2d_NCHWc is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_NCHWc, True, True),
wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_NCHWc),
name="depthwise_conv2d_NCHWc.generic",
)
return strategy
# conv2d_winograd_without_weight_transform
@override_native_generic_func("conv2d_winograd_without_weight_transform_strategy")
def conv2d_winograd_without_weight_transfrom_strategy(attrs, inputs, out_type, target):
    """conv2d_winograd_without_weight_transform generic strategy"""
    raise ValueError("No generic implementation for conv2d_winograd_without_weight_transform")
# conv2d_gemm_without_weight_transform
@override_native_generic_func("conv2d_gemm_without_weight_transform_strategy")
def conv2d_gemm_without_weight_transform_strategy(attrs, inputs, out_type, target):
    """conv2d_gemm_without_weight_transform generic strategy"""
    raise ValueError("No generic implementation for conv2d_gemm_without_weight_transform")
# conv2d_winograd_weight_transform
@generic_func
def schedule_conv2d_winograd_weight_transform(attrs, outs, target):
"""Schedule conv2d_winograd_weight_transform"""
with target:
return topi.generic.schedule_conv2d_winograd_weight_transform(outs)
# conv2d_winograd_nnpack_weight_transform
@generic_func
def schedule_conv2d_winograd_nnpack_weight_transform(attrs, outs, target):
"""Schedule conv2d_winograd_nnpack_weight_transform"""
with target:
return topi.generic.schedule_conv2d_winograd_nnpack_weight_transform(outs)
# conv2d_gemm_weight_transform
@generic_func
def schedule_conv2d_gemm_weight_transform(attrs, outs, target):
"""Schedule conv2d_gemm_weight_transform"""
with target:
return topi.generic.schedule_conv2d_gemm_weight_transform(outs)
# deformable_conv2d
def wrap_compute_deformable_conv2d(topi_compute):
"""wrap deformable_conv2d topi compute"""
def _compute_deformable_conv2d(attrs, inputs, out_dtype):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
deformable_groups = attrs.deformable_groups
groups = attrs.groups
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
out = topi_compute(
inputs[0],
inputs[1],
inputs[2],
strides,
padding,
dilation,
deformable_groups,
groups,
out_dtype,
)
return [out]
return _compute_deformable_conv2d
@override_native_generic_func("deformable_conv2d_strategy")
def deformable_conv2d_strategy(attrs, inputs, out_type, target):
"""deformable_conv2d generic strategy"""
layout = attrs.data_layout
strategy = _op.OpStrategy()
if layout == "NCHW":
strategy.add_implementation(
wrap_compute_deformable_conv2d(topi.nn.deformable_conv2d_nchw),
wrap_topi_schedule(topi.generic.schedule_deformable_conv2d_nchw),
name="deformable_conv2d_nchw.generic",
)
elif layout == "NHWC":
# This implementation should never be picked by autotvm
strategy.add_implementation(
wrap_compute_deformable_conv2d(topi.nn.deformable_conv2d_nhwc),
naive_schedule,
name="deformable_conv2d_nhwc.generic",
)
else:
raise RuntimeError("Layout %s is not supported in deformable conv2d" % layout)
return strategy
# conv2d_transpose
def wrap_compute_conv2d_transpose(topi_compute):
"""wrap conv2d_transpose topi compute"""
def compute_conv2d_transpose(attrs, inputs, out_dtype):
"""Compute definition of conv2d_transpose"""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
output_padding = get_const_tuple(attrs.output_padding)
out = topi_compute(inputs[0], inputs[1], strides, padding, out_dtype, output_padding)
return [out]
return compute_conv2d_transpose
@override_native_generic_func("conv2d_transpose_strategy")
def conv2d_transpose_strategy(attrs, inputs, out_type, target):
"""conv2d_transpose generic strategy"""
logger.warning("conv2d_transpose is not optimized for this platform.")
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
assert layout == "NCHW", "only support nchw for now"
assert dilation == (1, 1), "not support dilate now"
assert groups == 1, "only support groups == 1 for now"
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv2d_transpose(topi.nn.conv2d_transpose_nchw),
wrap_topi_schedule(topi.generic.schedule_conv2d_transpose_nchw),
name="conv2d_transpose_nchw.generic",
)
return strategy
# conv3d_transpose
def wrap_compute_conv3d_transpose(topi_compute):
"""wrap conv3d_transpose topi compute"""
def compute_conv3d_transpose(attrs, inputs, out_dtype):
"""Compute definition of conv3d_transpose"""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
output_padding = get_const_tuple(attrs.output_padding)
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
out = topi_compute(inputs[0], inputs[1], strides, padding, out_dtype, output_padding)
return [out]
return compute_conv3d_transpose
@override_native_generic_func("conv3d_transpose_strategy")
def conv3d_transpose_strategy(attrs, inputs, out_type, target):
"""conv3d_transpose generic strategy"""
logger.warning("conv3d_transpose is not optimized for this platform.")
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
assert layout == "NCDHW", "only support ncdhw for now"
assert dilation == (1, 1, 1), "not support dilate now"
assert groups == 1, "only support groups == 1 for now"
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv3d_transpose(topi.nn.conv3d_transpose_ncdhw),
wrap_topi_schedule(topi.generic.schedule_conv3d_transpose_ncdhw),
name="conv3d_transpose_ncdhw.generic",
)
return strategy
# conv3d
def wrap_compute_conv3d(topi_compute, need_layout=False):
"""wrap conv3d topi compute"""
def _compute_conv3d(attrs, inputs, out_type):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
(dilation_d, dilation_h, dilation_w) = dilation
if dilation_d < 1 or dilation_h < 1 or dilation_w < 1:
raise ValueError("Dilation should be positive value")
if groups != 1:
raise ValueError("Not support arbitrary group number for conv3d")
if need_layout:
out = topi_compute(inputs[0], inputs[1], strides, padding, dilation, layout, out_dtype)
else:
out = topi_compute(inputs[0], inputs[1], strides, padding, dilation, out_dtype)
return [out]
return _compute_conv3d
@override_native_generic_func("conv3d_strategy")
def conv3d_strategy(attrs, inputs, out_type, target):
"""conv3d generic strategy"""
logger.warning("conv3d is not optimized for this platform.")
strategy = _op.OpStrategy()
layout = attrs.data_layout
if layout == "NCDHW":
strategy.add_implementation(
wrap_compute_conv3d(topi.nn.conv3d_ncdhw),
wrap_topi_schedule(topi.generic.schedule_conv3d_ncdhw),
name="conv3d_ncdhw.generic",
)
elif layout == "NDHWC":
strategy.add_implementation(
wrap_compute_conv3d(topi.nn.conv3d_ndhwc),
wrap_topi_schedule(topi.generic.schedule_conv3d_ndhwc),
name="conv3d_ndhwc.generic",
)
else:
raise ValueError("Not support this layout {} yet".format(layout))
return strategy
# conv3d_winograd_without_weight_transform
@override_native_generic_func("conv3d_winograd_without_weight_transform_strategy")
def conv3d_winograd_without_weight_transfrom_strategy(attrs, inputs, out_type, target):
    """conv3d_winograd_without_weight_transform generic strategy"""
    raise ValueError("No generic implementation for conv3d_winograd_without_weight_transform")
# conv3d_winograd_weight_transform
@generic_func
def schedule_conv3d_winograd_weight_transform(attrs, outs, target):
"""Schedule conv3d_winograd_weight_transform"""
with target:
return topi.generic.schedule_conv3d_winograd_weight_transform(outs)
# conv1d
def wrap_compute_conv1d(topi_compute):
"""wrap conv1d topi compute"""
def _compute_conv1d(attrs, inputs, out_type):
"""Compute definition of conv1d"""
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
dilation = get_const_tuple(attrs.dilation)
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
return [topi_compute(inputs[0], inputs[1], strides, padding, dilation, out_dtype)]
return _compute_conv1d
@override_native_generic_func("conv1d_strategy")
def conv1d_strategy(attrs, inputs, out_type, target):
"""conv1d generic strategy"""
logger.warning("conv1d is not optimized for this platform.")
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
if dilation[0] < 1:
raise ValueError("dilation should be a positive value")
strategy = _op.OpStrategy()
if layout == "NCW":
strategy.add_implementation(
wrap_compute_conv1d(topi.nn.conv1d_ncw),
wrap_topi_schedule(topi.generic.schedule_conv1d_ncw),
name="conv1d_ncw.generic",
)
elif layout == "NWC":
strategy.add_implementation(
wrap_compute_conv1d(topi.nn.conv1d_nwc),
wrap_topi_schedule(topi.generic.schedule_conv1d_nwc),
name="conv1d_nwc.generic",
)
else:
raise ValueError("Unsupported conv1d layout {}".format(layout))
return strategy
# conv1d_transpose
def wrap_compute_conv1d_transpose(topi_compute):
"""wrap conv1d_transpose topi compute"""
    def _compute_conv1d_transpose(attrs, inputs, out_type):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
output_padding = get_const_tuple(attrs.output_padding)
out = topi_compute(inputs[0], inputs[1], strides, padding, out_dtype, output_padding)
return [out]
    return _compute_conv1d_transpose
@override_native_generic_func("conv1d_transpose_strategy")
def conv1d_transpose_strategy(attrs, inputs, out_type, target):
"""conv1d_transpose generic strategy"""
logger.warning("conv1d_transpose is not optimized for this platform.")
strategy = _op.OpStrategy()
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
assert layout == "NCW", "conv1d_transpose ncw only supported"
assert dilation == (1,), "conv1d_transpose dilation is not supported"
assert groups == 1, "conv1d_transpose groups == 1 only supported"
strategy.add_implementation(
wrap_compute_conv1d_transpose(topi.nn.conv1d_transpose_ncw),
wrap_topi_schedule(topi.generic.schedule_conv1d_transpose_ncw),
name="conv1d_transpose_ncw.generic",
)
return strategy
# dilation2d
def wrap_compute_dilation2d(topi_compute, need_data_layout=False):
"""Wrap dilation2d topi compute"""
def _compute_dilation2d(attrs, inputs, out_type):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilations = get_const_tuple(attrs.dilations)
data_layout = attrs.get_str("data_layout")
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
args = [inputs[0], inputs[1], strides, padding, dilations]
if need_data_layout:
args.append(data_layout)
args.append(out_dtype)
return [topi_compute(*args)]
return _compute_dilation2d
@override_native_generic_func("dilation2d_strategy")
def dilation2d_strategy(attrs, inputs, out_type, target):
    """dilation2d generic strategy"""
    logger.warning("dilation2d is not optimized for this platform.")
strategy = _op.OpStrategy()
dilations = get_const_tuple(attrs.dilations)
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
assert layout in ["NCHW", "NHWC"]
(dilation_h, dilation_w) = dilations
if dilation_h < 1 or dilation_w < 1:
raise ValueError("dilation should be positive value")
if layout == "NCHW":
assert kernel_layout == "IHW"
strategy.add_implementation(
wrap_compute_dilation2d(topi.image.dilation2d_nchw),
wrap_topi_schedule(topi.generic.schedule_dilation2d_nchw),
name="dilation2d_nchw.generic",
)
elif layout == "NHWC":
assert kernel_layout == "HWI"
strategy.add_implementation(
wrap_compute_dilation2d(topi.image.dilation2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_dilation2d_nhwc),
name="dilation2d_nhwc.generic",
)
else:
raise RuntimeError("Unsupported dilation2d layout {}".format(layout))
return strategy
# dense
def wrap_compute_dense(topi_compute):
"""wrap dense topi compute"""
def _compute_dense(attrs, inputs, out_type):
"""Compute definition of dense"""
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
return [topi_compute(inputs[0], inputs[1], None, out_dtype)]
return _compute_dense
@override_native_generic_func("dense_strategy")
def dense_strategy(attrs, inputs, out_type, target):
"""dense generic strategy"""
logger.warning("dense is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_dense(topi.nn.dense),
wrap_topi_schedule(topi.generic.schedule_dense),
name="dense.generic",
)
return strategy
# batch_matmul
def wrap_compute_batch_matmul(topi_compute):
"""wrap batch_matmul topi compute"""
def _compute_batch_matmul(attrs, inputs, out_type):
return [topi_compute(inputs[0], inputs[1], out_type.shape)]
return _compute_batch_matmul
@override_native_generic_func("batch_matmul_strategy")
def batch_matmul_strategy(attrs, inputs, out_type, target):
"""batch_matmul generic strategy"""
logger.warning("batch_matmul is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_batch_matmul(topi.nn.batch_matmul),
wrap_topi_schedule(topi.generic.schedule_batch_matmul),
name="batch_matmul.generic",
)
return strategy
# sparse dense
def wrap_compute_sparse_dense(topi_compute):
"""wrap sparse dense topi compute"""
def _compute_sparse_dense(attrs, inputs, out_type):
return [topi_compute(inputs[0], inputs[1], inputs[2], inputs[3], attrs["sparse_lhs"])]
return _compute_sparse_dense
@override_native_generic_func("sparse_dense_strategy")
def sparse_dense_strategy(attrs, inputs, out_type, target):
"""sparse dense generic strategy"""
logger.warning("sparse dense is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sparse_dense(topi.nn.sparse_dense),
wrap_topi_schedule(topi.generic.schedule_sparse_dense),
name="sparse_dense.generic",
)
return strategy
@override_native_generic_func("sparse_dense_padded_strategy")
def sparse_dense_padded_strategy(attrs, inputs, out_type, target):
"""sparse dense padded generic strategy"""
raise NotImplementedError("sparse_dense_padded is only implemented for cuda")
# sparse_transpose
@generic_func
def schedule_sparse_transpose(attrs, outs, target):
"""schedule sparse_transpose"""
with target:
return topi.generic.schedule_sparse_transpose(outs)
# argsort
def wrap_compute_argsort(topi_compute):
"""Wrap argsort topi compute"""
def _compute_argsort(attrs, inputs, _):
axis = get_const_int(attrs.axis)
is_ascend = bool(get_const_int(attrs.is_ascend))
dtype = attrs.dtype
return [topi_compute(inputs[0], axis=axis, is_ascend=is_ascend, dtype=dtype)]
return _compute_argsort
@override_native_generic_func("argsort_strategy")
def argsort_strategy(attrs, inputs, out_type, target):
"""argsort generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_argsort(topi.argsort),
wrap_topi_schedule(topi.generic.schedule_argsort),
name="argsort.generic",
)
return strategy
# topk
def wrap_compute_topk(topi_compute):
"""Wrap topk compute"""
def _compute_topk(attrs, inputs, out_type):
if attrs.k is not None:
k = attrs.k
else:
k = inputs[1]
axis = get_const_int(attrs.axis)
ret_type = attrs.ret_type
is_ascend = bool(get_const_int(attrs.is_ascend))
dtype = attrs.dtype
out = topi_compute(inputs[0], k, axis, ret_type, is_ascend, dtype)
out = out if isinstance(out, list) else [out]
return out
return _compute_topk
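# A driver sketch (the _StubAttrs class is an assumption standing in for relay's TopKAttrs):
# k may come either from the attributes or, when attrs.k is None, from the second input
# tensor; ret_type="both" yields both the values and the indices.
def _example_wrap_compute_topk():
    """Hypothetical sketch: call the wrapped top-k compute with a static k."""
    class _StubAttrs(object):
        k = 3
        axis = -1
        ret_type = "both"
        is_ascend = 0
        dtype = "int64"
    x = te.placeholder((4, 10), name="x")
    fcompute = wrap_compute_topk(topi.topk)
    values, indices = fcompute(_StubAttrs(), [x], None)
    return values, indices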
@override_native_generic_func("topk_strategy")
def topk_strategy(attrs, inputs, out_type, target):
"""topk generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_topk(topi.topk),
wrap_topi_schedule(topi.generic.schedule_topk),
name="topk.generic",
)
return strategy
# multibox_prior
def wrap_compute_multibox_prior(topi_compute):
"""Wrap multibox_prior compute"""
def _compute_multibox_prior(attrs, inputs, _):
"""Compute definition of multibox_prior"""
sizes = get_float_tuple(attrs.sizes)
ratios = get_float_tuple(attrs.ratios)
steps = get_float_tuple(attrs.steps)
offsets = get_float_tuple(attrs.offsets)
clip = bool(get_const_int(attrs.clip))
return [topi_compute(inputs[0], sizes, ratios, steps, offsets, clip)]
return _compute_multibox_prior
@override_native_generic_func("multibox_prior_strategy")
def multibox_prior_strategy(attrs, inputs, out_type, target):
"""multibox_prior generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_multibox_prior(topi.vision.ssd.multibox_prior),
wrap_topi_schedule(topi.generic.schedule_multibox_prior),
name="multibox_prior.generic",
)
return strategy
# multibox_transform_loc
def wrap_compute_multibox_transform_loc(topi_compute):
"""Wrap multibox_transform_loc compute"""
def _compute_multibox_transform_loc(attrs, inputs, _):
"""Compute definition of multibox_detection"""
clip = bool(get_const_int(attrs.clip))
threshold = get_const_float(attrs.threshold)
variances = get_float_tuple(attrs.variances)
return topi_compute(inputs[0], inputs[1], inputs[2], clip, threshold, variances)
return _compute_multibox_transform_loc
@override_native_generic_func("multibox_transform_loc_strategy")
def multibox_transform_loc_strategy(attrs, inputs, out_type, target):
"""schedule multibox_transform_loc"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_multibox_transform_loc(topi.vision.ssd.multibox_transform_loc),
wrap_topi_schedule(topi.generic.schedule_multibox_transform_loc),
name="multibox_transform_loc.generic",
)
return strategy
# get_valid_counts
def wrap_compute_get_valid_counts(topi_compute):
"""wrap get_valid_counts topi compute"""
def _compute_get_valid_counts(attrs, inputs, out_type):
score_threshold = inputs[1]
id_index = get_const_int(attrs.id_index)
score_index = get_const_int(attrs.score_index)
if attrs.score_threshold is not None:
score_threshold = get_const_float(attrs.score_threshold)
return topi_compute(inputs[0], score_threshold, id_index, score_index)
return _compute_get_valid_counts
@override_native_generic_func("get_valid_counts_strategy")
def get_valid_counts_strategy(attrs, inputs, out_type, target):
"""get_valid_counts generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_get_valid_counts(topi.vision.get_valid_counts),
wrap_topi_schedule(topi.generic.schedule_get_valid_counts),
name="get_valid_counts.generic",
)
return strategy
# non-maximum suppression
def wrap_compute_nms(topi_compute):
"""wrap nms topi compute"""
def _compute_nms(attrs, inputs, out_type):
max_output_size = inputs[3]
iou_threshold = inputs[4]
if attrs.max_output_size is not None:
max_output_size = attrs.max_output_size
if attrs.iou_threshold is not None:
iou_threshold = get_const_float(attrs.iou_threshold)
return_indices = bool(get_const_int(attrs.return_indices))
force_suppress = bool(get_const_int(attrs.force_suppress))
top_k = get_const_int(attrs.top_k)
coord_start = get_const_int(attrs.coord_start)
score_index = get_const_int(attrs.score_index)
id_index = get_const_int(attrs.id_index)
invalid_to_bottom = bool(get_const_int(attrs.invalid_to_bottom))
if return_indices:
return topi_compute(
inputs[0],
inputs[1],
inputs[2],
max_output_size,
iou_threshold,
force_suppress,
top_k,
coord_start,
score_index,
id_index,
return_indices,
invalid_to_bottom,
)
return [
topi_compute(
inputs[0],
inputs[1],
inputs[2],
max_output_size,
iou_threshold,
force_suppress,
top_k,
coord_start,
score_index,
id_index,
return_indices,
invalid_to_bottom,
)
]
return _compute_nms
@override_native_generic_func("non_max_suppression_strategy")
def nms_strategy(attrs, inputs, out_type, target):
"""nms generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_nms(topi.vision.non_max_suppression),
wrap_topi_schedule(topi.generic.schedule_nms),
name="nms.generic",
)
return strategy
# roi_align
def wrap_compute_roi_align(topi_compute):
"""wrap roi_align topi compute"""
def _compute_roi_align(attrs, inputs, out_type):
assert attrs.layout == "NCHW"
pooled_size = get_const_tuple(attrs.pooled_size)
return [
topi_compute(
inputs[0],
inputs[1],
pooled_size=pooled_size,
spatial_scale=attrs.spatial_scale,
sample_ratio=attrs.sample_ratio,
)
]
return _compute_roi_align
@override_native_generic_func("roi_align_strategy")
def roi_align_strategy(attrs, inputs, out_type, target):
"""roi_align generic strategy"""
strategy = _op.OpStrategy()
layout = attrs.layout
assert layout == "NCHW", "only support nchw for now"
strategy.add_implementation(
wrap_compute_roi_align(topi.vision.rcnn.roi_align_nchw),
wrap_topi_schedule(topi.generic.schedule_roi_align),
name="roi_align.generic",
)
return strategy
# roi_pool
@generic_func
def schedule_roi_pool(attrs, outs, target):
"""schedule roi_pool"""
with target:
return topi.generic.schedule_roi_pool(outs)
# proposal
def wrap_compute_proposal(topi_compute):
"""wrap proposal topi compute"""
def _compute_proposal(attrs, inputs, out_type):
scales = get_float_tuple(attrs.scales)
ratios = get_float_tuple(attrs.ratios)
feature_stride = attrs.feature_stride
threshold = attrs.threshold
rpn_pre_nms_top_n = attrs.rpn_pre_nms_top_n
rpn_post_nms_top_n = attrs.rpn_post_nms_top_n
rpn_min_size = attrs.rpn_min_size
iou_loss = bool(get_const_int(attrs.iou_loss))
return [
topi_compute(
inputs[0],
inputs[1],
inputs[2],
scales,
ratios,
feature_stride,
threshold,
rpn_pre_nms_top_n,
rpn_post_nms_top_n,
rpn_min_size,
iou_loss,
)
]
return _compute_proposal
@override_native_generic_func("proposal_strategy")
def proposal_strategy(attrs, inputs, out_type, target):
"""proposal generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_proposal(topi.vision.rcnn.proposal),
wrap_topi_schedule(topi.generic.schedule_proposal),
name="proposal.generic",
)
return strategy
# scatter
@override_native_generic_func("scatter_strategy")
def scatter_strategy(attrs, outs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scatter(topi.scatter),
wrap_topi_schedule(topi.generic.schedule_scatter),
name="scatter.generic",
)
return strategy
def wrap_compute_scatter(topi_compute):
"""Wrap scatter topi compute"""
def _compute_scatter(attrs, inputs, _):
return [topi_compute(inputs[0], inputs[1], inputs[2], axis=attrs.axis)]
return _compute_scatter
@override_native_generic_func("scatter_add_strategy")
def scatter_add_strategy(attrs, outs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scatter(topi.scatter_add),
wrap_topi_schedule(topi.generic.schedule_scatter),
name="scatter_add.generic",
)
return strategy
# scatter_nd
@override_native_generic_func("scatter_nd_strategy")
def scatter_nd_strategy(attrs, inputs, out_type, target):
"""scatter_nd generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scatter_nd(topi.scatter_nd),
wrap_topi_schedule(topi.generic.schedule_extern),
name="scatter_nd.generic",
)
return strategy
def wrap_compute_scatter_nd(topi_compute):
"""Wrap scatter_nd topi compute"""
def _compute_scatter_nd(attrs, inputs, _):
return [topi_compute(inputs[0], inputs[1], attrs.out_shape)]
return _compute_scatter_nd
# bitserial_conv2d
def wrap_compute_bitserial_conv2d(topi_compute):
"""wrap bitserial_conv2d topi compute"""
def compute_bitserial_conv2d(attrs, inputs, out_dtype):
"""Compute definition for bitserial conv2d."""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
activation_bits = attrs.activation_bits
weight_bits = attrs.weight_bits
pack_dtype = attrs.pack_dtype
out_dtype = attrs.out_dtype
unipolar = attrs.unipolar
return [
topi_compute(
inputs[0],
inputs[1],
strides,
padding,
activation_bits,
weight_bits,
pack_dtype,
out_dtype,
unipolar,
)
]
return compute_bitserial_conv2d
@override_native_generic_func("bitserial_conv2d_strategy")
def bitserial_conv2d_strategy(attrs, inputs, out_type, target):
"""bitserial_conv2d generic strategy"""
logger.warning("bitserial_conv2d is not optimized for this platform.")
strategy = _op.OpStrategy()
layout = attrs.data_layout
if layout == "NCHW":
strategy.add_implementation(
wrap_compute_bitserial_conv2d(topi.nn.bitserial_conv2d_nchw),
wrap_topi_schedule(topi.generic.schedule_bitserial_conv2d_nchw),
name="bitserial_conv2d_nchw.generic",
)
elif layout == "NHWC":
strategy.add_implementation(
wrap_compute_bitserial_conv2d(topi.nn.bitserial_conv2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_bitserial_conv2d_nhwc),
name="bitserial_conv2d_nhwc.generic",
)
else:
raise ValueError("Data layout {} not supported.".format(layout))
return strategy
# bitserial_dense
def wrap_compute_bitserial_dense(topi_compute):
"""wrap bitserial_dense topi compute"""
def compute_bitserial_dense(attrs, inputs, out_type):
"""Compute definition of bitserial dense"""
data_bits = attrs.data_bits
weight_bits = attrs.weight_bits
pack_dtype = attrs.pack_dtype
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
unipolar = attrs.unipolar
return [
topi_compute(
inputs[0], inputs[1], data_bits, weight_bits, pack_dtype, out_dtype, unipolar
)
]
return compute_bitserial_dense
@override_native_generic_func("bitserial_dense_strategy")
def bitserial_dense_strategy(attrs, inputs, out_type, target):
"""bitserial_dense generic strategy"""
logger.warning("bitserial_dense is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_bitserial_dense(topi.nn.bitserial_dense),
wrap_topi_schedule(topi.generic.schedule_bitserial_dense),
name="bitserial_dense.generic",
)
return strategy
# correlation
def wrap_compute_correlation(topi_compute):
"""wrap correlation topi compute"""
def _compute_correlation(attrs, inputs, out_type):
kernel_size = attrs.kernel_size
max_displacement = attrs.max_displacement
stride1 = attrs.stride1
stride2 = attrs.stride2
padding = get_const_tuple(attrs.padding)
is_multiply = attrs.is_multiply
return [
topi_compute(
inputs[0],
inputs[1],
kernel_size,
max_displacement,
stride1,
stride2,
padding,
is_multiply,
)
]
return _compute_correlation
@override_native_generic_func("correlation_strategy")
def correlation_strategy(attrs, inputs, out_type, target):
"""correlation generic strategy"""
logger.warning("correlation is not optimized for this platform.")
layout = attrs.layout
assert layout == "NCHW", "Only support NCHW layout"
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_correlation(topi.nn.correlation_nchw),
wrap_topi_schedule(topi.generic.schedule_correlation_nchw),
name="correlation.generic",
)
return strategy
# argwhere
def wrap_compute_argwhere(topi_compute):
"""wrap argwhere topi compute"""
def _compute_argwhere(attrs, inputs, out_type):
output_shape = []
for s in out_type.shape:
if hasattr(s, "value"):
output_shape.append(s)
else:
output_shape.append(te.var("any_dim", "int32"))
new_output_type = ir.TensorType(output_shape, "int32")
return [topi_compute(new_output_type, inputs[0])]
return _compute_argwhere
@override_native_generic_func("argwhere_strategy")
def argwhere_strategy(attrs, inputs, out_type, target):
"""argwhere generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_argwhere(topi.argwhere),
wrap_topi_schedule(topi.generic.schedule_argwhere),
name="argwhere.generic",
)
return strategy
| 34.442172 | 100 | 0.690927 | import logging
import re
from tvm import topi, _ffi, te, ir
from tvm.topi.utils import get_const_int, get_const_float, get_const_tuple, get_float_tuple
from tvm.target import generic_func, override_native_generic_func
from .. import op as _op
logger = logging.getLogger("strategy")
def naive_schedule(_, outs, target):
if "gpu" in target.keys:
raise RuntimeError(
"Cannot compile for GPU targets if no tuned schedule is found. "
"Please see the warning messages above for more information about the failed workloads."
)
return te.create_schedule(outs[-1].op)
def wrap_topi_schedule(topi_schedule):
def wrapper(attrs, outs, target):
with target:
return topi_schedule(outs)
return wrapper
def get_conv2d_in_channels(data_shape, data_layout):
data_shape = get_const_tuple(data_shape)
if len(data_shape) == 4:
idx = data_layout.find("C")
assert idx >= 0, "Invalid conv2d data layout {}".format(data_layout)
return data_shape[idx]
if re.match(r"NCHW\d*c", data_layout):
return data_shape[1] * data_shape[4]
raise ValueError("Unknown conv2d data layout {}".format(data_layout))
def get_conv2d_out_channels(kernel_shape, kernel_layout):
kernel_shape = get_const_tuple(kernel_shape)
if len(kernel_shape) == 4:
idx = kernel_layout.find("O")
assert idx >= 0, "Invalid conv2d kernel layout {}".format(kernel_layout)
return kernel_shape[idx]
if re.match(r"OIHW\d*i\d*o", kernel_layout):
return kernel_shape[0] * kernel_shape[5]
if re.match(r"OIHW\d*o", kernel_layout):
return kernel_shape[0] * kernel_shape[4]
raise ValueError("Unknown conv2d kernel layout {}".format(kernel_layout))
def is_depthwise_conv2d(data_shape, data_layout, kernel_shape, kernel_layout, groups):
ic = get_conv2d_in_channels(data_shape, data_layout)
oc = get_conv2d_out_channels(kernel_shape, kernel_layout)
return ic == oc == groups
@generic_func
def schedule_injective(attrs, outs, target):
with target:
return topi.generic.schedule_injective(outs)
@generic_func
def schedule_reduce(attrs, outs, target):
with target:
return topi.generic.schedule_reduce(outs)
_op._schedule_injective = schedule_injective
_op._schedule_reduce = schedule_reduce
@generic_func
def schedule_concatenate(attrs, outs, target):
with target:
return topi.generic.schedule_injective(outs)
@generic_func
def schedule_pool(attrs, outs, target):
with target:
return topi.generic.schedule_pool(outs, attrs.layout)
@generic_func
def schedule_pool_grad(attrs, outs, target):
with target:
return topi.generic.schedule_pool_grad(outs)
@generic_func
def schedule_adaptive_pool(attrs, outs, target):
with target:
return topi.generic.schedule_adaptive_pool(outs)
def wrap_compute_softmax(topi_compute):
def _compute_softmax(attrs, inputs, out_type):
axis = attrs.get_int("axis")
return [topi_compute(inputs[0], axis)]
return _compute_softmax
@override_native_generic_func("softmax_strategy")
def softmax_strategy(attrs, inputs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.softmax),
wrap_topi_schedule(topi.generic.schedule_softmax),
name="softmax.generic",
)
return strategy
@generic_func
def schedule_log_softmax(attrs, outs, target):
with target:
return topi.generic.schedule_softmax(outs)
@generic_func
def schedule_lrn(attrs, outs, target):
with target:
return topi.generic.schedule_lrn(outs)
@generic_func
def schedule_bitpack(attrs, outs, target):
with target:
return topi.generic.schedule_bitpack(outs)
get_auto_scheduler_rewritten_layout = _ffi.get_global_func(
"relay.attrs.get_auto_scheduler_rewritten_layout"
)
def wrap_compute_conv2d(
topi_compute,
need_data_layout=False,
need_out_layout=False,
has_groups=False,
need_auto_scheduler_layout=False,
):
def _compute_conv2d(attrs, inputs, out_type):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
data_layout = attrs.get_str("data_layout")
out_layout = attrs.get_str("out_layout")
out_dtype = attrs.out_dtype
auto_scheduler_rewritten_layout = get_auto_scheduler_rewritten_layout(attrs)
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
args = [inputs[0], inputs[1], strides, padding, dilation]
if has_groups:
args.append(attrs.groups)
if need_data_layout:
args.append(data_layout)
if need_out_layout:
args.append(out_layout)
args.append(out_dtype)
if need_auto_scheduler_layout:
args.append(auto_scheduler_rewritten_layout)
return [topi_compute(*args)]
return _compute_conv2d
@override_native_generic_func("conv2d_strategy")
def conv2d_strategy(attrs, inputs, out_type, target):
logger.warning("conv2d is not optimized for this platform.")
strategy = _op.OpStrategy()
data, kernel = inputs
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
(dilation_h, dilation_w) = dilation
if dilation_h < 1 or dilation_w < 1:
raise ValueError("dilation should be positive value")
if groups == 1:
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_nchw),
wrap_topi_schedule(topi.generic.schedule_conv2d_nchw),
name="conv2d_nchw.generic",
)
elif layout == "NHWC":
assert kernel_layout == "HWIO"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_conv2d_nhwc),
name="conv2d_nhwc.generic",
)
elif layout == "HWCN":
assert kernel_layout == "HWIO"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_hwcn),
wrap_topi_schedule(topi.generic.schedule_conv2d_hwcn),
name="conv2d_hwcn.generic",
)
else:
raise RuntimeError("Unsupported conv2d layout {}".format(layout))
elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups):
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nchw),
wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_nchw),
name="depthwise_conv2d_nchw.generic",
)
elif layout == "NHWC":
assert kernel_layout == "HWOI"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_nhwc),
name="depthwise_conv2d_nhwc.generic",
)
else:
raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout))
    else:
        if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.group_conv2d_nchw, has_groups=True),
wrap_topi_schedule(topi.generic.schedule_group_conv2d_nchw),
name="group_conv2d_nchw.generic",
)
elif layout == "NHWC":
assert kernel_layout == "HWIO"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.group_conv2d_nhwc, has_groups=True),
wrap_topi_schedule(topi.generic.schedule_group_conv2d_nhwc),
name="group_conv2d_nhwc.generic",
)
else:
raise RuntimeError("Unsupported group_conv2d layout {}".format(layout))
return strategy
@override_native_generic_func("conv2d_NCHWc_strategy")
def conv2d_NCHWc_strategy(attrs, inputs, out_type, target):
logger.warning("conv2d_NCHWc is not optimized for this platform.")
strategy = _op.OpStrategy()
if inputs[0].dtype == "int8" or inputs[0].dtype == "uint8":
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_NCHWc_int8, True, True),
wrap_topi_schedule(topi.generic.schedule_conv2d_NCHWc_int8),
name="conv2d_NCHWc_int8.generic",
)
else:
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_NCHWc, True, True),
wrap_topi_schedule(topi.generic.schedule_conv2d_NCHWc),
name="conv2d_NCHWc.generic",
)
return strategy
@override_native_generic_func("depthwise_conv2d_NCHWc_strategy")
def depthwise_conv2d_NCHWc_strategy(attrs, inputs, out_type, target):
logger.warning("depthwise_conv2d_NCHWc is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_NCHWc, True, True),
wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_NCHWc),
name="depthwise_conv2d_NCHWc.generic",
)
return strategy
@override_native_generic_func("conv2d_winograd_without_weight_transform_strategy")
def conv2d_winograd_without_weight_transfrom_strategy(attrs, inputs, out_type, target):
raise ValueError("No generic implemenation for conv2d_winograd_without_weight_transform")
@override_native_generic_func("conv2d_gemm_without_weight_transform_strategy")
def conv2d_gemm_without_weight_transform_strategy(attrs, inputs, out_type, target):
raise ValueError("No generic implemenation for conv2d_gemm_without_weight_transform")
@generic_func
def schedule_conv2d_winograd_weight_transform(attrs, outs, target):
with target:
return topi.generic.schedule_conv2d_winograd_weight_transform(outs)
@generic_func
def schedule_conv2d_winograd_nnpack_weight_transform(attrs, outs, target):
with target:
return topi.generic.schedule_conv2d_winograd_nnpack_weight_transform(outs)
@generic_func
def schedule_conv2d_gemm_weight_transform(attrs, outs, target):
with target:
return topi.generic.schedule_conv2d_gemm_weight_transform(outs)
def wrap_compute_deformable_conv2d(topi_compute):
def _compute_deformable_conv2d(attrs, inputs, out_dtype):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
deformable_groups = attrs.deformable_groups
groups = attrs.groups
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
out = topi_compute(
inputs[0],
inputs[1],
inputs[2],
strides,
padding,
dilation,
deformable_groups,
groups,
out_dtype,
)
return [out]
return _compute_deformable_conv2d
@override_native_generic_func("deformable_conv2d_strategy")
def deformable_conv2d_strategy(attrs, inputs, out_type, target):
layout = attrs.data_layout
strategy = _op.OpStrategy()
if layout == "NCHW":
strategy.add_implementation(
wrap_compute_deformable_conv2d(topi.nn.deformable_conv2d_nchw),
wrap_topi_schedule(topi.generic.schedule_deformable_conv2d_nchw),
name="deformable_conv2d_nchw.generic",
)
elif layout == "NHWC":
strategy.add_implementation(
wrap_compute_deformable_conv2d(topi.nn.deformable_conv2d_nhwc),
naive_schedule,
name="deformable_conv2d_nhwc.generic",
)
else:
raise RuntimeError("Layout %s is not supported in deformable conv2d" % layout)
return strategy
def wrap_compute_conv2d_transpose(topi_compute):
def compute_conv2d_transpose(attrs, inputs, out_dtype):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
output_padding = get_const_tuple(attrs.output_padding)
out = topi_compute(inputs[0], inputs[1], strides, padding, out_dtype, output_padding)
return [out]
return compute_conv2d_transpose
@override_native_generic_func("conv2d_transpose_strategy")
def conv2d_transpose_strategy(attrs, inputs, out_type, target):
logger.warning("conv2d_transpose is not optimized for this platform.")
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
assert layout == "NCHW", "only support nchw for now"
assert dilation == (1, 1), "not support dilate now"
assert groups == 1, "only support groups == 1 for now"
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv2d_transpose(topi.nn.conv2d_transpose_nchw),
wrap_topi_schedule(topi.generic.schedule_conv2d_transpose_nchw),
name="conv2d_transpose_nchw.generic",
)
return strategy
def wrap_compute_conv3d_transpose(topi_compute):
def compute_conv3d_transpose(attrs, inputs, out_dtype):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
output_padding = get_const_tuple(attrs.output_padding)
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
out = topi_compute(inputs[0], inputs[1], strides, padding, out_dtype, output_padding)
return [out]
return compute_conv3d_transpose
@override_native_generic_func("conv3d_transpose_strategy")
def conv3d_transpose_strategy(attrs, inputs, out_type, target):
logger.warning("conv3d_transpose is not optimized for this platform.")
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
assert layout == "NCDHW", "only support ncdhw for now"
assert dilation == (1, 1, 1), "not support dilate now"
assert groups == 1, "only support groups == 1 for now"
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv3d_transpose(topi.nn.conv3d_transpose_ncdhw),
wrap_topi_schedule(topi.generic.schedule_conv3d_transpose_ncdhw),
name="conv3d_transpose_ncdhw.generic",
)
return strategy
def wrap_compute_conv3d(topi_compute, need_layout=False):
def _compute_conv3d(attrs, inputs, out_type):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
(dilation_d, dilation_h, dilation_w) = dilation
if dilation_d < 1 or dilation_h < 1 or dilation_w < 1:
raise ValueError("Dilation should be positive value")
if groups != 1:
raise ValueError("Not support arbitrary group number for conv3d")
if need_layout:
out = topi_compute(inputs[0], inputs[1], strides, padding, dilation, layout, out_dtype)
else:
out = topi_compute(inputs[0], inputs[1], strides, padding, dilation, out_dtype)
return [out]
return _compute_conv3d
@override_native_generic_func("conv3d_strategy")
def conv3d_strategy(attrs, inputs, out_type, target):
logger.warning("conv3d is not optimized for this platform.")
strategy = _op.OpStrategy()
layout = attrs.data_layout
if layout == "NCDHW":
strategy.add_implementation(
wrap_compute_conv3d(topi.nn.conv3d_ncdhw),
wrap_topi_schedule(topi.generic.schedule_conv3d_ncdhw),
name="conv3d_ncdhw.generic",
)
elif layout == "NDHWC":
strategy.add_implementation(
wrap_compute_conv3d(topi.nn.conv3d_ndhwc),
wrap_topi_schedule(topi.generic.schedule_conv3d_ndhwc),
name="conv3d_ndhwc.generic",
)
else:
raise ValueError("Not support this layout {} yet".format(layout))
return strategy
@override_native_generic_func("conv3d_winograd_without_weight_transform_strategy")
def conv3d_winograd_without_weight_transfrom_strategy(attrs, inputs, out_type, target):
raise ValueError("No generic implemenation for conv3d_winograd_without_weight_transform")
@generic_func
def schedule_conv3d_winograd_weight_transform(attrs, outs, target):
with target:
return topi.generic.schedule_conv3d_winograd_weight_transform(outs)
def wrap_compute_conv1d(topi_compute):
def _compute_conv1d(attrs, inputs, out_type):
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
dilation = get_const_tuple(attrs.dilation)
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
return [topi_compute(inputs[0], inputs[1], strides, padding, dilation, out_dtype)]
return _compute_conv1d
@override_native_generic_func("conv1d_strategy")
def conv1d_strategy(attrs, inputs, out_type, target):
logger.warning("conv1d is not optimized for this platform.")
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
if dilation[0] < 1:
raise ValueError("dilation should be a positive value")
strategy = _op.OpStrategy()
if layout == "NCW":
strategy.add_implementation(
wrap_compute_conv1d(topi.nn.conv1d_ncw),
wrap_topi_schedule(topi.generic.schedule_conv1d_ncw),
name="conv1d_ncw.generic",
)
elif layout == "NWC":
strategy.add_implementation(
wrap_compute_conv1d(topi.nn.conv1d_nwc),
wrap_topi_schedule(topi.generic.schedule_conv1d_nwc),
name="conv1d_nwc.generic",
)
else:
raise ValueError("Unsupported conv1d layout {}".format(layout))
return strategy
def wrap_compute_conv1d_transpose(topi_compute):
    def _compute_conv1d_transpose(attrs, inputs, out_type):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
output_padding = get_const_tuple(attrs.output_padding)
out = topi_compute(inputs[0], inputs[1], strides, padding, out_dtype, output_padding)
return [out]
    return _compute_conv1d_transpose
@override_native_generic_func("conv1d_transpose_strategy")
def conv1d_transpose_strategy(attrs, inputs, out_type, target):
logger.warning("conv1d_transpose is not optimized for this platform.")
strategy = _op.OpStrategy()
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
assert layout == "NCW", "conv1d_transpose ncw only supported"
assert dilation == (1,), "conv1d_transpose dilation is not supported"
assert groups == 1, "conv1d_transpose groups == 1 only supported"
strategy.add_implementation(
wrap_compute_conv1d_transpose(topi.nn.conv1d_transpose_ncw),
wrap_topi_schedule(topi.generic.schedule_conv1d_transpose_ncw),
name="conv1d_transpose_ncw.generic",
)
return strategy
def wrap_compute_dilation2d(topi_compute, need_data_layout=False):
def _compute_dilation2d(attrs, inputs, out_type):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilations = get_const_tuple(attrs.dilations)
data_layout = attrs.get_str("data_layout")
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
args = [inputs[0], inputs[1], strides, padding, dilations]
if need_data_layout:
args.append(data_layout)
args.append(out_dtype)
return [topi_compute(*args)]
return _compute_dilation2d
@override_native_generic_func("dilation2d_strategy")
def dilation2d_strategy(attrs, inputs, out_type, target):
logger.warning("dilation2d_strategy is not optimized for this platform.")
strategy = _op.OpStrategy()
dilations = get_const_tuple(attrs.dilations)
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
assert layout in ["NCHW", "NHWC"]
(dilation_h, dilation_w) = dilations
if dilation_h < 1 or dilation_w < 1:
raise ValueError("dilation should be positive value")
if layout == "NCHW":
assert kernel_layout == "IHW"
strategy.add_implementation(
wrap_compute_dilation2d(topi.image.dilation2d_nchw),
wrap_topi_schedule(topi.generic.schedule_dilation2d_nchw),
name="dilation2d_nchw.generic",
)
elif layout == "NHWC":
assert kernel_layout == "HWI"
strategy.add_implementation(
wrap_compute_dilation2d(topi.image.dilation2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_dilation2d_nhwc),
name="dilation2d_nhwc.generic",
)
else:
raise RuntimeError("Unsupported dilation2d layout {}".format(layout))
return strategy
def wrap_compute_dense(topi_compute):
def _compute_dense(attrs, inputs, out_type):
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
return [topi_compute(inputs[0], inputs[1], None, out_dtype)]
return _compute_dense
@override_native_generic_func("dense_strategy")
def dense_strategy(attrs, inputs, out_type, target):
logger.warning("dense is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_dense(topi.nn.dense),
wrap_topi_schedule(topi.generic.schedule_dense),
name="dense.generic",
)
return strategy
def wrap_compute_batch_matmul(topi_compute):
def _compute_batch_matmul(attrs, inputs, out_type):
return [topi_compute(inputs[0], inputs[1], out_type.shape)]
return _compute_batch_matmul
@override_native_generic_func("batch_matmul_strategy")
def batch_matmul_strategy(attrs, inputs, out_type, target):
logger.warning("batch_matmul is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_batch_matmul(topi.nn.batch_matmul),
wrap_topi_schedule(topi.generic.schedule_batch_matmul),
name="batch_matmul.generic",
)
return strategy
def wrap_compute_sparse_dense(topi_compute):
def _compute_sparse_dense(attrs, inputs, out_type):
return [topi_compute(inputs[0], inputs[1], inputs[2], inputs[3], attrs["sparse_lhs"])]
return _compute_sparse_dense
@override_native_generic_func("sparse_dense_strategy")
def sparse_dense_strategy(attrs, inputs, out_type, target):
logger.warning("sparse dense is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sparse_dense(topi.nn.sparse_dense),
wrap_topi_schedule(topi.generic.schedule_sparse_dense),
name="sparse_dense.generic",
)
return strategy
@override_native_generic_func("sparse_dense_padded_strategy")
def sparse_dense_padded_strategy(attrs, inputs, out_type, target):
raise NotImplementedError("sparse_dense_padded is only implemented for cuda")
@generic_func
def schedule_sparse_transpose(attrs, outs, target):
with target:
return topi.generic.schedule_sparse_transpose(outs)
def wrap_compute_argsort(topi_compute):
def _compute_argsort(attrs, inputs, _):
axis = get_const_int(attrs.axis)
is_ascend = bool(get_const_int(attrs.is_ascend))
dtype = attrs.dtype
return [topi_compute(inputs[0], axis=axis, is_ascend=is_ascend, dtype=dtype)]
return _compute_argsort
@override_native_generic_func("argsort_strategy")
def argsort_strategy(attrs, inputs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_argsort(topi.argsort),
wrap_topi_schedule(topi.generic.schedule_argsort),
name="argsort.generic",
)
return strategy
def wrap_compute_topk(topi_compute):
def _compute_topk(attrs, inputs, out_type):
if attrs.k is not None:
k = attrs.k
else:
k = inputs[1]
axis = get_const_int(attrs.axis)
ret_type = attrs.ret_type
is_ascend = bool(get_const_int(attrs.is_ascend))
dtype = attrs.dtype
out = topi_compute(inputs[0], k, axis, ret_type, is_ascend, dtype)
out = out if isinstance(out, list) else [out]
return out
return _compute_topk
@override_native_generic_func("topk_strategy")
def topk_strategy(attrs, inputs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_topk(topi.topk),
wrap_topi_schedule(topi.generic.schedule_topk),
name="topk.generic",
)
return strategy
def wrap_compute_multibox_prior(topi_compute):
def _compute_multibox_prior(attrs, inputs, _):
sizes = get_float_tuple(attrs.sizes)
ratios = get_float_tuple(attrs.ratios)
steps = get_float_tuple(attrs.steps)
offsets = get_float_tuple(attrs.offsets)
clip = bool(get_const_int(attrs.clip))
return [topi_compute(inputs[0], sizes, ratios, steps, offsets, clip)]
return _compute_multibox_prior
@override_native_generic_func("multibox_prior_strategy")
def multibox_prior_strategy(attrs, inputs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_multibox_prior(topi.vision.ssd.multibox_prior),
wrap_topi_schedule(topi.generic.schedule_multibox_prior),
name="multibox_prior.generic",
)
return strategy
def wrap_compute_multibox_transform_loc(topi_compute):
def _compute_multibox_transform_loc(attrs, inputs, _):
clip = bool(get_const_int(attrs.clip))
threshold = get_const_float(attrs.threshold)
variances = get_float_tuple(attrs.variances)
return topi_compute(inputs[0], inputs[1], inputs[2], clip, threshold, variances)
return _compute_multibox_transform_loc
@override_native_generic_func("multibox_transform_loc_strategy")
def multibox_transform_loc_strategy(attrs, inputs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_multibox_transform_loc(topi.vision.ssd.multibox_transform_loc),
wrap_topi_schedule(topi.generic.schedule_multibox_transform_loc),
name="multibox_transform_loc.generic",
)
return strategy
def wrap_compute_get_valid_counts(topi_compute):
def _compute_get_valid_counts(attrs, inputs, out_type):
score_threshold = inputs[1]
id_index = get_const_int(attrs.id_index)
score_index = get_const_int(attrs.score_index)
if attrs.score_threshold is not None:
score_threshold = get_const_float(attrs.score_threshold)
return topi_compute(inputs[0], score_threshold, id_index, score_index)
return _compute_get_valid_counts
@override_native_generic_func("get_valid_counts_strategy")
def get_valid_counts_strategy(attrs, inputs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_get_valid_counts(topi.vision.get_valid_counts),
wrap_topi_schedule(topi.generic.schedule_get_valid_counts),
name="get_valid_counts.generic",
)
return strategy
def wrap_compute_nms(topi_compute):
def _compute_nms(attrs, inputs, out_type):
max_output_size = inputs[3]
iou_threshold = inputs[4]
if attrs.max_output_size is not None:
max_output_size = attrs.max_output_size
if attrs.iou_threshold is not None:
iou_threshold = get_const_float(attrs.iou_threshold)
return_indices = bool(get_const_int(attrs.return_indices))
force_suppress = bool(get_const_int(attrs.force_suppress))
top_k = get_const_int(attrs.top_k)
coord_start = get_const_int(attrs.coord_start)
score_index = get_const_int(attrs.score_index)
id_index = get_const_int(attrs.id_index)
invalid_to_bottom = bool(get_const_int(attrs.invalid_to_bottom))
if return_indices:
return topi_compute(
inputs[0],
inputs[1],
inputs[2],
max_output_size,
iou_threshold,
force_suppress,
top_k,
coord_start,
score_index,
id_index,
return_indices,
invalid_to_bottom,
)
return [
topi_compute(
inputs[0],
inputs[1],
inputs[2],
max_output_size,
iou_threshold,
force_suppress,
top_k,
coord_start,
score_index,
id_index,
return_indices,
invalid_to_bottom,
)
]
return _compute_nms
@override_native_generic_func("non_max_suppression_strategy")
def nms_strategy(attrs, inputs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_nms(topi.vision.non_max_suppression),
wrap_topi_schedule(topi.generic.schedule_nms),
name="nms.generic",
)
return strategy
def wrap_compute_roi_align(topi_compute):
def _compute_roi_align(attrs, inputs, out_type):
assert attrs.layout == "NCHW"
pooled_size = get_const_tuple(attrs.pooled_size)
return [
topi_compute(
inputs[0],
inputs[1],
pooled_size=pooled_size,
spatial_scale=attrs.spatial_scale,
sample_ratio=attrs.sample_ratio,
)
]
return _compute_roi_align
@override_native_generic_func("roi_align_strategy")
def roi_align_strategy(attrs, inputs, out_type, target):
strategy = _op.OpStrategy()
layout = attrs.layout
assert layout == "NCHW", "only support nchw for now"
strategy.add_implementation(
wrap_compute_roi_align(topi.vision.rcnn.roi_align_nchw),
wrap_topi_schedule(topi.generic.schedule_roi_align),
name="roi_align.generic",
)
return strategy
@generic_func
def schedule_roi_pool(attrs, outs, target):
with target:
return topi.generic.schedule_roi_pool(outs)
def wrap_compute_proposal(topi_compute):
def _compute_proposal(attrs, inputs, out_type):
scales = get_float_tuple(attrs.scales)
ratios = get_float_tuple(attrs.ratios)
feature_stride = attrs.feature_stride
threshold = attrs.threshold
rpn_pre_nms_top_n = attrs.rpn_pre_nms_top_n
rpn_post_nms_top_n = attrs.rpn_post_nms_top_n
rpn_min_size = attrs.rpn_min_size
iou_loss = bool(get_const_int(attrs.iou_loss))
return [
topi_compute(
inputs[0],
inputs[1],
inputs[2],
scales,
ratios,
feature_stride,
threshold,
rpn_pre_nms_top_n,
rpn_post_nms_top_n,
rpn_min_size,
iou_loss,
)
]
return _compute_proposal
@override_native_generic_func("proposal_strategy")
def proposal_strategy(attrs, inputs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_proposal(topi.vision.rcnn.proposal),
wrap_topi_schedule(topi.generic.schedule_proposal),
name="proposal.generic",
)
return strategy
@override_native_generic_func("scatter_strategy")
def scatter_strategy(attrs, outs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scatter(topi.scatter),
wrap_topi_schedule(topi.generic.schedule_scatter),
name="scatter.generic",
)
return strategy
def wrap_compute_scatter(topi_compute):
def _compute_scatter(attrs, inputs, _):
return [topi_compute(inputs[0], inputs[1], inputs[2], axis=attrs.axis)]
return _compute_scatter
@override_native_generic_func("scatter_add_strategy")
def scatter_add_strategy(attrs, outs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scatter(topi.scatter_add),
wrap_topi_schedule(topi.generic.schedule_scatter),
name="scatter_add.generic",
)
return strategy
@override_native_generic_func("scatter_nd_strategy")
def scatter_nd_strategy(attrs, inputs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scatter_nd(topi.scatter_nd),
wrap_topi_schedule(topi.generic.schedule_extern),
name="scatter_nd.generic",
)
return strategy
def wrap_compute_scatter_nd(topi_compute):
def _compute_scatter_nd(attrs, inputs, _):
return [topi_compute(inputs[0], inputs[1], attrs.out_shape)]
return _compute_scatter_nd
def wrap_compute_bitserial_conv2d(topi_compute):
def compute_bitserial_conv2d(attrs, inputs, out_dtype):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
activation_bits = attrs.activation_bits
weight_bits = attrs.weight_bits
pack_dtype = attrs.pack_dtype
out_dtype = attrs.out_dtype
unipolar = attrs.unipolar
return [
topi_compute(
inputs[0],
inputs[1],
strides,
padding,
activation_bits,
weight_bits,
pack_dtype,
out_dtype,
unipolar,
)
]
return compute_bitserial_conv2d
@override_native_generic_func("bitserial_conv2d_strategy")
def bitserial_conv2d_strategy(attrs, inputs, out_type, target):
logger.warning("bitserial_conv2d is not optimized for this platform.")
strategy = _op.OpStrategy()
layout = attrs.data_layout
if layout == "NCHW":
strategy.add_implementation(
wrap_compute_bitserial_conv2d(topi.nn.bitserial_conv2d_nchw),
wrap_topi_schedule(topi.generic.schedule_bitserial_conv2d_nchw),
name="bitserial_conv2d_nchw.generic",
)
elif layout == "NHWC":
strategy.add_implementation(
wrap_compute_bitserial_conv2d(topi.nn.bitserial_conv2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_bitserial_conv2d_nhwc),
name="bitserial_conv2d_nhwc.generic",
)
else:
raise ValueError("Data layout {} not supported.".format(layout))
return strategy
def wrap_compute_bitserial_dense(topi_compute):
def compute_bitserial_dense(attrs, inputs, out_type):
data_bits = attrs.data_bits
weight_bits = attrs.weight_bits
pack_dtype = attrs.pack_dtype
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
unipolar = attrs.unipolar
return [
topi_compute(
inputs[0], inputs[1], data_bits, weight_bits, pack_dtype, out_dtype, unipolar
)
]
return compute_bitserial_dense
@override_native_generic_func("bitserial_dense_strategy")
def bitserial_dense_strategy(attrs, inputs, out_type, target):
logger.warning("bitserial_dense is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_bitserial_dense(topi.nn.bitserial_dense),
wrap_topi_schedule(topi.generic.schedule_bitserial_dense),
name="bitserial_dense.generic",
)
return strategy
def wrap_compute_correlation(topi_compute):
def _compute_correlation(attrs, inputs, out_type):
kernel_size = attrs.kernel_size
max_displacement = attrs.max_displacement
stride1 = attrs.stride1
stride2 = attrs.stride2
padding = get_const_tuple(attrs.padding)
is_multiply = attrs.is_multiply
return [
topi_compute(
inputs[0],
inputs[1],
kernel_size,
max_displacement,
stride1,
stride2,
padding,
is_multiply,
)
]
return _compute_correlation
@override_native_generic_func("correlation_strategy")
def correlation_strategy(attrs, inputs, out_type, target):
logger.warning("correlation is not optimized for this platform.")
layout = attrs.layout
assert layout == "NCHW", "Only support NCHW layout"
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_correlation(topi.nn.correlation_nchw),
wrap_topi_schedule(topi.generic.schedule_correlation_nchw),
name="correlation.generic",
)
return strategy
def wrap_compute_argwhere(topi_compute):
def _compute_argwhere(attrs, inputs, out_type):
output_shape = []
for s in out_type.shape:
if hasattr(s, "value"):
output_shape.append(s)
else:
output_shape.append(te.var("any_dim", "int32"))
new_output_type = ir.TensorType(output_shape, "int32")
return [topi_compute(new_output_type, inputs[0])]
return _compute_argwhere
@override_native_generic_func("argwhere_strategy")
def argwhere_strategy(attrs, inputs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_argwhere(topi.argwhere),
wrap_topi_schedule(topi.generic.schedule_argwhere),
name="argwhere.generic",
)
return strategy
| true | true |
f700831c65772de4a694995aa5bd1a37f57f8a1c | 11,955 | py | Python | googlemaps_helpers/main.py | mrcagney/googlemaps_helpers | 75dfcc3e5e788d04c3af3e7608909b349ac83e8d | [
"MIT"
] | 1 | 2017-06-25T17:58:37.000Z | 2017-06-25T17:58:37.000Z | googlemaps_helpers/main.py | araichev/googlemaps_helpers | 75dfcc3e5e788d04c3af3e7608909b349ac83e8d | [
"MIT"
] | null | null | null | googlemaps_helpers/main.py | araichev/googlemaps_helpers | 75dfcc3e5e788d04c3af3e7608909b349ac83e8d | [
"MIT"
] | null | null | null | from itertools import product
import math
from collections import OrderedDict
from pathlib import Path
import logging
import pandas as pd
import numpy as np
import geopandas as gpd
import shapely.geometry as sg
import googlemaps
# Configure logging
logger = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s \n%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
WGS84 = {'init': 'epsg:4326'}
# Maximum number of elements in a Google Maps Distance Matrix API query
MAX_ELEMENTS = 100
def flip_coords(xy_list):
"""
Given a list of coordinate pairs, swap the first and second
coordinates and return the resulting list.
"""
return [(y, x) for (x, y) in xy_list]
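# Example (illustrative, not part of the original module): the Google Maps API
# expects (lat, lng) pairs while the GeoDataFrame geometries store (lon, lat),
# hence this helper, e.g.
#
#     flip_coords([(174.76, -36.85), (174.78, -36.86)])
#     # -> [(-36.85, 174.76), (-36.86, 174.78)]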
def make_ids(n, prefix='row_'):
"""
Return a list of ``n`` (integer) unique strings of the form
``prefix``<number>.
"""
k = int(math.log10(n)) + 1 # Number of digits for padding
return [prefix + '{num:0{pad}d}'.format(num=i, pad=k) for i in range(n)]
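# Example (illustrative): the zero-padding width grows with ``n``, e.g.
#
#     make_ids(3)            # -> ['row_0', 'row_1', 'row_2']
#     make_ids(11, 'dest_')  # -> ['dest_00', 'dest_01', ..., 'dest_10']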
def to_df(distance_matrix_response, origin_ids=None, destination_ids=None):
"""
Given a (decoded) JSON response to a Google Maps
Distance Matrix API call, convert it into a DataFrame with the
following columns.
- ``'origin_address'``
- ``'origin_id'``: ID of origin; defaults to an element of
:func:`make_ids`
- ``'destination_address'``
    - ``'destination_id'``: ID of destination; defaults to an element of
:func:`make_ids`
- ``'duration'``: time from origin to destination; includes
time in traffic if that's available in the response
- ``'distance'``: distance from origin to destination
The origin and destination addresses in the response can optionally
be assigned IDs by setting ``origin_ids`` (list of strings) and
``destination_ids`` (list of strings).
"""
# Initialize
r = distance_matrix_response
columns = ['origin_address', 'destination_address', 'origin_id',
'destination_id', 'duration', 'distance']
f = pd.DataFrame([], columns=columns)
# Append addresses
if not r['rows']:
return f
f['origin_address'], f['destination_address'] = zip(
*product(r['origin_addresses'], r['destination_addresses']))
# Append IDs
if origin_ids is None:
origin_ids = make_ids(len(r['origin_addresses']))
if destination_ids is None:
destination_ids = make_ids(len(r['destination_addresses']))
f['origin_id'], f['destination_id'] = zip(
*product(origin_ids, destination_ids))
# Append durations and distances
durs = []
dists = []
for row in r['rows']:
for e in row['elements']:
if e['status'] == 'OK':
if 'duration_in_traffic' in e:
dur_key = 'duration_in_traffic'
else:
dur_key = 'duration'
durs.append(e[dur_key]['value'])
dists.append(e['distance']['value'])
else:
durs.append(np.nan)
dists.append(np.nan)
f['duration'] = durs
f['distance'] = dists
return f
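# Example (illustrative sketch; the dict below is a hand-made stand-in for a
# decoded Distance Matrix API response, trimmed to the fields used above):
#
#     r = {
#         'origin_addresses': ['A Street, Auckland'],
#         'destination_addresses': ['B Street, Auckland'],
#         'rows': [{'elements': [
#             {'status': 'OK',
#              'duration': {'value': 300},
#              'distance': {'value': 2500}},
#         ]}],
#     }
#     to_df(r, origin_ids=['o0'], destination_ids=['d0'])
#     # -> one row with duration 300 (seconds) and distance 2500 (meters)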
def point_df_to_gdf(f, x_col='lon', y_col='lat', from_crs=WGS84):
"""
Given a DataFrame of points with x coordinates
in the column ``x_col`` and y coordinates in the column ``y_col``,
with respect to the GeoPandas coordinate reference system
``from_crs`` (dictionary), convert the DataFrame into a GeoDataFrame
with that coordinate reference system and with a ``'geometry'``
column that corresponds to the points.
Delete the original x and y columns, and return the result.
"""
f = f.copy()
f['geometry'] = f[[x_col, y_col]].apply(lambda p: sg.Point(p), axis=1)
f = f.drop([x_col, y_col], axis=1)
f = gpd.GeoDataFrame(f)
f.crs = from_crs
return f
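# Example (illustrative, with made-up coordinates): a plain lon/lat DataFrame
# becomes a GeoDataFrame of shapely points in WGS84:
#
#     f = pd.DataFrame({'name': ['stop_a'], 'lon': [174.76], 'lat': [-36.85]})
#     gdf = point_df_to_gdf(f)   # gdf.geometry holds Point(174.76, -36.85)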
def point_gdf_to_df(f, x_col='lon', y_col='lat', to_crs=WGS84):
"""
The inverse of :func:`point_df_to_gdf`.
Given a GeoDataFrame of points, convert to the coordinate reference
system ``to_crs`` (dictionary), then split its ``'geometry'`` column
into x coordinates in the column ``x_col`` and y coordinates in the
columns ``y_col``, deleting the ``'geometry'`` column afterwards.
Coerce the result into a DataFrame and return it.
"""
f = f.copy()
if f.crs is None:
raise ValueError('GeoDataFrame needs a crs attribute')
if f.crs != to_crs:
f = f.to_crs(to_crs)
f[x_col], f[y_col] = zip(*f['geometry'].map(lambda p: p.coords[0]))
del f['geometry']
return pd.DataFrame(f)
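# Example (illustrative): the two helpers above are inverses, so a round trip
# recovers the original lon/lat values (column order may differ):
#
#     f2 = point_gdf_to_df(point_df_to_gdf(f))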
def build_distance_matrix_df(client, origins_gdf, destinations_gdf,
origin_id_col=None, destination_id_col=None,
max_elements=MAX_ELEMENTS, **distance_matrix_kwargs):
"""
Compute the duration-distance matrix between the given origins
and destinations, assuming that the number of origins multiplied
by the number of destinations is at most ``max_elements``.
To do this, call the Google Maps Distance Matrix API once.
INPUT:
- ``client``: google-maps-services-python Client instance
    - ``origins_gdf``: GeoDataFrame of points; the origins
- ``destinations_gdf``: GeoDataFrame of points; the destinations
- ``origin_id_col``: string; name of ID column in ``origins_gdf``
- ``destination_id_col``: string; name of ID column in
``destinations_gdf``
- ``max_elements``: integer; max number of elements allowable in
one Google Maps Distance Matrix API call
- ``distance_matrix_kwargs``: dictionary; keyword arguments for
Google Maps Distance Matrix API
OUTPUT:
A DataFrame of the form output by :func:`to_df` where the origins
come from ``origins_gdf`` and the destinations come from
``destinations_gdf``.
Return an empty DataFrame with the expected column names if an
    HTTPError or Timeout exception occurs.
"""
# Initialize origin and destinations GeoDataFrames
o_gdf = origins_gdf.copy()
d_gdf = destinations_gdf.copy()
n = o_gdf.shape[0]*d_gdf.shape[0]
if n > max_elements:
raise ValueError('Number of origins times number of destinations '
'is {}, which exceeds threshold of {} elements'.format(
n, max_elements))
# Prepare origin data
if o_gdf.crs != WGS84:
o_gdf = o_gdf.to_crs(WGS84)
if origin_id_col is None:
origin_id_col = 'temp_id'
o_gdf[origin_id_col] = make_ids(o_gdf.shape[0])
o_locs = [geo.coords[0] for geo in o_gdf['geometry']]
o_ids = o_gdf[origin_id_col].values
# Prepare destination data
if d_gdf.crs != WGS84:
d_gdf = d_gdf.to_crs(WGS84)
if destination_id_col is None:
destination_id_col = 'temp_id'
d_gdf[destination_id_col] = make_ids(d_gdf.shape[0])
d_locs = [geo.coords[0] for geo in d_gdf['geometry']]
d_ids = d_gdf[destination_id_col].values
# Get matrix info
try:
r = client.distance_matrix(flip_coords(o_locs),
flip_coords(d_locs), **distance_matrix_kwargs)
f = to_df(r, o_ids, d_ids)
except (googlemaps.exceptions.HTTPError, googlemaps.exceptions.Timeout):
# Empty DataFrame
f = pd.DataFrame(columns=[
'origin_address',
'origin_id',
'destination_address',
'destination_id',
'duration',
'distance',
])
return f
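# Example (illustrative sketch; assumes a valid API key and small point
# GeoDataFrames ``origins_gdf`` and ``destinations_gdf``, e.g. built with
# :func:`point_df_to_gdf`):
#
#     client = googlemaps.Client(key='YOUR-API-KEY')
#     f = build_distance_matrix_df(client, origins_gdf, destinations_gdf,
#         mode='walking')
#     # one row per origin-destination pair, provided the number of origins
#     # times the number of destinations is at most MAX_ELEMENTS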
def run_distance_matrix_job(client, origins_gdf, destinations_gdf, out_dir,
origin_id_col=None, destination_id_col=None,
max_elements=MAX_ELEMENTS, **distance_matrix_kwargs):
"""
Compute the duration-distance matrix between the given origins
and destinations.
To do this, call the Google Maps Distance Matrix API repeatedly,
ensuring that each call uses no more than ``max_elements`` elements.
INPUT:
- ``client``: google-maps-services-python Client instance
- ``origins_gdf``: GeoDataFrame of points; the origins
- ``destinations_gdf``: GeoDataFrame of points; the destinations
- ``out_dir``: string or Path object of a directory at which
to store the output files; create the directory if it does not
exist
- ``origin_id_col``: string; name of ID column in ``origins_gdf``
- ``destination_id_col``: string; name of ID column in
``destinations_gdf``
- ``max_elements``: integer; max number of elements allowable in
one Google Maps Distance Matrix API call
- ``distance_matrix_kwargs``: dictionary; keyword arguments for
Google Maps Distance Matrix API
OUTPUT:
A collection of CSV files located at ``out_dir`` of the form output
    by :func:`to_df`, where the origins come from ``origins_gdf`` and
the destinations come from ``destinations_gdf``.
    Each file will contain one origin point and at most
``max_elements`` destination points, for a total of at most
``max_elements`` rows.
An empty DataFrame with the expected column names will be saved to
    file if an HTTPError or Timeout exception occurs.
This can happen if, for example, the daily query limit is exceeded.
"""
o_gdf = origins_gdf.copy()
d_gdf = destinations_gdf.copy()
n_o = o_gdf.shape[0]
n_d = d_gdf.shape[0]
# Create IDs if necessary
if origin_id_col is None:
origin_id_col = 'ersatz_origin_id'
o_gdf[origin_id_col] = make_ids(n_o, 'orig_row_')
if destination_id_col is None:
destination_id_col = 'ersatz_destination_id'
d_gdf[destination_id_col] = make_ids(n_d, 'dest_row_')
# Get mode for logging
mode = distance_matrix_kwargs.get('mode', 'driving')
# Make output directory if it does not exist
out_dir = Path(out_dir)
if not out_dir.exists():
out_dir.mkdir(parents=True)
# Iterate through origins.
# For each origin segment all destinations into chunks of size
# at most ``max_elements``.
# For each destination chunk, build a one-to-many matrix from the
# origin to all the destinations in the chunk and save it to file.
for ix, orig_id in o_gdf[[origin_id_col]].itertuples():
logger.info('Working on origin {} of {} (id {})'.format(
ix + 1, n_o, orig_id))
# Chunk destinations and build one-to-many matrices from origin
# to destination chunks.
# A failed attempt (e.g. through API usage over limit)
# will build an empty matrix
for j in range(math.ceil(n_d/max_elements)):
n1 = max_elements*j
n2 = min(max_elements*(j + 1), n_d)
dest_id1, dest_id2 = (
d_gdf[destination_id_col].iat[n1],
d_gdf[destination_id_col].iat[n2 - 1]
)
path = Path(out_dir)/'{}_from_{}_to_{}--{}.csv'.format(
mode, orig_id, dest_id1, dest_id2)
f = build_distance_matrix_df(client, o_gdf.loc[ix:ix],
d_gdf.iloc[n1:n2],
origin_id_col=origin_id_col,
destination_id_col=destination_id_col,
**distance_matrix_kwargs)
f.to_csv(path, index=False)
if f.empty:
logger.info('* Failed to get data for ' + path.stem)
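# Example (illustrative sketch; same assumptions as above, plus a writable
# output directory and a hypothetical 'stop_id' column in ``origins_gdf``):
#
#     client = googlemaps.Client(key='YOUR-API-KEY')
#     run_distance_matrix_job(client, origins_gdf, destinations_gdf,
#         out_dir='data/matrices', origin_id_col='stop_id',
#         mode='driving', departure_time='now')
#     # writes one CSV per (origin, destination chunk) pair into data/matrices/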
def compute_cost(n, cost=0.5/1000, num_freebies=0,
daily_limit=100000, chunk_size=MAX_ELEMENTS):
"""
Estimate the cost of a sequence of Google Maps Distance Matrix
queries comprising a total of n elements at ``cost`` USD per
element, where the first ``num_freebies`` (integer) elements are
free.
Return a Series that includes the cost and some other metadata.
"""
d = OrderedDict()
d['#elements'] = n
d['exceeds {!s}-element daily limit?'.format(daily_limit)] = (
n > daily_limit)
d['estimated cost for job in USD'] = max(0, n - num_freebies)*cost
d['estimated duration for job in minutes'] = n/chunk_size/60
return pd.Series(d)
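# Example (illustrative, using the default prices above): a job of 250,000
# elements exceeds the default daily limit and would cost about
# 250000 * 0.0005 = 125 USD, with an estimated duration of
# 250000 / 100 / 60 ~= 41.7 minutes:
#
#     compute_cost(250000)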
| 36.448171 | 76 | 0.658302 | from itertools import product
import math
from collections import OrderedDict
from pathlib import Path
import logging
import pandas as pd
import numpy as np
import geopandas as gpd
import shapely.geometry as sg
import googlemaps
logger = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s \n%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
WGS84 = {'init': 'epsg:4326'}
MAX_ELEMENTS = 100
def flip_coords(xy_list):
return [(y, x) for (x, y) in xy_list]
def make_ids(n, prefix='row_'):
    k = int(math.log10(n)) + 1
    return [prefix + '{num:0{pad}d}'.format(num=i, pad=k) for i in range(n)]
def to_df(distance_matrix_response, origin_ids=None, destination_ids=None):
r = distance_matrix_response
columns = ['origin_address', 'destination_address', 'origin_id',
'destination_id', 'duration', 'distance']
f = pd.DataFrame([], columns=columns)
if not r['rows']:
return f
f['origin_address'], f['destination_address'] = zip(
*product(r['origin_addresses'], r['destination_addresses']))
if origin_ids is None:
origin_ids = make_ids(len(r['origin_addresses']))
if destination_ids is None:
destination_ids = make_ids(len(r['destination_addresses']))
f['origin_id'], f['destination_id'] = zip(
*product(origin_ids, destination_ids))
durs = []
dists = []
for row in r['rows']:
for e in row['elements']:
if e['status'] == 'OK':
if 'duration_in_traffic' in e:
dur_key = 'duration_in_traffic'
else:
dur_key = 'duration'
durs.append(e[dur_key]['value'])
dists.append(e['distance']['value'])
else:
durs.append(np.nan)
dists.append(np.nan)
f['duration'] = durs
f['distance'] = dists
return f
def point_df_to_gdf(f, x_col='lon', y_col='lat', from_crs=WGS84):
f = f.copy()
f['geometry'] = f[[x_col, y_col]].apply(lambda p: sg.Point(p), axis=1)
f = f.drop([x_col, y_col], axis=1)
f = gpd.GeoDataFrame(f)
f.crs = from_crs
return f
def point_gdf_to_df(f, x_col='lon', y_col='lat', to_crs=WGS84):
f = f.copy()
if f.crs is None:
raise ValueError('GeoDataFrame needs a crs attribute')
if f.crs != to_crs:
f = f.to_crs(to_crs)
f[x_col], f[y_col] = zip(*f['geometry'].map(lambda p: p.coords[0]))
del f['geometry']
return pd.DataFrame(f)
def build_distance_matrix_df(client, origins_gdf, destinations_gdf,
origin_id_col=None, destination_id_col=None,
max_elements=MAX_ELEMENTS, **distance_matrix_kwargs):
o_gdf = origins_gdf.copy()
d_gdf = destinations_gdf.copy()
n = o_gdf.shape[0]*d_gdf.shape[0]
if n > max_elements:
raise ValueError('Number of origins times number of destinations '
'is {}, which exceeds threshold of {} elements'.format(
n, max_elements))
if o_gdf.crs != WGS84:
o_gdf = o_gdf.to_crs(WGS84)
if origin_id_col is None:
origin_id_col = 'temp_id'
o_gdf[origin_id_col] = make_ids(o_gdf.shape[0])
o_locs = [geo.coords[0] for geo in o_gdf['geometry']]
o_ids = o_gdf[origin_id_col].values
if d_gdf.crs != WGS84:
d_gdf = d_gdf.to_crs(WGS84)
if destination_id_col is None:
destination_id_col = 'temp_id'
d_gdf[destination_id_col] = make_ids(d_gdf.shape[0])
d_locs = [geo.coords[0] for geo in d_gdf['geometry']]
d_ids = d_gdf[destination_id_col].values
try:
r = client.distance_matrix(flip_coords(o_locs),
flip_coords(d_locs), **distance_matrix_kwargs)
f = to_df(r, o_ids, d_ids)
except (googlemaps.exceptions.HTTPError, googlemaps.exceptions.Timeout):
f = pd.DataFrame(columns=[
'origin_address',
'origin_id',
'destination_address',
'destination_id',
'duration',
'distance',
])
return f
def run_distance_matrix_job(client, origins_gdf, destinations_gdf, out_dir,
origin_id_col=None, destination_id_col=None,
max_elements=MAX_ELEMENTS, **distance_matrix_kwargs):
o_gdf = origins_gdf.copy()
d_gdf = destinations_gdf.copy()
n_o = o_gdf.shape[0]
n_d = d_gdf.shape[0]
if origin_id_col is None:
origin_id_col = 'ersatz_origin_id'
o_gdf[origin_id_col] = make_ids(n_o, 'orig_row_')
if destination_id_col is None:
destination_id_col = 'ersatz_destination_id'
d_gdf[destination_id_col] = make_ids(n_d, 'dest_row_')
mode = distance_matrix_kwargs.get('mode', 'driving')
out_dir = Path(out_dir)
if not out_dir.exists():
out_dir.mkdir(parents=True)
for ix, orig_id in o_gdf[[origin_id_col]].itertuples():
logger.info('Working on origin {} of {} (id {})'.format(
ix + 1, n_o, orig_id))
for j in range(math.ceil(n_d/max_elements)):
n1 = max_elements*j
n2 = min(max_elements*(j + 1), n_d)
dest_id1, dest_id2 = (
d_gdf[destination_id_col].iat[n1],
d_gdf[destination_id_col].iat[n2 - 1]
)
path = Path(out_dir)/'{}_from_{}_to_{}--{}.csv'.format(
mode, orig_id, dest_id1, dest_id2)
f = build_distance_matrix_df(client, o_gdf.loc[ix:ix],
d_gdf.iloc[n1:n2],
origin_id_col=origin_id_col,
destination_id_col=destination_id_col,
**distance_matrix_kwargs)
f.to_csv(path, index=False)
if f.empty:
logger.info('* Failed to get data for ' + path.stem)
def compute_cost(n, cost=0.5/1000, num_freebies=0,
daily_limit=100000, chunk_size=MAX_ELEMENTS):
d = OrderedDict()
d['#elements'] = n
d['exceeds {!s}-element daily limit?'.format(daily_limit)] = (
n > daily_limit)
d['estimated cost for job in USD'] = max(0, n - num_freebies)*cost
d['estimated duration for job in minutes'] = n/chunk_size/60
return pd.Series(d)
| true | true |
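A minimal usage sketch for the distance-matrix helpers above, assuming a valid Google Maps API key and small example point tables; the key, coordinates and output directory are placeholders and are not part of the original file.
import googlemaps
import pandas as pd
# Hypothetical origin/destination tables with lon/lat columns (WGS84).
origins = point_df_to_gdf(pd.DataFrame({'lon': [174.76, 174.78], 'lat': [-36.85, -36.86]}))
destinations = point_df_to_gdf(pd.DataFrame({'lon': [174.80], 'lat': [-36.90]}))
# Rough price/duration estimate for 2 x 1 = 2 elements before running the job.
print(compute_cost(origins.shape[0] * destinations.shape[0]))
client = googlemaps.Client(key='YOUR_API_KEY')  # placeholder key
run_distance_matrix_job(client, origins, destinations, out_dir='distance_matrix_output',
                        mode='walking')  # extra kwargs are passed through to client.distance_matrix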
f700833e6a88443d785e41ccd0504c32a9ae21bb | 12,489 | py | Python | locust/test/test_runners.py | wenlongyan/locust | 777d1cc9c8dd93fc8693f06780cbf8166fe137df | [
"MIT"
] | 1 | 2019-10-18T10:18:20.000Z | 2019-10-18T10:18:20.000Z | locust/test/test_runners.py | wenlongyan/locust | 777d1cc9c8dd93fc8693f06780cbf8166fe137df | [
"MIT"
] | null | null | null | locust/test/test_runners.py | wenlongyan/locust | 777d1cc9c8dd93fc8693f06780cbf8166fe137df | [
"MIT"
] | null | null | null | import unittest
import gevent
from gevent import sleep
from gevent.queue import Queue
import mock
from locust import events
from locust.core import Locust, TaskSet, task
from locust.exception import LocustError
from locust.main import parse_options
from locust.rpc import Message
from locust.runners import LocalLocustRunner, MasterLocustRunner
from locust.stats import global_stats, RequestStats
from locust.test.testcases import LocustTestCase
def mocked_rpc_server():
class MockedRpcServer(object):
queue = Queue()
outbox = []
def __init__(self, host, port):
pass
@classmethod
def mocked_send(cls, message):
cls.queue.put(message.serialize())
sleep(0)
def recv(self):
results = self.queue.get()
return Message.unserialize(results)
def send(self, message):
self.outbox.append(message.serialize())
return MockedRpcServer
class TestMasterRunner(LocustTestCase):
def setUp(self):
global_stats.reset_all()
self._slave_report_event_handlers = [h for h in events.slave_report._handlers]
parser, _, _ = parse_options()
args = [
"--clients", "10",
"--hatch-rate", "10"
]
opts, _ = parser.parse_args(args)
self.options = opts
def tearDown(self):
events.slave_report._handlers = self._slave_report_event_handlers
def test_slave_connect(self):
class MyTestLocust(Locust):
pass
with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
master = MasterLocustRunner(MyTestLocust, self.options)
server.mocked_send(Message("client_ready", None, "zeh_fake_client1"))
self.assertEqual(1, len(master.clients))
self.assertTrue("zeh_fake_client1" in master.clients, "Could not find fake client in master instance's clients dict")
server.mocked_send(Message("client_ready", None, "zeh_fake_client2"))
server.mocked_send(Message("client_ready", None, "zeh_fake_client3"))
server.mocked_send(Message("client_ready", None, "zeh_fake_client4"))
self.assertEqual(4, len(master.clients))
server.mocked_send(Message("quit", None, "zeh_fake_client3"))
self.assertEqual(3, len(master.clients))
def test_slave_stats_report_median(self):
class MyTestLocust(Locust):
pass
with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
master = MasterLocustRunner(MyTestLocust, self.options)
server.mocked_send(Message("client_ready", None, "fake_client"))
master.stats.get("/", "GET").log(100, 23455)
master.stats.get("/", "GET").log(800, 23455)
master.stats.get("/", "GET").log(700, 23455)
data = {"user_count":1}
events.report_to_master.fire(client_id="fake_client", data=data)
master.stats.clear_all()
server.mocked_send(Message("stats", data, "fake_client"))
s = master.stats.get("/", "GET")
self.assertEqual(700, s.median_response_time)
def test_master_total_stats(self):
class MyTestLocust(Locust):
pass
with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
master = MasterLocustRunner(MyTestLocust, self.options)
server.mocked_send(Message("client_ready", None, "fake_client"))
stats = RequestStats()
stats.log_request("GET", "/1", 100, 3546)
stats.log_request("GET", "/1", 800, 56743)
stats2 = RequestStats()
stats2.log_request("GET", "/2", 700, 2201)
server.mocked_send(Message("stats", {
"stats":stats.serialize_stats(),
"stats_total": stats.total.serialize(),
"errors":stats.serialize_errors(),
"user_count": 1,
}, "fake_client"))
server.mocked_send(Message("stats", {
"stats":stats2.serialize_stats(),
"stats_total": stats2.total.serialize(),
"errors":stats2.serialize_errors(),
"user_count": 2,
}, "fake_client"))
self.assertEqual(700, master.stats.total.median_response_time)
def test_master_current_response_times(self):
class MyTestLocust(Locust):
pass
start_time = 1
with mock.patch("time.time") as mocked_time:
mocked_time.return_value = start_time
global_stats.reset_all()
with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
master = MasterLocustRunner(MyTestLocust, self.options)
mocked_time.return_value += 1
server.mocked_send(Message("client_ready", None, "fake_client"))
stats = RequestStats()
stats.log_request("GET", "/1", 100, 3546)
stats.log_request("GET", "/1", 800, 56743)
server.mocked_send(Message("stats", {
"stats":stats.serialize_stats(),
"stats_total": stats.total.get_stripped_report(),
"errors":stats.serialize_errors(),
"user_count": 1,
}, "fake_client"))
mocked_time.return_value += 1
stats2 = RequestStats()
stats2.log_request("GET", "/2", 400, 2201)
server.mocked_send(Message("stats", {
"stats":stats2.serialize_stats(),
"stats_total": stats2.total.get_stripped_report(),
"errors":stats2.serialize_errors(),
"user_count": 2,
}, "fake_client"))
mocked_time.return_value += 4
self.assertEqual(400, master.stats.total.get_current_response_time_percentile(0.5))
self.assertEqual(800, master.stats.total.get_current_response_time_percentile(0.95))
                # let 10 seconds pass, do some more requests, send them to the master and make
                # sure the current response time percentiles only account for these new requests
mocked_time.return_value += 10
stats.log_request("GET", "/1", 20, 1)
stats.log_request("GET", "/1", 30, 1)
stats.log_request("GET", "/1", 3000, 1)
server.mocked_send(Message("stats", {
"stats":stats.serialize_stats(),
"stats_total": stats.total.get_stripped_report(),
"errors":stats.serialize_errors(),
"user_count": 2,
}, "fake_client"))
self.assertEqual(30, master.stats.total.get_current_response_time_percentile(0.5))
self.assertEqual(3000, master.stats.total.get_current_response_time_percentile(0.95))
def test_spawn_zero_locusts(self):
class MyTaskSet(TaskSet):
@task
def my_task(self):
pass
class MyTestLocust(Locust):
task_set = MyTaskSet
min_wait = 100
max_wait = 100
runner = LocalLocustRunner([MyTestLocust], self.options)
timeout = gevent.Timeout(2.0)
timeout.start()
try:
runner.start_hatching(0, 1, wait=True)
runner.greenlet.join()
except gevent.Timeout:
self.fail("Got Timeout exception. A locust seems to have been spawned, even though 0 was specified.")
finally:
timeout.cancel()
def test_spawn_uneven_locusts(self):
"""
        Tests that we can accurately spawn a certain number of locusts, even if it's not
        evenly divisible among the connected slaves
"""
class MyTestLocust(Locust):
pass
with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
master = MasterLocustRunner(MyTestLocust, self.options)
for i in range(5):
server.mocked_send(Message("client_ready", None, "fake_client%i" % i))
master.start_hatching(7, 7)
self.assertEqual(5, len(server.outbox))
num_clients = 0
for msg in server.outbox:
num_clients += Message.unserialize(msg).data["num_clients"]
self.assertEqual(7, num_clients, "Total number of locusts that would have been spawned is not 7")
def test_spawn_fewer_locusts_than_slaves(self):
class MyTestLocust(Locust):
pass
with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
master = MasterLocustRunner(MyTestLocust, self.options)
for i in range(5):
server.mocked_send(Message("client_ready", None, "fake_client%i" % i))
master.start_hatching(2, 2)
self.assertEqual(5, len(server.outbox))
num_clients = 0
for msg in server.outbox:
num_clients += Message.unserialize(msg).data["num_clients"]
self.assertEqual(2, num_clients, "Total number of locusts that would have been spawned is not 2")
def test_exception_in_task(self):
class HeyAnException(Exception):
pass
class MyLocust(Locust):
class task_set(TaskSet):
@task
def will_error(self):
raise HeyAnException(":(")
runner = LocalLocustRunner([MyLocust], self.options)
l = MyLocust()
l._catch_exceptions = False
self.assertRaises(HeyAnException, l.run)
self.assertRaises(HeyAnException, l.run)
self.assertEqual(1, len(runner.exceptions))
hash_key, exception = runner.exceptions.popitem()
self.assertTrue("traceback" in exception)
self.assertTrue("HeyAnException" in exception["traceback"])
self.assertEqual(2, exception["count"])
def test_exception_is_catched(self):
""" Test that exceptions are stored, and execution continues """
class HeyAnException(Exception):
pass
class MyTaskSet(TaskSet):
def __init__(self, *a, **kw):
super(MyTaskSet, self).__init__(*a, **kw)
self._task_queue = [
{"callable":self.will_error, "args":[], "kwargs":{}},
{"callable":self.will_stop, "args":[], "kwargs":{}},
]
@task(1)
def will_error(self):
raise HeyAnException(":(")
@task(1)
def will_stop(self):
self.interrupt()
class MyLocust(Locust):
min_wait = 10
max_wait = 10
task_set = MyTaskSet
runner = LocalLocustRunner([MyLocust], self.options)
l = MyLocust()
        # suppress stderr
with mock.patch("sys.stderr") as mocked:
l.task_set._task_queue = [l.task_set.will_error, l.task_set.will_stop]
self.assertRaises(LocustError, l.run) # make sure HeyAnException isn't raised
l.task_set._task_queue = [l.task_set.will_error, l.task_set.will_stop]
self.assertRaises(LocustError, l.run) # make sure HeyAnException isn't raised
self.assertEqual(2, len(mocked.method_calls))
# make sure exception was stored
self.assertEqual(1, len(runner.exceptions))
hash_key, exception = runner.exceptions.popitem()
self.assertTrue("traceback" in exception)
self.assertTrue("HeyAnException" in exception["traceback"])
self.assertEqual(2, exception["count"])
class TestMessageSerializing(unittest.TestCase):
def test_message_serialize(self):
msg = Message("client_ready", None, "my_id")
rebuilt = Message.unserialize(msg.serialize())
self.assertEqual(msg.type, rebuilt.type)
self.assertEqual(msg.data, rebuilt.data)
self.assertEqual(msg.node_id, rebuilt.node_id)
| 40.417476 | 129 | 0.57899 | import unittest
import gevent
from gevent import sleep
from gevent.queue import Queue
import mock
from locust import events
from locust.core import Locust, TaskSet, task
from locust.exception import LocustError
from locust.main import parse_options
from locust.rpc import Message
from locust.runners import LocalLocustRunner, MasterLocustRunner
from locust.stats import global_stats, RequestStats
from locust.test.testcases import LocustTestCase
def mocked_rpc_server():
class MockedRpcServer(object):
queue = Queue()
outbox = []
def __init__(self, host, port):
pass
@classmethod
def mocked_send(cls, message):
cls.queue.put(message.serialize())
sleep(0)
def recv(self):
results = self.queue.get()
return Message.unserialize(results)
def send(self, message):
self.outbox.append(message.serialize())
return MockedRpcServer
class TestMasterRunner(LocustTestCase):
def setUp(self):
global_stats.reset_all()
self._slave_report_event_handlers = [h for h in events.slave_report._handlers]
parser, _, _ = parse_options()
args = [
"--clients", "10",
"--hatch-rate", "10"
]
opts, _ = parser.parse_args(args)
self.options = opts
def tearDown(self):
events.slave_report._handlers = self._slave_report_event_handlers
def test_slave_connect(self):
class MyTestLocust(Locust):
pass
with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
master = MasterLocustRunner(MyTestLocust, self.options)
server.mocked_send(Message("client_ready", None, "zeh_fake_client1"))
self.assertEqual(1, len(master.clients))
self.assertTrue("zeh_fake_client1" in master.clients, "Could not find fake client in master instance's clients dict")
server.mocked_send(Message("client_ready", None, "zeh_fake_client2"))
server.mocked_send(Message("client_ready", None, "zeh_fake_client3"))
server.mocked_send(Message("client_ready", None, "zeh_fake_client4"))
self.assertEqual(4, len(master.clients))
server.mocked_send(Message("quit", None, "zeh_fake_client3"))
self.assertEqual(3, len(master.clients))
def test_slave_stats_report_median(self):
class MyTestLocust(Locust):
pass
with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
master = MasterLocustRunner(MyTestLocust, self.options)
server.mocked_send(Message("client_ready", None, "fake_client"))
master.stats.get("/", "GET").log(100, 23455)
master.stats.get("/", "GET").log(800, 23455)
master.stats.get("/", "GET").log(700, 23455)
data = {"user_count":1}
events.report_to_master.fire(client_id="fake_client", data=data)
master.stats.clear_all()
server.mocked_send(Message("stats", data, "fake_client"))
s = master.stats.get("/", "GET")
self.assertEqual(700, s.median_response_time)
def test_master_total_stats(self):
class MyTestLocust(Locust):
pass
with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
master = MasterLocustRunner(MyTestLocust, self.options)
server.mocked_send(Message("client_ready", None, "fake_client"))
stats = RequestStats()
stats.log_request("GET", "/1", 100, 3546)
stats.log_request("GET", "/1", 800, 56743)
stats2 = RequestStats()
stats2.log_request("GET", "/2", 700, 2201)
server.mocked_send(Message("stats", {
"stats":stats.serialize_stats(),
"stats_total": stats.total.serialize(),
"errors":stats.serialize_errors(),
"user_count": 1,
}, "fake_client"))
server.mocked_send(Message("stats", {
"stats":stats2.serialize_stats(),
"stats_total": stats2.total.serialize(),
"errors":stats2.serialize_errors(),
"user_count": 2,
}, "fake_client"))
self.assertEqual(700, master.stats.total.median_response_time)
def test_master_current_response_times(self):
class MyTestLocust(Locust):
pass
start_time = 1
with mock.patch("time.time") as mocked_time:
mocked_time.return_value = start_time
global_stats.reset_all()
with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
master = MasterLocustRunner(MyTestLocust, self.options)
mocked_time.return_value += 1
server.mocked_send(Message("client_ready", None, "fake_client"))
stats = RequestStats()
stats.log_request("GET", "/1", 100, 3546)
stats.log_request("GET", "/1", 800, 56743)
server.mocked_send(Message("stats", {
"stats":stats.serialize_stats(),
"stats_total": stats.total.get_stripped_report(),
"errors":stats.serialize_errors(),
"user_count": 1,
}, "fake_client"))
mocked_time.return_value += 1
stats2 = RequestStats()
stats2.log_request("GET", "/2", 400, 2201)
server.mocked_send(Message("stats", {
"stats":stats2.serialize_stats(),
"stats_total": stats2.total.get_stripped_report(),
"errors":stats2.serialize_errors(),
"user_count": 2,
}, "fake_client"))
mocked_time.return_value += 4
self.assertEqual(400, master.stats.total.get_current_response_time_percentile(0.5))
self.assertEqual(800, master.stats.total.get_current_response_time_percentile(0.95))
                # let 10 seconds pass, do some more requests, send them to the master and make
                # sure the current response time percentiles only account for these new requests
mocked_time.return_value += 10
stats.log_request("GET", "/1", 20, 1)
stats.log_request("GET", "/1", 30, 1)
stats.log_request("GET", "/1", 3000, 1)
server.mocked_send(Message("stats", {
"stats":stats.serialize_stats(),
"stats_total": stats.total.get_stripped_report(),
"errors":stats.serialize_errors(),
"user_count": 2,
}, "fake_client"))
self.assertEqual(30, master.stats.total.get_current_response_time_percentile(0.5))
self.assertEqual(3000, master.stats.total.get_current_response_time_percentile(0.95))
def test_spawn_zero_locusts(self):
class MyTaskSet(TaskSet):
@task
def my_task(self):
pass
class MyTestLocust(Locust):
task_set = MyTaskSet
min_wait = 100
max_wait = 100
runner = LocalLocustRunner([MyTestLocust], self.options)
timeout = gevent.Timeout(2.0)
timeout.start()
try:
runner.start_hatching(0, 1, wait=True)
runner.greenlet.join()
except gevent.Timeout:
self.fail("Got Timeout exception. A locust seems to have been spawned, even though 0 was specified.")
finally:
timeout.cancel()
def test_spawn_uneven_locusts(self):
class MyTestLocust(Locust):
pass
with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
master = MasterLocustRunner(MyTestLocust, self.options)
for i in range(5):
server.mocked_send(Message("client_ready", None, "fake_client%i" % i))
master.start_hatching(7, 7)
self.assertEqual(5, len(server.outbox))
num_clients = 0
for msg in server.outbox:
num_clients += Message.unserialize(msg).data["num_clients"]
self.assertEqual(7, num_clients, "Total number of locusts that would have been spawned is not 7")
def test_spawn_fewer_locusts_than_slaves(self):
class MyTestLocust(Locust):
pass
with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
master = MasterLocustRunner(MyTestLocust, self.options)
for i in range(5):
server.mocked_send(Message("client_ready", None, "fake_client%i" % i))
master.start_hatching(2, 2)
self.assertEqual(5, len(server.outbox))
num_clients = 0
for msg in server.outbox:
num_clients += Message.unserialize(msg).data["num_clients"]
self.assertEqual(2, num_clients, "Total number of locusts that would have been spawned is not 2")
def test_exception_in_task(self):
class HeyAnException(Exception):
pass
class MyLocust(Locust):
class task_set(TaskSet):
@task
def will_error(self):
raise HeyAnException(":(")
runner = LocalLocustRunner([MyLocust], self.options)
l = MyLocust()
l._catch_exceptions = False
self.assertRaises(HeyAnException, l.run)
self.assertRaises(HeyAnException, l.run)
self.assertEqual(1, len(runner.exceptions))
hash_key, exception = runner.exceptions.popitem()
self.assertTrue("traceback" in exception)
self.assertTrue("HeyAnException" in exception["traceback"])
self.assertEqual(2, exception["count"])
def test_exception_is_catched(self):
class HeyAnException(Exception):
pass
class MyTaskSet(TaskSet):
def __init__(self, *a, **kw):
super(MyTaskSet, self).__init__(*a, **kw)
self._task_queue = [
{"callable":self.will_error, "args":[], "kwargs":{}},
{"callable":self.will_stop, "args":[], "kwargs":{}},
]
@task(1)
def will_error(self):
raise HeyAnException(":(")
@task(1)
def will_stop(self):
self.interrupt()
class MyLocust(Locust):
min_wait = 10
max_wait = 10
task_set = MyTaskSet
runner = LocalLocustRunner([MyLocust], self.options)
l = MyLocust()
        # suppress stderr
with mock.patch("sys.stderr") as mocked:
l.task_set._task_queue = [l.task_set.will_error, l.task_set.will_stop]
self.assertRaises(LocustError, l.run) # make sure HeyAnException isn't raised
l.task_set._task_queue = [l.task_set.will_error, l.task_set.will_stop]
            self.assertRaises(LocustError, l.run)
            self.assertEqual(2, len(mocked.method_calls))
# make sure exception was stored
self.assertEqual(1, len(runner.exceptions))
hash_key, exception = runner.exceptions.popitem()
self.assertTrue("traceback" in exception)
self.assertTrue("HeyAnException" in exception["traceback"])
self.assertEqual(2, exception["count"])
class TestMessageSerializing(unittest.TestCase):
def test_message_serialize(self):
msg = Message("client_ready", None, "my_id")
rebuilt = Message.unserialize(msg.serialize())
self.assertEqual(msg.type, rebuilt.type)
self.assertEqual(msg.data, rebuilt.data)
self.assertEqual(msg.node_id, rebuilt.node_id)
| true | true |
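As a quick illustration of what the median assertions above are checking, this short sketch (not from the original test file) logs the same three response times directly into a RequestStats instance.
from locust.stats import RequestStats
stats = RequestStats()
stats.log_request("GET", "/", 100, 23455)
stats.log_request("GET", "/", 800, 23455)
stats.log_request("GET", "/", 700, 23455)
print(stats.get("/", "GET").median_response_time)  # 700, matching test_slave_stats_report_median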
f70083f0a986985862c8523f29ef60425bd92fd6 | 1,690 | py | Python | revoke.py | jarviswwong/openssl-ca-server | 7e966d5c1e9be28466553cb7b8e9a94cdf5d22ee | [
"MIT"
] | 3 | 2021-07-29T09:49:08.000Z | 2021-11-25T11:42:34.000Z | revoke.py | Damoclesword/openssl-ca-server | 7e966d5c1e9be28466553cb7b8e9a94cdf5d22ee | [
"MIT"
] | null | null | null | revoke.py | Damoclesword/openssl-ca-server | 7e966d5c1e9be28466553cb7b8e9a94cdf5d22ee | [
"MIT"
] | 2 | 2020-04-28T23:34:44.000Z | 2021-06-10T05:34:36.000Z | import os
import datetime
import hashlib
import pexpect
from config import *
from common import openssl, jsonMessage, gencrl
from OpenSSL import crypto
# Revoke a certificate using the certificate file itself
def revokeFromCert(cert):
    # Read the certificate data
try:
x509_obj = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
        # get_serial_number returns the serial in decimal; it must be converted to hexadecimal
serial = hex(x509_obj.get_serial_number())[2:]
except crypto.Error:
return jsonMessage(status=-1,
msg="[ERROR]: Wrong certificate (X509) format!")
    # Save it into a temporary directory
path = os.path.join(
'/tmp',
hashlib.md5(str(datetime.datetime.now()).encode('utf-8')).hexdigest() +
"_revokecert.crt")
with open(path, "w") as f:
f.write(cert.decode('utf8'))
return revoking(path, serial)
# Revoke a certificate by its serial number, by looking up the backup of that certificate under CA/newcerts
# @serial: must be in hexadecimal format
def revokeFromSerial(serial):
path = os.path.join(CA_NEWCERTS, serial + ".pem")
if not os.path.exists(path):
msg = "[ERROR]: This may be an invalid serial number!"
return jsonMessage(-1, msg)
return revoking(path, serial)
def revoking(certfile, serial):
child = openssl('ca', '-revoke', certfile)
ret = child.expect(
['Already revoked', 'Revoking Certificate', pexpect.EOF])
if ret == 0:
msg = "[ERROR]: This certificate is revoked!"
return jsonMessage(-1, msg)
elif ret == 1:
msg = "Revoke Certificate success! Serial number is " + serial
        # Regenerate the CRL file
gencrl()
return jsonMessage(0, msg, {"Serial Number": serial})
elif ret == 2:
msg = "[ERROR]: Revoke failed, unknown error!"
return jsonMessage(-1, msg)
| 29.137931 | 79 | 0.634911 | import os
import datetime
import hashlib
import pexpect
from config import *
from common import openssl, jsonMessage, gencrl
from OpenSSL import crypto
def revokeFromCert(cert):
try:
x509_obj = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
serial = hex(x509_obj.get_serial_number())[2:]
except crypto.Error:
return jsonMessage(status=-1,
msg="[ERROR]: Wrong certificate (X509) format!")
path = os.path.join(
'/tmp',
hashlib.md5(str(datetime.datetime.now()).encode('utf-8')).hexdigest() +
"_revokecert.crt")
with open(path, "w") as f:
f.write(cert.decode('utf8'))
return revoking(path, serial)
def revokeFromSerial(serial):
path = os.path.join(CA_NEWCERTS, serial + ".pem")
if not os.path.exists(path):
msg = "[ERROR]: This may be an invalid serial number!"
return jsonMessage(-1, msg)
return revoking(path, serial)
def revoking(certfile, serial):
child = openssl('ca', '-revoke', certfile)
ret = child.expect(
['Already revoked', 'Revoking Certificate', pexpect.EOF])
if ret == 0:
msg = "[ERROR]: This certificate is revoked!"
return jsonMessage(-1, msg)
elif ret == 1:
msg = "Revoke Certificate success! Serial number is " + serial
gencrl()
return jsonMessage(0, msg, {"Serial Number": serial})
elif ret == 2:
msg = "[ERROR]: Revoke failed, unknown error!"
return jsonMessage(-1, msg)
| true | true |
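A hedged usage sketch for the revocation helpers above (not part of the original module); it assumes config.py provides the CA paths and that an HTTP layer normally supplies the certificate bytes. The file name and serial below are placeholders.
# Revoke using a PEM certificate file (placeholder path):
with open('client.crt', 'rb') as f:
    result = revokeFromCert(f.read())   # returns the jsonMessage(...) payload
# Or revoke by the hexadecimal serial recorded under CA/newcerts (placeholder serial):
result = revokeFromSerial('1a2b3c4d')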
f700848773d400c4ada7594725b4d25b98b7c121 | 1,536 | py | Python | dev/circuitpython/examples/neotrellis_simpletest.py | scripsi/picodeebee | 0ec77e92f09fa8711705623482e57a5e0b702696 | [
"MIT"
] | 7 | 2021-03-15T10:06:20.000Z | 2022-03-23T02:53:15.000Z | Lights/adafruit-circuitpython-bundle-6.x-mpy-20210310/examples/neotrellis_simpletest.py | IanSMoyes/SpiderPi | cc3469980ae87b92d0dc43c05dbd579f0fa8c4b1 | [
"Apache-2.0"
] | 5 | 2021-04-27T18:21:11.000Z | 2021-05-02T14:17:14.000Z | Lights/adafruit-circuitpython-bundle-6.x-mpy-20210310/examples/neotrellis_simpletest.py | IanSMoyes/SpiderPi | cc3469980ae87b92d0dc43c05dbd579f0fa8c4b1 | [
"Apache-2.0"
] | null | null | null | # SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
import time
from board import SCL, SDA
import busio
from adafruit_neotrellis.neotrellis import NeoTrellis
# create the i2c object for the trellis
i2c_bus = busio.I2C(SCL, SDA)
# create the trellis
trellis = NeoTrellis(i2c_bus)
# some color definitions
OFF = (0, 0, 0)
RED = (255, 0, 0)
YELLOW = (255, 150, 0)
GREEN = (0, 255, 0)
CYAN = (0, 255, 255)
BLUE = (0, 0, 255)
PURPLE = (180, 0, 255)
# this will be called when button events are received
def blink(event):
# turn the LED on when a rising edge is detected
if event.edge == NeoTrellis.EDGE_RISING:
trellis.pixels[event.number] = CYAN
    # turn the LED off when a falling edge is detected
elif event.edge == NeoTrellis.EDGE_FALLING:
trellis.pixels[event.number] = OFF
for i in range(16):
# activate rising edge events on all keys
trellis.activate_key(i, NeoTrellis.EDGE_RISING)
# activate falling edge events on all keys
trellis.activate_key(i, NeoTrellis.EDGE_FALLING)
# set all keys to trigger the blink callback
trellis.callbacks[i] = blink
# cycle the LEDs on startup
trellis.pixels[i] = PURPLE
time.sleep(0.05)
for i in range(16):
trellis.pixels[i] = OFF
time.sleep(0.05)
while True:
    # call the sync function to fire any triggered callbacks
trellis.sync()
    # the trellis can only be read every 17 milliseconds or so
time.sleep(0.02)
| 27.428571 | 63 | 0.676432 |
import time
from board import SCL, SDA
import busio
from adafruit_neotrellis.neotrellis import NeoTrellis
i2c_bus = busio.I2C(SCL, SDA)
trellis = NeoTrellis(i2c_bus)
OFF = (0, 0, 0)
RED = (255, 0, 0)
YELLOW = (255, 150, 0)
GREEN = (0, 255, 0)
CYAN = (0, 255, 255)
BLUE = (0, 0, 255)
PURPLE = (180, 0, 255)
def blink(event):
if event.edge == NeoTrellis.EDGE_RISING:
trellis.pixels[event.number] = CYAN
elif event.edge == NeoTrellis.EDGE_FALLING:
trellis.pixels[event.number] = OFF
for i in range(16):
trellis.activate_key(i, NeoTrellis.EDGE_RISING)
trellis.activate_key(i, NeoTrellis.EDGE_FALLING)
trellis.callbacks[i] = blink
trellis.pixels[i] = PURPLE
time.sleep(0.05)
for i in range(16):
trellis.pixels[i] = OFF
time.sleep(0.05)
while True:
trellis.sync()
time.sleep(0.02)
| true | true |
f70085721ab1f27e1f288841c761b393cb3b5ed0 | 2,068 | py | Python | MuPythonLibrary/Uefi/EdkII/VariableFormat_Test.py | matthewfcarlson/mu_pip_python_library | 659538b80fd5c060e053e14a828d9d41161682a1 | [
"BSD-2-Clause"
] | 8 | 2019-10-05T09:06:39.000Z | 2022-03-11T10:45:12.000Z | MuPythonLibrary/Uefi/EdkII/VariableFormat_Test.py | matthewfcarlson/mu_pip_python_library | 659538b80fd5c060e053e14a828d9d41161682a1 | [
"BSD-2-Clause"
] | 3 | 2018-12-14T21:14:17.000Z | 2019-04-18T20:26:55.000Z | MuPythonLibrary/Uefi/EdkII/VariableFormat_Test.py | matthewfcarlson/mu_pip_python_library | 659538b80fd5c060e053e14a828d9d41161682a1 | [
"BSD-2-Clause"
] | 8 | 2019-05-10T19:18:39.000Z | 2022-03-11T10:45:09.000Z | # @file VariableFormat_Test.py
# Unit test harness for the VariableFormat module/classes.
#
##
# Copyright (c) 2017, Microsoft Corporation
#
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
import unittest
import MuPythonLibrary.Uefi.EdkII.VariableFormat as VF
class TestVariableHeader(unittest.TestCase):
def test_set_name(self):
var = VF.VariableHeader()
test_name = "MyNewName"
var.set_name(test_name)
self.assertEqual(var.Name, test_name)
def test_get_packed_name(self):
var = VF.VariableHeader()
test_name = "MyNewName"
var.set_name(test_name)
test_name_packed = bytes.fromhex('4D0079004E00650077004E0061006D0065000000')
self.assertEqual(var.get_packed_name(), test_name_packed)
if __name__ == '__main__':
unittest.main()
| 38.296296 | 84 | 0.762089 |
import unittest
import MuPythonLibrary.Uefi.EdkII.VariableFormat as VF
class TestVariableHeader(unittest.TestCase):
def test_set_name(self):
var = VF.VariableHeader()
test_name = "MyNewName"
var.set_name(test_name)
self.assertEqual(var.Name, test_name)
def test_get_packed_name(self):
var = VF.VariableHeader()
test_name = "MyNewName"
var.set_name(test_name)
test_name_packed = bytes.fromhex('4D0079004E00650077004E0061006D0065000000')
self.assertEqual(var.get_packed_name(), test_name_packed)
if __name__ == '__main__':
unittest.main()
| true | true |
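A small aside (not in the original test) on where the expected hex constant comes from: the packed name appears to be the little-endian UTF-16 encoding of the string plus a two-byte NUL terminator.
packed = "MyNewName".encode("utf_16_le") + b"\x00\x00"
print(packed.hex().upper())  # 4D0079004E00650077004E0061006D0065000000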
f7008588a2462d13b0cabd06be14dc705036a2fd | 3,068 | py | Python | yaql/tests/test_common.py | nzlosh/yaql | bb65fc64026d431ffb866d02825deb3a0e4b5943 | [
"Apache-2.0"
] | 112 | 2015-10-18T02:57:41.000Z | 2022-03-28T18:26:36.000Z | yaql/tests/test_common.py | nzlosh/yaql | bb65fc64026d431ffb866d02825deb3a0e4b5943 | [
"Apache-2.0"
] | 3 | 2020-06-09T11:54:38.000Z | 2021-04-30T06:12:37.000Z | yaql/tests/test_common.py | nzlosh/yaql | bb65fc64026d431ffb866d02825deb3a0e4b5943 | [
"Apache-2.0"
] | 27 | 2015-12-10T00:10:02.000Z | 2022-03-20T21:51:01.000Z | # Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaql.tests
class TestCommon(yaql.tests.TestCase):
def test_null(self):
self.assertIsNone(self.eval('null'))
def test_true(self):
res = self.eval('true')
self.assertTrue(res)
self.assertIsInstance(res, bool)
def test_false(self):
res = self.eval('false')
self.assertFalse(res)
self.assertIsInstance(res, bool)
def test_string(self):
self.assertEqual('True', self.eval('True'))
self.assertEqual('some string', self.eval("'some string'"))
def test_null_to_null(self):
self.assertTrue(self.eval('null = null'))
self.assertFalse(self.eval('null != null'))
self.assertTrue(self.eval('null <= null'))
self.assertTrue(self.eval('null >= null'))
self.assertFalse(self.eval('null < null'))
self.assertFalse(self.eval('null > null'))
def test_ordering(self):
self.assertTrue(self.eval('null < 0'))
self.assertTrue(self.eval('null < true'))
self.assertTrue(self.eval('null < false'))
self.assertTrue(self.eval('null < a'))
self.assertTrue(self.eval('null <= 0'))
self.assertFalse(self.eval('null > 0'))
self.assertFalse(self.eval('null >= 0'))
self.assertTrue(self.eval('null != 0'))
self.assertTrue(self.eval('null != false'))
self.assertFalse(self.eval('null = false'))
self.assertFalse(self.eval('null = 0'))
self.assertFalse(self.eval('0 < null'))
self.assertFalse(self.eval('0 <= null'))
self.assertTrue(self.eval('0 >= null'))
self.assertTrue(self.eval('0 > null'))
def test_max(self):
self.assertEqual(5, self.eval('max(1, 5)'))
self.assertEqual(-1, self.eval('max(null, -1)'))
self.assertIsNone(self.eval('max(null, null)'))
def test_min(self):
self.assertEqual(1, self.eval('min(1, 5)'))
self.assertIsNone(self.eval('min(null, -1)'))
self.assertIsNone(self.eval('min(null, null)'))
def test_comparision_of_incomparable(self):
self.assertFalse(self.eval('a = 1'))
self.assertFalse(self.eval('a = false'))
self.assertFalse(self.eval('a = null'))
self.assertFalse(self.eval('[a] = [false]'))
self.assertTrue(self.eval('a != 1'))
self.assertTrue(self.eval('a != false'))
self.assertTrue(self.eval('[a] != [false]'))
self.assertTrue(self.eval('a != null'))
| 38.35 | 78 | 0.622229 |
import yaql.tests
class TestCommon(yaql.tests.TestCase):
def test_null(self):
self.assertIsNone(self.eval('null'))
def test_true(self):
res = self.eval('true')
self.assertTrue(res)
self.assertIsInstance(res, bool)
def test_false(self):
res = self.eval('false')
self.assertFalse(res)
self.assertIsInstance(res, bool)
def test_string(self):
self.assertEqual('True', self.eval('True'))
self.assertEqual('some string', self.eval("'some string'"))
def test_null_to_null(self):
self.assertTrue(self.eval('null = null'))
self.assertFalse(self.eval('null != null'))
self.assertTrue(self.eval('null <= null'))
self.assertTrue(self.eval('null >= null'))
self.assertFalse(self.eval('null < null'))
self.assertFalse(self.eval('null > null'))
def test_ordering(self):
self.assertTrue(self.eval('null < 0'))
self.assertTrue(self.eval('null < true'))
self.assertTrue(self.eval('null < false'))
self.assertTrue(self.eval('null < a'))
self.assertTrue(self.eval('null <= 0'))
self.assertFalse(self.eval('null > 0'))
self.assertFalse(self.eval('null >= 0'))
self.assertTrue(self.eval('null != 0'))
self.assertTrue(self.eval('null != false'))
self.assertFalse(self.eval('null = false'))
self.assertFalse(self.eval('null = 0'))
self.assertFalse(self.eval('0 < null'))
self.assertFalse(self.eval('0 <= null'))
self.assertTrue(self.eval('0 >= null'))
self.assertTrue(self.eval('0 > null'))
def test_max(self):
self.assertEqual(5, self.eval('max(1, 5)'))
self.assertEqual(-1, self.eval('max(null, -1)'))
self.assertIsNone(self.eval('max(null, null)'))
def test_min(self):
self.assertEqual(1, self.eval('min(1, 5)'))
self.assertIsNone(self.eval('min(null, -1)'))
self.assertIsNone(self.eval('min(null, null)'))
def test_comparision_of_incomparable(self):
self.assertFalse(self.eval('a = 1'))
self.assertFalse(self.eval('a = false'))
self.assertFalse(self.eval('a = null'))
self.assertFalse(self.eval('[a] = [false]'))
self.assertTrue(self.eval('a != 1'))
self.assertTrue(self.eval('a != false'))
self.assertTrue(self.eval('[a] != [false]'))
self.assertTrue(self.eval('a != null'))
| true | true |
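The same null-ordering and min/max behaviour can be tried outside the test harness; this sketch assumes yaql's documented factory API (yaql.factory.YaqlFactory), which is not shown in the file above.
import yaql
engine = yaql.factory.YaqlFactory().create()
print(engine('max(1, 5)').evaluate())      # 5
print(engine('min(null, -1)').evaluate())  # None: min with null is null
print(engine('null < 0').evaluate())       # True: null orders before any value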
f70086097b2870bddac591ff695bf96e16de715d | 7,800 | py | Python | keras/saving/saved_model/layer_serialization.py | quantumalaviya/keras | 8d874de12ed2e199d9528bfff891f4f60ee2a636 | [
"Apache-2.0"
] | 1 | 2021-09-11T21:25:20.000Z | 2021-09-11T21:25:20.000Z | keras/saving/saved_model/layer_serialization.py | quantumalaviya/keras | 8d874de12ed2e199d9528bfff891f4f60ee2a636 | [
"Apache-2.0"
] | null | null | null | keras/saving/saved_model/layer_serialization.py | quantumalaviya/keras | 8d874de12ed2e199d9528bfff891f4f60ee2a636 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions implementing Layer SavedModel serialization."""
from keras.mixed_precision import policy
from keras.saving.saved_model import base_serialization
from keras.saving.saved_model import constants
from keras.saving.saved_model import save_impl
from keras.saving.saved_model import serialized_attributes
from keras.utils import generic_utils
import tensorflow.compat.v2 as tf
class LayerSavedModelSaver(base_serialization.SavedModelSaver):
"""Implements Layer SavedModel serialization."""
@property
def object_identifier(self):
return constants.LAYER_IDENTIFIER
@property
def python_properties(self):
# TODO(kathywu): Add python property validator
return self._python_properties_internal()
def _python_properties_internal(self):
"""Returns dictionary of all python properties."""
# TODO(kathywu): Add support for metrics serialization.
# TODO(kathywu): Synchronize with the keras spec (go/keras-json-spec) once
# the python config serialization has caught up.
metadata = dict(
name=self.obj.name,
trainable=self.obj.trainable,
expects_training_arg=self.obj._expects_training_arg, # pylint: disable=protected-access
dtype=policy.serialize(self.obj._dtype_policy), # pylint: disable=protected-access
batch_input_shape=getattr(self.obj, '_batch_input_shape', None),
stateful=self.obj.stateful,
must_restore_from_config=self.obj._must_restore_from_config, # pylint: disable=protected-access
)
metadata.update(get_serialized(self.obj))
if self.obj.input_spec is not None:
# Layer's input_spec has already been type-checked in the property setter.
metadata['input_spec'] = tf.nest.map_structure(
lambda x: generic_utils.serialize_keras_object(x) if x else None,
self.obj.input_spec)
if (self.obj.activity_regularizer is not None and
hasattr(self.obj.activity_regularizer, 'get_config')):
metadata['activity_regularizer'] = generic_utils.serialize_keras_object(
self.obj.activity_regularizer)
if self.obj._build_input_shape is not None: # pylint: disable=protected-access
metadata['build_input_shape'] = self.obj._build_input_shape # pylint: disable=protected-access
return metadata
def objects_to_serialize(self, serialization_cache):
return (self._get_serialized_attributes(
serialization_cache).objects_to_serialize)
def functions_to_serialize(self, serialization_cache):
return (self._get_serialized_attributes(
serialization_cache).functions_to_serialize)
def _get_serialized_attributes(self, serialization_cache):
"""Generates or retrieves serialized attributes from cache."""
keras_cache = serialization_cache.setdefault(constants.KERAS_CACHE_KEY, {})
if self.obj in keras_cache:
return keras_cache[self.obj]
serialized_attr = keras_cache[self.obj] = (
serialized_attributes.SerializedAttributes.new(self.obj))
if (save_impl.should_skip_serialization(self.obj) or
self.obj._must_restore_from_config): # pylint: disable=protected-access
return serialized_attr
object_dict, function_dict = self._get_serialized_attributes_internal(
serialization_cache)
serialized_attr.set_and_validate_objects(object_dict)
serialized_attr.set_and_validate_functions(function_dict)
return serialized_attr
def _get_serialized_attributes_internal(self, serialization_cache):
"""Returns dictionary of serialized attributes."""
objects = save_impl.wrap_layer_objects(self.obj, serialization_cache)
functions = save_impl.wrap_layer_functions(self.obj, serialization_cache)
# Attribute validator requires that the default save signature is added to
# function dict, even if the value is None.
functions['_default_save_signature'] = None
return objects, functions
# TODO(kathywu): Move serialization utils (and related utils from
# generic_utils.py) to a separate file.
def get_serialized(obj):
with generic_utils.skip_failed_serialization():
# Store the config dictionary, which may be used when reviving the object.
# When loading, the program will attempt to revive the object from config,
# and if that fails, the object will be revived from the SavedModel.
return generic_utils.serialize_keras_object(obj)
class InputLayerSavedModelSaver(base_serialization.SavedModelSaver):
"""InputLayer serialization."""
@property
def object_identifier(self):
return constants.INPUT_LAYER_IDENTIFIER
@property
def python_properties(self):
return dict(
class_name=type(self.obj).__name__,
name=self.obj.name,
dtype=self.obj.dtype,
sparse=self.obj.sparse,
ragged=self.obj.ragged,
batch_input_shape=self.obj._batch_input_shape, # pylint: disable=protected-access
config=self.obj.get_config())
def objects_to_serialize(self, serialization_cache):
return {}
def functions_to_serialize(self, serialization_cache):
return {}
class RNNSavedModelSaver(LayerSavedModelSaver):
"""RNN layer serialization."""
@property
def object_identifier(self):
return constants.RNN_LAYER_IDENTIFIER
def _get_serialized_attributes_internal(self, serialization_cache):
objects, functions = (
super(RNNSavedModelSaver, self)._get_serialized_attributes_internal(
serialization_cache))
states = tf.__internal__.tracking.wrap(self.obj.states)
# SaveModel require all the objects to be Trackable when saving.
# If the states is still a tuple after wrap_or_unwrap, it means it doesn't
# contain any trackable item within it, eg empty tuple or (None, None) for
# stateless ConvLSTM2D. We convert them to list so that wrap_or_unwrap can
# make it a Trackable again for saving. When loaded, ConvLSTM2D is
# able to handle the tuple/list conversion.
if isinstance(states, tuple):
states = tf.__internal__.tracking.wrap(list(states))
objects['states'] = states
return objects, functions
class VocabularySavedModelSaver(LayerSavedModelSaver):
"""Handles vocabulary layer serialization.
This class is needed for StringLookup, IntegerLookup, and TextVectorization,
which all have a vocabulary as part of the config. Currently, we keep this
vocab as part of the config until saving, when we need to clear it to avoid
initializing a StaticHashTable twice (once when restoring the config and once
when restoring restoring module resources). After clearing the vocab, we
presist a property to the layer indicating it was constructed with a vocab.
"""
@property
def python_properties(self):
# TODO(kathywu): Add python property validator
metadata = self._python_properties_internal()
# Clear the vocabulary from the config during saving.
metadata['config']['vocabulary'] = None
# Persist a property to track that a vocabulary was passed on construction.
metadata['config']['has_input_vocabulary'] = self.obj._has_input_vocabulary # pylint: disable=protected-access
return metadata
| 42.162162 | 115 | 0.751154 |
from keras.mixed_precision import policy
from keras.saving.saved_model import base_serialization
from keras.saving.saved_model import constants
from keras.saving.saved_model import save_impl
from keras.saving.saved_model import serialized_attributes
from keras.utils import generic_utils
import tensorflow.compat.v2 as tf
class LayerSavedModelSaver(base_serialization.SavedModelSaver):
@property
def object_identifier(self):
return constants.LAYER_IDENTIFIER
@property
def python_properties(self):
return self._python_properties_internal()
def _python_properties_internal(self):
metadata = dict(
name=self.obj.name,
trainable=self.obj.trainable,
        expects_training_arg=self.obj._expects_training_arg,
        dtype=policy.serialize(self.obj._dtype_policy),
        batch_input_shape=getattr(self.obj, '_batch_input_shape', None),
stateful=self.obj.stateful,
        must_restore_from_config=self.obj._must_restore_from_config,
    )
metadata.update(get_serialized(self.obj))
if self.obj.input_spec is not None:
metadata['input_spec'] = tf.nest.map_structure(
lambda x: generic_utils.serialize_keras_object(x) if x else None,
self.obj.input_spec)
if (self.obj.activity_regularizer is not None and
hasattr(self.obj.activity_regularizer, 'get_config')):
metadata['activity_regularizer'] = generic_utils.serialize_keras_object(
self.obj.activity_regularizer)
if self.obj._build_input_shape is not None: # pylint: disable=protected-access
metadata['build_input_shape'] = self.obj._build_input_shape # pylint: disable=protected-access
return metadata
def objects_to_serialize(self, serialization_cache):
return (self._get_serialized_attributes(
serialization_cache).objects_to_serialize)
def functions_to_serialize(self, serialization_cache):
return (self._get_serialized_attributes(
serialization_cache).functions_to_serialize)
def _get_serialized_attributes(self, serialization_cache):
keras_cache = serialization_cache.setdefault(constants.KERAS_CACHE_KEY, {})
if self.obj in keras_cache:
return keras_cache[self.obj]
serialized_attr = keras_cache[self.obj] = (
serialized_attributes.SerializedAttributes.new(self.obj))
if (save_impl.should_skip_serialization(self.obj) or
self.obj._must_restore_from_config): # pylint: disable=protected-access
return serialized_attr
object_dict, function_dict = self._get_serialized_attributes_internal(
serialization_cache)
serialized_attr.set_and_validate_objects(object_dict)
serialized_attr.set_and_validate_functions(function_dict)
return serialized_attr
def _get_serialized_attributes_internal(self, serialization_cache):
objects = save_impl.wrap_layer_objects(self.obj, serialization_cache)
functions = save_impl.wrap_layer_functions(self.obj, serialization_cache)
# Attribute validator requires that the default save signature is added to
# function dict, even if the value is None.
functions['_default_save_signature'] = None
return objects, functions
# TODO(kathywu): Move serialization utils (and related utils from
# generic_utils.py) to a separate file.
def get_serialized(obj):
with generic_utils.skip_failed_serialization():
# Store the config dictionary, which may be used when reviving the object.
# When loading, the program will attempt to revive the object from config,
# and if that fails, the object will be revived from the SavedModel.
return generic_utils.serialize_keras_object(obj)
class InputLayerSavedModelSaver(base_serialization.SavedModelSaver):
@property
def object_identifier(self):
return constants.INPUT_LAYER_IDENTIFIER
@property
def python_properties(self):
return dict(
class_name=type(self.obj).__name__,
name=self.obj.name,
dtype=self.obj.dtype,
sparse=self.obj.sparse,
ragged=self.obj.ragged,
batch_input_shape=self.obj._batch_input_shape, # pylint: disable=protected-access
config=self.obj.get_config())
def objects_to_serialize(self, serialization_cache):
return {}
def functions_to_serialize(self, serialization_cache):
return {}
class RNNSavedModelSaver(LayerSavedModelSaver):
@property
def object_identifier(self):
return constants.RNN_LAYER_IDENTIFIER
def _get_serialized_attributes_internal(self, serialization_cache):
objects, functions = (
super(RNNSavedModelSaver, self)._get_serialized_attributes_internal(
serialization_cache))
states = tf.__internal__.tracking.wrap(self.obj.states)
# SaveModel require all the objects to be Trackable when saving.
# If the states is still a tuple after wrap_or_unwrap, it means it doesn't
if isinstance(states, tuple):
states = tf.__internal__.tracking.wrap(list(states))
objects['states'] = states
return objects, functions
class VocabularySavedModelSaver(LayerSavedModelSaver):
@property
def python_properties(self):
metadata = self._python_properties_internal()
metadata['config']['vocabulary'] = None
metadata['config']['has_input_vocabulary'] = self.obj._has_input_vocabulary return metadata
| true | true |
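For context, a short sketch (not part of the module) of the code path that exercises these savers: saving any Keras model in the TF2 SavedModel format serializes the metadata produced by python_properties above.
import tensorflow as tf
model = tf.keras.Sequential([
    tf.keras.layers.Dense(4, activation='relu', input_shape=(8,)),
    tf.keras.layers.Dense(1),
])
# Saving to a directory path (no .h5 suffix) uses the SavedModel format in TF2,
# which invokes LayerSavedModelSaver and friends under the hood.
model.save('/tmp/saved_dense_model')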
f700864adcc91b720f4dd4a166b39644acc7d0b2 | 76 | py | Python | random number generation.py | Ratheshprabakar/Python-Programs | fca9d4f0b5f5f5693b3d7e25c6d890f4973dc19e | [
"MIT"
] | 2 | 2019-07-10T06:32:05.000Z | 2019-11-13T07:52:53.000Z | random number generation.py | Ratheshprabakar/Python-Programs | fca9d4f0b5f5f5693b3d7e25c6d890f4973dc19e | [
"MIT"
] | null | null | null | random number generation.py | Ratheshprabakar/Python-Programs | fca9d4f0b5f5f5693b3d7e25c6d890f4973dc19e | [
"MIT"
] | 1 | 2019-10-12T06:56:13.000Z | 2019-10-12T06:56:13.000Z | import random
x=random.random()
print("The Random number is",round(x,3))
| 19 | 41 | 0.710526 | import random
x=random.random()
print("The Random number is",round(x,3))
| true | true |
f700867ec71ff7fa38349f5f83924777079c446f | 1,627 | py | Python | cmframework/src/cmframework/utils/cmpluginmanager.py | akraino-edge-stack/ta-config-manager | 8a3f88d0dbf6afdb0130b9d35e563f8a54d15d44 | [
"Apache-2.0"
] | null | null | null | cmframework/src/cmframework/utils/cmpluginmanager.py | akraino-edge-stack/ta-config-manager | 8a3f88d0dbf6afdb0130b9d35e563f8a54d15d44 | [
"Apache-2.0"
] | null | null | null | cmframework/src/cmframework/utils/cmpluginmanager.py | akraino-edge-stack/ta-config-manager | 8a3f88d0dbf6afdb0130b9d35e563f8a54d15d44 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Nokia
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import re
import logging
from cmframework.apis import cmerror
class CMPluginManager(object):
def __init__(self, plugins_path):
self.pluginlist = {}
self.filterdict = {}
self.plugins_path = plugins_path
# pylint: disable=no-self-use
def load_plugin(self):
raise cmerror.CMError('Not implemented')
# pylint: disable=no-self-use
def build_input(self, indata, filtername):
search_re = re.compile(filtername)
if isinstance(indata, dict):
filter_data = {}
for key, value in indata.iteritems():
logging.debug('Matching %s against %s', key, filtername)
if search_re.match(key):
filter_data[key] = value
else:
filter_data = []
for key in indata:
logging.debug('Matching %s against %s', key, filtername)
if search_re.match(key):
filter_data.append(key)
return filter_data
| 33.204082 | 74 | 0.653964 |
import re
import logging
from cmframework.apis import cmerror
class CMPluginManager(object):
def __init__(self, plugins_path):
self.pluginlist = {}
self.filterdict = {}
self.plugins_path = plugins_path
def load_plugin(self):
raise cmerror.CMError('Not implemented')
def build_input(self, indata, filtername):
search_re = re.compile(filtername)
if isinstance(indata, dict):
filter_data = {}
for key, value in indata.iteritems():
logging.debug('Matching %s against %s', key, filtername)
if search_re.match(key):
filter_data[key] = value
else:
filter_data = []
for key in indata:
logging.debug('Matching %s against %s', key, filtername)
if search_re.match(key):
filter_data.append(key)
return filter_data
| true | true |
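A brief sketch (not in the original module) of what build_input does with a filter regex; the plugin path and property names are hypothetical, and only the list branch is shown because the dict branch relies on Python 2's iteritems().
pm = CMPluginManager('/opt/cmframework/plugins')          # placeholder plugins path
keys = ['cloud.hostname', 'cloud.domain', 'storage.backend']
print(pm.build_input(keys, r'cloud\..*'))                 # ['cloud.hostname', 'cloud.domain']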
f70086a12cb7a805133ce4cd015dbd84c54c0b65 | 28 | py | Python | ent2id/__init__.py | skojaku/ent2id | 1483cc9430999db7a6598dfdf0afa7302ada4893 | [
"CC0-1.0"
] | null | null | null | ent2id/__init__.py | skojaku/ent2id | 1483cc9430999db7a6598dfdf0afa7302ada4893 | [
"CC0-1.0"
] | null | null | null | ent2id/__init__.py | skojaku/ent2id | 1483cc9430999db7a6598dfdf0afa7302ada4893 | [
"CC0-1.0"
] | null | null | null | from ent2id.Ent2Id import *
| 14 | 27 | 0.785714 | from ent2id.Ent2Id import *
| true | true |
f70086a2b3578a8b051c307075e29ccb0a1db0f5 | 5,775 | py | Python | venv/Lib/site-packages/tencentcloud/hcm/v20181106/models.py | Lparksi/bot | 8a38953d09436b60e8edff4ebe86bf19fe3b7046 | [
"MIT"
] | 3 | 2020-03-31T10:36:31.000Z | 2020-04-23T12:01:10.000Z | venv/Lib/site-packages/tencentcloud/hcm/v20181106/models.py | Lparksi/bot | 8a38953d09436b60e8edff4ebe86bf19fe3b7046 | [
"MIT"
] | 1 | 2020-07-16T14:51:26.000Z | 2020-07-30T12:46:55.000Z | venv/Lib/site-packages/tencentcloud/hcm/v20181106/models.py | Lparksi/bot | 8a38953d09436b60e8edff4ebe86bf19fe3b7046 | [
"MIT"
] | null | null | null | # -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tencentcloud.common.abstract_model import AbstractModel
class EvaluationRequest(AbstractModel):
"""Evaluation请求参数结构体
"""
def __init__(self):
"""
:param SessionId: 图片唯一标识,一张图片一个SessionId;
:type SessionId: str
:param Image: 图片数据,需要使用base64对图片的二进制数据进行编码,与url参数二者填一即可;
:type Image: str
:param HcmAppid: 业务应用ID,与账号应用APPID无关,是用来方便客户管理服务的参数,新的 HcmAppid 可以在[控制台](https://console.cloud.tencent.com/hcm)【应用管理】下新建。
:type HcmAppid: str
:param Url: 图片url,与Image参数二者填一即可;
:type Url: str
:param SupportHorizontalImage: 横屏拍摄开关,若开启则支持传输横屏拍摄的图片;
:type SupportHorizontalImage: bool
:param RejectNonArithmeticImage: 拒绝非速算图(如风景图、人物图)开关,若开启,则遇到非速算图会快速返回拒绝的结果,但极端情况下可能会影响评估结果(比如算式截图贴到风景画里可能被判为非速算图直接返回了)。
:type RejectNonArithmeticImage: bool
:param IsAsync: 异步模式标识,0:同步模式,1:异步模式。默认为同步模式
:type IsAsync: int
:param EnableDispRelatedVertical: 是否展开耦合算式中的竖式计算
:type EnableDispRelatedVertical: bool
:param EnableDispMidresult: 是否展示竖式算式的中间结果和格式控制字符
:type EnableDispMidresult: bool
:param EnablePdfRecognize: 是否开启pdf识别,默认开启
:type EnablePdfRecognize: bool
:param PdfPageIndex: pdf页码,从0开始,默认为0
:type PdfPageIndex: int
"""
self.SessionId = None
self.Image = None
self.HcmAppid = None
self.Url = None
self.SupportHorizontalImage = None
self.RejectNonArithmeticImage = None
self.IsAsync = None
self.EnableDispRelatedVertical = None
self.EnableDispMidresult = None
self.EnablePdfRecognize = None
self.PdfPageIndex = None
def _deserialize(self, params):
self.SessionId = params.get("SessionId")
self.Image = params.get("Image")
self.HcmAppid = params.get("HcmAppid")
self.Url = params.get("Url")
self.SupportHorizontalImage = params.get("SupportHorizontalImage")
self.RejectNonArithmeticImage = params.get("RejectNonArithmeticImage")
self.IsAsync = params.get("IsAsync")
self.EnableDispRelatedVertical = params.get("EnableDispRelatedVertical")
self.EnableDispMidresult = params.get("EnableDispMidresult")
self.EnablePdfRecognize = params.get("EnablePdfRecognize")
self.PdfPageIndex = params.get("PdfPageIndex")
class EvaluationResponse(AbstractModel):
"""Evaluation返回参数结构体
"""
def __init__(self):
"""
:param SessionId: 图片唯一标识,一张图片一个SessionId;
:type SessionId: str
:param Items: 识别出的算式信息;
注意:此字段可能返回 null,表示取不到有效值。
:type Items: list of Item
:param TaskId: 任务 id,用于查询接口
:type TaskId: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.SessionId = None
self.Items = None
self.TaskId = None
self.RequestId = None
def _deserialize(self, params):
self.SessionId = params.get("SessionId")
if params.get("Items") is not None:
self.Items = []
for item in params.get("Items"):
obj = Item()
obj._deserialize(item)
self.Items.append(obj)
self.TaskId = params.get("TaskId")
self.RequestId = params.get("RequestId")
class Item(AbstractModel):
"""识别出的算术式信息及评估结果
"""
def __init__(self):
"""
        :param Item: Whether the recognized expression is correct.
        :type Item: str
        :param ItemString: The recognized expression.
        :type ItemString: str
        :param ItemCoord: Position of the recognized expression in the image.
        :type ItemCoord: :class:`tencentcloud.hcm.v20181106.models.ItemCoord`
        :param Answer: Recommended answer. Recommended answers are not yet returned for questions with multiple relational operators, no relational operator, or unit-conversion errors.
        :type Answer: str
        :param ExpressionType: Expression question-type code, e.g. the four basic arithmetic operations. The types and codes are: 1 four basic operations; 2 finding an operand from a known result; 3 comparing values; 4 approximate estimation; 5 division with remainder; 6 fraction arithmetic; 7 unit conversion; 8 vertical (column) addition and subtraction; 9 vertical (column) multiplication and division; 10 multi-step horizontal calculation; 11 solving equations.
        Note: this field may return null, indicating that no valid value could be obtained.
        :type ExpressionType: str
"""
self.Item = None
self.ItemString = None
self.ItemCoord = None
self.Answer = None
self.ExpressionType = None
def _deserialize(self, params):
self.Item = params.get("Item")
self.ItemString = params.get("ItemString")
if params.get("ItemCoord") is not None:
self.ItemCoord = ItemCoord()
self.ItemCoord._deserialize(params.get("ItemCoord"))
self.Answer = params.get("Answer")
self.ExpressionType = params.get("ExpressionType")
class ItemCoord(AbstractModel):
"""目标算式在图片上的坐标信息
"""
def __init__(self):
"""
        :param Height: Height of the expression area.
        :type Height: int
        :param Width: Width of the expression area.
        :type Width: int
        :param X: X coordinate of the top-left corner of the expression area.
        :type X: int
        :param Y: Y coordinate of the top-left corner of the expression area.
        :type Y: int
"""
self.Height = None
self.Width = None
self.X = None
self.Y = None
def _deserialize(self, params):
self.Height = params.get("Height")
self.Width = params.get("Width")
self.X = params.get("X")
self.Y = params.get("Y") | 33.381503 | 151 | 0.641732 |
from tencentcloud.common.abstract_model import AbstractModel
class EvaluationRequest(AbstractModel):
def __init__(self):
self.SessionId = None
self.Image = None
self.HcmAppid = None
self.Url = None
self.SupportHorizontalImage = None
self.RejectNonArithmeticImage = None
self.IsAsync = None
self.EnableDispRelatedVertical = None
self.EnableDispMidresult = None
self.EnablePdfRecognize = None
self.PdfPageIndex = None
def _deserialize(self, params):
self.SessionId = params.get("SessionId")
self.Image = params.get("Image")
self.HcmAppid = params.get("HcmAppid")
self.Url = params.get("Url")
self.SupportHorizontalImage = params.get("SupportHorizontalImage")
self.RejectNonArithmeticImage = params.get("RejectNonArithmeticImage")
self.IsAsync = params.get("IsAsync")
self.EnableDispRelatedVertical = params.get("EnableDispRelatedVertical")
self.EnableDispMidresult = params.get("EnableDispMidresult")
self.EnablePdfRecognize = params.get("EnablePdfRecognize")
self.PdfPageIndex = params.get("PdfPageIndex")
class EvaluationResponse(AbstractModel):
def __init__(self):
self.SessionId = None
self.Items = None
self.TaskId = None
self.RequestId = None
def _deserialize(self, params):
self.SessionId = params.get("SessionId")
if params.get("Items") is not None:
self.Items = []
for item in params.get("Items"):
obj = Item()
obj._deserialize(item)
self.Items.append(obj)
self.TaskId = params.get("TaskId")
self.RequestId = params.get("RequestId")
class Item(AbstractModel):
def __init__(self):
self.Item = None
self.ItemString = None
self.ItemCoord = None
self.Answer = None
self.ExpressionType = None
def _deserialize(self, params):
self.Item = params.get("Item")
self.ItemString = params.get("ItemString")
if params.get("ItemCoord") is not None:
self.ItemCoord = ItemCoord()
self.ItemCoord._deserialize(params.get("ItemCoord"))
self.Answer = params.get("Answer")
self.ExpressionType = params.get("ExpressionType")
class ItemCoord(AbstractModel):
def __init__(self):
self.Height = None
self.Width = None
self.X = None
self.Y = None
def _deserialize(self, params):
self.Height = params.get("Height")
self.Width = params.get("Width")
self.X = params.get("X")
self.Y = params.get("Y") | true | true |
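The models in the record above are plain serialization containers. As a minimal, hypothetical sketch (assuming the tencentcloud-sdk-python package that provides tencentcloud.hcm.v20181106.models is installed, and using made-up payload values), a response dict can be mapped onto them with the _deserialize methods defined in this file:

from tencentcloud.hcm.v20181106 import models

# Hypothetical payload shaped like the fields EvaluationResponse._deserialize expects.
payload = {
    "SessionId": "session-001",
    "Items": [{
        "Item": "YES",
        "ItemString": "1+1=2",
        "ItemCoord": {"Height": 30, "Width": 80, "X": 10, "Y": 20},
        "Answer": "2",
        "ExpressionType": "1",
    }],
    "TaskId": "task-001",
    "RequestId": "req-001",
}

resp = models.EvaluationResponse()
resp._deserialize(payload)           # builds the nested Item and ItemCoord objects
print(resp.Items[0].ItemCoord.X)     # -> 10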
f70087c4c75215da4de26e95ab7e5bd813b1cd4c | 9,438 | py | Python | tests/utils/git_test.py | breml/rally | ea43beb6b60481ee77508da84605add571fc021d | [
"Apache-2.0"
] | null | null | null | tests/utils/git_test.py | breml/rally | ea43beb6b60481ee77508da84605add571fc021d | [
"Apache-2.0"
] | null | null | null | tests/utils/git_test.py | breml/rally | ea43beb6b60481ee77508da84605add571fc021d | [
"Apache-2.0"
] | null | null | null | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import logging
import unittest.mock as mock
from unittest import TestCase
from esrally import exceptions
from esrally.utils import git
class GitTests(TestCase):
def test_is_git_working_copy(self):
test_dir = os.path.dirname(os.path.dirname(__file__))
# this test is assuming that nobody stripped the git repo info in their Rally working copy
self.assertFalse(git.is_working_copy(test_dir))
self.assertTrue(git.is_working_copy(os.path.dirname(test_dir)))
@mock.patch("esrally.utils.process.run_subprocess_with_output")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_git_version_too_old(self, run_subprocess_with_logging, run_subprocess):
# any non-zero return value will do
run_subprocess_with_logging.return_value = 64
run_subprocess.return_value = "1.0.0"
with self.assertRaises(exceptions.SystemSetupError) as ctx:
git.head_revision("/src")
self.assertEqual("Your git version is [1.0.0] but Rally requires at least git 1.9. Please update git.", ctx.exception.args[0])
run_subprocess_with_logging.assert_called_with("git -C /src --version", level=logging.DEBUG)
@mock.patch("esrally.utils.io.ensure_dir")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_clone_successful(self, run_subprocess_with_logging, ensure_dir):
run_subprocess_with_logging.return_value = 0
src = "/src"
remote = "http://github.com/some/project"
git.clone(src, remote)
ensure_dir.assert_called_with(src)
run_subprocess_with_logging.assert_called_with("git clone http://github.com/some/project /src")
@mock.patch("esrally.utils.io.ensure_dir")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_clone_with_error(self, run_subprocess_with_logging, ensure_dir):
run_subprocess_with_logging.return_value = 128
src = "/src"
remote = "http://github.com/some/project"
with self.assertRaises(exceptions.SupplyError) as ctx:
git.clone(src, remote)
self.assertEqual("Could not clone from [http://github.com/some/project] to [/src]", ctx.exception.args[0])
ensure_dir.assert_called_with(src)
run_subprocess_with_logging.assert_called_with("git clone http://github.com/some/project /src")
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_fetch_successful(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = False
git.fetch("/src", remote="my-origin")
run_subprocess.assert_called_with("git -C /src fetch --prune --quiet my-origin")
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_fetch_with_error(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = True
with self.assertRaises(exceptions.SupplyError) as ctx:
git.fetch("/src", remote="my-origin")
self.assertEqual("Could not fetch source tree from [my-origin]", ctx.exception.args[0])
run_subprocess.assert_called_with("git -C /src fetch --prune --quiet my-origin")
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_checkout_successful(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = False
git.checkout("/src", "feature-branch")
run_subprocess.assert_called_with("git -C /src checkout --quiet feature-branch")
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_checkout_with_error(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = True
with self.assertRaises(exceptions.SupplyError) as ctx:
git.checkout("/src", "feature-branch")
self.assertEqual("Could not checkout branch [feature-branch]. Do you have uncommitted changes?", ctx.exception.args[0])
run_subprocess.assert_called_with("git -C /src checkout --quiet feature-branch")
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_rebase(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = False
git.rebase("/src", remote="my-origin", branch="feature-branch")
calls = [
mock.call("git -C /src checkout --quiet feature-branch"),
mock.call("git -C /src rebase --quiet my-origin/feature-branch")
]
run_subprocess.assert_has_calls(calls)
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_pull(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = False
git.pull("/src", remote="my-origin", branch="feature-branch")
calls = [
mock.call("git -C /src fetch --prune --quiet my-origin"),
mock.call("git -C /src checkout --quiet feature-branch"),
mock.call("git -C /src rebase --quiet my-origin/feature-branch")
]
run_subprocess.assert_has_calls(calls)
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_pull_ts(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = False
git.pull_ts("/src", "20160101T110000Z")
run_subprocess.assert_called_with(
"git -C /src fetch --quiet origin && git -C /src checkout "
"--quiet `git -C /src rev-list -n 1 --before=\"20160101T110000Z\" --date=iso8601 origin/master`")
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_pull_revision(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = False
git.pull_revision("/src", "3694a07")
run_subprocess.assert_called_with("git -C /src fetch --quiet origin && git -C /src checkout --quiet 3694a07")
@mock.patch("esrally.utils.process.run_subprocess_with_output")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_head_revision(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = ["3694a07"]
self.assertEqual("3694a07", git.head_revision("/src"))
run_subprocess.assert_called_with("git -C /src rev-parse --short HEAD")
@mock.patch("esrally.utils.process.run_subprocess_with_output")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_list_remote_branches(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = [" origin/HEAD",
" origin/master",
" origin/5.0.0-alpha1",
" origin/5"]
self.assertEqual(["master", "5.0.0-alpha1", "5"], git.branches("/src", remote=True))
run_subprocess.assert_called_with("git -C /src for-each-ref refs/remotes/ --format='%(refname:short)'")
@mock.patch("esrally.utils.process.run_subprocess_with_output")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_list_local_branches(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = [" HEAD",
" master",
" 5.0.0-alpha1",
" 5"]
self.assertEqual(["master", "5.0.0-alpha1", "5"], git.branches("/src", remote=False))
run_subprocess.assert_called_with("git -C /src for-each-ref refs/heads/ --format='%(refname:short)'")
| 52.433333 | 134 | 0.703963 |
import os
import logging
import unittest.mock as mock
from unittest import TestCase
from esrally import exceptions
from esrally.utils import git
class GitTests(TestCase):
def test_is_git_working_copy(self):
test_dir = os.path.dirname(os.path.dirname(__file__))
self.assertFalse(git.is_working_copy(test_dir))
self.assertTrue(git.is_working_copy(os.path.dirname(test_dir)))
@mock.patch("esrally.utils.process.run_subprocess_with_output")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_git_version_too_old(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 64
run_subprocess.return_value = "1.0.0"
with self.assertRaises(exceptions.SystemSetupError) as ctx:
git.head_revision("/src")
self.assertEqual("Your git version is [1.0.0] but Rally requires at least git 1.9. Please update git.", ctx.exception.args[0])
run_subprocess_with_logging.assert_called_with("git -C /src --version", level=logging.DEBUG)
@mock.patch("esrally.utils.io.ensure_dir")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_clone_successful(self, run_subprocess_with_logging, ensure_dir):
run_subprocess_with_logging.return_value = 0
src = "/src"
remote = "http://github.com/some/project"
git.clone(src, remote)
ensure_dir.assert_called_with(src)
run_subprocess_with_logging.assert_called_with("git clone http://github.com/some/project /src")
@mock.patch("esrally.utils.io.ensure_dir")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_clone_with_error(self, run_subprocess_with_logging, ensure_dir):
run_subprocess_with_logging.return_value = 128
src = "/src"
remote = "http://github.com/some/project"
with self.assertRaises(exceptions.SupplyError) as ctx:
git.clone(src, remote)
self.assertEqual("Could not clone from [http://github.com/some/project] to [/src]", ctx.exception.args[0])
ensure_dir.assert_called_with(src)
run_subprocess_with_logging.assert_called_with("git clone http://github.com/some/project /src")
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_fetch_successful(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = False
git.fetch("/src", remote="my-origin")
run_subprocess.assert_called_with("git -C /src fetch --prune --quiet my-origin")
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_fetch_with_error(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = True
with self.assertRaises(exceptions.SupplyError) as ctx:
git.fetch("/src", remote="my-origin")
self.assertEqual("Could not fetch source tree from [my-origin]", ctx.exception.args[0])
run_subprocess.assert_called_with("git -C /src fetch --prune --quiet my-origin")
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_checkout_successful(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = False
git.checkout("/src", "feature-branch")
run_subprocess.assert_called_with("git -C /src checkout --quiet feature-branch")
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_checkout_with_error(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = True
with self.assertRaises(exceptions.SupplyError) as ctx:
git.checkout("/src", "feature-branch")
self.assertEqual("Could not checkout branch [feature-branch]. Do you have uncommitted changes?", ctx.exception.args[0])
run_subprocess.assert_called_with("git -C /src checkout --quiet feature-branch")
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_rebase(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = False
git.rebase("/src", remote="my-origin", branch="feature-branch")
calls = [
mock.call("git -C /src checkout --quiet feature-branch"),
mock.call("git -C /src rebase --quiet my-origin/feature-branch")
]
run_subprocess.assert_has_calls(calls)
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_pull(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = False
git.pull("/src", remote="my-origin", branch="feature-branch")
calls = [
mock.call("git -C /src fetch --prune --quiet my-origin"),
mock.call("git -C /src checkout --quiet feature-branch"),
mock.call("git -C /src rebase --quiet my-origin/feature-branch")
]
run_subprocess.assert_has_calls(calls)
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_pull_ts(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = False
git.pull_ts("/src", "20160101T110000Z")
run_subprocess.assert_called_with(
"git -C /src fetch --quiet origin && git -C /src checkout "
"--quiet `git -C /src rev-list -n 1 --before=\"20160101T110000Z\" --date=iso8601 origin/master`")
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_pull_revision(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = False
git.pull_revision("/src", "3694a07")
run_subprocess.assert_called_with("git -C /src fetch --quiet origin && git -C /src checkout --quiet 3694a07")
@mock.patch("esrally.utils.process.run_subprocess_with_output")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_head_revision(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = ["3694a07"]
self.assertEqual("3694a07", git.head_revision("/src"))
run_subprocess.assert_called_with("git -C /src rev-parse --short HEAD")
@mock.patch("esrally.utils.process.run_subprocess_with_output")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_list_remote_branches(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = [" origin/HEAD",
" origin/master",
" origin/5.0.0-alpha1",
" origin/5"]
self.assertEqual(["master", "5.0.0-alpha1", "5"], git.branches("/src", remote=True))
run_subprocess.assert_called_with("git -C /src for-each-ref refs/remotes/ --format='%(refname:short)'")
@mock.patch("esrally.utils.process.run_subprocess_with_output")
@mock.patch("esrally.utils.process.run_subprocess_with_logging")
def test_list_local_branches(self, run_subprocess_with_logging, run_subprocess):
run_subprocess_with_logging.return_value = 0
run_subprocess.return_value = [" HEAD",
" master",
" 5.0.0-alpha1",
" 5"]
self.assertEqual(["master", "5.0.0-alpha1", "5"], git.branches("/src", remote=False))
run_subprocess.assert_called_with("git -C /src for-each-ref refs/heads/ --format='%(refname:short)'")
| true | true |
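The tests in the record above pin down the exact git commands each esrally wrapper issues. The following is a minimal usage sketch of those wrappers with a placeholder working-copy path and remote; it only uses the call signatures exercised by the tests and is not taken from the esrally documentation:

from esrally.utils import git

src_dir = "/path/to/src"  # placeholder working copy, not a real esrally path
if git.is_working_copy(src_dir):
    # per the tests, pull() runs fetch --prune, checkout and rebase on the remote branch
    git.pull(src_dir, remote="origin", branch="master")
    print(git.head_revision(src_dir))   # short SHA, e.g. "3694a07"
else:
    git.clone(src_dir, "http://github.com/some/project")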