def error(self, message):
    """An error occurred, closing the dialog box"""
    QMessageBox.critical(self, _("Array editor"), message)
    self.setAttribute(Qt.WA_DeleteOnClose)
    self.reject()
def set_sample_probability(probability):
    """Set the probability that a batch will be submitted to the InfluxDB
    server. This should be a value that is greater than or equal to ``0``
    and less than or equal to ``1.0``. A value of ``0.25`` would represent
    a probability of 25% that a batch would be written to InfluxDB.

    :param float probability: The value between 0 and 1.0 that represents
        the probability that a batch will be submitted to the InfluxDB server.
    """
    global _sample_probability
    if not 0.0 <= probability <= 1.0:
        raise ValueError('Invalid probability value')
    LOGGER.debug('Setting sample probability to %.2f', probability)
    _sample_probability = float(probability)
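A minimal standalone sketch of the same validation-and-sampling pattern; the module-level default and the `should_submit_batch` helper are illustrative assumptions, not part of the original library.

import logging
import random

LOGGER = logging.getLogger(__name__)
_sample_probability = 1.0


def set_sample_probability(probability):
    """Validate and store the batch sampling probability (0.0 .. 1.0)."""
    global _sample_probability
    if not 0.0 <= probability <= 1.0:
        raise ValueError('Invalid probability value')
    LOGGER.debug('Setting sample probability to %.2f', probability)
    _sample_probability = float(probability)


def should_submit_batch():
    """Return True with the configured probability."""
    return random.random() < _sample_probability


set_sample_probability(0.25)      # roughly one batch in four is submitted
print(should_submit_batch())
# set_sample_probability(1.5)     # would raise ValueError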
def _encode_item(self, item: str) -> str:
    """If anonymization is on, an item gets salted and hashed here.

    :param str item:
    :return: Hashed item, if anonymization is on; the unmodified item otherwise
    :rtype: str
    """
    assert item is not None
    if not self.__redis_conf['anonymization']:
        return item
    connection = self.__get_connection()
    salt = connection.get(self.__redis_conf['salt_key'])
    if salt is None:
        salt = create_salt()
        connection.set(self.__redis_conf['salt_key'], salt)
    BlackRed.__release_connection(connection)
    return sha512(salt + item.encode()).hexdigest()
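The salted-hash step above can be reproduced without Redis using only the standard library; `create_salt` here is a hypothetical stand-in for the library's own helper.

import os
from hashlib import sha512


def create_salt(length=64):
    """Random salt -- a hypothetical stand-in for the library's helper."""
    return os.urandom(length)


def encode_item(item, salt, anonymization=True):
    """Salt-and-hash an item, mirroring the logic above without Redis."""
    if not anonymization:
        return item
    return sha512(salt + item.encode()).hexdigest()


salt = create_salt()
print(encode_item("192.0.2.10", salt))                          # 128-char hex digest
print(encode_item("192.0.2.10", salt, anonymization=False))     # unchanged item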
def RIBSystemRouteLimitExceeded_originator_switch_info_switchIdentifier ( self , ** kwargs ) : """Auto Generated Code"""
config = ET . Element ( "config" ) RIBSystemRouteLimitExceeded = ET . SubElement ( config , "RIBSystemRouteLimitExceeded" , xmlns = "http://brocade.com/ns/brocade-notification-stream" ) originator_switch_info = ET . SubElement ( RIBSystemRouteLimitExceeded , "originator-switch-info" ) switchIdentifier = ET . SubElement ( originator_switch_info , "switchIdentifier" ) switchIdentifier . text = kwargs . pop ( 'switchIdentifier' ) callback = kwargs . pop ( 'callback' , self . _callback ) return callback ( config )
def make_csv_table ( column_list = [ ] , column_lbls = None , header = '' , column_type = None , row_lbls = None , transpose = False , precision = 2 , use_lbl_width = True , comma_repl = '<com>' , raw = False , new = False , standardize = False ) : """Creates a csv table with aligned columns make _ csv _ table Args : column _ list ( list ) : column _ lbls ( None ) : header ( str ) : column _ type ( None ) : row _ lbls ( None ) : transpose ( bool ) : Returns : str : csv _ text Example : > > > # ENABLE _ DOCTEST > > > from utool . util _ csv import * # NOQA > > > column _ list = [ [ 1 , 2 , 3 ] , [ ' A ' , ' B ' , ' C ' ] ] > > > column _ lbls = [ ' num ' , ' alpha ' ] > > > header = ' # Test CSV ' > > > column _ type = ( int , str ) > > > row _ lbls = None > > > transpose = False > > > csv _ text = make _ csv _ table ( column _ list , column _ lbls , header , column _ type , row _ lbls , transpose ) > > > result = csv _ text > > > print ( result ) # Test CSV # num _ rows = 3 # num , alpha 1 , A 2 , B 3 , C"""
import utool as ut assert comma_repl . find ( ',' ) == - 1 , 'comma_repl cannot contain a comma!' if transpose : column_lbls , row_lbls = row_lbls , column_lbls column_list = list ( map ( list , zip ( * column_list ) ) ) if row_lbls is not None : if isinstance ( column_list , np . ndarray ) : column_list = column_list . tolist ( ) if isinstance ( row_lbls , np . ndarray ) : row_lbls = row_lbls . tolist ( ) column_list = [ row_lbls ] + column_list column_lbls = [ 'ROWLBL' ] + list ( map ( six . text_type , column_lbls ) ) if column_type is not None : column_type = [ six . text_type ] + column_type if len ( column_list ) == 0 : print ( '[csv] No columns' ) return header column_len = [ len ( col ) for col in column_list ] num_data = column_len [ 0 ] if num_data == 0 : # print ( ' [ csv . make _ csv _ table ( ) ] No data . ( header = % r ) ' % ( header , ) ) return header if any ( [ num_data != clen for clen in column_len ] ) : print ( '[csv] column_lbls = %r ' % ( column_lbls , ) ) print ( '[csv] column_len = %r ' % ( column_len , ) ) print ( '[csv] inconsistent column lengths' ) return header if column_type is None : column_type = list ( map ( type , ut . get_list_column ( column_list , 0 ) ) ) # column _ type = [ type ( col [ 0 ] ) for col in column _ list ] csv_rows = [ ] if new : csv_rows . append ( header ) elif not raw : csv_rows . append ( header ) if not standardize : csv_rows . append ( '# num_rows=%r' % num_data ) column_maxlen = [ ] column_str_list = [ ] if column_lbls is None : column_lbls = [ '' ] * len ( column_list ) def _toint ( c ) : if c is None : return 'None' try : if np . isnan ( c ) : return 'nan' except TypeError as ex : print ( '------' ) print ( '[csv] TypeError %r ' % ex ) print ( '[csv] _toint(c) failed' ) print ( '[csv] c = %r ' % c ) print ( '[csv] type(c) = %r ' % type ( c ) ) print ( '------' ) raise return ( '%d' ) % int ( c ) import uuid textable_types = [ uuid . UUID , six . text_type ] try : if standardize : def csv_format ( r ) : text = ut . repr2 ( r , precision = precision ) # text = six . text _ type ( r ) # Check if needs escape escape_chars = [ '"' , ' ' , ',' ] if any ( [ c in text for c in escape_chars ] ) : # escape quotes with quotes text = text . replace ( '"' , '""' ) # encapsulate with quotes text = '"' + text + '"' return text for col , lbl , coltype in zip ( column_list , column_lbls , column_type ) : col_str = [ csv_format ( r ) for r in col ] column_str_list . append ( col_str ) pass else : # Loop over every column for col , lbl , coltype in zip ( column_list , column_lbls , column_type ) : # Loop over every row in the column ( using list comprehension ) if coltype is list or util_type . is_list ( coltype ) : col_str = [ six . text_type ( c ) . replace ( ',' , ' ' ) . replace ( '.' , '<dot>' ) for c in col ] elif ( coltype is float or util_type . is_float ( coltype ) or coltype == np . float32 or util_type . is_valid_floattype ( coltype ) ) : precision_fmtstr = '%.' + six . text_type ( precision ) + 'f' col_str = [ 'None' if r is None else precision_fmtstr % float ( r ) for r in col ] # col _ = [ r if r is None else float ( r ) for r in col ] # col _ str = [ ut . repr2 ( r , precision = 2 ) for r in col _ ] elif coltype is int or util_type . is_int ( coltype ) or coltype == np . int64 : col_str = [ _toint ( c ) for c in ( col ) ] elif coltype in textable_types or util_type . is_str ( coltype ) : col_str = [ six . text_type ( c ) . 
replace ( ',' , comma_repl ) for c in col ] else : print ( '[csv] is_unknown coltype=%r' % ( coltype , ) ) try : col_str = [ six . text_type ( c ) for c in ( col ) ] except UnicodeDecodeError : try : col_str = [ ut . ensure_unicode ( c ) for c in ( col ) ] except Exception : col_str = [ repr ( c ) for c in ( col ) ] column_str_list . append ( col_str ) for col_str , lbl in zip ( column_str_list , column_lbls ) : col_lens = [ len ( s ) for s in ( col_str ) ] max_len = max ( col_lens ) if use_lbl_width : # The column label counts towards the column width max_len = max ( len ( lbl ) , max_len ) column_maxlen . append ( max_len ) except Exception as ex : # ut . embed ( ) ut . printex ( ex , keys = [ 'col' , 'lbl' , 'coltype' ] ) raise def _fmtfn ( maxlen ) : return '' . join ( [ '%' , six . text_type ( maxlen + 2 ) , 's' ] ) fmtstr = ',' . join ( [ _fmtfn ( maxlen ) for maxlen in column_maxlen ] ) try : if new : csv_rows . append ( '# ' + fmtstr % tuple ( column_lbls ) ) elif not raw : csv_rows . append ( '# ' + fmtstr % tuple ( column_lbls ) ) # csv _ rows . append ( ' # ' + fmtstr % column _ lbls ) except Exception as ex : # print ( len ( column _ list ) ) # ut . embed ( ) ut . printex ( ex , keys = [ 'fmtstr' , 'column_lbls' ] ) raise for row in zip ( * column_str_list ) : csv_rows . append ( ' ' + fmtstr % row ) csv_text = '\n' . join ( csv_rows ) return csv_text
def warning(title="", text="", width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, timeout=None):
    """Display a simple warning

    :param text: text inside the window
    :type text: str
    :param title: title of the window
    :type title: str
    :param width: window width
    :type width: int
    :param height: window height
    :type height: int
    :param timeout: close the window after n seconds
    :type timeout: int
    """
    return _simple_dialog(Gtk.MessageType.WARNING, text, title, width, height, timeout)
def CSS_addRule ( self , styleSheetId , ruleText , location ) : """Function path : CSS . addRule Domain : CSS Method name : addRule Parameters : Required arguments : ' styleSheetId ' ( type : StyleSheetId ) - > The css style sheet identifier where a new rule should be inserted . ' ruleText ' ( type : string ) - > The text of a new rule . ' location ' ( type : SourceRange ) - > Text position of a new rule in the target style sheet . Returns : ' rule ' ( type : CSSRule ) - > The newly created rule . Description : Inserts a new rule with the given < code > ruleText < / code > in a stylesheet with given < code > styleSheetId < / code > , at the position specified by < code > location < / code > ."""
assert isinstance ( ruleText , ( str , ) ) , "Argument 'ruleText' must be of type '['str']'. Received type: '%s'" % type ( ruleText ) subdom_funcs = self . synchronous_command ( 'CSS.addRule' , styleSheetId = styleSheetId , ruleText = ruleText , location = location ) return subdom_funcs
def fill_parentidid2obj_r0(self, id2obj, child_obj):
    """Fill id2obj with all parent key item IDs and their objects."""
    for parent_obj in child_obj.parents:
        if parent_obj.item_id not in id2obj:
            id2obj[parent_obj.item_id] = parent_obj
            self.fill_parentidid2obj_r0(id2obj, parent_obj)
def filter_whitespace(mode: str, text: str) -> str:
    """Transform whitespace in ``text`` according to ``mode``.

    Available modes are:

    * ``all``: Return all whitespace unmodified.
    * ``single``: Collapse consecutive whitespace with a single whitespace
      character, preserving newlines.
    * ``oneline``: Collapse all runs of whitespace into a single space
      character, removing all newlines in the process.

    .. versionadded:: 4.3
    """
    if mode == "all":
        return text
    elif mode == "single":
        text = re.sub(r"([\t ]+)", " ", text)
        text = re.sub(r"(\s*\n\s*)", "\n", text)
        return text
    elif mode == "oneline":
        return re.sub(r"(\s+)", " ", text)
    else:
        raise Exception("invalid whitespace mode %s" % mode)
def view ( results_dir , result_id , hide_simulation_output , parameters , no_pager ) : """View results of simulations ."""
campaign = sem . CampaignManager . load ( results_dir ) # Pick the most appropriate function based on the level of detail we want if hide_simulation_output : get_results_function = campaign . db . get_results else : get_results_function = campaign . db . get_complete_results # If a result id was specified , just query for that result if result_id : output = '\n\n\n' . join ( [ pprint . pformat ( item ) for item in get_results_function ( result_id = result_id ) ] ) else : [ params , defaults ] = zip ( * get_params_and_defaults ( campaign . db . get_params ( ) , campaign . db ) ) if not parameters : # Convert to string string_defaults = list ( ) for idx , d in enumerate ( defaults ) : string_defaults . append ( str ( d ) ) script_params = query_parameters ( params , string_defaults ) else : script_params = import_parameters_from_file ( parameters ) # Perform the search output = '\n\n\n' . join ( [ pprint . pformat ( item ) for item in get_results_function ( script_params ) ] ) # Print the results if no_pager : click . echo ( output ) else : click . echo_via_pager ( output )
def parse_ssois_return ( ssois_return , object_name , imagetype , camera_filter = 'r.MP9601' , telescope_instrument = 'CFHT/MegaCam' ) : """Parse through objects in ssois query and filter out images of desired filter , type , exposure time , and instrument"""
assert camera_filter in [ 'r.MP9601' , 'u.MP9301' ] ret_table = [ ] good_table = 0 table_reader = ascii . get_reader ( Reader = ascii . Basic ) table_reader . inconsistent_handler = _skip_missing_data table_reader . header . splitter . delimiter = '\t' table_reader . data . splitter . delimiter = '\t' table = table_reader . read ( ssois_return ) for row in table : # Excludes the OSSOS wallpaper . # note : ' Telescope _ Insturment ' is a typo in SSOIS ' s return format if not 'MegaCam' in row [ 'Telescope_Insturment' ] : continue # Check if image of object exists in OSSOS observations if not storage . exists ( storage . get_uri ( row [ 'Image' ] [ : - 1 ] ) ) : continue if not str ( row [ 'Image_target' ] ) . startswith ( 'WP' ) : good_table += 1 ret_table . append ( row ) if good_table > 0 : print ( " %d images found" % good_table ) return ret_table
def post_video ( self , videoUrl , name = None , ingestMedia = True ) : '''Post and optionally ingest media from the specified URL'''
if name is None : name = os . path . basename ( videoUrl ) url = '/videos' data = { 'name' : name } new_video = self . _make_request ( self . CMS_Server , 'POST' , url , data = data ) if ingestMedia : self . ingest_video ( new_video [ 'id' ] , videoUrl ) return new_video
def pin_ls ( self , type = "all" , ** kwargs ) : """Lists objects pinned to local storage . By default , all pinned objects are returned , but the ` ` type ` ` flag or arguments can restrict that to a specific pin type or to some specific objects respectively . . . code - block : : python > > > c . pin _ ls ( ) { ' Keys ' : { ' QmNNPMA1eGUbKxeph6yqV8ZmRkdVat . . . YMuz ' : { ' Type ' : ' recursive ' } , ' QmNPZUCeSN5458Uwny8mXSWubjjr6J . . . kP5e ' : { ' Type ' : ' recursive ' } , ' QmNg5zWpRMxzRAVg7FTQ3tUxVbKj8E . . . gHPz ' : { ' Type ' : ' indirect ' } , ' QmNiuVapnYCrLjxyweHeuk6Xdqfvts . . . wCCe ' : { ' Type ' : ' indirect ' } } } Parameters type : " str " The type of pinned keys to list . Can be : * ` ` " direct " ` ` * ` ` " indirect " ` ` * ` ` " recursive " ` ` * ` ` " all " ` ` Returns dict : Hashes of pinned IPFS objects and why they are pinned"""
kwargs . setdefault ( "opts" , { "type" : type } ) return self . _client . request ( '/pin/ls' , decoder = 'json' , ** kwargs )
def add_behavior_for_pclass ( self , definition , cls ) : """Define an additional base class for the Python class created for a particular definition . : param unicode definition : The definition the Python class for which the base class will be included . : param type cls : The additional base class . : raise ValueError : If a Python class for the given definition has already been created . Behavior cannot be retroactively added to a Python class . All behaviors must be registered before the first call to ` ` pclass _ for _ definition ` ` for a particular definition . : return : ` ` None ` `"""
if definition in self . _pclasses : raise AlreadyCreatedClass ( definition ) if definition not in self . definitions : raise NoSuchDefinition ( definition ) self . _behaviors . setdefault ( definition , [ ] ) . append ( cls )
def options ( self , context , module_options ) : '''DOMAIN Domain to enumerate DNS for . Defaults to all zones .'''
self . domains = None if module_options and 'DOMAIN' in module_options : self . domains = module_options [ 'DOMAIN' ]
def set_is_success ( self , last_hop ) : """Sets the flag if traceroute result is successful or not ."""
for packet in last_hop . packets : if packet . rtt and not packet . is_error : self . is_success = True break else : self . set_last_hop_errors ( last_hop )
def _create_activity1 ( self , process , name , activity_class = "UserTask" ) : """Create a new activity . : param process : parent process id : type process : basestring : param name : new activity name : type name : basestring : param activity _ class : type of activity : UserTask ( default ) or Subprocess : type activity _ class : basestring : return : the created : class : ` models . Activity ` : raises IllegalArgumentError : When the provided arguments are incorrect : raises APIError : When the object could not be created"""
if self . match_app_version ( label = 'wim' , version = '>=2.0.0' , default = False ) : raise APIError ( 'This method is only compatible with versions of KE-chain where the internal `wim` module ' 'has a version <=2.0.0. Use the `Client.create_activity2()` method.' ) if activity_class and activity_class not in ActivityType . values ( ) : raise IllegalArgumentError ( "Please provide accepted activity_class (provided:{} accepted:{})" . format ( activity_class , ( ActivityType . USERTASK , ActivityType . SUBPROCESS , ActivityType . SERVICETASK ) ) ) data = { "name" : name , "process" : process , "activity_class" : activity_class } response = self . _request ( 'POST' , self . _build_url ( 'activities' ) , data = data ) if response . status_code != requests . codes . created : # pragma : no cover raise APIError ( "Could not create activity" ) data = response . json ( ) return Activity ( data [ 'results' ] [ 0 ] , client = self )
def get_default_collection_parsers ( parser_finder : ParserFinder , conversion_finder : ConversionFinder ) -> List [ AnyParser ] : """Utility method to return the default parsers able to parse a dictionary from a file . : return :"""
return [ SingleFileParserFunction ( parser_function = read_dict_or_list_from_json , streaming_mode = True , custom_name = 'read_dict_or_list_from_json' , supported_exts = { '.json' } , supported_types = { dict , list } , function_args = { 'conversion_finder' : conversion_finder } ) , MultifileCollectionParser ( parser_finder ) ]
def is_incomplete_option(all_args, cmd_param):
    """
    :param all_args: the full original list of args supplied
    :param cmd_param: the current command parameter
    :return: whether or not the last option declaration (i.e. starts "-" or "--")
        is incomplete and corresponds to this cmd_param. In other words, whether
        this cmd_param option can still accept values.
    """
    if not isinstance(cmd_param, Option):
        return False
    if cmd_param.is_flag:
        return False
    last_option = None
    for index, arg_str in enumerate(reversed([arg for arg in all_args if arg != WORDBREAK])):
        if index + 1 > cmd_param.nargs:
            break
        if start_of_option(arg_str):
            last_option = arg_str
    return True if last_option and last_option in cmd_param.opts else False
def construct_mapping(self, node, deep=False):
    """Use ODict for maps"""
    mapping = ODict()
    for key_node, value_node in node.value:
        key = self.construct_object(key_node, deep=deep)
        value = self.construct_object(value_node, deep=deep)
        mapping[key] = value
    return mapping
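The same order-preserving idea can be wired into a stock PyYAML loader. This is a sketch assuming PyYAML is installed and that `ODict` behaves like `collections.OrderedDict`.

from collections import OrderedDict
import yaml


class OrderedLoader(yaml.SafeLoader):
    """SafeLoader that builds OrderedDict instances for YAML mappings."""


def _construct_mapping(loader, node, deep=False):
    loader.flatten_mapping(node)
    return OrderedDict(loader.construct_pairs(node, deep=deep))


OrderedLoader.add_constructor(
    yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, _construct_mapping)

doc = yaml.load("zebra: 1\napple: 2\nmango: 3\n", Loader=OrderedLoader)
print(list(doc))  # ['zebra', 'apple', 'mango'] -- insertion order preserved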
def _pdf ( self , xloc , dist , length , cache ) : """Probability density function . Example : > > > print ( chaospy . Iid ( chaospy . Uniform ( ) , 2 ) . pdf ( . . . [ [ 0.5 , 1.5 ] , [ 0.5 , 0.5 ] ] ) ) [1 . 0 . ]"""
output = evaluation . evaluate_density ( dist , xloc . reshape ( 1 , - 1 ) ) . reshape ( length , - 1 ) assert xloc . shape == output . shape return output
def _SID_call_prep ( align_bams , items , ref_file , assoc_files , region = None , out_file = None ) : """Preparation work for SomaticIndelDetector ."""
base_config = items [ 0 ] [ "config" ] for x in align_bams : bam . index ( x , base_config ) params = [ "-R" , ref_file , "-T" , "SomaticIndelDetector" , "-U" , "ALLOW_N_CIGAR_READS" ] # Limit per base read start count to between 200-10000 , i . e . from any base # can no more 10000 new reads begin . # Further , limit maxNumberOfReads accordingly , otherwise SID discards # windows for high coverage panels . paired = vcfutils . get_paired_bams ( align_bams , items ) params += [ "--read_filter" , "NotPrimaryAlignment" ] params += [ "-I:tumor" , paired . tumor_bam ] min_af = float ( get_in ( paired . tumor_config , ( "algorithm" , "min_allele_fraction" ) , 10 ) ) / 100.0 if paired . normal_bam is not None : params += [ "-I:normal" , paired . normal_bam ] # notice there must be at least 4 reads of coverage in normal params += [ "--filter_expressions" , "T_COV<6||N_COV<4||T_INDEL_F<%s||T_INDEL_CF<0.7" % min_af ] else : params += [ "--unpaired" ] params += [ "--filter_expressions" , "COV<6||INDEL_F<%s||INDEL_CF<0.7" % min_af ] if region : params += [ "-L" , bamprep . region_to_gatk ( region ) , "--interval_set_rule" , "INTERSECTION" ] return params
def p_fully_qualified_class_name(p):
    '''fully_qualified_class_name : namespace_name
                                  | NS_SEPARATOR namespace_name
                                  | NAMESPACE NS_SEPARATOR namespace_name'''
    if len(p) == 2:
        p[0] = p[1]
    elif len(p) == 3:
        p[0] = p[1] + p[2]
    else:
        p[0] = p[1] + p[2] + p[3]
def _get_available_engine_upgrades(client, major=False):
    """Returns all extant rds engine upgrades.

    As a nested mapping of engine type to known versions and their upgrades.
    Defaults to minor upgrades, but configurable to major.

    Example::

      >>> _get_available_engine_upgrades(client)
      {'oracle-se2': {'12.1.0.2.v2': '12.1.0.2.v5',
                      '12.1.0.2.v3': '12.1.0.2.v5'},
       'postgres': {'9.3.1': '9.3.14',
                    '9.3.10': '9.3.14',
                    '9.3.12': '9.3.14',
                    '9.3.2': '9.3.14'}}
    """
    results = {}
    engine_versions = client.describe_db_engine_versions()['DBEngineVersions']
    for v in engine_versions:
        if v['Engine'] not in results:
            results[v['Engine']] = {}
        if 'ValidUpgradeTarget' not in v or len(v['ValidUpgradeTarget']) == 0:
            continue
        for t in v['ValidUpgradeTarget']:
            if not major and t['IsMajorVersionUpgrade']:
                continue
            if LooseVersion(t['EngineVersion']) > LooseVersion(
                    results[v['Engine']].get(v['EngineVersion'], '0.0.0')):
                results[v['Engine']][v['EngineVersion']] = t['EngineVersion']
    return results
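A compact sketch of the aggregation logic against a canned response; `FakeRDSClient` is a hypothetical stand-in for a boto3 RDS client, and plain numeric tuples replace `LooseVersion` for the version comparison.

class FakeRDSClient:
    """Hypothetical stand-in for a boto3 RDS client returning canned data."""

    def describe_db_engine_versions(self):
        return {'DBEngineVersions': [
            {'Engine': 'postgres', 'EngineVersion': '9.3.1',
             'ValidUpgradeTarget': [
                 {'EngineVersion': '9.3.12', 'IsMajorVersionUpgrade': False},
                 {'EngineVersion': '9.3.14', 'IsMajorVersionUpgrade': False},
                 {'EngineVersion': '9.4.9', 'IsMajorVersionUpgrade': True}]}]}


def _version_key(version):
    """Order versions numerically instead of using LooseVersion."""
    return tuple(int(p) for p in version.split('.') if p.isdigit())


def latest_minor_upgrades(client):
    """Collapse each ValidUpgradeTarget list to the highest minor upgrade."""
    results = {}
    for v in client.describe_db_engine_versions()['DBEngineVersions']:
        engine = results.setdefault(v['Engine'], {})
        for t in v.get('ValidUpgradeTarget', []):
            if t['IsMajorVersionUpgrade']:
                continue
            best = engine.get(v['EngineVersion'], '0.0.0')
            if _version_key(t['EngineVersion']) > _version_key(best):
                engine[v['EngineVersion']] = t['EngineVersion']
    return results


print(latest_minor_upgrades(FakeRDSClient()))
# {'postgres': {'9.3.1': '9.3.14'}}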
def draft_child ( self ) : """Get the draft ( RESERVED ) child ."""
return super ( PIDNodeVersioning , self ) . children . status ( PIDStatus . RESERVED ) . one_or_none ( )
def init_app ( state ) : """Prepare the Flask application for Flask - Split . : param state : : class : ` BlueprintSetupState ` instance"""
app = state . app app . config . setdefault ( 'SPLIT_ALLOW_MULTIPLE_EXPERIMENTS' , False ) app . config . setdefault ( 'SPLIT_DB_FAILOVER' , False ) app . config . setdefault ( 'SPLIT_IGNORE_IP_ADDRESSES' , [ ] ) app . config . setdefault ( 'SPLIT_ROBOT_REGEX' , r""" (?i)\b( Baidu| Gigabot| Googlebot| libwww-perl| lwp-trivial| msnbot| SiteUptime| Slurp| WordPress| ZIBB| ZyBorg )\b """ ) app . jinja_env . globals . update ( { 'ab_test' : ab_test , 'finished' : finished } ) @ app . template_filter ( ) def percentage ( number ) : number *= 100 if abs ( number ) < 10 : return "%.1f%%" % round ( number , 1 ) else : return "%d%%" % round ( number )
def get ( self , channel_sid ) : """Constructs a UserChannelContext : param channel _ sid : The SID of the Channel that has the User Channel to fetch : returns : twilio . rest . chat . v2 . service . user . user _ channel . UserChannelContext : rtype : twilio . rest . chat . v2 . service . user . user _ channel . UserChannelContext"""
return UserChannelContext ( self . _version , service_sid = self . _solution [ 'service_sid' ] , user_sid = self . _solution [ 'user_sid' ] , channel_sid = channel_sid , )
def hostinterface_delete ( interfaceids , ** kwargs ) : '''Delete host interface . . versionadded : : 2016.3.0 : param interfaceids : IDs of the host interfaces to delete : param _ connection _ user : Optional - zabbix user ( can also be set in opts or pillar , see module ' s docstring ) : param _ connection _ password : Optional - zabbix password ( can also be set in opts or pillar , see module ' s docstring ) : param _ connection _ url : Optional - url of zabbix frontend ( can also be set in opts , pillar , see module ' s docstring ) : return : ID of deleted host interfaces , False on failure . CLI Example : . . code - block : : bash salt ' * ' zabbix . hostinterface _ delete 50'''
conn_args = _login ( ** kwargs ) ret = { } try : if conn_args : method = 'hostinterface.delete' if isinstance ( interfaceids , list ) : params = interfaceids else : params = [ interfaceids ] ret = _query ( method , params , conn_args [ 'url' ] , conn_args [ 'auth' ] ) return ret [ 'result' ] [ 'interfaceids' ] else : raise KeyError except KeyError : return ret
def build_emv_data ( self ) : """TODO : 95 TVR 82 app _ int _ prof"""
emv_data = '' emv_data += self . TLV . build ( { '82' : self . _get_app_interchange_profile ( ) } ) emv_data += self . TLV . build ( { '9A' : get_date ( ) } ) emv_data += self . TLV . build ( { '95' : self . term . get_tvr ( ) } ) emv_data += self . TLV . build ( { '9F10' : self . card . get_iss_application_data ( ) } ) emv_data += self . TLV . build ( { '9F26' : self . card . get_application_cryptogram ( ) } ) emv_data += self . TLV . build ( { '9F36' : self . card . get_transaction_counter ( ) } ) emv_data += self . TLV . build ( { '9F37' : self . term . get_unpredno ( ) } ) emv_data += self . TLV . build ( { '9F1A' : self . term . get_country_code ( ) } ) return emv_data
def _publish_replset ( self , data , base_prefix ) : """Given a response to replSetGetStatus , publishes all numeric values of the instance , aggregate stats of healthy nodes vs total nodes , and the observed statuses of all nodes in the replica set ."""
prefix = base_prefix + [ 'replset' ] self . _publish_dict_with_prefix ( data , prefix ) total_nodes = len ( data [ 'members' ] ) healthy_nodes = reduce ( lambda value , node : value + node [ 'health' ] , data [ 'members' ] , 0 ) self . _publish_dict_with_prefix ( { 'healthy_nodes' : healthy_nodes , 'total_nodes' : total_nodes } , prefix ) for node in data [ 'members' ] : replset_node_name = node [ self . config [ 'replset_node_name' ] ] node_name = str ( replset_node_name . split ( '.' ) [ 0 ] ) self . _publish_dict_with_prefix ( node , prefix + [ 'node' , node_name ] )
def get_pattern ( self ) : """It returns its url pattern"""
if self . is_root_node ( ) : return "" else : parent_pattern = self . parent . get_pattern ( ) if parent_pattern != "" : parent_pattern = u"{}" . format ( parent_pattern ) if not self . page and not self . is_leaf_node ( ) : if self . hide_in_url : return u'{0}' . format ( parent_pattern ) else : return u'{0}{1}' . format ( parent_pattern , self . name ) else : if self . is_leaf_node ( ) and self . page . regex and self . page . show_regex : return u'{0}{1}/{2}' . format ( parent_pattern , self . page . slug , self . page . regex ) elif self . is_leaf_node ( ) and ( not self . page . regex or not self . page . show_regex ) : return u'{0}{1}/' . format ( parent_pattern , self . page . slug ) elif not self . is_leaf_node ( ) and self . page . regex and self . page . show_regex : return u'{0}{1}/{2}/' . format ( parent_pattern , self . page . slug , self . page . regex ) else : return u'{0}{1}/' . format ( parent_pattern , self . page . slug )
def _get_fct_number_of_arg(self, fct):
    """Get the number of arguments of a function."""
    py_version = sys.version_info[0]
    if py_version >= 3:
        return len(inspect.signature(fct).parameters)
    return len(inspect.getargspec(fct)[0])
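On Python 3 the same count comes directly from `inspect.signature`; a small runnable sketch:

import inspect


def count_parameters(fct):
    """Count a callable's declared parameters via the modern inspect API."""
    return len(inspect.signature(fct).parameters)


def example(a, b, c=3):
    return a + b + c


print(count_parameters(example))             # 3
print(count_parameters(lambda x, y: x + y))  # 2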
def set_default_unit_all ( self , twig = None , unit = None , ** kwargs ) : """TODO : add documentation"""
if twig is not None and unit is None : # then try to support value as the first argument if no matches with twigs if isinstance ( unit , u . Unit ) or not isinstance ( twig , str ) : unit = twig twig = None elif not len ( self . filter ( twig = twig , check_default = check_default , ** kwargs ) ) : unit = twig twig = None for param in self . filter ( twig = twig , ** kwargs ) . to_list ( ) : param . set_default_unit ( unit )
def potential_energy ( self , potential = None ) : r"""The potential energy * per unit mass * : . . math : : E _ \ Phi = \ Phi ( \ boldsymbol { q } ) Returns E : : class : ` ~ astropy . units . Quantity ` The potential energy ."""
if self . hamiltonian is None and potential is None : raise ValueError ( "To compute the potential energy, a potential" " object must be provided!" ) if potential is None : potential = self . hamiltonian . potential return super ( Orbit , self ) . potential_energy ( potential )
def apply_completion ( self , completion ) : """Insert a given completion ."""
assert isinstance ( completion , Completion ) # If there was already a completion active , cancel that one . if self . complete_state : self . go_to_completion ( None ) self . complete_state = None # Insert text from the given completion . self . delete_before_cursor ( - completion . start_position ) self . insert_text ( completion . text )
def it_self_update():
    """Check the slpkg repository for a new version; if one is available,
    download it and update slpkg itself.
    """
__new_version__ = "" repository = "gitlab" branch = "master" ver_link = ( "https://raw.{0}usercontent.com/{1}/{2}/" "{3}/{4}/__metadata__.py" . format ( repository , _meta_ . __author__ , _meta_ . __all__ , branch , _meta_ . __all__ ) ) version_data = URL ( ver_link ) . reading ( ) for line in version_data . splitlines ( ) : line = line . strip ( ) if line . startswith ( "__version_info__" ) : __new_version__ = "." . join ( re . findall ( r"\d+" , line ) ) if __new_version__ > _meta_ . __version__ : if _meta_ . default_answer in [ "y" , "Y" ] : answer = _meta_ . default_answer else : print ( "\nNew version '{0}-{1}' is available !\n" . format ( _meta_ . __all__ , __new_version__ ) ) try : answer = raw_input ( "Would you like to upgrade [y/N]? " ) except EOFError : print ( "" ) raise SystemExit ( ) if answer in [ "y" , "Y" ] : print ( "" ) # new line after answer else : raise SystemExit ( ) dwn_link = [ "https://{0}.com/{1}/{2}/archive/" "v{3}/{4}-{5}.tar.gz" . format ( repository , _meta_ . __author__ , _meta_ . __all__ , __new_version__ , _meta_ . __all__ , __new_version__ ) ] if not os . path . exists ( _meta_ . build_path ) : os . makedirs ( _meta_ . build_path ) Download ( _meta_ . build_path , dwn_link , repo = "" ) . start ( ) os . chdir ( _meta_ . build_path ) slpkg_tar_file = "slpkg" + "-" + __new_version__ + ".tar.gz" tar = tarfile . open ( slpkg_tar_file ) tar . extractall ( ) tar . close ( ) file_name = "{0}-{1}" . format ( _meta_ . __all__ , __new_version__ ) os . chdir ( file_name ) check_md5 ( pkg_checksum ( slpkg_tar_file , _meta_ . __all__ ) , _meta_ . build_path + slpkg_tar_file ) subprocess . call ( "chmod +x {0}" . format ( "install.sh" ) , shell = True ) subprocess . call ( "sh install.sh" , shell = True ) else : print ( "\n{0}: There is no new version, already used the last !" "\n" . format ( _meta_ . __all__ ) ) raise SystemExit ( )
def queue ( self ) : """The name of the queue that this command was assigned to ."""
entry = self . _proto . commandQueueEntry if entry . HasField ( 'queueName' ) : return entry . queueName return None
def set_log_level ( log_level ) : """Set logging level of this module . Using ` logbook < https : / / logbook . readthedocs . io / en / stable / > ` _ _ module for logging . : param int log _ level : One of the log level of ` logbook < https : / / logbook . readthedocs . io / en / stable / api / base . html > ` _ _ . Disabled logging if ` ` log _ level ` ` is ` ` logbook . NOTSET ` ` . : raises LookupError : If ` ` log _ level ` ` is an invalid value ."""
if not LOGBOOK_INSTALLED : return # validate log level logbook . get_level_name ( log_level ) if log_level == logger . level : return if log_level == logbook . NOTSET : set_logger ( is_enable = False ) else : set_logger ( is_enable = True ) logger . level = log_level dataproperty . set_log_level ( log_level ) try : import simplesqlite simplesqlite . set_log_level ( log_level ) except ImportError : pass
def get_objectives_by_objective_banks ( self , objective_bank_ids ) : """Gets the list of ` ` Objectives ` ` corresponding to a list of ` ` ObjectiveBanks ` ` . arg : objective _ bank _ ids ( osid . id . IdList ) : list of objective bank ` ` Ids ` ` return : ( osid . learning . ObjectiveList ) - list of objectives raise : NullArgument - ` ` objective _ bank _ ids ` ` is ` ` null ` ` raise : OperationFailed - unable to complete request raise : PermissionDenied - authorization failure * compliance : mandatory - - This method must be implemented . *"""
# Implemented from template for # osid . resource . ResourceBinSession . get _ resources _ by _ bins objective_list = [ ] for objective_bank_id in objective_bank_ids : objective_list += list ( self . get_objectives_by_objective_bank ( objective_bank_id ) ) return objects . ObjectiveList ( objective_list )
def as_object_version ( value ) : """Get an object version object from an object version ID or an object version . : param value : A : class : ` invenio _ files _ rest . models . ObjectVersion ` or an object version ID . : returns : A : class : ` invenio _ files _ rest . models . ObjectVersion ` instance ."""
return value if isinstance ( value , ObjectVersion ) else ObjectVersion . query . filter_by ( version_id = value ) . one_or_none ( )
def thickness_hydrostatic(pressure, temperature, **kwargs):
    r"""Calculate the thickness of a layer via the hypsometric equation.

    This thickness calculation uses the pressure and temperature profiles (and
    optionally mixing ratio) via the hypsometric equation with virtual temperature
    adjustment

    .. math:: Z_2 - Z_1 = -\frac{R_d}{g} \int_{p_1}^{p_2} T_v \, d\ln p,

    which is based off of Equation 3.24 in [Hobbs2006]_.

    This assumes a hydrostatic atmosphere. Layer bottom and depth specified in pressure.

    Parameters
    ----------
    pressure : `pint.Quantity`
        Atmospheric pressure profile
    temperature : `pint.Quantity`
        Atmospheric temperature profile
    mixing : `pint.Quantity`, optional
        Profile of dimensionless mass mixing ratio. If none is given, virtual
        temperature is simply set to be the given temperature.
    molecular_weight_ratio : `pint.Quantity` or float, optional
        The ratio of the molecular weight of the constituent gas to that assumed
        for air. Defaults to the ratio for water vapor to dry air
        (:math:`\epsilon\approx0.622`).
    bottom : `pint.Quantity`, optional
        The bottom of the layer in pressure. Defaults to the first observation.
    depth : `pint.Quantity`, optional
        The depth of the layer in hPa. Defaults to the full profile if bottom is
        not given, and 100 hPa if bottom is given.

    Returns
    -------
    `pint.Quantity`
        The thickness of the layer in meters.

    See Also
    --------
    thickness_hydrostatic_from_relative_humidity, pressure_to_height_std, virtual_temperature
    """
mixing = kwargs . pop ( 'mixing' , None ) molecular_weight_ratio = kwargs . pop ( 'molecular_weight_ratio' , mpconsts . epsilon ) bottom = kwargs . pop ( 'bottom' , None ) depth = kwargs . pop ( 'depth' , None ) # Get the data for the layer , conditional upon bottom / depth being specified and mixing # ratio being given if bottom is None and depth is None : if mixing is None : layer_p , layer_virttemp = pressure , temperature else : layer_p = pressure layer_virttemp = virtual_temperature ( temperature , mixing , molecular_weight_ratio ) else : if mixing is None : layer_p , layer_virttemp = get_layer ( pressure , temperature , bottom = bottom , depth = depth ) else : layer_p , layer_temp , layer_w = get_layer ( pressure , temperature , mixing , bottom = bottom , depth = depth ) layer_virttemp = virtual_temperature ( layer_temp , layer_w , molecular_weight_ratio ) # Take the integral ( with unit handling ) and return the result in meters return ( - mpconsts . Rd / mpconsts . g * np . trapz ( layer_virttemp . to ( 'K' ) , x = np . log ( layer_p / units . hPa ) ) * units . K ) . to ( 'm' )
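A rough standalone sketch of the hypsometric integral with plain NumPy (no pint units); the constants and the four-level profile are illustrative assumptions.

import numpy as np

Rd = 287.05    # J kg^-1 K^-1, dry-air gas constant (assumed value)
g = 9.80665    # m s^-2, standard gravity

pressure = np.array([1000.0, 850.0, 700.0, 500.0])   # hPa
virt_temp = np.array([288.0, 278.0, 268.0, 253.0])   # K, virtual temperature

# Z2 - Z1 = -(Rd / g) * integral of Tv d(ln p), evaluated with the trapezoid rule
lnp = np.log(pressure)
integral = np.sum(0.5 * (virt_temp[1:] + virt_temp[:-1]) * np.diff(lnp))
thickness = -(Rd / g) * integral
print(round(thickness, 1), "m")   # roughly 5.5 km for this profile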
def _executor_script ( self ) : """Create shell - script in charge of executing the benchmark and return its path ."""
fd , path = tempfile . mkstemp ( suffix = '.sh' , dir = os . getcwd ( ) ) os . close ( fd ) with open ( path , 'w' ) as ostr : self . _write_executor_script ( ostr ) mode = os . stat ( path ) . st_mode os . chmod ( path , mode | stat . S_IEXEC | stat . S_IRGRP | stat . S_IRUSR ) return path
def comp ( request , slug , directory_slug = None ) : """View the requested comp"""
context = { } path = settings . COMPS_DIR comp_dir = os . path . split ( path ) [ 1 ] template = "{0}/{1}" . format ( comp_dir , slug ) if directory_slug : template = "{0}/{1}/{2}" . format ( comp_dir , directory_slug , slug ) working_dir = os . path . join ( path , slug ) if os . path . isdir ( working_dir ) : return redirect ( 'comp-listing' , directory_slug = slug ) try : t = get_template ( template ) except TemplateDoesNotExist : return redirect ( 'comp-listing' ) c = RequestContext ( request , context ) return HttpResponse ( t . render ( c ) )
def _add_credits_grade_to_section ( url , section ) : """Given the registration url passed in , add credits , grade , grade date in the section object"""
section_reg_data = get_resource ( url ) if section_reg_data is not None : section . student_grade = section_reg_data [ 'Grade' ] section . is_auditor = section_reg_data [ 'Auditor' ] if len ( section_reg_data [ 'GradeDate' ] ) > 0 : section . grade_date = parse ( section_reg_data [ "GradeDate" ] ) . date ( ) try : raw_credits = section_reg_data [ 'Credits' ] . strip ( ) section . student_credits = Decimal ( raw_credits ) except InvalidOperation : pass
def create ( vm_info ) : '''Creates a virtual machine from the given VM information This is what is used to request a virtual machine to be created by the cloud provider , wait for it to become available , and then ( optionally ) log in and install Salt on it . Events fired : This function fires the event ` ` salt / cloud / vm _ name / creating ` ` , with the payload containing the names of the VM , profile , and provider . @ param vm _ info . . code - block : : text name : < str > profile : < dict > driver : < provider > : < profile > clonefrom : < vm _ name > clonemode : < mode > ( default : state , choices : state , child , all ) @ type vm _ info dict @ return dict of resulting vm . ! ! ! Passwords can and should be included ! ! !'''
try : # Check for required profile parameters before sending any API calls . if vm_info [ 'profile' ] and config . is_profile_configured ( __opts__ , __active_provider_name__ or 'virtualbox' , vm_info [ 'profile' ] ) is False : return False except AttributeError : pass vm_name = vm_info [ "name" ] deploy = config . get_cloud_config_value ( 'deploy' , vm_info , __opts__ , search_global = False , default = True ) wait_for_ip_timeout = config . get_cloud_config_value ( 'wait_for_ip_timeout' , vm_info , __opts__ , default = 60 ) boot_timeout = config . get_cloud_config_value ( 'boot_timeout' , vm_info , __opts__ , default = 60 * 1000 ) power = config . get_cloud_config_value ( 'power_on' , vm_info , __opts__ , default = False ) key_filename = config . get_cloud_config_value ( 'private_key' , vm_info , __opts__ , search_global = False , default = None ) clone_mode = map_clonemode ( vm_info ) wait_for_pattern = vm_info [ 'waitforpattern' ] if 'waitforpattern' in vm_info . keys ( ) else None interface_index = vm_info [ 'interfaceindex' ] if 'interfaceindex' in vm_info . keys ( ) else 0 log . debug ( "Going to fire event: starting create" ) __utils__ [ 'cloud.fire_event' ] ( 'event' , 'starting create' , 'salt/cloud/{0}/creating' . format ( vm_info [ 'name' ] ) , args = __utils__ [ 'cloud.filter_event' ] ( 'creating' , vm_info , [ 'name' , 'profile' , 'provider' , 'driver' ] ) , sock_dir = __opts__ [ 'sock_dir' ] , transport = __opts__ [ 'transport' ] ) # to create the virtual machine . request_kwargs = { 'name' : vm_info [ 'name' ] , 'clone_from' : vm_info [ 'clonefrom' ] , 'clone_mode' : clone_mode } __utils__ [ 'cloud.fire_event' ] ( 'event' , 'requesting instance' , 'salt/cloud/{0}/requesting' . format ( vm_info [ 'name' ] ) , args = __utils__ [ 'cloud.filter_event' ] ( 'requesting' , request_kwargs , list ( request_kwargs ) ) , sock_dir = __opts__ [ 'sock_dir' ] , transport = __opts__ [ 'transport' ] ) vm_result = vb_clone_vm ( ** request_kwargs ) # Booting and deploying if needed if power : vb_start_vm ( vm_name , timeout = boot_timeout ) ips = vb_wait_for_network_address ( wait_for_ip_timeout , machine_name = vm_name , wait_for_pattern = wait_for_pattern ) if ips : ip = ips [ interface_index ] log . info ( "[ %s ] IPv4 is: %s" , vm_name , ip ) # ssh or smb using ip and install salt only if deploy is True if deploy : vm_info [ 'key_filename' ] = key_filename vm_info [ 'ssh_host' ] = ip res = __utils__ [ 'cloud.bootstrap' ] ( vm_info , __opts__ ) vm_result . update ( res ) __utils__ [ 'cloud.fire_event' ] ( 'event' , 'created machine' , 'salt/cloud/{0}/created' . format ( vm_info [ 'name' ] ) , args = __utils__ [ 'cloud.filter_event' ] ( 'created' , vm_result , list ( vm_result ) ) , sock_dir = __opts__ [ 'sock_dir' ] , transport = __opts__ [ 'transport' ] ) # Passwords should be included in this object ! ! return vm_result
def prescan ( self ) : """Scan urls scheduled for prescanning ( e . g . - - find - links )"""
if self . to_scan : list ( map ( self . scan_url , self . to_scan ) ) self . to_scan = None
def compatcallback(f):
    """Compatibility callback decorator for older click version.

    Click 1.0 does not have a version string stored, so we need to
    use getattr here to be safe.
    """
    if getattr(click, '__version__', '0.0') >= '2.0':
        return f
    return update_wrapper(lambda ctx, value: f(ctx, None, value), f)
def filter_data(self, pattern=''):
    """Filter available variables"""
    filtered_profiles = {}
    with open(self.abspath) as fobj:
        for idx, line in enumerate(fobj):
            if 'TIME SERIES' in line:
                break
            if pattern in line and (idx - self._attributes['CATALOG'] - 1) > 0:
                filtered_profiles[idx - self._attributes['CATALOG'] - 1] = line
    return filtered_profiles
def check_type_keywords ( self , schema , rule , path ) : """All supported keywords : - allowempty _ map - assertion - class - date - default - desc - enum - example - extensions - func - ident - include _ name - map _ regex _ rule - mapping - matching - matching _ rule - name - nullable - pattern - pattern _ regexp - range - regex _ mappings - required - schema - sequence - type - type _ class - unique - version"""
if not self . strict_rule_validation : return global_keywords = [ 'type' , 'desc' , 'example' , 'extensions' , 'name' , 'nullable' , 'version' , 'func' , 'include' ] all_allowed_keywords = { 'str' : global_keywords + [ 'default' , 'pattern' , 'range' , 'enum' , 'required' , 'unique' , 'req' ] , 'int' : global_keywords + [ 'default' , 'range' , 'enum' , 'required' , 'unique' ] , 'float' : global_keywords + [ 'default' , 'enum' , 'range' , 'required' ] , 'number' : global_keywords + [ 'default' , 'enum' ] , 'bool' : global_keywords + [ 'default' , 'enum' ] , 'map' : global_keywords + [ 'allowempty_map' , 'mapping' , 'map' , 'allowempty' , 'required' , 'matching-rule' , 'range' , 'class' ] , 'seq' : global_keywords + [ 'sequence' , 'seq' , 'required' , 'range' , 'matching' ] , 'sequence' : global_keywords + [ 'sequence' , 'seq' , 'required' ] , 'mapping' : global_keywords + [ 'mapping' , 'seq' , 'required' ] , 'timestamp' : global_keywords + [ 'default' , 'enum' ] , 'date' : global_keywords + [ 'default' , 'enum' ] , 'symbol' : global_keywords + [ 'default' , 'enum' ] , 'scalar' : global_keywords + [ 'default' , 'enum' ] , 'text' : global_keywords + [ 'default' , 'enum' , 'pattern' ] , 'any' : global_keywords + [ 'default' , 'enum' ] , 'enum' : global_keywords + [ 'default' , 'enum' ] , 'none' : global_keywords + [ 'default' , 'enum' , 'required' ] , } rule_type = schema . get ( 'type' ) if not rule_type : # Special cases for the " shortcut methods " if 'sequence' in schema or 'seq' in schema : rule_type = 'sequence' elif 'mapping' in schema or 'map' in schema : rule_type = 'mapping' allowed_keywords = all_allowed_keywords . get ( rule_type ) if not allowed_keywords and 'sequence' not in schema and 'mapping' not in schema and 'seq' not in schema and 'map' not in schema : raise RuleError ( 'No allowed keywords found for type: {0}' . format ( rule_type ) ) for k , v in schema . items ( ) : if k not in allowed_keywords : raise RuleError ( 'Keyword "{0}" is not supported for type: "{1}" ' . format ( k , rule_type ) )
def multiprocess_permutation ( bed_dict , mut_df , opts ) : """Handles parallelization of permutations by splitting work by chromosome ."""
chroms = sorted ( bed_dict . keys ( ) ) multiprocess_flag = opts [ 'processes' ] > 0 if multiprocess_flag : num_processes = opts [ 'processes' ] else : num_processes = 1 num_permutations = opts [ 'num_permutations' ] if not opts [ 'by_sample' ] : obs_result = [ ] else : uniq_samp = mut_df [ 'Tumor_Sample' ] . unique ( ) obs_result = pd . DataFrame ( np . zeros ( ( len ( uniq_samp ) , len ( cols ) ) ) , index = uniq_samp , columns = cols ) # initialize list containing output if not opts [ 'score_dir' ] : result_list = [ [ 0 , 0 , 0 , 0 , 0 , 0 , 0 ] for k in range ( num_permutations ) ] else : result_list = [ [ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ] for k in range ( num_permutations ) ] # iterate over each chromosome for i in range ( 0 , len ( chroms ) , num_processes ) : if multiprocess_flag : pool = Pool ( processes = num_processes ) tmp_num_proc = len ( chroms ) - i if i + num_processes > len ( chroms ) else num_processes info_repeat = ( ( bed_dict [ chroms [ tmp_ix ] ] , mut_df , opts ) for tmp_ix in range ( i , i + tmp_num_proc ) ) process_results = pool . imap ( singleprocess_permutation , info_repeat ) process_results . next = utils . keyboard_exit_wrapper ( process_results . next ) try : for chrom_result , obs_mutations in process_results : for j in range ( num_permutations ) : result_list [ j ] [ 0 ] += chrom_result [ j ] [ 0 ] result_list [ j ] [ 1 ] += chrom_result [ j ] [ 1 ] result_list [ j ] [ 2 ] += chrom_result [ j ] [ 2 ] result_list [ j ] [ 3 ] += chrom_result [ j ] [ 3 ] result_list [ j ] [ 4 ] += chrom_result [ j ] [ 4 ] result_list [ j ] [ 5 ] += chrom_result [ j ] [ 5 ] result_list [ j ] [ 6 ] += chrom_result [ j ] [ 6 ] if opts [ 'score_dir' ] : result_list [ j ] [ 7 ] += chrom_result [ j ] [ 7 ] result_list [ j ] [ 8 ] += chrom_result [ j ] [ 8 ] if not opts [ 'by_sample' ] : obs_result . append ( obs_mutations ) else : obs_result = obs_result + obs_mutations except KeyboardInterrupt : pool . close ( ) pool . join ( ) logger . info ( 'Exited by user. ctrl-c' ) sys . exit ( 0 ) pool . close ( ) pool . join ( ) else : info = ( bed_dict [ chroms [ i ] ] , mut_df , opts ) chrom_result , obs_mutations = singleprocess_permutation ( info ) for j in range ( num_permutations ) : result_list [ j ] [ 0 ] += chrom_result [ j ] [ 0 ] result_list [ j ] [ 1 ] += chrom_result [ j ] [ 1 ] result_list [ j ] [ 2 ] += chrom_result [ j ] [ 2 ] result_list [ j ] [ 3 ] += chrom_result [ j ] [ 3 ] result_list [ j ] [ 4 ] += chrom_result [ j ] [ 4 ] result_list [ j ] [ 5 ] += chrom_result [ j ] [ 5 ] result_list [ j ] [ 6 ] += chrom_result [ j ] [ 6 ] if opts [ 'score_dir' ] : result_list [ j ] [ 7 ] += chrom_result [ j ] [ 7 ] result_list [ j ] [ 8 ] += chrom_result [ j ] [ 8 ] if not opts [ 'by_sample' ] : obs_result . append ( obs_mutations ) else : obs_result = obs_result + obs_mutations return result_list , obs_result
def get_shiftfile_row(self):
    """Return the information for a shiftfile for this image to provide
    compatibility with the IRAF-based MultiDrizzle.
    """
    if self.fit is not None:
        rowstr = '%s %0.6f %0.6f %0.6f %0.6f %0.6f %0.6f\n' % (
            self.name, self.fit['offset'][0], self.fit['offset'][1],
            self.fit['rot'], self.fit['scale'][0],
            self.fit['rms'][0], self.fit['rms'][1])
    else:
        rowstr = None
    return rowstr
def pull_all_rtl ( configuration ) : """Pulls all translations - reviewed or not - for RTL languages"""
print ( "Pulling all translated RTL languages from transifex..." ) for lang in configuration . rtl_langs : print ( 'rm -rf conf/locale/' + lang ) execute ( 'rm -rf conf/locale/' + lang ) execute ( 'tx pull -l ' + lang ) clean_translated_locales ( configuration , langs = configuration . rtl_langs )
def deserialize ( self , text , method = 'json' , encoding = 'utf8' , raise_exception = False ) : """Alias of helper . string . serialization . deserialize"""
return self . helper . string . serialization . deserialize ( text , method = method , encoding = encoding , raise_exception = raise_exception )
def _set_logger ( logger_name , level = logging . INFO ) : """Convenience function to quickly configure full debug output to go to the console ."""
log = logging . getLogger ( logger_name ) log . setLevel ( level ) ch = logging . StreamHandler ( None ) ch . setLevel ( level ) formatter = logging . Formatter ( DebugFmtString ) # add formatter to ch ch . setFormatter ( formatter ) # add ch to logger log . addHandler ( ch )
def run_in_subprocess(func, *args, **kwargs):
    """Run function in subprocess, return a Process object"""
    from multiprocessing import Process
    thread = Process(target=func, args=args, kwargs=kwargs)
    thread.daemon = True
    thread.start()
    return thread
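A runnable sketch of the same helper in isolation; the `if __name__ == "__main__"` guard matters on platforms that use the 'spawn' start method.

import time
from multiprocessing import Process


def run_in_subprocess(func, *args, **kwargs):
    """Run func in a daemon subprocess and return the started Process."""
    proc = Process(target=func, args=args, kwargs=kwargs)
    proc.daemon = True
    proc.start()
    return proc


def worker(name, delay=0.1):
    time.sleep(delay)
    print("hello from", name)


if __name__ == "__main__":
    p = run_in_subprocess(worker, "child", delay=0.2)
    p.join()                        # wait here; daemon processes die with the parent otherwise
    print("exit code:", p.exitcode)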
def waveguide ( path , points , finish , bend_radius , number_of_points = 0.01 , direction = None , layer = 0 , datatype = 0 ) : '''Easy waveguide creation tool with absolute positioning . path : starting ` gdspy . Path ` points : coordinates along which the waveguide will travel finish : end point of the waveguide bend _ radius : radius of the turns in the waveguide number _ of _ points : same as in ` path . turn ` direction : starting direction layer : GDSII layer number datatype : GDSII datatype number Return ` path ` .'''
if direction is not None : path . direction = direction axis = 0 if path . direction [ 1 ] == 'x' else 1 points . append ( finish [ ( axis + len ( points ) ) % 2 ] ) n = len ( points ) if points [ 0 ] > ( path . x , path . y ) [ axis ] : path . direction = [ '+x' , '+y' ] [ axis ] else : path . direction = [ '-x' , '-y' ] [ axis ] for i in range ( n ) : path . segment ( abs ( points [ i ] - ( path . x , path . y ) [ axis ] ) - bend_radius , layer = layer , datatype = datatype ) axis = 1 - axis if i < n - 1 : goto = points [ i + 1 ] else : goto = finish [ axis ] if ( goto > ( path . x , path . y ) [ axis ] ) ^ ( ( path . direction [ 0 ] == '+' ) ^ ( path . direction [ 1 ] == 'x' ) ) : bend = 'l' else : bend = 'r' path . turn ( bend_radius , bend , number_of_points = number_of_points , layer = layer , datatype = datatype ) return path . segment ( abs ( finish [ axis ] - ( path . x , path . y ) [ axis ] ) , layer = layer , datatype = datatype )
def _prepare_inputs ( ma_fn , bam_file , out_dir ) : """Convert to fastq with counts"""
fixed_fa = os . path . join ( out_dir , "file_reads.fa" ) count_name = dict ( ) with file_transaction ( fixed_fa ) as out_tx : with open ( out_tx , 'w' ) as out_handle : with open ( ma_fn ) as in_handle : h = next ( in_handle ) for line in in_handle : cols = line . split ( "\t" ) name_with_counts = "%s_x%s" % ( cols [ 0 ] , sum ( map ( int , cols [ 2 : ] ) ) ) count_name [ cols [ 0 ] ] = name_with_counts out_handle . write ( ">%s\n%s\n" % ( name_with_counts , cols [ 1 ] ) ) fixed_bam = os . path . join ( out_dir , "align.bam" ) bam_handle = pysam . AlignmentFile ( bam_file , "rb" ) with pysam . AlignmentFile ( fixed_bam , "wb" , template = bam_handle ) as out_handle : for read in bam_handle . fetch ( ) : read . query_name = count_name [ read . query_name ] out_handle . write ( read ) return fixed_fa , fixed_bam
def get(self, name=None, provider='AwsEKS', print_output=True):
    """List all clusters."""
    # Create cluster object
    Cluster = getattr(providers, provider)
    cluster = Cluster(name)
    self.kubeconf.open()
    if name is None:
        clusters = self.kubeconf.get_clusters()
        print("Running Clusters:")
        for cluster in clusters:
            print(f" - {cluster['name']}")
    else:
        # Check that the cluster exists.
        if self.check_cluster_exists(name) is False:
            raise JhubctlError("Cluster name not found in available clusters.")
        cluster = self.kubeconf.get_cluster(name=cluster.cluster_name)
        pprint.pprint(cluster, depth=4)
def clean_tmpdir(path):
    """Invoked atexit, this removes our tmpdir"""
    if os.path.exists(path) and os.path.isdir(path):
        rmtree(path)
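Typical usage is to register the cleanup with `atexit` right after creating the temporary directory; a minimal sketch:

import atexit
import os
import tempfile
from shutil import rmtree


def clean_tmpdir(path):
    """Remove the directory at interpreter exit if it still exists."""
    if os.path.exists(path) and os.path.isdir(path):
        rmtree(path)


workdir = tempfile.mkdtemp(prefix="demo-")
atexit.register(clean_tmpdir, workdir)    # cleanup runs automatically at exit
print("working in", workdir)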
def to_rec ( samples , default_keys = None ) : """Convert inputs into CWL records , useful for single item parallelization ."""
recs = samples_to_records ( [ normalize_missing ( utils . to_single_data ( x ) ) for x in samples ] , default_keys ) return [ [ x ] for x in recs ]
def set_owner ( self , path , ** kwargs ) : """Set owner of a path ( i . e . a file or a directory ) . The parameters owner and group cannot both be null . : param owner : user : param group : group"""
response = self . _put ( path , 'SETOWNER' , ** kwargs ) assert not response . content
def list_archive ( archive , verbosity = 1 , program = None , interactive = True ) : """List given archive ."""
# Set default verbosity to 1 since the listing output should be visible . util . check_existing_filename ( archive ) if verbosity >= 0 : util . log_info ( "Listing %s ..." % archive ) return _handle_archive ( archive , 'list' , verbosity = verbosity , interactive = interactive , program = program )
def run_solr_text_on ( solrInstance , category , q , qf , fields , optionals ) : """Return the result of a solr query on the given solrInstance ( Enum ESOLR ) , for a certain document _ category ( ESOLRDoc ) and id"""
if optionals == None : optionals = "" query = solrInstance . value + "select?q=" + q + "&qf=" + qf + "&fq=document_category:\"" + category . value + "\"&fl=" + fields + "&wt=json&indent=on" + optionals # print ( " QUERY : " , query ) response = requests . get ( query ) return response . json ( ) [ 'response' ] [ 'docs' ]
def clean_path_middleware(environ, start_response=None):
    '''Clean url from double slashes and redirect if needed.'''
    path = environ['PATH_INFO']
    if path and '//' in path:
        url = re.sub("/+", '/', path)
        if not url.startswith('/'):
            url = '/%s' % url
        qs = environ['QUERY_STRING']
        if qs:
            url = '%s?%s' % (url, qs)
        raise HttpRedirect(url)
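The slash-collapsing rule is easy to exercise outside WSGI; this sketch returns the redirect target instead of raising `HttpRedirect`.

import re


def clean_path(path, query=""):
    """Collapse repeated slashes; return the redirect target or None if already clean."""
    if path and '//' in path:
        url = re.sub(r"/+", '/', path)
        if not url.startswith('/'):
            url = '/' + url
        return url + ('?' + query if query else '')
    return None


print(clean_path("/blog//2024///post", "page=2"))   # /blog/2024/post?page=2
print(clean_path("/already/clean"))                 # None -- no redirect needed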
def get_removes ( self , api = None , profile = None ) : """Returns filtered list of Remove objects in this registry : param str api : Return Remove objects with this api name or None to return all Remove objects . : param str profile : Return Remove objects with this profile or None to return all Remove objects . : return : list of Remove objects"""
out = [ ] for ft in self . get_features ( api ) : out . extend ( ft . get_removes ( profile ) ) return out
def PSWAP(angle, q1, q2):
    """Produces a parameterized SWAP gate::

        PSWAP(phi) = [[1, 0,             0,             0],
                      [0, 0,             exp(1j * phi), 0],
                      [0, exp(1j * phi), 0,             0],
                      [0, 0,             0,             1]]

    :param angle: The angle of the phase to apply to the swapped states. This phase is
        applied to q1 when it is in the 1 state and to q2 when it is in the 0 state.
    :param q1: Qubit 1.
    :param q2: Qubit 2.
    :returns: A Gate object.
    """
    return Gate(name="PSWAP", params=[angle], qubits=[unpack_qubit(q) for q in (q1, q2)])
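The docstring matrix can be checked numerically; a small NumPy sketch building the dense unitary (independent of pyQuil's `Gate` object):

import numpy as np


def pswap_matrix(phi):
    """Dense 4x4 unitary for the parameterized SWAP gate described above."""
    p = np.exp(1j * phi)
    return np.array([[1, 0, 0, 0],
                     [0, 0, p, 0],
                     [0, p, 0, 0],
                     [0, 0, 0, 1]], dtype=complex)


m = pswap_matrix(np.pi / 2)
print(np.allclose(m @ m.conj().T, np.eye(4)))   # True -- the gate is unitary
print(m[1, 2])                                  # approximately 1j = exp(1j * pi / 2)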
def bulk_create ( self , objs , batch_size = None ) : """Inserts each of the instances into the database . This does * not * call save ( ) on each of the instances , does not send any pre / post save signals , and does not set the primary key attribute if it is an autoincrement field ."""
self . _insert ( objs , batch_size = batch_size , return_id = False , force_insert = True ) self . refresh ( )
def insrtd(item, inset):
    """Insert an item into a double precision set.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/insrtd_c.html

    :param item: Item to be inserted.
    :type item: Union[float, Iterable[float]]
    :param inset: Insertion set.
    :type inset: spiceypy.utils.support_types.SpiceCell
    """
    assert isinstance(inset, stypes.SpiceCell)
    if hasattr(item, "__iter__"):
        for d in item:
            libspice.insrtd_c(ctypes.c_double(d), ctypes.byref(inset))
    else:
        item = ctypes.c_double(item)
        libspice.insrtd_c(item, ctypes.byref(inset))
def with_partition_id ( self , partition_id ) : """Init with partition Id . : param partition _ id : ID of a given partition . : type partition _ id : str"""
self . partition_id = partition_id self . owner = None self . token = None self . epoch = 0 self . event_processor_context = None
def bulk_write ( self , dev_handle , ep , intf , data , timeout ) : r"""Perform a bulk write . dev _ handle is the value returned by the open _ device ( ) method . The ep parameter is the bEndpointAddress field whose endpoint the data will be sent to . intf is the bInterfaceNumber field of the interface containing the endpoint . The data parameter is the data to be sent . It must be an instance of the array . array class . The timeout parameter specifies a time limit to the operation in miliseconds . The method returns the number of bytes written ."""
_not_implemented(self.bulk_write)
def analog_units ( self ) : """Shortcut to retrieve all analog points units [ Used by Bokeh trending feature ]"""
au = []
us = []
for each in self.points:
    if isinstance(each, NumericPoint):
        au.append(each.properties.name)
        us.append(each.properties.units_state)
return dict(zip(au, us))
def on_init(target: "EncryptableMixin", args, kwargs):
    """Intercept SQLAlchemy's instance init event.

    SQLAlchemy allows callbacks to intercept ORM instance init functions. The
    calling arguments will be an empty instance of the `target` model, plus
    the arguments passed to `__init__`. The `kwargs` dictionary is mutable
    (which is why it is not passed as `**kwargs`).

    We leverage this callback to conditionally remove the `__plaintext__`
    value and set the `ciphertext` property.
    """
encryptor = target.__encryptor__

# encryption context may be nullable
try:
    encryption_context_key = str(kwargs[target.__encryption_context_key__])
except KeyError:
    return

# do not encrypt targets that are not configured for it
if encryption_context_key not in encryptor:
    return

plaintext = target.plaintext_to_str(kwargs.pop(target.__plaintext__))

# do not try to encrypt when plaintext is None
if plaintext is None:
    return

ciphertext, key_ids = encryptor.encrypt(encryption_context_key, plaintext)
target.ciphertext = (ciphertext, key_ids)
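The hook only does something once it is wired to SQLAlchemy's init event; a minimal registration sketch follows, in which SecretNote is a hypothetical EncryptableMixin model:

# Hedged sketch: SecretNote is a hypothetical model using EncryptableMixin.
from sqlalchemy import event

event.listen(SecretNote, "init", on_init)

# From here on, constructing SecretNote(plaintext="...", tenant_id="t-1")
# triggers on_init, which pops the plaintext kwarg and stores ciphertext instead.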
def cbpdn_relax ( k ) : """Do relaxation for the cbpdn stage . The only parameter is the slice index ` k ` and there are no return values ; all inputs and outputs are from and to global variables ."""
mp_Z_X[k] = mp_xrlx * mp_Z_X[k] + (1 - mp_xrlx) * mp_Z_Y[k]
def get_config_variable ( self , config_id , offset ) : """Get a chunk of a config variable ' s value ."""
config = self._config_variables.get(config_id)
if config is None:
    return [b""]
return [bytes(config.current_value[offset:offset + 20])]
def _check_pillar ( self , force = False ) : '''Check the pillar for errors , refuse to run the state if there are errors in the pillar and return the pillar errors'''
if force:
    return True
if '_errors' in self.state.opts['pillar']:
    return False
return True
def export_gltf(scene, extras=None, include_normals=False):
    """Export a scene object as a GLTF directory.

    This puts each mesh into a separate file (i.e. a `buffer`)
    as opposed to one larger file.

    Parameters
    ----------
    scene : trimesh.Scene
        Scene to be exported

    Returns
    -------
    export : dict
        Format: {file name : file data}
    """
# if we were passed a bare Trimesh or Path3D object
if (not util.is_instance_named(scene, "Scene") and
        hasattr(scene, "scene")):
    scene = scene.scene()

# create the header and buffer data
tree, buffer_items = _create_gltf_structure(
    scene=scene,
    extras=extras,
    include_normals=include_normals)

# store files as {name: data}
files = {}
# make one buffer per buffer_items
buffers = [None] * len(buffer_items)
# A bufferView is a slice of a file
views = [None] * len(buffer_items)

# create the buffer views
for i, item in enumerate(buffer_items):
    views[i] = {"buffer": i, "byteOffset": 0, "byteLength": len(item)}
    buffer_data = _byte_pad(bytes().join(buffer_items[i:i + 2]))
    buffer_name = "gltf_buffer_{}.bin".format(i)
    buffers[i] = {"uri": buffer_name, "byteLength": len(buffer_data)}
    files[buffer_name] = buffer_data

tree["buffers"] = buffers
tree["bufferViews"] = views
files["model.gltf"] = json.dumps(tree).encode("utf-8")
return files
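A short, hedged usage sketch showing how the returned {name: data} mapping is typically written out as a glTF directory (the input mesh path is a placeholder):

# Hedged sketch: assumes trimesh is installed; 'model.obj' is a placeholder path.
import os
import trimesh

scene = trimesh.load('model.obj')      # any loadable mesh or scene
files = export_gltf(scene)             # e.g. {'model.gltf': b'...', 'gltf_buffer_0.bin': b'...'}
os.makedirs('exported', exist_ok=True)
for name, data in files.items():
    with open(os.path.join('exported', name), 'wb') as f:
        f.write(data)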
def permitted_query(self, query, group, operations):
    '''Change the ``query`` so that only instances for which ``group`` has
    roles with permission on ``operations`` are returned.
    '''
session = query.session
models = session.router
user = group.user
if user.is_superuser:
    # super-users have all permissions
    return query
# query on all roles for group
roles = group.roles.query()
# The through model for the Role/Permission relationship
throgh_model = models.role.permissions.model
models[throgh_model].filter(role=roles,
                            permission__model_type=query.model,
                            permission__operations=operations)
# query on all relevant permissions
permissions = models.permission.filter(model_type=query.model,
                                       level=operations)
owner_query = query.filter(user=user)
# all roles for the query model with appropriate permission level
# (`level`, `Role` and `model` are assumed to be defined in the enclosing module)
roles = models.role.filter(model_type=query.model, level__ge=level)
# Now we need groups which have these roles
groups = Role.groups.throughquery(session).filter(role=roles).get_field('group')
# I need to know if user is in any of these groups
if user.groups.filter(id=groups).count():
    # it is, lets get the model with permissions less
    # or equal permission level
    permitted = models.instancerole.filter(role=roles).get_field('object_id')
    return owner_query.union(model.objects.filter(id=permitted))
else:
    return owner_query
def filter(self, sids):
    """Extracts a submap of self for the given sids."""
dic = self.__class__(self.shape_y, self.shape_z)
for sid in sids:
    try:
        dic[sid] = self[sid]
    except KeyError:
        pass
return dic
def degree ( self , kind = 'out' , weighted = True ) : '''Returns an array of vertex degrees . kind : either ' in ' or ' out ' , useful for directed graphs weighted : controls whether to count edges or sum their weights'''
if kind == 'out':
    axis = 1
    adj = self.matrix('dense', 'csc')
else:
    axis = 0
    adj = self.matrix('dense', 'csr')
if not weighted and self.is_weighted():
    # With recent numpy and a dense matrix, could do:
    #  d = np.count_nonzero(adj, axis=axis)
    d = (adj != 0).sum(axis=axis)
else:
    d = adj.sum(axis=axis)
return np.asarray(d).ravel()
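To make the in/out distinction concrete, here is a small self-contained numpy illustration of the same row/column reduction, independent of the graph class above:

# Standalone illustration of the axis convention used above.
import numpy as np

# weighted adjacency: edge i -> j has weight adj[i, j]
adj = np.array([[0, 2, 0],
                [1, 0, 3],
                [0, 0, 0]], dtype=float)

out_degree = adj.sum(axis=1)         # [2., 4., 0.]  weight leaving each vertex
in_degree = adj.sum(axis=0)          # [1., 2., 3.]  weight entering each vertex
out_edges = (adj != 0).sum(axis=1)   # [1, 2, 0]     unweighted edge counts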
def SDSS_spectra_query(self, cat_name, ra, dec, radius, group=True, **kwargs):
    """Use astroquery to search SDSS for sources within a search cone.

    Parameters
    ----------
    cat_name : str
        A name for the imported catalog (e.g. '2MASS')
    ra : astropy.units.quantity.Quantity
        The RA of the center of the cone search
    dec : astropy.units.quantity.Quantity
        The Dec of the center of the cone search
    radius : astropy.units.quantity.Quantity
        The radius of the cone search
    """
# Verify the cat_name
if self._catalog_check(cat_name):

    # Prep the current catalog as an astropy.QTable
    tab = at.Table.from_pandas(self.catalog)

    # Cone search SDSS
    print("Searching SDSS for sources within {} of ({}, {}). Please be patient..."
          .format(radius, ra, dec))
    crds = coord.SkyCoord(ra=ra, dec=dec, frame='icrs')
    try:
        data = SDSS.query_region(crds, spectro=True, radius=radius)
    except Exception:
        print("No data found in SDSS within {} of ({}, {}).".format(radius, ra, dec))
        return

    # Ingest the data (SDSS results are assumed to use 'ra'/'dec' columns)
    self.ingest_data(data, cat_name, 'id', ra_col='ra', dec_col='dec')

    # Regroup
    if len(self.catalogs) > 1 and group:
        self.group_sources(self.xmatch_radius)
def make_dataframe(table, clean=True, verbose=False, **kwargs):
    """Coerce a provided table (QuerySet, list of lists, list of Series)

    >>> dt = datetime.datetime
    >>> make_dataframe([[1, 2, 3], [4, 5, 6]])
       0  1  2
    0  1  2  3
    1  4  5  6
    >>> make_dataframe([])
    Empty DataFrame
    Columns: []
    Index: []
    >>> make_dataframe([OrderedDict([('a', 2), ('b', 3)]), PrettyDict([('a', 4), ('b', 5)])])
       a  b
    0  2  3
    1  4  5
    >>> make_dataframe([[dt(2700, 1, 1), dt(2015, 11, 2)], [(2700 - 2015) * 365.25 + 60, 1]]).T
                                      0       1
    0  2262-04-11 23:47:16.854775+00:00  250256
    1         2015-11-02 00:00:00+00:00       1
    """
if hasattr(table, 'objects') and not callable(table.objects):
    table = table.objects
if hasattr(table, 'filter') and callable(table.values):
    table = pd.DataFrame.from_records(list(table.values().all()))
elif isinstance(table, basestring) and os.path.isfile(table):
    table = pd.DataFrame.from_csv(table)
# elif isinstance(table, ValuesQuerySet) or (isinstance(table, (list, tuple)) and
#         len(table) and all(isinstance(v, Mapping) for v in table)):
#     table = pd.DataFrame.from_records(table)
try:
    table = pd.DataFrame(table, **kwargs)
except (IndexError, ValueError, AttributeError, TypeError):
    table = pd.DataFrame(table)
if clean and len(table) and isinstance(table, pd.DataFrame):
    if verbose:
        print('Cleaning up OutOfBoundsDatetime values...')
    for col in table.columns:
        if any_generated((isinstance(v, DATETIME_TYPES) for v in table[col])):
            table[col] = clean_series(table[col])
    table = table.dropna(how='all')
return table
def add_storage_account ( self , name , key , endpoint ) : # type : ( StorageCredentials , str , str , str ) - > None """Add a storage account : param StorageCredentials self : this : param str name : name of storage account to store : param str key : storage key or sas : param str endpoint : endpoint"""
if name in self._storage_accounts:
    raise ValueError('{} already exists in storage accounts'.format(name))
self._storage_accounts[name] = StorageAccount(
    name,
    key,
    endpoint,
    self._general_options.concurrency.transfer_threads,
    self._general_options.timeout,
    self._general_options.proxy,
)
def cmd ( send , _ , args ) : """Returns a list of admins . V = Verified ( authed to NickServ ) , U = Unverified . Syntax : { command }"""
adminlist = []
for admin in args['db'].query(Permissions).order_by(Permissions.nick).all():
    if admin.registered:
        adminlist.append("%s (V)" % admin.nick)
    else:
        adminlist.append("%s (U)" % admin.nick)
send(", ".join(adminlist), target=args['nick'])
def add_capability(base, *classes):
    """Add capabilities to an existing base; all objects get the additional
    functionality, but don't get inited. Use with great care!
    """
if _debug:
    add_capability._debug("add_capability %r %r", base, classes)

# start out with a collector
if not issubclass(base, Collector):
    raise TypeError("base must be a subclass of Collector")

# make sure you only add capabilities
for cls in classes:
    if not issubclass(cls, Capability):
        raise TypeError("%s is not a Capability subclass" % (cls,))

base.__bases__ += classes
for cls in classes:
    base.__name__ += '+' + cls.__name__
def sha1sum_file(filename):
    '''Return the secure hash digest with sha1 algorithm for a given file

    >>> from timeside.core.tools.test_samples import samples
    >>> wav_file = samples["C4_scale.wav"]
    >>> print(sha1sum_file(wav_file))
    a598e78d0b5c90da54a77e34c083abdcd38d42ba
    '''
import hashlib
import io

sha1 = hashlib.sha1()
chunk_size = sha1.block_size * io.DEFAULT_BUFFER_SIZE
with open(filename, 'rb') as f:
    for chunk in iter(lambda: f.read(chunk_size), b''):
        sha1.update(chunk)
return sha1.hexdigest()
def extent(self, srid=None):
    """Returns the GeoQuerySet extent as a 4-tuple.

    Keyword args:
        srid -- EPSG id for transforming the output geometry.
    """
expr = self.geo_field.name
if srid:
    expr = geofn.Transform(expr, srid)
expr = models.Extent(expr)
clone = self.all()
name, val = clone.aggregate(expr).popitem()
return val
def create ( self , repo_slug = None , key = None , label = None ) : """Associate an ssh key with your repo and return it ."""
key = '%s' % key
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('SET_DEPLOY_KEY',
                         username=self.bitbucket.username,
                         repo_slug=repo_slug)
return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth,
                               key=key, label=label)
def get_dev_mac_learn(devid, auth, url):
    '''Takes the devid of a specific device and issues a RESTFUL call to gather
    the current IP-MAC learning entries on the target device.

    :param devid: int value of the target device
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: list of dict objects which contain the mac learn table of target device id
    :rtype: list

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.device import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> dev_mac_learn = get_dev_mac_learn('10', auth.creds, auth.url)
    >>> assert type(dev_mac_learn) is list
    >>> assert 'deviceId' in dev_mac_learn[0]
    '''
get_dev_mac_learn_url = '/imcrs/res/access/ipMacLearn/' + str(devid)
f_url = url + get_dev_mac_learn_url
try:
    r = requests.get(f_url, auth=auth, headers=HEADERS)
    if r.status_code == 200:
        if len(r.text) < 1:
            mac_learn_query = {}
            return mac_learn_query
        else:
            mac_learn_query = (json.loads(r.text))['ipMacLearnResult']
            return mac_learn_query
except requests.exceptions.RequestException as e:
    return "Error:\n" + str(e) + " get_dev_mac_learn: An Error has occured"
def update_configuration ( self , ** kwargs ) : """Update configuration using valid kwargs as defined in the enable constructor . : param dict kwargs : kwargs to satisfy valid args from ` enable ` : rtype : bool"""
updated = False
if 'announced_networks' in kwargs:
    kwargs.update(announced_ne_setting=kwargs.pop('announced_networks'))
if 'bgp_profile' in kwargs:
    kwargs.update(bgp_profile_ref=kwargs.pop('bgp_profile'))
if 'autonomous_system' in kwargs:
    kwargs.update(bgp_as_ref=kwargs.pop('autonomous_system'))

announced_ne = kwargs.pop('announced_ne_setting', None)

for name, value in kwargs.items():
    _value = element_resolver(value)
    if self.data.get(name) != _value:
        self.data[name] = _value
        updated = True

if announced_ne is not None:
    s = self.data.get('announced_ne_setting')
    ne = self._unwrap(announced_ne)
    if len(announced_ne) != len(s) or not self._equal(ne, s):
        self.data.update(announced_ne_setting=ne)
        updated = True
return updated
def _getitem ( self , key , validate = False ) : """Return specified page from cache or file ."""
key = int(key)
pages = self.pages
if key < 0:
    key %= len(self)
elif self._indexed and key >= len(pages):
    raise IndexError('index out of range')
if key < len(pages):
    page = pages[key]
    if self._cache:
        if not isinstance(page, inttypes):
            if validate and validate != page.hash:
                raise RuntimeError('page hash mismatch')
            return page
    elif isinstance(page, (TiffPage, self._tiffpage)):
        if validate and validate != page.hash:
            raise RuntimeError('page hash mismatch')
        return page
self._seek(key)
page = self._tiffpage(self.parent, index=key, keyframe=self._keyframe)
if validate and validate != page.hash:
    raise RuntimeError('page hash mismatch')
if self._cache:
    pages[key] = page
return page
def is_all_field_none ( self ) : """: rtype : bool"""
if self._color is not None:
    return False
if self._default_avatar_status is not None:
    return False
if self._restriction_chat is not None:
    return False
return True
def add_ones ( a ) : """Adds a column of 1s at the end of the array"""
arr = N.ones((a.shape[0], a.shape[1] + 1))
arr[:, :-1] = a
return arr
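A quick illustration of the effect, using numpy directly (the alias N above is assumed to be numpy):

# Small demo of the same operation with numpy imported directly.
import numpy as np

a = np.array([[1., 2.],
              [3., 4.]])
with_ones = np.ones((a.shape[0], a.shape[1] + 1))
with_ones[:, :-1] = a
# with_ones is now [[1., 2., 1.],
#                   [3., 4., 1.]]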
def _proxy ( self ) : """Generate an instance context for the instance , the context is capable of performing various actions . All instance actions are proxied to the context : returns : KeyContext for this KeyInstance : rtype : twilio . rest . api . v2010 . account . key . KeyContext"""
if self._context is None:
    self._context = KeyContext(
        self._version,
        account_sid=self._solution['account_sid'],
        sid=self._solution['sid'],
    )
return self._context
def json ( self , ** kwargs ) : """Create JSON object out of the response ."""
try:
    return super(ScraperResponse, self).json(**kwargs)
except ValueError as ve:
    raise ParseException(ve)
def get_assessments_metadata ( self ) : """Gets the metadata for the assessments . return : ( osid . Metadata ) - metadata for the assessments * compliance : mandatory - - This method must be implemented . *"""
# Implemented from template for osid.learning.ActivityForm.get_assets_metadata_template
metadata = dict(self._mdata['assessments'])
metadata.update({'existing_assessments_values': self._my_map['assessmentIds']})
return Metadata(**metadata)
def create_sym_log_bar_chart ( self , x_labels , y_values , y_label ) : """Creates bar chart ( log version ) : param x _ labels : Names for each variable : param y _ values : Values of x labels : param y _ label : Label of y axis : return : Sym - log bar chart"""
ax1 = self.create_bar_chart(x_labels, y_values, y_label)
ax1.set_yscale("symlog", linthreshy=1e-12)  # symmetric logarithmic plot (matplotlib scale name is "symlog")
return ax1
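For reference, a self-contained matplotlib sketch of the same symmetric-log bar chart; note that recent matplotlib spells the threshold keyword linthresh rather than linthreshy:

# Standalone sketch; with matplotlib >= 3.3 use linthresh instead of linthreshy.
import matplotlib.pyplot as plt

labels = ["a", "b", "c"]
values = [1e-9, 0.5, 120.0]

fig, ax = plt.subplots()
ax.bar(labels, values)
ax.set_yscale("symlog", linthresh=1e-12)  # linear near zero, logarithmic elsewhere
ax.set_ylabel("value")
plt.show()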
def cell_normalize ( data ) : """Returns the data where the expression is normalized so that the total count per cell is equal ."""
if sparse.issparse(data):
    data = sparse.csc_matrix(data.astype(float))
    # normalize in-place
    sparse_cell_normalize(data.data,
                          data.indices,
                          data.indptr,
                          data.shape[1],
                          data.shape[0])
    return data
data_norm = data.astype(float)
total_umis = []
for i in range(data.shape[1]):
    di = data_norm[:, i]
    total_umis.append(di.sum())
    di /= total_umis[i]
med = np.median(total_umis)
data_norm *= med
return data_norm
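A tiny dense-matrix illustration of the same idea (genes in rows, cells in columns; the numbers are arbitrary):

# Dense-only illustration of median-scaled per-cell normalization.
import numpy as np

data = np.array([[2., 0., 4.],
                 [2., 5., 4.]])    # 2 genes x 3 cells
totals = data.sum(axis=0)          # counts per cell: [4., 5., 8.]
normalized = data / totals         # every column now sums to 1
normalized *= np.median(totals)    # rescale so each cell sums to the median (5.)
print(normalized.sum(axis=0))      # [5., 5., 5.]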
def generate_properties(self):
    """Means object with defined keys.

    .. code-block:: python

        'properties': {
            'key': {'type': 'number'},
        }

    A valid object contains a key called 'key' whose value is any number.
    """
self.create_variable_is_dict()
with self.l('if {variable}_is_dict:'):
    self.create_variable_keys()
    for key, prop_definition in self._definition['properties'].items():
        key_name = re.sub(r'($[^a-zA-Z]|[^a-zA-Z0-9])', '', key)
        with self.l('if "{}" in {variable}_keys:', key):
            self.l('{variable}_keys.remove("{}")', key)
            self.l('{variable}__{0} = {variable}["{1}"]', key_name, key)
            self.generate_func_code_block(
                prop_definition,
                '{}__{}'.format(self._variable, key_name),
                '{}.{}'.format(self._variable_name, key),
            )
        if isinstance(prop_definition, dict) and 'default' in prop_definition:
            self.l('else: {variable}["{}"] = {}', key, repr(prop_definition['default']))
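To see the kind of schema this branch handles end to end, here is a hedged sketch using a fastjsonschema-style compile() entry point (the library and its default-filling behaviour are assumptions based on the generated 'else' branch above):

# Hedged sketch: assumes a fastjsonschema-style compile() API that fills defaults.
import fastjsonschema

validate = fastjsonschema.compile({
    'type': 'object',
    'properties': {
        'key': {'type': 'number'},
        'name': {'type': 'string', 'default': 'anonymous'},
    },
})

print(validate({'key': 42}))   # expected: {'key': 42, 'name': 'anonymous'}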
def get_parents_for ( self , child_ids ) : """Returns parent aliases for a list of child IDs . : param list child _ ids : : rtype : set : return : a set of parent aliases"""
self._cache_init()
parent_candidates = []
for parent, children in self._cache_get_entry(self.CACHE_NAME_PARENTS).items():
    if set(children).intersection(child_ids):
        parent_candidates.append(parent)
return set(parent_candidates)
def add_reference ( self , reftype : str , label : str , target ) : """Add reference object in references under rtype / label = target"""
# The self.data[reftype] dict springs into being during the
# register_references event handler at startup, which looks in the
# kb registry for all registered reference names.
self.data[reftype][label] = target