Columns:
content: string, lengths 35 to 416k
sha1: string, length 40
id: int64, 0 to 710k
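The rows below follow this content / sha1 / id schema, so they can be loaded and inspected with the `datasets` library. The sketch below is a minimal example only: the repository id `user/python-functions` is a hypothetical placeholder, and the idea that `sha1` is the SHA-1 hex digest of `content` is an assumption, not something this page confirms.

```python
import hashlib

from datasets import load_dataset

# Hypothetical repository id; substitute the actual dataset path or local files.
ds = load_dataset("user/python-functions", split="train")

print(ds.column_names)  # expected: ['content', 'sha1', 'id']

# Peek at a few rows and, assuming sha1 is the SHA-1 of the content string,
# check whether the stored hash matches a freshly computed one.
for row in ds.select(range(3)):
    digest = hashlib.sha1(row["content"].encode("utf-8")).hexdigest()
    print(row["id"], row["sha1"], digest == row["sha1"], row["content"][:60])
```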
def count_char(char, word): """Counts the characters in word""" return word.count(char) # If you want to do it manually try a for loop
363222f4876c5a574a84fe14214760c505e920b0
0
def get_sos_model(sample_narratives): """Return sample sos_model """ return { 'name': 'energy', 'description': "A system of systems model which encapsulates " "the future supply and demand of energy for the UK", 'scenarios': [ 'population' ], 'narratives': sample_narratives, 'sector_models': [ 'energy_demand', 'energy_supply' ], 'scenario_dependencies': [ { 'source': 'population', 'source_output': 'population_count', 'sink': 'energy_demand', 'sink_input': 'population' } ], 'model_dependencies': [ { 'source': 'energy_demand', 'source_output': 'gas_demand', 'sink': 'energy_supply', 'sink_input': 'natural_gas_demand' } ] }
885c251b8bbda2ebc5a950b083faed35c58f41cc
1
def check_context(model, sentence, company_name): """ Check if the company name in the sentence is actually a company name. :param model: the spacy model. :param sentence: the sentence to be analysed. :param company_name: the name of the company. :return: True if the company name means a company/product. """ doc = model(sentence) for t in doc.ents: if t.lower_ == company_name: #if company name is called if t.label_ == "ORG" or t.label_ == "PRODUCT": #check they actually mean the company return True return False
993c27924844b7cd0c570a9ce5fa404ef6d29b97
4
def getItemSize(dataType): """ Gets the size of an object depending on its data type name Args: dataType (String): Data type of the object Returns: (Integer): Size of the object """ # If it's a vector 6, its size is 6 if dataType.startswith("VECTOR6"): return 6 # If it's a vector 3, its size is 3 elif dataType.startswith("VECTOR3"): return 3 # Else its size is only 1 return 1
2ab9c83bef56cd8dbe56c558d123e24c9da6eb0e
5
import time def FloatDateTime(): """Returns datetime stamp in Miro's REV_DATETIME format as a float, e.g. 20110731.123456""" return float(time.strftime('%Y%m%d.%H%M%S', time.localtime()))
115aef9104124774692af1ba62a48a5423b9dc2a
6
import json def read_prediction_dependencies(pred_file): """ Reads in the predictions from the parser's output file. Returns: two string lists with the predicted heads and dependency names, respectively. """ heads = [] deps = [] with open(pred_file, encoding="utf-8") as f: for line in f: j = json.loads(line) heads.extend(j["predicted_heads"]) deps.extend(j["predicted_dependencies"]) heads = list(map(str, heads)) return heads, deps
c8280c861d998d0574fb831cd9738b733fd53388
7
def CleanGrant(grant): """Returns a "cleaned" grant by properly rounding the internal data. This ensures that 2 grants coming from 2 different sources are actually identical, irrespective of the logging/storage precision used. """ return grant._replace(latitude=round(grant.latitude, 6), longitude=round(grant.longitude, 6), height_agl=round(grant.height_agl, 2), max_eirp=round(grant.max_eirp, 3))
648bb0a76f9a7cfe355ee8ffced324eb6ceb601e
8
def trim(str): """Remove multiple spaces""" return ' '.join(str.strip().split())
ed98f521c1cea24552959aa334ffb0c314b9f112
9
import torch def get_optimizer(lr): """ Specify an optimizer and its parameters. Returns ------- tuple(torch.optim.Optimizer, dict) The optimizer class and the dictionary of kwargs that should be passed in to the optimizer constructor. """ return (torch.optim.SGD, {"lr": lr, "weight_decay": 1e-6, "momentum": 0.9})
213090258414059f7a01bd40ecd7ef04158d60e5
10
import codecs import csv def open_csv(path): """open_csv.""" _lines = [] with codecs.open(path, encoding='utf8') as fs: for line in csv.reader(fs): if len(line) == 3: _lines.append(line) return _lines
501ff4a2a1a242439c21d3131cecd407dcfa36af
12
def merge(intervals: list[list[int]]) -> list[list[int]]: """Generate a new schedule with non-overlapping intervals by merging intervals which overlap Complexity: n = len(intervals) Time: O(nlogn) for the initial sort Space: O(n) for the worst case of no overlapping intervals Examples: >>> merge(intervals=[[1,3],[2,6],[8,10],[15,18]]) [[1, 6], [8, 10], [15, 18]] >>> merge(intervals=[[1,4],[4,5]]) [[1, 5]] >>> merge(intervals=[[1,4]]) [[1, 4]] """ ## EDGE CASES ## if len(intervals) <= 1: return intervals """ALGORITHM""" ## INITIALIZE VARS ## intervals.sort(key=lambda k: k[0]) # sort on start times # DS's/res merged_intervals = [] # MERGE INTERVALS prev_interval, remaining_intervals = intervals[0], intervals[1:] for curr_interval in remaining_intervals: # if prev interval end >= curr interval start if prev_interval[1] >= curr_interval[0]: # adjust new prev interval prev_interval[1] = max(prev_interval[1], curr_interval[1]) else: merged_intervals.append(prev_interval) prev_interval = curr_interval merged_intervals.append(prev_interval) return merged_intervals
49a9d7d461ba67ec3b5f839331c2a13d9fc068d0
13
import math def distance(a, b): """ Computes the Euclidean distance between two points. :param a: the first point as an (x, y) pair :param b: the second point as an (x, y) pair :return: the Euclidean distance between a and b """ x = a[0] - b[0] y = a[1] - b[1] return math.sqrt(x ** 2 + y ** 2)
60b637771cd215a4cf83761a142fb6fdeb84d96e
16
def rsquared_adj(r, nobs, df_res, has_constant=True): """ Compute the adjusted R^2, coefficient of determination. Args: r (float): rsquared value nobs (int): number of observations the model was fit on df_res (int): degrees of freedom of the residuals (nobs - number of model params) has_constant (bool): whether the fitted model included a constant (intercept) Returns: float: adjusted coefficient of determination """ if has_constant: return 1.0 - (nobs - 1) / df_res * (1.0 - r) else: return 1.0 - nobs / df_res * (1.0 - r)
8d466437db7ec9de9bc7ee1d9d50a3355479209d
17
def _compile_unit(i): """Append gas to unit and update CO2e for pint/iam-unit compatibility""" if " equivalent" in i["unit"]: return i["unit"].replace("CO2 equivalent", "CO2e") if i["unit"] in ["kt", "t"]: return " ".join([i["unit"], i["gas"]]) else: return i["unit"]
0692167e95159d08b306a241baf4eadefdc29b35
18
import math def Calculo_por_etapas(Diccionario): """Calculo de la hornilla por etapas""" Lista_Contenido=[] Lista_columnas=[] #Normalización de la capacidad de la hornilla #Mem_dias=float(Diccionario['¿Cada cuantos días quiere moler? (días)']) #Mem_Temp=Normalizar_Capacidad(float(Diccionario['Capacidad estimada de la hornilla']),Mem_dias) #print(float(Diccionario['Capacidad estimada de la hornilla'])) #print(Mem_Temp) Etapas=int(float(Diccionario['Etapas']))#Mem_Temp[1] #Etapas=12 #Saturador "minimo son dos etapas" if (Etapas>2): Factor_Division=Etapas-2 else: Factor_Division=2 Etapas=2 #Caracteristicas de las celdas de cada columna (Lista_columnas) #Fila 0 concentración de solidos inicial #Fila 1 Concentración de solidos final #Fila 2 Concentración promedio #Fila 3 Masa de jugo de entrada #Fila 4 Calor Especifico P Cte jugo #Fila 5 Densidad del Jugo #Fila 6 Volumen de jugo kg #Fila 7 Volumen de jugo en L #Fila 8 Temperatura de Entrada #Fila 9 Temperatura de Salida #Fila 10 Entalpia de Vaporización #Fila 11 Masa de Agua a Evaporar #Fila 12 Calor Nece Calc por Etapa for i in range(13): for j in range (Etapas): Lista_columnas.append(float(i+j)) Lista_Contenido.append(Lista_columnas) Lista_columnas=[] Lista_Contenido[0][0]=float(Diccionario['CSS del jugo pos-evaporación']) #Concentracion_solidos_inicial (CSS02) Lista_Contenido[1][0]=float(Diccionario['CSS panela']) #Concentracion_solidos_final (CSSF1) Lista_Contenido[0][Etapas-1]=float(Diccionario['CSS del jugo de Caña']) #Concentracion_solidos_inicial (CSS01) Lista_Contenido[1][Etapas-1]=float(Diccionario['CSS del jugo clarificado']) #Concentracion_solidos_final (CSSF1) if(Etapas>2): ite=0 for i in range(Etapas-2,0,-1): Lista_Contenido[0][i]=Lista_Contenido[1][i+1] if(ite==0): Lista_Contenido[1][i]=((Lista_Contenido[0][0]-Lista_Contenido[0][i])/Factor_Division)+Lista_Contenido[0][i] ite=ite+1 else: Lista_Contenido[1][i]=((Lista_Contenido[0][0]-Lista_Contenido[0][Etapas-2])/Factor_Division)+Lista_Contenido[0][i] for i in range(Etapas-1,-1,-1): #Concentración promedio=(Concentracion_solidos_inicial+Concentracion_solidos_final)/2 Lista_Contenido[2][i]=(Lista_Contenido[0][i]+Lista_Contenido[1][i])/2 if(i==Etapas-1): #Masa de jugo de entrada Lista_Contenido[3][i]=float(Diccionario['A clarificación']) else: #Masa de jugo de entrada=(Masa de jugo etapa anterior*CCS inicial etapa anterior)/CCS Final etapa anterior Lista_Contenido[3][i]=Lista_Contenido[3][i+1]*Lista_Contenido[0][i+1]/Lista_Contenido[1][i+1] #Calor_Especifico_P_Cte_jugo=4.18*(1-(0.006*Concetracion_promedio)) Lista_Contenido[4][i]=4.18*(1-(0.006*Lista_Contenido[2][i])) #Densidad_del_Jugo=997.39+(4.46*Concetracion_promedio) Lista_Contenido[5][i]=997.39+(4.46*Lista_Contenido[2][i]) #Volumen_jugo=Masa_jugo_de_entrada/Densidad_del_Jugo Lista_Contenido[6][i]=Lista_Contenido[3][i]/Lista_Contenido[5][i] #Volumen_jugo_L=Volumen_jugo*1000 Lista_Contenido[7][i]=Lista_Contenido[6][i]*1000.0 if(i==Etapas-1): #Temperatura_Entrada=Temperatura ambiente Lista_Contenido[8][i]=float(Diccionario['Temperatura del ambiente']) else: #Temperatura_Entrada=Temperatura_ebullición_agua+0.2209*math.exp(0.0557*Concentracion_solidos_inicial) Lista_Contenido[8][i]=Lista_Contenido[9][i+1] #Temperatura_Salida=G37+0.2209*math.exp(0.0557*Concentracion_solidos_final) Lista_Contenido[9][i]=float(Diccionario['Temperatura de ebullición del agua'])+0.2209*math.exp(0.0557*Lista_Contenido[1][i]) #Entalpia_Vaporizacion=(2492.9-(2.0523*Temperatura_Entrada))-(0.0030752*(Temperatura_Entrada**2)) 
Lista_Contenido[10][i]=(2492.9-(2.0523*Lista_Contenido[8][i]))-(0.0030752*(Lista_Contenido[8][i]**2)) #Masa_Agua_Evaporar=Masa_jugo_de_entrada-(Masa_jugo_de_entrada*Concentracion_solidos_inicial/Concentracion_solidos_final) Lista_Contenido[11][i]=Lista_Contenido[3][i]-(Lista_Contenido[3][i]*Lista_Contenido[0][i]/Lista_Contenido[1][i]) #Calor_por_Etapa=(Masa_jugo_de_entrada*Calor_Especifico_P_Cte_jugo*(Temperatura_Salida-Temperatura_Entrada)+Masa_Agua_Evaporar*Entalpia_Vaporizacion)/3600 Lista_Contenido[12][i]=(Lista_Contenido[3][i]*Lista_Contenido[4][i]*(Lista_Contenido[9][i]-Lista_Contenido[8][i])+Lista_Contenido[11][i]*Lista_Contenido[10][i])/3600.0 #Fijar decimales en 3 for j in range (13): for i in range (Etapas): Lista_Contenido[j][i]=round(Lista_Contenido[j][i],3) #Cambiar la salida o posicion de la paila de punteo a la paila 3 o 4 Lista_contenido_2=[] L_aux=[] for i in Lista_Contenido: inio=3 if (Etapas!=7): L_aux.append(i[2]) L_aux.append(i[1]) L_aux.append(i[0]) inio=3 else: L_aux.append(i[3]) L_aux.append(i[2]) L_aux.append(i[1]) L_aux.append(i[0]) inio=4 for t in range(inio,len(i)): L_aux.append(i[t]) Lista_contenido_2.append(L_aux) L_aux=[] Lista_Contenido=Lista_contenido_2 Etiquetas=[ 'Concentracion de Solidos Inicial [ºBrix]', 'Concentracion de Solidos Final [ºBrix]', 'Concentracion de Solidos Promedio [ºBrix]', 'Masa de Jugo Entrada [Kg]', 'Calor Especifico P Cte jugo [kJ/Kg °C]', 'Densidad del Jugo [kg/m3]', 'Volumen de jugo [m^3/kg]', 'Volumen de jugo [L]', 'Temperatura de Entrada [ºC]', 'Temperatura de Salida [ºC]', 'Entalpia de Vaporización [kJ/kg]', 'Masa de Agua a Evaporar [kg]', 'Calor Nece Calc por Etapa [kW]' ] Dict_aux=dict(zip(Etiquetas,Lista_Contenido)) Dict_aux_2=dict(zip(['Etapas'],[Etapas])) Dict_aux.update(Dict_aux_2) return Dict_aux
c3b531e1b3fbb3491a9d7a5521c216e5ce5c5b38
19
from collections import defaultdict def concline_generator(matches, idxs, df, metadata, add_meta, category, fname, preserve_case=False): """ Get all conclines :param matches: a list of formatted matches :param idxs: their (sent, word) idx """ conc_res = [] # potential speedup: turn idxs into dict mdict = defaultdict(list) # if remaking idxs here, don't need to do it earlier idxs = list(matches.index) for mid, (s, i) in zip(matches, idxs): #for s, i in matches: mdict[s].append((i, mid)) # shorten df to just relevant sents to save lookup time df = df.loc[list(mdict.keys())] # don't look up the same sentence multiple times for s, tup in sorted(mdict.items()): sent = df.loc[s] if not preserve_case: sent = sent.str.lower() meta = metadata[s] sname = meta.get('speaker', 'none') for i, mid in tup: if not preserve_case: mid = mid.lower() ix = '%d,%d' % (s, i) start = ' '.join(sent.loc[:i-1].values) end = ' '.join(sent.loc[i+1:].values) lin = [ix, category, fname, sname, start, mid, end] if add_meta: for k, v in sorted(meta.items()): if k in ['speaker', 'parse', 'sent_id']: continue if isinstance(add_meta, list): if k in add_meta: lin.append(v) elif add_meta is True: lin.append(v) conc_res.append(lin) return conc_res
b0f9cc9039f78996b38ed87f5faf3b725226a7dd
20
def map_string(affix_string: str, punctuation: str, whitespace_only: bool = False) -> str: """Turn affix string into type char representation. Types are 'w' for non-whitespace char, and 's' for whitespace char. :param affix_string: a string :type: str :param punctuation: the set of characters to treat as punctuation :type punctuation: str :param whitespace_only: whether to treat only whitespace as word boundary or also include (some) punctuation :type whitespace_only: bool :return: the type char representation :rtype: str """ if whitespace_only: return "".join(["s" if char == " " else "w" for char in affix_string]) else: return "".join(["s" if char == " " or char in punctuation else "w" for char in affix_string])
6258f9e57a9081a1c791ec7c22f855079a99cdfb
21
def modularity(modules, G, L): """ Calculate modularity. modules = list of nx.Graph objects G = graph L = num of links """ N_m = len(modules) M = 0.0 for s in range(N_m): l_s = 0.0 d_s = 0 for i in modules[s]: l_s += float(modules[s].degree(i)) d_s += float(G.degree(i)) M += (l_s / L) - (d_s / (2.0 * L))**2 return M
fc818a1f8cda14c04f90c94b699853465da11797
22
import re def extract_digits_from_end_of_string(input_string): """ Gets digits at the end of a string :param input_string: str :return: int """ result = re.search(r'(\d+)$', input_string) if result is not None: return int(result.group(0))
aae771a051a228c53c36062437de65ae4aa15d44
23
import torch def move_bdim_to_front(x, result_ndim=None): """ Returns a tensor with a batch dimension at the front. If a batch dimension already exists, move it. Otherwise, create a new batch dimension at the front. If `result_ndim` is not None, ensure that the resulting tensor has rank equal to `result_ndim`. """ x_dim = len(x.shape) x_bdim = x.bdim if x_bdim is None: x = torch.unsqueeze(x, 0) else: x = torch.movedim(x, x_bdim, 0) if result_ndim is None: return x diff = result_ndim - x_dim - (x_bdim is None) for _ in range(diff): x = torch.unsqueeze(x, 1) return x
313a1837b6c3b451cebacaa7815f2631dfa387e5
24
import os def rawmap(k2i, file): """ Map index to raw data from file Arguments k2i: key-to-index map file: file containing raw data map Returns raw: index-to-raw map if file exists else identity map """ raw = {0: ''} if os.path.isfile(file): with open(file, "r") as f: for line in f.readlines(): line = line.split("\t") k, rw = line[0].strip(), line[1].strip() raw[k2i[k]] = rw else: for k in k2i: raw[k2i[k]] = k2i[k] return raw
2f9c9d9ef8c4606eef61feb03cbd01c7ba88f716
25
import random def out_flag(): """Either -o or --outfile""" return '-o' if random.randint(0, 1) else '--outfile'
129e7a493618ca7457fab271a396023807fd2f38
26
def replace_unwanted_xml_attrs(body): """ Method to return the transformed string after escaping the XML special characters (&, <, >) in the given xml body :param body: the xml body to escape :return: the escaped string """ return body.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
6f7dde06590bc8b8ad8477e7cee284ae38568b42
28
def valid_template(template): """Is this a template that returns a valid URL?""" if template.name.lower() == "google books" and ( template.has("plainurl") or template.has("plain-url") ): return True if template.name.lower() == "billboardurlbyname": return True return False
51191d6b60af23265dc6cb4ff87c520e80bac59f
29
def get_choice(): """ Gets and returns choice for mode to use when running minimax """ choice = input( "Please enter a number (1 - 4)\n 1. Both players use minimax correctly at every turn\n 2. The starting player (X) is an expert and the opponent (0) only has a 50% chance to use minimax\n\t at each turn\n 3. The starting player (X) only has a 50% chance to use minimax at each turn and the opponent (0)\n\t is an expert.\n 4. Both players only have a 50% chance to use minimax at each turn.\n" ) while (choice != '1' and choice != '2' and choice != '3' and choice != '4'): choice = input("Not a choice. Go agane: (1 - 4)\n") return choice
d79278acc9bc0a36480c1067b81e64c5512dd586
30
import argparse def validate_esc(esc): """Validate esc options\n Give an error if the characters aren't '*?[]' """ esc = esc.replace("]", "[") argset = set(esc) charset = {"*", "?", "["} if argset.difference(charset): err = "input character is not '*?[]'" raise argparse.ArgumentTypeError(err) return "".join(argset)
26e30eb8a5a9d62fc311d0c9b41adfbe2fd5f6cd
34
import math def tanD(angle): """ angle is the measure of an angle in degrees ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Returns the tangent of angle. """ return math.tan(math.radians(angle))
641e564fefcdf6d1b804507b672e0e6476144b48
36
def offset_zero_by_one(feature): """Sets the start coordinate to 1 if it is actually 0. Required for the flanking to work properly in those cases. """ if feature.start == 0: feature.start += 1 return feature
3c8fb9754bde7b7efaa5d092e8239aeb099e26a4
37
from collections import OrderedDict def build_pathmatcher(name, defaultServiceUrl): """ This builds and returns a full pathMatcher entry, for appending to an existing URL map. Parameters: name: The name of the pathMatcher. defaultServiceUrl: Denotes the URL requests should go to if none of the path patterns match. """ matcher = OrderedDict() matcher['defaultService'] = defaultServiceUrl matcher['name'] = name return matcher
e21a79d51b41bd393a8fa2e254c6db7cf61bd441
38
import re def add_whitespace(c_fn): """ Add two spaces between all tokens of a C function """ tok = re.compile(r'[a-zA-Z0-9_]+|\*|\(|\)|\,|\[|\]') return ' ' + ' '.join(tok.findall(c_fn)) + ' '
57d59a5956c3914fa01587b6262e7d4348d77446
39
def mask_array(array, idx, n_behind, n_ahead): """Return a copy of the window of array around idx, clipped to the array bounds. Args: array (array-like): the array to slice idx (int): the reference index n_behind (int): number of elements to include before idx n_ahead (int): number of elements to include after idx Returns: array-like: copy of array[idx - n_behind : idx + n_ahead + 1], clipped to valid indices """ first = max(0, idx - n_behind) last = min(idx + n_ahead + 1, len(array)) array_masked = array[first:last].copy() return array_masked
04781f75bd1b0cae5b690759b5da475f59a43fe8
40
def check_min_sample_periods(X, time_column, min_sample_periods): """ Check if all periods contained in a dataframe for a certain time_column contain at least min_sample_periods examples. """ return (X[time_column].value_counts() >= min_sample_periods).prod()
074c196a169d65582dbb32cc57c86c82ce4cb9c9
41
def quote_ident(val): """ This method returns a new string replacing " with "", and adding a " at the start and end of the string. """ return '"' + val.replace('"', '""') + '"'
452058861fb5be138db3599755fbf3c6d715c0a8
42
def webpage_attribute_getter(attr): """ Helper function for defining getters for web_page attributes, e.g. ``get_foo_enabled = webpage_attribute_getter("foo")`` returns a value of ``webpage.foo`` attribute. """ def _getter(self): return getattr(self.web_page, attr) return _getter
3626f8e2d8c6fb7fbb490dc72f796599cdbc874e
43
def party_name_from_key(party_key): """returns the relevant party name""" relevant_parties = {0: 'Alternativet', 1: 'Dansk Folkeparti', 2: 'Det Konservative Folkeparti', 3: 'Enhedslisten - De Rød-Grønne', 4: 'Liberal Alliance', 5: 'Nye Borgerlige', 6: 'Radikale Venstre', 7: 'SF - Socialistisk Folkeparti', 8: 'Socialdemokratiet', 9: 'Venstre, Danmarks Liberale Parti'} return relevant_parties[party_key]
86041235738017ae3dbd2a5042c5038c0a3ae786
44
def assembleR(X, W, fct): """Assemble the matrix M = W * fct(X). """ M = W * fct(X) return M
c792da453b981cc3974e32aa353124f5a5e9c46d
45
import uuid def make_uuid(value): """Converts a value into a python uuid object.""" if isinstance(value, uuid.UUID): return value return uuid.UUID(value)
b65b5739151d84bedd39bc994441d1daa33d1b51
46
import re from pathlib import Path import json def parse_json_with_comments(pathlike): """ Parse a JSON file after removing any comments. Comments can use either ``//`` for single-line comments or ``/* ... */`` for multi-line comments. The input filepath can be a string or ``pathlib.Path``. Parameters ---------- pathlike : str or os.PathLike Path to the input JSON file either as a string or as a ``pathlib.Path`` object. Returns ------- obj : dict JSON object representing the input file. Note ---- This code was adapted from: https://web.archive.org/web/20150520154859/http://www.lifl.fr/~riquetd/parse-a-json-file-with-comments.html """ # Regular expression to identify comments comment_re = re.compile(r'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?', re.DOTALL | re.MULTILINE) # if we passed in a string, convert it to a Path if isinstance(pathlike, str): pathlike = Path(pathlike) with open(pathlike, 'r') as file_buff: content = ''.join(file_buff.readlines()) # Looking for comments match = comment_re.search(content) while match: # single line comment content = content[:match.start()] + content[match.end():] match = comment_re.search(content) # Return JSON object config = json.loads(content) return config
e79a461c210879d66b699fe49e84d0d2c58a964b
47
def write_velocity_files(U_25_RHS_str, U_50_RHS_str, U_100_RHS_str, U_125_RHS_str, U_150_RHS_str, U_25_LHS_str, U_50_LHS_str, U_100_LHS_str, U_125_LHS_str, U_150_LHS_str, path_0_100, path_0_125, path_0_150, path_0_25, path_0_50): """Create the details file for the surrounding cases, and write the velocities in line two""" fname = "details" # Filename file_25_path = path_0_25 file_50_path = path_0_50 file_100_path = path_0_100 file_125_path = path_0_125 file_150_path = path_0_150 details_file_25 = file_25_path + fname details_file_50 = file_50_path + fname details_file_100 = file_100_path + fname details_file_125 = file_125_path + fname details_file_150 = file_150_path + fname with open(details_file_25, 'w+') as f: f.write('Velocity' +'\n') f.write(U_25_RHS_str) with open(details_file_50, 'w+') as f: f.write('Velocity' +'\n') f.write(U_50_RHS_str) with open(details_file_100, 'w+') as f: f.write('Velocity' +'\n') f.write(U_100_RHS_str) with open(details_file_125, 'w+') as f: f.write('Velocity' +'\n') f.write(U_125_RHS_str) with open(details_file_150, 'w+') as f: f.write('Velocity' +'\n') f.write(U_150_RHS_str) return details_file_25, details_file_50, details_file_100, details_file_125, details_file_150
6c4af67ea659c09669f7294ec453db5e4e9fb9df
48
def get_school_total_students(school_id, aug_school_info): """ Gets total number of students associated with a school. Args: school_id (str): NCES ID of target school (e.g. '0100005'). aug_school_info (pandas.DataFrame): Target augmented school information (as formatted by `auxiliary.data_handler.DataHandler`). Returns: int: Single number comprising school-level data. """ return int(aug_school_info.loc[school_id]["total_students"])
d0d2ea36a2e3f4b47992aea9cc0c18c5ba7e0ff3
51
def get_version(): # noqa: E501 """API version The API version # noqa: E501 :rtype: str """ return '1.0.0'
75df6627bb2aaec205a0679d86c190d7b861baf5
52
import uuid def get_uuid_from_str(input_id: str) -> str: """ Returns a uuid3 string representation generated from an input string. :param input_id: the input string :return: uuid3 string representation """ return str(uuid.uuid3(uuid.NAMESPACE_DNS, input_id))
51ce9ceab7c4f9d63d45fbee93286711bcba3093
53
def createList(value, n): """ @param value: value to initialize the list @param n: list size to be created @return: size n list initialized to value """ return [value for i in range (n)]
ff419e6c816f9b916a156e21c68fd66b36de9cfb
54
def heur(puzzle, item_total_calc, total_calc): """ Heuristic template that provides the current and target position for each number and the total function. Parameters: puzzle - the puzzle item_total_calc - takes 4 parameters: current row, target row, current col, target col. Returns int. total_calc - takes 1 parameter, the sum of item_total_calc over all entries, and returns int. This is the value of the heuristic function """ t = 0 for row in range(3): for col in range(3): val = puzzle.peek(row, col) - 1 target_col = val % 3 target_row = val // 3 # account for 0 as blank if target_row < 0: target_row = 2 t += item_total_calc(row, target_row, col, target_col) return total_calc(t)
bed67110858733a20b89bc1aacd6c5dc3ea04e13
56
def make_argparse_help_safe(s): """Make strings safe for argparse's help. Argparse supports %{} - templates. This is sometimes not needed. Make user supplied strings safe for this. """ return s.replace('%', '%%').replace('%%%', '%%')
3a1e6e072a8307df884e39b5b3a0218678d08462
57
def create_pysm_commands( mapfile, nside, bandcenter_ghz, bandwidth_ghz, beam_arcmin, coord, mpi_launch, mpi_procs, mpi_nodes, ): """ Return lines of shell code to generate the precomputed input sky map. """ mpistr = "{}".format(mpi_launch) if mpi_procs != "": mpistr = "{} {} 1".format(mpistr, mpi_procs) if mpi_nodes != "": mpistr = "{} {} 1".format(mpistr, mpi_nodes) outstr = "# Create sky model\n" outstr = '{}if [ ! -e "{}" ]; then\n'.format(outstr, mapfile) outstr = '{} echo "Creating sky model {} ..."\n'.format(outstr, mapfile) outstr = '{} {} ./pysm_sky.py --output "{}" --nside {} --bandcenter_ghz {} --bandwidth_ghz {} --beam_arcmin {} --coord {}\n'.format( outstr, mpistr, mapfile, nside, bandcenter_ghz, bandwidth_ghz, beam_arcmin, coord, ) outstr = "{}fi\n".format(outstr) outstr = "{}\n".format(outstr) return outstr
f0528968096f41a291a369477d8e2071f4b52339
58
import math def total_elastic_cross_section_browning1994_cm2(atomic_number, energy_keV): """ From browning1994 Valid in the range 100 eV to 30 keV for elements 1 to 92. """ Z = atomic_number E = energy_keV factor = 3.0e-18 power_z = math.pow(Z, 1.7) power_e = math.pow(E, 0.5) numerator = factor*power_z denominator = E + 0.005 * power_z * power_e + 0.0007 * Z * Z / power_e cross_section_cm2 = numerator/denominator return cross_section_cm2
bf12a49e3aba07a44e44bfb6df87212745fd5ed3
60
from typing import Tuple def decimal_to_boolean_list(num: int, padding: int = 0) -> Tuple[bool, ...]: """ Convert a decimal number into a tuple of booleans, representing its binary value. """ # Convert the decimal into binary binary = bin(num).replace('0b', '').zfill(padding) # Return a tuple of booleans, one for each element of the binary number (it's either '0' or '1' so we can convert # directly to boolean) return tuple(char == '1' for char in binary)
c13831214faece847960089f781cc1c6442205ec
62
def tpack(text, width=100): """Pack a list of words into lines, so long as each line (including intervening spaces) is no longer than _width_""" lines = [text[0]] for word in text[1:]: if len(lines[-1]) + 1 + len(word) <= width: lines[-1] += (' ' + word) else: lines += [word] return lines
e1b1b54a528c8dc2142a750156d3db1f754b4268
63
import os def _get_embedding_filename(base_dir, split_name, step): """Create the filename for embeddings.""" return os.path.join(base_dir, str(step), f'{split_name}-embeddings.tfrecord')
d6e4ca535b462ddf120ee77924c1bf2f3c662f24
64
import argparse def parse_args(): """Build file label list""" parser = argparse.ArgumentParser(description='Build file label list') parser.add_argument('data_path', type=str, help='root directory for the dataset') parser.add_argument('dataset', type=str, choices=[ 'ucf101', 'hmdb51', 'kinetics400', 'kinetics600', 'kinetics700', 'sthv1', 'sthv2'], help='name of the dataset') parser.add_argument('--ann_root', type=str, default='annotation') parser.add_argument('--out_root', type=str, default='../datalist') parser.add_argument('--phase', type=str, default='train', choices=['train', 'val']) parser.add_argument('--level', type=int, default=2, choices=[1, 2]) parser.add_argument('--source', type=str, default='rgb', choices=['rgb', 'flow', 'video']) parser.add_argument('--split', type=int, default=1, choices=[1, 2, 3]) args = parser.parse_args() return args
267d6fbe34e48525dfa50987fb3ce674ec28d381
66
import os def is_directory(dir_path): """Validates that the argument passed into 'argparse' is a directory.""" if not os.path.isdir(dir_path): raise ValueError('Path is not a directory: %s' % dir_path) return dir_path
57f8407eb02ae0c035f14d139a41a424d36df378
67
def is_palindrome(s: str) -> bool: """Return whether a string is a palindrome This is as efficient as you can get when computing whether a string is a palindrome. It runs in O(n) time and O(1) space. """ if len(s) <= 1: return True i = 0 j = len(s) - 1 while i < j: if s[i] != s[j]: return False i += 1 j -= 1 return True
6d3001486fe3603a17e72861e3bdea495cd675c1
68
def my_histogram(gray, bins): """ Pixel values have to be within the bins range, otherwise an index-out-of-range error occurs; for example, if the 400th pixel has value 70 but bins are -> [0...40], then histogram[70] raises an IndexError """ histogram = [0 for i in bins] for i in range(gray.shape[0]): for j in range(gray.shape[1]): histogram[gray[i][j]] = histogram[gray[i][j]] + 1 return histogram
a2e774fb7b2249325191b20e6fa08847e38211c2
69
def reverse(password, position_x, position_y): """Reverse from position_x to position_y in password.""" password_slice = password[position_x:position_y + 1] password[position_x:position_y + 1] = password_slice[::-1] return password
46fec2c6b9c02d8efa71d53451974e46cbe68102
70
def GetBoolValueFromString(s): """Returns True for true/1 strings, and False for false/0, None otherwise.""" if s and s.lower() == 'true' or s == '1': return True elif s and s.lower() == 'false' or s == '0': return False else: return None
d6ef53e837fc825a32e073e3a86185093dd1d037
71
def get_typical_qualifications(cfg): """ Create a qualification list to filter to just workers with: - 98% or higher approval rate - 500 or more accepted HITs - Location USA :param cfg: :return: """ if not cfg['hit_type'].getboolean('apply_qualification'): return [] qualification_requirements=[ { # Worker_NumberHITsApproved 'QualificationTypeId': '00000000000000000040', 'Comparator': 'GreaterThanOrEqualTo', 'IntegerValues': [ 500, ], 'RequiredToPreview': False, 'ActionsGuarded': 'Accept' }, { # Worker_PercentAssignmentsApproved 'QualificationTypeId': '000000000000000000L0', 'Comparator': 'GreaterThanOrEqualTo', 'IntegerValues': [ 98, ], 'RequiredToPreview': False, 'ActionsGuarded': 'Accept' }, { # Worker_Locale 'QualificationTypeId': '00000000000000000071', 'Comparator': 'EqualTo', 'LocaleValues': [ { 'Country':"US" } ], 'RequiredToPreview': False, 'ActionsGuarded': 'Accept' }, ] return qualification_requirements
4cfad92d7c2587e2fce1caeac032a69f87c70c01
73
def search_sorted(array, value): """ Searches the given sorted array for the given value using a binary search which should execute in O(log N). array a 1D sorted numerical array value the numerical value to search for returns index of array closest to value returns None if value is outside variable bounds """ def index_to_check(rmin, rmax): return (rmin + rmax) // 2 range_min = 0 range_max_0 = len(array) range_max = range_max_0 numloops = 0 while numloops < 100: numloops += 1 if (range_max - range_min) == 1: if (range_max == range_max_0) or (range_min == 0): raise LookupError("For some reason, range_max-" +\ "range_min reached 1 before " +\ "the element was found. The " +\ "element being searched for " +\ ("was %s. (min,max)" % (value,) +\ ("=%s" % ((range_min, range_max),)))) else: high_index = range_max else: high_index = index_to_check(range_min, range_max) high_val = array[high_index] low_val = array[high_index - 1] if value < low_val: range_max = high_index elif value > high_val: range_min = high_index else: # low_val <= value <= high_val if (2 * (high_val - value)) < (high_val - low_val): return high_index else: return high_index - 1 raise NotImplementedError("Something went wrong! I " +\ "caught a pseudo-infinite loop!")
6eec5fb24cd2da1989b4b80260ce185191d782f1
76
import os import subprocess def call_port(command, arguments): """ This function calls the port executable with the specified parameters, printing the output to stdout. """ command = ["port", command] + arguments if (os.getuid() != 0): print("Using sudo to execute port.") return subprocess.call(["sudo"] + command) else: return subprocess.call(command)
b5209388a03093758b680220600dd99749be5c81
78
import argparse import os def parse_command_line_arguments(): """ Parse the command-line arguments being passed to RMG Py. This uses the :mod:`argparse` module, which ensures that the command-line arguments are sensible, parses them, and returns them. """ parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('-i', '--input', metavar='FILE', help='a predictor training input file') parser.add_argument('-w', '--weights', metavar='H5', help='Saved model weights to continue training on (typically for transfer learning)') parser.add_argument('-d', '--data', metavar='FILE', help='A file specifying which datasets to train on. Alternatively, a space-separated .csv file' ' with SMILES/InChI and output(s) in the first and subsequent columns, respectively.') parser.add_argument('-o', '--out_dir', metavar='DIR', default=os.getcwd(), help='Output directory') parser.add_argument('-n', '--normalize', action='store_true', help='Normalize output based on training set mean and standard deviation') parser.add_argument('--save_tensors_dir', metavar='DIR', help='Location to save tensors on disk (frees up memory)') parser.add_argument('--keep_tensors', action='store_true', help='Do not delete directory containing tensors at end of job') parser.add_argument('-f', '--folds', type=int, default=5, help='number of folds for training') parser.add_argument('-tr', '--train_ratio', type=float, default=0.9, help='Fraction of training data to use for actual training, rest is early-stopping validation') parser.add_argument('-te', '--test_ratio', type=float, default=0.0, help='Fraction of data to use for testing. If loading data from database,' ' test ratios are specified in datasets file') parser.add_argument('-t', '--train_mode', default='full_train', help='train mode: currently support in_house and keras for k-fold cross-validation,' ' and full_train for full training') parser.add_argument('-bs', '--batch_size', type=int, default=1, help='batch training size') parser.add_argument('-lr', '--learning_rate', default='0.0007_30.0', help='two parameters for learning rate') parser.add_argument('-ep', '--nb_epoch', type=int, default=150, help='number of epochs for training') parser.add_argument('-pc', '--patience', type=int, default=10, help='Number of consecutive epochs allowed for loss increase before stopping early.' ' Note: A value of -1 indicates that the best model will NOT be saved!') parser.add_argument('-s', '--seed', type=int, default=0, help='Numpy random seed') return parser.parse_args()
49b0beb15f6f8b2bba903e15dcb0bf43d79ac11e
79
def _tear_down_response(data): """Helper function to extract header, payload and end from received response data.""" response_header = data[2:17] # Below is actually not used response_payload_size = data[18] response_payload = data[19:-2] response_end = data[-2:] return response_header, response_payload, response_end
0c9684c2c054beaff018f85a6775d46202d0095a
81
def stack_atomic_call_middleware(q_dict, q_queryset, logger, middleware): """ Calls the middleware function atomically. * Returns cached queue on error or None """ cached_q_dict = q_dict[:] cached_q_query = q_queryset.all() try: middleware(q_dict, q_queryset, logger) except: logger.error('MM_STACK: Middleware exception occurred in %s' % middleware.__name__) return [cached_q_dict, cached_q_query] return None
9d01c51e19702ba4bc0ae155f0b9b386a4d947b6
82
def isText(node): """ Returns True if the supplied node is free text. """ return node.nodeType == node.TEXT_NODE
150efc016028d0fab4630ad5e754ebaeed0c82c0
83
def enumerate_changes(levels): """Assign a unique integer to each run of identical values. Repeated but non-consecutive values will be assigned different integers. """ return levels.diff().fillna(0).abs().cumsum().astype(int)
4787c0e84d6bca8f6038389e5bebf74317059ed8
84
def get_steps(x, shape): """ Convert a (vocab_size, steps * batch_size) array into a [(vocab_size, batch_size)] * steps list of views """ steps = shape[1] if x is None: return [None for step in range(steps)] xs = x.reshape(shape + (-1,)) return [xs[:, step, :] for step in range(steps)]
44133ddd1ad78b3ea05042c6c16558bb982c9206
85
import math def _validate(api_indicator_matype, option, parameters:dict, **kwargs): # -> dict """Validates kwargs and attaches them to parameters.""" # APO, PPO, BBANDS matype = int(math.fabs(kwargs["matype"])) if "matype" in kwargs else None if option == "matype" and matype is not None and matype in api_indicator_matype: parameters["matype"] = matype # BBANDS nbdevup = math.fabs(kwargs["nbdevup"]) if "nbdevup" in kwargs else None nbdevdn = math.fabs(kwargs["nbdevdn"]) if "nbdevdn" in kwargs else None if option == "nbdevup" and nbdevup is not None: parameters["nbdevup"] = nbdevup if option == "nbdevdn" and nbdevdn is not None: parameters["nbdevdn"] = nbdevdn # ULTOSC timeperiod1 = int(math.fabs(kwargs["timeperiod1"])) if "timeperiod1" in kwargs else None timeperiod2 = int(math.fabs(kwargs["timeperiod2"])) if "timeperiod2" in kwargs else None timeperiod3 = int(math.fabs(kwargs["timeperiod3"])) if "timeperiod3" in kwargs else None if option == "timeperiod1" and timeperiod1 is not None: parameters["timeperiod1"] = timeperiod1 if option == "timeperiod2" and timeperiod2 is not None: parameters["timeperiod2"] = timeperiod2 if option == "timeperiod3" and timeperiod3 is not None: parameters["timeperiod3"] = timeperiod3 # SAR acceleration = math.fabs(float(kwargs["acceleration"])) if "acceleration" in kwargs else None maximum = math.fabs(float(kwargs["maximum"])) if "maximum" in kwargs else None if option == "acceleration" and acceleration is not None: parameters["acceleration"] = acceleration if option == "maximum" and maximum is not None: parameters["maximum"] = maximum # MAMA fastlimit = math.fabs(float(kwargs["fastlimit"])) if "fastlimit" in kwargs else None slowlimit = math.fabs(float(kwargs["slowlimit"])) if "slowlimit" in kwargs else None if option == "fastlimit" and fastlimit is not None and fastlimit > 0 and fastlimit < 1: parameters["fastlimit"] = fastlimit if option == "slowlimit" and slowlimit is not None and slowlimit > 0 and slowlimit < 1: parameters["slowlimit"] = slowlimit # MACD, APO, PPO, ADOSC fastperiod = int(math.fabs(kwargs["fastperiod"])) if "fastperiod" in kwargs else None slowperiod = int(math.fabs(kwargs["slowperiod"])) if "slowperiod" in kwargs else None signalperiod = int(math.fabs(kwargs["signalperiod"])) if "signalperiod" in kwargs else None if option == "fastperiod" and fastperiod is not None: parameters["fastperiod"] = fastperiod if option == "slowperiod" and slowperiod is not None: parameters["slowperiod"] = slowperiod if option == "signalperiod" and signalperiod is not None: parameters["signalperiod"] = signalperiod # MACDEXT fastmatype = int(math.fabs(kwargs["fastmatype"])) if "fastmatype" in kwargs else None slowmatype = int(math.fabs(kwargs["slowmatype"])) if "slowmatype" in kwargs else None signalmatype = int(math.fabs(kwargs["signalmatype"])) if "signalmatype" in kwargs else None if option == "fastmatype" and fastmatype is not None and fastmatype in api_indicator_matype: parameters["fastmatype"] = fastmatype if option == "slowmatype" and slowmatype is not None and slowmatype in api_indicator_matype: parameters["slowmatype"] = slowmatype if option == "signalmatype" and signalmatype is not None and signalmatype in api_indicator_matype: parameters["signalmatype"] = signalmatype # STOCH(F), STOCHRSI fastkperiod = int(math.fabs(kwargs["fastkperiod"])) if "fastkperiod" in kwargs else None fastdperiod = int(math.fabs(kwargs["fastdperiod"])) if "fastdperiod" in kwargs else None fastdmatype = int(math.fabs(kwargs["fastdmatype"])) if "fastdmatype" in kwargs else None if 
option == "fastkperiod" and fastkperiod is not None: parameters["fastkperiod"] = fastkperiod if option == "fastdperiod" and fastdperiod is not None: parameters["fastdperiod"] = fastdperiod if option == "fastdmatype" and fastdmatype is not None and fastdmatype in api_indicator_matype: parameters["fastdmatype"] = fastdmatype # STOCH(F), STOCHRSI slowkperiod = int(math.fabs(kwargs["slowkperiod"])) if "slowkperiod" in kwargs else None slowdperiod = int(math.fabs(kwargs["slowdperiod"])) if "slowdperiod" in kwargs else None slowkmatype = int(math.fabs(kwargs["slowkmatype"])) if "slowkmatype" in kwargs else None slowdmatype = int(math.fabs(kwargs["slowdmatype"])) if "slowdmatype" in kwargs else None if option == "slowkperiod" and slowkperiod is not None: parameters["slowkperiod"] = slowkperiod if option == "slowdperiod" and slowdperiod is not None: parameters["slowdperiod"] = slowdperiod if option == "slowkmatype" and slowkmatype is not None and slowkmatype in api_indicator_matype: parameters["slowkmatype"] = slowkmatype if option == "slowdmatype" and slowdmatype is not None and slowdmatype in api_indicator_matype: parameters["slowdmatype"] = slowdmatype return parameters
d73903514aa87f854d08e3447cca85f64eaa4b31
86
def scale_y_values(y_data, y_reference, y_max): """ Scale the plot in y direction, to prevent extreme values. :param y_data: the y data of the plot :param y_reference: the maximum value of the plot series (e.g. Normal force), which will be scaled to y_max :param y_max: the maximum y value for the plot (e.g. if y_max=1, no y value in the plot will be greater than 1) """ multipl_factor = y_max / y_reference for i in range(len(y_data)): y_data[i] = y_data[i] * multipl_factor return y_data, multipl_factor
b3b22b0f868ce46926a4eecfc1c5d0ac2a7c1f7e
87
def set_heating_contribution(agent, pv_power): """ If the water tank is currently in use, compute and return the part of the pv_power used for heating the water""" pv_power_to_heating = 0 if agent.water_tank.is_active(): pv_power_to_heating = pv_power * agent.pv_panel.heating_contribution return pv_power_to_heating
ece29b7f0fbbe10907ada8fd1450919f01ab74c3
88
def sunlight_duration(hour_angle_sunrise): """Returns the duration of sunlight, in minutes, given the sunrise hour angle in degrees, hour_angle_sunrise.""" sunlight_duration_minutes = 8 * hour_angle_sunrise # 2 * hour angle * (4 minutes per degree of rotation) return sunlight_duration_minutes
b2887dd86caf25e7cac613bfa10b4de26c932c09
89
import re def wrapper_handle_attrs(func): """Convert HTML tag attributes into a dict""" # This is a decorator for Parsing.handle_attrs_tmp and Parsing.handle_attrs_tag def handle_attrs(self, attrs_str): attrs = dict() if attrs_str == '/': return attrs attrs_list = re.findall(self.attr_reg, attrs_str) for attr in attrs_list: attrs[attr[0]] = func(self, attr) return attrs return handle_attrs
d7396433c9721c26c8d419d4e78f2b8445f5dd70
90
def match_term(term, dictionary, case_sensitive, lemmatize=True): """ Parameters ---------- term dictionary case_sensitive lemmatize Including lemmas improves performance slightly Returns ------- """ if (not case_sensitive and term.lower() in dictionary) or term in dictionary: return True if (case_sensitive and lemmatize) and term.rstrip('s').lower() in dictionary: return True elif (not case_sensitive and lemmatize) and term.rstrip('s') in dictionary: return True return False
aba706a211cf68e7c8c1668200da3f9c8613b3d2
93
def deep_len(lnk): """ Returns the deep length of a possibly deep linked list. >>> deep_len(Link(1, Link(2, Link(3)))) 3 >>> deep_len(Link(Link(1, Link(2)), Link(3, Link(4)))) 4 >>> levels = Link(Link(Link(1, Link(2)), \ Link(3)), Link(Link(4), Link(5))) >>> print(levels) <<<1 2> 3> <4> 5> >>> deep_len(levels) 5 """ if not lnk: return 0 if type(lnk.first) == int: return 1 + deep_len(lnk.rest) return deep_len(lnk.first) + deep_len(lnk.rest)
d8a33600085e51b181752b2dd81d5bcdae7aaff9
95
import os import glob def get_files_path(file_path: str) -> list: """Get all file paths Args: file_path: root folder path Returns: list: list of strings containing all file paths """ all_files = [] for root, dirs, files in os.walk(file_path): files = glob.glob(os.path.join(root,'*.json')) for f in files: all_files.append(f) return all_files
d775bbe229b1ad53c173ae5d98246b04a3050dfa
97
import math def pixel_distance(A, B): """ In 9th grade I sat in geometry class wondering "when then hell am I ever going to use this?"...today is that day. Return the distance between two pixels """ (col_A, row_A) = A (col_B, row_B) = B return math.sqrt(math.pow(col_B - col_A, 2) + math.pow(row_B - row_A, 2))
64853c44400428c8040ae47d1cc2cca17aed0a5f
101
from pathlib import Path def get_archive(): """Ensure that the archive file exists and return its path. This is a function so the path can be made configurable in the future. Returns: :obj:`str`: The full local path to the archive file. """ filename = '/config/archive.txt' archfile = Path(filename) if not archfile.exists(): archfile.touch() return filename
78abc493d7f256ebf53ec2cfeb9ab4f1d42b5c02
103
def convert_units(str): """ Convert some string with binary prefix to int bytes""" unit = ''.join(ele for ele in str if not ele.isdigit()).strip().lower() return int(''.join(ele for ele in str if ele.isdigit()))*{ "b": 1, "B": 1, "k": 2**10, "kb": 2**10, "m": 2**20, "mb": 2**20, "g": 2**30, "gb": 2**30, "t": 2**40, "tb": 2**40 }.get(unit, 1)
a9de044090bfd4311a27dbbf373361e7d88a1e06
104
def match_piecewise(candidates: set, symbol: str, sep: str='::') -> set: """ Match the requested symbol reverse piecewise (split on ``::``) against the candidates. This allows you to under-specify the base namespace so that ``"MyClass"`` can match ``my_namespace::MyClass`` Args: candidates: set of possible matches for symbol symbol: the symbol to match against sep: the separator between identifier elements Returns: set of matches """ piecewise_list = set() for item in candidates: split_symbol = symbol.split(sep) split_item = item.split(sep) split_symbol.reverse() split_item.reverse() min_length = len(split_symbol) split_item = split_item[:min_length] if split_symbol == split_item: piecewise_list.add(item) return piecewise_list
1c6d7240365ef22f753aa4195cfb5e879fc453e0
105
def task_6_list_all_supplier_countries(cur) -> list: """ List all supplier countries Args: cur: psycopg cursor Returns: 29 records """ cur.execute("""SELECT country FROM suppliers""") return cur.fetchall()
a3d8af1eb2948ebc01e408265d20b0055f1a0504
106
def kev_to_wavelength(kev): """Calculate the wavelength from kev""" lamda = 12.3984 / kev #keV to Angstrom return lamda
cfb3126e56bc0890dd8cf2caa50a240b380dad56
107
def CalculateOSNames(os_name, os_variants): """Calculates all the names an OS can be called, according to its variants. @type os_name: string @param os_name: base name of the os @type os_variants: list or None @param os_variants: list of supported variants @rtype: list @return: list of valid names """ if os_variants: return ["%s+%s" % (os_name, v) for v in os_variants] else: return [os_name]
5689ed7da55cec929045e95344c60e7a06af711d
108
def pad(data, pad_id): """ Pad all lists in data to the same length. """ width = max(len(d) for d in data) return [d + [pad_id] * (width - len(d)) for d in data]
a0951f4332879600d25c061cf1c553126d6df8d2
109
def dropannotation(annotation_list): """ Drop out the annotation contained in annotation_list """ target = "" for c in annotation_list: if not c == "#": target += c else: return target return target
9f4a695eaf80f79dce943f2f91926d9c823483b6
111
def EntryToSlaveName(entry): """Produces slave name from the slaves config dict.""" name = entry.get('slavename') or entry.get('hostname') if 'subdir' in entry: return '%s#%s' % (name, entry['subdir']) return name
258e68c683592c21ea8111f21ba3ab648ddb8c57
112
def _parallel_predict_proba(ensemble, X, idx, results): """ Compute predictions of SCM estimators """ for k in idx: res = ensemble.estimators[k].predict(X[:, ensemble.estim_features[k]]) results = results + res return results
b0a2d5c59318506202c9331597ab2a11eacb7a32
113
def read_test_case(file_path): """ reads one test case from file. returns contents of test case Parameters ---------- file_path : str the path of the test case file to read. Returns ------- list a list of contents of the test case. """ file = open(file_path, "r") number = int(file.readline().strip()) case = list() for i in range(number): case.append(file.readline().strip()) file.close() return case
6a87ff979d0b1ccf838ebef56401a48760711541
114
import torch def accuracy4batch(model, testloader, criterion): """Compute a model's accuracy and total loss on a test set INPUT: model: pytorch nn model. testloader: DataLoader. test data set criterion: criterion. loss criterion device: torch.device. device on which model/data is based OUTPUT: accuracy: float in [0:1]. percent proportion of correct classifications in testloader test_loss: float. absolute error """ test_loss = 0 accuracy = 0 model.eval() with torch.no_grad(): for inputs, labels in testloader: inputs, labels = inputs.to(model.device), labels.to(model.device) logps = model.forward(inputs) batch_loss = criterion(logps, labels) test_loss += batch_loss.item() # Calculate accuracy ps = torch.exp(logps) top_p, top_class = ps.topk(1, dim=1) equals = top_class == labels.view(*top_class.shape) accuracy += torch.mean(equals.type(torch.FloatTensor)).item() accuracy = accuracy/len(testloader) return accuracy, test_loss
2005984b94f17bf601034953bbea3dca6542143d
115
import os def pickup_path(start_path, filename, default=None): """Pick up the config file path. For start_path = "/foo/bar/boo" and filename = "config.ini", the candidates checked are ["/foo/bar/boo/config.ini", "/foo/bar/config.ini", "/foo/config.ini", "/config.ini"] """ start_point = os.path.normpath(os.path.abspath(start_path)) current = start_point candidates = [] while True: candidates.append(os.path.join(current, filename)) if current == "/": break current, dropped = os.path.split(current) for path in candidates: if os.path.exists(path): return path return default
2535871cbb1197dde07f41063114bd37b88149e7
117
def add_matrices(matrix_a, matrix_b): """Add two n x n matrices """ return [[x + y for x, y in zip(matrix_a[i], matrix_b[i])] for i in range(len(matrix_a))]
a9f6a857892872fde584b6884e59a8b624220061
118
def matrix_multiply(A, B): """ Multiply two matrices A and B. :param A: the left matrix :param B: the right matrix :return: A * B """ # define m and n for the matrix as well as l, the connecting dimension between A and B m, l, n = len(A), len(A[0]), len(B[0]) # initialize an all zeros matrix C = [[0.0 for _ in range(len(B[0]))] for _ in range(len(A))] # iterate over the rows of C for i in range(m): # iterate over the columns of C for j in range(n): # set C[i][j] to the dot product of ith row of A and the jth column of B C[i][j] = sum(A[i][k] * B[k][j] for k in range(l)) # return the matrix C = A @ B return C
3cd551ea87d9f925654a4153106c2fe87e33fa8c
119
def get_cd(wcs, n=1): """ Get the value of the change in world coordinate per pixel across a linear axis. Defaults to wcs.wcs.cd if present. Does not support rotated headers (e.g., with nonzero CDm_n where m!=n) """ if hasattr(wcs.wcs,'cd'): if wcs.wcs.cd[n-1,n-1] != 0: return wcs.wcs.cd[n-1,n-1] else: return wcs.wcs.get_cdelt()[n-1]
9b31c81a1a5e87efeb201ffef7f8f65f846fe0b7
121
def duplicate_detector(gate_orders: list[tuple[str]]) -> int: """Detects any schematics that have an identical combination of gates.""" difference = len(gate_orders) - len(list(set(gate_orders))) # List - list with no duplicates return difference
e439a106abc0ff21bfe9773b3185d35b5bf05aa0
122
def permutations(x): """Return all permutations of x""" def fn(i): if i == len(x): ans.append(x.copy()) for k in range(i, len(x)): x[i], x[k] = x[k], x[i] fn(i+1) x[i], x[k] = x[k], x[i] ans = [] fn(0) return ans
691c701e1ac17da5dabb0fc3fe607ff68ac8fcdc
123
def clean_string(s: str) -> str: """Cleans and returns an input string >>> clean_string(" xYz ") 'XYZ' """ return str(s).strip().upper()
c97281505492ded5b9167076312959c5eee41a6c
124
def XOR(v1, v2): """ XOR operation element by element from 2 lists :param v1: [1, 0, 1, 0, 0, 1] :param v2: [1, 1, 0, 0, 1, 1] :return: [0, 1, 1, 0, 1, 0] """ return [a ^ b for a, b in zip(v1, v2)]
e3b94b35ccf4e1dd99cc51f32c70f96c5fe99795
125
def get_dayofweek(date): """ Returns day of week in string format from date parameter (in datetime format). """ return date.strftime("%A")
4a0f728733870998331ea6f796b167b9dd3276ab
126
import torch from typing import Optional def iou( predict: torch.Tensor, target: torch.Tensor, mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: """ This is a great loss because it emphasizes the active regions of the prediction and target """ dims = tuple(range(predict.dim())[1:]) if mask is not None: predict = predict * mask target = target * mask intersect = (predict * target).sum(dims) union = (predict + target - predict * target).sum(dims) + 1e-4 return (intersect / union).sum() / intersect.numel()
7608189bde3b640a8f148e3628e5668a3b310655
130
def getCondVisibility(condition): """ Returns ``True`` (``1``) or ``False`` (``0``) as a ``bool``. :param condition: string - condition to check. List of Conditions: http://wiki.xbmc.org/?title=List_of_Boolean_Conditions .. note:: You can combine two (or more) of the above settings by using "+" as an ``AND`` operator, "|" as an ``OR`` operator, "!" as a ``NOT`` operator, and "[" and "]" to bracket expressions. example:: visible = xbmc.getCondVisibility('[Control.IsVisible(41) + !Control.IsVisible(12)]') """ return bool(1)
761914696ac2050c6bf130e5b49221be043903bd
131
import re def sortRules(ruleList): """Return sorted list of rules. Rules should be in a tab-delimited format: 'rule\t\t[four letter negation tag]' Sorts list of rules descending based on length of the rule, splits each rule into components, converts pattern to regular expression, and appends it to the end of the rule. """ ruleList.sort(key = len, reverse = True) sortedList = [] for rule in ruleList: s = rule.strip().split('\t') splitTrig = s[0].split() trig = r'\s+'.join(splitTrig) pattern = r'\b(' + trig + r')\b' s.append(re.compile(pattern, re.IGNORECASE)) sortedList.append(s) return sortedList
5b98903fd48f562d22e0ce269aa55e52963fa4a9
132
def to_array(string): """Converts a string to an array relative to its spaces. Args: string (str): The string to convert into array Returns: str: New array """ try: new_array = string.split(" ") # Convert the string into array while "" in new_array: # Check if the array contains empty strings new_array.remove("") return new_array except: print("The parameter string is not a str") return string
7ee87a2b245a71666939e9ce2e23dc07fcaa0153
134
def civic_methods(method001, method002, method003): """Create test fixture for methods.""" return [method001, method002, method003]
63913e2cfe866c65d9a1e7d5d3ba2e081b8e12f6
135