code (string, 4–4.48k chars) | docstring (string, 1–6.45k chars) | _id (string, 24 chars)
---|---|---|
def test_games_malformed_turn(self): <NEW_LINE> <INDENT> player1 = LegitPlayer() <NEW_LINE> player1.end_of_game = mock.MagicMock() <NEW_LINE> player2 = LegitPlayer() <NEW_LINE> player2.end_of_game = mock.MagicMock() <NEW_LINE> player2.play_turn = mock.MagicMock(return_value="lolol") <NEW_LINE> player1guard = PlayerGuard(player1) <NEW_LINE> player2guard = PlayerGuard(player2) <NEW_LINE> uuids_to_player = {self.uuidp1:player1guard, self.uuidp2:player2guard} <NEW_LINE> ref = Referee(uuids_to_player, self.uuids_to_name, self.obs_man) <NEW_LINE> result = ref.run_game() <NEW_LINE> player1.end_of_game.assert_called_once_with("p1") <NEW_LINE> player2.end_of_game.assert_not_called() <NEW_LINE> bad_players, game_results = result <NEW_LINE> expected_bad_players = [self.uuidp2] <NEW_LINE> for actual, expected in zip(bad_players, expected_bad_players): <NEW_LINE> <INDENT> self.assertEqual(actual, expected) <NEW_LINE> <DEDENT> expected_game_results = [self.uuidp1] <NEW_LINE> for actual, expected in zip(game_results, expected_game_results): <NEW_LINE> <INDENT> self.assertEqual(actual, expected) | tests that a player who gives a malformed turn
loses and end_of_game is not called on it | 625941b05166f23b2e1a4ea1 |
def get_neighbor(self, theta, new_places, new_states): <NEW_LINE> <INDENT> assert isinstance(theta, np.ndarray) <NEW_LINE> assert np.all([x < self.states for x in theta]), "Invalid theta" <NEW_LINE> new_theta = np.copy(theta) <NEW_LINE> new_theta.put(new_places, new_states) <NEW_LINE> return new_theta | Function to get a neighbor from a theta
Parameters
----------
theta : np.ndarray
A numpy array of integers describing the state
new_places : np.ndarray
An array of locations for which to swap values
new_states : np.ndarray
An array of new values to swap in
Returns
-------
Returns a neighboring state | 625941b0adb09d7d5db6c4de |
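The row above builds a neighbor by overwriting selected entries with `numpy.ndarray.put`; a minimal standalone sketch of that operation, with hypothetical example values:

```python
import numpy as np

theta = np.array([0, 2, 1, 0])         # current state (all entries < self.states)
new_places = np.array([1, 3])          # indices to overwrite
new_states = np.array([0, 2])          # replacement values

new_theta = np.copy(theta)
new_theta.put(new_places, new_states)  # in-place scatter, as in get_neighbor
print(new_theta)                       # [0 0 1 2]
```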
def test_abs_path(self): <NEW_LINE> <INDENT> fname = self.id().split(".")[3] <NEW_LINE> backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') <NEW_LINE> node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname)) <NEW_LINE> try: <NEW_LINE> <INDENT> self.run_pb(["init", "-B", os.path.relpath("%s/backup" % node.base_dir, self.dir_path)]) <NEW_LINE> self.assertEqual(1, 0, 'Expecting Error due to initialization with non-absolute path in --backup-path. Output: {0} \n CMD: {1}'.format( repr(self.output), self.cmd)) <NEW_LINE> <DEDENT> except ProbackupException as e: <NEW_LINE> <INDENT> self.assertEqual(e.message, "ERROR: -B, --backup-path must be an absolute path\n", '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) <NEW_LINE> <DEDENT> self.del_test_dir(module_name, fname) | failure expected when the backup catalog path is not given as an absolute path | 625941b0097d151d1a222ba5 |
def test_reschedule_true(self): <NEW_LINE> <INDENT> instance_uuid = self.instance['uuid'] <NEW_LINE> method_args = (None, None, None, None, False, {}) <NEW_LINE> self.mox.StubOutWithMock(self.compute, '_deallocate_network') <NEW_LINE> self.mox.StubOutWithMock(self.compute, '_reschedule') <NEW_LINE> try: <NEW_LINE> <INDENT> raise test.TestingException("Original") <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> exc_info = sys.exc_info() <NEW_LINE> self.compute._deallocate_network(self.context, self.instance) <NEW_LINE> self.compute._reschedule(self.context, None, {}, instance_uuid, self.compute.scheduler_rpcapi.run_instance, method_args, task_states.SCHEDULING).AndReturn(True) <NEW_LINE> self.compute._log_original_error(exc_info, instance_uuid) <NEW_LINE> self.mox.ReplayAll() <NEW_LINE> self.compute._reschedule_or_reraise(self.context, self.instance, None, None, None, False, None, {}) | Test behavior when re-scheduling happens | 625941b08a43f66fc4b53db3 |
def set_randomize(self, randomize): <NEW_LINE> <INDENT> assert isinstance(randomize, bool), LOGGER.error("randomize must be boolean") <NEW_LINE> self.__randomize = randomize | Set whether to randomize moves selection.
:Parameters:
#. randomize (boolean): Whether to pull moves randomly from path
or pull moves in order at every step. | 625941b03317a56b869399ae |
def forward(self, prediction_out, target): <NEW_LINE> <INDENT> target = torch.where(target==1, torch.ones_like(target), -1*torch.ones_like(target)).view(-1) <NEW_LINE> target = target.float() <NEW_LINE> A = 1.0 - target*prediction_out.view(-1) <NEW_LINE> max_squared = torch.nn.Softplus()(A) if self.use_softplus else torch.clamp(A, min=0.0) <NEW_LINE> maxhinge_loss = max_squared.mean() <NEW_LINE> loss_dict = OrderedDict() <NEW_LINE> return maxhinge_loss, loss_dict | prediction_out (B,1): f(x) | 625941b0d8ef3951e3243287 |
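The `forward` above is a hinge loss on labels mapped to {-1, +1}, optionally smoothed by softplus; a self-contained sketch of the same computation (names are illustrative):

```python
import torch

def hinge_loss(prediction_out, target, use_softplus=False):
    # map labels {1, other} -> {+1, -1} and flatten
    y = torch.where(target == 1, torch.ones_like(target),
                    -1 * torch.ones_like(target)).view(-1).float()
    margin = 1.0 - y * prediction_out.view(-1)
    # softplus is a smooth surrogate for max(0, margin)
    per_sample = torch.nn.Softplus()(margin) if use_softplus else torch.clamp(margin, min=0.0)
    return per_sample.mean()
```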
def traiteLigne(ligne): <NEW_LINE> <INDENT> ligne = ligne.decode("Latin1") <NEW_LINE> newLine = u"" <NEW_LINE> c, m = 0, 0 <NEW_LINE> while c < len(ligne): <NEW_LINE> <INDENT> if ligne[c] == " ": <NEW_LINE> <INDENT> newLine = newLine + ligne[m:c] + "-*-" <NEW_LINE> m = c + 1 <NEW_LINE> <DEDENT> c = c + 1 <NEW_LINE> <DEDENT> newLine = newLine + ligne[m:] <NEW_LINE> return newLine.encode("Utf8") | converts a line from 'Latin1' to 'Utf8', inserting -*- in place of spaces | 625941b0d7e4931a7ee9dc65 |
def test_team_builder_config_product_size_materials_id_delete(self): <NEW_LINE> <INDENT> pass | Test case for team_builder_config_product_size_materials_id_delete
Delete a model instance by {{id}} from the data source. | 625941b0adb09d7d5db6c4e0 |
def _get_files(self, m_string_path): <NEW_LINE> <INDENT> for _, _, list_files in os.walk(m_string_path): <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> return set(list_files) | this method returns a set of files in the directory of csv files
Requirements:
package os
Inputs:
m_string_path
Type: string
Desc: the path of the directory with bulk upload files
Important Info:
None
Return:
object
Type: set
Desc: files in the directory | 625941b0293b9510aa2c2fde |
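The `break` inside the `os.walk` loop above means only the top-level directory is ever visited; an equivalent, arguably clearer sketch:

```python
import os

def get_top_level_files(path):
    # next(os.walk(path)) yields (dirpath, dirnames, filenames) for the
    # top directory only -- the same effect as breaking out of the loop
    _, _, filenames = next(os.walk(path))
    return set(filenames)
```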
def get_page_info(url, page_num): <NEW_LINE> <INDENT> base_url = 'http://www.indeed.com' <NEW_LINE> page_job_descriptions = [] <NEW_LINE> start_num = str(page_num * 10) <NEW_LINE> page_url = ''.join([url, '&start=', start_num]) <NEW_LINE> print('Getting page: ' + page_url) <NEW_LINE> html_page = requests.get(page_url).text <NEW_LINE> page_soup = make_soup(html_page) <NEW_LINE> job_link_area = page_soup.find(id = 'resultsCol') <NEW_LINE> if job_link_area == None: <NEW_LINE> <INDENT> job_link_area = page_soup.find(id = 'resultsCol') <NEW_LINE> if job_link_area == None: <NEW_LINE> <INDENT> print('Cannot find job link area for: ' + page_url) <NEW_LINE> with open('output/failed_to_parse_page.txt', 'a') as text_file: <NEW_LINE> <INDENT> text_file.write('\n') <NEW_LINE> text_file.write(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n') <NEW_LINE> text_file.write(page_url + '\n') <NEW_LINE> text_file.write(html_page + '\n') <NEW_LINE> <DEDENT> return page_job_descriptions <NEW_LINE> <DEDENT> <DEDENT> job_urls = [base_url + link.get('href') for link in job_link_area.find_all('a', href=True)] <NEW_LINE> job_urls = [x for x in job_urls if 'clk' in x] <NEW_LINE> for i in range(len(job_urls)): <NEW_LINE> <INDENT> description = get_job_info(job_urls[i]) <NEW_LINE> if description: <NEW_LINE> <INDENT> page_job_descriptions.append(description) <NEW_LINE> <DEDENT> <DEDENT> print('Page ' + str(page_num) + ' gets job description:\n' ) <NEW_LINE> print(page_job_descriptions) <NEW_LINE> print('\n') <NEW_LINE> return page_job_descriptions | This function takes a URL and a page number as arguments,
combines them into a new URL for search, and returns a two-dimensional list,
where each value of the list is a one-dimensional list that contains a set of words
appearing in one job ad of this page.
url: string, a base URL before the page number
page_num: int, a page number
return: list, a two-dimensional list where each value of the list is a one-dimensional list | 625941b0596a89723608980e |
def random_walk_normal_fn(scale=1., name=None): <NEW_LINE> <INDENT> def _fn(state_parts, seed): <NEW_LINE> <INDENT> with tf.name_scope(name, 'random_walk_normal_fn', values=[state_parts, scale, seed]): <NEW_LINE> <INDENT> scales = scale if mcmc_util.is_list_like(scale) else [scale] <NEW_LINE> if len(scales) == 1: <NEW_LINE> <INDENT> scales *= len(state_parts) <NEW_LINE> <DEDENT> if len(state_parts) != len(scales): <NEW_LINE> <INDENT> raise ValueError('`scale` must broadcast with `state_parts`.') <NEW_LINE> <DEDENT> seed_stream = distributions.SeedStream(seed, salt='RandomWalkNormalFn') <NEW_LINE> next_state_parts = [ tf.random_normal( mean=state_part, stddev=scale_part, shape=tf.shape(state_part), dtype=state_part.dtype.base_dtype, seed=seed_stream() ) for scale_part, state_part in zip(scales, state_parts)] <NEW_LINE> return next_state_parts <NEW_LINE> <DEDENT> <DEDENT> return _fn | Returns a callable that adds a random normal perturbation to the input.
This function returns a callable that accepts a Python `list` of `Tensor`s of
any shapes and `dtypes` representing the state parts of the `current_state`
and a random seed. The supplied argument `scale` must be a `Tensor` or Python
`list` of `Tensor`s representing the scale of the generated
proposal. `scale` must broadcast with the state parts of `current_state`.
The callable adds a sample from a zero-mean normal distribution with the
supplied scales to each state part and returns a same-type `list` of `Tensor`s
as the state parts of `current_state`.
Args:
scale: a `Tensor` or Python `list` of `Tensor`s of any shapes and `dtypes`
controlling the scale of the normal proposal distribution.
name: Python `str` name prefixed to Ops created by this function.
Default value: 'random_walk_normal_fn'.
Returns:
random_walk_normal_fn: A callable accepting a Python `list` of `Tensor`s
representing the state parts of the `current_state` and an `int`
representing the random seed to be used to generate the proposal. The
callable returns the same-type `list` of `Tensor`s as the input and
represents the proposal for the RWM algorithm. | 625941b00a50d4780f666bda |
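A hedged usage sketch for the factory above (TF1-era API, matching the `tf.random_normal` call in the body; shapes and scale are hypothetical):

```python
import tensorflow as tf

new_state_fn = random_walk_normal_fn(scale=0.5)    # one scale, broadcast to all parts
state_parts = [tf.zeros([10]), tf.zeros([10, 2])]
# returns a same-structure list of independently perturbed tensors
next_state_parts = new_state_fn(state_parts, seed=42)
```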
def num_chars(s): <NEW_LINE> <INDENT> return len(str(s)) | Return the number of characters in the string representation of s | 625941b02c8b7c6e89b35510 |
def dispatch(self): <NEW_LINE> <INDENT> if self.controller.options.serial: <NEW_LINE> <INDENT> self.serial_dispatch() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.parallel_dispatch() | Run the doctests for the controller's specified sources,
by calling :meth:`parallel_dispatch` or :meth:`serial_dispatch`
according to the ``--serial`` option.
EXAMPLES::
sage: from sage.doctest.control import DocTestController, DocTestDefaults
sage: from sage.doctest.forker import DocTestDispatcher
sage: from sage.doctest.reporting import DocTestReporter
sage: from sage.doctest.util import Timer
sage: from sage.env import SAGE_SRC
sage: import os
sage: freehom = os.path.join(SAGE_SRC, 'sage', 'modules', 'free_module_homspace.py')
sage: bigo = os.path.join(SAGE_SRC, 'sage', 'rings', 'big_oh.py')
sage: DC = DocTestController(DocTestDefaults(), [freehom, bigo])
sage: DC.expand_files_into_sources()
sage: DD = DocTestDispatcher(DC)
sage: DR = DocTestReporter(DC)
sage: DC.reporter = DR
sage: DC.dispatcher = DD
sage: DC.timer = Timer().start()
sage: DD.dispatch()
sage -t .../sage/modules/free_module_homspace.py
[... tests, ... s]
sage -t .../sage/rings/big_oh.py
[... tests, ... s] | 625941b0d7e4931a7ee9dc67 |
def _validate_network_subnetpools(self, network, subnet_ip_version, new_subnetpool, network_scope): <NEW_LINE> <INDENT> ipv6_pd_subnetpool = new_subnetpool == const.IPV6_PD_POOL_ID <NEW_LINE> if network_scope: <NEW_LINE> <INDENT> if (ipv6_pd_subnetpool or new_subnetpool and new_subnetpool.address_scope_id != network_scope.id): <NEW_LINE> <INDENT> raise addr_scope_exc.NetworkAddressScopeAffinityError() <NEW_LINE> <DEDENT> <DEDENT> for subnet in network.subnets: <NEW_LINE> <INDENT> if ipv6_pd_subnetpool: <NEW_LINE> <INDENT> if (subnet.ip_version == const.IP_VERSION_6 and subnet.subnetpool_id != const.IPV6_PD_POOL_ID): <NEW_LINE> <INDENT> raise exc.NetworkSubnetPoolAffinityError() <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if new_subnetpool: <NEW_LINE> <INDENT> if (subnet.subnetpool_id != new_subnetpool.id and subnet.ip_version == new_subnetpool.ip_version and not network_scope): <NEW_LINE> <INDENT> raise exc.NetworkSubnetPoolAffinityError() <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if (subnet.subnetpool_id and subnet.ip_version == subnet_ip_version): <NEW_LINE> <INDENT> raise exc.NetworkSubnetPoolAffinityError() | Validate all subnets on the given network have been allocated from
the same subnet pool as new_subnetpool if no address scope is
used. If address scopes are used, validate that all subnets on the
given network participate in the same address scope. | 625941b0293b9510aa2c2fe0 |
def items(self): <NEW_LINE> <INDENT> return self._list | Returns all items in this list as a python list. | 625941b0d8ef3951e324328b |
def store_attr(names=None, self=None, but='', cast=False, **attrs): <NEW_LINE> <INDENT> fr = sys._getframe(1) <NEW_LINE> args = fr.f_code.co_varnames[:fr.f_code.co_argcount] <NEW_LINE> if self: args = ('self', *args) <NEW_LINE> else: self = fr.f_locals[args[0]] <NEW_LINE> if not hasattr(self, '__stored_args__'): self.__stored_args__ = {} <NEW_LINE> anno = annotations(self) if cast else {} <NEW_LINE> if not attrs: <NEW_LINE> <INDENT> ns = re.split(', *', names) if names else args[1:] <NEW_LINE> attrs = {n:fr.f_locals[n] for n in ns} <NEW_LINE> <DEDENT> if isinstance(but,str): but = re.split(', *', but) <NEW_LINE> attrs = {k:v for k,v in attrs.items() if k not in but} <NEW_LINE> return _store_attr(self, anno, **attrs) | Store params named in comma-separated `names` from calling context into attrs in `self` | 625941b082261d6c526ab1eb |
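`store_attr` inspects the calling frame, so a bare call inside `__init__` stores all arguments as attributes; a typical usage sketch:

```python
class Point:
    def __init__(self, x, y, label='origin'):
        store_attr()  # equivalent to: self.x, self.y, self.label = x, y, label

p = Point(1, 2)
print(p.x, p.y, p.label)  # 1 2 origin
```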
def generate_seed(): <NEW_LINE> <INDENT> return random.randint(0, 100000000000) | Generate a random seed to be used in worldgen
:return: random integer | 625941b03346ee7daa2b2ab5 |
def resolve_unique_keys(self, info, **args): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return json.dumps(self.unique_keys) <NEW_LINE> <DEDENT> except AttributeError as exception: <NEW_LINE> <INDENT> logger.exception(exception) <NEW_LINE> return [] | Return a string dump of the unique keys. This is a string because
we don't have a polymorphic GraphQL representation of what
might be defined as a unique key and it is therefore easier to
just return it as a string and have the client parse it.
.. note::
The AttributeError must be handled here now because at one
point deletion transactions did not have `unique_keys`.
Because of the silly way that graphene tries to magically
proxy attributes to root objects wrapped in `self` here,
we end up calling `None.unique_keys` if `unique_keys` was
not passed to TransactionResponseEntity.__init__() | 625941b03c8af77a43ae34ee |
def annotations(self): <NEW_LINE> <INDENT> return map(self.create_annotation_node, self.document.annotations) | Returns a list of annotation nodes | 625941b07b180e01f3dc4558 |
def main(req: func.HttpRequest) -> func.HttpResponse: <NEW_LINE> <INDENT> logging.info("Python HTTP trigger function processed a request.") <NEW_LINE> name = req.params.get("name") <NEW_LINE> if not name: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> req_body = req.get_json() <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> name = req_body.get("name") <NEW_LINE> <DEDENT> <DEDENT> if name: <NEW_LINE> <INDENT> return func.HttpResponse(f"Hello {name}") <NEW_LINE> <DEDENT> return func.HttpResponse( "Please pass a name on the query string or in the request body", status_code=400 ) | Says hello to user. | 625941b05e10d32532c5ec79 |
def _Plus(lhs=None, rhs=None, out=None, name=None, **kwargs): <NEW_LINE> <INDENT> return (0,) | Adds arguments element-wise.
The storage type of ``elemwise_add`` output depends on storage types of inputs
- elemwise_add(row_sparse, row_sparse) = row_sparse
- otherwise, ``elemwise_add`` generates output with default storage
Parameters
----------
lhs : NDArray
first input
rhs : NDArray
second input
out : NDArray, optional
The output NDArray to hold the result.
Returns
-------
out : NDArray or list of NDArrays
The output of this function. | 625941b0046cf37aa974ca9a |
def _add_keys(resource: Dict[str, Any], path: Tuple[str, ...], items: Dict[str, Any]): <NEW_LINE> <INDENT> if not any(i is not None for i in items.values()): <NEW_LINE> <INDENT> return resource <NEW_LINE> <DEDENT> obj = resource <NEW_LINE> for p in path: <NEW_LINE> <INDENT> if p not in obj: <NEW_LINE> <INDENT> obj[p] = {} <NEW_LINE> <DEDENT> obj = obj[p] <NEW_LINE> <DEDENT> for k, v in items.items(): <NEW_LINE> <INDENT> if v is not None: <NEW_LINE> <INDENT> obj[k] = v <NEW_LINE> <DEDENT> <DEDENT> return resource | A helper function to manipulate k8s resource configs. It will update a dict in resource
definition given a path (think jsonpath). Will ignore items set to None. Will not update
resource if all items are None. Note that it updates resource dict in-place.
Examples:
>>> _add_keys({}, ("foo", "bar"), {"A": 1, "B": 2})
{'foo': {'bar': {'A': 1, 'B': 2}}}
>>> _add_keys({}, ("foo", "bar"), {"A": 1, "B": None})
{'foo': {'bar': {'A': 1}}}
>>> _add_keys({}, ("foo", "bar"), {"A": None, "B": None})
{} | 625941b02c8b7c6e89b35514 |
def read_processed(fileloc, flux_file, ivar_file): <NEW_LINE> <INDENT> with open(fileloc + flux_file,"rb") as file: <NEW_LINE> <INDENT> f = pickle.load(file) <NEW_LINE> <DEDENT> with open(fileloc + ivar_file,"rb") as file: <NEW_LINE> <INDENT> i = pickle.load(file) <NEW_LINE> <DEDENT> return f, i | Reads processed fluxes, ivars from disk
Returns fluxes and ivars 2D arrays
Args:
fileloc : location of the processed files
flux_file : name of the processed fluxes pkl file
ivar_file: name of the processed ivars pkl file | 625941b0d7e4931a7ee9dc6b |
def _url_request(self, url, request_parameters, request_type='GET', files=None, repeat=0, error_text="Error", raise_on_failure=True): <NEW_LINE> <INDENT> if files is not None: <NEW_LINE> <INDENT> mpf = _MultiPartForm(param_dict=request_parameters, files=files) <NEW_LINE> req = request(url) <NEW_LINE> body = mpf.make_result <NEW_LINE> req.add_header('Content-type', mpf.get_content_type()) <NEW_LINE> req.add_header('Content-length', len(body)) <NEW_LINE> req.data = body <NEW_LINE> <DEDENT> elif request_type == 'GET': <NEW_LINE> <INDENT> req = request('?'.join((url, encode(request_parameters)))) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> headers = {'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',} <NEW_LINE> req = request(url, encode(request_parameters).encode('UTF-8'), headers) <NEW_LINE> <DEDENT> req.add_header('Accept-encoding', 'gzip') <NEW_LINE> response = urlopen(req) <NEW_LINE> if response.info().get('Content-Encoding') == 'gzip': <NEW_LINE> <INDENT> buf = io.BytesIO(response.read()) <NEW_LINE> with gzip.GzipFile(fileobj=buf) as gzip_file: <NEW_LINE> <INDENT> response_bytes = gzip_file.read() <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> response_bytes = response.read() <NEW_LINE> <DEDENT> response_text = response_bytes.decode('UTF-8') <NEW_LINE> response_json = json.loads(response_text) <NEW_LINE> if "error" in response_json: <NEW_LINE> <INDENT> if repeat == 0: <NEW_LINE> <INDENT> if raise_on_failure: <NEW_LINE> <INDENT> raise Exception("{0}: {1}".format(error_text, response_json)) <NEW_LINE> <DEDENT> return response_json <NEW_LINE> <DEDENT> repeat -= 1 <NEW_LINE> time.sleep(2) <NEW_LINE> response_json = self._url_request( url, request_parameters, request_type, files, repeat, error_text) <NEW_LINE> <DEDENT> return response_json | Send a new request and format the json response.
Keyword arguments:
url - the url of the request
request_parameters - a dictionary containing the name of the parameter and its corresponding value
request_type - the type of request: 'GET', 'POST'
files - the files to be uploaded
repeat - the number of times to repeat the request in the case of a failure
error_text - the message to log if an error is returned
raise_on_failure - indicates if an exception should be raised if an error is returned and repeat is 0 | 625941b0293b9510aa2c2fe3 |
def _has_intersection(self, point, query): <NEW_LINE> <INDENT> has_intersection = False <NEW_LINE> if point == query[1]: <NEW_LINE> <INDENT> has_intersection = False <NEW_LINE> <DEDENT> elif point >= query[0] and point < query[1]: <NEW_LINE> <INDENT> has_intersection = True <NEW_LINE> <DEDENT> return has_intersection | Verify if a point has intersection with a query | 625941b0d164cc6175782a97 |
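The two branches above reduce to membership in a half-open interval; a compact equivalent:

```python
def has_intersection(point, query):
    # point intersects the half-open interval [query[0], query[1])
    return query[0] <= point < query[1]
```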
def create_channel(self, config): <NEW_LINE> <INDENT> with self.lock: <NEW_LINE> <INDENT> self._create_channel(config) <NEW_LINE> <DEDENT> logger.info('Added channel `%s` to AMQP connector `%s`', config.name, self.config.name) | Creates a channel.
| 625941b03c8af77a43ae34f0 |
def TestFunction(data_in=None): <NEW_LINE> <INDENT> pass | Define functions here. | 625941b0046cf37aa974ca9c |
def get_toolbar(self, language, fieldname, checksum): <NEW_LINE> <INDENT> groups = [] <NEW_LINE> extra_params = COPY_TEMPLATE.format( ugettext(u'Loading…'), reverse('js-get', kwargs={'checksum': checksum}), checksum, ) <NEW_LINE> groups.append( GROUP_TEMPLATE.format( '', BUTTON_TEMPLATE.format( 'copy-text', ugettext('Fill in with source string'), extra_params, ICON_TEMPLATE.format('clipboard', ugettext('Copy')) ) ) ) <NEW_LINE> chars = [] <NEW_LINE> for name, char in get_special_chars(language): <NEW_LINE> <INDENT> chars.append( BUTTON_TEMPLATE.format( 'specialchar', name, '', char ) ) <NEW_LINE> <DEDENT> groups.append( GROUP_TEMPLATE.format('', u'\n'.join(chars)) ) <NEW_LINE> if language.direction == 'rtl': <NEW_LINE> <INDENT> rtl_name = 'rtl-{0}'.format(fieldname) <NEW_LINE> rtl_switch = [ RADIO_TEMPLATE.format( 'direction-toggle active', ugettext('Toggle text direction'), rtl_name, 'rtl', 'checked="checked"', 'RTL', ), RADIO_TEMPLATE.format( 'direction-toggle', ugettext('Toggle text direction'), rtl_name, 'ltr', '', 'LTR' ), ] <NEW_LINE> groups.append( GROUP_TEMPLATE.format( 'data-toggle="buttons"', u'\n'.join(rtl_switch) ) ) <NEW_LINE> <DEDENT> return TOOLBAR_TEMPLATE.format(u'\n'.join(groups)) | Returns toolbar HTML code. | 625941b0d8ef3951e324328f |
def write(self): <NEW_LINE> <INDENT> dirs = [self.recipes_dir] <NEW_LINE> if self.expectations: <NEW_LINE> <INDENT> dirs.append(self.expect_dir) <NEW_LINE> <DEDENT> for d in dirs: <NEW_LINE> <INDENT> if not os.path.exists(d): <NEW_LINE> <INDENT> os.makedirs(d) <NEW_LINE> <DEDENT> <DEDENT> with open(self.recipe_path, 'w') as f: <NEW_LINE> <INDENT> f.write('\n'.join([ 'from recipe_engine import post_process', '', 'DEPS = %r' % self.DEPS, '', 'def RunSteps(api):', ] + [' %s' % l for l in self.RunStepsLines] + [ '', 'def GenTests(api):', ] + [' %s' % l for l in self.GenTestsLines])) <NEW_LINE> <DEDENT> for test_name, test_contents in self.expectations.iteritems(): <NEW_LINE> <INDENT> name = ''.join('_' if c in '<>:"\\/|?*\0' else c for c in test_name) <NEW_LINE> with open(os.path.join(self.expect_dir, '%s.json' % name), 'w') as f: <NEW_LINE> <INDENT> json.dump(test_contents, f, sort_keys=True, indent=2, separators=(',', ': ')) | Writes the recipe to disk. | 625941b0d7e4931a7ee9dc6d |
def DescribeFleetUtilization(self, request): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> params = request._serialize() <NEW_LINE> body = self.call("DescribeFleetUtilization", params) <NEW_LINE> response = json.loads(body) <NEW_LINE> if "Error" not in response["Response"]: <NEW_LINE> <INDENT> model = models.DescribeFleetUtilizationResponse() <NEW_LINE> model._deserialize(response["Response"]) <NEW_LINE> return model <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> code = response["Response"]["Error"]["Code"] <NEW_LINE> message = response["Response"]["Error"]["Message"] <NEW_LINE> reqid = response["Response"]["RequestId"] <NEW_LINE> raise TencentCloudSDKException(code, message, reqid) <NEW_LINE> <DEDENT> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> if isinstance(e, TencentCloudSDKException): <NEW_LINE> <INDENT> raise <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise TencentCloudSDKException(e.message, e.message) | This API (DescribeFleetUtilization) is used to query the utilization information of server fleets.
:param request: Request instance for DescribeFleetUtilization.
:type request: :class:`tencentcloud.gse.v20191112.models.DescribeFleetUtilizationRequest`
:rtype: :class:`tencentcloud.gse.v20191112.models.DescribeFleetUtilizationResponse` | 625941b0d164cc6175782a98 |
def align_segments(s1, s2, data1, data2): <NEW_LINE> <INDENT> lengths_s1 = length_segment(s1, data1) <NEW_LINE> lengths_s2 = length_segment(s2, data2) <NEW_LINE> ratios_s1 = [l/lengths_s1[-1] for l in lengths_s1] <NEW_LINE> ratios_s2 = [l/lengths_s2[-1] for l in lengths_s2] <NEW_LINE> len_s1 = lengths_s1[-1] <NEW_LINE> len_s2 = lengths_s2[-1] <NEW_LINE> all_pos = list(set(ratios_s1+ratios_s2)) <NEW_LINE> all_pos.sort() <NEW_LINE> def _align(length, s, ratios, data): <NEW_LINE> <INDENT> align = [None]*(len(all_pos)-1) <NEW_LINE> ratios = set(ratios) <NEW_LINE> p1, p2 = 0, 1 <NEW_LINE> pos = data[s[p1]] <NEW_LINE> cur = s[p1] <NEW_LINE> next = None <NEW_LINE> w = data.walls[s[p1], s[p2]] <NEW_LINE> for j, (r1, r2) in enumerate(zip(all_pos[:-1], all_pos[1:])): <NEW_LINE> <INDENT> w1 = [pos] <NEW_LINE> if r2 in ratios: <NEW_LINE> <INDENT> w1.extend(w) <NEW_LINE> pos = data[s[p2]] <NEW_LINE> p1, p2 = p1+1, p2+1 <NEW_LINE> if p2 < len(s): <NEW_LINE> <INDENT> w = data.walls[s[p1], s[p2]] <NEW_LINE> next = s[p1] <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> l = (r2-r1)*length <NEW_LINE> acc = 0 <NEW_LINE> while w: <NEW_LINE> <INDENT> p = w[0] <NEW_LINE> vec = p - w1[-1] <NEW_LINE> dl = sqrt(vec.x()*vec.x() + vec.y()*vec.y()) <NEW_LINE> if acc+dl > l: <NEW_LINE> <INDENT> pos = w1[-1] + vec*((l-acc)/dl) <NEW_LINE> break <NEW_LINE> <DEDENT> acc += dl <NEW_LINE> w1.append(p) <NEW_LINE> w.pop(0) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> p = data[s[p2]] <NEW_LINE> vec = p - w1[-1] <NEW_LINE> dl = sqrt(vec.x()*vec.x() + vec.y()*vec.y()) <NEW_LINE> pos = w1[-1] + vec*((l-acc)/dl) <NEW_LINE> <DEDENT> <DEDENT> align[j] = (w1, cur) <NEW_LINE> l = (r2-r1)*len_s2 <NEW_LINE> cur, next = next, None <NEW_LINE> <DEDENT> return align <NEW_LINE> <DEDENT> align_s1 = _align(len_s1, s1, ratios_s1, data1) <NEW_LINE> align_s2 = _align(len_s2, s2, ratios_s2, data2) <NEW_LINE> return align_s1, align_s2 | Compute the alignment of segments s1 and s2,
such that the first and last elements of s1 and s2 are the same, but nothing else.
:return_type: list of (list of QPointF, int)
:returns: List of wall parts such that the first point is the vertex.
The integer is the id of the point (if it corresponds to one). | 625941b08a43f66fc4b53dbd |
def crypt_private(passwd, passwd_hash, hash_prefix='$P$'): <NEW_LINE> <INDENT> itoa64 = './0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' <NEW_LINE> output = '*0' <NEW_LINE> if passwd_hash[0:2] == output: <NEW_LINE> <INDENT> output = '*1' <NEW_LINE> <DEDENT> if passwd_hash[0:3] != hash_prefix: <NEW_LINE> <INDENT> return output <NEW_LINE> <DEDENT> count_log2 = itoa64.index(passwd_hash[3]) <NEW_LINE> if count_log2<7 or count_log2>30: <NEW_LINE> <INDENT> return output <NEW_LINE> <DEDENT> count = 1<<count_log2 <NEW_LINE> salt = passwd_hash[4:12] <NEW_LINE> if len(salt) != 8: <NEW_LINE> <INDENT> return output <NEW_LINE> <DEDENT> m = hashlib.md5() <NEW_LINE> m.update(salt) <NEW_LINE> m.update(passwd) <NEW_LINE> tmp_hash = m.digest() <NEW_LINE> for i in xrange(count): <NEW_LINE> <INDENT> m = hashlib.md5() <NEW_LINE> m.update(tmp_hash) <NEW_LINE> m.update(passwd) <NEW_LINE> tmp_hash = m.digest() <NEW_LINE> <DEDENT> output = passwd_hash[0:12]+encode64(tmp_hash, 16, itoa64) <NEW_LINE> return output | Hash password, using same salt and number of
iterations as in passwd_hash.
This is useful when you want to check password match.
In this case you pass your raw password and password
hash to this function and then compare its return
value with password hash again:
is_valid = (crypt_private(passwd, hash) == hash)
hash_prefix is used to check that passwd_hash is of
supported type. It is compared with the first 3 chars of
passwd_hash and, if it does not match, an error value is returned.
NOTE: all arguments must be ASCII strings, not unicode!
If you want to support unicode passwords, you could
use any encoding you want. For compatibility with PHP
it is recommended to use UTF-8:
passwd_ascii = passwd.encode('utf-8')
is_valid = (crypt_private(passwd_ascii, hash) == hash)
Here hash is already assumed to be an ASCII string.
In case of error '*0' is usually returned. But if passwd_hash
begins with '*0', then '*1' is returned to prevent false
positive results of password check. | 625941b0d164cc6175782a99 |
def testIsGrantha_FalseCases(self): <NEW_LINE> <INDENT> print("Testing TamilLetter.is_grantha() with aytham", end=' ') <NEW_LINE> letter = AYTHAM <NEW_LINE> self.assertFalse(TamilLetter.is_grantha(letter), "\'%s\' is Aytham, but is_grantha() returned True" %letter) <NEW_LINE> print(".... pass") <NEW_LINE> print("Testing TamilLetter.is_grantha() with vowels", end=' ') <NEW_LINE> vowels = TamilLetter.get_vowels() <NEW_LINE> for vowel in vowels: <NEW_LINE> <INDENT> self.assertFalse(TamilLetter.is_grantha(vowel), "\'%s\' is a vowel, but is_grantha() returned True" %vowel) <NEW_LINE> <DEDENT> print(".... pass") <NEW_LINE> print("Testing TamilLetter.is_grantha() with combinations", end=' ') <NEW_LINE> combinations = list(COMBINATIONS.values()) <NEW_LINE> for combination in combinations: <NEW_LINE> <INDENT> self.assertFalse(TamilLetter.is_grantha(combination), "\'%s\' is a combination, but is_grantha() returned True" %combination) <NEW_LINE> <DEDENT> print(".... pass") <NEW_LINE> print("Testing TamilLetter.is_grantha() with vallinam consonant", end=' ') <NEW_LINE> for consonant in VALLINAM_CONSONANTS: <NEW_LINE> <INDENT> self.assertFalse(TamilLetter.is_grantha(consonant), "\'%s\' is a vallinam consonant, but is_grantha() returned True" %consonant) <NEW_LINE> <DEDENT> print(".... pass") <NEW_LINE> print("Testing TamilLetter.is_grantha() with mellinam consonant", end=' ') <NEW_LINE> for consonant in MELLINAM_CONSONANTS: <NEW_LINE> <INDENT> self.assertFalse(TamilLetter.is_grantha(consonant), "\'%s\' is an mellinam consonant, but is_grantha() returned True" %consonant) <NEW_LINE> <DEDENT> print(".... pass") <NEW_LINE> print("Testing TamilLetter.is_grantha() with idaiyinam consonant", end=' ') <NEW_LINE> for consonant in IDAIYINAM_CONSONANTS: <NEW_LINE> <INDENT> self.assertFalse(TamilLetter.is_grantha(consonant), "\'%s\' is an idaiyinam consonant, but is_grantha() returned True" %consonant) <NEW_LINE> <DEDENT> print(".... pass") | is_grantha() should return False for aytham and for all vowels, combinations and vallinam, mellinam and idaiyinam consonants | 625941b07b180e01f3dc455c |
def CalcFFT(self): <NEW_LINE> <INDENT> if not (self.flags["timeCalculated"]): <NEW_LINE> <INDENT> self.CalcTime() <NEW_LINE> <DEDENT> self.FFTData = np.fft.fft(self.Data[:, :4], axis=0) / (self.Data.shape[0]) <NEW_LINE> self.fftFreqs = np.fft.fftfreq(self.Data.shape[0], self.MeanDeltaT) <NEW_LINE> self.flags["FFTCalculated"] = True <NEW_LINE> fftPeakindexiposfreq = int(self.Data.shape[0] / 3) <NEW_LINE> self.REFFfftPeakIndex = ( np.argmax(abs(self.FFTData[1:fftPeakindexiposfreq, 3])) + 1 ) <NEW_LINE> self.FFTFreqPeak = self.fftFreqs[self.REFFfftPeakIndex] <NEW_LINE> self.FFTAmplitudePeak = abs(self.FFTData[self.REFFfftPeakIndex, :]) <NEW_LINE> self.FFTPhiPeak = np.angle(self.FFTData[self.REFFfftPeakIndex, :]) | Returns
-------
None. | 625941b076d4e153a657e883 |
def test_database_populate(cli_runner, db): <NEW_LINE> <INDENT> result = cli_runner(cli.database_cli, ['populate']) <NEW_LINE> assert result.exit_code == 0 <NEW_LINE> assert result.output.endswith('Database populate completed.\n') | Test `claimstore database populate` command. | 625941b02c8b7c6e89b35518 |
def __repr__(self): <NEW_LINE> <INDENT> return FS_REPR.format(type(self).__name__, super(SimpleNamespace, self).__repr__()) | Object representation | 625941b082261d6c526ab1f1 |
def _make_estimator(self, append=True): <NEW_LINE> <INDENT> estimator = clone(self.base_estimator_) <NEW_LINE> estimator.set_params(**dict((p, getattr(self, p)) for p in self.estimator_params)) <NEW_LINE> if append: <NEW_LINE> <INDENT> self.estimators_.append(estimator) <NEW_LINE> <DEDENT> return estimator | Make and configure a copy of the `base_estimator_` attribute.
Warning: This method should be used to properly instantiate new
sub-estimators. | 625941b05166f23b2e1a4ead |
def load_dataset(self, ds, level=1): <NEW_LINE> <INDENT> is_parent = level==1 <NEW_LINE> levsep = is_parent and "/--------" or "|__.." <NEW_LINE> treelog.info( "%s%s%s (%s)", level * ' ', levsep, ds.__class__.__name__, (is_parent and "parent" or level)) <NEW_LINE> for ref_ds in ds.meta.references: <NEW_LINE> <INDENT> r = ref_ds.shared_instance(default_refclass=self.dataclass) <NEW_LINE> new_level = level+1 <NEW_LINE> self.load_dataset(r, level=new_level) <NEW_LINE> <DEDENT> self.attach_storage_medium(ds) <NEW_LINE> if ds in self.loaded: <NEW_LINE> <INDENT> self.loaded.referenced(ds, level) <NEW_LINE> return <NEW_LINE> <DEDENT> log.info("LOADING rows in %s", ds) <NEW_LINE> ds.meta.storage_medium.visit_loader(self) <NEW_LINE> registered = False <NEW_LINE> for key, row in ds: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> self.resolve_row_references(ds, row) <NEW_LINE> if not isinstance(row, DataRow): <NEW_LINE> <INDENT> row = row(ds) <NEW_LINE> <DEDENT> def column_vals(): <NEW_LINE> <INDENT> for c in row.columns(): <NEW_LINE> <INDENT> yield (c, self.resolve_stored_object(getattr(row, c))) <NEW_LINE> <DEDENT> <DEDENT> obj = ds.meta.storage_medium.save(row, column_vals()) <NEW_LINE> ds.meta._stored_objects.store(key, obj) <NEW_LINE> ds._setdata(key, row) <NEW_LINE> if not registered: <NEW_LINE> <INDENT> self.loaded.register(ds, level) <NEW_LINE> registered = True <NEW_LINE> <DEDENT> <DEDENT> except Exception: <NEW_LINE> <INDENT> etype, val, tb = sys.exc_info() <NEW_LINE> reraise(LoadError, LoadError(etype, val, ds, key=key, row=row)) | load this dataset and all its dependent datasets.
level is essentially the order of processing (going from dataset to
dependent datasets). Child datasets are always loaded before the
parent. The level is important for visualizing the chain of
dependencies : 0 is the bottom, and thus should be the first set of
objects unloaded | 625941b08e71fb1e9831d502 |
def get_minibatch(self, sentences, tokenize=False, add_start_end=True): <NEW_LINE> <INDENT> if tokenize: <NEW_LINE> <INDENT> sentences = [nltk.word_tokenize(sentence) for sentence in sentences] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> sentences = [sentence.split() for sentence in sentences] <NEW_LINE> <DEDENT> if add_start_end: <NEW_LINE> <INDENT> sentences = [['<s>'] + sentence + ['</s>'] for sentence in sentences] <NEW_LINE> <DEDENT> lens = [len(sentence) for sentence in sentences] <NEW_LINE> sorted_idx = np.argsort(lens)[::-1] <NEW_LINE> sorted_sentences = [sentences[idx] for idx in sorted_idx] <NEW_LINE> rev = np.argsort(sorted_idx) <NEW_LINE> sorted_lens = [len(sentence) for sentence in sorted_sentences] <NEW_LINE> max_len = max(sorted_lens) <NEW_LINE> sentences = [ [self.task_word2id[w] if w in self.task_word2id else self.task_word2id['<unk>'] for w in sentence] + [self.task_word2id['<pad>']] * (max_len - len(sentence)) for sentence in sorted_sentences ] <NEW_LINE> sentences = Variable(torch.LongTensor(sentences), volatile=True) <NEW_LINE> rev = Variable(torch.LongTensor(rev), volatile=True) <NEW_LINE> lengths = sorted_lens <NEW_LINE> if self.cuda: <NEW_LINE> <INDENT> sentences = sentences.cuda() <NEW_LINE> rev = rev.cuda() <NEW_LINE> <DEDENT> return { 'sentences': sentences, 'lengths': lengths, 'rev': rev } | Prepare minibatch. | 625941b06aa9bd52df036af7 |
def __init__(self, size=0, position=(0, 0)): <NEW_LINE> <INDENT> self.__size = size <NEW_LINE> if type(self.__size) != int: <NEW_LINE> <INDENT> raise TypeError('size must be an integer') <NEW_LINE> <DEDENT> if self.__size < 0: <NEW_LINE> <INDENT> raise ValueError('size must be >= 0') <NEW_LINE> <DEDENT> self.__position = position <NEW_LINE> m = 'position must be a tuple of 2 positive integers' <NEW_LINE> if type(self.__position) != tuple or len(self.__position) != 2: <NEW_LINE> <INDENT> raise TypeError(m) <NEW_LINE> <DEDENT> for element in self.__position: <NEW_LINE> <INDENT> if type(element) != int: <NEW_LINE> <INDENT> raise TypeError(m) | Example of docstring on the __init__ method.
Args:
size (int): size of square.
position (tuple): position of square | 625941b03317a56b869399ba |
def Clone(self): <NEW_LINE> <INDENT> pass | Clone(self: Vector3DAnimation) -> Vector3DAnimation
Creates a modifiable clone of this System.Windows.Media.Animation.Vector3DAnimation, making deep
copies of this object's values. When copying dependency properties,this method copies resource
references and data bindings (but they might no longer resolve) but not animations or their
current values.
Returns: A modifiable clone of the current object. The cloned object's System.Windows.Freezable.IsFrozen
property will be false even if the source's System.Windows.Freezable.IsFrozen property was true. | 625941b0046cf37aa974caa0 |
def join_path(*args): <NEW_LINE> <INDENT> return '/'.join(args) | Join an arbitrary number of strings as a Linux/Unix file path
Used in place of ``os.path.join`` so tests can run on Windows or Linux
without generating false positives.
Example::
>>> join_path('some', 'directory')
some/directory
>>> join_path('/home', )
:Returns: String
:param args: **Required** The strings to join together
:type args: String | 625941b0d8ef3951e3243293 |
def parser_xls_repescagem(self, file): <NEW_LINE> <INDENT> wb = xlrd.open_workbook(file) <NEW_LINE> s = wb.sheet_by_name('Plan1') <NEW_LINE> result = {} <NEW_LINE> for row in range(s.nrows): <NEW_LINE> <INDENT> for col in range(s.ncols): <NEW_LINE> <INDENT> cel = str(s.cell(row, col)).replace('text:', '').replace("'", '') <NEW_LINE> for item, value in self.maper_txt.items(): <NEW_LINE> <INDENT> if value not in result.keys(): <NEW_LINE> <INDENT> if cel.startswith(item): <NEW_LINE> <INDENT> result[value] = self.format_cel_value(cel, item = item) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> for col_nom in self.maper.keys(): <NEW_LINE> <INDENT> if col_nom not in result.keys(): <NEW_LINE> <INDENT> result[col_nom] = '' <NEW_LINE> <DEDENT> <DEDENT> result['file'] = file <NEW_LINE> return result | Parses the .xls file in a more expensive way, searching
each cell of the spreadsheet for the presence of the field names.
This method should be used only for the files that could not
be parsed by the .parser_xls_file method | 625941b082261d6c526ab1f3 |
def rpminfo(ui): <NEW_LINE> <INDENT> result = set() <NEW_LINE> rpmbin = ui.config("rage", "rpmbin", "rpm") <NEW_LINE> for name in ["hg", "hg.real"]: <NEW_LINE> <INDENT> path = which(name) <NEW_LINE> if not path: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> result.add(shcmd("%s -qf %s" % (rpmbin, path), check=False)) <NEW_LINE> <DEDENT> return "".join(result) | FBONLY: Information about RPM packages | 625941b05166f23b2e1a4eaf |
def _eval(self, indiv): <NEW_LINE> <INDENT> x = indiv[0] <NEW_LINE> return (exp(-2*log(2)*((x-0.08)/0.854)**2))*sin(5*pi*(x**(3.0/4)-0.05))**6 | f(x) = (exp^(...))*sin^6(5*pi*x)
| 625941b08e71fb1e9831d504 |
def testParseSystem(self): <NEW_LINE> <INDENT> knowledge_base_values = {u'current_control_set': u'ControlSet001'} <NEW_LINE> test_file = self._GetTestFilePath([u'SYSTEM']) <NEW_LINE> event_queue_consumer = self._ParseFile( self._parser, test_file, knowledge_base_values=knowledge_base_values) <NEW_LINE> event_objects = self._GetEventObjectsFromQueue(event_queue_consumer) <NEW_LINE> parser_chains = self._GetParserChains(event_objects) <NEW_LINE> plugin_names = [ u'windows_usbstor_devices', u'windows_boot_execute', u'windows_services'] <NEW_LINE> for plugin in plugin_names: <NEW_LINE> <INDENT> expected_chain = self._PluginNameToParserChain(plugin) <NEW_LINE> self.assertTrue( expected_chain in parser_chains, u'Chain {0:s} not found in events.'.format(expected_chain)) <NEW_LINE> <DEDENT> self.assertEqual(parser_chains.get( self._PluginNameToParserChain(u'windows_usbstor_devices'), 0), 3) <NEW_LINE> self.assertEqual(parser_chains.get( self._PluginNameToParserChain(u'windows_boot_execute'), 0), 2) <NEW_LINE> self.assertEqual(parser_chains.get( self._PluginNameToParserChain(u'windows_services'), 0), 831) | Tests the Parse function on a SYSTEM file. | 625941b0bf627c535bc12f25 |
def default_frame(self): <NEW_LINE> <INDENT> return self._def_frame | Return the default frame on ``self``.
OUTPUT:
- a local frame as an instance of
:class:`~sage.manifolds.local_frame.LocalFrame`
EXAMPLES::
sage: M = Manifold(3, 'M', structure='top')
sage: E = M.vector_bundle(2, 'E')
sage: e = E.local_frame('e')
sage: E.default_frame()
Local frame (E|_M, (e_0,e_1)) | 625941b08a43f66fc4b53dc1 |
def vim_global(name, kind=str): <NEW_LINE> <INDENT> ret = GLOBALS_DEFAULTS.get(name, None) <NEW_LINE> try: <NEW_LINE> <INDENT> vname = "autotag" + name <NEW_LINE> v_buffer = "b:" + vname <NEW_LINE> exists_buffer = (vim.eval("exists('%s')" % v_buffer) == "1") <NEW_LINE> v_global = "g:" + vname <NEW_LINE> exists_global = (vim.eval("exists('%s')" % v_global) == "1") <NEW_LINE> if exists_buffer: <NEW_LINE> <INDENT> ret = vim.eval(v_buffer) <NEW_LINE> <DEDENT> elif exists_global: <NEW_LINE> <INDENT> ret = vim.eval(v_global) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if isinstance(ret, int): <NEW_LINE> <INDENT> vim.command("let %s=%s" % (v_global, ret)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> vim.command("let %s=\"%s\"" % (v_global, ret)) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> finally: <NEW_LINE> <INDENT> if kind == bool: <NEW_LINE> <INDENT> ret = (ret not in [0, "0"]) <NEW_LINE> <DEDENT> elif kind == int: <NEW_LINE> <INDENT> ret = int(ret) <NEW_LINE> <DEDENT> elif kind == str: <NEW_LINE> <INDENT> ret = str(ret) <NEW_LINE> <DEDENT> <DEDENT> return ret | Get global variable from vim, cast it appropriately | 625941b0293b9510aa2c2fe9 |
def p_statement_block(p): <NEW_LINE> <INDENT> p[0] = PT_Block(p[1]) | statement : block | 625941b03317a56b869399bc |
def __init__(self, api_url, key): <NEW_LINE> <INDENT> self.api_url = api_url <NEW_LINE> self.headers = {'content-type': CONTENT_TYPE_JSON, 'X-Api-Key': key} <NEW_LINE> self.printer_last_reading = [{}, None] <NEW_LINE> self.job_last_reading = [{}, None] | Initialize OctoPrint API and set headers needed later. | 625941b0d8ef3951e3243295 |
def _collectSubstratesFromResults( results ): <NEW_LINE> <INDENT> substrates = collections.OrderedDict() <NEW_LINE> for res in results: <NEW_LINE> <INDENT> for lbl in res.foundLabels: <NEW_LINE> <INDENT> if lbl.sub is None: <NEW_LINE> <INDENT> fpath = misc.getLabelableFilePath(res.original) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> fpath = misc.getLabelableFilePath(lbl) <NEW_LINE> <DEDENT> if not fpath.filename: <NEW_LINE> <INDENT> subpath = getFSPath( res.original ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> subpath = getFSPath( fpath ) <NEW_LINE> <DEDENT> if subpath in substrates: <NEW_LINE> <INDENT> substrates[subpath].append( lbl ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> substrates[subpath] = [lbl] <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return substrates | Collect all image substrates of found labels;
if the foundLabel does not have a path, use the path from
the original | 625941b073bcbd0ca4b2bdcd |
def _get_target_V( self, batch: ReplayBufferSample, task_info: TaskInfo ) -> TensorType: <NEW_LINE> <INDENT> mtobs = MTObs(env_obs=batch.next_env_obs, task_obs=None, task_info=task_info) <NEW_LINE> _, policy_action, log_pi, _ = self.actor(mtobs=mtobs) <NEW_LINE> target_Q1, target_Q2 = self.critic_target(mtobs=mtobs, action=policy_action) <NEW_LINE> return ( torch.min(target_Q1, target_Q2) - self.get_alpha(env_index=batch.task_obs).detach() * log_pi ) | Compute the target values.
Args:
batch (ReplayBufferSample): batch from the replay buffer.
task_info (TaskInfo): task_info object.
Returns:
TensorType: target values. | 625941b04f6381625f114797 |
def chunks(l, n): <NEW_LINE> <INDENT> return [l[i:i + n] for i in range(0, len(l), n)] | split list l into chunks of size n | 625941b08e71fb1e9831d506 |
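For illustration, the last chunk keeps the remainder when `len(l)` is not a multiple of `n`:

```python
print(chunks([1, 2, 3, 4, 5], 2))  # [[1, 2], [3, 4], [5]]
```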
def _fill_context(self, context, model): <NEW_LINE> <INDENT> return context | Fills the template generation context with data from the model.
The default implementation does nothing. You can override this if your template requires additional
context data that can be filled from the model.
Args:
context - The template context.
model - The model of the currently rendered row
Returns:
The context after it's been updated. | 625941b03346ee7daa2b2abf |
def offspring(self, parent1, parent2): <NEW_LINE> <INDENT> child = np.random.rand(Hypers.nchrom) <NEW_LINE> for n in range(Hypers.nchrom): <NEW_LINE> <INDENT> rando = np.random.rand(1) <NEW_LINE> child[n] = rando*parent1[n] + (1-rando)*parent2[n] <NEW_LINE> <DEDENT> return(child) | Return the child of both parents, taking each gene as a random convex combination of the two parents' genes. | 625941b0d164cc6175782a9f |
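The per-gene loop above is a blend (arithmetic) crossover; an equivalent vectorized sketch, with the gene count passed explicitly instead of read from `Hypers`:

```python
import numpy as np

def offspring(parent1, parent2, nchrom):
    r = np.random.rand(nchrom)              # one mixing weight per gene
    return r * parent1 + (1 - r) * parent2  # gene-wise convex combination
```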
def __init__(self, auth, home=None): <NEW_LINE> <INDENT> self.auth = auth <NEW_LINE> self.homedata = None <NEW_LINE> self.home_ids = [] <NEW_LINE> self.home_names = [] <NEW_LINE> self.room_names = [] <NEW_LINE> self.schedules = [] <NEW_LINE> self.home = home <NEW_LINE> self.home_id = None | Initialize the HomeData object. | 625941b06aa9bd52df036af9 |
@click.option('--username', prompt=True, default=lambda: os.environ.get('USER', '')) <NEW_LINE> @account.command(help='Connect to baguette.io.') <NEW_LINE> def login(username): <NEW_LINE> <INDENT> password = click.prompt('Password', hide_input=True) <NEW_LINE> if api.login(username, password): <NEW_LINE> <INDENT> click.echo('Successfully logged in as {0}.'.format(username)) <NEW_LINE> return True <NEW_LINE> <DEDENT> click.echo('Authentication failed, please check your credentials.') <NEW_LINE> return False | Connect to baguette.io using username/password.
:param username: The username to log in with.
:type username: str
:returns: The status of the login.
:rtype: bool | 625941b03317a56b869399be |
def leg_count(self): <NEW_LINE> <INDENT> return self.legs | Return the number of legs this Arthropod possesses. | 625941b00a50d4780f666be8 |
def get_all_time_entry(self, user, spent_on=None): <NEW_LINE> <INDENT> redmine = Redmine(url=self.url, key=user.authkey) <NEW_LINE> try: <NEW_LINE> <INDENT> time_entries = list() <NEW_LINE> r_user_id = redmine.auth().id <NEW_LINE> r_time_entries = redmine.time_entry.filter(user_id=r_user_id, spent_on=spent_on) <NEW_LINE> for r_time_entry in r_time_entries: <NEW_LINE> <INDENT> if 'issue' in dir(r_time_entry): <NEW_LINE> <INDENT> time_entry = TimeEntry( id=r_time_entry.id, user=user, issue_id=r_time_entry.issue.id, spent_on=r_time_entry.spent_on, hours=r_time_entry.hours, comments=r_time_entry.comments) <NEW_LINE> time_entries.append(time_entry) <NEW_LINE> <DEDENT> <DEDENT> return time_entries <NEW_LINE> <DEDENT> except AuthError: <NEW_LINE> <INDENT> return list() | Get all time entry from redmine for user.
:param tracktime.models.User user:
:param spent_on:
:rtype: list | 625941b00a366e3fb873e56e |
def testFixedDisplay1(self): <NEW_LINE> <INDENT> V.ArithmeticClass(Options(dict(arithmetic='fixed', precision=6, display=7))) <NEW_LINE> self.assertEqual(F.display, 6) <NEW_LINE> self.assertTrue(F.info.find('display') < 0) | fixed display must be <= precision | 625941b0d8ef3951e3243297 |
def draw_side1(x, y, window): <NEW_LINE> <INDENT> draw_die_face(x, y, window) <NEW_LINE> draw_die_hole(x, y, window) | - This function draws the side of a die with one dot in the center
- draw_die_face(x, y, window)
- draw_die_hole(x, y, window) | 625941b0bf627c535bc12f29 |
def sortList(self, head): <NEW_LINE> <INDENT> if not head or not head.next: <NEW_LINE> <INDENT> return head <NEW_LINE> <DEDENT> slow = head <NEW_LINE> fast = head <NEW_LINE> while fast.next and fast.next.next: <NEW_LINE> <INDENT> slow = slow.next <NEW_LINE> fast = fast.next.next <NEW_LINE> <DEDENT> A = head <NEW_LINE> B = slow.next <NEW_LINE> slow.next = None <NEW_LINE> headA = self.sortList(A) <NEW_LINE> headB = self.sortList(B) <NEW_LINE> ans = self.merge(headA, headB) <NEW_LINE> return ans | :type head: ListNode
:rtype: ListNode | 625941b03c8af77a43ae34fa |
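The row above splits the list with slow/fast pointers and relies on a `merge` helper that is not shown; a minimal sketch of the standard two-list merge it presumably performs, assuming the usual `ListNode(val)` definition:

```python
def merge(a, b):
    dummy = tail = ListNode(0)
    while a and b:
        if a.val <= b.val:
            tail.next, a = a, a.next
        else:
            tail.next, b = b, b.next
        tail = tail.next
    tail.next = a or b  # append whichever list still has nodes
    return dummy.next
```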
def finalize(self): <NEW_LINE> <INDENT> if self._is_finalized: <NEW_LINE> <INDENT> raise SHMACIssue.already_finalized() <NEW_LINE> <DEDENT> self._is_finalized = True <NEW_LINE> self._set_final_result() <NEW_LINE> self._result_is_ready = True <NEW_LINE> self._mac = DeletedAttribute(SHMACIssue.already_finalized) <NEW_LINE> return self._result | Caps off the instance's validation hash object & populates the
instance's final result with an HMAC of its state. This signals
the end of a stream of data that can be validated with the
current instance. | 625941b06aa9bd52df036afb |
def __init__(self): <NEW_LINE> <INDENT> self.throw() | Parameterless constructor | 625941b0f9cc0f698b14035b |
def check_for_msn_groups_headers(self, msg, target=None): <NEW_LINE> <INDENT> to = ''.join(msg.get_decoded_header('To')) <NEW_LINE> if not Regex(r"<(\S+)\@groups\.msn\.com>").search(to): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> listname = Regex(r"<(\S+)\@groups\.msn\.com>").match(to).groups()[0] <NEW_LINE> server_rgx = Regex(r"from mail pickup service by " r"((?:p\d\d\.)groups\.msn\.com)\b") <NEW_LINE> server = '' <NEW_LINE> for rcvd in msg.get_decoded_header('Received'): <NEW_LINE> <INDENT> if server_rgx.search(rcvd): <NEW_LINE> <INDENT> server = server_rgx.search(rcvd).groups()[0] <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> if not server: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> message_id = ''.join(msg.get_decoded_header('Message-Id')) <NEW_LINE> if listname == "notifications": <NEW_LINE> <INDENT> if not Regex(r"^<\S+\@{0}".format(server)).search(message_id): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> msn_addr = Regex(r"^<{0}-\S+\@groups\.msn\.com>".format(listname)) <NEW_LINE> if not msn_addr.search(message_id): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> msn_addr = "{0}[email protected]".format(listname) <NEW_LINE> if msg.sender_address != msn_addr: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> return True | Check if the email's destination is a msn group | 625941b0b57a9660fec335d3 |
def re_compile_maybe_verbose(regex: str) -> Pattern[str]: <NEW_LINE> <INDENT> if '\n' in regex: <NEW_LINE> <INDENT> regex = '(?x)' + regex <NEW_LINE> <DEDENT> compiled: Pattern[str] = re.compile(regex) <NEW_LINE> return compiled | Compile a regular expression string in `regex`.
If it contains newlines, use verbose mode. | 625941b04a966d76dd550d66 |
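Usage sketch: a multi-line pattern is compiled with `(?x)`, so whitespace and `#` comments inside it are ignored:

```python
pattern = re_compile_maybe_verbose(r"""
    \d{4}  # year
    -
    \d{2}  # month
""")
assert pattern.match("2024-05")
```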
def read(self): <NEW_LINE> <INDENT> while not self.buffer: <NEW_LINE> <INDENT> self._read() <NEW_LINE> <DEDENT> data = self.buffer.pop(0) <NEW_LINE> try: <NEW_LINE> <INDENT> json_data = json_converter.loads(data) <NEW_LINE> _logger.debug("Data read %r", json_data) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> _logger.warning("Could not decode data %r", data) <NEW_LINE> raise <NEW_LINE> <DEDENT> return json_data | This method waits until new data is available at the connection
or in the buffer and returns it to the caller. | 625941b0507cdc57c6306a25 |
def get_property(self, name): <NEW_LINE> <INDENT> if name != "mode": <NEW_LINE> <INDENT> raise UnsupportedProperty(name) <NEW_LINE> <DEDENT> sound_mode = self.entity.attributes.get(media_player.ATTR_SOUND_MODE) <NEW_LINE> if sound_mode and sound_mode.upper() in self.VALID_SOUND_MODES: <NEW_LINE> <INDENT> return sound_mode.upper() <NEW_LINE> <DEDENT> return None | Read and return a property. | 625941b0b5575c28eb68dd58 |
def get_move(self, game, time_left): <NEW_LINE> <INDENT> self.time_left = time_left <NEW_LINE> self.best_move = (-1, -1) <NEW_LINE> if game.get_legal_moves(): <NEW_LINE> <INDENT> self.best_move = game.get_legal_moves()[0] <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> for depth in range(0, 10000): <NEW_LINE> <INDENT> self.best_move = self.alphabeta(game, depth) <NEW_LINE> <DEDENT> <DEDENT> except SearchTimeout: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> return self.best_move | Search for the best move from the available legal moves and return a
result before the time limit expires.
Modify the get_move() method from the MinimaxPlayer class to implement
iterative deepening search instead of fixed-depth search.
**********************************************************************
NOTE: If time_left() < 0 when this function returns, the agent will
forfeit the game due to timeout. You must return _before_ the
timer reaches 0.
**********************************************************************
Parameters
----------
game : `isolation.Board`
An instance of `isolation.Board` encoding the current state of the
game (e.g., player locations and blocked cells).
time_left : callable
A function that returns the number of milliseconds left in the
current turn. Returning with any less than 0 ms remaining forfeits
the game.
Returns
-------
(int, int)
Board coordinates corresponding to a legal move; may return
(-1, -1) if there are no available legal moves. | 625941b0e5267d203edcd9fe |
def approximate(func, from_val, to_val, degree): <NEW_LINE> <INDENT> print('------ Started Calculating Approximation ------') <NEW_LINE> print() <NEW_LINE> print('f(x) = {0}'.format(func)) <NEW_LINE> print() <NEW_LINE> start_time = time.time() <NEW_LINE> orth_basis = orthonormal_basis(_get_float_value(from_val), _get_float_value(to_val), degree) <NEW_LINE> print('Orthonormal basis:') <NEW_LINE> for idx, ele in enumerate(orth_basis): <NEW_LINE> <INDENT> print('e{0} = {1}'.format(idx + 1, ele)) <NEW_LINE> <DEDENT> print() <NEW_LINE> x = sp.Symbol('x') <NEW_LINE> res = 0 <NEW_LINE> func_str = '({0})'.format(func) <NEW_LINE> for idx, e_j in enumerate(orth_basis): <NEW_LINE> <INDENT> start_time_e_j = time.time() <NEW_LINE> if idx is 0: <NEW_LINE> <INDENT> print('Calculating projection on span(e{0}) --- '.format(idx + 1), end='') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print('Calculating projection on span(e1,...,e{0}) --- '.format(idx + 1), end='') <NEW_LINE> <DEDENT> e_j_str = '({0})'.format(e_j.standard_coeff_rep(show_mul_op=True, double_stars=True)) <NEW_LINE> product_str = '{0} * {1}'.format(func_str, e_j_str) <NEW_LINE> func_product = parse_expr(product_str) <NEW_LINE> tmp = Integral(func_product, (x, from_val, to_val)).as_sum(100, method="midpoint").n() <NEW_LINE> tmp *= parse_expr(e_j_str) <NEW_LINE> res += tmp <NEW_LINE> end_time_e_j = time.time() <NEW_LINE> print('%.2fs' % (end_time_e_j - start_time_e_j)) <NEW_LINE> tmp_res = str(res) <NEW_LINE> tmp_res = tmp_res.replace('**', '^') <NEW_LINE> tmp_res = tmp_res.replace('*', '') <NEW_LINE> tmp_res = Polynomial(tmp_res) <NEW_LINE> print() <NEW_LINE> print(' f{0}(x) = {1}'.format(idx + 1, tmp_res)) <NEW_LINE> print() <NEW_LINE> <DEDENT> end_time = time.time() <NEW_LINE> print() <NEW_LINE> print("Duration: %.2fs" % (end_time - start_time)) <NEW_LINE> print() <NEW_LINE> print('------ Finished Calculating Approximation ------') <NEW_LINE> res = str(res) <NEW_LINE> res = res.replace('**', '^') <NEW_LINE> res = res.replace('*', '') <NEW_LINE> res = Polynomial(res) <NEW_LINE> return res | Approximate given continuous function over real numbers, using a polynomial of given degree, with inner product
defined as <f, g> = INTEGRATE f(x) * g(x) dx from a to b.
:param func: Function to approximate represented in a string, with variable as 'x'.
:param from_val: a as a string.
:param to_val: b as a string.
:param degree: Highest degree of result polynomial, as an integer.
:return: Approximated polynomial function in string format. | 625941b02c8b7c6e89b35522 |
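The loop above is an orthogonal projection onto the span of the orthonormal basis; in the docstring's notation, and in exact arithmetic (the code approximates each integral by a 100-point midpoint sum), the approximation is

```latex
f_n(x) = \sum_{j} \langle f, e_j \rangle \, e_j(x),
\qquad
\langle f, e_j \rangle = \int_a^b f(x)\, e_j(x)\, dx
```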
def spin_wigner(rho, theta, phi): <NEW_LINE> <INDENT> if rho.type == 'bra': <NEW_LINE> <INDENT> rho = rho.dag() <NEW_LINE> <DEDENT> if rho.type == 'ket': <NEW_LINE> <INDENT> rho = ket2dm(rho) <NEW_LINE> <DEDENT> J = rho.shape[0] <NEW_LINE> j = (J - 1) / 2 <NEW_LINE> THETA, PHI = meshgrid(theta, phi) <NEW_LINE> W = np.zeros_like(THETA, dtype=complex) <NEW_LINE> for k in range(int(2 * j)+1): <NEW_LINE> <INDENT> for q in arange(-k, k+1): <NEW_LINE> <INDENT> W += _rho_kq(rho, j, k, q) * sph_harm(q, k, PHI, THETA) <NEW_LINE> <DEDENT> <DEDENT> return W.real, THETA, PHI | Wigner function for a spin-j system.
The spin W function is normal when integrated over the surface of the
sphere
.. math:: \sqrt{\frac{4 \pi}{2j + 1}}\int_\phi \int_\theta
W(\theta,\phi) \sin(\theta) d\theta d\phi = 1
Parameters
----------
state : qobj
A state vector or density matrix for a spin-j quantum system.
theta : array_like
Polar (colatitude) angle at which to calculate the W function.
phi : array_like
Azimuthal angle at which to calculate the W function.
Returns
-------
W, THETA, PHI : 2d-array
Values representing the spin Wigner function at the values specified
by THETA and PHI.
References
----------
[1] Agarwal, G. S. (1981). Phys. Rev. A, 24(6), 2889–2896.
https://doi.org/10.1103/PhysRevA.24.2889
[2] Dowling, J. P., Agarwal, G. S., & Schleich, W. P. (1994).
Phys. Rev. A, 49(5), 4101–4109. https://doi.org/10.1103/PhysRevA.49.4101
[3] Conversion between Wigner 3-j symbol and Clebsch-Gordan coefficients
taken from Wikipedia (https://en.wikipedia.org/wiki/3-j_symbol) | 625941b04f6381625f11479d |
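A usage sketch, assuming QuTiP's spin_state constructor for the input state:

import numpy as np
from qutip import spin_state

theta = np.linspace(0, np.pi, 64)
phi = np.linspace(0, 2 * np.pi, 64)
# Spin Wigner function of the |j=1, m=1> Dicke state on a 64x64 angular grid.
W, THETA, PHI = spin_wigner(spin_state(1, 1), theta, phi)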
def get_location_string(self): <NEW_LINE> <INDENT> location_string = ('Rows ' + str(self.r_1) + '-' + str(self.r_2) + ' of ' + str(self.total_rows) + '\nCols ' + str(self.c_1) + '-' + str(self.c_2) + ' of ' + str(self.total_cols)) <NEW_LINE> return location_string | Creates a header showing where the top left df corner is | 625941b08a43f66fc4b53dc9 |
def test_operator_exceptions(self): <NEW_LINE> <INDENT> radix1 = Radix(0, [], [], [], 3) <NEW_LINE> radix2 = Radix(0, [], [], [], 2) <NEW_LINE> with self.assertRaises(BasesError): <NEW_LINE> <INDENT> radix1 > radix2 <NEW_LINE> <DEDENT> with self.assertRaises(BasesError): <NEW_LINE> <INDENT> radix1 < radix2 <NEW_LINE> <DEDENT> with self.assertRaises(BasesError): <NEW_LINE> <INDENT> radix1 <= radix2 <NEW_LINE> <DEDENT> with self.assertRaises(BasesError): <NEW_LINE> <INDENT> radix1 >= radix2 <NEW_LINE> <DEDENT> with self.assertRaises(BasesError): <NEW_LINE> <INDENT> radix1 >= 1 <NEW_LINE> <DEDENT> with self.assertRaises(BasesError): <NEW_LINE> <INDENT> radix1 == 1 <NEW_LINE> <DEDENT> with self.assertRaises(BasesError): <NEW_LINE> <INDENT> radix1 != 1 | Test that comparison operators yield exceptions. | 625941b03346ee7daa2b2ac5
def deserialize(self, str): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> if self.ONELEG is None: <NEW_LINE> <INDENT> self.ONELEG = hexapodservice.msg.legjoints() <NEW_LINE> <DEDENT> if self.ALLLEGS is None: <NEW_LINE> <INDENT> self.ALLLEGS = hexapodservice.msg.legs() <NEW_LINE> <DEDENT> end = 0 <NEW_LINE> _x = self <NEW_LINE> start = end <NEW_LINE> end += 40 <NEW_LINE> (_x.MODE, _x.SIMPLEMOTION_MODE, _x.ONELEG.coxa, _x.ONELEG.femur, _x.ONELEG.tibia, _x.ONELEG.tarsus,) = _struct_2i4d.unpack(str[start:end]) <NEW_LINE> self.ALLLEGS.leg = [] <NEW_LINE> for i in range(0, 6): <NEW_LINE> <INDENT> val1 = hexapodservice.msg.legjoints() <NEW_LINE> _x = val1 <NEW_LINE> start = end <NEW_LINE> end += 32 <NEW_LINE> (_x.coxa, _x.femur, _x.tibia, _x.tarsus,) = _struct_4d.unpack(str[start:end]) <NEW_LINE> self.ALLLEGS.leg.append(val1) <NEW_LINE> <DEDENT> return self <NEW_LINE> <DEDENT> except struct.error as e: <NEW_LINE> <INDENT> raise genpy.DeserializationError(e) | unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str`` | 625941b05e10d32532c5ec89 |
def computeActionFromValues(self, state): <NEW_LINE> <INDENT> value = util.Counter() <NEW_LINE> actions = self.mdp.getPossibleActions(state) <NEW_LINE> for action in actions: <NEW_LINE> <INDENT> value[action] = self.getQValue(state, action) <NEW_LINE> <DEDENT> return value.argMax() | The policy is the best action in the given state
according to the values currently stored in self.values.
You may break ties any way you see fit. Note that if
there are no legal actions, which is the case at the
terminal state, you should return None. | 625941b0e5267d203edcda00 |
def get_budget(self): <NEW_LINE> <INDENT> return self.budget | Returns base budget for Department | 625941b0099cdd3c635f09bd |
def intersect(self, nums1, nums2): <NEW_LINE> <INDENT> num_result = [] <NEW_LINE> for i in range(len(nums1)): <NEW_LINE> <INDENT> if nums1[i] in nums2: <NEW_LINE> <INDENT> num_result.append(nums1[i]) <NEW_LINE> nums2.remove(nums1[i]) <NEW_LINE> <DEDENT> <DEDENT> return num_result | :type nums1: List[int]
:type nums2: List[int]
:rtype: List[int] | 625941b0046cf37aa974caaa |
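A quick check of the method above (Solution is a hypothetical enclosing class; duplicates are kept once per shared occurrence because matched items are removed from nums2):

s = Solution()
print(s.intersect([1, 2, 2, 1], [2, 2]))        # [2, 2]
print(s.intersect([4, 9, 5], [9, 4, 9, 8, 4]))  # [4, 9]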
def partition(self, head, x): <NEW_LINE> <INDENT> if head is None: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> leftDummy = ListNode(0) <NEW_LINE> left = leftDummy <NEW_LINE> rightDummy = ListNode(0) <NEW_LINE> right = rightDummy <NEW_LINE> node = head <NEW_LINE> while node: <NEW_LINE> <INDENT> if node.val < x: <NEW_LINE> <INDENT> left.next = node <NEW_LINE> left = left.next <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> right.next = node <NEW_LINE> right = right.next <NEW_LINE> <DEDENT> node = node.next <NEW_LINE> <DEDENT> right.next = None <NEW_LINE> left.next = rightDummy.next <NEW_LINE> return leftDummy.next | :type head: ListNode
:type x: int
:rtype: ListNode | 625941b0d7e4931a7ee9dc7b |
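A usage sketch assuming the conventional singly linked ListNode and a hypothetical Solution class holding the method above:

class ListNode:
    def __init__(self, val=0):
        self.val = val
        self.next = None

# Build 1 -> 4 -> 3 -> 2 -> 5 -> 2 and partition around x = 3.
head = ListNode(1)
node = head
for v in [4, 3, 2, 5, 2]:
    node.next = ListNode(v)
    node = node.next
node = Solution().partition(head, 3)
out = []
while node:
    out.append(node.val)
    node = node.next
print(out)  # [1, 2, 2, 4, 3, 5]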
def getMappingTagToDirectory(tag): <NEW_LINE> <INDENT> tag = tag.replace("VO.glast.org-","") <NEW_LINE> items = tag.split("/") <NEW_LINE> if len(items) != 4: <NEW_LINE> <INDENT> return S_ERROR("Bad tag structure") <NEW_LINE> <DEDENT> soft_os = items[0] <NEW_LINE> variant = items[1] <NEW_LINE> package = items[2] <NEW_LINE> version = items[3] <NEW_LINE> directory = os.path.join("glast/ground/releases", soft_os, variant, package, version) <NEW_LINE> return S_OK(directory) | Returns the directory given a tag name
| 625941b0d8ef3951e324329d |
def edit_link_tag(self,pk,text): <NEW_LINE> <INDENT> query_str = self.request.GET.urlencode() <NEW_LINE> print("query_str",query_str) <NEW_LINE> params = QueryDict(mutable=True) <NEW_LINE> params[self.config._query_param_key] = query_str <NEW_LINE> print("params", params) <NEW_LINE> print("params_urlencode",params.urlencode()) <NEW_LINE> return mark_safe('<a href="%s?%s">%s</a>' % (self.config.get_change_url(pk), params.urlencode(), text,)) | The purpose of this function is to return a clickable tag; the current query string is preserved in the link's parameters
:param pk:
:param text:
:return: returns a clickable tag. | 625941b04f6381625f11479f
def recording_setting_update(self, meeting_id, **kwargs): <NEW_LINE> <INDENT> kwargs['_return_http_data_only'] = True <NEW_LINE> if kwargs.get('async_req'): <NEW_LINE> <INDENT> return self.recording_setting_update_with_http_info(meeting_id, **kwargs) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> (data) = self.recording_setting_update_with_http_info(meeting_id, **kwargs) <NEW_LINE> return data | Get Meeting Recording Settings # noqa: E501
Retrieve settings applied to a meeting's [Cloud Recording](https://support.zoom.us/hc/en-us/articles/203741855-Cloud-Recording).<br><br> **Scopes**: `recording:read:admin` `recording:read`<br> **[Rate Limit Label](https://marketplace.zoom.us/docs/api-reference/rate-limits#rate-limits):** `Light` <br> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.recording_setting_update(meeting_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str meeting_id: The meeting ID or meeting UUID. If the meeting ID is provided instead of UUID, the response will be for the latest meeting instance. If a UUID starts with "/" or contains "//" (example: "/ajXp112QmuoKj4854875=="), you must **double encode** the UUID before making an API request. (required)
:return: RecordingSettings
If the method is called asynchronously,
returns the request thread. | 625941b0b57a9660fec335d7 |
def config_setup( env: LibraryEnvironment, site_list, arbitrator_list, instance_name=None, overwrite_existing=False, ): <NEW_LINE> <INDENT> instance_name = instance_name or constants.DEFAULT_INSTANCE_NAME <NEW_LINE> report_processor = env.report_processor <NEW_LINE> report_processor.report_list( config_validators.check_instance_name(instance_name) ) <NEW_LINE> report_processor.report_list( config_validators.create(site_list, arbitrator_list) ) <NEW_LINE> if report_processor.has_errors: <NEW_LINE> <INDENT> raise LibraryError() <NEW_LINE> <DEDENT> booth_env = env.get_booth_env(instance_name) <NEW_LINE> booth_conf = booth_env.create_facade(site_list, arbitrator_list) <NEW_LINE> booth_conf.set_authfile(booth_env.key_path) <NEW_LINE> conf_dir = ( None if booth_env.ghost_file_codes else os.path.dirname(booth_env.config_path) ) <NEW_LINE> try: <NEW_LINE> <INDENT> booth_env.key.write_raw( tools.generate_binary_key( random_bytes_count=settings.booth_authkey_bytes ), can_overwrite=overwrite_existing, ) <NEW_LINE> booth_env.config.write_facade( booth_conf, can_overwrite=overwrite_existing ) <NEW_LINE> <DEDENT> except FileAlreadyExists as e: <NEW_LINE> <INDENT> report_processor.report( ReportItem( severity=reports.item.get_severity( reports.codes.FORCE, overwrite_existing, ), message=reports.messages.FileAlreadyExists( e.metadata.file_type_code, e.metadata.path, ), ) ) <NEW_LINE> <DEDENT> except RawFileError as e: <NEW_LINE> <INDENT> if conf_dir and not os.path.exists(conf_dir): <NEW_LINE> <INDENT> report_processor.report( ReportItem.error(reports.messages.BoothPathNotExists(conf_dir)) ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> report_processor.report(raw_file_error_report(e)) <NEW_LINE> <DEDENT> <DEDENT> if report_processor.has_errors: <NEW_LINE> <INDENT> raise LibraryError() | create booth configuration
env
list site_list -- site addresses of multisite
list arbitrator_list -- arbitrator addresses of multisite
string instance_name -- booth instance name
bool overwrite_existing -- allow overwriting existing files | 625941b03c8af77a43ae3500 |
def XTRA_echo2(socket_handler, params): <NEW_LINE> <INDENT> MANAGER.app_log.warning("Extra command test2") <NEW_LINE> print(params) <NEW_LINE> data = "Echo2, you said {}".format((',').join(params)) <NEW_LINE> MANAGER.execute_filter_hook('send_data_back', {'socket': socket_handler, 'data': data}, first_only=True) | This command takes inline param(s): XTRA_echo2 hello 2 | 625941b026238365f5f0ebca |
def to_json(self, value): <NEW_LINE> <INDENT> return to_json(value) | Converts value to JSON.
This can be overridden if necessary. | 625941b0293b9510aa2c2ff4 |
@app.route('/signin', methods=['GET', 'POST']) <NEW_LINE> def signin(): <NEW_LINE> <INDENT> username = request.values.getlist('username')[0] <NEW_LINE> password = request.values.getlist('password')[0] <NEW_LINE> nickname = request.values.getlist('nickname')[0] <NEW_LINE> gender = request.values.getlist('gender')[0] <NEW_LINE> age = request.values.getlist('age')[0] <NEW_LINE> phone = request.values.getlist('phone')[0] <NEW_LINE> data = { 'username': username, 'password': password, 'nickname': nickname, 'gender': gender, 'age': age, 'phone': phone, } <NEW_LINE> db, collection = mongodb.get_db_client( settings.DB, settings.COLLECTION) <NEW_LINE> if mongodb.insert_one(collection, data): <NEW_LINE> <INDENT> data = { 'nickname': nickname, 'msg': '注册成功!', 'code': 1, } <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> data = { 'nickname': nickname, 'msg': '用户名已存在!', 'code': -1, } <NEW_LINE> <DEDENT> return jsonify(data) | If the username does not conflict, save it to the database and return a registration-success message;
if it conflicts, return an error message. | 625941b0627d3e7fe0d68ba8
def t_run_av_scores(): <NEW_LINE> <INDENT> alg_params = solver.AlgoParams(generations=500, pop_size=30, num_children=20, mutation_rate=0.1, crossover_rate=0.2) <NEW_LINE> algs = [algo_ea] <NEW_LINE> gens = [gen_schwefel] <NEW_LINE> for gen in gens: <NEW_LINE> <INDENT> print() <NEW_LINE> print() <NEW_LINE> print(gen.__name__, ' D:', gen.n) <NEW_LINE> compare_algs(gen, algs, alg_params, 15, True) | Call compare_algs() [prints average fitness results for range of algorithms] for
multiple generators | 625941b04f88993c3716bdcf |
def create_record(hostname, port): <NEW_LINE> <INDENT> p = {} <NEW_LINE> for f in min_rqd_fields: <NEW_LINE> <INDENT> p[f] = getattr(port, f, None) <NEW_LINE> <DEDENT> p['errors'] = [] <NEW_LINE> p['errors_cat'] = create_error_categories() <NEW_LINE> try: <NEW_LINE> <INDENT> p["lldpnbr"] = str((p["lldpnbr"].nbr.model.host.hostname)) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> if self._is_class(p["lldpnbr"]): <NEW_LINE> <INDENT> p["lldpnbr"] = str(p["lldpnbr"].nbr) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> p["lldpnbr"] = str(p["lldpnbr"]) <NEW_LINE> <DEDENT> <DEDENT> p["_vlans"] = [ str(v.vlan) if self._is_class(v) else str(v) for v in p["_vlans"] ] <NEW_LINE> if 'SAS87' not in hostname and '127' in p['_vlans']: <NEW_LINE> <INDENT> p['errors'].append("{} port {} - Vlan 127 should not be configured on this port".format(hostname, p["name"])) <NEW_LINE> p['errors_cat']['vlan_issues']['count'] += 1 <NEW_LINE> <DEDENT> if p['adminstate'] == 'enabled' and '1' in p['_vlans']: <NEW_LINE> <INDENT> p['errors'].append("{} port {} (admin state = {}) - Vlan 1 should not be configured on this port".format(hostname, p["name"], p["adminstate"])) <NEW_LINE> p['errors_cat']['vlan_issues']['count'] += 1 <NEW_LINE> <DEDENT> if p['adminstate'] == 'enabled' and p['vplsporttype'] == 'NNI' and p['mtu'] != '9100': <NEW_LINE> <INDENT> p['errors'].append("{} NNI port {} - MTU size is {} but should be 9100".format(hostname, p["name"], p["mtu"])) <NEW_LINE> p['errors_cat']['mtu_issues']['count'] += 1 <NEW_LINE> <DEDENT> if p['adminstate'] == 'enabled' and p['vplsporttype'] == 'UNI' and p['mtu'] != '9216': <NEW_LINE> <INDENT> p['errors'].append("{} UNI port {} - MTU size is {} but should be 9216".format(hostname, p["name"], p["mtu"])) <NEW_LINE> p['errors_cat']['mtu_issues']['count'] += 1 <NEW_LINE> <DEDENT> if p['operspeedduplex'] and p['adminstate'] == 'enabled' and 'HD' in p['operspeedduplex']: <NEW_LINE> <INDENT> p['errors'].append("{} port {} - duplex issues {}".format(hostname, p["name"], p["operspeedduplex"])) <NEW_LINE> p['errors_cat']['duplex_issues']['count'] += 1 <NEW_LINE> <DEDENT> if p['operspeedduplex'] and p['adminstate'] == 'enabled' and p['egressshaper']: <NEW_LINE> <INDENT> m = re.match("(?P<OPERSPEED>[0-9]+).*", p['operspeedduplex']) <NEW_LINE> if m: <NEW_LINE> <INDENT> if m.groupdict().get('OPERSPEED', 0) != p['egressshaper']: <NEW_LINE> <INDENT> p['errors'].append("{} port {} - egress shaper mismatch {} with operational speed {}".format(hostname, p["name"], p["egressshaper"], p['operspeedduplex'])) <NEW_LINE> p['errors_cat']['speed_issues']['count'] += 1 <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return p | Make sure each object has the minimum required fields
to make it easier for reporting later | 625941b0099cdd3c635f09bf |
def Simulate(self): <NEW_LINE> <INDENT> self.setDynamicFunction() <NEW_LINE> t = np.arange(0,self.finalTime,self.separation) <NEW_LINE> return odeint(self.DynamicFunction,self.initCondition,t,()) | Simulation routine using the odeint solver from the scipy.integrate package. odeint is a Python wrapper
around the FORTRAN ODEPACK library, which uses an Adams multi-step solver in the non-stiff case | 625941b0d7e4931a7ee9dc7d
def get_layers(name): <NEW_LINE> <INDENT> assert name == "cornet_irl" <NEW_LINE> return ["V1.output", "V2.output", "V4.output", "IT.output"] | This method returns a list of string layer names to consider per model. The benchmark maps brain regions to
layers and uses this list as the set of candidate layers. The list doesn't have to contain all layers; the fewer it
contains, the faster the benchmark runs. Additionally, each given layer has to produce an activations vector of at
least size 25! The layer names are delivered back to the model instance and have to be resolved there. For a pytorch
model, the layer names are, for instance, dot-concatenated per module, e.g. "features.2".
:param name: the name of the model, to return the layers for
:return: a list of strings containing all layers that should be considered as brain areas. | 625941b0507cdc57c6306a2b
def nlm_coeff(k, nmax, lmax): <NEW_LINE> <INDENT> i = 0 <NEW_LINE> for n in range(nmax+1): <NEW_LINE> <INDENT> for l in range(lmax+1): <NEW_LINE> <INDENT> for m in range(l+1): <NEW_LINE> <INDENT> if (i==k): <NEW_LINE> <INDENT> return (n, l, m) <NEW_LINE> <DEDENT> i+=1 <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> print('Error:', k, 'is not in the range of the matrix') | Returns the coefficients (n, l, m) for the k-th term
of the expansion.
e.g. k=1 is n=0, l=1, m=0
Input:
------
k: Number of the coefficient. e.g
nmax: maximum n in the expansion.
lmax: maximum l in the expansion.
Output:
-------
(n, l, m) | 625941b0d164cc6175782aa8 |
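A usage sketch confirming the documented ordering, with m varying fastest, then l, then n:

print(nlm_coeff(0, nmax=2, lmax=2))  # (0, 0, 0)
print(nlm_coeff(1, nmax=2, lmax=2))  # (0, 1, 0), matching the example above
print(nlm_coeff(2, nmax=2, lmax=2))  # (0, 1, 1)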
def update(self, obj_index, option, value, set_index=None): <NEW_LINE> <INDENT> option = Options.get(option) <NEW_LINE> if option.symmetric: <NEW_LINE> <INDENT> self._update_symmetric_option(obj_index, option, value) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._update_asymmetric_option(obj_index, set_index, option, value) | Updates an object option value. | 625941b0b57a9660fec335d9 |
def get_linked_workflow(workflow, steps, settings): <NEW_LINE> <INDENT> workflow_nodes = {} <NEW_LINE> for step_title, step in workflow[CWL_STEPS].items(): <NEW_LINE> <INDENT> step_name = get_step_name_from_title(step_title, workflow) <NEW_LINE> workflow_nodes[step_name] = { 'inputs': {}, 'outputs': {}, 'ancestors': {}, } <NEW_LINE> for input_key, input_value in step[CWL_WORKFLOW_IN].items(): <NEW_LINE> <INDENT> if '/' not in input_value: <NEW_LINE> <INDENT> if workflow[CWL_INPUTS][input_value] == 'File': <NEW_LINE> <INDENT> workflow_nodes[step_name]['inputs'][input_key] = settings[input_value][CWL_YAML_PATH] <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> ancestor = input_value[:input_value.index('/')] <NEW_LINE> workflow_nodes[step_name]['ancestors'][ancestor] = input_value <NEW_LINE> <DEDENT> <DEDENT> outputs_string = step[CWL_WORKFLOW_OUT] <NEW_LINE> outputs_list = [] <NEW_LINE> if isinstance(outputs_string, list): <NEW_LINE> <INDENT> outputs_list = outputs_string <NEW_LINE> <DEDENT> elif isinstance(outputs_string, str): <NEW_LINE> <INDENT> if outputs_string.startswith('[') and outputs_string.endswith(']'): <NEW_LINE> <INDENT> outputs_string = outputs_string[1:-1] <NEW_LINE> outputs_list = outputs_string.split(',') <NEW_LINE> for i in range(len(outputs_list)): <NEW_LINE> <INDENT> outputs_list[i] = outputs_list[i].replace(' ', '') <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> for output in outputs_list: <NEW_LINE> <INDENT> full_output = '%s/%s' % (step_name, output) <NEW_LINE> if step_name not in steps: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> if steps[step_name][CWL_OUTPUTS][output][CWL_OUTPUT_TYPE] == 'File': <NEW_LINE> <INDENT> output_value = steps[step_name][CWL_OUTPUTS][output][CWL_OUTPUT_BINDING] <NEW_LINE> if isinstance(output_value, dict): <NEW_LINE> <INDENT> glob = output_value[CWL_OUTPUT_GLOB] <NEW_LINE> if glob.startswith('$(inputs'): <NEW_LINE> <INDENT> glob = glob[glob.index('s')+1:glob.index(')')] <NEW_LINE> if glob.startswith('.'): <NEW_LINE> <INDENT> glob = glob[1:] <NEW_LINE> <DEDENT> if glob.startswith('[') and glob.endswith(']'): <NEW_LINE> <INDENT> glob = glob[1:-1] <NEW_LINE> <DEDENT> if glob.startswith('\'') and glob.endswith('\''): <NEW_LINE> <INDENT> glob = glob[1:-1] <NEW_LINE> <DEDENT> if glob.startswith('"') and glob.endswith('"'): <NEW_LINE> <INDENT> glob = glob[1:-1] <NEW_LINE> <DEDENT> if glob in step[CWL_WORKFLOW_IN]: <NEW_LINE> <INDENT> output_key = step[CWL_WORKFLOW_IN][glob] <NEW_LINE> output_value = settings[output_key] <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> workflow_nodes[step_name]['outputs'][full_output] = output_value <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return workflow_nodes | Determines static workflow using CWL definitions. This will attempt to get
all steps defined in the 'workflow' and construct a dict of nodes to be
displayed.
:param workflow: (dict) A CWL workflow dictionary.
:param steps: (dict) A dict of CWL step dictionaries.
:param settings: (dict) A CWL arguments dictionary.
:return: (dict) A dict of workflow nodes, keyed by step name. Format is:
{
'inputs': dict,
'outputs': dict,
'ancestors': dict
} | 625941b07b180e01f3dc456c |
def test_module_instructions_print(self): <NEW_LINE> <INDENT> module_tuples = getmembers(commands, ismodule) <NEW_LINE> with mock.patch("buf.commands.help.print") as mock_print: <NEW_LINE> <INDENT> for module_name, module in module_tuples: <NEW_LINE> <INDENT> test_options_dict = {"<subcommand_name>": module_name} <NEW_LINE> help.help(test_options_dict) <NEW_LINE> mock_print.assert_called_with(module.instructions) <NEW_LINE> mock_print.reset_mock() | Tests that a module's 'instructions' docstring is printed when the modules name is called with help. | 625941b076d4e153a657e893 |
def decode_b64_file(src, dst): <NEW_LINE> <INDENT> with open(src, 'r') as source_file: <NEW_LINE> <INDENT> with open(dst, 'wb') as destination_file: <NEW_LINE> <INDENT> for line in source_file: <NEW_LINE> <INDENT> destination_file.write(base64.b64decode(line)) | Read the base64-encoded file src and write its decoded content to the dst file
| 625941b08a349b6b435e7ed9 |
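A round-trip usage sketch (file names are illustrative):

import base64

with open('payload.b64', 'w') as f:
    f.write(base64.b64encode(b'hello world').decode('ascii'))
decode_b64_file('payload.b64', 'payload.bin')
print(open('payload.bin', 'rb').read())  # b'hello world'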
def control_samples(controls: Sequence[dict], sampleslist: Sequence[str]): <NEW_LINE> <INDENT> def __process_control__(trait_data): <NEW_LINE> <INDENT> def __process_sample__(acc, sample): <NEW_LINE> <INDENT> if sample in trait_data["data"].keys(): <NEW_LINE> <INDENT> sample_item = trait_data["data"][sample] <NEW_LINE> val = sample_item["value"] <NEW_LINE> if val is not None: <NEW_LINE> <INDENT> return ( acc[0] + (sample,), acc[1] + (val,), acc[2] + (sample_item["variance"],)) <NEW_LINE> <DEDENT> <DEDENT> return acc <NEW_LINE> <DEDENT> return reduce( __process_sample__, sampleslist, (tuple(), tuple(), tuple())) <NEW_LINE> <DEDENT> return reduce( lambda acc, item: ( acc[0] + (item[0],), acc[1] + (item[1],), acc[2] + (item[2],), acc[3] + (len(item[0]),), ), [__process_control__(trait_data) for trait_data in controls], (tuple(), tuple(), tuple(), tuple())) | Fetches data for the control traits.
This migrates `web/webqtl/correlation/correlationFunction.controlStrain` in
GN1, with a few modifications to the arguments passed in.
PARAMETERS:
controls: A map of sample names to trait data. Equivalent to the `cvals`
value in the corresponding source function in GN1.
sampleslist: A list of samples. Equivalent to `strainlst` in the
corresponding source function in GN1 | 625941b06fece00bbac2d49c |
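A small worked example of the shapes described above; sample names and values are illustrative:

controls = [{"data": {
    "s1": {"value": 7.2, "variance": 0.1},
    "s2": {"value": None, "variance": None}}}]
names, values, variances, counts = control_samples(controls, ["s1", "s2"])
# names == (("s1",),); values == ((7.2,),)
# variances == ((0.1,),); counts == (1,) -- s2 is dropped for its None value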
def reset(self): <NEW_LINE> <INDENT> self.d0 = np.random.randint(10, 30) <NEW_LINE> self.d = self.d0 <NEW_LINE> self.init_v = np.random.random() * SPEED_LIMIT <NEW_LINE> self.v_head = self.init_v <NEW_LINE> self.v = self.v_head <NEW_LINE> self.step_number = 0 <NEW_LINE> return self.v_cal_raw | Initialize the environment and return the observation of the lead-vehicle speed before any attack
:return
v_head: speed of the lead vehicle
v: speed of the ego vehicle | 625941b0293b9510aa2c2ff7
def decode(self, v): <NEW_LINE> <INDENT> immediate = None <NEW_LINE> if getattr(self.parent, "imm7_align4", False): <NEW_LINE> <INDENT> v = self.parent.imm7_align4.value & 0x1F <NEW_LINE> immediate = v << 2 <NEW_LINE> <DEDENT> elif getattr(self.parent, "imm7", False): <NEW_LINE> <INDENT> immediate = self.parent.imm7.value & 0x7F <NEW_LINE> <DEDENT> elif getattr(self.parent, "disp7_align2", False): <NEW_LINE> <INDENT> disp7_align2 = self.parent.disp7_align2.value & 0x3F <NEW_LINE> immediate = disp7_align2 << 1 <NEW_LINE> <DEDENT> if immediate is not None: <NEW_LINE> <INDENT> self.expr = ExprMem(self.implicit_reg + ExprInt(immediate, 32), 32) <NEW_LINE> return True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return False | Modify the decoded value using the previously decoded
immediate. | 625941b03346ee7daa2b2acb |
def logconfig(level, file=None): <NEW_LINE> <INDENT> logger = logging.getLogger() <NEW_LINE> logging.addLevelName(100, "CRITICAL") <NEW_LINE> logging.addLevelName(60, "ERROR") <NEW_LINE> logging.addLevelName(50, "WARNING") <NEW_LINE> logging.addLevelName(40, "DEBUG") <NEW_LINE> logging.addLevelName(30, "DEBUG2") <NEW_LINE> logging.addLevelName(20, "TRIVIA") <NEW_LINE> logging.addLevelName(10, "TRACE") <NEW_LINE> logging.addLevelName(0, "") <NEW_LINE> if file is None: <NEW_LINE> <INDENT> handler = logging.StreamHandler(sys.stdout) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> handler = logging.FileHandler(file) <NEW_LINE> <DEDENT> formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s', '%b %2d %H:%M:%S') <NEW_LINE> handler.setFormatter(formatter) <NEW_LINE> logger.addHandler(handler) <NEW_LINE> logger.setLevel(level) <NEW_LINE> logger.info("Logging initialized.") | Initialize the python logger for Current. | 625941b0627d3e7fe0d68bac
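A usage sketch; note that the function remaps the numeric levels, so 40 is rendered as DEBUG here rather than the stdlib's ERROR:

import logging

logconfig(40)  # log to stdout, threshold at the remapped DEBUG level
logging.log(50, "disk almost full")  # emitted with levelname 'WARNING'
logging.log(30, "verbose detail")    # 'DEBUG2' -- suppressed below 40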
def calculate_file_url(name_bytes, base_url): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> rel_name = base64.b64decode(name_bytes).decode('utf8') <NEW_LINE> <DEDENT> except binascii.Error: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> full_url = urllib.parse.urljoin(base_url, rel_name) <NEW_LINE> log.debug('rel_name: {}, full_url: {}'.format(rel_name, full_url)) <NEW_LINE> if urllib.parse.urlparse(full_url).scheme not in ('http', 'https'): <NEW_LINE> <INDENT> raise ValueError('Unknown URL scheme in {}'.format(repr(full_url))) <NEW_LINE> <DEDENT> if base_url and not full_url.startswith(base_url): <NEW_LINE> <INDENT> raise ValueError('URL outside of {}: {}'.format(base_url, repr(full_url))) <NEW_LINE> <DEDENT> return full_url | Calculate and return a full URL for file name.
File name is interpreted as base64-encoded bytes and joined to base URL. | 625941b0046cf37aa974cab0 |
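A usage sketch (base URL and file name are illustrative):

import base64

name = base64.b64encode(b'reports/2020.csv')
print(calculate_file_url(name, 'https://example.com/data/'))
# https://example.com/data/reports/2020.csv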
def locRec_dict(dataDF, locColName): <NEW_LINE> <INDENT> temp_dict = {} <NEW_LINE> locList = dataDF[locColName].unique() <NEW_LINE> rowIndex = dataDF.index.values <NEW_LINE> for loc in locList: <NEW_LINE> <INDENT> recs = [] <NEW_LINE> for row in rowIndex: <NEW_LINE> <INDENT> rowloc = dataDF.loc[row, locColName] <NEW_LINE> if rowloc == loc: <NEW_LINE> <INDENT> rec = list(dataDF.loc[row, "Department":"Percent Department"]) <NEW_LINE> recs.append(rec) <NEW_LINE> <DEDENT> <DEDENT> recsDF = pd.DataFrame(recs, columns=['Department', 'Category', "Scores with Improvement Potential", 'People Affected', 'Percent Department']) <NEW_LINE> temp_dict[loc] = recsDF <NEW_LINE> <DEDENT> return temp_dict | Turn a recommendation dataframe into a dictionary keyed by location;
:param dataDF: dataframe of recommendations
:param locColName: name of the column whose values become the dictionary keys
:return: dict mapping each location to a DataFrame of its recommendations | 625941b03539df3088e2e0b1
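A usage sketch with a minimal DataFrame whose columns match the slice used above:

import pandas as pd

df = pd.DataFrame({
    "Location": ["HQ", "HQ"],
    "Department": ["IT", "HR"],
    "Category": ["Tools", "Morale"],
    "Scores with Improvement Potential": [3, 5],
    "People Affected": [12, 40],
    "Percent Department": [0.4, 0.8],
})
recs_by_loc = locRec_dict(df, "Location")
print(recs_by_loc["HQ"])  # 2-row DataFrame of recommendations for HQ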