Dataset Viewer (auto-converted to Parquet)

Columns:
- before: string, lengths 0 to 955k
- after: string, lengths 0 to 877k
- repo: string, lengths 1 to 74
- type: string, 1 distinct class ('positive' in the rows shown below)
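Each row pairs a before string, which still contains <DeepExtract> ... </DeepExtract> markers around inlined helper code, with an after string in which those markers are stripped, plus the source repo and the type label. As a minimal sketch only (not part of this dataset's tooling), the snippet below loads one locally downloaded Parquet shard and strips the markers from a before cell; the file name data.parquet is a placeholder, and the regex is an assumption about how the two columns relate.

import re
import pandas as pd

# Load one locally downloaded Parquet shard (placeholder file name).
df = pd.read_parquet('data.parquet')
print(df.columns.tolist())   # expected: ['before', 'after', 'repo', 'type']
print(df['type'].unique())   # the viewer reports a single class ('positive')

def strip_markers(before: str) -> str:
    # Hypothetical helper: drop the DeepExtract markers but keep the code they wrap,
    # which is how the 'after' column appears to be derived from 'before'.
    return re.sub(r'\s*</?DeepExtract>\s*', ' ', before).strip()

row = df.iloc[0]
print(strip_markers(row['before'])[:200])
print(row['after'][:200])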
def _malloc(self, size): i = bisect.bisect_left(self._lengths, size) if i == len(self._lengths): <DeepExtract> mask = mmap.PAGESIZE - 1 length = max(self._size, size) + mask & ~mask </DeepExtract> self._size *= 2 info('allocating a new mmap of length %d', length) arena = Arena(length) self._arenas.append(arena) return (arena, 0, length) else: length = self._lengths[i] seq = self._len_to_seq[length] block = seq.pop() if not seq: del self._len_to_seq[length], self._lengths[i] (arena, start, stop) = block del self._start_to_block[arena, start] del self._stop_to_block[arena, stop] return block
def _malloc(self, size): i = bisect.bisect_left(self._lengths, size) if i == len(self._lengths): mask = mmap.PAGESIZE - 1 length = max(self._size, size) + mask & ~mask self._size *= 2 info('allocating a new mmap of length %d', length) arena = Arena(length) self._arenas.append(arena) return (arena, 0, length) else: length = self._lengths[i] seq = self._len_to_seq[length] block = seq.pop() if not seq: del self._len_to_seq[length], self._lengths[i] (arena, start, stop) = block del self._start_to_block[arena, start] del self._stop_to_block[arena, stop] return block
3DFasterRCNN_LungNoduleDetector
positive
def build_temp_dir(prefix='test-attributecode-'): """ Create and return a new unique empty directory created in base_dir. """ location = tempfile.mkdtemp(prefix=prefix) <DeepExtract> if not os.path.exists(location): os.makedirs(location) os.chmod(location, stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH) </DeepExtract> return location
def build_temp_dir(prefix='test-attributecode-'): """ Create and return a new unique empty directory created in base_dir. """ location = tempfile.mkdtemp(prefix=prefix) if not os.path.exists(location): os.makedirs(location) os.chmod(location, stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH) return location
aboutcode-toolkit
positive
def slot_tabview_change(self, index): if index not in (0, 1): return status_prev: str = self.view_status_label.text() if index == 0: <DeepExtract> self.view_status_label.setText(self.view_status_label_analysis_cache) </DeepExtract> self.view_status_label_rulegen_cache = status_prev self.view_reset_button.setText('Reset Selections') elif index == 1: <DeepExtract> self.view_status_label.setText(self.view_status_label_rulegen_cache) </DeepExtract> self.view_status_label_analysis_cache = status_prev self.view_reset_button.setText('Clear')
def slot_tabview_change(self, index): if index not in (0, 1): return status_prev: str = self.view_status_label.text() if index == 0: self.view_status_label.setText(self.view_status_label_analysis_cache) self.view_status_label_rulegen_cache = status_prev self.view_reset_button.setText('Reset Selections') elif index == 1: self.view_status_label.setText(self.view_status_label_rulegen_cache) self.view_status_label_analysis_cache = status_prev self.view_reset_button.setText('Clear')
capa
positive
def read_input(): ncases = int(input()) for case in range(1, ncases + 1): n = int(input()) way = input() <DeepExtract> if way[0] == way[-1]: if way[0] == 'E': for (i, char) in enumerate(way): if way[i] == way[i + 1] and way[i] == 'S': y = sum((ch == 'S' for ch in way[:i])) + 1 path = 'S' * y + 'E' * n - 1 + 'S' * (n - 1 - y) elif way[0] == 'S': for (i, char) in enumerate(way): if way[i] == way[i + 1] and way[i] == 'E': y = sum((ch == 'E' for ch in way[:i])) + 1 path = 'E' * y + 'S' * n - 1 + 'E' * (n - 1 - y) elif way[0] == 'E': path = 'S' * n - 1 + 'E' * n - 1 elif way[0] == 'S': path = 'E' * n - 1 + 'S' * n - 1 </DeepExtract> print('CASE #{}: {}'.format(case, path))
def read_input(): ncases = int(input()) for case in range(1, ncases + 1): n = int(input()) way = input() if way[0] == way[-1]: if way[0] == 'E': for (i, char) in enumerate(way): if way[i] == way[i + 1] and way[i] == 'S': y = sum((ch == 'S' for ch in way[:i])) + 1 path = 'S' * y + 'E' * n - 1 + 'S' * (n - 1 - y) elif way[0] == 'S': for (i, char) in enumerate(way): if way[i] == way[i + 1] and way[i] == 'E': y = sum((ch == 'E' for ch in way[:i])) + 1 path = 'E' * y + 'S' * n - 1 + 'E' * (n - 1 - y) elif way[0] == 'E': path = 'S' * n - 1 + 'E' * n - 1 elif way[0] == 'S': path = 'E' * n - 1 + 'S' * n - 1 print('CASE #{}: {}'.format(case, path))
algorithms
positive
def _batch_action(self): async def batch_action(settings: ModelView.schemes.BatchSettings, user: ModelView.schemes.User=Security(utils.authorization.auth_dependency, scopes=self.scopes['batch_action'])): <DeepExtract> if settings.command in self.custom_commands: query = self.custom_commands[settings.command](self.orm_model) if settings.command == 'delete': query = self.orm_model.delete </DeepExtract> if query is None: raise HTTPException(status_code=404, detail='Batch command not found') if self.orm_model != models.User and user: query = query.where(self.orm_model.user_id == user.id) query = query.where(self.orm_model.id.in_(settings.ids)) if self.custom_methods.get('batch_action'): await self.custom_methods['batch_action'](query, settings, user) else: await query.gino.status() return True return batch_action
def _batch_action(self): async def batch_action(settings: ModelView.schemes.BatchSettings, user: ModelView.schemes.User=Security(utils.authorization.auth_dependency, scopes=self.scopes['batch_action'])): if settings.command in self.custom_commands: query = self.custom_commands[settings.command](self.orm_model) if settings.command == 'delete': query = self.orm_model.delete if query is None: raise HTTPException(status_code=404, detail='Batch command not found') if self.orm_model != models.User and user: query = query.where(self.orm_model.user_id == user.id) query = query.where(self.orm_model.id.in_(settings.ids)) if self.custom_methods.get('batch_action'): await self.custom_methods['batch_action'](query, settings, user) else: await query.gino.status() return True return batch_action
bitcart
positive
def __init__(self, parameter): """ :param parameter: the parts of a tplarg. """ <DeepExtract> sep = '|' parameters = [] cur = 0 for (s, e) in findMatchingBraces(parameter): par = parameter[cur:s].split(sep) if par: if parameters: parameters[-1] += par[0] if len(par) > 1: parameters.extend(par[1:]) else: parameters = par elif not parameters: parameters = [''] parameters[-1] += parameter[s:e] cur = e par = parameter[cur:].split(sep) if par: if parameters: parameters[-1] += par[0] if len(par) > 1: parameters.extend(par[1:]) else: parameters = par parts = parameters </DeepExtract> self.name = Template.parse(parts[0]) if len(parts) > 1: self.default = Template.parse(parts[1]) else: self.default = None
def __init__(self, parameter): """ :param parameter: the parts of a tplarg. """ sep = '|' parameters = [] cur = 0 for (s, e) in findMatchingBraces(parameter): par = parameter[cur:s].split(sep) if par: if parameters: parameters[-1] += par[0] if len(par) > 1: parameters.extend(par[1:]) else: parameters = par elif not parameters: parameters = [''] parameters[-1] += parameter[s:e] cur = e par = parameter[cur:].split(sep) if par: if parameters: parameters[-1] += par[0] if len(par) > 1: parameters.extend(par[1:]) else: parameters = par parts = parameters self.name = Template.parse(parts[0]) if len(parts) > 1: self.default = Template.parse(parts[1]) else: self.default = None
DistillBERT
positive
def unpack_directory(data): <DeepExtract> header = struct_unpack(HEADER_FORMAT, data)[0] </DeepExtract> numTables = header['numTables'] data = data[HEADER_SIZE:] directory = [] for index in range(numTables): <DeepExtract> (keys, format_string) = _struct_get_format(DIRECTORY_FORMAT) size = struct.calcsize(format_string) values = struct.unpack(format_string, data[:size]) unpacked = {} for (index, key) in enumerate(keys): value = values[index] unpacked[key] = value (table, data) = (unpacked, data[size:]) </DeepExtract> directory.append(table) return directory
def unpack_directory(data): header = struct_unpack(HEADER_FORMAT, data)[0] numTables = header['numTables'] data = data[HEADER_SIZE:] directory = [] for index in range(numTables): (keys, format_string) = _struct_get_format(DIRECTORY_FORMAT) size = struct.calcsize(format_string) values = struct.unpack(format_string, data[:size]) unpacked = {} for (index, key) in enumerate(keys): value = values[index] unpacked[key] = value (table, data) = (unpacked, data[size:]) directory.append(table) return directory
django-gateone
positive
def format_json(lib): import json summary = lib.summarize() non_users = [] for u in summary['non_users']: non_users.append(u.to_dict()) non_users.sort(key=lambda x: x['path']) users = [] for (u, usage) in summary['users']: symbols = [s.to_dict() for s in usage] symbols.sort(key=lambda x: x['name']) users.append({'user': u.to_dict(), 'used_symbols': symbols}) users.sort(key=lambda x: x['user']['path']) unused_symbols = [] for s in summary['unused_symbols']: unused_symbols.append(s.to_dict()) unused_symbols.sort(key=lambda x: x['name']) excluded_symbols = [] for s in summary['excluded_symbols']: excluded_symbols.append(s.to_dict()) excluded_symbols.sort(key=lambda x: x['name']) used_symbols = {} for (s, user) in summary['used_symbols'].items(): lst = used_symbols.setdefault(s.name, []) for u in user: if isinstance(u, User): lst.append(('binary', u.path)) elif isinstance(u, tuple): <DeepExtract> u[1] = list(u[1]) lines = [] with open(u[0], encoding='utf-8') as f: for (i, line) in enumerate(f): if i in u[1]: lines.append((i, line.strip())) u[1].remove(i) if not u[1]: break lines = lines </DeepExtract> lst.append(('source', lines)) lst.sort() report = {'non_users': non_users, 'users': users, 'unused_symbols': unused_symbols, 'excluded_symbols': excluded_symbols, 'used_symbols': used_symbols} json.dump(report, sys.stdout, indent=2, sort_keys=True)
def format_json(lib): import json summary = lib.summarize() non_users = [] for u in summary['non_users']: non_users.append(u.to_dict()) non_users.sort(key=lambda x: x['path']) users = [] for (u, usage) in summary['users']: symbols = [s.to_dict() for s in usage] symbols.sort(key=lambda x: x['name']) users.append({'user': u.to_dict(), 'used_symbols': symbols}) users.sort(key=lambda x: x['user']['path']) unused_symbols = [] for s in summary['unused_symbols']: unused_symbols.append(s.to_dict()) unused_symbols.sort(key=lambda x: x['name']) excluded_symbols = [] for s in summary['excluded_symbols']: excluded_symbols.append(s.to_dict()) excluded_symbols.sort(key=lambda x: x['name']) used_symbols = {} for (s, user) in summary['used_symbols'].items(): lst = used_symbols.setdefault(s.name, []) for u in user: if isinstance(u, User): lst.append(('binary', u.path)) elif isinstance(u, tuple): u[1] = list(u[1]) lines = [] with open(u[0], encoding='utf-8') as f: for (i, line) in enumerate(f): if i in u[1]: lines.append((i, line.strip())) u[1].remove(i) if not u[1]: break lines = lines lst.append(('source', lines)) lst.sort() report = {'non_users': non_users, 'users': users, 'unused_symbols': unused_symbols, 'excluded_symbols': excluded_symbols, 'used_symbols': used_symbols} json.dump(report, sys.stdout, indent=2, sort_keys=True)
barbieri-playground
positive
def test_delete_question_with_essay_question(self): EssayQuestion.objects.create(question_id=1, assignment=Assignment.objects.get(assignment_id=1), title='Evolvers', description='Write an essay about the Evolvers.') kwargs = {'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'} <DeepExtract> client = Client() client.login(username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD) client = client </DeepExtract> response = client.post('/teacher/course/1/assignment/1/delete_question', {'question_id': 1, 'question_type': settings.ESSAY_QUESTION_TYPE}, **kwargs) self.assertEqual(response.status_code, 200) json_string = response.content.decode(encoding='UTF-8') array = json.loads(json_string) self.assertEqual(array['message'], 'question was deleted') self.assertEqual(array['status'], 'success')
def test_delete_question_with_essay_question(self): EssayQuestion.objects.create(question_id=1, assignment=Assignment.objects.get(assignment_id=1), title='Evolvers', description='Write an essay about the Evolvers.') kwargs = {'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'} client = Client() client.login(username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD) client = client response = client.post('/teacher/course/1/assignment/1/delete_question', {'question_id': 1, 'question_type': settings.ESSAY_QUESTION_TYPE}, **kwargs) self.assertEqual(response.status_code, 200) json_string = response.content.decode(encoding='UTF-8') array = json.loads(json_string) self.assertEqual(array['message'], 'question was deleted') self.assertEqual(array['status'], 'success')
academicstoday-django
positive
def findPathsUtil(maze, m, n, i, j, path, indx): global allPaths global storePaths if i == m - 1: for k in range(j, n): path[indx + k - j] = maze[i][k] storePaths += ''.join(path) + '|' allPaths.append(path) return if j == n - 1: for k in range(i, m): path[indx + k - i] = maze[k][j] storePaths += ''.join(path) + '|' allPaths.append(path) return path[indx] = maze[i][j] <DeepExtract> global allPaths global storePaths if i + 1 == m - 1: for k in range(j, n): path[indx + 1 + k - j] = maze[i + 1][k] storePaths += ''.join(path) + '|' allPaths.append(path) return if j == n - 1: for k in range(i + 1, m): path[indx + 1 + k - i + 1] = maze[k][j] storePaths += ''.join(path) + '|' allPaths.append(path) return path[indx + 1] = maze[i + 1][j] findPathsUtil(maze, m, n, i + 1 + 1, j, path, indx + 1 + 1) findPathsUtil(maze, m, n, i + 1, j + 1, path, indx + 1 + 1) </DeepExtract> <DeepExtract> global allPaths global storePaths if i == m - 1: for k in range(j + 1, n): path[indx + 1 + k - j + 1] = maze[i][k] storePaths += ''.join(path) + '|' allPaths.append(path) return if j + 1 == n - 1: for k in range(i, m): path[indx + 1 + k - i] = maze[k][j + 1] storePaths += ''.join(path) + '|' allPaths.append(path) return path[indx + 1] = maze[i][j + 1] findPathsUtil(maze, m, n, i + 1, j + 1, path, indx + 1 + 1) findPathsUtil(maze, m, n, i, j + 1 + 1, path, indx + 1 + 1) </DeepExtract>
def findPathsUtil(maze, m, n, i, j, path, indx): global allPaths global storePaths if i == m - 1: for k in range(j, n): path[indx + k - j] = maze[i][k] storePaths += ''.join(path) + '|' allPaths.append(path) return if j == n - 1: for k in range(i, m): path[indx + k - i] = maze[k][j] storePaths += ''.join(path) + '|' allPaths.append(path) return path[indx] = maze[i][j] global allPaths global storePaths if i + 1 == m - 1: for k in range(j, n): path[indx + 1 + k - j] = maze[i + 1][k] storePaths += ''.join(path) + '|' allPaths.append(path) return if j == n - 1: for k in range(i + 1, m): path[indx + 1 + k - i + 1] = maze[k][j] storePaths += ''.join(path) + '|' allPaths.append(path) return path[indx + 1] = maze[i + 1][j] findPathsUtil(maze, m, n, i + 1 + 1, j, path, indx + 1 + 1) findPathsUtil(maze, m, n, i + 1, j + 1, path, indx + 1 + 1) global allPaths global storePaths if i == m - 1: for k in range(j + 1, n): path[indx + 1 + k - j + 1] = maze[i][k] storePaths += ''.join(path) + '|' allPaths.append(path) return if j + 1 == n - 1: for k in range(i, m): path[indx + 1 + k - i] = maze[k][j + 1] storePaths += ''.join(path) + '|' allPaths.append(path) return path[indx + 1] = maze[i][j + 1] findPathsUtil(maze, m, n, i + 1, j + 1, path, indx + 1 + 1) findPathsUtil(maze, m, n, i, j + 1 + 1, path, indx + 1 + 1)
Competitive-Coding-Platforms
positive
@pytest.mark.parametrize('n, shape, grid', [([0], (1, 1, 1), (3, 4, 1)), ([1, 14], (10, 20, 30), (3, 1, 5)), ([14, 14, 14], (10, 20, 30), (1, 4, 5))]) def test_getDiscretisation_bools(n, shape, grid): <DeepExtract> coords = np.concatenate([np.linspace(0, s, len(n)).reshape(-1, 1) for s in shape], axis=1) species = np.array(n) (atoms, species) = (coords, species) </DeepExtract> x = [np.linspace(0, 0.1, g) for g in grid] for b1 in (True, False): for b2 in (True, False): f = get_discretisation(atoms, species, x, pointwise=b1, FT=b2, **params) assert f.shape == grid
@pytest.mark.parametrize('n, shape, grid', [([0], (1, 1, 1), (3, 4, 1)), ([1, 14], (10, 20, 30), (3, 1, 5)), ([14, 14, 14], (10, 20, 30), (1, 4, 5))]) def test_getDiscretisation_bools(n, shape, grid): coords = np.concatenate([np.linspace(0, s, len(n)).reshape(-1, 1) for s in shape], axis=1) species = np.array(n) (atoms, species) = (coords, species) x = [np.linspace(0, 0.1, g) for g in grid] for b1 in (True, False): for b2 in (True, False): f = get_discretisation(atoms, species, x, pointwise=b1, FT=b2, **params) assert f.shape == grid
diffsims
positive
def start_lock_delay(self): """ Setup the lock delay timer based on user prefs - if there is no delay, or if idle locking isn't enabled, we run the callback immediately, or simply return, respectively. """ if not settings.get_idle_lock_enabled(): return if not utils.user_can_lock(): return lock_delay = settings.get_idle_lock_delay() if lock_delay == 0: <DeepExtract> DEBUG("manager: locking after delay ('lock-delay')") self.set_locked(True) return False </DeepExtract> else: trackers.timer_tracker_get().start_seconds('idle-lock-delay', lock_delay, self.on_lock_delay_timeout)
def start_lock_delay(self): """ Setup the lock delay timer based on user prefs - if there is no delay, or if idle locking isn't enabled, we run the callback immediately, or simply return, respectively. """ if not settings.get_idle_lock_enabled(): return if not utils.user_can_lock(): return lock_delay = settings.get_idle_lock_delay() if lock_delay == 0: DEBUG("manager: locking after delay ('lock-delay')") self.set_locked(True) return False else: trackers.timer_tracker_get().start_seconds('idle-lock-delay', lock_delay, self.on_lock_delay_timeout)
cinnamon-screensaver
positive
def __init__(self, data): """ Initialise the DataHfProvider class with the `data` being a supported data container (currently python dictionary or HDF5 file). Let `nf` denote the number of Fock spin orbitals (i.e. the sum of both the alpha and the beta orbitals) and `nb` the number of basis functions. With `array` we indicate either a `np.array` or an HDF5 dataset. The following keys are required in the container: 1. **restricted** (`bool`): `True` for a restricted SCF calculation, `False` otherwise 2. **conv_tol** (`float`): Tolerance value used for SCF convergence, should be roughly equivalent to l2 norm of the Pulay error. 3. **orbcoeff_fb** (`.array` with dtype `float`, size `(nf, nb)`): SCF orbital coefficients, i.e. the uniform transform from the basis to the molecular orbitals. 4. **occupation_f** (`array` with dtype `float`, size `(nf, )`: Occupation number for each SCF orbitals (i.e. diagonal of the HF density matrix in the SCF orbital basis). 5. **orben_f** (`array` with dtype `float`, size `(nf, )`: SCF orbital energies 6. **fock_ff** (`array` with dtype `float`, size `(nf, nf)`: Fock matrix in SCF orbital basis. Notice, the full matrix is expected also for restricted calculations. 7. **eri_phys_asym_ffff** (`array` with dtype `float`, size `(nf, nf, nf, nf)`: Antisymmetrised electron-repulsion integral tensor in the SCF orbital basis, using the Physicists' indexing convention, i.e. that the index tuple `(i,j,k,l)` refers to the integral :math:`\\langle ij || kl \\rangle`, i.e. .. math:: \\int_\\Omega \\int_\\Omega d r_1 d r_2 \\frac{ \\phi_i(r_1) \\phi_j(r_2) \\phi_k(r_1) \\phi_l(r_2)}{|r_1 - r_2|} - \\int_\\Omega \\int_\\Omega d r_1 d r_2 \\frac{ \\phi_i(r_1) \\phi_j(r_2) \\phi_l(r_1) \\phi_k(r_2)}{|r_1 - r_2|} The full tensor (including zero blocks) is expected. As an alternative to `eri_phys_asym_ffff`, the user may provide 8. **eri_ffff** (`array` with dtype `float`, size `(nf, nf, nf, nf)`: Electron-repulsion integral tensor in chemists' notation. The index tuple `(i,j,k,l)` thus refers to the integral :math:`(ij|kl)`, which is .. math:: \\int_\\Omega \\int_\\Omega d r_1 d r_2 \\frac{\\phi_i(r_1) \\phi_j(r_1) \\phi_k(r_2) \\phi_l(r_2)}{|r_1 - r_2|} Notice, that no antisymmetrisation has been applied in this tensor. The above keys define the least set of quantities to start a calculation in `adcc`. In order to have access to properties such as dipole moments or to get the correct state energies, further keys are highly recommended to be provided as well. 9. **energy_scf** (`float`): Final total SCF energy of both electronic and nuclear energy terms. (default: `0.0`) 10. **multipoles**: Container with electric and nuclear multipole moments. Can be another dictionary or simply an HDF5 group. - **elec_1** (`array`, size `(3, nb, nb)`): Electric dipole moment integrals in the atomic orbital basis (i.e. the discretisation basis with `nb` elements). First axis indicates cartesian component (x, y, z). - **nuc_0** (`float`): Total nuclear charge - **nuc_1** (`array` size `(3, )`: Nuclear dipole moment The defaults for all entries are all-zero multipoles. 11. **spin_multiplicity** (`int`): The spin mulitplicity of the HF ground state described by the data. A value of `0` (for unknown) should be supplied for unrestricted calculations. (default: 1 for restricted and 0 for unrestricted calculations) A descriptive string for the backend can be supplied optionally as well. In case of using a python `dict` as the data container, this should be done using the key `backend`. 
For an HDF5 file, this should be done using the attribute `backend`. Defaults based on the filename are generated. Parameters ---------- data : dict or h5py.File Dictionary containing the HartreeFock data to use. For the required keys see details above. """ super().__init__() self.data = data if isinstance(data, dict): self.__backend = data.get('backend', 'dict') elif isinstance(data, h5py.File): if 'r' not in data.mode: raise ValueError('Passed h5py.File stream (filename: {}) not readable.'.format(data.filename)) self.__backend = data.attrs.get('backend', '<HDF5 file "{}">'.format(data.filename)) else: raise TypeError('Can only deal with data objects of type dict or h5py.File.') if data['orbcoeff_fb'].shape[0] % 2 != 0: raise ValueError('orbcoeff_fb first axis should have even length') <DeepExtract> nb = self.data['orbcoeff_fb'].shape[1] </DeepExtract> nf = 2 * self.get_n_orbs_alpha() checks = [('orbcoeff_fb', (nf, nb)), ('occupation_f', (nf,)), ('orben_f', (nf,)), ('fock_ff', (nf, nf)), ('eri_ffff', (nf, nf, nf, nf)), ('eri_phys_asym_ffff', (nf, nf, nf, nf))] for (key, exshape) in checks: if key not in data: continue if data[key].shape != exshape: raise ValueError('Shape mismatch for key {}: Expected {}, but got {}.'.format(key, exshape, data[key].shape)) opprov = DataOperatorIntegralProvider(self.__backend) mmp = data.get('multipoles', {}) if 'elec_1' in mmp: if mmp['elec_1'].shape != (3, nb, nb): raise ValueError('multipoles/elec_1 is expected to have shape ' + str((3, nb, nb)) + ' not ' + str(mmp['elec_1'].shape)) opprov.electric_dipole = np.asarray(mmp['elec_1']) magm = data.get('magnetic_moments', {}) if 'mag_1' in magm: if magm['mag_1'].shape != (3, nb, nb): raise ValueError('magnetic_moments/mag_1 is expected to have shape ' + str((3, nb, nb)) + ' not ' + str(magm['mag_1'].shape)) opprov.magnetic_dipole = np.asarray(magm['mag_1']) derivs = data.get('derivatives', {}) if 'nabla' in derivs: if derivs['nabla'].shape != (3, nb, nb): raise ValueError('derivatives/nabla is expected to have shape ' + str((3, nb, nb)) + ' not ' + str(derivs['nabla'].shape)) opprov.nabla = np.asarray(derivs['nabla']) self.operator_integral_provider = opprov
def __init__(self, data): """ Initialise the DataHfProvider class with the `data` being a supported data container (currently python dictionary or HDF5 file). Let `nf` denote the number of Fock spin orbitals (i.e. the sum of both the alpha and the beta orbitals) and `nb` the number of basis functions. With `array` we indicate either a `np.array` or an HDF5 dataset. The following keys are required in the container: 1. **restricted** (`bool`): `True` for a restricted SCF calculation, `False` otherwise 2. **conv_tol** (`float`): Tolerance value used for SCF convergence, should be roughly equivalent to l2 norm of the Pulay error. 3. **orbcoeff_fb** (`.array` with dtype `float`, size `(nf, nb)`): SCF orbital coefficients, i.e. the uniform transform from the basis to the molecular orbitals. 4. **occupation_f** (`array` with dtype `float`, size `(nf, )`: Occupation number for each SCF orbitals (i.e. diagonal of the HF density matrix in the SCF orbital basis). 5. **orben_f** (`array` with dtype `float`, size `(nf, )`: SCF orbital energies 6. **fock_ff** (`array` with dtype `float`, size `(nf, nf)`: Fock matrix in SCF orbital basis. Notice, the full matrix is expected also for restricted calculations. 7. **eri_phys_asym_ffff** (`array` with dtype `float`, size `(nf, nf, nf, nf)`: Antisymmetrised electron-repulsion integral tensor in the SCF orbital basis, using the Physicists' indexing convention, i.e. that the index tuple `(i,j,k,l)` refers to the integral :math:`\\langle ij || kl \\rangle`, i.e. .. math:: \\int_\\Omega \\int_\\Omega d r_1 d r_2 \\frac{ \\phi_i(r_1) \\phi_j(r_2) \\phi_k(r_1) \\phi_l(r_2)}{|r_1 - r_2|} - \\int_\\Omega \\int_\\Omega d r_1 d r_2 \\frac{ \\phi_i(r_1) \\phi_j(r_2) \\phi_l(r_1) \\phi_k(r_2)}{|r_1 - r_2|} The full tensor (including zero blocks) is expected. As an alternative to `eri_phys_asym_ffff`, the user may provide 8. **eri_ffff** (`array` with dtype `float`, size `(nf, nf, nf, nf)`: Electron-repulsion integral tensor in chemists' notation. The index tuple `(i,j,k,l)` thus refers to the integral :math:`(ij|kl)`, which is .. math:: \\int_\\Omega \\int_\\Omega d r_1 d r_2 \\frac{\\phi_i(r_1) \\phi_j(r_1) \\phi_k(r_2) \\phi_l(r_2)}{|r_1 - r_2|} Notice, that no antisymmetrisation has been applied in this tensor. The above keys define the least set of quantities to start a calculation in `adcc`. In order to have access to properties such as dipole moments or to get the correct state energies, further keys are highly recommended to be provided as well. 9. **energy_scf** (`float`): Final total SCF energy of both electronic and nuclear energy terms. (default: `0.0`) 10. **multipoles**: Container with electric and nuclear multipole moments. Can be another dictionary or simply an HDF5 group. - **elec_1** (`array`, size `(3, nb, nb)`): Electric dipole moment integrals in the atomic orbital basis (i.e. the discretisation basis with `nb` elements). First axis indicates cartesian component (x, y, z). - **nuc_0** (`float`): Total nuclear charge - **nuc_1** (`array` size `(3, )`: Nuclear dipole moment The defaults for all entries are all-zero multipoles. 11. **spin_multiplicity** (`int`): The spin mulitplicity of the HF ground state described by the data. A value of `0` (for unknown) should be supplied for unrestricted calculations. (default: 1 for restricted and 0 for unrestricted calculations) A descriptive string for the backend can be supplied optionally as well. In case of using a python `dict` as the data container, this should be done using the key `backend`. 
For an HDF5 file, this should be done using the attribute `backend`. Defaults based on the filename are generated. Parameters ---------- data : dict or h5py.File Dictionary containing the HartreeFock data to use. For the required keys see details above. """ super().__init__() self.data = data if isinstance(data, dict): self.__backend = data.get('backend', 'dict') elif isinstance(data, h5py.File): if 'r' not in data.mode: raise ValueError('Passed h5py.File stream (filename: {}) not readable.'.format(data.filename)) self.__backend = data.attrs.get('backend', '<HDF5 file "{}">'.format(data.filename)) else: raise TypeError('Can only deal with data objects of type dict or h5py.File.') if data['orbcoeff_fb'].shape[0] % 2 != 0: raise ValueError('orbcoeff_fb first axis should have even length') nb = self.data['orbcoeff_fb'].shape[1] nf = 2 * self.get_n_orbs_alpha() checks = [('orbcoeff_fb', (nf, nb)), ('occupation_f', (nf,)), ('orben_f', (nf,)), ('fock_ff', (nf, nf)), ('eri_ffff', (nf, nf, nf, nf)), ('eri_phys_asym_ffff', (nf, nf, nf, nf))] for (key, exshape) in checks: if key not in data: continue if data[key].shape != exshape: raise ValueError('Shape mismatch for key {}: Expected {}, but got {}.'.format(key, exshape, data[key].shape)) opprov = DataOperatorIntegralProvider(self.__backend) mmp = data.get('multipoles', {}) if 'elec_1' in mmp: if mmp['elec_1'].shape != (3, nb, nb): raise ValueError('multipoles/elec_1 is expected to have shape ' + str((3, nb, nb)) + ' not ' + str(mmp['elec_1'].shape)) opprov.electric_dipole = np.asarray(mmp['elec_1']) magm = data.get('magnetic_moments', {}) if 'mag_1' in magm: if magm['mag_1'].shape != (3, nb, nb): raise ValueError('magnetic_moments/mag_1 is expected to have shape ' + str((3, nb, nb)) + ' not ' + str(magm['mag_1'].shape)) opprov.magnetic_dipole = np.asarray(magm['mag_1']) derivs = data.get('derivatives', {}) if 'nabla' in derivs: if derivs['nabla'].shape != (3, nb, nb): raise ValueError('derivatives/nabla is expected to have shape ' + str((3, nb, nb)) + ' not ' + str(derivs['nabla'].shape)) opprov.nabla = np.asarray(derivs['nabla']) self.operator_integral_provider = opprov
adcc
positive
def dpMain(*args): """ Main function. Check existen nodes and call the scripted function. """ callAction = False <DeepExtract> selList = cmds.ls(selection=True) if selList: for item in selList: if self.dpCheckAllGrp(item): self.allGrp = item for item in selList: relativeList = cmds.listRelatives(item, allParents=True, type='transform') while relativeList: if self.dpCheckAllGrp(relativeList[0]): self.allGrp = relativeList[0] relativeList = cmds.listRelatives(relativeList[0], allParents=True, type='transform') self.allGrp = False </DeepExtract> if self.allGrp: callAction = True else: <DeepExtract> allGrpNodeList = [] allNodeList = cmds.ls(selection=False, type='transform') for nodeName in allNodeList: allGrp = self.dpCheckAllGrp(nodeName) if allGrp: allGrpNodeList.append(allGrp) allGrpList = allGrpNodeList </DeepExtract> if allGrpList: if len(allGrpList) > 1: self.allGrp = cmds.confirmDialog(title=self.langDic[self.langName]['m166_selAllControls'], message=self.langDic[self.langName]['m168_wichAllGrp'], button=allGrpList) else: <DeepExtract> if cmds.objExists(self.allGrp): if cmds.objExists(self.allGrp + '.' + self.masterAttr): if cmds.getAttr(self.allGrp + '.' + self.masterAttr) == 1: self.allGrp = self.allGrp self.allGrp = False </DeepExtract> if self.allGrp: callAction = True else: <DeepExtract> allNodeList = cmds.ls(selection=False) if allNodeList: for item in allNodeList: if self.dpCheckAllGrp(item): self.allGrp = item self.allGrp = False </DeepExtract> if self.allGrp: callAction = True if callAction: <DeepExtract> ctrlsToSelectList = [] if cmds.objExists(self.allGrp + '.' + self.ctrlsAttr): ctrlsAttr = cmds.getAttr(self.allGrp + '.' + self.ctrlsAttr) if ctrlsAttr: currentNamespace = '' if ':' in self.allGrp: currentNamespace = self.allGrp[:self.allGrp.find(':')] ctrlsList = ctrlsAttr.split(';') if ctrlsList: for ctrlName in ctrlsList: if ctrlName: if currentNamespace: ctrlsToSelectList.append(currentNamespace + ':' + ctrlName) else: ctrlsToSelectList.append(ctrlName) cmds.select(ctrlsToSelectList) print(self.langDic[self.langName]['m169_selectedCtrls'] + str(ctrlsToSelectList)) else: mel.eval('warning "' + self.langDic[self.langName]['e019_notFoundAllGrp'] + '";') </DeepExtract> else: mel.eval('warning "' + self.langDic[self.langName]['e019_notFoundAllGrp'] + '";')
def dpMain(*args): """ Main function. Check existen nodes and call the scripted function. """ callAction = False selList = cmds.ls(selection=True) if selList: for item in selList: if self.dpCheckAllGrp(item): self.allGrp = item for item in selList: relativeList = cmds.listRelatives(item, allParents=True, type='transform') while relativeList: if self.dpCheckAllGrp(relativeList[0]): self.allGrp = relativeList[0] relativeList = cmds.listRelatives(relativeList[0], allParents=True, type='transform') self.allGrp = False if self.allGrp: callAction = True else: allGrpNodeList = [] allNodeList = cmds.ls(selection=False, type='transform') for nodeName in allNodeList: allGrp = self.dpCheckAllGrp(nodeName) if allGrp: allGrpNodeList.append(allGrp) allGrpList = allGrpNodeList if allGrpList: if len(allGrpList) > 1: self.allGrp = cmds.confirmDialog(title=self.langDic[self.langName]['m166_selAllControls'], message=self.langDic[self.langName]['m168_wichAllGrp'], button=allGrpList) else: if cmds.objExists(self.allGrp): if cmds.objExists(self.allGrp + '.' + self.masterAttr): if cmds.getAttr(self.allGrp + '.' + self.masterAttr) == 1: self.allGrp = self.allGrp self.allGrp = False if self.allGrp: callAction = True else: allNodeList = cmds.ls(selection=False) if allNodeList: for item in allNodeList: if self.dpCheckAllGrp(item): self.allGrp = item self.allGrp = False if self.allGrp: callAction = True if callAction: ctrlsToSelectList = [] if cmds.objExists(self.allGrp + '.' + self.ctrlsAttr): ctrlsAttr = cmds.getAttr(self.allGrp + '.' + self.ctrlsAttr) if ctrlsAttr: currentNamespace = '' if ':' in self.allGrp: currentNamespace = self.allGrp[:self.allGrp.find(':')] ctrlsList = ctrlsAttr.split(';') if ctrlsList: for ctrlName in ctrlsList: if ctrlName: if currentNamespace: ctrlsToSelectList.append(currentNamespace + ':' + ctrlName) else: ctrlsToSelectList.append(ctrlName) cmds.select(ctrlsToSelectList) print(self.langDic[self.langName]['m169_selectedCtrls'] + str(ctrlsToSelectList)) else: mel.eval('warning "' + self.langDic[self.langName]['e019_notFoundAllGrp'] + '";') else: mel.eval('warning "' + self.langDic[self.langName]['e019_notFoundAllGrp'] + '";')
dpAutoRigSystem
positive
def _core_network(self, l_p, h_p, x_t): """ Parameters: x_t - 28x28 image l_p - 2x1 focus vector h_p - 256x1 vector Returns: h_t, 256x1 vector """ <DeepExtract> sensor_output = self._refined_glimpse_sensor(x_t, l_p) sensor_output = T.flatten(sensor_output) h_g = self._relu(T.dot(sensor_output, self.W_g0)) h_l = self._relu(T.dot(l_p, self.W_g1)) g = self._relu(T.dot(h_g, self.W_g2_hg) + T.dot(h_l, self.W_g2_hl)) g_t = g </DeepExtract> h_t = self._tanh(T.dot(g_t, self.W_h_g) + T.dot(h_p, self.W_h) + self.B_h) <DeepExtract> l_t = T.dot(h_t, self.W_l) </DeepExtract> if not self.disable_reinforce: sampled_l_t = self._sample_gaussian(l_t, self.cov) <DeepExtract> norm2d_var = 1.0 / T.sqrt((2 * np.pi) ** 2 * self.cov_det_var) * T.exp(-0.5 * (disconnected_grad(sampled_l_t) - l_t).T.dot(self.cov_inv_var).dot(disconnected_grad(sampled_l_t) - l_t)) sampled_pdf = norm2d_var </DeepExtract> wl_grad = T.grad(T.log(sampled_pdf), self.W_l) else: sampled_l_t = l_t wl_grad = self.W_l if self.random_glimpse and self.disable_reinforce: sampled_l_t = self.srng.uniform((2,), low=-1.7, high=1.7) <DeepExtract> z = self._relu(T.dot(h_t, self.W_a) + self.B_a) a_t = self._softmax(z) </DeepExtract> return (sampled_l_t, h_t, a_t, wl_grad)
def _core_network(self, l_p, h_p, x_t): """ Parameters: x_t - 28x28 image l_p - 2x1 focus vector h_p - 256x1 vector Returns: h_t, 256x1 vector """ sensor_output = self._refined_glimpse_sensor(x_t, l_p) sensor_output = T.flatten(sensor_output) h_g = self._relu(T.dot(sensor_output, self.W_g0)) h_l = self._relu(T.dot(l_p, self.W_g1)) g = self._relu(T.dot(h_g, self.W_g2_hg) + T.dot(h_l, self.W_g2_hl)) g_t = g h_t = self._tanh(T.dot(g_t, self.W_h_g) + T.dot(h_p, self.W_h) + self.B_h) l_t = T.dot(h_t, self.W_l) if not self.disable_reinforce: sampled_l_t = self._sample_gaussian(l_t, self.cov) norm2d_var = 1.0 / T.sqrt((2 * np.pi) ** 2 * self.cov_det_var) * T.exp(-0.5 * (disconnected_grad(sampled_l_t) - l_t).T.dot(self.cov_inv_var).dot(disconnected_grad(sampled_l_t) - l_t)) sampled_pdf = norm2d_var wl_grad = T.grad(T.log(sampled_pdf), self.W_l) else: sampled_l_t = l_t wl_grad = self.W_l if self.random_glimpse and self.disable_reinforce: sampled_l_t = self.srng.uniform((2,), low=-1.7, high=1.7) z = self._relu(T.dot(h_t, self.W_a) + self.B_a) a_t = self._softmax(z) return (sampled_l_t, h_t, a_t, wl_grad)
deepy
positive
def check_device_state(self, device_id, state_name): <DeepExtract> devices = requests.get('https://{host_uri}/{device_list_endpoint}'.format(host_uri=self.HOST_URI, device_list_endpoint=self.DEVICE_LIST_ENDPOINT), headers={'MyQApplicationId': self.APP_ID, 'SecurityToken': self.myq_security_token}) devices = devices.json()['Devices'] </DeepExtract> for dev in devices: if str(dev['MyQDeviceId']) == str(device_id): for attribute in dev['Attributes']: if attribute['AttributeDisplayName'] == state_name: door_state = attribute['Value'] return door_state
def check_device_state(self, device_id, state_name): devices = requests.get('https://{host_uri}/{device_list_endpoint}'.format(host_uri=self.HOST_URI, device_list_endpoint=self.DEVICE_LIST_ENDPOINT), headers={'MyQApplicationId': self.APP_ID, 'SecurityToken': self.myq_security_token}) devices = devices.json()['Devices'] for dev in devices: if str(dev['MyQDeviceId']) == str(device_id): for attribute in dev['Attributes']: if attribute['AttributeDisplayName'] == state_name: door_state = attribute['Value'] return door_state
Alexa-MyQGarage
positive
def test(): <DeepExtract> cfg = {'out_planes': [200, 400, 800], 'num_blocks': [4, 8, 4], 'groups': 2} net = ShuffleNet(cfg) </DeepExtract> x = torch.randn(1, 3, 32, 32) y = net(x) print(y)
def test(): cfg = {'out_planes': [200, 400, 800], 'num_blocks': [4, 8, 4], 'groups': 2} net = ShuffleNet(cfg) x = torch.randn(1, 3, 32, 32) y = net(x) print(y)
dhp
positive
def next(self): """ Default implementation for built-in backtrader method. Defines one step environment routine; Handles order execution logic according to action received. Note that orders can only be submitted for data_lines in action_space (assets). `self.action` attr. is updated by btgym.server._BTgymAnalyzer, and `None` actions are emitted while doing `skip_frame` loop. """ <DeepExtract> current_value = self.env.broker.get_value() norm_state = self.get_normalisation() positions = [self.env.broker.getposition(data) for data in self.datas] exposure = sum([abs(pos.size) for pos in positions]) self.normalizer = 1 / np.clip(norm_state.up_interval - norm_state.low_interval, 1e-08, None) for (key, method) in self.collection_get_broker_stat_methods.items(): update = method(current_value=current_value, positions=positions, exposure=exposure, lower_bound=norm_state.low_interval, upper_bound=norm_state.up_interval, normalizer=self.normalizer) self.broker_stat[key] = np.concatenate([self.broker_stat[key][1:], np.asarray([float(update)])]) self.trade_just_closed = False self.trade_result = 0 </DeepExtract> if '_skip_this' in self.action.keys(): if self.action_repeated < self.num_action_repeats: self.next_process_fn(self.action_to_repeat) self.action_repeated += 1 else: self.next_process_fn(self.action) self.action_repeated = 0 self.action_to_repeat = self.action
def next(self): """ Default implementation for built-in backtrader method. Defines one step environment routine; Handles order execution logic according to action received. Note that orders can only be submitted for data_lines in action_space (assets). `self.action` attr. is updated by btgym.server._BTgymAnalyzer, and `None` actions are emitted while doing `skip_frame` loop. """ current_value = self.env.broker.get_value() norm_state = self.get_normalisation() positions = [self.env.broker.getposition(data) for data in self.datas] exposure = sum([abs(pos.size) for pos in positions]) self.normalizer = 1 / np.clip(norm_state.up_interval - norm_state.low_interval, 1e-08, None) for (key, method) in self.collection_get_broker_stat_methods.items(): update = method(current_value=current_value, positions=positions, exposure=exposure, lower_bound=norm_state.low_interval, upper_bound=norm_state.up_interval, normalizer=self.normalizer) self.broker_stat[key] = np.concatenate([self.broker_stat[key][1:], np.asarray([float(update)])]) self.trade_just_closed = False self.trade_result = 0 if '_skip_this' in self.action.keys(): if self.action_repeated < self.num_action_repeats: self.next_process_fn(self.action_to_repeat) self.action_repeated += 1 else: self.next_process_fn(self.action) self.action_repeated = 0 self.action_to_repeat = self.action
btgym
positive
def tree_is_perfect_match(self): """ Returns True if self.trees is a singleton that perfectly matches the words in the utterances (with certain simplifactions to each to accommodate different notation and information). """ if len(self.trees) != 1: return False <DeepExtract> tree_lems = self.tree_lemmas() tree_lems = [x for x in tree_lems if x[1] not in {'-NONE-', '-DFL-'}] tree_lems = [(re.sub('-$', '', x[0]), x[1]) for x in tree_lems] tree_lems = tree_lems </DeepExtract> <DeepExtract> pos_lems = self.pos_lemmas() pos_lems = [x for x in pos_lems if x and len(x) == 2] nontree_nodes = ('^PRP^BES', '^FW', '^MD', '^MD^RB', '^PRP^VBZ', '^WP$', '^NN^HVS', 'NN|VBG', '^DT^BES', '^MD^VB', '^DT^JJ', '^PRP^HVS', '^NN^POS', '^WP^BES', '^NN^BES', 'NN|CD', '^WDT', '^VB^PRP') pos_lems = [x for x in pos_lems if x[1] not in nontree_nodes] pos_lems = [x for x in pos_lems if x[0] != '--'] pos_lems = [(re.sub('-$', '', x[0]), x[1]) for x in pos_lems] pos_lems = pos_lems </DeepExtract> if pos_lems == tree_lems: return True else: return False
def tree_is_perfect_match(self): """ Returns True if self.trees is a singleton that perfectly matches the words in the utterances (with certain simplifactions to each to accommodate different notation and information). """ if len(self.trees) != 1: return False tree_lems = self.tree_lemmas() tree_lems = [x for x in tree_lems if x[1] not in {'-NONE-', '-DFL-'}] tree_lems = [(re.sub('-$', '', x[0]), x[1]) for x in tree_lems] tree_lems = tree_lems pos_lems = self.pos_lemmas() pos_lems = [x for x in pos_lems if x and len(x) == 2] nontree_nodes = ('^PRP^BES', '^FW', '^MD', '^MD^RB', '^PRP^VBZ', '^WP$', '^NN^HVS', 'NN|VBG', '^DT^BES', '^MD^VB', '^DT^JJ', '^PRP^HVS', '^NN^POS', '^WP^BES', '^NN^BES', 'NN|CD', '^WDT', '^VB^PRP') pos_lems = [x for x in pos_lems if x[1] not in nontree_nodes] pos_lems = [x for x in pos_lems if x[0] != '--'] pos_lems = [(re.sub('-$', '', x[0]), x[1]) for x in pos_lems] pos_lems = pos_lems if pos_lems == tree_lems: return True else: return False
dialog-processing
positive
def mergeSort(alist): length = len(alist) mid = length // 2 if length > 1: left = alist[:mid] right = alist[mid:] <DeepExtract> length = len(left) mid = length // 2 if length > 1: left = left[:mid] right = left[mid:] mergeSort(left) mergeSort(right) merge(left, left, right) </DeepExtract> <DeepExtract> length = len(right) mid = length // 2 if length > 1: left = right[:mid] right = right[mid:] mergeSort(left) mergeSort(right) merge(right, left, right) </DeepExtract> <DeepExtract> l = 0 r = 0 i = 0 L_len = len(left) R_len = len(right) while l < L_len and r < R_len: if left[l] < right[r]: alist[i] = left[l] i += 1 l += 1 else: alist[i] = right[r] i += 1 r += 1 while l < L_len: alist[i] = left[l] i += 1 l += 1 while r < R_len: alist[i] = right[r] i += 1 r += 1 </DeepExtract>
def mergeSort(alist): length = len(alist) mid = length // 2 if length > 1: left = alist[:mid] right = alist[mid:] length = len(left) mid = length // 2 if length > 1: left = left[:mid] right = left[mid:] mergeSort(left) mergeSort(right) merge(left, left, right) length = len(right) mid = length // 2 if length > 1: left = right[:mid] right = right[mid:] mergeSort(left) mergeSort(right) merge(right, left, right) l = 0 r = 0 i = 0 L_len = len(left) R_len = len(right) while l < L_len and r < R_len: if left[l] < right[r]: alist[i] = left[l] i += 1 l += 1 else: alist[i] = right[r] i += 1 r += 1 while l < L_len: alist[i] = left[l] i += 1 l += 1 while r < R_len: alist[i] = right[r] i += 1 r += 1
168206
positive
def test_epoch_end(self, outputs: List[Any]) -> None: averaged_epoch_loss = sum([output['loss'] for output in outputs]) / len(outputs) self.log(f'{self.TEST_METRICS_PREFIX}_loss', averaged_epoch_loss, on_step=False, prog_bar=True, on_epoch=True) <DeepExtract> metrics = self._head.get_metrics(True) </DeepExtract> for (key, val) in metrics.items(): if key.startswith('_'): metric_name = self.TEST_METRICS_PREFIX + key else: metric_name = self.TEST_METRICS_PREFIX + '_' + key self.log(metric_name, val, on_step=False, prog_bar=not key.startswith('_'), on_epoch=True)
def test_epoch_end(self, outputs: List[Any]) -> None: averaged_epoch_loss = sum([output['loss'] for output in outputs]) / len(outputs) self.log(f'{self.TEST_METRICS_PREFIX}_loss', averaged_epoch_loss, on_step=False, prog_bar=True, on_epoch=True) metrics = self._head.get_metrics(True) for (key, val) in metrics.items(): if key.startswith('_'): metric_name = self.TEST_METRICS_PREFIX + key else: metric_name = self.TEST_METRICS_PREFIX + '_' + key self.log(metric_name, val, on_step=False, prog_bar=not key.startswith('_'), on_epoch=True)
biome-text
positive
@cache_page(1800) def by_arch(request): <DeepExtract> qs = Package.objects.select_related().values('arch__name', 'repo__name').annotate(count=Count('pk'), csize=Sum('compressed_size'), isize=Sum('installed_size'), flagged=Count('flag_date')).order_by() arches = Arch.objects.values_list('name', flat=True) repos = Repo.objects.values_list('name', flat=True) def build_map(name, arch, repo): key = '%s:%s' % (repo or '', arch or '') data = {'key': key, 'name': name, 'arch': arch, 'repo': repo, 'data': []} arch_groups = {a: build_map(a, a, None) for a in arches} repo_groups = {r: build_map(r, None, r) for r in repos} for row in qs: arch = row['arch__name'] repo = row['repo__name'] values = {'arch': arch, 'repo': repo, 'name': '%s (%s)' % (repo, arch), 'key': '%s:%s' % (repo, arch), 'csize': row['csize'], 'isize': row['isize'], 'count': row['count'], 'flagged': row['flagged']} arch_groups[arch]['data'].append(values) repo_groups[repo]['data'].append(values) data = {'by_arch': {'name': 'Architectures', 'data': list(arch_groups.values())}, 'by_repo': {'name': 'Repositories', 'data': list(repo_groups.values())}} data = data </DeepExtract> to_json = json.dumps(data['by_arch'], ensure_ascii=False) return HttpResponse(to_json, content_type='application/json')
@cache_page(1800) def by_arch(request): qs = Package.objects.select_related().values('arch__name', 'repo__name').annotate(count=Count('pk'), csize=Sum('compressed_size'), isize=Sum('installed_size'), flagged=Count('flag_date')).order_by() arches = Arch.objects.values_list('name', flat=True) repos = Repo.objects.values_list('name', flat=True) def build_map(name, arch, repo): key = '%s:%s' % (repo or '', arch or '') data = {'key': key, 'name': name, 'arch': arch, 'repo': repo, 'data': []} arch_groups = {a: build_map(a, a, None) for a in arches} repo_groups = {r: build_map(r, None, r) for r in repos} for row in qs: arch = row['arch__name'] repo = row['repo__name'] values = {'arch': arch, 'repo': repo, 'name': '%s (%s)' % (repo, arch), 'key': '%s:%s' % (repo, arch), 'csize': row['csize'], 'isize': row['isize'], 'count': row['count'], 'flagged': row['flagged']} arch_groups[arch]['data'].append(values) repo_groups[repo]['data'].append(values) data = {'by_arch': {'name': 'Architectures', 'data': list(arch_groups.values())}, 'by_repo': {'name': 'Repositories', 'data': list(repo_groups.values())}} data = data to_json = json.dumps(data['by_arch'], ensure_ascii=False) return HttpResponse(to_json, content_type='application/json')
archweb
positive
def enum_host_info(self): <DeepExtract> try: ldapConnection = ldap_impacket.LDAPConnection('ldap://%s' % self.host) resp = ldapConnection.search(scope=ldapasn1_impacket.Scope('baseObject'), attributes=['defaultNamingContext', 'dnsHostName'], sizeLimit=0) for item in resp: if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True: continue target = None targetDomain = None baseDN = None try: for attribute in item['attributes']: if str(attribute['type']) == 'defaultNamingContext': baseDN = str(attribute['vals'][0]) targetDomain = sub(',DC=', '.', baseDN[baseDN.lower().find('dc='):], flags=I)[3:] if str(attribute['type']) == 'dnsHostName': target = str(attribute['vals'][0]) except Exception as e: logging.debug('Exception:', exc_info=True) logging.debug('Skipping item, cannot process due to error %s' % str(e)) except OSError as e: (self.target, self.targetDomain, self.baseDN) = [None, None, None] (self.target, self.targetDomain, self.baseDN) = [target, targetDomain, baseDN] </DeepExtract> self.hostname = self.target self.domain = self.targetDomain if self.args.no_smb: self.domain = self.args.domain else: self.local_ip = self.conn.getSMBServer().get_socket().getsockname()[0] try: self.conn.login('', '') except Exception as e: if 'STATUS_NOT_SUPPORTED' in str(e): self.no_ntlm = True pass if not self.no_ntlm: self.domain = self.conn.getServerDNSDomainName() self.hostname = self.conn.getServerName() self.server_os = self.conn.getServerOS() self.signing = self.conn.isSigningRequired() if self.smbv1 else self.conn._SMBConnection._Connection['RequireSigning'] <DeepExtract> try: stringBinding = 'ncacn_ip_tcp:{}[135]'.format(self.host) transport = DCERPCTransportFactory(stringBinding) transport.set_connect_timeout(5) dce = transport.get_dce_rpc() if self.args.kerberos: dce.set_auth_type(RPC_C_AUTHN_GSS_NEGOTIATE) dce.connect() try: dce.bind(MSRPC_UUID_PORTMAP, transfer_syntax=('71710533-BEBA-4937-8319-B5DBEF9CCC36', '1.0')) except (DCERPCException, e): if str(e).find('syntaxes_not_supported') >= 0: dce.disconnect() self.os_arch = 32 else: dce.disconnect() self.os_arch = 64 except Exception as e: logging.debug('Error retrieving os arch of {}: {}'.format(self.host, str(e))) self.os_arch = 0 </DeepExtract> self.output_filename = os.path.expanduser('~/.cme/logs/{}_{}_{}'.format(self.hostname, self.host, datetime.now().strftime('%Y-%m-%d_%H%M%S'))) self.output_filename = self.output_filename.replace(':', '-') if not self.domain: self.domain = self.hostname try: "plaintext_login\n DC's seem to want us to logoff first, windows workstations sometimes reset the connection\n (go home Windows, you're drunk)\n " self.conn.logoff() except: pass if self.args.domain: self.domain = self.args.domain if self.args.local_auth: self.domain = self.hostname <DeepExtract> if not self.args.no_smb: if self.create_smbv1_conn(): return True elif self.create_smbv3_conn(): return True return False else: return True </DeepExtract>
def enum_host_info(self): try: ldapConnection = ldap_impacket.LDAPConnection('ldap://%s' % self.host) resp = ldapConnection.search(scope=ldapasn1_impacket.Scope('baseObject'), attributes=['defaultNamingContext', 'dnsHostName'], sizeLimit=0) for item in resp: if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True: continue target = None targetDomain = None baseDN = None try: for attribute in item['attributes']: if str(attribute['type']) == 'defaultNamingContext': baseDN = str(attribute['vals'][0]) targetDomain = sub(',DC=', '.', baseDN[baseDN.lower().find('dc='):], flags=I)[3:] if str(attribute['type']) == 'dnsHostName': target = str(attribute['vals'][0]) except Exception as e: logging.debug('Exception:', exc_info=True) logging.debug('Skipping item, cannot process due to error %s' % str(e)) except OSError as e: (self.target, self.targetDomain, self.baseDN) = [None, None, None] (self.target, self.targetDomain, self.baseDN) = [target, targetDomain, baseDN] self.hostname = self.target self.domain = self.targetDomain if self.args.no_smb: self.domain = self.args.domain else: self.local_ip = self.conn.getSMBServer().get_socket().getsockname()[0] try: self.conn.login('', '') except Exception as e: if 'STATUS_NOT_SUPPORTED' in str(e): self.no_ntlm = True pass if not self.no_ntlm: self.domain = self.conn.getServerDNSDomainName() self.hostname = self.conn.getServerName() self.server_os = self.conn.getServerOS() self.signing = self.conn.isSigningRequired() if self.smbv1 else self.conn._SMBConnection._Connection['RequireSigning'] try: stringBinding = 'ncacn_ip_tcp:{}[135]'.format(self.host) transport = DCERPCTransportFactory(stringBinding) transport.set_connect_timeout(5) dce = transport.get_dce_rpc() if self.args.kerberos: dce.set_auth_type(RPC_C_AUTHN_GSS_NEGOTIATE) dce.connect() try: dce.bind(MSRPC_UUID_PORTMAP, transfer_syntax=('71710533-BEBA-4937-8319-B5DBEF9CCC36', '1.0')) except (DCERPCException, e): if str(e).find('syntaxes_not_supported') >= 0: dce.disconnect() self.os_arch = 32 else: dce.disconnect() self.os_arch = 64 except Exception as e: logging.debug('Error retrieving os arch of {}: {}'.format(self.host, str(e))) self.os_arch = 0 self.output_filename = os.path.expanduser('~/.cme/logs/{}_{}_{}'.format(self.hostname, self.host, datetime.now().strftime('%Y-%m-%d_%H%M%S'))) self.output_filename = self.output_filename.replace(':', '-') if not self.domain: self.domain = self.hostname try: "plaintext_login\n DC's seem to want us to logoff first, windows workstations sometimes reset the connection\n (go home Windows, you're drunk)\n " self.conn.logoff() except: pass if self.args.domain: self.domain = self.args.domain if self.args.local_auth: self.domain = self.hostname if not self.args.no_smb: if self.create_smbv1_conn(): return True elif self.create_smbv3_conn(): return True return False else: return True
CrackMapExec
positive
def run(self): self.buffer += 'digraph G {' self.buffer += DOT_STYLE if isinstance(self.g, DiGraph): for edge in self.g.edges: <DeepExtract> labels = '' if edge.kind is not None: data = '' if edge.data is None else str(edge.data) labels = '[label="%s - %s"]' % (edge.kind, data) nid1 = self.get_node_id(edge.source) nid2 = self.get_node_id(edge.dest) self.buffer += '%s -> %s %s;\n' % (nid1, nid2, labels) </DeepExtract> elif isinstance(self.g, Tree): root = self.g.root worklist = [root] while worklist: current = worklist.pop(0) if current.has_children(): num_children = current.num_children() i = 0 while i < num_children: child = current.children[i] if child is None: i += 1 continue <DeepExtract> nid1 = self.get_node_id(current) nid2 = self.get_node_id(child) self.buffer += '%s -> %s;\n' % (nid1, nid2) </DeepExtract> worklist.insert(0, child) i += 1 else: <DeepExtract> if current not in self.node_ids: self.node_ids[current] = 'node_%d' % current.gid self.add_node(current, self.node_ids[current]) nid = self.node_ids[current] </DeepExtract> self.buffer += '}\n'
def run(self): self.buffer += 'digraph G {' self.buffer += DOT_STYLE if isinstance(self.g, DiGraph): for edge in self.g.edges: labels = '' if edge.kind is not None: data = '' if edge.data is None else str(edge.data) labels = '[label="%s - %s"]' % (edge.kind, data) nid1 = self.get_node_id(edge.source) nid2 = self.get_node_id(edge.dest) self.buffer += '%s -> %s %s;\n' % (nid1, nid2, labels) elif isinstance(self.g, Tree): root = self.g.root worklist = [root] while worklist: current = worklist.pop(0) if current.has_children(): num_children = current.num_children() i = 0 while i < num_children: child = current.children[i] if child is None: i += 1 continue nid1 = self.get_node_id(current) nid2 = self.get_node_id(child) self.buffer += '%s -> %s;\n' % (nid1, nid2) worklist.insert(0, child) i += 1 else: if current not in self.node_ids: self.node_ids[current] = 'node_%d' % current.gid self.add_node(current, self.node_ids[current]) nid = self.node_ids[current] self.buffer += '}\n'
equip
positive
@force_fp32(apply_to='cls_score') def _merge_score(self, cls_score): """ Do softmax in each bin. Decay the score of normal classes with the score of fg. From v1. """ num_proposals = cls_score.shape[0] <DeepExtract> new_preds = [] num_bins = self.pred_slice.shape[0] for i in range(num_bins): start = self.pred_slice[i, 0] length = self.pred_slice[i, 1] sliced_pred = cls_score.narrow(1, start, length) new_preds.append(sliced_pred) new_preds = new_preds </DeepExtract> new_scores = [F.softmax(pred, dim=1) for pred in new_preds] bg_score = new_scores[0] fg_score = new_scores[1:] fg_merge = torch.zeros((num_proposals, self.num_classes)).cuda() merge = torch.zeros((num_proposals, self.num_classes)).cuda() for (i, split) in enumerate(self.fg_splits): fg_merge[:, split] = fg_score[i][:, 1:] weight = bg_score.narrow(1, 1, 1) fg_merge = weight * fg_merge merge[:, 0] = bg_score[:, 0] merge[:, 1:] = fg_merge[:, 1:] return merge
@force_fp32(apply_to='cls_score') def _merge_score(self, cls_score): """ Do softmax in each bin. Decay the score of normal classes with the score of fg. From v1. """ num_proposals = cls_score.shape[0] new_preds = [] num_bins = self.pred_slice.shape[0] for i in range(num_bins): start = self.pred_slice[i, 0] length = self.pred_slice[i, 1] sliced_pred = cls_score.narrow(1, start, length) new_preds.append(sliced_pred) new_preds = new_preds new_scores = [F.softmax(pred, dim=1) for pred in new_preds] bg_score = new_scores[0] fg_score = new_scores[1:] fg_merge = torch.zeros((num_proposals, self.num_classes)).cuda() merge = torch.zeros((num_proposals, self.num_classes)).cuda() for (i, split) in enumerate(self.fg_splits): fg_merge[:, split] = fg_score[i][:, 1:] weight = bg_score.narrow(1, 1, 1) fg_merge = weight * fg_merge merge[:, 0] = bg_score[:, 0] merge[:, 1:] = fg_merge[:, 1:] return merge
BalancedGroupSoftmax
positive
def __init__(self, kvs, delete_on_exit=True): <DeepExtract> (fd, fname) = tempfile.mkstemp('.mat', prefix='ao_', dir=dir) os.close(fd) if contents is not None: make_file(fname, contents) self.fname = os.path.abspath(fname) </DeepExtract> self.delete_on_exit = delete_on_exit scipy.io.savemat(self.fname, kvs)
def __init__(self, kvs, delete_on_exit=True): (fd, fname) = tempfile.mkstemp('.mat', prefix='ao_', dir=dir) os.close(fd) if contents is not None: make_file(fname, contents) self.fname = os.path.abspath(fname) self.delete_on_exit = delete_on_exit scipy.io.savemat(self.fname, kvs)
avobjects
positive
def train_step(self, data): """One training step Arguments: data {dict of data} -- required keys and values: 'X' {LongTensor [batch_size, history_len, max_x_sent_len]} -- token ids of sentences 'X_floor' {LongTensor [batch_size, history_len]} -- floors of sentences 'Y' {LongTensor [batch_size, max_sent_len]} -- label ids of corresponding tokens Returns: dict of data -- returned keys and values 'loss' {FloatTensor []} -- loss to backword dict of statistics -- returned keys and values 'loss' {float} -- batch loss """ (X, Y) = (data['X'], data['Y']) X_floor = data['X_floor'] Y_in = Y[:, :-1].contiguous() Y_out = Y[:, 1:].contiguous() batch_size = X.size(0) max_y_len = Y_out.size(1) <DeepExtract> (batch_size, history_len, max_sent_len) = X.size() input_lens = (X != self.pad_token_id).sum(-1) dial_lens = (input_lens > 0).long().sum(1) flat_inputs = X.view(batch_size * history_len, max_sent_len) flat_input_lens = input_lens.view(batch_size * history_len) (word_encodings, _, sent_encodings) = self.sent_encoder(flat_inputs, flat_input_lens) word_encodings = word_encodings.view(batch_size, history_len, max_sent_len, -1) sent_encodings = sent_encodings.view(batch_size, history_len, -1) tgt_floors = [] tgt_word_encodings = [] for (dial_idx, dial_len) in enumerate(dial_lens): tgt_floors.append(X_floor[dial_idx, dial_len - 1]) tgt_word_encodings.append(word_encodings[dial_idx, dial_len - 1, :, :]) tgt_floors = torch.stack(tgt_floors, 0) tgt_word_encodings = torch.stack(tgt_word_encodings, 0) src_floors = X_floor.view(-1) tgt_floors = tgt_floors.unsqueeze(1).repeat(1, history_len).view(-1) sent_encodings = sent_encodings.view(batch_size * history_len, -1) sent_encodings = self.floor_encoder(sent_encodings, src_floors=src_floors, tgt_floors=tgt_floors) sent_encodings = sent_encodings.view(batch_size, history_len, -1) (_, _, dial_encodings) = self.dial_encoder(sent_encodings, dial_lens) (word_encodings, sent_encodings, dial_encodings, tgt_word_encodings) = (word_encodings, sent_encodings, dial_encodings, tgt_word_encodings) </DeepExtract> if self.attention_type == 'word': attn_keys = word_encodings.view(batch_size, -1, word_encodings.size(-1)) attn_mask = self._get_attn_mask(X).view(batch_size, -1) elif self.attention_type == 'sent': attn_keys = sent_encodings.view(batch_size, -1, sent_encodings.size(-1)) attn_mask = (X != self.pad_token_id).sum(-1) > 0 decoder_ret_dict = self._decode(dec_inputs=Y_in, word_encodings=tgt_word_encodings, sent_encodings=dial_encodings, attn_ctx=attn_keys, attn_mask=attn_mask) loss = 0 logits = decoder_ret_dict['logits'] label_losses = F.cross_entropy(logits.view(-1, self.label_vocab_size), Y_out.view(-1), ignore_index=self.pad_label_id, reduction='none').view(batch_size, max_y_len) sent_loss = label_losses.sum(1).mean(0) loss += sent_loss ret_data = {'loss': loss} ret_stat = {'loss': loss.item()} return (ret_data, ret_stat)
def train_step(self, data): """One training step Arguments: data {dict of data} -- required keys and values: 'X' {LongTensor [batch_size, history_len, max_x_sent_len]} -- token ids of sentences 'X_floor' {LongTensor [batch_size, history_len]} -- floors of sentences 'Y' {LongTensor [batch_size, max_sent_len]} -- label ids of corresponding tokens Returns: dict of data -- returned keys and values 'loss' {FloatTensor []} -- loss to backword dict of statistics -- returned keys and values 'loss' {float} -- batch loss """ (X, Y) = (data['X'], data['Y']) X_floor = data['X_floor'] Y_in = Y[:, :-1].contiguous() Y_out = Y[:, 1:].contiguous() batch_size = X.size(0) max_y_len = Y_out.size(1) (batch_size, history_len, max_sent_len) = X.size() input_lens = (X != self.pad_token_id).sum(-1) dial_lens = (input_lens > 0).long().sum(1) flat_inputs = X.view(batch_size * history_len, max_sent_len) flat_input_lens = input_lens.view(batch_size * history_len) (word_encodings, _, sent_encodings) = self.sent_encoder(flat_inputs, flat_input_lens) word_encodings = word_encodings.view(batch_size, history_len, max_sent_len, -1) sent_encodings = sent_encodings.view(batch_size, history_len, -1) tgt_floors = [] tgt_word_encodings = [] for (dial_idx, dial_len) in enumerate(dial_lens): tgt_floors.append(X_floor[dial_idx, dial_len - 1]) tgt_word_encodings.append(word_encodings[dial_idx, dial_len - 1, :, :]) tgt_floors = torch.stack(tgt_floors, 0) tgt_word_encodings = torch.stack(tgt_word_encodings, 0) src_floors = X_floor.view(-1) tgt_floors = tgt_floors.unsqueeze(1).repeat(1, history_len).view(-1) sent_encodings = sent_encodings.view(batch_size * history_len, -1) sent_encodings = self.floor_encoder(sent_encodings, src_floors=src_floors, tgt_floors=tgt_floors) sent_encodings = sent_encodings.view(batch_size, history_len, -1) (_, _, dial_encodings) = self.dial_encoder(sent_encodings, dial_lens) (word_encodings, sent_encodings, dial_encodings, tgt_word_encodings) = (word_encodings, sent_encodings, dial_encodings, tgt_word_encodings) if self.attention_type == 'word': attn_keys = word_encodings.view(batch_size, -1, word_encodings.size(-1)) attn_mask = self._get_attn_mask(X).view(batch_size, -1) elif self.attention_type == 'sent': attn_keys = sent_encodings.view(batch_size, -1, sent_encodings.size(-1)) attn_mask = (X != self.pad_token_id).sum(-1) > 0 decoder_ret_dict = self._decode(dec_inputs=Y_in, word_encodings=tgt_word_encodings, sent_encodings=dial_encodings, attn_ctx=attn_keys, attn_mask=attn_mask) loss = 0 logits = decoder_ret_dict['logits'] label_losses = F.cross_entropy(logits.view(-1, self.label_vocab_size), Y_out.view(-1), ignore_index=self.pad_label_id, reduction='none').view(batch_size, max_y_len) sent_loss = label_losses.sum(1).mean(0) loss += sent_loss ret_data = {'loss': loss} ret_stat = {'loss': loss.item()} return (ret_data, ret_stat)
dialog-processing
positive
def metadata_action(args: argparse.Namespace) -> int: try: r = acd_client.get_metadata(args.node, args.assets) <DeepExtract> print(json.dumps(r, indent=4, sort_keys=True)) </DeepExtract> except RequestError as e: print(e) return INVALID_ARG_RETVAL
def metadata_action(args: argparse.Namespace) -> int: try: r = acd_client.get_metadata(args.node, args.assets) print(json.dumps(r, indent=4, sort_keys=True)) except RequestError as e: print(e) return INVALID_ARG_RETVAL
acd_cli
positive
def scalar_jacfunc(vs, obj, obj_scalar, free_variables): if not hasattr(scalar_jacfunc, 'vs'): scalar_jacfunc.vs = vs * 0 + 1e+16 if np.max(np.abs(vs - scalar_jacfunc.vs)) == 0: return scalar_jacfunc.J <DeepExtract> cur = 0 changed = False for (idx, freevar) in enumerate(free_variables): sz = freevar.r.size newvals = vs[cur:cur + sz].copy().reshape(free_variables[idx].shape) if np.max(np.abs(newvals - free_variables[idx]).ravel()) > 0: free_variables[idx][:] = newvals changed = True cur += sz methods_without_callback = ('anneal', 'powell', 'cobyla', 'slsqp') if callback is not None and changed and (method.lower() in methods_without_callback): callback(None) return changed </DeepExtract> if True: result = np.concatenate([np.array(obj_scalar.lop(wrt, np.array([[1]]))).ravel() for wrt in free_variables]) else: jacs = [obj_scalar.dr_wrt(wrt) for wrt in free_variables] for (idx, jac) in enumerate(jacs): if sp.issparse(jac): jacs[idx] = jacs[idx].todense() result = np.concatenate([jac.ravel() for jac in jacs]) scalar_jacfunc.J = result scalar_jacfunc.vs = vs return result.ravel()
def scalar_jacfunc(vs, obj, obj_scalar, free_variables): if not hasattr(scalar_jacfunc, 'vs'): scalar_jacfunc.vs = vs * 0 + 1e+16 if np.max(np.abs(vs - scalar_jacfunc.vs)) == 0: return scalar_jacfunc.J cur = 0 changed = False for (idx, freevar) in enumerate(free_variables): sz = freevar.r.size newvals = vs[cur:cur + sz].copy().reshape(free_variables[idx].shape) if np.max(np.abs(newvals - free_variables[idx]).ravel()) > 0: free_variables[idx][:] = newvals changed = True cur += sz methods_without_callback = ('anneal', 'powell', 'cobyla', 'slsqp') if callback is not None and changed and (method.lower() in methods_without_callback): callback(None) return changed if True: result = np.concatenate([np.array(obj_scalar.lop(wrt, np.array([[1]]))).ravel() for wrt in free_variables]) else: jacs = [obj_scalar.dr_wrt(wrt) for wrt in free_variables] for (idx, jac) in enumerate(jacs): if sp.issparse(jac): jacs[idx] = jacs[idx].todense() result = np.concatenate([jac.ravel() for jac in jacs]) scalar_jacfunc.J = result scalar_jacfunc.vs = vs return result.ravel()
chumpy
positive
def dispatch_admin_message(self, msg): """Dispatches a message originating from an admin to all handlers.""" if msg.command == 'PRIVMSG': <DeepExtract> pass </DeepExtract> if self.is_command(msg): <DeepExtract> cmd_name = msg.params[-1].split(' ')[0] cmd_name = cmd_name.strip(self.get_command_prefix()) cmd_name = cmd_name </DeepExtract> <DeepExtract> handler_name = self.admin_handler_prefix + cmd_name func = getattr(self, handler_name, None) </DeepExtract> if func is not None: func(msg)
def dispatch_admin_message(self, msg): """Dispatches a message originating from an admin to all handlers.""" if msg.command == 'PRIVMSG': pass if self.is_command(msg): cmd_name = msg.params[-1].split(' ')[0] cmd_name = cmd_name.strip(self.get_command_prefix()) cmd_name = cmd_name handler_name = self.admin_handler_prefix + cmd_name func = getattr(self, handler_name, None) if func is not None: func(msg)
botnet
positive
def _init_modules(self): assert cfg.RESNETS.FREEZE_AT in [0, 2, 3, 4, 5] assert cfg.RESNETS.FREEZE_AT <= self.convX for i in range(1, cfg.RESNETS.FREEZE_AT + 1): <DeepExtract> for p in getattr(self, 'res%d' % i).parameters(): p.requires_grad = False </DeepExtract> self.apply(lambda m: freeze_params(m) if isinstance(m, mynn.AffineChannel2d) else None)
def _init_modules(self): assert cfg.RESNETS.FREEZE_AT in [0, 2, 3, 4, 5] assert cfg.RESNETS.FREEZE_AT <= self.convX for i in range(1, cfg.RESNETS.FREEZE_AT + 1): for p in getattr(self, 'res%d' % i).parameters(): p.requires_grad = False self.apply(lambda m: freeze_params(m) if isinstance(m, mynn.AffineChannel2d) else None)
DIoU-pytorch-detectron
positive
def forward_train(self): <DeepExtract> self.d0 = self.net.forward(self.var_ref, self.var_p0, retPerLayer=retPerLayer) </DeepExtract> <DeepExtract> self.d1 = self.net.forward(self.var_ref, self.var_p1, retPerLayer=retPerLayer) </DeepExtract> <DeepExtract> d1_lt_d0 = (self.d1 < self.d0).cpu().data.numpy().flatten() judge_per = self.input_judge.cpu().numpy().flatten() self.acc_r = d1_lt_d0 * judge_per + (1 - d1_lt_d0) * (1 - judge_per) </DeepExtract> self.var_judge = Variable(1.0 * self.input_judge).view(self.d0.size()) self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge * 2.0 - 1.0) return self.loss_total
def forward_train(self): self.d0 = self.net.forward(self.var_ref, self.var_p0, retPerLayer=retPerLayer) self.d1 = self.net.forward(self.var_ref, self.var_p1, retPerLayer=retPerLayer) d1_lt_d0 = (self.d1 < self.d0).cpu().data.numpy().flatten() judge_per = self.input_judge.cpu().numpy().flatten() self.acc_r = d1_lt_d0 * judge_per + (1 - d1_lt_d0) * (1 - judge_per) self.var_judge = Variable(1.0 * self.input_judge).view(self.d0.size()) self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge * 2.0 - 1.0) return self.loss_total
DASR
positive
def load_by_order(self, path): hdf5_dict = read_hdf5(path) assigned_params = 0 kernel_idx = 0 sigma_idx = 0 mu_idx = 0 gamma_idx = 0 beta_idx = 0 for (k, v) in self.state.model.named_parameters(): if k in hdf5_dict: value = hdf5_dict[k] else: if 'conv.weight' in k: order_key = 'kernel{}'.format(kernel_idx) kernel_idx += 1 elif 'bn.weight' in k: order_key = 'gamma{}'.format(gamma_idx) gamma_idx += 1 elif 'bn.bias' in k: order_key = 'beta{}'.format(beta_idx) beta_idx += 1 else: order_key = None value = None if order_key is None else hdf5_dict[order_key] if value is not None: <DeepExtract> v.data = torch.from_numpy(value).cuda().type(torch.cuda.FloatTensor) </DeepExtract> assigned_params += 1 for (k, v) in self.state.model.named_buffers(): if k in hdf5_dict: value = hdf5_dict[k] else: if 'bn.running_mean' in k: order_key = 'mu{}'.format(mu_idx) mu_idx += 1 elif 'bn.running_var' in k: order_key = 'sigma{}'.format(sigma_idx) sigma_idx += 1 else: order_key = None value = None if order_key is None else hdf5_dict[order_key] if value is not None: <DeepExtract> v.data = torch.from_numpy(value).cuda().type(torch.cuda.FloatTensor) </DeepExtract> assigned_params += 1 msg = 'Assigned {} params '.format(assigned_params) if path is not None: msg += ' from hdf5: {}'.format(path) <DeepExtract> if self.local_rank == 0: print(msg) </DeepExtract>
def load_by_order(self, path): hdf5_dict = read_hdf5(path) assigned_params = 0 kernel_idx = 0 sigma_idx = 0 mu_idx = 0 gamma_idx = 0 beta_idx = 0 for (k, v) in self.state.model.named_parameters(): if k in hdf5_dict: value = hdf5_dict[k] else: if 'conv.weight' in k: order_key = 'kernel{}'.format(kernel_idx) kernel_idx += 1 elif 'bn.weight' in k: order_key = 'gamma{}'.format(gamma_idx) gamma_idx += 1 elif 'bn.bias' in k: order_key = 'beta{}'.format(beta_idx) beta_idx += 1 else: order_key = None value = None if order_key is None else hdf5_dict[order_key] if value is not None: v.data = torch.from_numpy(value).cuda().type(torch.cuda.FloatTensor) assigned_params += 1 for (k, v) in self.state.model.named_buffers(): if k in hdf5_dict: value = hdf5_dict[k] else: if 'bn.running_mean' in k: order_key = 'mu{}'.format(mu_idx) mu_idx += 1 elif 'bn.running_var' in k: order_key = 'sigma{}'.format(sigma_idx) sigma_idx += 1 else: order_key = None value = None if order_key is None else hdf5_dict[order_key] if value is not None: v.data = torch.from_numpy(value).cuda().type(torch.cuda.FloatTensor) assigned_params += 1 msg = 'Assigned {} params '.format(assigned_params) if path is not None: msg += ' from hdf5: {}'.format(path) if self.local_rank == 0: print(msg)
AOFP
positive
def forward(self, output, mask, ind, rotbin, rotres): pred = _tranpose_and_gather_feat(output, ind) <DeepExtract> pred = pred.view(-1, 8) rotbin = rotbin.view(-1, 2) rotres = rotres.view(-1, 2) mask = mask.view(-1, 1) loss_bin1 = compute_bin_loss(pred[:, 0:2], rotbin[:, 0], mask) loss_bin2 = compute_bin_loss(pred[:, 4:6], rotbin[:, 1], mask) loss_res = torch.zeros_like(loss_bin1) if rotbin[:, 0].nonzero().shape[0] > 0: idx1 = rotbin[:, 0].nonzero()[:, 0] valid_output1 = torch.index_select(pred, 0, idx1.long()) valid_target_res1 = torch.index_select(rotres, 0, idx1.long()) loss_sin1 = compute_res_loss(valid_output1[:, 2], torch.sin(valid_target_res1[:, 0])) loss_cos1 = compute_res_loss(valid_output1[:, 3], torch.cos(valid_target_res1[:, 0])) loss_res += loss_sin1 + loss_cos1 if rotbin[:, 1].nonzero().shape[0] > 0: idx2 = rotbin[:, 1].nonzero()[:, 0] valid_output2 = torch.index_select(pred, 0, idx2.long()) valid_target_res2 = torch.index_select(rotres, 0, idx2.long()) loss_sin2 = compute_res_loss(valid_output2[:, 6], torch.sin(valid_target_res2[:, 1])) loss_cos2 = compute_res_loss(valid_output2[:, 7], torch.cos(valid_target_res2[:, 1])) loss_res += loss_sin2 + loss_cos2 loss = loss_bin1 + loss_bin2 + loss_res </DeepExtract> return loss
def forward(self, output, mask, ind, rotbin, rotres): pred = _tranpose_and_gather_feat(output, ind) pred = pred.view(-1, 8) rotbin = rotbin.view(-1, 2) rotres = rotres.view(-1, 2) mask = mask.view(-1, 1) loss_bin1 = compute_bin_loss(pred[:, 0:2], rotbin[:, 0], mask) loss_bin2 = compute_bin_loss(pred[:, 4:6], rotbin[:, 1], mask) loss_res = torch.zeros_like(loss_bin1) if rotbin[:, 0].nonzero().shape[0] > 0: idx1 = rotbin[:, 0].nonzero()[:, 0] valid_output1 = torch.index_select(pred, 0, idx1.long()) valid_target_res1 = torch.index_select(rotres, 0, idx1.long()) loss_sin1 = compute_res_loss(valid_output1[:, 2], torch.sin(valid_target_res1[:, 0])) loss_cos1 = compute_res_loss(valid_output1[:, 3], torch.cos(valid_target_res1[:, 0])) loss_res += loss_sin1 + loss_cos1 if rotbin[:, 1].nonzero().shape[0] > 0: idx2 = rotbin[:, 1].nonzero()[:, 0] valid_output2 = torch.index_select(pred, 0, idx2.long()) valid_target_res2 = torch.index_select(rotres, 0, idx2.long()) loss_sin2 = compute_res_loss(valid_output2[:, 6], torch.sin(valid_target_res2[:, 1])) loss_cos2 = compute_res_loss(valid_output2[:, 7], torch.cos(valid_target_res2[:, 1])) loss_res += loss_sin2 + loss_cos2 loss = loss_bin1 + loss_bin2 + loss_res return loss
centerNet-deep-sort
positive
def test_clean(self): <DeepExtract> self.project.item.allow_overlapping = False self.project.item.save() </DeepExtract> self.spans.clean(self.project.item) self.assertEqual(len(self.spans), 2)
def test_clean(self): self.project.item.allow_overlapping = False self.project.item.save() self.spans.clean(self.project.item) self.assertEqual(len(self.spans), 2)
doccano
positive
def put(self, put_data, resource=None, id=None): url = '%s://%s/%s' % (self._module.params['nitro_protocol'], self._module.params['nsip'], self.api_path) if resource is not None: url = '%s/%s' % (url, resource) if id is not None: url = '%s/%s' % (url, id) data = self._module.jsonify(put_data) (r, info) = fetch_url(self._module, url=url, headers=self._headers, data=data, method='PUT') result = {} <DeepExtract> self.r = r self.info = info if r is not None: result['http_response_body'] = codecs.decode(r.read(), 'utf-8') elif 'body' in info: result['http_response_body'] = codecs.decode(info['body'], 'utf-8') del info['body'] else: result['http_response_body'] = '' result['http_response_data'] = info result['nitro_errorcode'] = None result['nitro_message'] = None result['nitro_severity'] = None if result['http_response_body'] != '': try: data = self._module.from_json(result['http_response_body']) del result['http_response_body'] except ValueError: data = {} result['data'] = data result['nitro_errorcode'] = data.get('errorcode') result['nitro_message'] = data.get('message') result['nitro_severity'] = data.get('severity') </DeepExtract> return result
def put(self, put_data, resource=None, id=None): url = '%s://%s/%s' % (self._module.params['nitro_protocol'], self._module.params['nsip'], self.api_path) if resource is not None: url = '%s/%s' % (url, resource) if id is not None: url = '%s/%s' % (url, id) data = self._module.jsonify(put_data) (r, info) = fetch_url(self._module, url=url, headers=self._headers, data=data, method='PUT') result = {} self.r = r self.info = info if r is not None: result['http_response_body'] = codecs.decode(r.read(), 'utf-8') elif 'body' in info: result['http_response_body'] = codecs.decode(info['body'], 'utf-8') del info['body'] else: result['http_response_body'] = '' result['http_response_data'] = info result['nitro_errorcode'] = None result['nitro_message'] = None result['nitro_severity'] = None if result['http_response_body'] != '': try: data = self._module.from_json(result['http_response_body']) del result['http_response_body'] except ValueError: data = {} result['data'] = data result['nitro_errorcode'] = data.get('errorcode') result['nitro_message'] = data.get('message') result['nitro_severity'] = data.get('severity') return result
citrix-adc-ansible-modules
positive
def forward(self, x): <DeepExtract> kernel_size_effective = self.kernel_size + (self.kernel_size - 1) * (self.dilation - 1) pad_total = kernel_size_effective - 1 pad_beg = pad_total // 2 pad_end = pad_total - pad_beg padded_inputs = F.pad(x, (pad_beg, pad_end, pad_beg, pad_end)) x_pad = padded_inputs </DeepExtract> if self.use_res_connect: x = x + self.conv(x_pad) else: x = self.conv(x_pad) return x
def forward(self, x): kernel_size_effective = self.kernel_size + (self.kernel_size - 1) * (self.dilation - 1) pad_total = kernel_size_effective - 1 pad_beg = pad_total // 2 pad_end = pad_total - pad_beg padded_inputs = F.pad(x, (pad_beg, pad_end, pad_beg, pad_end)) x_pad = padded_inputs if self.use_res_connect: x = x + self.conv(x_pad) else: x = self.conv(x_pad) return x
CVPR2020_MANet
positive
def accuracy(predict, label, pre_pro): predict = np.array(predict) label = np.array(label) if len(predict) == 0: return None if pre_pro == 'sm': <DeepExtract> orig_shape = predict.shape if len(predict.shape) > 1: exp_minmax = lambda x: np.exp(predict - np.max(predict)) denom = lambda x: 1.0 / np.sum(predict) predict = np.apply_along_axis(exp_minmax, 1, predict) denominator = np.apply_along_axis(denom, 1, predict) if len(denominator.shape) == 1: denominator = denominator.reshape((denominator.shape[0], 1)) predict = predict * denominator else: x_max = np.max(predict) predict = predict - x_max numerator = np.exp(predict) denominator = 1.0 / np.sum(numerator) predict = numerator.dot(denominator) assert predict.shape == orig_shape predict = predict </DeepExtract> if pre_pro == 'Lsm': predict = np.power(math.e, predict) total = len(predict) true = 0 for i in range(total): result = np.argmax(predict[i]) if result == label[i]: true += 1 return float(true) / float(total)
def accuracy(predict, label, pre_pro): predict = np.array(predict) label = np.array(label) if len(predict) == 0: return None if pre_pro == 'sm': orig_shape = predict.shape if len(predict.shape) > 1: exp_minmax = lambda x: np.exp(predict - np.max(predict)) denom = lambda x: 1.0 / np.sum(predict) predict = np.apply_along_axis(exp_minmax, 1, predict) denominator = np.apply_along_axis(denom, 1, predict) if len(denominator.shape) == 1: denominator = denominator.reshape((denominator.shape[0], 1)) predict = predict * denominator else: x_max = np.max(predict) predict = predict - x_max numerator = np.exp(predict) denominator = 1.0 / np.sum(numerator) predict = numerator.dot(denominator) assert predict.shape == orig_shape predict = predict if pre_pro == 'Lsm': predict = np.power(math.e, predict) total = len(predict) true = 0 for i in range(total): result = np.argmax(predict[i]) if result == label[i]: true += 1 return float(true) / float(total)
Deep-RNN-Framework
positive
def test(self): benji_obj = self.benji_open() store = BenjiStore(benji_obj) addr = ('127.0.0.1', self.SERVER_PORT) read_only = False discard_changes = False self.nbd_server = NbdServer(addr, store, read_only, discard_changes) logger.info('Starting to serve NBD on %s:%s' % (addr[0], addr[1])) <DeepExtract> completed = subprocess.run(args=['sudo', 'modprobe', 'nbd'], stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8', errors='ignore') if check and completed.returncode != 0: self.fail('command {} failed: {}'.format(' '.join(['sudo', 'modprobe', 'nbd']), completed.stdout.replace('\n', '|'))) if success_regexp: if not re.match(success_regexp, completed.stdout, re.I | re.M | re.S): self.fail('command {} failed: {}'.format(' '.join(['sudo', 'modprobe', 'nbd']), completed.stdout.replace('\n', '|'))) </DeepExtract> self.nbd_client_thread = threading.Thread(target=self.nbd_client, daemon=True, args=(self.version_uid,)) self.nbd_client_thread.start() self.nbd_server.serve_forever() self.nbd_client_thread.join() self.assertEqual({self.version_uid[0]}, {version.uid for version in benji_obj.find_versions_with_filter()}) benji_obj.close()
def test(self): benji_obj = self.benji_open() store = BenjiStore(benji_obj) addr = ('127.0.0.1', self.SERVER_PORT) read_only = False discard_changes = False self.nbd_server = NbdServer(addr, store, read_only, discard_changes) logger.info('Starting to serve NBD on %s:%s' % (addr[0], addr[1])) completed = subprocess.run(args=['sudo', 'modprobe', 'nbd'], stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8', errors='ignore') if check and completed.returncode != 0: self.fail('command {} failed: {}'.format(' '.join(['sudo', 'modprobe', 'nbd']), completed.stdout.replace('\n', '|'))) if success_regexp: if not re.match(success_regexp, completed.stdout, re.I | re.M | re.S): self.fail('command {} failed: {}'.format(' '.join(['sudo', 'modprobe', 'nbd']), completed.stdout.replace('\n', '|'))) self.nbd_client_thread = threading.Thread(target=self.nbd_client, daemon=True, args=(self.version_uid,)) self.nbd_client_thread.start() self.nbd_server.serve_forever() self.nbd_client_thread.join() self.assertEqual({self.version_uid[0]}, {version.uid for version in benji_obj.find_versions_with_filter()}) benji_obj.close()
benji
positive
def one_hot(x, num_classes, *, dtype=None, axis=-1): """One-hot encodes the given indicies. Each index in the input ``x`` is encoded as a vector of zeros of length ``num_classes`` with the element at ``index`` set to one:: >>> import jax.numpy as jnp >>> one_hot(jnp.array([0, 1, 2]), 3) Array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], dtype=float32) Indicies outside the range [0, num_classes) will be encoded as zeros:: >>> import jax.numpy as jnp >>> one_hot(jnp.array([-1, 3]), 3) Array([[0., 0., 0.], [0., 0., 0.]], dtype=float32) Args: x: A tensor of indices. num_classes: Number of classes in the one-hot dimension. dtype: optional, a float dtype for the returned values (default float64 if jax_enable_x64 is true, otherwise float32). axis: the axis or axes along which the function should be computed. """ num_classes = jax.core.concrete_or_error(int, num_classes, 'The error arose in jax.nn.one_hot argument `num_classes`.') dtype = jax.dtypes.canonicalize_dtype(jnp.float64 if dtype is None else dtype) x = jnp.asarray(x.value if isinstance(x, Array) else x) try: <DeepExtract> axis = operator.index(axis) if not -x.ndim + 1 <= axis < x.ndim + 1: raise ValueError('axis {} is out of bounds for array of dimension {}'.format(axis, x.ndim + 1)) if axis < 0: axis = axis + x.ndim + 1 output_pos_axis = axis </DeepExtract> except TypeError: axis_size = jax.lax.psum(1, axis) if num_classes != axis_size: raise ValueError(f'Expected num_classes to match the size of axis {axis}, but {num_classes} != {axis_size}') from None axis_idx = jax.lax.axis_index(axis) return jnp.asarray(x == axis_idx, dtype=dtype) axis = operator.index(axis) lhs = jax.lax.expand_dims(x, (axis,)) rhs_shape = [1] * x.ndim rhs_shape.insert(output_pos_axis, num_classes) rhs = jax.lax.broadcast_in_dim(jnp.arange(num_classes, dtype=x.dtype), rhs_shape, (output_pos_axis,)) return jnp.asarray(lhs == rhs, dtype=dtype)
def one_hot(x, num_classes, *, dtype=None, axis=-1): """One-hot encodes the given indicies. Each index in the input ``x`` is encoded as a vector of zeros of length ``num_classes`` with the element at ``index`` set to one:: >>> import jax.numpy as jnp >>> one_hot(jnp.array([0, 1, 2]), 3) Array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], dtype=float32) Indicies outside the range [0, num_classes) will be encoded as zeros:: >>> import jax.numpy as jnp >>> one_hot(jnp.array([-1, 3]), 3) Array([[0., 0., 0.], [0., 0., 0.]], dtype=float32) Args: x: A tensor of indices. num_classes: Number of classes in the one-hot dimension. dtype: optional, a float dtype for the returned values (default float64 if jax_enable_x64 is true, otherwise float32). axis: the axis or axes along which the function should be computed. """ num_classes = jax.core.concrete_or_error(int, num_classes, 'The error arose in jax.nn.one_hot argument `num_classes`.') dtype = jax.dtypes.canonicalize_dtype(jnp.float64 if dtype is None else dtype) x = jnp.asarray(x.value if isinstance(x, Array) else x) try: axis = operator.index(axis) if not -x.ndim + 1 <= axis < x.ndim + 1: raise ValueError('axis {} is out of bounds for array of dimension {}'.format(axis, x.ndim + 1)) if axis < 0: axis = axis + x.ndim + 1 output_pos_axis = axis except TypeError: axis_size = jax.lax.psum(1, axis) if num_classes != axis_size: raise ValueError(f'Expected num_classes to match the size of axis {axis}, but {num_classes} != {axis_size}') from None axis_idx = jax.lax.axis_index(axis) return jnp.asarray(x == axis_idx, dtype=dtype) axis = operator.index(axis) lhs = jax.lax.expand_dims(x, (axis,)) rhs_shape = [1] * x.ndim rhs_shape.insert(output_pos_axis, num_classes) rhs = jax.lax.broadcast_in_dim(jnp.arange(num_classes, dtype=x.dtype), rhs_shape, (output_pos_axis,)) return jnp.asarray(lhs == rhs, dtype=dtype)
BrainPy
positive
def got_ops_callback(self, ops): for (op, blockheader, block_index, txs) in ops: if op == 'add': <DeepExtract> with self._lock: self.set_last_block_index(block_index) for tx in txs: self._process_confirmed_tx(tx, blockheader, block_index) </DeepExtract> elif op == 'remove': <DeepExtract> with self._lock: self.set_last_block_index(block_index - 1) self.persistence.invalidate_block_index_for_spendables(block_index) </DeepExtract> else: raise Exception('unknown op: %s' % op)
def got_ops_callback(self, ops): for (op, blockheader, block_index, txs) in ops: if op == 'add': with self._lock: self.set_last_block_index(block_index) for tx in txs: self._process_confirmed_tx(tx, blockheader, block_index) elif op == 'remove': with self._lock: self.set_last_block_index(block_index - 1) self.persistence.invalidate_block_index_for_spendables(block_index) else: raise Exception('unknown op: %s' % op)
dashman
positive
def autojoin_cb(data, buffer, args): """Old behaviour: doesn't save empty channel list""" "In fact should also save open buffers with a /part'ed channel" "But I can't believe somebody would want that behaviour" <DeepExtract> items = {} infolist = w.infolist_get('irc_server', '', '') while w.infolist_next(infolist): items[w.infolist_string(infolist, 'name')] = '' w.infolist_free(infolist) for server in items.keys(): keys = [] channels = [] items[server] = '' infolist = w.infolist_get('irc_channel', '', server) while w.infolist_next(infolist): if w.infolist_integer(infolist, 'nicks_count') == 0: continue if w.infolist_integer(infolist, 'type') == 0: channels.append(w.infolist_string(infolist, 'name')) key = w.infolist_string(infolist, 'key') if len(key) > 0: keys.append(key) items[server] = ','.join(channels) if len(keys) > 0: items[server] += ' %s' % ','.join(keys) w.infolist_free(infolist) items = items </DeepExtract> for (server, channels) in items.iteritems(): channels = channels.rstrip(',') if not channels: continue command = '/set irc.server.%s.autojoin %s' % (server, channels) if args == '--run': w.command('', command) else: w.prnt('', command) return w.WEECHAT_RC_OK
def autojoin_cb(data, buffer, args): """Old behaviour: doesn't save empty channel list""" "In fact should also save open buffers with a /part'ed channel" "But I can't believe somebody would want that behaviour" items = {} infolist = w.infolist_get('irc_server', '', '') while w.infolist_next(infolist): items[w.infolist_string(infolist, 'name')] = '' w.infolist_free(infolist) for server in items.keys(): keys = [] channels = [] items[server] = '' infolist = w.infolist_get('irc_channel', '', server) while w.infolist_next(infolist): if w.infolist_integer(infolist, 'nicks_count') == 0: continue if w.infolist_integer(infolist, 'type') == 0: channels.append(w.infolist_string(infolist, 'name')) key = w.infolist_string(infolist, 'key') if len(key) > 0: keys.append(key) items[server] = ','.join(channels) if len(keys) > 0: items[server] += ' %s' % ','.join(keys) w.infolist_free(infolist) items = items for (server, channels) in items.iteritems(): channels = channels.rstrip(',') if not channels: continue command = '/set irc.server.%s.autojoin %s' % (server, channels) if args == '--run': w.command('', command) else: w.prnt('', command) return w.WEECHAT_RC_OK
dotfiles
positive
def simple_test(self, img, img_meta, rescale=True): """Simple test with single image.""" <DeepExtract> assert self.test_cfg.mode in ['slide', 'whole'] ori_shape = img_meta[0]['ori_shape'] assert all((_['ori_shape'] == ori_shape for _ in img_meta)) if self.test_cfg.mode == 'slide': seg_logit = self.slide_inference(img, img_meta, rescale) else: seg_logit = self.whole_inference(img, img_meta, rescale) output = F.softmax(seg_logit, dim=1) flip = img_meta[0]['flip'] if flip: flip_direction = img_meta[0]['flip_direction'] assert flip_direction in ['horizontal', 'vertical'] if flip_direction == 'horizontal': output = output.flip(dims=(3,)) elif flip_direction == 'vertical': output = output.flip(dims=(2,)) seg_logit = output </DeepExtract> seg_pred = seg_logit.argmax(dim=1) if torch.onnx.is_in_onnx_export(): seg_pred = seg_pred.unsqueeze(0) return seg_pred seg_pred = seg_pred.cpu().numpy() seg_pred = list(seg_pred) return seg_pred
def simple_test(self, img, img_meta, rescale=True): """Simple test with single image.""" assert self.test_cfg.mode in ['slide', 'whole'] ori_shape = img_meta[0]['ori_shape'] assert all((_['ori_shape'] == ori_shape for _ in img_meta)) if self.test_cfg.mode == 'slide': seg_logit = self.slide_inference(img, img_meta, rescale) else: seg_logit = self.whole_inference(img, img_meta, rescale) output = F.softmax(seg_logit, dim=1) flip = img_meta[0]['flip'] if flip: flip_direction = img_meta[0]['flip_direction'] assert flip_direction in ['horizontal', 'vertical'] if flip_direction == 'horizontal': output = output.flip(dims=(3,)) elif flip_direction == 'vertical': output = output.flip(dims=(2,)) seg_logit = output seg_pred = seg_logit.argmax(dim=1) if torch.onnx.is_in_onnx_export(): seg_pred = seg_pred.unsqueeze(0) return seg_pred seg_pred = seg_pred.cpu().numpy() seg_pred = list(seg_pred) return seg_pred
BPR
positive
def test_geo_value(self): """test whether geo values are valid for specific geo types""" <DeepExtract> rows = [CovidcastTestRow.make_default_row(geo_type='msa', geo_value=MSA[i - 1], value=i * 1.0, stderr=i * 10.0, sample_size=i * 100.0) for i in [1, 2, 3]] + [CovidcastTestRow.make_default_row(geo_type='fips', geo_value=FIPS[i - 4], value=i * 1.0, stderr=i * 10.0, sample_size=i * 100.0) for i in [4, 5, 6]] self._insert_rows(rows) rows = rows </DeepExtract> expected = [row.as_api_compatibility_row_dict() for row in rows[:3]] def fetch(geo_value): <DeepExtract> params = self.params_from_row(rows[0], endpoint='covidcast', **kwargs) Epidata.BASE_URL = BASE_URL response = Epidata.covidcast(**params) response = response </DeepExtract> return response <DeepExtract> response = self.request_based_on_row(rows[0], geo_value=MSA[0]) r = response </DeepExtract> self.assertEqual(r['message'], 'success') self.assertEqual(r['epidata'], expected[0:1]) <DeepExtract> response = self.request_based_on_row(rows[0], geo_value='11111') r = response </DeepExtract> self.assertEqual(r['message'], 'Invalid geo_value(s) 11111 for the requested geo_type msa') <DeepExtract> response = self.request_based_on_row(rows[0], geo_value=f'{MSA[0]},{MSA[1]}') r = response </DeepExtract> self.assertEqual(r['message'], 'success') self.assertEqual(r['epidata'], expected[0:2]) <DeepExtract> response = self.request_based_on_row(rows[0], geo_value=f'{MSA[0]},{MSA[2]}') r = response </DeepExtract> self.assertEqual(r['message'], 'success') self.assertEqual(r['epidata'], [expected[0], expected[2]]) <DeepExtract> response = self.request_based_on_row(rows[0], geo_value=f'{MSA[0]},11111') r = response </DeepExtract> self.assertEqual(r['message'], 'Invalid geo_value(s) 11111 for the requested geo_type msa') <DeepExtract> response = self.request_based_on_row(rows[0], geo_value='') r = response </DeepExtract> self.assertEqual(r['message'], 'geo_value is empty for the requested geo_type msa!') <DeepExtract> response = self.request_based_on_row(rows[0], geo_value=MSA[3]) r = response </DeepExtract> self.assertEqual(r['message'], 'no results')
def test_geo_value(self): """test whether geo values are valid for specific geo types""" rows = [CovidcastTestRow.make_default_row(geo_type='msa', geo_value=MSA[i - 1], value=i * 1.0, stderr=i * 10.0, sample_size=i * 100.0) for i in [1, 2, 3]] + [CovidcastTestRow.make_default_row(geo_type='fips', geo_value=FIPS[i - 4], value=i * 1.0, stderr=i * 10.0, sample_size=i * 100.0) for i in [4, 5, 6]] self._insert_rows(rows) rows = rows expected = [row.as_api_compatibility_row_dict() for row in rows[:3]] def fetch(geo_value): params = self.params_from_row(rows[0], endpoint='covidcast', **kwargs) Epidata.BASE_URL = BASE_URL response = Epidata.covidcast(**params) response = response return response response = self.request_based_on_row(rows[0], geo_value=MSA[0]) r = response self.assertEqual(r['message'], 'success') self.assertEqual(r['epidata'], expected[0:1]) response = self.request_based_on_row(rows[0], geo_value='11111') r = response self.assertEqual(r['message'], 'Invalid geo_value(s) 11111 for the requested geo_type msa') response = self.request_based_on_row(rows[0], geo_value=f'{MSA[0]},{MSA[1]}') r = response self.assertEqual(r['message'], 'success') self.assertEqual(r['epidata'], expected[0:2]) response = self.request_based_on_row(rows[0], geo_value=f'{MSA[0]},{MSA[2]}') r = response self.assertEqual(r['message'], 'success') self.assertEqual(r['epidata'], [expected[0], expected[2]]) response = self.request_based_on_row(rows[0], geo_value=f'{MSA[0]},11111') r = response self.assertEqual(r['message'], 'Invalid geo_value(s) 11111 for the requested geo_type msa') response = self.request_based_on_row(rows[0], geo_value='') r = response self.assertEqual(r['message'], 'geo_value is empty for the requested geo_type msa!') response = self.request_based_on_row(rows[0], geo_value=MSA[3]) r = response self.assertEqual(r['message'], 'no results')
delphi-epidata
positive
def build_fasttree(aln_file, out_file, clean_up=True, nthreads=1, tree_builder_args=None): """ build tree using fasttree """ log_file = out_file + '.log' <DeepExtract> exe = next(filter(shutil.which, ['FastTreeDblMP', 'FastTreeDbl', 'FastTreeMP', 'fasttreeMP', 'FastTree', 'fasttree']), default) if exe is None: print('Unable to find any of %s in PATH=%s' % (['FastTreeDblMP', 'FastTreeDbl', 'FastTreeMP', 'fasttreeMP', 'FastTree', 'fasttree'], os.environ['PATH'])) print('\nHint: You can install the missing program using conda or homebrew or apt-get.\n') raise Exception fasttree = exe </DeepExtract> extra_env = {'OMP_NUM_THREADS': str(nthreads)} call = [fasttree, tree_builder_args, shquote(aln_file), '1>', shquote(out_file), '2>', shquote(log_file)] cmd = ' '.join(call) print('Building a tree via:\n\t' + cmd + '\n\tPrice et al: FastTree 2 - Approximately Maximum-Likelihood Trees for Large Alignments.' + '\n\tPLoS ONE 5(3): e9490. https://doi.org/10.1371/journal.pone.0009490\n') try: run_shell_command(cmd, raise_errors=True, extra_env=extra_env) T = Phylo.read(out_file, 'newick') except Exception as error: print('ERROR: TREE BUILDING FAILED') print(f'ERROR: {error}') if os.path.isfile(log_file): print('Please see the log file for more details: {}'.format(log_file)) T = None return T
def build_fasttree(aln_file, out_file, clean_up=True, nthreads=1, tree_builder_args=None): """ build tree using fasttree """ log_file = out_file + '.log' exe = next(filter(shutil.which, ['FastTreeDblMP', 'FastTreeDbl', 'FastTreeMP', 'fasttreeMP', 'FastTree', 'fasttree']), default) if exe is None: print('Unable to find any of %s in PATH=%s' % (['FastTreeDblMP', 'FastTreeDbl', 'FastTreeMP', 'fasttreeMP', 'FastTree', 'fasttree'], os.environ['PATH'])) print('\nHint: You can install the missing program using conda or homebrew or apt-get.\n') raise Exception fasttree = exe extra_env = {'OMP_NUM_THREADS': str(nthreads)} call = [fasttree, tree_builder_args, shquote(aln_file), '1>', shquote(out_file), '2>', shquote(log_file)] cmd = ' '.join(call) print('Building a tree via:\n\t' + cmd + '\n\tPrice et al: FastTree 2 - Approximately Maximum-Likelihood Trees for Large Alignments.' + '\n\tPLoS ONE 5(3): e9490. https://doi.org/10.1371/journal.pone.0009490\n') try: run_shell_command(cmd, raise_errors=True, extra_env=extra_env) T = Phylo.read(out_file, 'newick') except Exception as error: print('ERROR: TREE BUILDING FAILED') print(f'ERROR: {error}') if os.path.isfile(log_file): print('Please see the log file for more details: {}'.format(log_file)) T = None return T
augur
positive
def get_query_model(name, *args, random_state=None, **kwargs): """Get an instance of the query strategy. Arguments --------- name: str Name of the query strategy. *args: Arguments for the model. **kwargs: Keyword arguments for the model. Returns ------- asreview.query.base.BaseQueryModel Initialized instance of query strategy. """ <DeepExtract> try: query_class = _model_class_from_entry_point(name, entry_name='asreview.models.query') except ValueError: raise ValueError(f"Error: query name '{name}' is not implemented.") </DeepExtract> try: return query_class(*args, random_state=random_state, **kwargs) except TypeError: return query_class(*args, **kwargs)
def get_query_model(name, *args, random_state=None, **kwargs): """Get an instance of the query strategy. Arguments --------- name: str Name of the query strategy. *args: Arguments for the model. **kwargs: Keyword arguments for the model. Returns ------- asreview.query.base.BaseQueryModel Initialized instance of query strategy. """ try: query_class = _model_class_from_entry_point(name, entry_name='asreview.models.query') except ValueError: raise ValueError(f"Error: query name '{name}' is not implemented.") try: return query_class(*args, random_state=random_state, **kwargs) except TypeError: return query_class(*args, **kwargs)
asreview
positive
def _install_tools(env, tools_conf=None): """ Install tools needed for Galaxy along with tool configuration directories needed by Galaxy. """ if not tools_conf: <DeepExtract> with open(_tools_conf_path(env)) as in_handle: full_data = yaml.safe_load(in_handle) tools_conf = full_data </DeepExtract> if _read_boolean(env, 'galaxy_install_dependencies', False): <DeepExtract> if not env.safe_exists(env.galaxy_tools_dir): env.safe_sudo('mkdir -p %s' % env.galaxy_tools_dir) _chown_galaxy(env, env.galaxy_tools_dir) bin_dir = os.path.join(env.galaxy_tools_dir, 'bin') if not env.safe_exists(bin_dir): env.safe_sudo('mkdir -p %s' % bin_dir) _chown_galaxy(env, bin_dir) line = 'export PATH={0}:$PATH'.format(bin_dir) _add_to_profiles(line) if not env.safe_exists(env.galaxy_jars_dir): env.safe_sudo('mkdir -p %s' % env.galaxy_jars_dir) _chown_galaxy(env, env.galaxy_jars_dir) </DeepExtract> <DeepExtract> applications = tools_conf['applications'] or {} defer_errors = env.get('galaxy_tool_defer_errors', True) exceptions = {} for (name, tool_conf) in applications.iteritems(): if not __check_conditional(tool_conf): continue try: _install_application(name, tool_conf) except BaseException as e: exceptions[name] = e if not defer_errors: break if exceptions: for (name, exception) in exceptions.iteritems(): env.logger.warn(FAILED_INSTALL_MESSAGE % name) first_exception = list(exceptions.values())[0] raise first_exception </DeepExtract> _chown_galaxy(env, env.galaxy_tools_dir) _chown_galaxy(env, env.galaxy_jars_dir) if _read_boolean(env, 'galaxy_install_r_packages', False): _install_r_packages(tools_conf)
def _install_tools(env, tools_conf=None): """ Install tools needed for Galaxy along with tool configuration directories needed by Galaxy. """ if not tools_conf: with open(_tools_conf_path(env)) as in_handle: full_data = yaml.safe_load(in_handle) tools_conf = full_data if _read_boolean(env, 'galaxy_install_dependencies', False): if not env.safe_exists(env.galaxy_tools_dir): env.safe_sudo('mkdir -p %s' % env.galaxy_tools_dir) _chown_galaxy(env, env.galaxy_tools_dir) bin_dir = os.path.join(env.galaxy_tools_dir, 'bin') if not env.safe_exists(bin_dir): env.safe_sudo('mkdir -p %s' % bin_dir) _chown_galaxy(env, bin_dir) line = 'export PATH={0}:$PATH'.format(bin_dir) _add_to_profiles(line) if not env.safe_exists(env.galaxy_jars_dir): env.safe_sudo('mkdir -p %s' % env.galaxy_jars_dir) _chown_galaxy(env, env.galaxy_jars_dir) applications = tools_conf['applications'] or {} defer_errors = env.get('galaxy_tool_defer_errors', True) exceptions = {} for (name, tool_conf) in applications.iteritems(): if not __check_conditional(tool_conf): continue try: _install_application(name, tool_conf) except BaseException as e: exceptions[name] = e if not defer_errors: break if exceptions: for (name, exception) in exceptions.iteritems(): env.logger.warn(FAILED_INSTALL_MESSAGE % name) first_exception = list(exceptions.values())[0] raise first_exception _chown_galaxy(env, env.galaxy_tools_dir) _chown_galaxy(env, env.galaxy_jars_dir) if _read_boolean(env, 'galaxy_install_r_packages', False): _install_r_packages(tools_conf)
cloudbiolinux
positive
def gather_options(self): if not self.initialized: parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) <DeepExtract> parser.add_argument('--dataroot', type=str, default='.', help='path to images (should have subfolders train, test etc)') parser.add_argument('--batch_size', type=int, default=1, help='input batch size') parser.add_argument('--loadSize', type=int, default=512, help='scale images to this size') parser.add_argument('--fineSize', type=int, default=512, help='then crop to this size') parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels') parser.add_argument('--output_nc', type=int, default=1, help='# of output image channels') parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer') parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer') parser.add_argument('--netD', type=str, default='basic', help='selects model to use for netD') parser.add_argument('--netG', type=str, default='unet_256', help='selects model to use for netG') parser.add_argument('--nnG', type=int, default=9, help='specify nblock for resnet_nblocks, ndown for unet for unet_ndown') parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers') parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') parser.add_argument('--dataset_mode', type=str, default='aligned', help='chooses how datasets are loaded. [aligned | single]') parser.add_argument('--model', type=str, default='apdrawing_gan', help='chooses which model to use. 
[apdrawing_gan | test]') parser.add_argument('--use_local', action='store_true', help='use local part network') parser.add_argument('--comb_op', type=int, default=1, help='use min-pooling(1) or max-pooling(0) for overlapping regions') parser.add_argument('--lm_dir', type=str, default='dataset/landmark/ALL', help='path to facial landmarks') parser.add_argument('--bg_dir', type=str, default='dataset/mask/ALL', help='path to background masks') parser.add_argument('--soft_border', type=int, default=0, help='use mask with soft border') parser.add_argument('--EYE_H', type=int, default=40, help='EYE_H') parser.add_argument('--EYE_W', type=int, default=56, help='EYE_W') parser.add_argument('--NOSE_H', type=int, default=48, help='NOSE_H') parser.add_argument('--NOSE_W', type=int, default=48, help='NOSE_W') parser.add_argument('--MOUTH_H', type=int, default=40, help='MOUTH_H') parser.add_argument('--MOUTH_W', type=int, default=64, help='MOUTH_W') parser.add_argument('--which_direction', type=str, default='AtoB', help='AtoB or BtoA') parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data') parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here') parser.add_argument('--auxiliary_root', type=str, default='auxiliary', help='auxiliary model folder') parser.add_argument('--norm', type=str, default='batch', help='instance normalization or batch normalization') parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly') parser.add_argument('--display_winsize', type=int, default=256, help='display window size') parser.add_argument('--display_id', type=int, default=1, help='window id of the web display') parser.add_argument('--display_server', type=str, default='http://localhost', help='visdom server of the web display') parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")') parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display') parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator') parser.add_argument('--max_dataset_size', type=int, default=float('inf'), help='Maximum number of samples allowed per dataset. 
If the dataset directory contains more than max_dataset_size, only a subset is loaded.') parser.add_argument('--resize_or_crop', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]') parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation') parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information') parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') self.initialized = True parser = parser </DeepExtract> (opt, _) = parser.parse_known_args() if UseTest: opt.model = 'test' model_name = opt.model model_option_setter = models.get_option_setter(model_name) parser = model_option_setter(parser, self.isTrain) (opt, _) = parser.parse_known_args() dataset_name = opt.dataset_mode dataset_option_setter = data.get_option_setter(dataset_name) parser = dataset_option_setter(parser, self.isTrain) self.parser = parser return parser.parse_args()
def gather_options(self): if not self.initialized: parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--dataroot', type=str, default='.', help='path to images (should have subfolders train, test etc)') parser.add_argument('--batch_size', type=int, default=1, help='input batch size') parser.add_argument('--loadSize', type=int, default=512, help='scale images to this size') parser.add_argument('--fineSize', type=int, default=512, help='then crop to this size') parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels') parser.add_argument('--output_nc', type=int, default=1, help='# of output image channels') parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer') parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer') parser.add_argument('--netD', type=str, default='basic', help='selects model to use for netD') parser.add_argument('--netG', type=str, default='unet_256', help='selects model to use for netG') parser.add_argument('--nnG', type=int, default=9, help='specify nblock for resnet_nblocks, ndown for unet for unet_ndown') parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers') parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') parser.add_argument('--dataset_mode', type=str, default='aligned', help='chooses how datasets are loaded. [aligned | single]') parser.add_argument('--model', type=str, default='apdrawing_gan', help='chooses which model to use. 
[apdrawing_gan | test]') parser.add_argument('--use_local', action='store_true', help='use local part network') parser.add_argument('--comb_op', type=int, default=1, help='use min-pooling(1) or max-pooling(0) for overlapping regions') parser.add_argument('--lm_dir', type=str, default='dataset/landmark/ALL', help='path to facial landmarks') parser.add_argument('--bg_dir', type=str, default='dataset/mask/ALL', help='path to background masks') parser.add_argument('--soft_border', type=int, default=0, help='use mask with soft border') parser.add_argument('--EYE_H', type=int, default=40, help='EYE_H') parser.add_argument('--EYE_W', type=int, default=56, help='EYE_W') parser.add_argument('--NOSE_H', type=int, default=48, help='NOSE_H') parser.add_argument('--NOSE_W', type=int, default=48, help='NOSE_W') parser.add_argument('--MOUTH_H', type=int, default=40, help='MOUTH_H') parser.add_argument('--MOUTH_W', type=int, default=64, help='MOUTH_W') parser.add_argument('--which_direction', type=str, default='AtoB', help='AtoB or BtoA') parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data') parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here') parser.add_argument('--auxiliary_root', type=str, default='auxiliary', help='auxiliary model folder') parser.add_argument('--norm', type=str, default='batch', help='instance normalization or batch normalization') parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly') parser.add_argument('--display_winsize', type=int, default=256, help='display window size') parser.add_argument('--display_id', type=int, default=1, help='window id of the web display') parser.add_argument('--display_server', type=str, default='http://localhost', help='visdom server of the web display') parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")') parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display') parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator') parser.add_argument('--max_dataset_size', type=int, default=float('inf'), help='Maximum number of samples allowed per dataset. 
If the dataset directory contains more than max_dataset_size, only a subset is loaded.') parser.add_argument('--resize_or_crop', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]') parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation') parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information') parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') self.initialized = True parser = parser (opt, _) = parser.parse_known_args() if UseTest: opt.model = 'test' model_name = opt.model model_option_setter = models.get_option_setter(model_name) parser = model_option_setter(parser, self.isTrain) (opt, _) = parser.parse_known_args() dataset_name = opt.dataset_mode dataset_option_setter = data.get_option_setter(dataset_name) parser = dataset_option_setter(parser, self.isTrain) self.parser = parser return parser.parse_args()
dualFace
positive
def esd_pnms(esd, pnms_thresh): scores = [] dets = [] for ele in esd: score = ele['score'] quad = ele['ke_quad'] det = np.array([[quad[0][0], quad[0][1]], [quad[1][0], quad[1][1]], [quad[2][0], quad[2][1]], [quad[3][0], quad[3][1]]]) scores.append(score) dets.append(det) scores = np.array(scores) dets = np.array(dets) <DeepExtract> pts = [] for det in dets: pts.append([[det[i][0], det[i][1]] for i in range(len(det))]) order = scores.argsort()[::-1] areas = np.zeros(scores.shape) order = scores.argsort()[::-1] inter_areas = np.zeros((scores.shape[0], scores.shape[0])) for il in range(len(pts)): poly = Polygon(pts[il]) areas[il] = poly.area for jl in range(il, len(pts)): polyj = Polygon(pts[jl]) try: inS = poly.intersection(polyj) except: print(poly, polyj) inter_areas[il][jl] = inS.area inter_areas[jl][il] = inS.area keep = [] while order.size > 0: i = order[0] keep.append(i) ovr = inter_areas[i][order[1:]] / (areas[i] + areas[order[1:]] - inter_areas[i][order[1:]]) inds = np.where(ovr <= pnms_thresh)[0] order = order[inds + 1] keep = keep </DeepExtract> return keep
def esd_pnms(esd, pnms_thresh): scores = [] dets = [] for ele in esd: score = ele['score'] quad = ele['ke_quad'] det = np.array([[quad[0][0], quad[0][1]], [quad[1][0], quad[1][1]], [quad[2][0], quad[2][1]], [quad[3][0], quad[3][1]]]) scores.append(score) dets.append(det) scores = np.array(scores) dets = np.array(dets) pts = [] for det in dets: pts.append([[det[i][0], det[i][1]] for i in range(len(det))]) order = scores.argsort()[::-1] areas = np.zeros(scores.shape) order = scores.argsort()[::-1] inter_areas = np.zeros((scores.shape[0], scores.shape[0])) for il in range(len(pts)): poly = Polygon(pts[il]) areas[il] = poly.area for jl in range(il, len(pts)): polyj = Polygon(pts[jl]) try: inS = poly.intersection(polyj) except: print(poly, polyj) inter_areas[il][jl] = inS.area inter_areas[jl][il] = inS.area keep = [] while order.size > 0: i = order[0] keep.append(i) ovr = inter_areas[i][order[1:]] / (areas[i] + areas[order[1:]] - inter_areas[i][order[1:]]) inds = np.where(ovr <= pnms_thresh)[0] order = order[inds + 1] keep = keep return keep
Box_Discretization_Network
positive
def Max(self, k): """Computes the CDF of the maximum of k selections from this dist. k: int returns: new Cdf """ <DeepExtract> new = copy.copy(self) new.d = copy.copy(self.d) new.label = label if label is not None else self.label cdf = new </DeepExtract> cdf.ps **= k return cdf
def Max(self, k): """Computes the CDF of the maximum of k selections from this dist. k: int returns: new Cdf """ new = copy.copy(self) new.d = copy.copy(self.d) new.label = label if label is not None else self.label cdf = new cdf.ps **= k return cdf
data-science-ipython-notebooks
positive
def __init__(self, method=None, url=None, headers=None, files=None, data=None, params=None, auth=None, cookies=None, hooks=None, json=None): data = [] if data is None else data files = [] if files is None else files headers = {} if headers is None else headers params = {} if params is None else params hooks = {} if hooks is None else hooks self.hooks = default_hooks() for (k, v) in list(hooks.items()): <DeepExtract> if k not in self.hooks: raise ValueError('Unsupported event specified, with event name "%s"' % k) if isinstance(v, Callable): self.hooks[k].append(v) elif hasattr(v, '__iter__'): self.hooks[k].extend((h for h in v if isinstance(h, Callable))) </DeepExtract> self.method = method self.url = url self.headers = headers self.files = files self.data = data self.json = json self.params = params self.auth = auth self.cookies = cookies
def __init__(self, method=None, url=None, headers=None, files=None, data=None, params=None, auth=None, cookies=None, hooks=None, json=None): data = [] if data is None else data files = [] if files is None else files headers = {} if headers is None else headers params = {} if params is None else params hooks = {} if hooks is None else hooks self.hooks = default_hooks() for (k, v) in list(hooks.items()): if k not in self.hooks: raise ValueError('Unsupported event specified, with event name "%s"' % k) if isinstance(v, Callable): self.hooks[k].append(v) elif hasattr(v, '__iter__'): self.hooks[k].extend((h for h in v if isinstance(h, Callable))) self.method = method self.url = url self.headers = headers self.files = files self.data = data self.json = json self.params = params self.auth = auth self.cookies = cookies
alexa-sky-hd
positive
def flatten_sequence(self, sequence, gold_snippets=False): if sequence[-1] == vocab.EOS_TOK: sequence = sequence[:-1] if gold_snippets: no_snippets_sequence = self.interaction.expand_snippets(sequence) else: <DeepExtract> if sequence[-1] == vocab.EOS_TOK: sequence = sequence[:-1] no_snippets_sequence = self.interaction.expand_snippets(sequence) no_snippets_sequence = sql_util.fix_parentheses(no_snippets_sequence) no_snippets_sequence = no_snippets_sequence </DeepExtract> no_snippets_sequence = sql_util.fix_parentheses(no_snippets_sequence) deanon_sequence = self.interaction.deanonymize(no_snippets_sequence, 'sql') return deanon_sequence
def flatten_sequence(self, sequence, gold_snippets=False): if sequence[-1] == vocab.EOS_TOK: sequence = sequence[:-1] if gold_snippets: no_snippets_sequence = self.interaction.expand_snippets(sequence) else: if sequence[-1] == vocab.EOS_TOK: sequence = sequence[:-1] no_snippets_sequence = self.interaction.expand_snippets(sequence) no_snippets_sequence = sql_util.fix_parentheses(no_snippets_sequence) no_snippets_sequence = no_snippets_sequence no_snippets_sequence = sql_util.fix_parentheses(no_snippets_sequence) deanon_sequence = self.interaction.deanonymize(no_snippets_sequence, 'sql') return deanon_sequence
editsql
positive
def get_children(parent, tag_name): if parent is None: return [] <DeepExtract> if parent is None: parent = self.root ret = parent.findall('.//' + self.ns + tag_name) </DeepExtract> if not ret: <DeepExtract> if parent is None: parent = self.root ret_list = parent.findall('.//' + self.ns + tag_name + '-REF') </DeepExtract> ret = [self.get_short_name_path(item.text) for item in ret_list] if len(ret) > 0: raise 'use follow_all_ref!' return ret
def get_children(parent, tag_name): if parent is None: return [] if parent is None: parent = self.root ret = parent.findall('.//' + self.ns + tag_name) if not ret: if parent is None: parent = self.root ret_list = parent.findall('.//' + self.ns + tag_name + '-REF') ret = [self.get_short_name_path(item.text) for item in ret_list] if len(ret) > 0: raise 'use follow_all_ref!' return ret
canmatrix
positive
@ddt.data(*CourseSamples.course_ids) def test_any_activity(self, course_id): <DeepExtract> raise NotImplementedError </DeepExtract> <DeepExtract> response = self.authenticated_get('/api/v0/courses/{}/recent_activity?activity_type={}'.format(course_id, 'ANY')) self.assertEqual(response.status_code, 200) self.assertEqual(response.data, self.get_activity_record(course_id=course_id, activity_type='ANY', count=300)) </DeepExtract> <DeepExtract> response = self.authenticated_get('/api/v0/courses/{}/recent_activity?activity_type={}'.format(course_id, 'any')) self.assertEqual(response.status_code, 200) self.assertEqual(response.data, self.get_activity_record(course_id=course_id, activity_type='any', count=300)) </DeepExtract>
@ddt.data(*CourseSamples.course_ids) def test_any_activity(self, course_id): raise NotImplementedError response = self.authenticated_get('/api/v0/courses/{}/recent_activity?activity_type={}'.format(course_id, 'ANY')) self.assertEqual(response.status_code, 200) self.assertEqual(response.data, self.get_activity_record(course_id=course_id, activity_type='ANY', count=300)) response = self.authenticated_get('/api/v0/courses/{}/recent_activity?activity_type={}'.format(course_id, 'any')) self.assertEqual(response.status_code, 200) self.assertEqual(response.data, self.get_activity_record(course_id=course_id, activity_type='any', count=300)) </DeepExtract>
edx-analytics-data-api
positive
def regularization(self, train_targets, train_features, coef=None, featselect_featvar=False): """Generate the omgea2 and coef value's. Parameters ---------- train_targets : array Dependent data used for training. train_features : array Independent data used for training. coef : int List of indices in the feature database. """ reg_data = {'result': None} if coef is None: <DeepExtract> omega2_min = float('inf') omega2_list = [] epe_list = [] if self.W2 is None or self.Vh is None: (V, self.W2, self.Vh) = np.linalg.svd(np.dot(train_features.T, train_features), full_matrices=True) if self.cv is 'loocv': (U, W, Vh) = np.linalg.svd(train_features, full_matrices=False) (whigh, wlow) = (np.log(self.W2[0] * 2.0), np.log(self.W2[-1] * 0.5)) basesearchwidth = whigh - wlow omega2_range = [1e-06 * np.exp(wlow)] for pp in np.linspace(wlow, whigh, self.wsteps): omega2_range.append(np.exp(pp)) omega2_range.append(1000000.0 * np.exp(whigh)) for s in range(self.rsteps): if self.cv is 'bootstrap': BS_res = self._bootstrap_master(train_features, train_targets, p, omega2_range, self.Ns) (_, _, epe_list_i, _) = BS_res if self.cv is 'loocv': epe_list_i = self._LOOCV_l(train_features, train_targets, p, omega2_range, U, W) omega2_list += omega2_range epe_list += epe_list_i.tolist() epe_ind = np.argmin(epe_list) omega2_min = omega2_list[epe_ind] if s is 0 and epe_ind is 0 or epe_ind is len(omega2_list) - 1: b = omega2_min logmin_epe = np.log(omega2_min) basesearchwidth = 2 * basesearchwidth / (self.wsteps - 1) wlow = logmin_epe - basesearchwidth * 0.5 whigh = logmin_epe + basesearchwidth * 0.5 omega2_range = [] for pp in np.linspace(wlow, whigh, self.wsteps): omega2_range.append(np.exp(pp)) b = omega2_min </DeepExtract> coef = self.RR(train_features, train_targets, omega2=b, featselect_featvar=featselect_featvar)[0] reg_data['result'] = (coef, b) return reg_data
def regularization(self, train_targets, train_features, coef=None, featselect_featvar=False): """Generate the omgea2 and coef value's. Parameters ---------- train_targets : array Dependent data used for training. train_features : array Independent data used for training. coef : int List of indices in the feature database. """ reg_data = {'result': None} if coef is None: omega2_min = float('inf') omega2_list = [] epe_list = [] if self.W2 is None or self.Vh is None: (V, self.W2, self.Vh) = np.linalg.svd(np.dot(train_features.T, train_features), full_matrices=True) if self.cv is 'loocv': (U, W, Vh) = np.linalg.svd(train_features, full_matrices=False) (whigh, wlow) = (np.log(self.W2[0] * 2.0), np.log(self.W2[-1] * 0.5)) basesearchwidth = whigh - wlow omega2_range = [1e-06 * np.exp(wlow)] for pp in np.linspace(wlow, whigh, self.wsteps): omega2_range.append(np.exp(pp)) omega2_range.append(1000000.0 * np.exp(whigh)) for s in range(self.rsteps): if self.cv is 'bootstrap': BS_res = self._bootstrap_master(train_features, train_targets, p, omega2_range, self.Ns) (_, _, epe_list_i, _) = BS_res if self.cv is 'loocv': epe_list_i = self._LOOCV_l(train_features, train_targets, p, omega2_range, U, W) omega2_list += omega2_range epe_list += epe_list_i.tolist() epe_ind = np.argmin(epe_list) omega2_min = omega2_list[epe_ind] if s is 0 and epe_ind is 0 or epe_ind is len(omega2_list) - 1: b = omega2_min logmin_epe = np.log(omega2_min) basesearchwidth = 2 * basesearchwidth / (self.wsteps - 1) wlow = logmin_epe - basesearchwidth * 0.5 whigh = logmin_epe + basesearchwidth * 0.5 omega2_range = [] for pp in np.linspace(wlow, whigh, self.wsteps): omega2_range.append(np.exp(pp)) b = omega2_min coef = self.RR(train_features, train_targets, omega2=b, featselect_featvar=featselect_featvar)[0] reg_data['result'] = (coef, b) return reg_data
CatLearn
positive
def main(): parser = argparse.ArgumentParser(description='PyTorch Object Detection Training') parser.add_argument('--config-file', default='', metavar='FILE', help='path to config file', type=str) parser.add_argument('--local_rank', type=int, default=0) parser.add_argument('--skip-test', dest='skip_test', help='Do not test the final model', action='store_true') parser.add_argument('opts', help='Modify config options using the command-line', default=None, nargs=argparse.REMAINDER) args = parser.parse_args() num_gpus = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1 args.distributed = num_gpus > 1 if args.distributed: torch.cuda.set_device(args.local_rank) torch.distributed.init_process_group(backend='nccl', init_method='env://') synchronize() cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) cfg.freeze() output_dir = cfg.OUTPUT_DIR if output_dir: mkdir(output_dir) logger = setup_logger('maskrcnn_benchmark', output_dir, get_rank()) logger.info('Using {} GPUs'.format(num_gpus)) logger.info(args) logger.info('Collecting env info (might take some time)') logger.info('\n' + collect_env_info()) logger.info('Loaded configuration file {}'.format(args.config_file)) with open(args.config_file, 'r') as cf: config_str = '\n' + cf.read() logger.info(config_str) logger.info('Running with config:\n{}'.format(cfg)) <DeepExtract> model = build_detection_model(cfg) device = torch.device(cfg.MODEL.DEVICE) model.to(device) optimizers = make_optimizer(cfg, model) schedulers = make_lr_scheduler(cfg, optimizers) if args.distributed: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, broadcast_buffers=False) arguments = {} arguments['iteration'] = 0 output_dir = cfg.OUTPUT_DIR save_to_disk = get_rank() == 0 checkpointer = DetectronCheckpointer(cfg, model, optimizers, schedulers, output_dir, save_to_disk) extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT) arguments.update(extra_checkpoint_data) data_loader = make_data_loader(cfg, is_train=True, is_distributed=args.distributed, start_iter=arguments['iteration']) checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD do_train(model, data_loader, optimizers, schedulers, checkpointer, device, checkpoint_period, arguments) model = model </DeepExtract> if not args.skip_test: <DeepExtract> if args.distributed: model = model.module torch.cuda.empty_cache() iou_types = ('bbox',) if cfg.MODEL.MASK_ON: iou_types = iou_types + ('segm',) if cfg.MODEL.KEYPOINT_ON: iou_types = iou_types + ('keypoints',) output_folders = [None] * len(cfg.DATASETS.TEST) dataset_names = cfg.DATASETS.TEST if cfg.OUTPUT_DIR: for (idx, dataset_name) in enumerate(dataset_names): output_folder = os.path.join(cfg.OUTPUT_DIR, 'inference', dataset_name) mkdir(output_folder) output_folders[idx] = output_folder data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=args.distributed) for (output_folder, dataset_name, data_loader_val) in zip(output_folders, dataset_names, data_loaders_val): inference(model, data_loader_val, dataset_name=dataset_name, iou_types=iou_types, box_only=cfg.MODEL.RPN_ONLY, device=cfg.MODEL.DEVICE, expected_results=cfg.TEST.EXPECTED_RESULTS, expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL, output_folder=output_folder) synchronize() </DeepExtract>
def main(): parser = argparse.ArgumentParser(description='PyTorch Object Detection Training') parser.add_argument('--config-file', default='', metavar='FILE', help='path to config file', type=str) parser.add_argument('--local_rank', type=int, default=0) parser.add_argument('--skip-test', dest='skip_test', help='Do not test the final model', action='store_true') parser.add_argument('opts', help='Modify config options using the command-line', default=None, nargs=argparse.REMAINDER) args = parser.parse_args() num_gpus = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1 args.distributed = num_gpus > 1 if args.distributed: torch.cuda.set_device(args.local_rank) torch.distributed.init_process_group(backend='nccl', init_method='env://') synchronize() cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) cfg.freeze() output_dir = cfg.OUTPUT_DIR if output_dir: mkdir(output_dir) logger = setup_logger('maskrcnn_benchmark', output_dir, get_rank()) logger.info('Using {} GPUs'.format(num_gpus)) logger.info(args) logger.info('Collecting env info (might take some time)') logger.info('\n' + collect_env_info()) logger.info('Loaded configuration file {}'.format(args.config_file)) with open(args.config_file, 'r') as cf: config_str = '\n' + cf.read() logger.info(config_str) logger.info('Running with config:\n{}'.format(cfg)) model = build_detection_model(cfg) device = torch.device(cfg.MODEL.DEVICE) model.to(device) optimizers = make_optimizer(cfg, model) schedulers = make_lr_scheduler(cfg, optimizers) if args.distributed: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, broadcast_buffers=False) arguments = {} arguments['iteration'] = 0 output_dir = cfg.OUTPUT_DIR save_to_disk = get_rank() == 0 checkpointer = DetectronCheckpointer(cfg, model, optimizers, schedulers, output_dir, save_to_disk) extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT) arguments.update(extra_checkpoint_data) data_loader = make_data_loader(cfg, is_train=True, is_distributed=args.distributed, start_iter=arguments['iteration']) checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD do_train(model, data_loader, optimizers, schedulers, checkpointer, device, checkpoint_period, arguments) model = model if not args.skip_test: if args.distributed: model = model.module torch.cuda.empty_cache() iou_types = ('bbox',) if cfg.MODEL.MASK_ON: iou_types = iou_types + ('segm',) if cfg.MODEL.KEYPOINT_ON: iou_types = iou_types + ('keypoints',) output_folders = [None] * len(cfg.DATASETS.TEST) dataset_names = cfg.DATASETS.TEST if cfg.OUTPUT_DIR: for (idx, dataset_name) in enumerate(dataset_names): output_folder = os.path.join(cfg.OUTPUT_DIR, 'inference', dataset_name) mkdir(output_folder) output_folders[idx] = output_folder data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=args.distributed) for (output_folder, dataset_name, data_loader_val) in zip(output_folders, dataset_names, data_loaders_val): inference(model, data_loader_val, dataset_name=dataset_name, iou_types=iou_types, box_only=cfg.MODEL.RPN_ONLY, device=cfg.MODEL.DEVICE, expected_results=cfg.TEST.EXPECTED_RESULTS, expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL, output_folder=output_folder) synchronize() </DeepExtract>
AE-WTN
positive
def start_proxy_process(self): <DeepExtract> out = [_PROXY_EXE, '-address', self.address, '-tcp-address', self.tcp_address, '-api-url', self.gateway_url + '/api/v1/routes', '-log-level', self.log_level] if is_child_process: out.append('-is-child-process') if bool(self.tls_cert) != bool(self.tls_key): raise ValueError('Must set both tls_cert and tls_key') if self.tls_cert: out.extend(['-tls-cert', self.tls_cert, '-tls-key', self.tls_key]) command = out </DeepExtract> <DeepExtract> env = os.environ.copy() env['DASK_GATEWAY_PROXY_TOKEN'] = self.api_token env = env </DeepExtract> self.log.info('Starting the Dask gateway proxy...') proc = subprocess.Popen(command, env=env, stdin=subprocess.PIPE, stdout=None, stderr=None, start_new_session=True) self.proxy_process = proc self.log.info('Dask gateway proxy started') self.log.info('- %s routes listening at %s://%s', 'HTTPS' if self.tls_cert else 'HTTP', 'https' if self.tls_cert else 'http', self.address) self.log.info('- Scheduler routes listening at gateway://%s', self.tcp_address)
def start_proxy_process(self): out = [_PROXY_EXE, '-address', self.address, '-tcp-address', self.tcp_address, '-api-url', self.gateway_url + '/api/v1/routes', '-log-level', self.log_level] if is_child_process: out.append('-is-child-process') if bool(self.tls_cert) != bool(self.tls_key): raise ValueError('Must set both tls_cert and tls_key') if self.tls_cert: out.extend(['-tls-cert', self.tls_cert, '-tls-key', self.tls_key]) command = out env = os.environ.copy() env['DASK_GATEWAY_PROXY_TOKEN'] = self.api_token env = env self.log.info('Starting the Dask gateway proxy...') proc = subprocess.Popen(command, env=env, stdin=subprocess.PIPE, stdout=None, stderr=None, start_new_session=True) self.proxy_process = proc self.log.info('Dask gateway proxy started') self.log.info('- %s routes listening at %s://%s', 'HTTPS' if self.tls_cert else 'HTTP', 'https' if self.tls_cert else 'http', self.address) self.log.info('- Scheduler routes listening at gateway://%s', self.tcp_address)
dask-gateway
positive
@model.methodwrap(va=SVa, pid=SPid) def munmap(self, va, pid): <DeepExtract> if str(pid).startswith('a.'): simsym.assume(pid == False) </DeepExtract> del self.getproc(pid).va_map[va] return {'r': 0}
@model.methodwrap(va=SVa, pid=SPid) def munmap(self, va, pid): if str(pid).startswith('a.'): simsym.assume(pid == False) del self.getproc(pid).va_map[va] return {'r': 0}
commuter
positive
def testBatchGradientDescentNormalizedBacktrackF7PL0(self): epsilon = 12 attack = attacks.batch_gradient_descent.BatchGradientDescent() attack.max_iterations = 10 attack.base_lr = 100 attack.momentum = 0 attack.c = 0 attack.lr_factor = 1.5 attack.normalized = True attack.backtrack = True attack.initialization = attacks.initializations.L0UniformNormInitialization(epsilon) attack.projection = attacks.projections.SequentialProjections([attacks.projections.L0Projection(epsilon), attacks.projections.BoxProjection()]) attack.norm = attacks.norms.L0Norm() <DeepExtract> for (b, (images, labels)) in enumerate(self.adversarialloader): break images = common.torch.as_variable(images, self.cuda).permute(0, 3, 1, 2) labels = common.torch.as_variable(labels, self.cuda) success_rate = 0 for t in range(5): attacks.objectives.UntargetedF7PObjective().set(labels) (perturbations, errors) = attack.run(self.model, images, attacks.objectives.UntargetedF7PObjective()) perturbations = numpy.array([numpy.transpose(perturbations, (0, 2, 3, 1))]) success_rate += self.successRate(numpy.transpose(images.cpu().numpy(), (0, 2, 3, 1)), perturbations, labels.cpu().numpy()) success_rate /= 5 success_rate = success_rate </DeepExtract> self.assertGreaterEqual(success_rate, 0.95)
def testBatchGradientDescentNormalizedBacktrackF7PL0(self): epsilon = 12 attack = attacks.batch_gradient_descent.BatchGradientDescent() attack.max_iterations = 10 attack.base_lr = 100 attack.momentum = 0 attack.c = 0 attack.lr_factor = 1.5 attack.normalized = True attack.backtrack = True attack.initialization = attacks.initializations.L0UniformNormInitialization(epsilon) attack.projection = attacks.projections.SequentialProjections([attacks.projections.L0Projection(epsilon), attacks.projections.BoxProjection()]) attack.norm = attacks.norms.L0Norm() for (b, (images, labels)) in enumerate(self.adversarialloader): break images = common.torch.as_variable(images, self.cuda).permute(0, 3, 1, 2) labels = common.torch.as_variable(labels, self.cuda) success_rate = 0 for t in range(5): attacks.objectives.UntargetedF7PObjective().set(labels) (perturbations, errors) = attack.run(self.model, images, attacks.objectives.UntargetedF7PObjective()) perturbations = numpy.array([numpy.transpose(perturbations, (0, 2, 3, 1))]) success_rate += self.successRate(numpy.transpose(images.cpu().numpy(), (0, 2, 3, 1)), perturbations, labels.cpu().numpy()) success_rate /= 5 success_rate = success_rate self.assertGreaterEqual(success_rate, 0.95)
confidence-calibrated-adversarial-training
positive
def __init__(self, path, conf): self.filename = path self.tzinfo = conf.get('tzinfo', None) self.defaultcopywildcard = conf.get('copy_wildcard', '_[0-9]*.*') with io.open(path, 'r', encoding='utf-8', errors='replace') as fp: peak = lchop(fp.read(512), BOM_UTF8) fp.seek(0) if peak.startswith('---\n'): <DeepExtract> head = [] i = 0 while True: line = fp.readline() i += 1 if i == 1 and (not line.startswith('---')): raise AcrylamidException('no meta information in %r found' % fp.name) elif i > 1 and (not line.startswith('---')): head.append(line) elif i > 1 and line.startswith('---') or not line: break if yaml: try: (i, meta) = (i, yaml.load(''.join(head))) except yaml.YAMLError as e: raise AcrylamidException('YAMLError: %s' % str(e)) else: props = {} for (j, line) in enumerate(head): if line[0] == '#' or not line.strip(): continue try: (key, value) = [x.strip() for x in line.split(':', 1)] except ValueError: raise AcrylamidException('%s:%i ValueError: %s\n%s' % (fp.name, j, line.strip('\n'), 'Either your YAML is malformed or our naïve parser is to dumb \nto read it. Revalidate your YAML or install PyYAML parser with \n> easy_install -U pyyaml')) props[key] = distinguish(value) if 'title' not in props: raise AcrylamidException('No title given in %r' % fp.name) (i, meta) = (i, props) </DeepExtract> elif isrest(peak): <DeepExtract> import docutils from docutils.core import publish_doctree title = fp.readline().strip('\n') dash = fp.readline().strip('\n') if not title or not dash: raise AcrylamidException('No title given in %r' % fp.name) if len(dash) < len(title) or dash.count(dash[0]) < len(dash): raise AcrylamidException('title line does not match second line %r' % fp.name) i = 2 meta = [] while True: line = fp.readline() i += 1 if not line.strip() and i == 3: continue elif not line.strip(): break else: meta.append(line) document = publish_doctree(''.join(meta)) meta = dict(title=title) for docinfo in document.traverse(docutils.nodes.docinfo): for element in docinfo.children: if element.tagname == 'field': (name_elem, body_elem) = element.children name = name_elem.astext() value = body_elem.astext() else: name = element.tagname value = element.astext() name = name.lower() if '\n\n' in value: value = value.split('\n\n') elif '\n' in value: value = value.replace('\n', ' ') meta[name] = distinguish(value.split('\n\n') if '\n\n' in value else value) (i, meta) = (i, meta) </DeepExtract> elif peak.startswith('% '): <DeepExtract> meta_pan_re = re.compile('^[ ]{0,3}%+\\s*(?P<value>.*)') meta_pan_more_re = re.compile('^\\s*(?P<value>.*)') meta_pan_authsplit = re.compile(';+\\s*') (i, j) = (0, 0) (meta, key) = ({}, None) poss_keys = ['title', 'author', 'date'] while True: line = fp.readline() i += 1 if line.strip() == '': break if j + 1 > len(poss_keys): raise AcrylamidException('%r has too many items in the Pandoc title block.' 
% fp.name) m1 = meta_pan_re.match(line) if m1: key = poss_keys[j] j += 1 valstrip = m1.group('value').strip() if not valstrip: continue value = distinguish(m1.group('value').strip()) if key == 'author': value = value.strip(';') value = meta_pan_authsplit.split(value) meta.setdefault(key, []).append(value) else: m2 = meta_pan_more_re.match(line) if m2 and key: value = m2.group('value').strip() if key == 'author': value = value.strip(';') value = meta_pan_authsplit.split(value) meta[key].append(value) else: break if 'title' not in meta: raise AcrylamidException('No title given in %r' % fp.name) if len(meta['title']) > 1: meta['title'] = ' '.join(meta['title']) if 'author' in meta: meta['author'] = sum(meta['author'], []) else: log.warn('%s does not have an Author in the Pandoc title block.' % fp.name) for (key, values) in iteritems(meta): if len(values) == 1: meta[key] = values[0] (i, meta) = (i, meta) </DeepExtract> else: <DeepExtract> meta_re = re.compile('^[ ]{0,3}(?P<key>[A-Za-z0-9._-]+):\\s*(?P<value>.*)') meta_more_re = re.compile('^[ ]{4,}(?P<value>.*)') i = 0 (meta, key) = ({}, None) while True: line = fp.readline() i += 1 if line.strip() == '': break m1 = meta_re.match(line) if m1: key = m1.group('key').lower().strip() value = distinguish(m1.group('value').strip()) meta.setdefault(key, []).append(value) else: m2 = meta_more_re.match(line) if m2 and key: meta[key].append(m2.group('value').strip()) else: break if not meta: raise AcrylamidException('no meta information in %r found' % fp.name) for (key, values) in iteritems(meta): if len(values) == 1: meta[key] = values[0] (i, meta) = (i, meta) </DeepExtract> meta['title'] = str(meta['title']) meta['category'] = lchop(dirname(path) + '/', conf['content_dir']).split('/') jekyll = '(?:(.+?)/)?(\\d{4}-\\d{2}-\\d{2})-(.+)' m = re.match('^' + conf['content_dir'] + jekyll + '$', splitext(path)[0]) if m: meta.setdefault('date', m.group(2)) meta.setdefault('slug', m.group(3)) if m.group(1) is not None: meta['category'] = m.group(1).split('/') self.offset = i Reader.__init__(self, conf, meta) (path, ext) = os.path.splitext(path) self.path = lchop(path, conf['content_dir']) self.extension = ext[1:]
def __init__(self, path, conf): self.filename = path self.tzinfo = conf.get('tzinfo', None) self.defaultcopywildcard = conf.get('copy_wildcard', '_[0-9]*.*') with io.open(path, 'r', encoding='utf-8', errors='replace') as fp: peak = lchop(fp.read(512), BOM_UTF8) fp.seek(0) if peak.startswith('---\n'): head = [] i = 0 while True: line = fp.readline() i += 1 if i == 1 and (not line.startswith('---')): raise AcrylamidException('no meta information in %r found' % fp.name) elif i > 1 and (not line.startswith('---')): head.append(line) elif i > 1 and line.startswith('---') or not line: break if yaml: try: (i, meta) = (i, yaml.load(''.join(head))) except yaml.YAMLError as e: raise AcrylamidException('YAMLError: %s' % str(e)) else: props = {} for (j, line) in enumerate(head): if line[0] == '#' or not line.strip(): continue try: (key, value) = [x.strip() for x in line.split(':', 1)] except ValueError: raise AcrylamidException('%s:%i ValueError: %s\n%s' % (fp.name, j, line.strip('\n'), 'Either your YAML is malformed or our naïve parser is to dumb \nto read it. Revalidate your YAML or install PyYAML parser with \n> easy_install -U pyyaml')) props[key] = distinguish(value) if 'title' not in props: raise AcrylamidException('No title given in %r' % fp.name) (i, meta) = (i, props) elif isrest(peak): import docutils from docutils.core import publish_doctree title = fp.readline().strip('\n') dash = fp.readline().strip('\n') if not title or not dash: raise AcrylamidException('No title given in %r' % fp.name) if len(dash) < len(title) or dash.count(dash[0]) < len(dash): raise AcrylamidException('title line does not match second line %r' % fp.name) i = 2 meta = [] while True: line = fp.readline() i += 1 if not line.strip() and i == 3: continue elif not line.strip(): break else: meta.append(line) document = publish_doctree(''.join(meta)) meta = dict(title=title) for docinfo in document.traverse(docutils.nodes.docinfo): for element in docinfo.children: if element.tagname == 'field': (name_elem, body_elem) = element.children name = name_elem.astext() value = body_elem.astext() else: name = element.tagname value = element.astext() name = name.lower() if '\n\n' in value: value = value.split('\n\n') elif '\n' in value: value = value.replace('\n', ' ') meta[name] = distinguish(value.split('\n\n') if '\n\n' in value else value) (i, meta) = (i, meta) elif peak.startswith('% '): meta_pan_re = re.compile('^[ ]{0,3}%+\\s*(?P<value>.*)') meta_pan_more_re = re.compile('^\\s*(?P<value>.*)') meta_pan_authsplit = re.compile(';+\\s*') (i, j) = (0, 0) (meta, key) = ({}, None) poss_keys = ['title', 'author', 'date'] while True: line = fp.readline() i += 1 if line.strip() == '': break if j + 1 > len(poss_keys): raise AcrylamidException('%r has too many items in the Pandoc title block.' 
% fp.name) m1 = meta_pan_re.match(line) if m1: key = poss_keys[j] j += 1 valstrip = m1.group('value').strip() if not valstrip: continue value = distinguish(m1.group('value').strip()) if key == 'author': value = value.strip(';') value = meta_pan_authsplit.split(value) meta.setdefault(key, []).append(value) else: m2 = meta_pan_more_re.match(line) if m2 and key: value = m2.group('value').strip() if key == 'author': value = value.strip(';') value = meta_pan_authsplit.split(value) meta[key].append(value) else: break if 'title' not in meta: raise AcrylamidException('No title given in %r' % fp.name) if len(meta['title']) > 1: meta['title'] = ' '.join(meta['title']) if 'author' in meta: meta['author'] = sum(meta['author'], []) else: log.warn('%s does not have an Author in the Pandoc title block.' % fp.name) for (key, values) in iteritems(meta): if len(values) == 1: meta[key] = values[0] (i, meta) = (i, meta) else: meta_re = re.compile('^[ ]{0,3}(?P<key>[A-Za-z0-9._-]+):\\s*(?P<value>.*)') meta_more_re = re.compile('^[ ]{4,}(?P<value>.*)') i = 0 (meta, key) = ({}, None) while True: line = fp.readline() i += 1 if line.strip() == '': break m1 = meta_re.match(line) if m1: key = m1.group('key').lower().strip() value = distinguish(m1.group('value').strip()) meta.setdefault(key, []).append(value) else: m2 = meta_more_re.match(line) if m2 and key: meta[key].append(m2.group('value').strip()) else: break if not meta: raise AcrylamidException('no meta information in %r found' % fp.name) for (key, values) in iteritems(meta): if len(values) == 1: meta[key] = values[0] (i, meta) = (i, meta) meta['title'] = str(meta['title']) meta['category'] = lchop(dirname(path) + '/', conf['content_dir']).split('/') jekyll = '(?:(.+?)/)?(\\d{4}-\\d{2}-\\d{2})-(.+)' m = re.match('^' + conf['content_dir'] + jekyll + '$', splitext(path)[0]) if m: meta.setdefault('date', m.group(2)) meta.setdefault('slug', m.group(3)) if m.group(1) is not None: meta['category'] = m.group(1).split('/') self.offset = i Reader.__init__(self, conf, meta) (path, ext) = os.path.splitext(path) self.path = lchop(path, conf['content_dir']) self.extension = ext[1:]
acrylamid
positive
def __call__(self, batch, output, attns, normalization=1.0, shard_size=0, trunc_start=0, trunc_size=None): """Compute the forward loss, possibly in shards in which case this method also runs the backward pass and returns ``None`` as the loss value. Also supports truncated BPTT for long sequences by taking a range in the decoder output sequence to back propagate in. Range is from `(trunc_start, trunc_start + trunc_size)`. Note sharding is an exact efficiency trick to relieve memory required for the generation buffers. Truncation is an approximate efficiency trick to relieve the memory required in the RNN buffers. Args: batch (batch) : batch of labeled examples output (:obj:`FloatTensor`) : output of decoder model `[tgt_len x batch x hidden]` attns (dict) : dictionary of attention distributions `[tgt_len x batch x src_len]` normalization: Optional normalization factor. shard_size (int) : maximum number of examples in a shard trunc_start (int) : starting position of truncation window trunc_size (int) : length of truncation window Returns: A tuple with the loss and a :obj:`onmt.utils.Statistics` instance. """ if trunc_size is None: trunc_size = batch.tgt.size(0) - trunc_start trunc_range = (trunc_start, trunc_start + trunc_size) <DeepExtract> shard_state = NotImplementedError </DeepExtract> if shard_size == 0: <DeepExtract> (loss, stats) = NotImplementedError </DeepExtract> return (loss / float(normalization), stats) batch_stats = onmt.utils.Statistics() for shard in shards(shard_state, shard_size): <DeepExtract> (loss, stats) = NotImplementedError </DeepExtract> loss.div(float(normalization)).backward() batch_stats.update(stats) return (None, batch_stats)
def __call__(self, batch, output, attns, normalization=1.0, shard_size=0, trunc_start=0, trunc_size=None): """Compute the forward loss, possibly in shards in which case this method also runs the backward pass and returns ``None`` as the loss value. Also supports truncated BPTT for long sequences by taking a range in the decoder output sequence to back propagate in. Range is from `(trunc_start, trunc_start + trunc_size)`. Note sharding is an exact efficiency trick to relieve memory required for the generation buffers. Truncation is an approximate efficiency trick to relieve the memory required in the RNN buffers. Args: batch (batch) : batch of labeled examples output (:obj:`FloatTensor`) : output of decoder model `[tgt_len x batch x hidden]` attns (dict) : dictionary of attention distributions `[tgt_len x batch x src_len]` normalization: Optional normalization factor. shard_size (int) : maximum number of examples in a shard trunc_start (int) : starting position of truncation window trunc_size (int) : length of truncation window Returns: A tuple with the loss and a :obj:`onmt.utils.Statistics` instance. """ if trunc_size is None: trunc_size = batch.tgt.size(0) - trunc_start trunc_range = (trunc_start, trunc_start + trunc_size) shard_state = NotImplementedError if shard_size == 0: (loss, stats) = NotImplementedError return (loss / float(normalization), stats) batch_stats = onmt.utils.Statistics() for shard in shards(shard_state, shard_size): (loss, stats) = NotImplementedError loss.div(float(normalization)).backward() batch_stats.update(stats) return (None, batch_stats)
DDAMS
positive
def _get_connection_spec(self): if self._connection_addr is None: <DeepExtract> pidfile = os.path.join(self._data_dir, 'postmaster.pid') try: with open(pidfile, 'rt') as f: piddata = f.read() except FileNotFoundError: self._connection_addr = None lines = piddata.splitlines() if len(lines) < 6: self._connection_addr = None pmpid = int(lines[0]) if self._daemon_pid and pmpid != self._daemon_pid: self._connection_addr = None portnum = lines[3] sockdir = lines[4] hostaddr = lines[5] if sockdir: if sockdir[0] != '/': sockdir = os.path.normpath(os.path.join(self._data_dir, sockdir)) host_str = sockdir else: host_str = hostaddr if host_str == '*': host_str = 'localhost' elif host_str == '0.0.0.0': host_str = '127.0.0.1' elif host_str == '::': host_str = '::1' self._connection_addr = {'host': host_str, 'port': portnum} </DeepExtract> if self._connection_addr is not None: if self._connection_spec_override: args = self._connection_addr.copy() args.update(self._connection_spec_override) return args else: return self._connection_addr
def _get_connection_spec(self): if self._connection_addr is None: pidfile = os.path.join(self._data_dir, 'postmaster.pid') try: with open(pidfile, 'rt') as f: piddata = f.read() except FileNotFoundError: self._connection_addr = None lines = piddata.splitlines() if len(lines) < 6: self._connection_addr = None pmpid = int(lines[0]) if self._daemon_pid and pmpid != self._daemon_pid: self._connection_addr = None portnum = lines[3] sockdir = lines[4] hostaddr = lines[5] if sockdir: if sockdir[0] != '/': sockdir = os.path.normpath(os.path.join(self._data_dir, sockdir)) host_str = sockdir else: host_str = hostaddr if host_str == '*': host_str = 'localhost' elif host_str == '0.0.0.0': host_str = '127.0.0.1' elif host_str == '::': host_str = '::1' self._connection_addr = {'host': host_str, 'port': portnum} if self._connection_addr is not None: if self._connection_spec_override: args = self._connection_addr.copy() args.update(self._connection_spec_override) return args else: return self._connection_addr
asyncpg
positive
def createFolders(uid): """Create the folder structure and copy code files""" <DeepExtract> src = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'html') </DeepExtract> <DeepExtract> safeFolder = self.model.outputFolder if self.isWindows() == True: safeFolder = self.model.outputFolder.encode('ascii', 'ignore') dest = os.path.join(safeFolder, uid) </DeepExtract> try: if os.path.isdir(dest): self.log.info('delete previous folder ' + dest) shutil.rmtree(dest) shutil.copytree(src, dest, ignore=self.excludeFiles) except OSError as e: self.__logger.error(e.args[1])
def createFolders(uid): """Create the folder structure and copy code files""" src = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'html') safeFolder = self.model.outputFolder if self.isWindows() == True: safeFolder = self.model.outputFolder.encode('ascii', 'ignore') dest = os.path.join(safeFolder, uid) try: if os.path.isdir(dest): self.log.info('delete previous folder ' + dest) shutil.rmtree(dest) shutil.copytree(src, dest, ignore=self.excludeFiles) except OSError as e: self.__logger.error(e.args[1])
d3MapRenderer
positive
def test_exists_non_existent(self): <DeepExtract> filename = ''.join([random.choice(string.ascii_uppercase + string.digits) for x in range(length)]).lower() </DeepExtract> assert not self._storage.exists(filename)
def test_exists_non_existent(self): filename = ''.join([random.choice(string.ascii_uppercase + string.digits) for x in range(length)]).lower() assert not self._storage.exists(filename)
docker-registry
positive
def _get_description(ioc: Element) -> Optional[str]: <DeepExtract> tag = _tag(_NS_OPENIOC, 'description') </DeepExtract> description = ioc.find(tag) if description is None: return None return description.text
def _get_description(ioc: Element) -> Optional[str]: tag = _tag(_NS_OPENIOC, 'description') description = ioc.find(tag) if description is None: return None return description.text
connectors
positive
def test_eval_files(self): run_predict(predict_args(data=FileDataParams(images=sorted(glob_all([os.path.join(this_dir, 'data', 'uw3_50lines', 'test', '*.png')]))))) r = run_eval(eval_args(gt_data=FileDataParams(texts=sorted(glob_all([os.path.join(this_dir, 'data', 'uw3_50lines', 'test', '*.gt.txt')]))))) self.assertLess(r['avg_ler'], 0.0009, msg='Current best model yields about 0.09% CER') <DeepExtract> args = EvalArgs(gt=FileDataParams(texts=sorted(glob_all([os.path.join(this_dir, 'data', 'uw3_50lines', 'test', '*.gt.txt')]))), pred=pred_data, checkpoint=checkpoint) </DeepExtract> with tempfile.TemporaryDirectory() as d: args.xlsx_output = os.path.join(d, 'output.xlsx') run_eval(args)
def test_eval_files(self): run_predict(predict_args(data=FileDataParams(images=sorted(glob_all([os.path.join(this_dir, 'data', 'uw3_50lines', 'test', '*.png')]))))) r = run_eval(eval_args(gt_data=FileDataParams(texts=sorted(glob_all([os.path.join(this_dir, 'data', 'uw3_50lines', 'test', '*.gt.txt')]))))) self.assertLess(r['avg_ler'], 0.0009, msg='Current best model yields about 0.09% CER') args = EvalArgs(gt=FileDataParams(texts=sorted(glob_all([os.path.join(this_dir, 'data', 'uw3_50lines', 'test', '*.gt.txt')]))), pred=pred_data, checkpoint=checkpoint) with tempfile.TemporaryDirectory() as d: args.xlsx_output = os.path.join(d, 'output.xlsx') run_eval(args)
calamari
positive
def _init_sem_data_gen(graph: nx.DiGraph, schema: Dict, n_samples: int, default_type: str, distributions: Dict[str, str], seed: int): np.random.seed(seed) if not nx.algorithms.is_directed_acyclic_graph(graph): raise ValueError('Provided graph is not a DAG.') <DeepExtract> default_distributions = {'continuous': 'gaussian', 'binary': 'logit', 'categorical': 'logit', 'weight': 'uniform', 'intercept': 'uniform', 'count': 0.05} if distributions is None: distributions = default_distributions default_distributions.update(distributions) distributions = default_distributions </DeepExtract> validated_schema = validate_schema(nodes=graph.nodes(), schema=schema, default_type=default_type) var_fte_mapper = VariableFeatureMapper(validated_schema) n_columns = var_fte_mapper.n_features x_mat = np.empty([n_samples, n_columns]) return (distributions, var_fte_mapper, x_mat)
def _init_sem_data_gen(graph: nx.DiGraph, schema: Dict, n_samples: int, default_type: str, distributions: Dict[str, str], seed: int): np.random.seed(seed) if not nx.algorithms.is_directed_acyclic_graph(graph): raise ValueError('Provided graph is not a DAG.') default_distributions = {'continuous': 'gaussian', 'binary': 'logit', 'categorical': 'logit', 'weight': 'uniform', 'intercept': 'uniform', 'count': 0.05} if distributions is None: distributions = default_distributions default_distributions.update(distributions) distributions = default_distributions validated_schema = validate_schema(nodes=graph.nodes(), schema=schema, default_type=default_type) var_fte_mapper = VariableFeatureMapper(validated_schema) n_columns = var_fte_mapper.n_features x_mat = np.empty([n_samples, n_columns]) return (distributions, var_fte_mapper, x_mat)
causalnex
positive
End of preview.

Dataset Card for "2000-python"

More Information needed. From the preview rows, each record appears to pair a Python function in which previously inlined code is wrapped in <DeepExtract> markers (the before text) with the same function after those markers are stripped (the after text), together with the name of the source repository and a type label.
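Since the card gives no usage details yet, here is a minimal loading sketch rather than a definitive recipe. It assumes the dataset is published on the Hugging Face Hub under a repository id ending in 2000-python (the exact namespace is not stated on this page) and that the preview rows belong to a split named train; both are assumptions to adjust against the actual repository.

from datasets import load_dataset

# Hypothetical repository id -- replace the namespace with the real one.
ds = load_dataset("your-namespace/2000-python", split="train")

# Each record carries the paired code strings plus its provenance fields.
sample = ds[0]
print(sample["repo"], sample["type"])    # source repository name and label
print(sample["before"][:300])            # function containing <DeepExtract> markers
print(sample["after"][:300])             # same function with the markers stripped

Because every column is plain text, the before/after pairs can be diffed or tokenized directly without any extra parsing.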
