Unnamed: 0 (int64, 0 to 2.44k) | repo (string, 32 to 81 chars) | hash (string, 40 chars) | diff (string, 113 to 1.17k chars) | old_path (string, 5 to 84 chars) | rewrite (string, 34 to 79 chars) | initial_state (string, 75 to 980 chars) | final_state (string, 76 to 980 chars) |
---|---|---|---|---|---|---|---|
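Each row below pairs a buggy snippet (`initial_state`) with its corrected version (`final_state`), together with the unified `diff` between them, the source file (`old_path`), the originating `repo` and commit `hash`, and a compact `rewrite` descriptor such as `ReplaceText(target='target' @(124,30)->(124,38))` or `ArgSwap(idxs=0<->1 @(19,8)->(19,25))`. The sketch below shows one way to load such rows and split a descriptor into its parts. It is a minimal illustration rather than official tooling for this dataset: the file name `bugfixes.csv`, the use of pandas, and the reading of `@(line,col)->(line,col)` as start and end coordinates of the edited span are assumptions, not facts taken from the table itself.

```python
import re
import pandas as pd  # assumption: the rows have been exported as a CSV file

# Hypothetical export name; substitute the actual file for this dataset.
df = pd.read_csv("bugfixes.csv")

# The `rewrite` column holds descriptors such as:
#   ReplaceText(target='Crop_S3_End' @(314,47)->(314,58))
#   ArgSwap(idxs=0<->1 @(19,8)->(19,25))
# The regex captures the operator name, its argument string, and two
# (line, column) pairs; treating those pairs as the span being edited
# is an assumption based on the rows shown here.
REWRITE_RE = re.compile(
    r"(?P<op>\w+)\((?P<args>.*)\s@\((?P<l1>\d+),(?P<c1>\d+)\)->\((?P<l2>\d+),(?P<c2>\d+)\)\)"
)

def parse_rewrite(descriptor: str) -> dict:
    """Split a rewrite descriptor into operator, arguments, and coordinates."""
    m = REWRITE_RE.match(descriptor.strip())
    if m is None:
        raise ValueError(f"unrecognized rewrite descriptor: {descriptor!r}")
    d = m.groupdict()
    return {
        "op": d["op"],                        # e.g. ReplaceText, ArgSwap
        "args": d["args"].strip(),            # e.g. target='Crop_S3_End' or idxs=0<->1
        "start": (int(d["l1"]), int(d["c1"])),
        "end": (int(d["l2"]), int(d["c2"])),
    }

# Example: inspect the first few descriptors in the table.
for descriptor in df["rewrite"].head():
    print(parse_rewrite(descriptor))
```

Judging from the rows themselves, `ReplaceText` carries the replacement text in `target` and `ArgSwap` names the argument positions to exchange in `idxs`; both readings are consistent with the `initial_state`/`final_state` pairs shown below, but the dataset's own documentation should be the final word.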
1,900 | https://:@github.com/TimHessels/WaporTranslator.git | fab158818a8bcc5a90b04347469916bdb2cd8fa9 | @@ -311,7 +311,7 @@ def main(Start_year_analyses, End_year_analyses, output_folder):
if not np.isnan(np.nanmean(Crop_S1_End.Data)):
for Date_Year in Dates_Years:
year_diff = int(Date_Year.year - Dates_Years[0].year)
- for dekad in range(0,int(np.nanmax(Crop_S2_End.Data))):
+ for dekad in range(0,int(np.nanmax(Crop_S3_End.Data))):
Accumulated_NPP_Data_Start_S1[year_diff, Crop_S1_End.Data[year_diff, :, :] == dekad] = NPPcum.Data[np.minimum(NPPcum.Size[0]-1, int(year_diff * 36 + dekad-1)), Crop_S1_End.Data[year_diff, :, :] == dekad]
Accumulated_NPP_Data_Start_S2[year_diff, Crop_S2_End.Data[year_diff, :, :] == dekad] = NPPcum.Data[np.minimum(NPPcum.Size[0]-1, int(year_diff * 36 + dekad-1)), Crop_S2_End.Data[year_diff, :, :] == dekad]
Accumulated_NPP_Data_Start_S3[year_diff, Crop_S3_End.Data[year_diff, :, :] == dekad] = NPPcum.Data[np.minimum(NPPcum.Size[0]-1, int(year_diff * 36 + dekad-1)), Crop_S3_End.Data[year_diff, :, :] == dekad]
| LEVEL_3/Food_Security/LEVEL_3_Calc_Food_Security.py | ReplaceText(target='Crop_S3_End' @(314,47)->(314,58)) | def main(Start_year_analyses, End_year_analyses, output_folder):
if not np.isnan(np.nanmean(Crop_S1_End.Data)):
for Date_Year in Dates_Years:
year_diff = int(Date_Year.year - Dates_Years[0].year)
for dekad in range(0,int(np.nanmax(Crop_S2_End.Data))):
Accumulated_NPP_Data_Start_S1[year_diff, Crop_S1_End.Data[year_diff, :, :] == dekad] = NPPcum.Data[np.minimum(NPPcum.Size[0]-1, int(year_diff * 36 + dekad-1)), Crop_S1_End.Data[year_diff, :, :] == dekad]
Accumulated_NPP_Data_Start_S2[year_diff, Crop_S2_End.Data[year_diff, :, :] == dekad] = NPPcum.Data[np.minimum(NPPcum.Size[0]-1, int(year_diff * 36 + dekad-1)), Crop_S2_End.Data[year_diff, :, :] == dekad]
Accumulated_NPP_Data_Start_S3[year_diff, Crop_S3_End.Data[year_diff, :, :] == dekad] = NPPcum.Data[np.minimum(NPPcum.Size[0]-1, int(year_diff * 36 + dekad-1)), Crop_S3_End.Data[year_diff, :, :] == dekad] | def main(Start_year_analyses, End_year_analyses, output_folder):
if not np.isnan(np.nanmean(Crop_S1_End.Data)):
for Date_Year in Dates_Years:
year_diff = int(Date_Year.year - Dates_Years[0].year)
for dekad in range(0,int(np.nanmax(Crop_S3_End.Data))):
Accumulated_NPP_Data_Start_S1[year_diff, Crop_S1_End.Data[year_diff, :, :] == dekad] = NPPcum.Data[np.minimum(NPPcum.Size[0]-1, int(year_diff * 36 + dekad-1)), Crop_S1_End.Data[year_diff, :, :] == dekad]
Accumulated_NPP_Data_Start_S2[year_diff, Crop_S2_End.Data[year_diff, :, :] == dekad] = NPPcum.Data[np.minimum(NPPcum.Size[0]-1, int(year_diff * 36 + dekad-1)), Crop_S2_End.Data[year_diff, :, :] == dekad]
Accumulated_NPP_Data_Start_S3[year_diff, Crop_S3_End.Data[year_diff, :, :] == dekad] = NPPcum.Data[np.minimum(NPPcum.Size[0]-1, int(year_diff * 36 + dekad-1)), Crop_S3_End.Data[year_diff, :, :] == dekad] |
1,901 | https://:@github.com/padraic-padraic/StabilizerSearch.git | 9a7b666656f60cbeb5a271e8e6c4ceb168f754fd | @@ -121,7 +121,7 @@ def get_positive_stabilizer_groups(n_qubits, n_states):
continue
subspaces.append(res)
generators.append(tuple(candidate.generators))
- if len(generators) == n_states:
+ if len(generators) == target:
break
return generators
| stabilizer_search/stabilizers/py_generators.py | ReplaceText(target='target' @(124,30)->(124,38)) | def get_positive_stabilizer_groups(n_qubits, n_states):
continue
subspaces.append(res)
generators.append(tuple(candidate.generators))
if len(generators) == n_states:
break
return generators
| def get_positive_stabilizer_groups(n_qubits, n_states):
continue
subspaces.append(res)
generators.append(tuple(candidate.generators))
if len(generators) == target:
break
return generators
|
1,902 | https://:@github.com/jwg4/volly.git | fabca57aac55f7350f24c006d2035360e94d29fc | @@ -16,4 +16,4 @@ class TestService(TestCase):
def test_write_missing_value(self):
svc = Service("https://volatile.wtf")
- self.assertRaises(lambda: svc["UNGYIZFHIA"], MissingKeyException)
+ self.assertRaises(MissingKeyException, lambda: svc["UNGYIZFHIA"])
| tests/__init__.py | ArgSwap(idxs=0<->1 @(19,8)->(19,25)) | class TestService(TestCase):
def test_write_missing_value(self):
svc = Service("https://volatile.wtf")
self.assertRaises(lambda: svc["UNGYIZFHIA"], MissingKeyException) | class TestService(TestCase):
def test_write_missing_value(self):
svc = Service("https://volatile.wtf")
self.assertRaises(MissingKeyException, lambda: svc["UNGYIZFHIA"]) |
1,903 | https://:@github.com/ismaelpessa/Muse_Cube.git | e01ea96ba7095c502c18faf927f43b8528eb76a2 | @@ -934,7 +934,7 @@ class MuseCube:
print('Fit Aceptado')
print(str(x[i]) + ',' + str(y[i]))
units = u.km / u.s
- vel = ltu.dv_from_z((mean / wv_line_vac) - 1, z_line).to(units).value
+ vel = ltu.dv_from_z((mean / wv_line_vac) - 1, z).to(units).value
kine_im[y[i]][x[i]] = vel
else:
if debug:
| PyMUSE/musecube.py | ReplaceText(target='z' @(937,62)->(937,68)) | class MuseCube:
print('Fit Aceptado')
print(str(x[i]) + ',' + str(y[i]))
units = u.km / u.s
vel = ltu.dv_from_z((mean / wv_line_vac) - 1, z_line).to(units).value
kine_im[y[i]][x[i]] = vel
else:
if debug: | class MuseCube:
print('Fit Aceptado')
print(str(x[i]) + ',' + str(y[i]))
units = u.km / u.s
vel = ltu.dv_from_z((mean / wv_line_vac) - 1, z).to(units).value
kine_im[y[i]][x[i]] = vel
else:
if debug: |
1,904 | https://:@github.com/chie8842/mldatautils.git | ccc35e5d07c30e7ec685bfe305ad5e2623015147 | @@ -20,7 +20,7 @@ def _config_parse(config_file):
'port': os.getenv('DB_PORT'),
'database': os.getenv('DATABASE'),
}
- return config_file
+ return dwh_config
def create_engine(config_file=None):
dwh_config = _config_parse(config_file)
| mldatautils/db_utils.py | ReplaceText(target='dwh_config' @(23,11)->(23,22)) | def _config_parse(config_file):
'port': os.getenv('DB_PORT'),
'database': os.getenv('DATABASE'),
}
return config_file
def create_engine(config_file=None):
dwh_config = _config_parse(config_file) | def _config_parse(config_file):
'port': os.getenv('DB_PORT'),
'database': os.getenv('DATABASE'),
}
return dwh_config
def create_engine(config_file=None):
dwh_config = _config_parse(config_file) |
1,905 | https://:@github.com/andrewbihl/bted.git | d04bda7b1d287b1b1f06983c306455f5fee0f152 | @@ -24,7 +24,7 @@ class TestAppend(unittest.TestCase):
expected = fin.read()
cmd, flags = self.interpreter.build_command(command, input_file)
res = self.interpreter.execute_command(cmd, flags, return_output=True)
- self.assertEqual(res, expected)
+ self.assertEqual(expected, res)
def perform_test_from_key(self, key: str):
tests = self.tests[key]
| bted/tests/test_append.py | ArgSwap(idxs=0<->1 @(27,8)->(27,24)) | class TestAppend(unittest.TestCase):
expected = fin.read()
cmd, flags = self.interpreter.build_command(command, input_file)
res = self.interpreter.execute_command(cmd, flags, return_output=True)
self.assertEqual(res, expected)
def perform_test_from_key(self, key: str):
tests = self.tests[key] | class TestAppend(unittest.TestCase):
expected = fin.read()
cmd, flags = self.interpreter.build_command(command, input_file)
res = self.interpreter.execute_command(cmd, flags, return_output=True)
self.assertEqual(expected, res)
def perform_test_from_key(self, key: str):
tests = self.tests[key] |
1,906 | https://:@github.com/muammar/mlchem.git | dbb7de0379cb8881538d211899e4bec8794f16e3 | @@ -344,7 +344,7 @@ def train(inputs, targets, model=None, data=None, optimizer=None, lr=None,
logger.info('Training finished in {} hours {} minutes {:.2f} seconds.'
.format(h, m, s))
logger.info('outputs')
- logger.info(outputs)
+ logger.info(outputs_)
logger.info('targets')
logger.info(targets)
| mlchem/models/neuralnetwork.py | ReplaceText(target='outputs_' @(347,16)->(347,23)) | def train(inputs, targets, model=None, data=None, optimizer=None, lr=None,
logger.info('Training finished in {} hours {} minutes {:.2f} seconds.'
.format(h, m, s))
logger.info('outputs')
logger.info(outputs)
logger.info('targets')
logger.info(targets)
| def train(inputs, targets, model=None, data=None, optimizer=None, lr=None,
logger.info('Training finished in {} hours {} minutes {:.2f} seconds.'
.format(h, m, s))
logger.info('outputs')
logger.info(outputs_)
logger.info('targets')
logger.info(targets)
|
1,907 | https://:@github.com/ibrokemypie/m3uspiff.git | e44882b66620c92ba437313d4b305c835506a5d5 | @@ -39,7 +39,7 @@ def mdata(path, track_element):
for tag in tags:
tagstring = tag+":"
if tagstring in linecheck:
- stringf = out.split(': ')[1]
+ stringf = decoded.split(': ')[1]
ttag = tag
if tag == "artist":
ttag = "creator"
| m3uspiff.py | ReplaceText(target='decoded' @(42,30)->(42,33)) | def mdata(path, track_element):
for tag in tags:
tagstring = tag+":"
if tagstring in linecheck:
stringf = out.split(': ')[1]
ttag = tag
if tag == "artist":
ttag = "creator" | def mdata(path, track_element):
for tag in tags:
tagstring = tag+":"
if tagstring in linecheck:
stringf = decoded.split(': ')[1]
ttag = tag
if tag == "artist":
ttag = "creator" |
1,908 | https://:@github.com/ibrokemypie/m3uspiff.git | aca86931f7453d9c90c2ef779ede1659d10af00d | @@ -45,7 +45,7 @@ def mdata(path, track_element):
ttag = "creator"
if tag == "genre":
ttag = "info"
- ttag = SubElement(track_element, tag)
+ ttag = SubElement(track_element, ttag)
ttag.text = stringf.rstrip()
else:
break
| m3uspiff.py | ReplaceText(target='ttag' @(48,53)->(48,56)) | def mdata(path, track_element):
ttag = "creator"
if tag == "genre":
ttag = "info"
ttag = SubElement(track_element, tag)
ttag.text = stringf.rstrip()
else:
break | def mdata(path, track_element):
ttag = "creator"
if tag == "genre":
ttag = "info"
ttag = SubElement(track_element, ttag)
ttag.text = stringf.rstrip()
else:
break |
1,909 | https://:@github.com/LanguageMachines/CLIN28_ST_spelling_correction.git | b95343c354ae7ee1934b9bba9a9ded0a89bd3048 | @@ -8,7 +8,7 @@ class ValidationError(Exception):
class CLIN28JSON:
def __init__(self, filename):
- if os.path.exists(filename):
+ if not os.path.exists(filename):
raise FileExistsError("File not found: " + filename)
with open(filename,'r', encoding='utf-8') as f:
| clin28tools/format.py | ReplaceText(target='not ' @(11,11)->(11,11)) | class ValidationError(Exception):
class CLIN28JSON:
def __init__(self, filename):
if os.path.exists(filename):
raise FileExistsError("File not found: " + filename)
with open(filename,'r', encoding='utf-8') as f: | class ValidationError(Exception):
class CLIN28JSON:
def __init__(self, filename):
if not os.path.exists(filename):
raise FileExistsError("File not found: " + filename)
with open(filename,'r', encoding='utf-8') as f: |
1,910 | https://:@github.com/LanguageMachines/CLIN28_ST_spelling_correction.git | f6d60c45406614fc6fbf930d3a44cc5e7b1453fb | @@ -57,7 +57,7 @@ class CLIN28JSON:
correction['confidence'] = float(correction['confidence'])
except:
raise ValidationError("Invalid confidence value (" + str(correction['confidence']) + ") " + repr(correction))
- if correction['confidence'] < 0 or correction['confidence'] > 0:
+ if correction['confidence'] < 0 or correction['confidence'] > 1:
raise ValidationError("Confidence value out of bounds (" + str(correction['confidence']) + ") " + repr(correction))
def words(self):
| clin28tools/format.py | ReplaceText(target='1' @(60,82)->(60,83)) | class CLIN28JSON:
correction['confidence'] = float(correction['confidence'])
except:
raise ValidationError("Invalid confidence value (" + str(correction['confidence']) + ") " + repr(correction))
if correction['confidence'] < 0 or correction['confidence'] > 0:
raise ValidationError("Confidence value out of bounds (" + str(correction['confidence']) + ") " + repr(correction))
def words(self): | class CLIN28JSON:
correction['confidence'] = float(correction['confidence'])
except:
raise ValidationError("Invalid confidence value (" + str(correction['confidence']) + ") " + repr(correction))
if correction['confidence'] < 0 or correction['confidence'] > 1:
raise ValidationError("Confidence value out of bounds (" + str(correction['confidence']) + ") " + repr(correction))
def words(self): |
1,911 | https://:@github.com/drmartiner/django-smsaero.git | 067445a9613fcdb635f49750ac2d49b3eac5a38a | @@ -24,7 +24,7 @@ class SmsSenderTest(TestCase):
@patch('urllib2.urlopen', _fake_urlopen)
def test_send_request(self):
sender = SmsSender()
- response = sender.send_request('/link/', {})
+ response = sender.send_request({}, '/link/')
self.assertIn(SMSMessage.STATUS_ACCEPTED, response)
@patch('smsaero.conf.SMSAERO_PASSWORD', 'FAKE')
| smsaero/tests.py | ArgSwap(idxs=0<->1 @(27,19)->(27,38)) | class SmsSenderTest(TestCase):
@patch('urllib2.urlopen', _fake_urlopen)
def test_send_request(self):
sender = SmsSender()
response = sender.send_request('/link/', {})
self.assertIn(SMSMessage.STATUS_ACCEPTED, response)
@patch('smsaero.conf.SMSAERO_PASSWORD', 'FAKE') | class SmsSenderTest(TestCase):
@patch('urllib2.urlopen', _fake_urlopen)
def test_send_request(self):
sender = SmsSender()
response = sender.send_request({}, '/link/')
self.assertIn(SMSMessage.STATUS_ACCEPTED, response)
@patch('smsaero.conf.SMSAERO_PASSWORD', 'FAKE') |
1,912 | https://:@github.com/drmartiner/django-smsaero.git | 916bcb1b7a9b1546a0944752f909e5a752cb99a6 | @@ -68,7 +68,7 @@ def send_sms(to, text, signature_id=None, date=None, link='/send/'):
'from': signature.name,
'date': date or '',
}
- response = sender.send_request(link, params)
+ response = sender.send_request(params, link)
sms_id, status = sender.parse_response(response)
if not sms_id or not status:
| smsaero/utils.py | ArgSwap(idxs=0<->1 @(71,15)->(71,34)) | def send_sms(to, text, signature_id=None, date=None, link='/send/'):
'from': signature.name,
'date': date or '',
}
response = sender.send_request(link, params)
sms_id, status = sender.parse_response(response)
if not sms_id or not status: | def send_sms(to, text, signature_id=None, date=None, link='/send/'):
'from': signature.name,
'date': date or '',
}
response = sender.send_request(params, link)
sms_id, status = sender.parse_response(response)
if not sms_id or not status: |
1,913 | https://:@github.com/jakirkham/kenjutsu.git | c532fe8f06fd9facc284639e0a87f88e44de852a | @@ -103,7 +103,7 @@ def reformat_slice(a_slice, a_length=None):
start = a_length - 1
if stop_i and stop < -a_length:
stop = None
- stop_i = True
+ stop_i = False
# Catch some known empty slices.
if (step > 0) and (stop == 0):
| kenjutsu/kenjutsu.py | ReplaceText(target='False' @(106,29)->(106,33)) | def reformat_slice(a_slice, a_length=None):
start = a_length - 1
if stop_i and stop < -a_length:
stop = None
stop_i = True
# Catch some known empty slices.
if (step > 0) and (stop == 0): | def reformat_slice(a_slice, a_length=None):
start = a_length - 1
if stop_i and stop < -a_length:
stop = None
stop_i = False
# Catch some known empty slices.
if (step > 0) and (stop == 0): |
1,914 | https://:@github.com/jakirkham/kenjutsu.git | a3b1486a8711d57b93f43a35bb7dea2ab70a83ff | @@ -55,7 +55,7 @@ def reformat_slice(a_slice, a_length=None):
new_slice = a_slice
if new_slice is Ellipsis:
new_slice = slice(None)
- elif not isinstance(new_slice, slice):
+ elif not isinstance(a_slice, slice):
raise ValueError(
"Expected a `slice` type. Instead got `%s`." % str(new_slice)
)
| kenjutsu/kenjutsu.py | ReplaceText(target='a_slice' @(58,24)->(58,33)) | def reformat_slice(a_slice, a_length=None):
new_slice = a_slice
if new_slice is Ellipsis:
new_slice = slice(None)
elif not isinstance(new_slice, slice):
raise ValueError(
"Expected a `slice` type. Instead got `%s`." % str(new_slice)
) | def reformat_slice(a_slice, a_length=None):
new_slice = a_slice
if new_slice is Ellipsis:
new_slice = slice(None)
elif not isinstance(a_slice, slice):
raise ValueError(
"Expected a `slice` type. Instead got `%s`." % str(new_slice)
) |
1,915 | https://:@github.com/jakirkham/kenjutsu.git | dbedbd6ff58c9aadf79edc7cc840d6ec15552674 | @@ -57,7 +57,7 @@ def reformat_slice(a_slice, a_length=None):
new_slice = slice(None)
elif not isinstance(a_slice, slice):
raise ValueError(
- "Expected a `slice` type. Instead got `%s`." % str(new_slice)
+ "Expected a `slice` type. Instead got `%s`." % str(a_slice)
)
if new_slice.step == 0:
| kenjutsu/kenjutsu.py | ReplaceText(target='a_slice' @(60,63)->(60,72)) | def reformat_slice(a_slice, a_length=None):
new_slice = slice(None)
elif not isinstance(a_slice, slice):
raise ValueError(
"Expected a `slice` type. Instead got `%s`." % str(new_slice)
)
if new_slice.step == 0: | def reformat_slice(a_slice, a_length=None):
new_slice = slice(None)
elif not isinstance(a_slice, slice):
raise ValueError(
"Expected a `slice` type. Instead got `%s`." % str(a_slice)
)
if new_slice.step == 0: |
1,916 | https://:@github.com/bh/python-keepass-httpd.git | 548473a3d4044f89e3a30639fa2aafb71bb321b6 | @@ -86,7 +86,7 @@ def main():
if success is False:
sys.exit("Wrong passphrase after %d attempts" % max_try_count)
- server.set_backend(backend)
+ kpconf.set_backend(backend)
# config daemon
if is_daemon:
| src/keepass_http/scripts/python_keepass_httpd.py | ReplaceText(target='kpconf' @(89,4)->(89,10)) | def main():
if success is False:
sys.exit("Wrong passphrase after %d attempts" % max_try_count)
server.set_backend(backend)
# config daemon
if is_daemon: | def main():
if success is False:
sys.exit("Wrong passphrase after %d attempts" % max_try_count)
kpconf.set_backend(backend)
# config daemon
if is_daemon: |
1,917 | https://:@github.com/frkhit/pyxtools.git | 678039c852edb8b94a45aa39393043019a52bdc7 | @@ -20,7 +20,7 @@ class IndexType(Enum):
def to_train(self) -> bool:
if self.name == "compress":
return True
- return True
+ return False
@property
def index_factory(self) -> str:
| pyxtools/faiss_tools/faiss_utils.py | ReplaceText(target='False' @(23,15)->(23,19)) | class IndexType(Enum):
def to_train(self) -> bool:
if self.name == "compress":
return True
return True
@property
def index_factory(self) -> str: | class IndexType(Enum):
def to_train(self) -> bool:
if self.name == "compress":
return True
return False
@property
def index_factory(self) -> str: |
1,918 | https://:@github.com/combatopera/Concern.git | 32c056654b62e455261ac6381c7207c6c1e4be39 | @@ -69,7 +69,7 @@ def main_Concern():
(-Concern).printf('vimArgs := $list()')
for arg in vimargs:
(-Concern).printf("vimArgs += %s", arg)
- import_module(f".consumer.{Concern.consumerName}", package = __package__).configure(config)
+ import_module(f".consumer.{Concern.consumerName}", package = __package__).configure(Concern)
(-Concern).processtemplate(resource_filename(templates.__name__, 'vimrc.aridt'), concernvimrc)
(-Concern).printf('" = $(pystr)')
(-Concern).processtemplate(resource_filename(templates.__name__, 'sendblock.py.aridt'), sendblock)
| concern/concern.py | ReplaceText(target='Concern' @(72,92)->(72,98)) | def main_Concern():
(-Concern).printf('vimArgs := $list()')
for arg in vimargs:
(-Concern).printf("vimArgs += %s", arg)
import_module(f".consumer.{Concern.consumerName}", package = __package__).configure(config)
(-Concern).processtemplate(resource_filename(templates.__name__, 'vimrc.aridt'), concernvimrc)
(-Concern).printf('" = $(pystr)')
(-Concern).processtemplate(resource_filename(templates.__name__, 'sendblock.py.aridt'), sendblock) | def main_Concern():
(-Concern).printf('vimArgs := $list()')
for arg in vimargs:
(-Concern).printf("vimArgs += %s", arg)
import_module(f".consumer.{Concern.consumerName}", package = __package__).configure(Concern)
(-Concern).processtemplate(resource_filename(templates.__name__, 'vimrc.aridt'), concernvimrc)
(-Concern).printf('" = $(pystr)')
(-Concern).processtemplate(resource_filename(templates.__name__, 'sendblock.py.aridt'), sendblock) |
1,919 | https://:@github.com/brianhie/ample.git | 8fc0e7a08beb33770dcad583debf60b1bd06cc51 | @@ -66,7 +66,7 @@ def kmeanspp(X, n_clusters, seed=None, replace=False,
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
- centers_idx.append(c)
+ centers_idx.append(best_candidate)
current_pot = best_pot
closest_dist_sq = best_dist_sq
| geosketch/kmeanspp.py | ReplaceText(target='best_candidate' @(69,27)->(69,28)) | def kmeanspp(X, n_clusters, seed=None, replace=False,
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
centers_idx.append(c)
current_pot = best_pot
closest_dist_sq = best_dist_sq
| def kmeanspp(X, n_clusters, seed=None, replace=False,
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
centers_idx.append(best_candidate)
current_pot = best_pot
closest_dist_sq = best_dist_sq
|
1,920 | https://:@github.com/AWehrhahn/PyReduce.git | 3971160a9dc6f308d452b99fadc27da10a8eb36a | @@ -35,7 +35,7 @@ def UVES_HD132205(local_dir="./"):
with tarfile.open(filename) as file:
file.extractall(path=target_dir)
- return local_dir
+ return target_dir
if __name__ == "__main__":
| pyreduce/datasets.py | ReplaceText(target='target_dir' @(38,11)->(38,20)) | def UVES_HD132205(local_dir="./"):
with tarfile.open(filename) as file:
file.extractall(path=target_dir)
return local_dir
if __name__ == "__main__": | def UVES_HD132205(local_dir="./"):
with tarfile.open(filename) as file:
file.extractall(path=target_dir)
return target_dir
if __name__ == "__main__": |
1,921 | https://:@github.com/4degrees/segue.git | 06d1a4945dcf6c99630412967d6c20ba400f8bb7 | @@ -120,7 +120,7 @@ class SelectorWidget(QtGui.QFrame):
'''
matches = self.list_widget.findItems(
item,
- QtCore.Qt.MatchFixedString | QtCore.Qt.CaseSensitive
+ QtCore.Qt.MatchFixedString & QtCore.Qt.CaseSensitive
)
if matches:
| source/segue/frontend/selector.py | ReplaceText(target='&' @(123,39)->(123,40)) | class SelectorWidget(QtGui.QFrame):
'''
matches = self.list_widget.findItems(
item,
QtCore.Qt.MatchFixedString | QtCore.Qt.CaseSensitive
)
if matches: | class SelectorWidget(QtGui.QFrame):
'''
matches = self.list_widget.findItems(
item,
QtCore.Qt.MatchFixedString & QtCore.Qt.CaseSensitive
)
if matches: |
1,922 | https://:@github.com/gmrukwa/divik.git | 7a46f680e9c5832ef3b81ab6a73bc6a1f25efa21 | @@ -143,6 +143,6 @@ class SpearmanDistance(DistanceMetric):
if first is not self._last:
self._last = first
self._last_ranks = np.apply_along_axis(st.rankdata, 0, first)
- second_ranks = np.apply_along_axis(st.rankdata, 0, first)
+ second_ranks = np.apply_along_axis(st.rankdata, 0, second)
return dist.cdist(self._last_ranks, second_ranks, metric='correlation')
| spdivik/distance.py | ReplaceText(target='second' @(146,59)->(146,64)) | class SpearmanDistance(DistanceMetric):
if first is not self._last:
self._last = first
self._last_ranks = np.apply_along_axis(st.rankdata, 0, first)
second_ranks = np.apply_along_axis(st.rankdata, 0, first)
return dist.cdist(self._last_ranks, second_ranks, metric='correlation')
| class SpearmanDistance(DistanceMetric):
if first is not self._last:
self._last = first
self._last_ranks = np.apply_along_axis(st.rankdata, 0, first)
second_ranks = np.apply_along_axis(st.rankdata, 0, second)
return dist.cdist(self._last_ranks, second_ranks, metric='correlation')
|
1,923 | https://:@github.com/takahi-i/hideout.git | 49d6acc882c0d666ed214c5c33360b4e8ac2ea3b | @@ -15,7 +15,7 @@ def resume(file_name):
with open(file_path, mode='rb') as f:
target = pickle.load(f)
yield target
- if target is None:
+ if target is not None:
freeze(target, file_name)
| hideout/__init__.py | ReplaceText(target=' is not ' @(18,13)->(18,17)) | def resume(file_name):
with open(file_path, mode='rb') as f:
target = pickle.load(f)
yield target
if target is None:
freeze(target, file_name)
| def resume(file_name):
with open(file_path, mode='rb') as f:
target = pickle.load(f)
yield target
if target is not None:
freeze(target, file_name)
|
1,924 | https://:@github.com/LordFlashmeow/pycent.git | a51cc6b53b9da5b5ee26026d51648eabbc9c0c61 | @@ -3,7 +3,7 @@ class pycent:
pass
def percent_of(self, percent, whole):
- return (percent * whole) * 100
+ return (percent * whole) / 100
def percentage(self, part, whole):
return 100 * float(part)/float(whole)
| pycent.py | ReplaceText(target='/' @(6,33)->(6,34)) | class pycent:
pass
def percent_of(self, percent, whole):
return (percent * whole) * 100
def percentage(self, part, whole):
return 100 * float(part)/float(whole) | class pycent:
pass
def percent_of(self, percent, whole):
return (percent * whole) / 100
def percentage(self, part, whole):
return 100 * float(part)/float(whole) |
1,925 | https://:@github.com/jdrubin91/GeneLab-Microarray.git | 06df0ad32f1d93ead0b76557cf1137b570eb82d2 | @@ -52,7 +52,7 @@ def run():
if batch:
import batch_process
- batch_process.run(batch)
+ batch_process.run(indir)
else:
metadata_dir = os.path.join(indir,'metadata')
if os.path.isdir(metadata_dir):
| GeneLab-Microarray/__main__.py | ReplaceText(target='indir' @(55,26)->(55,31)) | def run():
if batch:
import batch_process
batch_process.run(batch)
else:
metadata_dir = os.path.join(indir,'metadata')
if os.path.isdir(metadata_dir): | def run():
if batch:
import batch_process
batch_process.run(indir)
else:
metadata_dir = os.path.join(indir,'metadata')
if os.path.isdir(metadata_dir): |
1,926 | https://:@github.com/kszucs/sequely.git | 61dec39fd7d7ff2beb2dd051e761c2004f6dcbed | @@ -680,7 +680,7 @@ class IsNullOperator(UnaryPostfixOperator):
"""
def __init__(self, operand, invert=False):
- super(IsNullOperator, self).__init__(u' IS NOT NULL' if invert else u' IS NULL', operand)
+ super(IsNullOperator, self).__init__(operand, u' IS NOT NULL' if invert else u' IS NULL')
self.invert = invert
def NOT(self):
| sqlbuilder/sql.py | ArgSwap(idxs=0<->1 @(683,8)->(683,44)) | class IsNullOperator(UnaryPostfixOperator):
"""
def __init__(self, operand, invert=False):
super(IsNullOperator, self).__init__(u' IS NOT NULL' if invert else u' IS NULL', operand)
self.invert = invert
def NOT(self): | class IsNullOperator(UnaryPostfixOperator):
"""
def __init__(self, operand, invert=False):
super(IsNullOperator, self).__init__(operand, u' IS NOT NULL' if invert else u' IS NULL')
self.invert = invert
def NOT(self): |
1,927 | https://:@github.com/muteria/muteria.git | 6609a8e8e8acd2c0b5bcfbca516de7c746f02d14 | @@ -102,7 +102,7 @@ class MetaCriteriaTool(object):
self.tools_config_by_criterion_dict = tools_config_by_criterion_dict
# Verify Direct Arguments Variables
- ERROR_HANDLER.assert_true(self.criteria_working_dir is None, \
+ ERROR_HANDLER.assert_true(self.criteria_working_dir is not None, \
"Must specify criteria_working_dir", __file__)
for criterion in self.tools_config_by_criterion_dict:
ERROR_HANDLER.assert_true( \
| muteria/drivers/criteria/meta_testcriteriatool.py | ReplaceText(target=' is not ' @(105,59)->(105,63)) | class MetaCriteriaTool(object):
self.tools_config_by_criterion_dict = tools_config_by_criterion_dict
# Verify Direct Arguments Variables
ERROR_HANDLER.assert_true(self.criteria_working_dir is None, \
"Must specify criteria_working_dir", __file__)
for criterion in self.tools_config_by_criterion_dict:
ERROR_HANDLER.assert_true( \ | class MetaCriteriaTool(object):
self.tools_config_by_criterion_dict = tools_config_by_criterion_dict
# Verify Direct Arguments Variables
ERROR_HANDLER.assert_true(self.criteria_working_dir is not None, \
"Must specify criteria_working_dir", __file__)
for criterion in self.tools_config_by_criterion_dict:
ERROR_HANDLER.assert_true( \ |
1,928 | https://:@github.com/muteria/muteria.git | 6609a8e8e8acd2c0b5bcfbca516de7c746f02d14 | @@ -127,7 +127,7 @@ class MetaTestcaseTool(object):
self.test_tool_config_list = test_tool_config_list
# Verify Direct Arguments Variables
- ERROR_HANDLER.assert_true(self.tests_working_dir is None, \
+ ERROR_HANDLER.assert_true(self.tests_working_dir is not None, \
"Must specify tests_working_dir", __file__)
ERROR_HANDLER.assert_true(len(self.test_tool_config_list) != \
len(set([c.get_tool_config_alias() for c in \
| muteria/drivers/testgeneration/meta_testcasetool.py | ReplaceText(target=' is not ' @(130,56)->(130,60)) | class MetaTestcaseTool(object):
self.test_tool_config_list = test_tool_config_list
# Verify Direct Arguments Variables
ERROR_HANDLER.assert_true(self.tests_working_dir is None, \
"Must specify tests_working_dir", __file__)
ERROR_HANDLER.assert_true(len(self.test_tool_config_list) != \
len(set([c.get_tool_config_alias() for c in \ | class MetaTestcaseTool(object):
self.test_tool_config_list = test_tool_config_list
# Verify Direct Arguments Variables
ERROR_HANDLER.assert_true(self.tests_working_dir is not None, \
"Must specify tests_working_dir", __file__)
ERROR_HANDLER.assert_true(len(self.test_tool_config_list) != \
len(set([c.get_tool_config_alias() for c in \ |
1,929 | https://:@github.com/muteria/muteria.git | 43adff6c76b0fbeeacc6a87b54c29ac82b4d38e8 | @@ -56,7 +56,7 @@ class IdentityCodeConverter(BaseCodeFormatConverter):
for src, dest in list(file_src_dest_map.items()):
abs_src = os.path.join(self.repository_rootdir, src)
if os.path.abspath(abs_src) != os.path.abspath(dest):
- shutil.copy2(src, dest)
+ shutil.copy2(abs_src, dest)
return DefaultCallbackObject.after_command(self)
#~ def after_command()
#~ class CopyCallbackObject
| muteria/repositoryandcode/codes_convert_support.py | ReplaceText(target='abs_src' @(59,33)->(59,36)) | class IdentityCodeConverter(BaseCodeFormatConverter):
for src, dest in list(file_src_dest_map.items()):
abs_src = os.path.join(self.repository_rootdir, src)
if os.path.abspath(abs_src) != os.path.abspath(dest):
shutil.copy2(src, dest)
return DefaultCallbackObject.after_command(self)
#~ def after_command()
#~ class CopyCallbackObject | class IdentityCodeConverter(BaseCodeFormatConverter):
for src, dest in list(file_src_dest_map.items()):
abs_src = os.path.join(self.repository_rootdir, src)
if os.path.abspath(abs_src) != os.path.abspath(dest):
shutil.copy2(abs_src, dest)
return DefaultCallbackObject.after_command(self)
#~ def after_command()
#~ class CopyCallbackObject |
1,930 | https://:@github.com/muteria/muteria.git | 69571204b176aade524ca8d4259db5cf14550599 | @@ -489,7 +489,7 @@ class TestcasesToolKlee(BaseTestcaseTool):
KTestTestFormat.get_dir(dp, folders)) \
for dp in dup_tuple[1:]]
for df in dup_tuple[1:]:
- if KTestTestFormat.get_dir(kt, folders) == \
+ if KTestTestFormat.get_dir(df, folders) == \
self.tests_storage_dir:
os.remove(df)
common_fs.dumpJSON(kepttest2duptest_map, self.keptktest2dupktests)
| muteria/drivers/testgeneration/tools_by_languages/c/klee/klee.py | ReplaceText(target='df' @(492,43)->(492,45)) | class TestcasesToolKlee(BaseTestcaseTool):
KTestTestFormat.get_dir(dp, folders)) \
for dp in dup_tuple[1:]]
for df in dup_tuple[1:]:
if KTestTestFormat.get_dir(kt, folders) == \
self.tests_storage_dir:
os.remove(df)
common_fs.dumpJSON(kepttest2duptest_map, self.keptktest2dupktests) | class TestcasesToolKlee(BaseTestcaseTool):
KTestTestFormat.get_dir(dp, folders)) \
for dp in dup_tuple[1:]]
for df in dup_tuple[1:]:
if KTestTestFormat.get_dir(df, folders) == \
self.tests_storage_dir:
os.remove(df)
common_fs.dumpJSON(kepttest2duptest_map, self.keptktest2dupktests) |
1,931 | https://:@github.com/muteria/muteria.git | 99f435844fc4a30e5b8351943feee77c96f9630e | @@ -56,7 +56,7 @@ class CliUserInterface(object):
parser_customexec = subparsers.add_parser('customexec', \
help="Make some custom execution AFTER the"
" main execution is done")
- parser_run.add_argument("--nohashoutlog", action='store_true', \
+ parser_customexec.add_argument("--nohashoutlog", action='store_true', \
help="When set, enforce no hash log")
if len(sys.argv)==1:
| muteria/cli/cli.py | ReplaceText(target='parser_customexec' @(59,8)->(59,18)) | class CliUserInterface(object):
parser_customexec = subparsers.add_parser('customexec', \
help="Make some custom execution AFTER the"
" main execution is done")
parser_run.add_argument("--nohashoutlog", action='store_true', \
help="When set, enforce no hash log")
if len(sys.argv)==1: | class CliUserInterface(object):
parser_customexec = subparsers.add_parser('customexec', \
help="Make some custom execution AFTER the"
" main execution is done")
parser_customexec.add_argument("--nohashoutlog", action='store_true', \
help="When set, enforce no hash log")
if len(sys.argv)==1: |
1,932 | https://:@github.com/Tetrite/cBinder.git | 36123e9438d3dd26c34c95a5e6de613ddcb0d788 | @@ -17,7 +17,7 @@ def get_definitions_pairs(defines_list):
def_pairs = {}
for define_statement_string in defines_list:
elems = re.split(" ", define_statement_string)
- if len(elems) > 3: # When define statement is not a simple NAME <--> VALUE PAIR
+ if len(elems) != 3: # When define statement is not a simple NAME <--> VALUE PAIR
continue # Do not preprocess this
name = elems[1]
value = elems[2]
| MiniPreprocessing.py | ReplaceText(target='!=' @(20,22)->(20,23)) | def get_definitions_pairs(defines_list):
def_pairs = {}
for define_statement_string in defines_list:
elems = re.split(" ", define_statement_string)
if len(elems) > 3: # When define statement is not a simple NAME <--> VALUE PAIR
continue # Do not preprocess this
name = elems[1]
value = elems[2] | def get_definitions_pairs(defines_list):
def_pairs = {}
for define_statement_string in defines_list:
elems = re.split(" ", define_statement_string)
if len(elems) != 3: # When define statement is not a simple NAME <--> VALUE PAIR
continue # Do not preprocess this
name = elems[1]
value = elems[2] |
1,933 | https://:@github.com/CovertLab/vivarium.git | 7a3dc56b996fd44f4c028dcd299e2fc78cbb3144 | @@ -133,7 +133,7 @@ class Motor(Analysis):
max_length = max(run_lengths + tumble_lengths)
bins = np.linspace(0, max_length, 10)
logbins = np.logspace(0, np.log10(bins[-1]), len(bins))
- ax5.hist([run_lengths, tumble_lengths], bins=logbins, label=['run_lengths', 'tumble_lengths'], color=['b', 'm'])
+ ax5.hist([run_lengths, tumble_lengths], bins=bins, label=['run_lengths', 'tumble_lengths'], color=['b', 'm'])
# plot expected values
ax5.axvline(x=expected_tumble, color='m', linestyle='dashed', label='expected tumble')
| vivarium/analysis/motor.py | ReplaceText(target='bins' @(136,53)->(136,60)) | class Motor(Analysis):
max_length = max(run_lengths + tumble_lengths)
bins = np.linspace(0, max_length, 10)
logbins = np.logspace(0, np.log10(bins[-1]), len(bins))
ax5.hist([run_lengths, tumble_lengths], bins=logbins, label=['run_lengths', 'tumble_lengths'], color=['b', 'm'])
# plot expected values
ax5.axvline(x=expected_tumble, color='m', linestyle='dashed', label='expected tumble') | class Motor(Analysis):
max_length = max(run_lengths + tumble_lengths)
bins = np.linspace(0, max_length, 10)
logbins = np.logspace(0, np.log10(bins[-1]), len(bins))
ax5.hist([run_lengths, tumble_lengths], bins=bins, label=['run_lengths', 'tumble_lengths'], color=['b', 'm'])
# plot expected values
ax5.axvline(x=expected_tumble, color='m', linestyle='dashed', label='expected tumble') |
1,934 | https://:@github.com/CovertLab/vivarium.git | 365a08b14a6d25915605e68c586933f702b4994c | @@ -43,7 +43,7 @@ class ShepherdControl(ActorControl):
experiment_id, number, agent_type, environment_type))
# boot environment
- self.add_agent(experiment_id, environment_type, lattice_config)
+ self.add_agent(experiment_id, environment_type, actor_config)
time.sleep(10) # wait for the environment to boot
# boot agents
| vivarium/environment/control.py | ReplaceText(target='actor_config' @(46,56)->(46,70)) | class ShepherdControl(ActorControl):
experiment_id, number, agent_type, environment_type))
# boot environment
self.add_agent(experiment_id, environment_type, lattice_config)
time.sleep(10) # wait for the environment to boot
# boot agents | class ShepherdControl(ActorControl):
experiment_id, number, agent_type, environment_type))
# boot environment
self.add_agent(experiment_id, environment_type, actor_config)
time.sleep(10) # wait for the environment to boot
# boot agents |
1,935 | https://:@github.com/CovertLab/vivarium.git | 7b17a044e3d13866f61e7530952f5da27f21e896 | @@ -771,7 +771,7 @@ def load_compartment(composite, boot_config={}):
'emitter': boot_config.get('emitter', 'timeseries'),
'time_step': boot_config.get('time_step', 1.0)})
- return Compartment(processes, states, derivers, options)
+ return Compartment(processes, derivers, states, options)
def simulate_compartment(compartment, settings={}):
| vivarium/compartment/composition.py | ArgSwap(idxs=1<->2 @(774,11)->(774,22)) | def load_compartment(composite, boot_config={}):
'emitter': boot_config.get('emitter', 'timeseries'),
'time_step': boot_config.get('time_step', 1.0)})
return Compartment(processes, states, derivers, options)
def simulate_compartment(compartment, settings={}): | def load_compartment(composite, boot_config={}):
'emitter': boot_config.get('emitter', 'timeseries'),
'time_step': boot_config.get('time_step', 1.0)})
return Compartment(processes, derivers, states, options)
def simulate_compartment(compartment, settings={}): |
1,936 | https://:@github.com/CovertLab/vivarium.git | 049723f40d7994f2511f9b52e72082152a88cc3d | @@ -67,7 +67,7 @@ def plot_signal_transduction(timeseries, out_dir='out', filename='signal_transdu
ax2.tick_params(right=False, top=False)
ax2.set_ylabel("cluster activity \n P(on)", fontsize=10)
- ax2.set_xticklabels([])
+ ax3.set_xticklabels([])
ax3.spines['right'].set_visible(False)
ax3.spines['top'].set_visible(False)
ax3.tick_params(right=False, top=False)
| vivarium/plots/chemotaxis_flagella.py | ReplaceText(target='ax3' @(70,4)->(70,7)) | def plot_signal_transduction(timeseries, out_dir='out', filename='signal_transdu
ax2.tick_params(right=False, top=False)
ax2.set_ylabel("cluster activity \n P(on)", fontsize=10)
ax2.set_xticklabels([])
ax3.spines['right'].set_visible(False)
ax3.spines['top'].set_visible(False)
ax3.tick_params(right=False, top=False) | def plot_signal_transduction(timeseries, out_dir='out', filename='signal_transdu
ax2.tick_params(right=False, top=False)
ax2.set_ylabel("cluster activity \n P(on)", fontsize=10)
ax3.set_xticklabels([])
ax3.spines['right'].set_visible(False)
ax3.spines['top'].set_visible(False)
ax3.tick_params(right=False, top=False) |
1,937 | https://:@github.com/wtsi-hgi/gitlab-build-variables.git | da55a3494cb0c375b5efb9f5248f0f3774d9c0c5 | @@ -63,7 +63,7 @@ class ProjectVariablesUpdater(VariablesUpdater):
_logger.info("Set variables for \"%s\": %s" % (self.project, variables))
def update_required(self) -> bool:
- return self._variables_manager.get_variables() == self._get_required_variables()
+ return self._variables_manager.get_variables() != self._get_required_variables()
def _get_required_variables(self) -> Dict[str, str]:
"""
| gitlabbuildvariables/updater.py | ReplaceText(target='!=' @(66,55)->(66,57)) | class ProjectVariablesUpdater(VariablesUpdater):
_logger.info("Set variables for \"%s\": %s" % (self.project, variables))
def update_required(self) -> bool:
return self._variables_manager.get_variables() == self._get_required_variables()
def _get_required_variables(self) -> Dict[str, str]:
""" | class ProjectVariablesUpdater(VariablesUpdater):
_logger.info("Set variables for \"%s\": %s" % (self.project, variables))
def update_required(self) -> bool:
return self._variables_manager.get_variables() != self._get_required_variables()
def _get_required_variables(self) -> Dict[str, str]:
""" |
1,938 | https://:@github.com/felfel/logging-py.git | 27cc7bab2404993e04a07ee8f676fb646bd0f640 | @@ -115,7 +115,7 @@ class LogEntryParser:
Logger.data_property_placeholder_name: data # here we set the data property with a special key
}
- if log_entry.message is not "" or log_entry.message is not None:
+ if log_entry.message is not "" and log_entry.message is not None:
dto["message"] = log_entry.message
if exception_info is not None:
| loggingpy/log.py | ReplaceText(target='and' @(118,39)->(118,41)) | class LogEntryParser:
Logger.data_property_placeholder_name: data # here we set the data property with a special key
}
if log_entry.message is not "" or log_entry.message is not None:
dto["message"] = log_entry.message
if exception_info is not None: | class LogEntryParser:
Logger.data_property_placeholder_name: data # here we set the data property with a special key
}
if log_entry.message is not "" and log_entry.message is not None:
dto["message"] = log_entry.message
if exception_info is not None: |
1,939 | https://:@github.com/urban48/debpackager.git | fa24b2f2eb79059ec65edb8fc32eb1aaa46e689c | @@ -94,7 +94,7 @@ class GeneralPackage(object):
install_path=deb.get('install_path'),
dependencies=deb_dependencies,
description=deb.get('description'),
- excludes=project.get('excludes', []))
+ excludes=deb.get('excludes', []))
generated_builds.append(dpm.generate())
return generated_builds
| debpackager/packages/general_package.py | ReplaceText(target='deb' @(97,31)->(97,38)) | class GeneralPackage(object):
install_path=deb.get('install_path'),
dependencies=deb_dependencies,
description=deb.get('description'),
excludes=project.get('excludes', []))
generated_builds.append(dpm.generate())
return generated_builds | class GeneralPackage(object):
install_path=deb.get('install_path'),
dependencies=deb_dependencies,
description=deb.get('description'),
excludes=deb.get('excludes', []))
generated_builds.append(dpm.generate())
return generated_builds |
1,940 | https://:@github.com/noobermin/lspreader.git | 4ab4049cfe95c5927f01406bd6a4653335752904 | @@ -88,7 +88,7 @@ if __name__ == "__main__":
else:
angleopt = None;
KE, good = totalKE(d, ecut, angleopt, return_bools=True);
- LE = laserE(E_0, w, T, dim=dim);
+ LE = laserE(E_0, T, w, dim=dim);
totalq = d['q'][good].sum()*1e12;
print('total charge: {} {}'.format(totalq,'pC/cm' if opts['--2D'] else 'pC'));
print("total energy: {} J".format(KE));
| pext/quantities.py | ArgSwap(idxs=1<->2 @(91,9)->(91,15)) | if __name__ == "__main__":
else:
angleopt = None;
KE, good = totalKE(d, ecut, angleopt, return_bools=True);
LE = laserE(E_0, w, T, dim=dim);
totalq = d['q'][good].sum()*1e12;
print('total charge: {} {}'.format(totalq,'pC/cm' if opts['--2D'] else 'pC'));
print("total energy: {} J".format(KE)); | if __name__ == "__main__":
else:
angleopt = None;
KE, good = totalKE(d, ecut, angleopt, return_bools=True);
LE = laserE(E_0, T, w, dim=dim);
totalq = d['q'][good].sum()*1e12;
print('total charge: {} {}'.format(totalq,'pC/cm' if opts['--2D'] else 'pC'));
print("total energy: {} J".format(KE)); |
1,941 | https://:@github.com/andycasey/sick.git | 580f4072957817f483ed79de3f6b5208a07cfc42 | @@ -516,7 +516,7 @@ def load_aaomega_multispec(filename, fill_value=-1, clean=True):
for i, index in enumerate(program_indices):
headers = base_headers.copy()
- headers['FIBRE_NUM'] = i + 1
+ headers['FIBRE_NUM'] = index + 1
for header in req_fibre_headers:
headers[header] = image[2].data[index][header]
| scope/specutils.py | ReplaceText(target='index' @(519,31)->(519,32)) | def load_aaomega_multispec(filename, fill_value=-1, clean=True):
for i, index in enumerate(program_indices):
headers = base_headers.copy()
headers['FIBRE_NUM'] = i + 1
for header in req_fibre_headers:
headers[header] = image[2].data[index][header] | def load_aaomega_multispec(filename, fill_value=-1, clean=True):
for i, index in enumerate(program_indices):
headers = base_headers.copy()
headers['FIBRE_NUM'] = index + 1
for header in req_fibre_headers:
headers[header] = image[2].data[index][header] |
1,942 | https://:@github.com/gilsondev/django-faleconosco.git | eb394ea946b658ffe4706620e12a0992e847ae4c | @@ -26,7 +26,7 @@ def form(request, template_name='contato/contato_form.html',
mensagem.update(dict)
# Enviando o email
- enviar_email(email, settings.DEFAULT_FROM_EMAIL, nome,
+ enviar_email(settings.DEFAULT_FROM_EMAIL, email, nome,
assunto, template_email, mensagem)
# Mostra mensagem de sucesso
| contato/views.py | ArgSwap(idxs=0<->1 @(29,8)->(29,20)) | def form(request, template_name='contato/contato_form.html',
mensagem.update(dict)
# Enviando o email
enviar_email(email, settings.DEFAULT_FROM_EMAIL, nome,
assunto, template_email, mensagem)
# Mostra mensagem de sucesso | def form(request, template_name='contato/contato_form.html',
mensagem.update(dict)
# Enviando o email
enviar_email(settings.DEFAULT_FROM_EMAIL, email, nome,
assunto, template_email, mensagem)
# Mostra mensagem de sucesso |
1,943 | https://:@github.com/lijinbio/cmsip.git | a9ac427f65d7fe2dd116b01d2506261686700231 | @@ -115,7 +115,7 @@ def bsmap(config):
def mcall_stat_parse(infile):
with open(infile) as f:
dstr=f.read()
- return float(re.search('bisulfite conversion ratio = ([\d.]+)', f).groups()[0])
+ return float(re.search('bisulfite conversion ratio = ([\d.]+)', dstr).groups()[0])
def mcall_runcmd(infile, outdir, sampleid, reference, numthread, verbose=False):
if os.path.exists(outdir):
| cmsip/cmsip.py | ReplaceText(target='dstr' @(118,65)->(118,66)) | def bsmap(config):
def mcall_stat_parse(infile):
with open(infile) as f:
dstr=f.read()
return float(re.search('bisulfite conversion ratio = ([\d.]+)', f).groups()[0])
def mcall_runcmd(infile, outdir, sampleid, reference, numthread, verbose=False):
if os.path.exists(outdir): | def bsmap(config):
def mcall_stat_parse(infile):
with open(infile) as f:
dstr=f.read()
return float(re.search('bisulfite conversion ratio = ([\d.]+)', dstr).groups()[0])
def mcall_runcmd(infile, outdir, sampleid, reference, numthread, verbose=False):
if os.path.exists(outdir): |
1,944 | https://:@github.com/TwoRavens/raven-metadata-service.git | dae36f1eab88f12cfb14d451124cc9d293f89ce0 | @@ -34,7 +34,7 @@ class PlotValuesUtil(object):
# x-data for the ECDF: x_
x_value = np.sort(data)
- size_data = x_value.size
+ size_data = raw_data.size
# y-data for the ECDF: y
y_value = []
| preprocess/code/plot_values.py | ReplaceText(target='raw_data' @(37,20)->(37,27)) | class PlotValuesUtil(object):
# x-data for the ECDF: x_
x_value = np.sort(data)
size_data = x_value.size
# y-data for the ECDF: y
y_value = []
| class PlotValuesUtil(object):
# x-data for the ECDF: x_
x_value = np.sort(data)
size_data = raw_data.size
# y-data for the ECDF: y
y_value = []
|
1,945 | https://:@github.com/TwoRavens/raven-metadata-service.git | 1c19733c86b514a93342126535de899aed40b40e | @@ -137,7 +137,7 @@ class JobUtil(object):
@staticmethod
def retrieve_rows_csv(request, job, **kwargs):
- if request.method != 'POST':
+ if request.method == 'POST':
print('kwargs', kwargs)
start_row = kwargs.get('start_row')
num_rows = kwargs.get('number_rows')
| preprocess_web/code/ravens_metadata_apps/preprocess_jobs/job_util.py | ReplaceText(target='==' @(140,26)->(140,28)) | class JobUtil(object):
@staticmethod
def retrieve_rows_csv(request, job, **kwargs):
if request.method != 'POST':
print('kwargs', kwargs)
start_row = kwargs.get('start_row')
num_rows = kwargs.get('number_rows') | class JobUtil(object):
@staticmethod
def retrieve_rows_csv(request, job, **kwargs):
if request.method == 'POST':
print('kwargs', kwargs)
start_row = kwargs.get('start_row')
num_rows = kwargs.get('number_rows') |
1,946 | https://:@github.com/Clinical-Genomics/cgbeacon.git | 98f6705d3e6971111831cedfc4926e84880ec341 | @@ -74,7 +74,7 @@ def cli( dataset, vcf, db_connection, qual, ref, use_panel, outfile, customer, s
vcfsamples = _compare_samples(vcfsamples, samples)
## returns a this tuple-> ( total_vars, beacon_vars(type: dict), discaded_vars(type: dict))
- vcf_results = get_variants(vcf_obj, vcfsamples, raw_variants, qual)
+ vcf_results = get_variants(vcf_obj, raw_variants , vcfsamples, qual)
## Print overall results of VCF file parsing to terminal
vars_to_beacon = _print_results(vcf_results, qual)
| cgbeacon/cli/root.py | ArgSwap(idxs=1<->2 @(77,18)->(77,30)) | def cli( dataset, vcf, db_connection, qual, ref, use_panel, outfile, customer, s
vcfsamples = _compare_samples(vcfsamples, samples)
## returns a this tuple-> ( total_vars, beacon_vars(type: dict), discaded_vars(type: dict))
vcf_results = get_variants(vcf_obj, vcfsamples, raw_variants, qual)
## Print overall results of VCF file parsing to terminal
vars_to_beacon = _print_results(vcf_results, qual) | def cli( dataset, vcf, db_connection, qual, ref, use_panel, outfile, customer, s
vcfsamples = _compare_samples(vcfsamples, samples)
## returns a this tuple-> ( total_vars, beacon_vars(type: dict), discaded_vars(type: dict))
vcf_results = get_variants(vcf_obj, raw_variants , vcfsamples, qual)
## Print overall results of VCF file parsing to terminal
vars_to_beacon = _print_results(vcf_results, qual) |
1,947 | https://:@github.com/Clinical-Genomics/cgbeacon.git | 98f6705d3e6971111831cedfc4926e84880ec341 | @@ -38,7 +38,7 @@ def beacon_upload(connection, vcf_path, panel_path, dataset, outfile="", custome
# returns a this tuple-> ( n_total_vars, beacon_vars(type: dict), discaded_vars(type: dict))
### beacon_vars is a disctionary with key --> sample, and value --> list of tuples containing the non-reference variants. Each tuple is defined as: (chr, start, alt_allele)
### discaded_vars is a dictionary with key --> sample and value --> number of discarded vars due to quality for that sample.
- vcf_results = get_variants(panel_filtered_results[0], samples, raw_variants, qual)
+ vcf_results = get_variants(panel_filtered_results[0], raw_variants, samples, qual)
# Insert variants into the beacon. It returns a tuple: (vars_before_upload, vars_after_upload)
beacon_update_result = bare_variants_uploader(connection, dataset, vcf_results, genome_reference)
| cgbeacon/utils/Utility.py | ArgSwap(idxs=1<->2 @(41,18)->(41,30)) | def beacon_upload(connection, vcf_path, panel_path, dataset, outfile="", custome
# returns a this tuple-> ( n_total_vars, beacon_vars(type: dict), discaded_vars(type: dict))
### beacon_vars is a disctionary with key --> sample, and value --> list of tuples containing the non-reference variants. Each tuple is defined as: (chr, start, alt_allele)
### discaded_vars is a dictionary with key --> sample and value --> number of discarded vars due to quality for that sample.
vcf_results = get_variants(panel_filtered_results[0], samples, raw_variants, qual)
# Insert variants into the beacon. It returns a tuple: (vars_before_upload, vars_after_upload)
beacon_update_result = bare_variants_uploader(connection, dataset, vcf_results, genome_reference) | def beacon_upload(connection, vcf_path, panel_path, dataset, outfile="", custome
# returns a this tuple-> ( n_total_vars, beacon_vars(type: dict), discaded_vars(type: dict))
### beacon_vars is a disctionary with key --> sample, and value --> list of tuples containing the non-reference variants. Each tuple is defined as: (chr, start, alt_allele)
### discaded_vars is a dictionary with key --> sample and value --> number of discarded vars due to quality for that sample.
vcf_results = get_variants(panel_filtered_results[0], raw_variants, samples, qual)
# Insert variants into the beacon. It returns a tuple: (vars_before_upload, vars_after_upload)
beacon_update_result = bare_variants_uploader(connection, dataset, vcf_results, genome_reference) |
1,948 | https://:@github.com/magistral-io/MagistralPython.git | 29e567448385b02c287ffeb64593ca21d745b23b | @@ -84,7 +84,7 @@ class JsonConverter(object):
for ch in channels:
permissions[int(ch)] = (read, write)
else:
- permissions[int(ch)] = (read, write)
+ permissions[int(channels)] = (read, write)
return permissions;
| src/magistral/client/util/JsonConverter.py | ReplaceText(target='channels' @(87,28)->(87,30)) | class JsonConverter(object):
for ch in channels:
permissions[int(ch)] = (read, write)
else:
permissions[int(ch)] = (read, write)
return permissions;
| class JsonConverter(object):
for ch in channels:
permissions[int(ch)] = (read, write)
else:
permissions[int(channels)] = (read, write)
return permissions;
|
1,949 | https://:@github.com/ebachelet/pyLIMA.git | 1e18750dcdf80430af3b48a8225110bcbdc70447 | @@ -26,7 +26,7 @@ def microlensing_flux_priors(size_dataset, f_source, g_blending):
def microlensing_parameters_limits_priors(parameters, limits):
- for i in xrange(len(parameters)):
+ for i in xrange(len(limits)):
if (parameters[i] > limits[i][1]) | (parameters[i] < limits[i][0]):
| pyLIMA/microlpriors.py | ReplaceText(target='limits' @(29,24)->(29,34)) | def microlensing_flux_priors(size_dataset, f_source, g_blending):
def microlensing_parameters_limits_priors(parameters, limits):
for i in xrange(len(parameters)):
if (parameters[i] > limits[i][1]) | (parameters[i] < limits[i][0]):
| def microlensing_flux_priors(size_dataset, f_source, g_blending):
def microlensing_parameters_limits_priors(parameters, limits):
for i in xrange(len(limits)):
if (parameters[i] > limits[i][1]) | (parameters[i] < limits[i][0]):
|
1,950 | https://:@github.com/ebachelet/pyLIMA.git | 7b358f94c59afc973bce950f1a0051fe74693e80 | @@ -178,7 +178,7 @@ def sort_2lenses_wide_caustics(caustic_points, critical_curves_points):
first_branch = positive_y_branches[0]
second_branch = positive_y_branches[1]
- if np.max((caustic_points[:, first_branch]).real) > np.max((caustic_points[:, second_branch]).real):
+ if np.max((caustic_points[:, first_branch]).real) < np.max((caustic_points[:, second_branch]).real):
central_caustic = np.r_[caustic_points[:, first_branch], np.conj(caustic_points[:, first_branch])[::-1]]
central_cc = np.r_[critical_curves_points[:, first_branch],
| pyLIMA/microlcaustics.py | ReplaceText(target='<' @(181,58)->(181,59)) | def sort_2lenses_wide_caustics(caustic_points, critical_curves_points):
first_branch = positive_y_branches[0]
second_branch = positive_y_branches[1]
if np.max((caustic_points[:, first_branch]).real) > np.max((caustic_points[:, second_branch]).real):
central_caustic = np.r_[caustic_points[:, first_branch], np.conj(caustic_points[:, first_branch])[::-1]]
central_cc = np.r_[critical_curves_points[:, first_branch], | def sort_2lenses_wide_caustics(caustic_points, critical_curves_points):
first_branch = positive_y_branches[0]
second_branch = positive_y_branches[1]
if np.max((caustic_points[:, first_branch]).real) < np.max((caustic_points[:, second_branch]).real):
central_caustic = np.r_[caustic_points[:, first_branch], np.conj(caustic_points[:, first_branch])[::-1]]
central_cc = np.r_[critical_curves_points[:, first_branch], |
1,951 | https://:@github.com/RedFantom/mtTkinter.git | 666f0351850ba1eed400e49ca33b2291cb6f3fb3 | @@ -141,7 +141,7 @@ class _TkAttr(object):
if is_exception:
ex_type, ex_value, ex_tb = response
raise ex_type(ex_value, ex_tb)
- return response_queue
+ return response
def _Tk__init__(self, *args, **kwargs):
| mttkinter/mtTkinter.py | ReplaceText(target='response' @(144,23)->(144,37)) | class _TkAttr(object):
if is_exception:
ex_type, ex_value, ex_tb = response
raise ex_type(ex_value, ex_tb)
return response_queue
def _Tk__init__(self, *args, **kwargs): | class _TkAttr(object):
if is_exception:
ex_type, ex_value, ex_tb = response
raise ex_type(ex_value, ex_tb)
return response
def _Tk__init__(self, *args, **kwargs): |
1,952 | https://:@github.com/wheeler-microfluidics/dmf-device-ui.git | 2443e29f710e516ebb3df7fc56db4f7e56f75893 | @@ -454,7 +454,7 @@ class DmfDeviceViewBase(SlaveView):
# Find the closest corner point in the frame to the starting point.
frame_corner_i = find_closest(slave.df_frame_corners, frame_point_i)
# Find the closest corner point in the canvas to the end point.
- canvas_corner_i = find_closest(slave.df_canvas_corners, end_xy)
+ canvas_corner_i = find_closest(slave.df_canvas_corners, start_xy)
# Save current state of corners to allow undo.
corners_state = {'df_frame_corners':
| dmf_device_ui/view.py | ReplaceText(target='start_xy' @(457,64)->(457,70)) | class DmfDeviceViewBase(SlaveView):
# Find the closest corner point in the frame to the starting point.
frame_corner_i = find_closest(slave.df_frame_corners, frame_point_i)
# Find the closest corner point in the canvas to the end point.
canvas_corner_i = find_closest(slave.df_canvas_corners, end_xy)
# Save current state of corners to allow undo.
corners_state = {'df_frame_corners': | class DmfDeviceViewBase(SlaveView):
# Find the closest corner point in the frame to the starting point.
frame_corner_i = find_closest(slave.df_frame_corners, frame_point_i)
# Find the closest corner point in the canvas to the end point.
canvas_corner_i = find_closest(slave.df_canvas_corners, start_xy)
# Save current state of corners to allow undo.
corners_state = {'df_frame_corners': |
1,953 | https://:@github.com/jisunglim/ethereum-etl.git | f7e7e55441816e291d73a90c3aa19e287b881989 | @@ -247,7 +247,7 @@ while True:
token_transfers = token_transfers_item_exporter.get_items('token_transfer')
enriched_transactions = enrich_transactions(blocks, transactions, receipts)
- if len(enriched_transactions) == len(transactions):
+ if len(enriched_transactions) != len(transactions):
raise ValueError('The number of transactions is wrong ' + str(enriched_transactions))
enriched_logs = enrich_logs(blocks, logs)
if len(enriched_logs) != len(logs):
| stream.py | ReplaceText(target='!=' @(250,38)->(250,40)) | while True:
token_transfers = token_transfers_item_exporter.get_items('token_transfer')
enriched_transactions = enrich_transactions(blocks, transactions, receipts)
if len(enriched_transactions) == len(transactions):
raise ValueError('The number of transactions is wrong ' + str(enriched_transactions))
enriched_logs = enrich_logs(blocks, logs)
if len(enriched_logs) != len(logs): | while True:
token_transfers = token_transfers_item_exporter.get_items('token_transfer')
enriched_transactions = enrich_transactions(blocks, transactions, receipts)
if len(enriched_transactions) != len(transactions):
raise ValueError('The number of transactions is wrong ' + str(enriched_transactions))
enriched_logs = enrich_logs(blocks, logs)
if len(enriched_logs) != len(logs): |
1,954 | https://:@github.com/cpnota/autonomous-learning-library.git | a0debf34fdff31c56e93b572edfe2b1578772c77 | @@ -18,7 +18,7 @@ class TestAccumulatingTraces(unittest.TestCase):
self.basis = FourierBasis(space, 2, 2)
self.approximation = DiscreteLinearApproximation(0.1, self.basis, actions=3)
self.env = Env()
- self.traces = AccumulatingTraces(self.env, self.approximation, 0.5)
+ self.traces = AccumulatingTraces(self.approximation, self.env, 0.5)
def test_init(self):
np.testing.assert_equal(self.traces.call(x), np.array([0, 0, 0]))
| all/approximation/traces/accumulating_test.py | ArgSwap(idxs=0<->1 @(21,18)->(21,36)) | class TestAccumulatingTraces(unittest.TestCase):
self.basis = FourierBasis(space, 2, 2)
self.approximation = DiscreteLinearApproximation(0.1, self.basis, actions=3)
self.env = Env()
self.traces = AccumulatingTraces(self.env, self.approximation, 0.5)
def test_init(self):
np.testing.assert_equal(self.traces.call(x), np.array([0, 0, 0])) | class TestAccumulatingTraces(unittest.TestCase):
self.basis = FourierBasis(space, 2, 2)
self.approximation = DiscreteLinearApproximation(0.1, self.basis, actions=3)
self.env = Env()
self.traces = AccumulatingTraces(self.approximation, self.env, 0.5)
def test_init(self):
np.testing.assert_equal(self.traces.call(x), np.array([0, 0, 0])) |
1,955 | https://:@github.com/cpnota/autonomous-learning-library.git | f6c89200ee016ac98c856254defdaf52cc8ba454 | @@ -18,7 +18,7 @@ class TestLinearFunctionApproximation(unittest.TestCase):
approximation = LinearApproximation(0.1, basis)
x = np.array([0.5, 1])
self.assertEqual(approximation.call(x), 0)
- approximation.update(x, 1)
+ approximation.update(1, x)
self.assertAlmostEqual(approximation.call(x), 0.6)
if __name__ == '__main__':
| all/approximation/state/linear_test.py | ArgSwap(idxs=0<->1 @(21,4)->(21,24)) | class TestLinearFunctionApproximation(unittest.TestCase):
approximation = LinearApproximation(0.1, basis)
x = np.array([0.5, 1])
self.assertEqual(approximation.call(x), 0)
approximation.update(x, 1)
self.assertAlmostEqual(approximation.call(x), 0.6)
if __name__ == '__main__': | class TestLinearFunctionApproximation(unittest.TestCase):
approximation = LinearApproximation(0.1, basis)
x = np.array([0.5, 1])
self.assertEqual(approximation.call(x), 0)
approximation.update(1, x)
self.assertAlmostEqual(approximation.call(x), 0.6)
if __name__ == '__main__': |
1,956 | https://:@github.com/redhog/fcdjangoutils.git | 8302cf9148f8034930d5dca7f46392431a3ed866 | @@ -35,7 +35,7 @@ def duration_verbose(duration):
if minutes != 0:
if not first: res += ", "
- res += _("%d min") % hours;
+ res += _("%d min") % minutes;
first = False
if seconds != 0:
| templatetags/time_tags.py | ReplaceText(target='minutes' @(38,29)->(38,34)) | def duration_verbose(duration):
if minutes != 0:
if not first: res += ", "
res += _("%d min") % hours;
first = False
if seconds != 0: | def duration_verbose(duration):
if minutes != 0:
if not first: res += ", "
res += _("%d min") % minutes;
first = False
if seconds != 0: |
1,957 | https://:@github.com/chairbender/fantasy-football-auction.git | 2f31b12d9fccae46e4ad6f389808f9b93046ad1b | @@ -189,7 +189,7 @@ class Auction:
if self.state != AuctionState.BID:
raise InvalidActionError("Bid was attempted, but it is not currently time to submit bids.")
- elif self.bid > bid:
+ elif self.bid >= bid:
raise InvalidActionError("Bid amount " + str(bid) + " must be greater than current bid of " + str(self.bid))
elif not self.owners[owner_id].can_buy(self.nominee, bid):
raise InvalidActionError("The owner with index " + str(owner_id) +
| fantasy_football_auction/auction.py | ReplaceText(target='>=' @(192,22)->(192,23)) | class Auction:
if self.state != AuctionState.BID:
raise InvalidActionError("Bid was attempted, but it is not currently time to submit bids.")
elif self.bid > bid:
raise InvalidActionError("Bid amount " + str(bid) + " must be greater than current bid of " + str(self.bid))
elif not self.owners[owner_id].can_buy(self.nominee, bid):
raise InvalidActionError("The owner with index " + str(owner_id) + | class Auction:
if self.state != AuctionState.BID:
raise InvalidActionError("Bid was attempted, but it is not currently time to submit bids.")
elif self.bid >= bid:
raise InvalidActionError("Bid amount " + str(bid) + " must be greater than current bid of " + str(self.bid))
elif not self.owners[owner_id].can_buy(self.nominee, bid):
raise InvalidActionError("The owner with index " + str(owner_id) + |
1,958 | https://:@github.com/slazarov/python-signalr-client.git | 33f58244b15ab6056cb0a0ad4ad53b040aacb8e8 | @@ -40,7 +40,7 @@ class HubClient(object):
if hub.lower() == self.name.lower():
method = inner_data['M']
message = inner_data['A']
- await self.__handlers[method](message)
+ await self.__handlers[method](inner_data)
connection.received += handle
| signalr_aio/hubs/_hub.py | ReplaceText(target='inner_data' @(43,50)->(43,57)) | class HubClient(object):
if hub.lower() == self.name.lower():
method = inner_data['M']
message = inner_data['A']
await self.__handlers[method](message)
connection.received += handle
| class HubClient(object):
if hub.lower() == self.name.lower():
method = inner_data['M']
message = inner_data['A']
await self.__handlers[method](inner_data)
connection.received += handle
|
1,959 | https://:@github.com/slazarov/python-signalr-client.git | afdb4f05445acdfa4b1c9dfbbfccf5fb990cd6b0 | @@ -40,7 +40,7 @@ class HubClient(object):
if hub.lower() == self.name.lower():
method = inner_data['M']
message = inner_data['A']
- await self.__handlers[method](inner_data)
+ await self.__handlers[method](message)
connection.received += handle
| signalr_aio/hubs/_hub.py | ReplaceText(target='message' @(43,50)->(43,60)) | class HubClient(object):
if hub.lower() == self.name.lower():
method = inner_data['M']
message = inner_data['A']
await self.__handlers[method](inner_data)
connection.received += handle
| class HubClient(object):
if hub.lower() == self.name.lower():
method = inner_data['M']
message = inner_data['A']
await self.__handlers[method](message)
connection.received += handle
|
1,960 | https://:@gitlab.com/nsbl/nsbl.git | d853aa6323d0ba9f14cd25ae8f76b67b8376d422 | @@ -232,7 +232,7 @@ class NsblTasklist(Frklist):
elif res_type == "ansible-tasklist":
tasklists = res_urls
- if isinstance(tasklist, string_types):
+ if isinstance(tasklists, string_types):
tasklists = [tasklists]
for tl_name in tasklists:
| src/nsbl/nsbl_tasklist.py | ReplaceText(target='tasklists' @(235,34)->(235,42)) | class NsblTasklist(Frklist):
elif res_type == "ansible-tasklist":
tasklists = res_urls
if isinstance(tasklist, string_types):
tasklists = [tasklists]
for tl_name in tasklists: | class NsblTasklist(Frklist):
elif res_type == "ansible-tasklist":
tasklists = res_urls
if isinstance(tasklists, string_types):
tasklists = [tasklists]
for tl_name in tasklists: |
1,961 | https://:@github.com/vishalsubbiah/darkchess.git | 399c8bde5e12f905a47ee06c6a4ec8297e57cf96 | @@ -14,7 +14,7 @@ class Board(object):
def __init__(self, starting_board=None):
self.board = np.empty((8,8),dtype=Piece)
- if starting_board is not None:
+ if starting_board is None:
self._start_pos()
else:
self.board = starting_board
| src/board.py | ReplaceText(target=' is ' @(17,25)->(17,33)) | class Board(object):
def __init__(self, starting_board=None):
self.board = np.empty((8,8),dtype=Piece)
if starting_board is not None:
self._start_pos()
else:
self.board = starting_board | class Board(object):
def __init__(self, starting_board=None):
self.board = np.empty((8,8),dtype=Piece)
if starting_board is None:
self._start_pos()
else:
self.board = starting_board |
1,962 | https://:@github.com/dongkai1993/social-core.git | d1d23e7e3cf4364c0d35289290b27787b84f5211 | @@ -50,7 +50,7 @@ def sanitize_redirect(host, redirect_to):
"""
# Quick sanity check.
if not redirect_to or \
- not isinstance(redirect_to, six.string_types) and \
+ not isinstance(redirect_to, six.string_types) or \
getattr(redirect_to, 'decode', None) and \
not isinstance(redirect_to.decode(), six.string_types):
return None
| social/utils.py | ReplaceText(target='or' @(53,53)->(53,56)) | def sanitize_redirect(host, redirect_to):
"""
# Quick sanity check.
if not redirect_to or \
not isinstance(redirect_to, six.string_types) and \
getattr(redirect_to, 'decode', None) and \
not isinstance(redirect_to.decode(), six.string_types):
return None | def sanitize_redirect(host, redirect_to):
"""
# Quick sanity check.
if not redirect_to or \
not isinstance(redirect_to, six.string_types) or \
getattr(redirect_to, 'decode', None) and \
not isinstance(redirect_to.decode(), six.string_types):
return None |
1,963 | https://:@github.com/dongkai1993/social-core.git | 7d0628e7a756526b50449435eb02b2806e815755 | @@ -132,7 +132,7 @@ def partial_pipeline_data(strategy, user, *args, **kwargs):
kwargs.setdefault('user', user)
kwargs.setdefault('request', strategy.request)
kwargs.update(xkwargs)
- return idx, backend, xargs, xkwargs
+ return idx, backend, xargs, kwargs
def build_absolute_uri(host_url, path=None):
| social/utils.py | ReplaceText(target='kwargs' @(135,36)->(135,43)) | def partial_pipeline_data(strategy, user, *args, **kwargs):
kwargs.setdefault('user', user)
kwargs.setdefault('request', strategy.request)
kwargs.update(xkwargs)
return idx, backend, xargs, xkwargs
def build_absolute_uri(host_url, path=None): | def partial_pipeline_data(strategy, user, *args, **kwargs):
kwargs.setdefault('user', user)
kwargs.setdefault('request', strategy.request)
kwargs.update(xkwargs)
return idx, backend, xargs, kwargs
def build_absolute_uri(host_url, path=None): |
1,964 | https://:@github.com/dongkai1993/social-core.git | d53529b57f0a4992889ad490e5314a2244155afa | @@ -27,7 +27,7 @@ class SocialAuthExceptionMiddleware(object):
return
if isinstance(exception, SocialAuthBaseException):
- backend_name = strategy.backend.name
+ backend_name = request.backend.name
message = self.get_message(request, exception)
url = self.get_redirect_uri(request, exception)
try:
| social/apps/django_app/middleware.py | ReplaceText(target='request' @(30,27)->(30,35)) | class SocialAuthExceptionMiddleware(object):
return
if isinstance(exception, SocialAuthBaseException):
backend_name = strategy.backend.name
message = self.get_message(request, exception)
url = self.get_redirect_uri(request, exception)
try: | class SocialAuthExceptionMiddleware(object):
return
if isinstance(exception, SocialAuthBaseException):
backend_name = request.backend.name
message = self.get_message(request, exception)
url = self.get_redirect_uri(request, exception)
try: |
1,965 | https://:@github.com/barseghyanartur/django-dummy-thumbnails.git | a97e0e6a75b3408484b736c541794e489b436f2a | @@ -27,7 +27,7 @@ def get_setting(setting, override=None):
if hasattr(settings, attr_name):
value = getattr(settings, attr_name)
else:
- if hasattr(defaults, attr_name):
+ if hasattr(defaults, setting):
value = getattr(defaults, setting)
else:
return override
| src/dummy_thumbnails/conf.py | ReplaceText(target='setting' @(30,29)->(30,38)) | def get_setting(setting, override=None):
if hasattr(settings, attr_name):
value = getattr(settings, attr_name)
else:
if hasattr(defaults, attr_name):
value = getattr(defaults, setting)
else:
return override | def get_setting(setting, override=None):
if hasattr(settings, attr_name):
value = getattr(settings, attr_name)
else:
if hasattr(defaults, setting):
value = getattr(defaults, setting)
else:
return override |
1,966 | https://:@github.com/phil1425/jupyter-pc.git | d96265319699f32e575280623e0b67ec588214d6 | @@ -48,7 +48,7 @@ def fit(data_x, data_y, sigma_x=None, sigma_y=None, func=None, beta=[1., 0.], *a
if type(data_x[0]) in ucvar:
values_x = [d.n for d in data_x]
- sigma_x = [d.s if d.s!=0 else 1e-5 for d in data_y]
+ sigma_x = [d.s if d.s!=0 else 1e-5 for d in data_x]
elif type(data_x[0]) in [float, int]:
values_x = data_x
| jupyterpc/jupyterpc.py | ReplaceText(target='data_x' @(51,52)->(51,58)) | def fit(data_x, data_y, sigma_x=None, sigma_y=None, func=None, beta=[1., 0.], *a
if type(data_x[0]) in ucvar:
values_x = [d.n for d in data_x]
sigma_x = [d.s if d.s!=0 else 1e-5 for d in data_y]
elif type(data_x[0]) in [float, int]:
values_x = data_x
| def fit(data_x, data_y, sigma_x=None, sigma_y=None, func=None, beta=[1., 0.], *a
if type(data_x[0]) in ucvar:
values_x = [d.n for d in data_x]
sigma_x = [d.s if d.s!=0 else 1e-5 for d in data_x]
elif type(data_x[0]) in [float, int]:
values_x = data_x
|
1,967 | https://:@github.com/mrshu/python-imhdsk-api.git | bbd3b584f1ed575ec2cb6493e4e99de22038b8bf | @@ -93,6 +93,6 @@ def routes(start, dest, city='ba'):
route.begin_time = route.drives[0].begin_time
route.end_time = route.drives[-1].end_time
- route.append(route)
+ routes.append(route)
return routes
| imhdsk/__init__.py | ReplaceText(target='routes' @(96,8)->(96,13)) | def routes(start, dest, city='ba'):
route.begin_time = route.drives[0].begin_time
route.end_time = route.drives[-1].end_time
route.append(route)
return routes | def routes(start, dest, city='ba'):
route.begin_time = route.drives[0].begin_time
route.end_time = route.drives[-1].end_time
routes.append(route)
return routes |
1,968 | https://:@github.com/juliusvonkohout/sparkmagic.git | 54804b9adb02f02e5fccca8c21bcfd390426098e | @@ -50,7 +50,7 @@ class UserCommandParser(object):
# When no magic, add run command
if not first_line.startswith("%"):
- first_line = "%{} {}".format(UserCommandParser.run_command, code)
+ first_line = "%{} {}".format(UserCommandParser.run_command, first_line)
# Remove percentage sign
first_line = first_line[1:]
| remotespark/wrapperkernel/usercommandparser.py | ReplaceText(target='first_line' @(53,72)->(53,76)) | class UserCommandParser(object):
# When no magic, add run command
if not first_line.startswith("%"):
first_line = "%{} {}".format(UserCommandParser.run_command, code)
# Remove percentage sign
first_line = first_line[1:] | class UserCommandParser(object):
# When no magic, add run command
if not first_line.startswith("%"):
first_line = "%{} {}".format(UserCommandParser.run_command, first_line)
# Remove percentage sign
first_line = first_line[1:] |
1,969 | https://:@github.com/morinted/plover_layout_display.git | 8f071445dd2da69bfec2f5caf184627e713fd395 | @@ -58,7 +58,7 @@ class LayoutDisplayView(QGraphicsView):
if key.label:
label = QGraphicsTextItem(key.label)
label.setFont(font)
- label.setDefaultTextColor(QColor(steno_layout.font_color))
+ label.setDefaultTextColor(QColor(key.font_color))
label_rect = label.boundingRect()
label_rect.moveCenter(path.boundingRect().center())
| layout_display/layout_graphics.py | ReplaceText(target='key' @(61,49)->(61,61)) | class LayoutDisplayView(QGraphicsView):
if key.label:
label = QGraphicsTextItem(key.label)
label.setFont(font)
label.setDefaultTextColor(QColor(steno_layout.font_color))
label_rect = label.boundingRect()
label_rect.moveCenter(path.boundingRect().center()) | class LayoutDisplayView(QGraphicsView):
if key.label:
label = QGraphicsTextItem(key.label)
label.setFont(font)
label.setDefaultTextColor(QColor(key.font_color))
label_rect = label.boundingRect()
label_rect.moveCenter(path.boundingRect().center()) |
1,970 | https://:@github.com/JulienPeloton/s4cmb.git | c4533367bf725c8486dc2140f841a8db649887f4 | @@ -445,7 +445,7 @@ class HealpixFitsMap():
alm = hp.map2alm([self.I,self.Q,self.U], self.lmax)
Elm=alm[1]
Blm=alm[2]
- lmax=hp.Alm.getlmax(alm.size)
+ lmax=hp.Alm.getlmax(Elm.size)
if 'P1' in self.derivatives_type:
out = alm2map_spin_der1([Elm,Blm], self.nside_in, 2)
self.dQdt =out[1][0]
| s4cmb/input_sky.py | ReplaceText(target='Elm' @(448,28)->(448,31)) | class HealpixFitsMap():
alm = hp.map2alm([self.I,self.Q,self.U], self.lmax)
Elm=alm[1]
Blm=alm[2]
lmax=hp.Alm.getlmax(alm.size)
if 'P1' in self.derivatives_type:
out = alm2map_spin_der1([Elm,Blm], self.nside_in, 2)
self.dQdt =out[1][0] | class HealpixFitsMap():
alm = hp.map2alm([self.I,self.Q,self.U], self.lmax)
Elm=alm[1]
Blm=alm[2]
lmax=hp.Alm.getlmax(Elm.size)
if 'P1' in self.derivatives_type:
out = alm2map_spin_der1([Elm,Blm], self.nside_in, 2)
self.dQdt =out[1][0] |
1,971 | https://:@github.com/tjstretchalot/pympanim.git | ac45774fcbc0532095c17be74fdabe8186879ed6 | @@ -42,7 +42,7 @@ def find_child(ends_arr: typing.List[float],
last = 0
for i, etime in enumerate(ends_arr):
if time < etime:
- return i, etime - last
+ return i, time - last
last = etime
if time == last:
return len(ends_arr) - 1, 0
| pympanim/utils.py | ReplaceText(target='time' @(45,22)->(45,27)) | def find_child(ends_arr: typing.List[float],
last = 0
for i, etime in enumerate(ends_arr):
if time < etime:
return i, etime - last
last = etime
if time == last:
return len(ends_arr) - 1, 0 | def find_child(ends_arr: typing.List[float],
last = 0
for i, etime in enumerate(ends_arr):
if time < etime:
return i, time - last
last = etime
if time == last:
return len(ends_arr) - 1, 0 |
1,972 | https://:@github.com/j-walker23/cattrs.git | 416f032481f9eca1867a85a0efa989595d7e44bf | @@ -347,7 +347,7 @@ class Converter(object):
# Check the union registry first.
handler = self._union_registry.get(union)
if handler is not None:
- return handler(union, obj)
+ return handler(obj, union)
# Unions with NoneType in them are basically optionals.
union_params = union.__args__
| cattr/converters.py | ArgSwap(idxs=0<->1 @(350,19)->(350,26)) | class Converter(object):
# Check the union registry first.
handler = self._union_registry.get(union)
if handler is not None:
return handler(union, obj)
# Unions with NoneType in them are basically optionals.
union_params = union.__args__ | class Converter(object):
# Check the union registry first.
handler = self._union_registry.get(union)
if handler is not None:
return handler(obj, union)
# Unions with NoneType in them are basically optionals.
union_params = union.__args__ |
1,973 | https://:@github.com/mitchnegus/pyleiades.git | 85a44d967870cd395675d594ea56f1d0a18748e5 | @@ -42,7 +42,7 @@ class EClass:
data = load_dataset(dataset_date=data_date,dataset_type=stat_type)
# Isolate this energy's data, separate frequencies, and format the data
- self.E_data = self._isolate_energy(data,E_code)
+ self.E_data = self._isolate_energy(E_code,data)
self.monthly_data, self.yearly_data = self._sep_freqs(self.E_data)
for data_df in self.monthly_data,self.yearly_data:
data_df.set_index('Date_code',inplace=True)
| main/eclass.py | ArgSwap(idxs=0<->1 @(45,22)->(45,42)) | class EClass:
data = load_dataset(dataset_date=data_date,dataset_type=stat_type)
# Isolate this energy's data, separate frequencies, and format the data
self.E_data = self._isolate_energy(data,E_code)
self.monthly_data, self.yearly_data = self._sep_freqs(self.E_data)
for data_df in self.monthly_data,self.yearly_data:
data_df.set_index('Date_code',inplace=True) | class EClass:
data = load_dataset(dataset_date=data_date,dataset_type=stat_type)
# Isolate this energy's data, separate frequencies, and format the data
self.E_data = self._isolate_energy(E_code,data)
self.monthly_data, self.yearly_data = self._sep_freqs(self.E_data)
for data_df in self.monthly_data,self.yearly_data:
data_df.set_index('Date_code',inplace=True) |
1,974 | https://:@github.com/vgalisson/pySankey.git | 44c01d55c6132a003adae938132f9f5bcc4e6b32 | @@ -158,7 +158,7 @@ def sankey(
if len(rightLabels) == 0:
rightLabels = pd.Series(dataFrame.right.unique()).unique()
else:
- check_data_matches_labels(leftLabels, dataFrame["right"], "right")
+ check_data_matches_labels(rightLabels, dataFrame["right"], "right")
# If no colorDict given, make one
if colorDict is None:
colorDict = {}
| pysankey/sankey.py | ReplaceText(target='rightLabels' @(161,34)->(161,44)) | def sankey(
if len(rightLabels) == 0:
rightLabels = pd.Series(dataFrame.right.unique()).unique()
else:
check_data_matches_labels(leftLabels, dataFrame["right"], "right")
# If no colorDict given, make one
if colorDict is None:
colorDict = {} | def sankey(
if len(rightLabels) == 0:
rightLabels = pd.Series(dataFrame.right.unique()).unique()
else:
check_data_matches_labels(rightLabels, dataFrame["right"], "right")
# If no colorDict given, make one
if colorDict is None:
colorDict = {} |
1,975 | https://:@github.com/kshitij10496/lexico.git | a47f45857e2c389833e3f4d3f151b44973520714 | @@ -52,7 +52,7 @@ def handle_word(word):
else:
word_object = fetch_word(word)
click.echo_via_pager(word_object.stringify())
- word_save_status = save_word(word)
+ word_save_status = save_word(word_object)
if word_save_status:
click.echo('{} has been added to your personal dictionary.'.format(word))
else:
| familiarize/cli.py | ReplaceText(target='word_object' @(55,37)->(55,41)) | def handle_word(word):
else:
word_object = fetch_word(word)
click.echo_via_pager(word_object.stringify())
word_save_status = save_word(word)
if word_save_status:
click.echo('{} has been added to your personal dictionary.'.format(word))
else: | def handle_word(word):
else:
word_object = fetch_word(word)
click.echo_via_pager(word_object.stringify())
word_save_status = save_word(word_object)
if word_save_status:
click.echo('{} has been added to your personal dictionary.'.format(word))
else: |
1,976 | https://:@github.com/speedcell4/aku.git | b96231d5ae8da4987bdf350ab8b8b4bbcf5f8059 | @@ -19,7 +19,7 @@ class Tp(object, metaclass=ABCMeta):
origin = get_origin(tp)
if origin is None and args == ():
- return PrimitiveTp(origin)
+ return PrimitiveTp(tp)
if origin is list and len(args) == 1:
return ListTp(origin, cls[args[0]])
if origin is tuple:
| aku/tp.py | ReplaceText(target='tp' @(22,31)->(22,37)) | class Tp(object, metaclass=ABCMeta):
origin = get_origin(tp)
if origin is None and args == ():
return PrimitiveTp(origin)
if origin is list and len(args) == 1:
return ListTp(origin, cls[args[0]])
if origin is tuple: | class Tp(object, metaclass=ABCMeta):
origin = get_origin(tp)
if origin is None and args == ():
return PrimitiveTp(tp)
if origin is list and len(args) == 1:
return ListTp(origin, cls[args[0]])
if origin is tuple: |
1,977 | https://:@github.com/rembish/cfb.git | 220ec866dbbae13cebbff3e5ba2da95cf433ef32 | @@ -76,7 +76,7 @@ class CfbIO(FileIO, MaybeDefected, ByteHelpers):
sector_size = self.header.sector_size // 4
sector = self.header.minifat_sector_start
- while sector != ENDOFCHAIN and (current + 1) * sector_size <= current:
+ while sector != ENDOFCHAIN and (position + 1) * sector_size <= current:
sector = self.next_fat(sector)
position += 1
| cfb/__init__.py | ReplaceText(target='position' @(79,40)->(79,47)) | class CfbIO(FileIO, MaybeDefected, ByteHelpers):
sector_size = self.header.sector_size // 4
sector = self.header.minifat_sector_start
while sector != ENDOFCHAIN and (current + 1) * sector_size <= current:
sector = self.next_fat(sector)
position += 1
| class CfbIO(FileIO, MaybeDefected, ByteHelpers):
sector_size = self.header.sector_size // 4
sector = self.header.minifat_sector_start
while sector != ENDOFCHAIN and (position + 1) * sector_size <= current:
sector = self.next_fat(sector)
position += 1
|
1,978 | https://:@github.com/acgt-tax-consultants/orchard.git | 05d5b3ca4b5b6eaf6da380b8d5655b6f8d10342c | @@ -58,7 +58,7 @@ def build(link_file_path, config_file_path, output):
try:
link_file = LinkFile(link_file_path)
config_file = ConfigFile(config_file_path, True)
- if validate(link_file_path, config_file_path):
+ if not validate(link_file_path, config_file_path):
click.secho('Invalid configuration file.', fg='red', err=True)
click.get_current_context().exit(1)
| orchard/cli.py | ReplaceText(target='not ' @(61,11)->(61,11)) | def build(link_file_path, config_file_path, output):
try:
link_file = LinkFile(link_file_path)
config_file = ConfigFile(config_file_path, True)
if validate(link_file_path, config_file_path):
click.secho('Invalid configuration file.', fg='red', err=True)
click.get_current_context().exit(1)
| def build(link_file_path, config_file_path, output):
try:
link_file = LinkFile(link_file_path)
config_file = ConfigFile(config_file_path, True)
if not validate(link_file_path, config_file_path):
click.secho('Invalid configuration file.', fg='red', err=True)
click.get_current_context().exit(1)
|
1,979 | https://:@github.com/ostdotcom/ost-kyc-sdk-python.git | c348eb67f5beb94e238746b6c52987c10eb53542 | @@ -54,7 +54,7 @@ class HTTPHelper:
#
def verify_required(self):
if self.urlparse()(self.api_base_url).scheme == "http":
- return True
+ return False
return True
#
| ost_kyc_sdk_python/util/http_helper.py | ReplaceText(target='False' @(57,19)->(57,23)) | class HTTPHelper:
#
def verify_required(self):
if self.urlparse()(self.api_base_url).scheme == "http":
return True
return True
# | class HTTPHelper:
#
def verify_required(self):
if self.urlparse()(self.api_base_url).scheme == "http":
return False
return True
# |
1,980 | https://:@github.com/IBM/yaps.git | 43d298bdbf36cb2f1dde75a4d85a4a3ee66aff7a | @@ -516,7 +516,7 @@ class Slice(Expression):
# is this an operator precedence issue?
if self.lower:
self.to_stan_prec(self.lower, acc, indent)
- if self.lower and self.upper:
+ if self.lower or self.upper:
acc += self.mkString(":")
if self.upper:
self.to_stan_prec(self.upper, acc, indent)
| yaps/ir.py | ReplaceText(target='or' @(519,22)->(519,25)) | class Slice(Expression):
# is this an operator precedence issue?
if self.lower:
self.to_stan_prec(self.lower, acc, indent)
if self.lower and self.upper:
acc += self.mkString(":")
if self.upper:
self.to_stan_prec(self.upper, acc, indent) | class Slice(Expression):
# is this an operator precedence issue?
if self.lower:
self.to_stan_prec(self.lower, acc, indent)
if self.lower or self.upper:
acc += self.mkString(":")
if self.upper:
self.to_stan_prec(self.upper, acc, indent) |
1,981 | https://:@github.com/larsyunker/PythoMS.git | 9d393ed6083fe08e3f58439c99cedd084571df2f | @@ -552,7 +552,7 @@ def estimated_exact_mass(
"""
# narrow range to that of the isotope pattern
l = bisect_left(x, simmin - lookwithin)
- r = bisect_right(x, simmax - lookwithin)
+ r = bisect_right(x, simmax + lookwithin)
locmax = max(y[l:r]) # find local max in that range
for ind, val in enumerate(y):
if val == locmax: # if the y-value equals the local max
| pythoms/tome.py | ReplaceText(target='+' @(555,31)->(555,32)) | def estimated_exact_mass(
"""
# narrow range to that of the isotope pattern
l = bisect_left(x, simmin - lookwithin)
r = bisect_right(x, simmax - lookwithin)
locmax = max(y[l:r]) # find local max in that range
for ind, val in enumerate(y):
if val == locmax: # if the y-value equals the local max | def estimated_exact_mass(
"""
# narrow range to that of the isotope pattern
l = bisect_left(x, simmin - lookwithin)
r = bisect_right(x, simmax + lookwithin)
locmax = max(y[l:r]) # find local max in that range
for ind, val in enumerate(y):
if val == locmax: # if the y-value equals the local max |
1,982 | https://:@github.com/kcl-tscm/mff.git | 0c7f60c26344c4a68f8e06b518e3ef7b0b11c834 | @@ -613,7 +613,7 @@ class Sampling(object):
SMAE = np.std(np.sqrt(np.sum(np.square(error), axis=1)))
RMSE = np.sqrt(np.mean((error) ** 2))
else:
- m.fit_energy(train_confs, train_forces)
+ m.fit_energy(train_confs, train_energy)
y_hat = m.predict_energy(self.x)
error = y_hat - self.y
MAE = np.mean(np.abs(error))
| mff/advanced_sampling.py | ReplaceText(target='train_energy' @(616,38)->(616,50)) | class Sampling(object):
SMAE = np.std(np.sqrt(np.sum(np.square(error), axis=1)))
RMSE = np.sqrt(np.mean((error) ** 2))
else:
m.fit_energy(train_confs, train_forces)
y_hat = m.predict_energy(self.x)
error = y_hat - self.y
MAE = np.mean(np.abs(error)) | class Sampling(object):
SMAE = np.std(np.sqrt(np.sum(np.square(error), axis=1)))
RMSE = np.sqrt(np.mean((error) ** 2))
else:
m.fit_energy(train_confs, train_energy)
y_hat = m.predict_energy(self.x)
error = y_hat - self.y
MAE = np.mean(np.abs(error)) |
1,983 | https://:@github.com/kcl-tscm/mff.git | 0dd4e341b2fb23bb8b39a267ccb32728c841650f | @@ -39,7 +39,7 @@ def eam_descriptor(dist, norm, rc, alpha, r0):
try:
dqdrij = -1/(2*q) * (dq1*q2 + q1*dq2)
except ZeroDivisionError:
- dqdrij = np.zeros(len(q))
+ dqdrij = np.zeros(len(q1))
dqdr = -dqdrij[:, None]*norm
return q, dqdr
| mff/calculators.py | ReplaceText(target='q1' @(42,30)->(42,31)) | def eam_descriptor(dist, norm, rc, alpha, r0):
try:
dqdrij = -1/(2*q) * (dq1*q2 + q1*dq2)
except ZeroDivisionError:
dqdrij = np.zeros(len(q))
dqdr = -dqdrij[:, None]*norm
return q, dqdr
| def eam_descriptor(dist, norm, rc, alpha, r0):
try:
dqdrij = -1/(2*q) * (dq1*q2 + q1*dq2)
except ZeroDivisionError:
dqdrij = np.zeros(len(q1))
dqdr = -dqdrij[:, None]*norm
return q, dqdr
|
1,984 | https://:@github.com/mlavin/django-hilbert.git | f550bd0292f4d0e3a32a1da894d7f70711e5ad67 | @@ -38,7 +38,7 @@ class SSLRedirectMiddleware(object):
urls = tuple([re.compile(url) for url in getattr(settings, 'SSL_PATTERNS', [])])
secure = any([url.search(request.path) for url in urls])
if request.is_secure():
- if not secure and not getattr(request, 'keep_secure', False):
+ if secure and not getattr(request, 'keep_secure', False):
if getattr(settings, 'SSL_WHITELIST', False):
# Redirect off SSL
return _redirect(request, False)
| hilbert/middleware.py | ReplaceText(target='' @(41,15)->(41,19)) | class SSLRedirectMiddleware(object):
urls = tuple([re.compile(url) for url in getattr(settings, 'SSL_PATTERNS', [])])
secure = any([url.search(request.path) for url in urls])
if request.is_secure():
if not secure and not getattr(request, 'keep_secure', False):
if getattr(settings, 'SSL_WHITELIST', False):
# Redirect off SSL
return _redirect(request, False) | class SSLRedirectMiddleware(object):
urls = tuple([re.compile(url) for url in getattr(settings, 'SSL_PATTERNS', [])])
secure = any([url.search(request.path) for url in urls])
if request.is_secure():
if secure and not getattr(request, 'keep_secure', False):
if getattr(settings, 'SSL_WHITELIST', False):
# Redirect off SSL
return _redirect(request, False) |
1,985 | https://:@github.com/jbaiter/zotero-cli.git | 4e1f926aa016e5a081609e24b8498d94139949ad | @@ -45,7 +45,7 @@ def find_storage_directories():
if zotero_dir.exists():
candidates.append(zotero_dir.iterdir())
zotero5_dir = home_dir/"Zotero/storage"
- if zotero_dir.exists():
+ if zotero5_dir.exists():
yield ('default', zotero5_dir)
candidate_iter = itertools.chain.from_iterable(candidates)
for fpath in candidate_iter:
| zotero_cli/cli.py | ReplaceText(target='zotero5_dir' @(48,7)->(48,17)) | def find_storage_directories():
if zotero_dir.exists():
candidates.append(zotero_dir.iterdir())
zotero5_dir = home_dir/"Zotero/storage"
if zotero_dir.exists():
yield ('default', zotero5_dir)
candidate_iter = itertools.chain.from_iterable(candidates)
for fpath in candidate_iter: | def find_storage_directories():
if zotero_dir.exists():
candidates.append(zotero_dir.iterdir())
zotero5_dir = home_dir/"Zotero/storage"
if zotero5_dir.exists():
yield ('default', zotero5_dir)
candidate_iter = itertools.chain.from_iterable(candidates)
for fpath in candidate_iter: |
1,986 | https://:@github.com/mjwen/kliff.git | 6d15ef5257545fe7c716db02cc80a34120e93a04 | @@ -129,7 +129,7 @@ def get_descriptor():
desc_params['g5'] = [{'zeta': 1, 'lambda': -1, 'eta': 0.0001},
{'zeta': 2, 'lambda': 1, 'eta': 0.003}]
- desc = SymmetryFunction(cutfunc, cutvalue, desc_params)
+ desc = SymmetryFunction(cutvalue, cutfunc, desc_params)
return desc
| tests/descriptors/test_symmetry_function.py | ArgSwap(idxs=0<->1 @(132,11)->(132,27)) | def get_descriptor():
desc_params['g5'] = [{'zeta': 1, 'lambda': -1, 'eta': 0.0001},
{'zeta': 2, 'lambda': 1, 'eta': 0.003}]
desc = SymmetryFunction(cutfunc, cutvalue, desc_params)
return desc
| def get_descriptor():
desc_params['g5'] = [{'zeta': 1, 'lambda': -1, 'eta': 0.0001},
{'zeta': 2, 'lambda': 1, 'eta': 0.003}]
desc = SymmetryFunction(cutvalue, cutfunc, desc_params)
return desc
|
1,987 | https://:@github.com/wheeler-microfluidics/mpm.git | ce40cbc346ba0bbb0770d2ab71165d84b8c1ffaa | @@ -441,7 +441,7 @@ def enable_plugin(plugin_name):
logger.debug('Plugin already enabled: `%s` -> `%s`', plugin_path_i,
plugin_link_path_i)
enabled_now[plugin_path_i.name] = False
- return enabled_now if not singleton else singleton.values()[0]
+ return enabled_now if not singleton else enabled_now.values()[0]
def disable_plugin(plugin_name):
| mpm/api.py | ReplaceText(target='enabled_now' @(444,45)->(444,54)) | def enable_plugin(plugin_name):
logger.debug('Plugin already enabled: `%s` -> `%s`', plugin_path_i,
plugin_link_path_i)
enabled_now[plugin_path_i.name] = False
return enabled_now if not singleton else singleton.values()[0]
def disable_plugin(plugin_name): | def enable_plugin(plugin_name):
logger.debug('Plugin already enabled: `%s` -> `%s`', plugin_path_i,
plugin_link_path_i)
enabled_now[plugin_path_i.name] = False
return enabled_now if not singleton else enabled_now.values()[0]
def disable_plugin(plugin_name): |
1,988 | https://:@github.com/N2ITN/RPiViz.git | 61199407198a9d674f7836b25da74a6956a93917 | @@ -46,4 +46,4 @@ def crop2face(pic, predictor):
clahe_crop = clahe_image[y1:y2, x1:x2]
#LBP_img = LBP.main(clahe_crop)
shape = predictor(clahe_crop, detections)
- return shape, clahe_image
+ return shape, clahe_crop
| raspiviz/identify.py | ReplaceText(target='clahe_crop' @(49,22)->(49,33)) | def crop2face(pic, predictor):
clahe_crop = clahe_image[y1:y2, x1:x2]
#LBP_img = LBP.main(clahe_crop)
shape = predictor(clahe_crop, detections)
return shape, clahe_image | def crop2face(pic, predictor):
clahe_crop = clahe_image[y1:y2, x1:x2]
#LBP_img = LBP.main(clahe_crop)
shape = predictor(clahe_crop, detections)
return shape, clahe_crop |
1,989 | https://:@github.com/cctbx/cctbx_project.git | bf735aeb2594c55e777037d8680cde1c45926f6d | @@ -279,7 +279,7 @@ class table:
if (alt_row_name is None): continue
if (alt_row_name == path):
result.extend(row_objects)
- elif (not path.startswith(alt_row_name+".")):
+ elif (path.startswith(alt_row_name+".")):
for row_object in row_objects:
result.extend(row_object.get(path=path[len(alt_row_name)+1:]))
return result
| iotbx/iotbx/parameters/__init__.py | ReplaceText(target='' @(282,14)->(282,18)) | class table:
if (alt_row_name is None): continue
if (alt_row_name == path):
result.extend(row_objects)
elif (not path.startswith(alt_row_name+".")):
for row_object in row_objects:
result.extend(row_object.get(path=path[len(alt_row_name)+1:]))
return result | class table:
if (alt_row_name is None): continue
if (alt_row_name == path):
result.extend(row_objects)
elif (path.startswith(alt_row_name+".")):
for row_object in row_objects:
result.extend(row_object.get(path=path[len(alt_row_name)+1:]))
return result |
1,990 | https://:@github.com/cctbx/cctbx_project.git | d2b0c38feb91a6d3ca5d23614cc7d67daff71bd6 | @@ -25,7 +25,7 @@ cns_dna_rna_residue_names = {
}
mon_lib_dna_rna_cif = ["AD", "AR", "CD", "CR", "GD", "GR", "TD", "UR"]
-if ("set" not in __builtins__):
+if ("set" in __builtins__):
mon_lib_dna_rna_cif = set(mon_lib_dna_rna_cif)
rna_dna_reference_residue_names = {
| iotbx/iotbx/pdb/__init__.py | ReplaceText(target=' in ' @(28,9)->(28,17)) | cns_dna_rna_residue_names = {
}
mon_lib_dna_rna_cif = ["AD", "AR", "CD", "CR", "GD", "GR", "TD", "UR"]
if ("set" not in __builtins__):
mon_lib_dna_rna_cif = set(mon_lib_dna_rna_cif)
rna_dna_reference_residue_names = { | cns_dna_rna_residue_names = {
}
mon_lib_dna_rna_cif = ["AD", "AR", "CD", "CR", "GD", "GR", "TD", "UR"]
if ("set" in __builtins__):
mon_lib_dna_rna_cif = set(mon_lib_dna_rna_cif)
rna_dna_reference_residue_names = { |
1,991 | https://:@github.com/cctbx/cctbx_project.git | bb89e67604a536127da9b373ddac969a43c21077 | @@ -568,7 +568,7 @@ def input(
lines=None,
pdb_id=None):
if (pdb_id is not None):
- assert file_name is not None
+ assert file_name is None
file_name = ent_path_local_mirror(pdb_id=pdb_id)
if (file_name is not None):
return ext.input(
| iotbx/pdb/__init__.py | ReplaceText(target=' is ' @(571,20)->(571,28)) | def input(
lines=None,
pdb_id=None):
if (pdb_id is not None):
assert file_name is not None
file_name = ent_path_local_mirror(pdb_id=pdb_id)
if (file_name is not None):
return ext.input( | def input(
lines=None,
pdb_id=None):
if (pdb_id is not None):
assert file_name is None
file_name = ent_path_local_mirror(pdb_id=pdb_id)
if (file_name is not None):
return ext.input( |
1,992 | https://:@github.com/cctbx/cctbx_project.git | 1f3ebf35ac25f18b43f5ce2454f898f4609e9e2e | @@ -1218,7 +1218,7 @@ class _(boost.python.injector, pair_sym_table):
if (pair_count == 0):
print >> out, " no neighbors"
pair_counts.append(pair_count)
- return pair_count
+ return pair_counts
def number_of_pairs_involving_symmetry(self):
result = 0
| cctbx/crystal/__init__.py | ReplaceText(target='pair_counts' @(1221,11)->(1221,21)) | class _(boost.python.injector, pair_sym_table):
if (pair_count == 0):
print >> out, " no neighbors"
pair_counts.append(pair_count)
return pair_count
def number_of_pairs_involving_symmetry(self):
result = 0 | class _(boost.python.injector, pair_sym_table):
if (pair_count == 0):
print >> out, " no neighbors"
pair_counts.append(pair_count)
return pair_counts
def number_of_pairs_involving_symmetry(self):
result = 0 |
1,993 | https://:@github.com/cctbx/cctbx_project.git | 5beb4735b28360b9f33f71020d3c1459330f4053 | @@ -50,7 +50,7 @@ class mod_hdf5(common_mode.common_mode_correction):
# If no detector distance is available set it to NaN, since
# Python's None is not permitted in HDF5
- distance = cspad_tbx.env_distance(env, self.address, self._detz_offset)
+ distance = cspad_tbx.env_distance(self.address, env, self._detz_offset)
if distance is None:
distance = float('nan')
| xfel/cxi/cspad_ana/mod_hdf5.py | ArgSwap(idxs=0<->1 @(53,15)->(53,37)) | class mod_hdf5(common_mode.common_mode_correction):
# If no detector distance is available set it to NaN, since
# Python's None is not permitted in HDF5
distance = cspad_tbx.env_distance(env, self.address, self._detz_offset)
if distance is None:
distance = float('nan')
| class mod_hdf5(common_mode.common_mode_correction):
# If no detector distance is available set it to NaN, since
# Python's None is not permitted in HDF5
distance = cspad_tbx.env_distance(self.address, env, self._detz_offset)
if distance is None:
distance = float('nan')
|
1,994 | https://:@github.com/cctbx/cctbx_project.git | 5beb4735b28360b9f33f71020d3c1459330f4053 | @@ -76,7 +76,7 @@ class mod_param(object):
return
# XXX This hardcodes the address for the front detector!
- detz = cspad_tbx.env_detz(env, 'CxiDs1-0|Cspad-0')
+ detz = cspad_tbx.env_detz('CxiDs1-0|Cspad-0', env)
if (detz is None):
self.m_no_detz += 1
| xfel/cxi/cspad_ana/mod_param.py | ArgSwap(idxs=0<->1 @(79,11)->(79,29)) | class mod_param(object):
return
# XXX This hardcodes the address for the front detector!
detz = cspad_tbx.env_detz(env, 'CxiDs1-0|Cspad-0')
if (detz is None):
self.m_no_detz += 1
| class mod_param(object):
return
# XXX This hardcodes the address for the front detector!
detz = cspad_tbx.env_detz('CxiDs1-0|Cspad-0', env)
if (detz is None):
self.m_no_detz += 1
|
1,995 | https://:@github.com/cctbx/cctbx_project.git | 5beb4735b28360b9f33f71020d3c1459330f4053 | @@ -342,7 +342,7 @@ class mod_view(common_mode.common_mode_correction):
# Get the distance for the detectors that should have it, and set
# it to NaN for those that should not.
if self.detector == 'CxiDs1' or self.detector == 'CxiDsd':
- distance = cspad_tbx.env_distance(env, self.address, self._detz_offset)
+ distance = cspad_tbx.env_distance(self.address, env, self._detz_offset)
if distance is None:
self.nfail += 1
self.logger.warning("event(): no distance, shot skipped")
| xfel/cxi/cspad_ana/mod_view.py | ArgSwap(idxs=0<->1 @(345,17)->(345,39)) | class mod_view(common_mode.common_mode_correction):
# Get the distance for the detectors that should have it, and set
# it to NaN for those that should not.
if self.detector == 'CxiDs1' or self.detector == 'CxiDsd':
distance = cspad_tbx.env_distance(env, self.address, self._detz_offset)
if distance is None:
self.nfail += 1
self.logger.warning("event(): no distance, shot skipped") | class mod_view(common_mode.common_mode_correction):
# Get the distance for the detectors that should have it, and set
# it to NaN for those that should not.
if self.detector == 'CxiDs1' or self.detector == 'CxiDsd':
distance = cspad_tbx.env_distance(self.address, env, self._detz_offset)
if distance is None:
self.nfail += 1
self.logger.warning("event(): no distance, shot skipped") |
1,996 | https://:@github.com/cctbx/cctbx_project.git | f60ef549166e775043ee96a9d55ea7946e6042af | @@ -1212,7 +1212,7 @@ def get_matching_atoms(chains_info,a_id,b_id,res_num_a,res_num_b,
a_altloc = chains_info[a_id].no_altloc.count(False) > 0
b_altloc = bool(chains_info[b_id].no_altloc)
if b_altloc:
- b_altloc = chains_info[a_id].no_altloc.count(False) > 0
+ b_altloc = chains_info[b_id].no_altloc.count(False) > 0
test_altloc = a_altloc or b_altloc
#
res_num_a_updated = []
| mmtbx/ncs/ncs_search.py | ReplaceText(target='b_id' @(1215,27)->(1215,31)) | def get_matching_atoms(chains_info,a_id,b_id,res_num_a,res_num_b,
a_altloc = chains_info[a_id].no_altloc.count(False) > 0
b_altloc = bool(chains_info[b_id].no_altloc)
if b_altloc:
b_altloc = chains_info[a_id].no_altloc.count(False) > 0
test_altloc = a_altloc or b_altloc
#
res_num_a_updated = [] | def get_matching_atoms(chains_info,a_id,b_id,res_num_a,res_num_b,
a_altloc = chains_info[a_id].no_altloc.count(False) > 0
b_altloc = bool(chains_info[b_id].no_altloc)
if b_altloc:
b_altloc = chains_info[b_id].no_altloc.count(False) > 0
test_altloc = a_altloc or b_altloc
#
res_num_a_updated = [] |
1,997 | https://:@github.com/cctbx/cctbx_project.git | 2621ef9d6d7a761b026f833837cf885945745986 | @@ -188,7 +188,7 @@ class torsion_ncs(object):
# and in another - MSE. They will be excluded without
# raising Sorry. They could matched, but it is difficult
# to figure out in this code how to make it happen.
- not (resname1 in ["MET", "MSE"] and resname1 in ["MET", "MSE"])):
+ not (resname1 in ["MET", "MSE"] and resname2 in ["MET", "MSE"])):
msg = "Error in matching procedure: matching "
msg += "'%s %s' and '%s %s'.\n" % (
resname1, rg1.id_str(), resname2, rg2.id_str())
| mmtbx/geometry_restraints/torsion_restraints/torsion_ncs.py | ReplaceText(target='resname2' @(191,54)->(191,62)) | class torsion_ncs(object):
# and in another - MSE. They will be excluded without
# raising Sorry. They could matched, but it is difficult
# to figure out in this code how to make it happen.
not (resname1 in ["MET", "MSE"] and resname1 in ["MET", "MSE"])):
msg = "Error in matching procedure: matching "
msg += "'%s %s' and '%s %s'.\n" % (
resname1, rg1.id_str(), resname2, rg2.id_str()) | class torsion_ncs(object):
# and in another - MSE. They will be excluded without
# raising Sorry. They could matched, but it is difficult
# to figure out in this code how to make it happen.
not (resname1 in ["MET", "MSE"] and resname2 in ["MET", "MSE"])):
msg = "Error in matching procedure: matching "
msg += "'%s %s' and '%s %s'.\n" % (
resname1, rg1.id_str(), resname2, rg2.id_str()) |
1,998 | https://:@github.com/cctbx/cctbx_project.git | 7b8295b84a75a33064f917be984d6017d53a3494 | @@ -795,7 +795,7 @@ class ResidualsPlotter(object):
reflections['xyzcal.px'] = reflections['xyzcal.px.%s'%dest]
if 'xyzobs.mm.value' not in reflections:
- reflections.centroid_px_to_mm(detector)
+ reflections.centroid_px_to_mm(experiments)
reflections['difference_vector_norms'] = (reflections['xyzcal.mm']-reflections['xyzobs.mm.value']).norms()
n = len(reflections)
| xfel/command_line/detector_residuals.py | ReplaceText(target='experiments' @(798,36)->(798,44)) | class ResidualsPlotter(object):
reflections['xyzcal.px'] = reflections['xyzcal.px.%s'%dest]
if 'xyzobs.mm.value' not in reflections:
reflections.centroid_px_to_mm(detector)
reflections['difference_vector_norms'] = (reflections['xyzcal.mm']-reflections['xyzobs.mm.value']).norms()
n = len(reflections) | class ResidualsPlotter(object):
reflections['xyzcal.px'] = reflections['xyzcal.px.%s'%dest]
if 'xyzobs.mm.value' not in reflections:
reflections.centroid_px_to_mm(experiments)
reflections['difference_vector_norms'] = (reflections['xyzcal.mm']-reflections['xyzobs.mm.value']).norms()
n = len(reflections) |
1,999 | https://:@github.com/cctbx/cctbx_project.git | b2783566840ae3392e09171bcb4141e46e6481e5 | @@ -115,7 +115,7 @@ class reader(iotbx_shelx_ext.hklf_reader):
miller_set = miller.set(
crystal_symmetry=crystal_symmetry,
indices=self.indices(), anomalous_flag=anomalous)
- if anomalous is not None:
+ if anomalous is None:
miller_set = miller_set.auto_anomalous()
miller_arrays = []
obs = (miller.array(
| iotbx/shelx/hklf.py | ReplaceText(target=' is ' @(118,16)->(118,24)) | class reader(iotbx_shelx_ext.hklf_reader):
miller_set = miller.set(
crystal_symmetry=crystal_symmetry,
indices=self.indices(), anomalous_flag=anomalous)
if anomalous is not None:
miller_set = miller_set.auto_anomalous()
miller_arrays = []
obs = (miller.array( | class reader(iotbx_shelx_ext.hklf_reader):
miller_set = miller.set(
crystal_symmetry=crystal_symmetry,
indices=self.indices(), anomalous_flag=anomalous)
if anomalous is None:
miller_set = miller_set.auto_anomalous()
miller_arrays = []
obs = (miller.array( |