{"_id":"q-en-pytorch-022e865efb10b2ae31ebf0d5562ed384aaa74dabbab74162da0996279403ca2f","text":"tmpdir = tempfile.mkdtemp() ext_suf = '.pyd' if os.sys.platform == 'win32' else '.so' libname = cffi_wrapper_name + ext_suf <del> ffi.compile(tmpdir=tmpdir, verbose=verbose, target=libname) shutil.copy(os.path.join(tmpdir, libname), os.path.join(target_dir, libname)) </del> <ins> outfile = ffi.compile(tmpdir=tmpdir, verbose=verbose, target=libname) shutil.copy(outfile, os.path.join(target_dir, libname)) </ins> finally: shutil.rmtree(tmpdir)"}
{"_id":"q-en-pytorch-066dd6b918ee24c19d6d1836ab10af295d1039207535a528a4626cbf00ca2778","text":"for(iw = 0; iw < kW; iw++) { real val = *(ip + it*istrideT + ih*istrideH + iw*istrideW); <del> if (val > maxval) </del> <ins> if ((val > maxval) || isnan(val)) </ins> { maxval = val; maxindex = (it+istartT)*isizeH*isizeW + (ih+istartH)*isizeW + (iw+istartW);"}
{"_id":"q-en-pytorch-1fcf3bc313dee3793326e5d79cfda64509e9f7dec1d4b06b486ee9d9f09de30d","text":"raise ValueError('num_workers cannot be negative; ' 'use num_workers=0 to disable multiprocessing.') <ins> if sys.platform == \"win32\" and self.num_workers > 0: raise ValueError('num_workers > 0 is not supported on Windows') </ins> if batch_sampler is None: if sampler is None: if shuffle:"}
{"_id":"q-en-pytorch-202b1a281a0d21743153927ee467e8ba4c18b5ea5a14b37e71db236027828df7","text":"
{"_id":"q-en-pytorch-21d9d9ac8e6325c9af9098cf1d35c71f8f1bc7483a724f70c90d1ee2e3d4a070","text":"} else { // transposed if (input.size(1) != weight.size(0)) { std::stringstream ss; <del> ss << \"Given transposed=\" << transposed << \", weight\" << weight.sizes() << \", so expected input\" << input.sizes() << \" to have \" </del> <ins> ss << \"Given transposed=\" << transposed << \", weight of size \" << weight.sizes() << \", expected input\" << input.sizes() << \" to have \" </ins> << weight.size(0) << \" channels, but got \" << input.size(1) << \" channels instead\"; throw std::runtime_error(ss.str());"}
{"_id":"q-en-pytorch-24958dcac671e563d27c0349df1d5c8487f8d5fc26fb2c0c895955fce808c502","text":"if (weight_dim != k) { std::stringstream ss; <del> ss << \"Expected \" << k << \"-dimensional weight for \" << k << \"-dimensional input \" << input.sizes() << \", but got weight of size \" << weight.sizes() << \" instead\"; </del> <ins> ss << \"Expected \" << weight_dim << \"-dimensional input for \" << weight_dim << \"-dimensional weight \" << weight.sizes() << \", but got input of size \" << input.sizes() << \" instead\"; </ins> throw std::runtime_error(ss.str()); } if (weight.size(0) < groups) {"}
{"_id":"q-en-pytorch-286cccb6da61a23a2bcb079e734646e80623fed1741e1e8e7ca8aaf9c8a7a41e","text":"return Subscript(base, [build_SliceExpr(ctx, base, expr.slice)]) elif sub_type is ast.ExtSlice: return Subscript(base, build_ExtSlice(ctx, base, expr.slice)) <ins> elif sys.version_info >= (3, 9): # In Python3.9 array indicies are not wrapped in ast.Index if sub_type is ast.Tuple: # N-dimensional indexing using Tuple: x[(i, j, k)] is equivalent to x[i, j, k] indices = [] for index_expr in expr.slice.elts: if isinstance(index_expr, ast.Slice): indices.append(build_SliceExpr(ctx, base, index_expr)) else: indices.append(build_expr(ctx, index_expr)) return Subscript(base, indices) return Subscript(base, [build_expr(ctx, expr.slice)]) </ins> else: # Ellipsis (can only happen in Python 2) raise NotSupportedError(base.range(), \"ellipsis is not supported\")"}
{"_id":"q-en-pytorch-29b4f12861a142464a9b25b830cabbf59ed5cb2adfaf60f093acac0c57573ea1","text":"const float lr = *lr_ptr; if (!nesterov) { CUDA_1D_KERNEL_LOOP(i, N) { <del> moment_out[i] = mu * moment[i] * lr * grad[i]; </del> <ins> moment_out[i] = mu * moment[i] + lr * grad[i]; </ins> param_out[i] = param[i] - moment_out[i]; } } else {"}
{"_id":"q-en-pytorch-2b46b8dbf0d931ed37a0e15ed23bc29801672fff95ab89d5b94bca2bc2a0c892","text":"if isinstance(expr.slice.value, ast.Tuple): # N-dimensional indexing using Tuple: x[(i, j, k)] is equivalent to x[i, j, k] # XXX: Indexing using a list is **different**! It triggers advanced indexing. <del> indices = [] for index_expr in expr.slice.value.elts: indices.append(build_expr(ctx, index_expr)) </del> <ins> indices = [build_expr(ctx, index_expr) for index_expr in expr.slice.value.elts] </ins> return Subscript(base, indices) else: return Subscript(base, [build_expr(ctx, expr.slice.value)])"}
{"_id":"q-en-pytorch-2e3ced0554ae542f38fa4f46492eb2a9c1f34db3580330fa33d016c154e86473","text":"'zero-dimensional.*cannot be concatenated'): torch.cat([x, y]) <del> def test_cat_empty(self): </del> <ins> @staticmethod def _test_cat_empty(self, use_cuda=False): </ins> # FIXME: this is legacy behavior and should be removed # when we support empty tensors with arbitrary sizes <del> x = torch.randn(4, 3, 32, 32) empty = torch.randn(0) </del> <ins> if use_cuda: dtype = torch.cuda.float32 else: dtype = torch.float32 x = torch.randn((4, 3, 32, 32), dtype=dtype) empty = torch.randn((0,), dtype=dtype) </ins> res1 = torch.cat([x, empty], dim=1) res2 = torch.cat([empty, x], dim=1) self.assertEqual(res1, res2) <del> conv = torch.nn.Conv2d(3, 3, kernel_size=1) </del> <ins> conv = torch.nn.Conv2d(3, 3, kernel_size=1).float() if use_cuda: conv = conv.cuda() </ins> res1 = torch.cat([conv(x), empty], dim=1) res2 = torch.cat([empty, conv(x)], dim=1) self.assertEqual(res1, res2)"}
{"_id":"q-en-pytorch-2ec2247f8a7bf05febc899bf58721a55b2d0844e172b0df05134abd1edcc3ca8","text":"{ tcntr = y*iwidth + x; real val = *(ip + tcntr); <del> if (val > maxval) </del> <ins> if ((val > maxval) || isnan(val)) </ins> { maxval = val; maxindex = tcntr;"}
{"_id":"q-en-pytorch-3427195ea7d48d9ab3b176a5c35a13bb1f7306610e21e5774d1802387ca3c835","text":"t2 = torch.from_numpy(t.numpy().transpose()) self.assertEqual(t1, t2) <ins> def test_inplace_division(self): t = torch.rand(5, 5) id_before = id(t) t /= 2 id_after = id(t) self.assertEqual(id_before, id_after) </ins> # Functions to test negative dimension wrapping METHOD = 1 INPLACE_METHOD = 2"}
{"_id":"q-en-pytorch-3a31795ee2fc98b796dd0c9851e0b44dab2c72e71d3233392cbb762548177a46","text":"Args: input (Tensor): the tensor to compare other (Tensor or float): the tensor or value to compare <del> out (Tensor, optional): the output tensor. Must be a `ByteTensor` or the same type as `input`. </del> <ins> out (Tensor, optional): the output tensor. Must be a `ByteTensor` </ins> Returns: Tensor: A ``torch.ByteTensor`` containing a 1 at each location where comparison is true"}
{"_id":"q-en-pytorch-3cdb37047b20c512e3f4eebe8b5c212a33cca4021fd795eab2296789e2e0b00e","text":"blockIdx.x; } <del>
{"_id":"q-en-pytorch-4099cb47e51f5963ac2fbc751ead939ce3721d76c3af31b04a878eb6e554a458","text":"#include \"THCTensor.hpp\" #include \"THCHalf.h\" #include \"THCHalfAutoNumerics.cuh\" <ins> #include \"THCNumerics.cuh\" </ins> #include \"common.h\" // kernels borrowed from Caffe"}
{"_id":"q-en-pytorch-41d4dd651a0d4badf059091f9ac8324ffb5f3d6684e9d5e78affa9f305316e3b","text":"# This should work though l2.weight = Variable(torch.randn(10, 10)) <ins> def test_embedding_padding_idx(self): embedding = nn.Embedding(10, 20, padding_idx = 0) input = Variable(torch.LongTensor([[0,2,4,5],[4,3,0,9]])) output = embedding(input) self.assertEqual(output[0][0].sum().data[0], 0) self.assertEqual(output[1][2].sum().data[0], 0) </ins> def test_Dropout(self): input = torch.Tensor(1000) self._test_dropout(nn.Dropout, input)"}
{"_id":"q-en-pytorch-440e73dec00a24d4e6b6f7a738f49c36aba4ceb41a955f172524f45b894c7d54","text":"z = torch.cat([x, y]) self.assertEqual(z.size(), (21, SIZE, SIZE)) <ins> def test_cat_empty(self): TestTorch._test_cat_empty(self, use_cuda=True) </ins> def test_bernoulli(self): x = torch.tensor([0, 1], dtype=torch.cuda.float32) self.assertEqual(x.bernoulli().tolist(), [0, 1])"}
{"_id":"q-en-pytorch-47fdd2074fae54076b9495d25a94a841524a59ccda55df179b17cbd54b477ef8","text":"} } <del> static Tensor dispatch_clamp(const Tensor & self, Scalar min, Scalar max) { AutoNoGIL no_gil; AutoGPU auto_gpu(self); return self.clamp(min, max); } static Tensor dispatch_clamp_min(const Tensor & self, Scalar min) { AutoNoGIL no_gil; AutoGPU auto_gpu(self); return self.clamp_min(min); } static Tensor dispatch_clamp_max(const Tensor & self, Scalar max) { AutoNoGIL no_gil; AutoGPU auto_gpu(self); return self.clamp_max(max); } </del> // The Python clamp() syntax has to be mapped to one of three C++ functions static PyObject * THPVariable_clamp(PyObject* module, PyObject* args, PyObject* kwargs) { HANDLE_TH_ERRORS static PythonArgParser parser({ <del> \"clamp(Tensor input, Scalar min=None, Scalar max=None)\", </del> <ins> \"clamp(Tensor input, Scalar min=None, Scalar max=None, *, Tensor out=None)\", </ins> }); <del> ParsedArgs<3> parsed_args; </del> <ins> ParsedArgs<4> parsed_args; </ins> auto r = parser.parse(args, kwargs, parsed_args); if (!r.isNone(1) && !r.isNone(2)) { <del> return THPVariable_Wrap(dispatch_clamp(r.tensor(0), r.scalar(1), r.scalar(2))); </del> <ins> if (!r.isNone(3)) { return wrap(dispatch_clamp(r.tensor(0), r.scalar(1), r.scalar(2), r.tensor(3))); } else { return wrap(dispatch_clamp(r.tensor(0), r.scalar(1), r.scalar(2))); } </ins> } else if (!r.isNone(1)) { <del> return THPVariable_Wrap(dispatch_clamp_min(r.tensor(0), r.scalar(1))); </del> <ins> if (!r.isNone(3)) { return wrap(dispatch_clamp_min(r.tensor(0), r.scalar(1), r.tensor(3))); } else { return wrap(dispatch_clamp_min(r.tensor(0), r.scalar(1))); } </ins> } else if (!r.isNone(2)) { <del> return THPVariable_Wrap(dispatch_clamp_max(r.tensor(0), r.scalar(2))); </del> <ins> if (!r.isNone(3)) { return wrap(dispatch_clamp_max(r.tensor(0), r.scalar(2), r.tensor(3))); } else { return wrap(dispatch_clamp_max(r.tensor(0), r.scalar(2))); } </ins> } else { throw std::runtime_error(\"At least one of 'min' or 'max' must not be None\"); } <ins> Py_RETURN_NONE; </ins> END_HANDLE_TH_ERRORS }"}
{"_id":"q-en-pytorch-4d141356d1159441381c7f3d4f816ba6d9aa5b681d7723a83576b39d219d1fbd","text":"Args: input (Tensor): the tensor to compare other (Tensor or float): the tensor or value to compare <del> out (Tensor, optional): the output tensor that must be a `ByteTensor` or the same type as `input` </del> <ins> out (Tensor, optional): the output tensor that must be a `ByteTensor` </ins> Returns: Tensor: A ``torch.ByteTensor`` containing a 1 at each location where comparison is true."}
{"_id":"q-en-pytorch-551cb05265906a9804e2fced9973d2fabb2a5a6facb30f3afcbb988f2bcea4f1","text":"def __idiv__(self, other): return self.div_(other) <ins> __itruediv__ = __idiv__ </ins> def __mod__(self, other): return self.remainder(other)"}
{"_id":"q-en-pytorch-5c250d7eb168a7c614115257ae332b0c408b13ef008048b0c212e3c73a0afe0c","text":"// Lua indices begin at 1 IndexType dstIndex_ = indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE; <del> assert(dstIndex < dstFillDimSize); </del> <ins> assert(dstIndex_ < dstFillDimSize); </ins> // We stride over the output ignoring the indexed dimension // (innerSize), whose offset calculation is handled differently"}
{"_id":"q-en-pytorch-5f5558ee304aa0ad910223e4262d50bde2c4a6c17ed172c87c24b4879280cdf3","text":"tW = targetTensor.size(tDims - 1) adjW = self._calculateAdj(tW, self.kW, self.padW, self.dW) adjH = self._calculateAdj(tH, self.kH, self.padH, self.dH) <del> if self.finput is None: </del> <ins> if not hasattr(self, 'finput') or self.finput is None: </ins> self.finput = input[0].new() <del> if self.fgradInput is None: </del> <ins> if not hasattr(self, 'fgradInput') or self.fgradInput is None: </ins> self.fgradInput = input[0].new() else: <del> if self.finput is None: </del> <ins> if not hasattr(self, 'finput') or self.finput is None: </ins> self.finput = input.new() <del> if self.fgradInput is None: </del> <ins> if not hasattr(self, 'fgradInput') or self.fgradInput is None: </ins> self.fgradInput = input.new() inputTensor = self._makeContiguous(inputTensor)"}
{"_id":"q-en-pytorch-60723fc8355f03ca1cf6f865db5b4983650f61c7a434e07bb5e6f9c4cba6a872","text":"THPByteOrder::THP_LITTLE_ENDIAN, to_convert); } <del> SYSCHECK(write(fd, data, to_convert * sizeof(real))); </del> <ins> SYSCHECK(write(fd, le_buffer.get(), to_convert * sizeof(real))); </ins> } } }"}
{"_id":"q-en-pytorch-61b459b548dc3e3c41a011899f5c524fe8976e152a080d9910783e4576b9bba5","text":"<del> Subproject commit 9f6a636e547fc70a02fa48436449aad67080698f </del> <ins> Subproject commit add56ccdcac23a6c522a2c1174a866e293c61dab </ins>"}
{"_id":"q-en-pytorch-69cb51f0ded6db481b492d760fc1235533115dd208da946400a73295b7f7117d","text":"self.assertEqual(output[0][0].sum().data[0], 0) self.assertEqual(output[1][2].sum().data[0], 0) <ins> def test_embedding_max_norm(self): embedding = nn.Embedding(22, 5, max_norm=1.0) input = Variable(torch.LongTensor([2, 8, 8, 6])) output = embedding(input) self.assertEqual(output[1], output[2]) self.assertTrue(output.data.norm(p=2, dim=1).le(1).all()) @unittest.skipIf(not TEST_CUDA, \"CUDA unavailable\") def test_embedding_max_norm_cuda(self): embedding = nn.Embedding(22, 5, max_norm=1.0).cuda() input = Variable(torch.LongTensor([2, 8, 8, 6])).cuda() output = embedding(input) self.assertEqual(output[1], output[2]) self.assertTrue(output.data.norm(p=2, dim=1).le(1).all()) </ins> def test_embedding_functional(self): a = Variable(torch.LongTensor([ [1, 3, 2],"}
{"_id":"q-en-pytorch-6c7de40e95bf34dc14b2735c160240ca9b4fdd3569633c3d30d09bfb2a9e0564","text":"for(ih = 0; ih < kH; ih++) { for(iw = 0; iw < kW; iw++) { T val = ptr_input[iw*istrideW]; <del> if (val > max) { </del> <ins> if ((val > max) || THCNumerics<T>::isnan(val)) { </ins> max = val; argmax = (ih+istartH)*isizeW + iw+istartW; }"}
{"_id":"q-en-pytorch-6df228497008e8ec4cd2ec8393bca92b7924400e4a996e7624da61de75ba792f","text":"<ins> #define __STDC_FORMAT_MACROS </ins> #include <Python.h> #ifdef _MSC_VER #include <Windows.h>"}
{"_id":"q-en-pytorch-6eb6639f1ba6b2299a30030e64d043695fa872f704c8ec2357677fa315d28087","text":"// fast track for bytes and little endian if (sizeof(real) == 1 || THP_nativeByteOrder() == THPByteOrder::THP_LITTLE_ENDIAN) { <del> SYSCHECK(read(fd, data, sizeof(real) * storage->size)); </del> <ins> char *bytes = (char *) data; uint64_t remaining = sizeof(real) * storage->size; while (remaining > 0) { ssize_t result = read(fd, bytes, remaining); if (result < 0) throw std::system_error(result, std::system_category()); bytes += result; remaining -= result; } </ins> } else { long buffer_size = std::min(size, (long)5000); std::unique_ptr<uint8_t[]> le_buffer(new uint8_t[buffer_size * sizeof(real)]); <del> for (long i = 0; i < size; i += buffer_size) { </del> <ins> for (int64_t i = 0; i < size; i += buffer_size) { </ins> size_t to_convert = std::min(size - i, buffer_size); SYSCHECK(read(fd, le_buffer.get(), sizeof(real) * to_convert)); if (sizeof(real) == 2) {"}
{"_id":"q-en-pytorch-73ff66b1583b63baf05c469b53e78587bbebebb925dd78cda19f3d6b264e313f","text":"Args: input (Tensor): the tensor to compare other (Tensor or float): the tensor or value to compare <del> out (Tensor, optional): the output tensor that must be a `ByteTensor` or the same type as :attr:`input` </del> <ins> out (Tensor, optional): the output tensor that must be a `ByteTensor` </ins> Returns: Tensor: A ``torch.ByteTensor`` containing a 1 at each location where comparison is true"}
{"_id":"q-en-pytorch-7928473c855fede51fc945c91f8640dc782109f7ffed480f44bee0760b53642d","text":"\"aten::set_grad_enabled(bool val) -> ()\", [](Stack* stack) { torch::GradMode::set_enabled(pop(stack).toBool()); <del> push(stack, IValue()); </del> }, aliasAnalysisConservative()), });"}
{"_id":"q-en-pytorch-7d76bd3eea5a0324ece90a539cf25b6518205450745d9e02d08e72a7785c493f","text":"} } <ins> // If all inputs are empty tensors, return an empty tensor if (notEmptyTensor == NULL) { return; } </ins> // In the event that the user specified -1 as the concat dimension, then // we want to pick the nDims as dimension to cat along (and thus nDims - 1 as the // value due to 0-based indexing). If the nDims is // 0 (i.e. we are catting all"}
{"_id":"q-en-pytorch-81fc2c89f5705744d911fc82060c1215fc1688583b68e7f9223af82e7c00c1fb","text":"bottom_data += (n * channels + c) * height * width; for (int h = hstart; h < hend; h += dilation_h) { for (int w = wstart; w < wend; w += dilation_w) { <del> if (ScalarConvert<Dtype, AccType>::to(bottom_data[h * width + w]) > maxval) { </del> <ins> Dtype val = bottom_data[h * width + w]; if ((ScalarConvert<Dtype, AccType>::to(val) > maxval) || THCNumerics<Dtype>::isnan(val)) { </ins> maxidx = h * width + w; <del> maxval = ScalarConvert<Dtype, AccType>::to(bottom_data[maxidx]); </del> <ins> maxval = ScalarConvert<Dtype, AccType>::to(val); </ins> } } }"}
{"_id":"q-en-pytorch-88ca958657dda50101b09cbdd7703b45fbeec7c19f02de8e9ef082e239c181d4","text":"<ins> #define __STDC_FORMAT_MACROS </ins> #include <Python.h> #include <structmember.h>"}
{"_id":"q-en-pytorch-977578af41147a670dcbbe495360d3862903fb443373d37d981d21a197c31db7","text":"auto input = input_r.contiguous(); auto weight = weight_r; auto bias = bias_r; <del> auto k = input.ndimension(); </del> <ins> auto k = weight.ndimension(); </ins> int64_t dim = k - 2; if (dim <= 0) { <del> throw std::runtime_error(\"input has less dimensions than expected\"); </del> <ins> throw std::runtime_error(\"weight should have at least two dimensions\"); </ins> } ConvParams params;"}
{"_id":"q-en-pytorch-9b3bfe1e851162000f63e68f19a6b5422d28fae26c2b41e6552e3d6ccc3747ba","text":"for(iw = 0; iw < kW; iw++) { real val = *(ip + ih*istrideH + iw*istrideW); <del> if (val > maxval) </del> <ins> if ((val > maxval) || isnan(val)) </ins> { maxval = val; maxindex = (ih+istartH)*isizeW + (iw+istartW);"}
{"_id":"q-en-pytorch-9b791534cfdf1e04d78ab0fcf440549051b7f8d898ae468ecf0a79a35a682b9e","text":"gI = apply_fn<Transpose>(0, 1)(gIt); } } <ins> if (should_compute_output(0) && !ggO.defined()) ggO = at::zeros_like(gO); if (should_compute_output(1) && !gI.defined()) gI = at::zeros_like(input); if (should_compute_output(2) && !gW.defined()) gW = at::zeros_like(weight); </ins> return {ggO, gI, gW}; }"}
{"_id":"q-en-pytorch-9cb6c949988837baec9a8d2f5f5cda5c88e3ef0cd73137ef0b7df869c21c1c37","text":"'expected a non-empty list of Tensors'): torch.cat([], dim=1) <ins> def test_cat_empty(self): self._test_cat_empty(self) </ins> def test_stack(self): x = torch.rand(2, 3, 4) y = torch.rand(2, 3, 4)"}
{"_id":"q-en-pytorch-9ec1744c3c99ad877b5de782ef7e6f996113b1f13b635199c006a6d8e8a19f56","text":"} } <ins>
{"_id":"q-en-pytorch-a9404e4c571418623fa1b9c97e0ef232f999a7ad76326d587a82ccbd62ab02ac","text":"for(ih = 0; ih < kH; ++ih) { for(iw = 0; iw < kW; ++iw) { T val = ptr_input[ih*istrideH + iw*istrideW]; <del> if (val > max) { </del> <ins> if ((val > max) || THCNumerics<T>::isnan(val)) { </ins> max = val; argmax = (it+istartT)*isizeH*isizeW + (ih+istartH)*isizeW + iw+istartW; }"}
{"_id":"q-en-pytorch-ae42f0aca4a13adb43080aa570ab847db924974818431f575dd71a17fb988069","text":"index = t * inputH * inputW + h * inputW + w; Dtype val = inputData[index]; <del> if (max < val) </del> <ins> if ((max < val) || THCNumerics<Dtype>::isnan(val)) </ins> { max = val; maxIndex = index;"}
{"_id":"q-en-pytorch-aebacc799c3a81586b0c547a02debca587aaa2d0618d2fe2a2717dd0dbd9fc61","text":"res2[i] = max(min_val, min(max_val, res2[i])) self.assertEqual(res1, res2) <ins> out = m1.clone() torch.clamp(m1, min=min_val, max=max_val, out=out) self.assertEqual(out, res1) </ins> res1 = torch.clamp(m1, min=min_val) res2 = m1.clone() for i in iter_indices(res2): res2[i] = max(min_val, res2[i]) self.assertEqual(res1, res2) <ins> torch.clamp(m1, min=min_val, out=out) self.assertEqual(out, res1) </ins> res1 = torch.clamp(m1, max=max_val) res2 = m1.clone() for i in iter_indices(res2): res2[i] = min(max_val, res2[i]) self.assertEqual(res1, res2) <ins> torch.clamp(m1, max=max_val, out=out) self.assertEqual(out, res1) </ins> def test_pow(self): # [res] torch.pow([res,] x)"}
{"_id":"q-en-pytorch-aed851444804c1a6b8a8132b51b04030f5e3ddf2cf23b487c946f39f2cd3d211","text":"if (!transposed) { if (input.size(1) != (weight.size(1) * groups)) { std::stringstream ss; <del> ss << \"Given groups=\" << groups << \", weight\" << weight.sizes() << \", so expected input\" << input.sizes() << \" to have \" </del> <ins> ss << \"Given groups=\" << groups << \", weight of size \" << weight.sizes() << \", expected input\" << input.sizes() << \" to have \" </ins> << (weight.size(1) * groups) << \" channels, but got \" << input.size(1) << \" channels instead\"; throw std::runtime_error(ss.str());"}
{"_id":"q-en-pytorch-b4947259b64f81b4189708dee2dee9dc3551fa710f6caa3b8c633f919067385d","text":"struct ModeUnsignedPair max = {0, 0}; <del> max = reduceBlockN<struct ModeUnsignedPair, MaxReduceOp<struct ModeUnsignedPair>, 2> </del> <ins> max = reduceBlockWithNThreadLocalReductions<struct ModeUnsignedPair, MaxReduceOp<struct ModeUnsignedPair>, 2> </ins> (uupmem, uup, sliceSize, MaxReduceOp<struct ModeUnsignedPair>(), max); // Store the mode in shared memory for use in finding the mode in the input slice"}
{"_id":"q-en-pytorch-b81f9238c60e350db10e6f015b9ea17dc0742bcf9cb5615e33a0db99db1901db","text":"SYSCHECK(write(fd, &self->size, sizeof(long))); // fast track for bytes and little endian if (sizeof(real) == 1 || THP_nativeByteOrder() == THPByteOrder::THP_LITTLE_ENDIAN) { <del> SYSCHECK(write(fd, data, sizeof(real) * self->size)); </del> <ins> char *bytes = (char *) data; uint64_t remaining = sizeof(real) * self->size; while (remaining > 0) { ssize_t result = write(fd, bytes, remaining); if (result < 0) throw std::system_error(result, std::system_category()); bytes += result; remaining -= result; } </ins> } else { long buffer_size = std::min(self->size, (long)5000); std::unique_ptr<uint8_t[]> le_buffer(new uint8_t[buffer_size * sizeof(real)]); <del> for (long i = 0; i < self->size; i += buffer_size) { </del> <ins> for (int64_t i = 0; i < self->size; i += buffer_size) { </ins> size_t to_convert = std::min(self->size - i, buffer_size); if (sizeof(real) == 2) { THP_encodeInt16Buffer((uint8_t*)le_buffer.get(),"}
{"_id":"q-en-pytorch-bdcc12c8f962f6793e23cf1d7d5ef92d9945d1fe9fbf4c2b5d59f5e879d7f9e6","text":"def reset_parameters(self): self.weight.data.normal_(0, 1) <ins> if self.padding_idx is not None: self.weight.data[self.padding_idx].fill_(0) </ins> def forward(self, input): <del> return self._backend.Embedding(self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq)(input, self.weight) </del> <ins> padding_idx = self.padding_idx if padding_idx is None: padding_idx = -1 return self._backend.Embedding(padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq)(input, self.weight) </ins> # TODO: SparseLinear"}
{"_id":"q-en-pytorch-c0f3e46d5377fdd8131a1db39706016c95280d2cdff5b4d1909a0669e01b61c5","text":"static inline __host__ __device__ char mul(char a, char b) { return a * b; } static inline __host__ __device__ char sub(char a, char b) { return a - b; } static inline __host__ __device__ char div(char a, char b) { return a / b; } <del> static inline __host__ __device__ char abs(char a) { return abs(a); } </del> <ins> static inline __host__ __device__ char abs(char a) { return ::abs((int)a); } </ins> }; template <>"}
{"_id":"q-en-pytorch-d220f76b82742fb9c30b11178c8388d7854eacf45c1124207fd51af7fab0f931","text":"@staticmethod def _renorm(ctx, indices, weight, max_norm, norm_type): <del> if indices.dim() == 2: indices = indices.clone().view(-1) </del> <ins> # clone indices since LookupTable_renorm modifies it in-place </ins> ctx._backend.LookupTable_renorm( ctx._backend.library_state, <del> indices, </del> <ins> indices.clone().view(-1), </ins> weight, max_norm, norm_type"}
{"_id":"q-en-pytorch-dc2cc16002ad2fcb397464bc29df418578647da2b91fe00525f4e32c4115696b","text":"static inline __host__ __device__ short mul(short a, short b) { return a * b; } static inline __host__ __device__ short sub(short a, short b) { return a - b; } static inline __host__ __device__ short div(short a, short b) { return a / b; } <del> static inline __host__ __device__ short abs(short a) { return abs(a); } </del> <ins> static inline __host__ __device__ short abs(short a) { return ::abs((int)a); } </ins> }; template <>"}
{"_id":"q-en-pytorch-dd6af3615c1336a9731953d1a8470906b5f93f83fabf440136bc36f449c84f25","text":">>> # an Embedding module containing 10 tensors of size 3 >>> embedding = nn.Embedding(10, 3) >>> # a batch of 2 samples of 4 indices each <del> >>> input = torch.Tensor([[1,2,4,5],[4,3,2,10]]) </del> <ins> >>> input = torch.LongTensor([[1,2,4,5],[4,3,2,9]]) >>> print(embedding(input)) >>> # example with padding_idx >>> embedding = nn.Embedding(10, 3, padding_idx=0) >>> input = torch.LongTensor([[0,2,0,5]]) </ins> >>> print(embedding(input)) \"\"\" <del> def __init__(self, num_embeddings, embedding_dim, padding_idx=-1, </del> <ins> def __init__(self, num_embeddings, embedding_dim, padding_idx=None, </ins> max_norm=None, norm_type=2, scale_grad_by_freq=False): self.num_embeddings = num_embeddings self.embedding_dim = embedding_dim"}
{"_id":"q-en-pytorch-e146c2a64e2bd159eae15b5e7dfd2ad08f34d4ab3ebe3e3a06870fe78e7bbb46","text":"with: submodules: false fetch-depth: 1 <del> - name: Setup Python 3.5 </del> <ins> - name: Setup Python 3.6 </ins> if: matrix.test_type == 'older_python_version' uses: actions/setup-python@v4 with: <del> python-version: '3.5' </del> <ins> python-version: '3.6' </ins> architecture: x64 check-latest: false cache: pip"}
{"_id":"q-en-pytorch-e1cc84099c3b118d4920752811c784dcb3638765475447de55f10769a7adf155","text":"Args: num_embeddings: size of the dictionary of embeddings embedding_dim: the size of each embedding vector <del> padding_idx: If given, pads the output with zeros whenever it encounters the index. Default: -1 </del> <ins> padding_idx: If given, pads the output with zeros whenever it encounters the index. Default: None </ins> max_norm: If given, will renormalize the embeddings to always have a norm lesser than this Default: None norm_type: The p of the p-norm to compute for the max_norm option scale_grad_by_freq: if given, this will scale gradients by the frequency of the words in the dictionary."}
{"_id":"q-en-pytorch-e5ae77a5e08322e65369c99bb0e38344715024cff6a41f3418003b3cb4bc4e1f","text":"meant to be installed as pip packages) (default: False). relative_to (str, optional): path of the build file. Required when ``package is True``. It's best to use ``__file__`` for this argument. <del> kwargs: additional arguments that are passed to ffi to declar the </del> <ins> kwargs: additional arguments that are passed to ffi to declare the </ins> extension. See `Extension API reference`_ for details. .. _`Extension API reference`: https:
{"_id":"q-en-pytorch-e5c6c52e389697c2263e94638406e059af264d366c283b8eb13b7ef2925b5de0","text":"{ index = z * iwidth * iheight + y * iwidth + x; real val = ip[index]; <del> if (val > maxval) </del> <ins> if ((val > maxval) || isnan(val)) </ins> { maxval = val; maxindex = index;"}
{"_id":"q-en-pytorch-e68748fa6c4aafa8c187da05b9d98e17b0db0f942a0f44442d1917f9db594699","text":"// For Convolution strategies that don't implicitly handle grad_bias, we add a helper
{"_id":"q-en-pytorch-f07ba2188846889a62ffcd00bc1564c97864fab48732feb1e9f5c83d821811a3","text":"Args: input (Tensor): the tensor to compare other (Tensor or float): the tensor or value to compare <del> out (Tensor, optional): the output tensor that must be a `ByteTensor` or the same type as :attr:`input` </del> <ins> out (Tensor, optional): the output tensor that must be a `ByteTensor` </ins> Returns: Tensor: A `torch.ByteTensor` containing a 1 at each location where comparison is true"}
{"_id":"q-en-pytorch-fba64ac32da2f1cb65a2a0cea021ae9220defa92747dc8f4d4bc86b53f0d9510","text":"def test_AdaptiveMaxPool3d_indices_cuda(self, dtype=torch.float): self._test_maxpool_indices(3, adaptive=True, device=\"cuda\", dtype=dtype) <ins> @staticmethod def _test_max_pool_nan(self, device, dtype=torch.float): for adaptive in ['', 'adaptive_']: for num_dim in [1, 2, 3]: fn_name = '{}max_pool{}d'.format(adaptive, num_dim) fn = getattr(F, fn_name) x = torch.full([1, 1] + num_dim * [3], float('nan')) res = fn(x, 1 if adaptive else 3) self.assertTrue(math.isnan(res.item())) @unittest.skipIf(not TEST_CUDA, \"CUDA unavailable\") @repeat_test_for_types(ALL_TENSORTYPES) def test_max_pool_nan_cuda(self, dtype=torch.float): self._test_max_pool_nan(self, device=\"cuda\", dtype=dtype) def test_max_pool_nan(self, dtype=torch.float): self._test_max_pool_nan(self, device=\"cpu\") </ins> def _test_scatter(self, tensor): x = torch.tensor(tensor, requires_grad=True) result = dp.scatter(x, (0, 1))"}