|
{"_id":"doc-en-pytorch-b7efc1be2e54d5adbcd168d9ea6a2d0637592ce4b5a16edbc8abc376fab9abf3","title":"","text":"(Addcdiv, (), ((S, S), (S, S), torch.rand(S, S) + 1e-2) ), (Addcdiv, (0.6,), ((S, S), (S, S), torch.rand(S, S) + 1e-2), 'scale'), (IndexAdd, (0,), ((S, S), index_variable(2, S), (2, S)) ), <del> (IndexCopy, (0,), ((S, S), index_variable(2, S), (2, S)) ), </del> <ins> # (IndexCopy, (0,), ((S, S), index_variable(2, S), (2, S)) ), </ins> (IndexFill, (0, 2), ((S, S), index_variable(2, S)) ), (IndexSelect, (0,), ((S, S), index_variable(2, S)) ), (Gather, (0,), ((M, S), gather_variable((S, S), 1, M)) ),"} |
|
{"_id":"doc-en-pytorch-a5509dbbbfa858e8c6cb55e89fc0a00c1506c539c221b4e583c7ec8963caac7e","title":"","text":"<del> import torch from torch.autograd.function import Function from torch._thnn import type2backend import torch.backends.cudnn as cudnn class BatchNorm(Function): def __init__(self, running_mean, running_var, training, momentum, eps): super(BatchNorm, self).__init__() self.running_mean = running_mean self.running_var = running_var self.training = training self.momentum = momentum self.eps = eps def forward(self, input, weight=None, bias=None): self.save_for_backward(input, weight, bias) # don't use cuDNN for half inputs because cuDNN requires the weight and # bias tensors to be floats, unlike THCUNN which requires half tensors. self.use_cudnn = (cudnn.is_acceptable(input) and cudnn.version() > 5110 and weight is not None and bias is not None and not isinstance(input, torch.cuda.HalfTensor)) # temporary buffers used in forward and backward num_features = input.size(1) _save_mean = input.new(num_features) _save_std = input.new(num_features) output = input.new(input.size()) if self.use_cudnn: torch._C._cudnn_batch_norm_forward( input, output, weight, bias, self.running_mean, self.running_var, _save_mean, _save_std, self.training, self.momentum, self.eps) else: backend = type2backend[type(input)] backend.BatchNormalization_updateOutput( backend.library_state, input, output, weight, bias, self.running_mean, self.running_var, _save_mean, _save_std, self.training, self.momentum, self.eps) if self.requires_grad: self._save_mean = _save_mean self._save_std = _save_std return output def backward(self, grad_output): input, weight, bias = self.saved_tensors grad_input, grad_weight, grad_bias = None, None, None if self.needs_input_grad[0] or self.use_cudnn: grad_input = input.new(input.size()) if (len(self.needs_input_grad) > 1 and self.needs_input_grad[1]) or self.use_cudnn: grad_weight = weight.new(weight.size()).zero_() if (len(self.needs_input_grad) > 1 and self.needs_input_grad[2]) or self.use_cudnn: grad_bias = bias.new(bias.size()).zero_() if self.use_cudnn and self.training: # cudnn does not support backward in evaluate mode torch._C._cudnn_batch_norm_backward( input, grad_output, grad_input, grad_weight, grad_bias, weight, self.running_mean, self.running_var, self._save_mean, self._save_std, self.training, self.eps) else: grad_output = grad_output.contiguous() backend = type2backend[type(input)] backend.BatchNormalization_backward( backend.library_state, input, grad_output, grad_input, grad_weight, grad_bias, weight, self.running_mean, self.running_var, self._save_mean, self._save_std, self.training, 1.0, self.eps) return grad_input, grad_weight, grad_bias </del>"} |
|
{"_id":"doc-en-pytorch-cadd920c3290a7738a8ce57c915804e69be3a64f7d66ff825142f5499221a79d","title":"","text":"def _initialize_backend(): from .._functions.thnn import _all_functions as _thnn_functions from .._functions.linear import Linear <del> from .._functions.batchnorm import BatchNorm </del> from .._functions.conv import ConvNd from .._functions.rnn import RNN, RNNTanhCell, RNNReLUCell, GRUCell, LSTMCell"} |
|
{"_id":"doc-en-pytorch-0e85e2da78bb6403e368fc842d283f6c2f8947167f185d1229142870d43226f1","title":"","text":"HingeEmbeddingLoss, MarginRankingLoss backend.register_function('Linear', Linear) <del> backend.register_function('BatchNorm', BatchNorm) </del> backend.register_function('ConvNd', ConvNd) backend.register_function('RNN', RNN) backend.register_function('RNNTanhCell', RNNTanhCell)"} |
|
{"_id":"doc-en-pytorch-070db6c4fc6f6f907a67f5eff63dda2759f93a2cf19670bccc99c9ab7375a073","title":"","text":"| :attr:`stride` controls the stride for the cross-correlation. | If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides <del> for :attr:`padding` number of points | :attr:`dilation` controls the spacing between the kernel points. It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does. | :attr:`groups` controls the connections between inputs and outputs. </del> <ins> for :attr:`padding` number of points. | :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm. It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does. | :attr:`groups` controls the connections between inputs and outputs. `in_channels` and `out_channels` must both be divisible by `groups`. </ins> | At groups=1, all inputs are convolved to all outputs. | At groups=2, the operation becomes equivalent to having two conv layers side by side, each seeing half the input channels, and producing half the output channels, and both subsequently concatenated. <ins> At groups=`in_channels`, each input channel is convolved with its own set of filters (of size `out_channels // in_channels`). </ins> .. note::"} |
|
{"_id":"doc-en-pytorch-f7031e660f7387bb371e278ab1f5e0deba5828cc04c655b5e42a5cb6a283bf7f","title":"","text":"| :attr:`stride` controls the stride for the cross-correlation. | If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides <del> for :attr:`padding` number of points | :attr:`dilation` controls the spacing between the kernel points. It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does. | :attr:`groups` controls the connections between inputs and outputs. </del> <ins> for :attr:`padding` number of points. | :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm. It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does. | :attr:`groups` controls the connections between inputs and outputs. `in_channels` and `out_channels` must both be divisible by `groups`. </ins> | At groups=1, all inputs are convolved to all outputs. | At groups=2, the operation becomes equivalent to having two conv layers side by side, each seeing half the input channels, and producing half the output channels, and both subsequently concatenated. <ins> At groups=`in_channels`, each input channel is convolved with its own set of filters (of size `out_channels // in_channels`). </ins> The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:"} |
|
{"_id":"doc-en-pytorch-ca8a9e7cde8b3e302e9883c007c0b71c952ec4d2b1b23b639294d25fe54d87c8","title":"","text":"composed of several input planes. This module can be seen as the gradient of Conv1d with respect to its input. <del> It is sometimes (but incorrectly) refered to as a deconvolutional operation. </del> <ins> It is also known as a fractionally-strided convolution or a deconvolution (although it is not an actual deconvolution operation). | :attr:`stride` controls the stride for the cross-correlation. | If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides for :attr:`padding` number of points. | If :attr:`output_padding` is non-zero, then the output is implicitly zero-padded on one side for :attr:`output_padding` number of points. | :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm. It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does. | :attr:`groups` controls the connections between inputs and outputs. `in_channels` and `out_channels` must both be divisible by `groups`. | At groups=1, all inputs are convolved to all outputs. | At groups=2, the operation becomes equivalent to having two conv layers side by side, each seeing half the input channels, and producing half the output channels, and both subsequently concatenated. At groups=`in_channels`, each input channel is convolved with its own set of filters (of size `out_channels // in_channels`). </ins> .. note::"} |
|
{"_id":"doc-en-pytorch-2a4353f25e6b66254df4b7bf1a55ef91eb3c6280d482b7114d6ee0c1c2abfae1","title":"","text":"output_padding (int or tuple, optional): Zero-padding added to one side of the output groups (int, optional): Number of blocked connections from input channels to output channels bias (bool, optional): If True, adds a learnable bias to the output <ins> dilation (int or tuple, optional): Spacing between kernel elements </ins> Shape: - Input: :math:`(N, C_{in}, L_{in})`"} |
|
{"_id":"doc-en-pytorch-29428830a26059a849496a55d799332d3518e5869a3f91cc1c2fde212359f495","title":"","text":"composed of several input planes. This module can be seen as the gradient of Conv2d with respect to its input. <del> It is sometimes (but incorrectly) refered to as a deconvolutional operation. </del> <ins> It is also known as a fractionally-strided convolution or a deconvolution (although it is not an actual deconvolution operation). </ins> | :attr:`stride` controls the stride for the cross-correlation. | If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides <del> for :attr:`padding` number of points </del> <ins> for :attr:`padding` number of points. </ins> | If :attr:`output_padding` is non-zero, then the output is implicitly zero-padded on one side <del> for :attr:`output_padding` number of points | :attr:`dilation` controls the spacing between the kernel points. It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does. | :attr:`groups` controls the connections between inputs and outputs. </del> <ins> for :attr:`output_padding` number of points. | :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm. It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does. | :attr:`groups` controls the connections between inputs and outputs. `in_channels` and `out_channels` must both be divisible by `groups`. </ins> | At groups=1, all inputs are convolved to all outputs. | At groups=2, the operation becomes equivalent to having two conv layers side by side, each seeing half the input channels, and producing half the output channels, and both subsequently concatenated. <ins> At groups=`in_channels`, each input channel is convolved with its own set of filters (of size `out_channels // in_channels`). </ins> The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`output_padding` can either be: <del> - a single ``int`` -- in which case the same value is used for the height and width dimension </del> <ins> - a single ``int`` -- in which case the same value is used for the height and width dimensions </ins> - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension, and the second `int` for the width dimension"} |
|
{"_id":"doc-en-pytorch-89807e395c83aa297144688018ba65eda0cbd3a26f5906aad71ea90d8b1422e8","title":"","text":"The transposed convolution operator multiplies each input value element-wise by a learnable kernel, and sums over the outputs from all input feature planes. <del> **This module can be seen as the exact reverse of Conv3d**. It is sometimes (but incorrectly) refered to as a deconvolutional operation. </del> <ins> This module can be seen as the gradient of Conv3d with respect to its input. It is also known as a fractionally-strided convolution or a deconvolution (although it is not an actual deconvolution operation). </ins> | :attr:`stride` controls the stride for the cross-correlation. | If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides <del> for :attr:`padding` number of points </del> <ins> for :attr:`padding` number of points. </ins> | If :attr:`output_padding` is non-zero, then the output is implicitly zero-padded on one side <del> for :attr:`output_padding` number of points | :attr:`groups` controls the connections between inputs and outputs. </del> <ins> for :attr:`output_padding` number of points. | :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm. It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does. | :attr:`groups` controls the connections between inputs and outputs. `in_channels` and `out_channels` must both be divisible by `groups`. </ins> | At groups=1, all inputs are convolved to all outputs. | At groups=2, the operation becomes equivalent to having two conv layers side by side, each seeing half the input channels, and producing half the output channels, and both subsequently concatenated. <ins> At groups=`in_channels`, each input channel is convolved with its own set of filters (of size `out_channels // in_channels`). </ins> The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`output_padding` can either be: <del> - a single ``int`` -- in which case the same value is used for the height and width dimension </del> <ins> - a single ``int`` -- in which case the same value is used for the depth, height and width dimensions </ins> - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension, the second `int` for the width dimension and the third `int` for the width dimension"} |
|
{"_id":"doc-en-pytorch-e603c4dc93ed38afc2495b61d9ec1a735327d801673d781f612664d7b18db9f0","title":"","text":"{ PyObject_GC_UnTrack(self); THPFunction_clear(self); <ins> self->cdata_ptr.~weak_ptr(); </ins> self->cdata.~PyFunction(); Py_TYPE(self)->tp_free((PyObject*)self); }"} |
|
{"_id":"doc-en-pytorch-0a77384ef5a6ec2235681b5740207d2b05aaf28faa94cc9f12de1822b7e88284","title":"","text":"// most fields THPFunction* self = (THPFunction*)obj; new (&self->cdata) torch::autograd::PyFunction(obj); <ins> new (&self->cdata_ptr) std::weak_ptr<torch::autograd::PyFunction>(); </ins> self->cdata.num_inputs = -1; self->cdata.is_stochastic = PyObject_IsInstance(obj, THPStochasticFunctionClass); return obj;"} |
|
{"_id":"doc-en-pytorch-795b2ab192cab92f70f1710949659bdbab7d96fb947e1eb04e3bd1a080cfaf4a","title":"","text":"} }; <ins> // Similar to shared_from_this. There's a problem that the Python object // and its cdata depend on each other being alive, so we can't keep // shared_ptrs as members, but we'd like to be able to manage the lifetime of // the objects using shared_ptrs in the C++ graph. The only way to get a new // shared_ptr that references them is through THPFunction_asFunction. When // called for the first time it will allocate a new shared_ptr and save a // weak_ptr in cdata_ptr attr. Later, when we try to take another reference, // we'll try to lock cdata_ptr and return its value if successful. Otherwise it // means that all shared_ptrs returned previously have been freed, so we can // create a new one. This ensures that this object is managed by at most one // shared_ptr control block at any time - a guarantee we depend on in other places // (e.g. we use weak_ptrs in SavedVariable because we know it won't go out of scope). </ins> std::shared_ptr<PyFunction> THPFunction_asFunction(THPFunction* self) { if (!self) { return std::shared_ptr<PyFunction>(); } Py_INCREF((PyObject*)self); <del> return std::shared_ptr<PyFunction>(&self->cdata, Decref()); </del> <ins> auto ptr = self->cdata_ptr.lock(); if (ptr) return ptr; ptr = std::shared_ptr<PyFunction>(&self->cdata, Decref()); self->cdata_ptr = ptr; return ptr; </ins> }"} |
|
{"_id":"doc-en-pytorch-4c76b9791fe29abeb43ad1113e76e402f5a05f42386d55c8621cc65114c5bec7","title":"","text":"std::vector<bool> *is_variable_input; char has_freed_buffers; <ins> // See a comment in THPFucntion_asFunction for details about this field. std::weak_ptr<torch::autograd::PyFunction> cdata_ptr; </ins> torch::autograd::PyFunction cdata; };"} |
|
{"_id":"doc-en-pytorch-6217b780b346a24b481b52fa16ca32bab553e766bdf03a23a5beeb3ab9e75d8e","title":"","text":"// should have saved the grad accumulator. Even if the Variable no longer // alive, the accumulator should be kept alive by the references in the graph). if (requires_grad && !grad_fn && weak_grad_fn.expired() && grad_accumulator.expired()) <del> throw std::logic_error(\"No grad accumulator for a saved leaf!\"); </del> <ins> throw std::logic_error(\"No grad accumulator for a saved leaf!\"); </ins> new_var->grad_accumulator = grad_accumulator; return new_var;"} |
|
{"_id":"doc-en-pytorch-b62bb85e91269872389a86680d531177e7ea2d3df3439640328fac38c35f4ace","title":"","text":"LDFLAGS=\"-L$INSTALL_DIR/lib \" LD_POSTFIX=\".so.1\" LD_POSTFIX_UNVERSIONED=\".so\" <del> if [[ $(uname) == 'Darwin' ]]; then </del> <ins> if [[ $(uname) == 'Darwin' ]]; then </ins> LDFLAGS=\"$LDFLAGS -Qunused-arguments -Wl,-rpath,@loader_path\" LD_POSTFIX=\".1.dylib\" LD_POSTFIX_UNVERSIONED=\".dylib\""} |
|
{"_id":"doc-en-pytorch-786f69c0bf8a2720026de6b219dea5dc3377a7d91fd6be8247ffec9c42571275","title":"","text":"-DCMAKE_CXX_FLAGS=\"$C_FLAGS $CPP_FLAGS\" make install cp \"lib/libnccl.so.1\" \"${INSTALL_DIR}/lib/libnccl.so.1\" <del> ln -s \"${INSTALL_DIR}/lib/libnccl.so.1\" \"${INSTALL_DIR}/lib/libnccl.so\" </del> <ins> if [ ! -f \"${INSTALL_DIR}/lib/libnccl.so\" ]; then ln -s \"${INSTALL_DIR}/lib/libnccl.so.1\" \"${INSTALL_DIR}/lib/libnccl.so\" fi </ins> cd ../.. }"} |
|
{"_id":"doc-en-pytorch-e088ba7eb06236af95ffbf20892258359ddcc1f965fc7285966c25650f6bbb84","title":"","text":"if arg['name'] in ['self', 'state', 'dataType', 'handle']: arg['ignore_check'] = True declaration['options'] = self.filter_unique_options(declaration['options']) <del> return declarations </del> <ins> return [d for d in declarations if not d.get('only_register', False)] </ins> def filter_unique_options(self, options): def signature(option):"} |
|
{"_id":"doc-en-pytorch-ee807a8e8ebf0fd8542ccbaf50e041a6ab1d275e3125a10ba59b21252b2e1b13","title":"","text":"if hasattr(lib, 'cudnnGetErrorString'): lib.cudnnGetErrorString.restype = ctypes.c_char_p __cudnn_version = lib.cudnnGetVersion() <ins> compile_version = torch._C._cudnn_version() # Check that cuDNN major and minor versions match if (__cudnn_version // 100) != (compile_version // 100): raise RuntimeError( 'cuDNN version mismatch: PyTorch was compiled against {} ' 'but linked against {}'.format(compile_version, __cudnn_version)) </ins> else: lib = None return lib"} |
|
{"_id":"doc-en-pytorch-279f897a3c8e0683bf1478390a961f1ad962cef1822c20e026c9288bcad6f801","title":"","text":"- THTensor* output - std::vector<int> pad - std::vector<int> stride <del> - std::vector<int> dilation </del> <ins> - std::vector<int> dilation </ins> - int groups - bool benchmark ]]"} |
|
{"_id":"doc-en-pytorch-fdc3cf1a5a94c106ca38b27943089513289aa388eb7a092c77a71e0dcce94438","title":"","text":"- bool training - double epsilon ]] <ins> [[ name: cudnn_version only_register: True ]] static PyObject * THCUDNN_cudnn_version(PyObject *self, PyObject *args) { return PyLong_FromLong(CUDNN_VERSION); } </ins>"} |
|
{"_id":"doc-en-pytorch-3a4ab99ab856219ac85201364e4bc4edc86046263a15d798b21c81b505236150","title":"","text":"torch.mm(a, b, out=self.output) else: if self.transA: <del> a = a.transpose(2, 3) </del> <ins> a = a.transpose(1, 2) </ins> if self.transB: <del> b = b.transpose(2, 3) </del> <ins> b = b.transpose(1, 2) </ins> self.output.resize_(a.size(0), a.size(1), b.size(2)) torch.bmm(a, b, out=self.output)"} |
|
{"_id":"doc-en-pytorch-14ae05be02173d399ec555be9e0cbceb1564c3c390e31fc761ea536caad90432","title":"","text":"\"\"\" return self._apply(lambda t: t.cuda(device_id)) <del> def cpu(self, device_id=None): </del> <ins> def cpu(self): </ins> \"\"\"Moves all model parameters and buffers to the CPU.\"\"\" return self._apply(lambda t: t.cpu())"} |
|
{"_id":"doc-en-pytorch-b934b16f7e71a839229d7249bd3f6ee13378fc4decd660b53578b16485160f19","title":"","text":"optimizer (Optimizer): Wrapped optimizer. step_size (int): Period of learning rate decay. gamma (float): Multiplicative factor of learning rate decay. <del> Default: -0.1. </del> <ins> Default: 0.1. </ins> last_epoch (int): The index of last epoch. Default: -1. Example:"} |
|
{"_id":"doc-en-pytorch-8a512103206f66edc9a58f9968e1199ec6631cce0dc5816e4d83e5be73846071","title":"","text":"optimizer (Optimizer): Wrapped optimizer. milestones (list): List of epoch indices. Must be increasing. gamma (float): Multiplicative factor of learning rate decay. <del> Default: -0.1. </del> <ins> Default: 0.1. </ins> last_epoch (int): The index of last epoch. Default: -1. Example:"} |
|
{"_id":"doc-en-pytorch-a55794577819384572f0c50a7d045ff8fde16351069652f9e271ed0eee6d2de5","title":"","text":"for i in range(10): Variable(torch.randn(10, 10), _grad_fn=CollectOnDelete()) <del> @unittest.skipIf(not torch.cuda.is_available() or torch.cuda.device_count() < 2, \"CUDA not available or <2 GPUs detected\") </del> <ins> @unittest.skipIf(torch.cuda.device_count() < 2, \"no multi-GPU\") </ins> def test_unused_output_gpu(self): from torch.nn.parallel._functions import Broadcast x = Variable(torch.randn(5, 5).float().cuda(), requires_grad=True)"} |
|
{"_id":"doc-en-pytorch-f2be72fdc7a198558b921f225f231c15560de2e22c0341ca0a22e60b669354d6","title":"","text":"y.sum().backward() self.assertEqual(x.grad.data, torch.ones(5, 5) * 2) <ins> @unittest.skipIf(torch.cuda.device_count() < 2, \"no multi-GPU\") def test_backward_device(self): # check that current device matches the variable's device device = [None] class Identity(torch.autograd.Function): @staticmethod def forward(ctx, x): return x.clone() @staticmethod def backward(ctx, grad_output): device[0] = torch.cuda.current_device() return grad_output.clone() v = Variable(torch.randn(1).cuda(1), requires_grad=True) Identity.apply(v).backward() self.assertEqual(device[0], 1) </ins> def test_detach(self): x = Variable(torch.randn(10, 10), requires_grad=True) y = x + 2"} |
|
{"_id":"doc-en-pytorch-e3bfb9ebe3323f645dba9378592101ba727cf57c670fb6a7a311f2074de13510","title":"","text":"#include \"torch/csrc/autograd/engine.h\" #include \"torch/csrc/autograd/functions/basic_ops.h\" <ins> #include \"torch/csrc/utils/auto_gpu.h\" </ins> #include <atomic> #include <condition_variable>"} |
|
{"_id":"doc-en-pytorch-aa831fc58c1f89901c5a38c90c46b4b4cafa60d966ef73cf6b80d714b6704966","title":"","text":"// This Engine's ReadyQueues and their corresponding threads are leaked here Engine::~Engine() = default; <del> auto Engine::thread_main(std::shared_ptr<ReadyQueue> queue) -> void { </del> <ins> auto Engine::thread_main(std::shared_ptr<ReadyQueue> queue, int device) -> void { </ins> THInferNumThreads(); <ins> AutoGPU guard(device); </ins> while (1) { FunctionTask task = queue->pop_back(); if (!task.base->has_error.load()) {"} |
|
{"_id":"doc-en-pytorch-baf0d2710879c234f73168a1206503d3c970f7f52c16f1c9de9beb3034baf728","title":"","text":"num_devices = 0; } #endif <del> ready_queues = std::vector<std::shared_ptr<ReadyQueue>>(num_devices + 1); for (auto& queue : ready_queues) { </del> <ins> int num_threads = num_devices + 1; ready_queues = std::vector<std::shared_ptr<ReadyQueue>>(num_threads); for (int i = 0; i < num_threads; ++i) { auto& queue = ready_queues[i]; </ins> queue.reset(new ReadyQueue()); <del> std::thread t(&Engine::thread_main, this, queue); </del> <ins> std::thread t(&Engine::thread_main, this, queue, i - 1); </ins> t.detach(); } }"} |
|
{"_id":"doc-en-pytorch-1abbb3415744379d52d8c2ba818c334727821726a0b38019641b613c5a8be9d7","title":"","text":"void evaluate_function(FunctionTask& task); ReadyQueue& ready_queue(int device); void start_threads(); <del> virtual void thread_main(std::shared_ptr<ReadyQueue> queue); </del> <ins> virtual void thread_main(std::shared_ptr<ReadyQueue> queue, int device); </ins> virtual void thread_on_exception(FunctionTask& task, std::exception& e); std::once_flag start_threads_flag;"} |
|
{"_id":"doc-en-pytorch-2ed89dd3c95920b727070ccfadba0f09774b1e12518b6692510064e11c0deb54","title":"","text":"}; struct PythonEngine : public Engine { <del> virtual void thread_main(std::shared_ptr<ReadyQueue> queue) override { </del> <ins> virtual void thread_main(std::shared_ptr<ReadyQueue> queue, int device) override { </ins> // Create a PyThreadState, but release the GIL. This lets AutoGIL calls // inside thread_main acquire the GIL without having to create a new // PyThreadState each time. AutoGIL gil; AutoNoGIL no_gil; <del> Engine::thread_main(queue); </del> <ins> Engine::thread_main(queue, device); </ins> } virtual void thread_on_exception(FunctionTask& task, std::exception& e) override {"} |
|
{"_id":"doc-en-pytorch-c5c2d2dffc46beed1338b355e27295aadf444df8e6a28617528a626aa3a130a4","title":"","text":"#define IS_CUDA false #define CUDA_FLOAT false #else #define IS_CUDA true <del> #define CUDA_BYTE defined(THC_REAL_IS_BYTE) #define CUDA_CHAR defined(THC_REAL_IS_CHAR) #define CUDA_SHORT defined(THC_REAL_IS_SHORT) #define CUDA_INT defined(THC_REAL_IS_INT) #define CUDA_LONG defined(THC_REAL_IS_LONG) #define CUDA_FLOAT defined(THC_REAL_IS_FLOAT) #define CUDA_DOUBLE defined(THC_REAL_IS_DOUBLE) #define CUDA_HALF defined(THC_REAL_IS_HALF) </del> <ins> #if defined(THC_REAL_IS_BYTE) #define CUDA_BYTE 1 #else #define CUDA_BYTE 0 </ins> #endif <ins> #if defined(THC_REAL_IS_CHAR) #define CUDA_CHAR 1 #else #define CUDA_CHAR 0 #endif #if defined(THC_REAL_IS_SHORT) #define CUDA_SHORT 1 #else #define CUDA_SHORT 0 #endif #if defined(THC_REAL_IS_INT) #define CUDA_INT 1 #else #define CUDA_INT 0 #endif #if defined(THC_REAL_IS_LONG) #define CUDA_LONG 1 #else #define CUDA_LONG 0 #endif #if defined(THC_REAL_IS_FLOAT) #define CUDA_FLOAT 1 #else #define CUDA_FLOAT 0 #endif #if defined(THC_REAL_IS_DOUBLE) #define CUDA_DOUBLE 1 #else #define CUDA_DOUBLE 0 #endif #if defined(THC_REAL_IS_HALF) #define CUDA_HALF 1 #else #define CUDA_HALF 0 #endif #endif // ifndef THC_GENERIC_FILE </ins> #if IS_CUDA #define THIndexTensor THCudaLongTensor #define THIndexTensor_(NAME) TH_CONCAT_2(THCudaLongTensor_,NAME)"} |
|
{"_id":"doc-en-pytorch-9a42bf85cbb32a092f35a5f8da89eeb443ef320598541904a01b6c81d9e6880b","title":"","text":"*tempValues__data = *t_data; *tempIndices__data = *tempIndices__dimOffset; }); <ins> THTensor_(free)(tempValues_); THLongTensor_free(tempIndices_); </ins> } if (!keepdim) {"} |
|
{"_id":"doc-en-pytorch-847390673fe92f667b6ffdc3dbdf1d493fcc043c0c48ff5b5023443c83941042","title":"","text":"Best practices -------------- <ins> Device-agnostic code ^^^^^^^^^^^^^^^^^^^^ Due to the structure of PyTorch, you may need to explicitly write device-agnostic (CPU or GPU) code; an example may be creating a new tensor as the initial hidden state of a recurrent neural network. The first step is to determine whether the GPU should be used or not. A common pattern is to use Python's `argparse` module to read in user arguments, and have a flag that can be used to disable CUDA, in combination with `torch.cuda.is_available()`. In the following, `args.cuda` results in a flag that can be used to cast tensors and modules to CUDA if desired:: import argparse import torch parser = argparse.ArgumentParser(description='PyTorch Example') parser.add_argument('--disable-cuda', action='store_true', help='Disable CUDA') args = parser.parse_args() args.cuda = not args.disable_cuda and torch.cuda.is_available() If modules or tensors need to be sent to the GPU, `args.cuda` can be used as follows:: x = torch.Tensor(8, 42) net = Network() if args.cuda: x = x.cuda() net.cuda() When creating tensors, an alternative to the if statement is to have a default datatype defined, and cast all tensors using that. An example when using a dataloader would be as follows:: dtype = torch.cuda.FloatTensor for i, x in enumerate(train_loader): x = Variable(x.type(dtype)) When working with multiple GPUs on a system, you can use the `CUDA_VISIBLE_DEVICES` environment flag to manage which GPUs are available to PyTorch. To manually control which GPU a tensor is created on, the best practice is to use the `torch.cuda.device()` context manager:: print(\"Outside device is 0\") # On device 0 (default in most scenarios) with torch.cuda.device(1): print(\"Inside device is 1\") # On device 1 print(\"Outside device is still 0\") # On device 0 If you have a tensor and would like to create a new tensor of the same type on the same device, then you can use the `.new()` function, which acts the same as a normal tensor constructor. Whilst the previously mentioned methods depend on the current GPU context, `new()` preserves the device of the original tensor. This is the recommended practice when creating modules in which new tensors/variables need to be created internally during the forward pass:: x_cpu = torch.FloatTensor(1) x_gpu = torch.cuda.FloatTensor(1) x_cpu_long = torch.LongTensor(1) y_cpu = x_cpu.new(8, 10, 10).fill_(0.3) y_gpu = x_gpu.new(x_gpu.size()).fill_(-5) y_cpu_long = x_cpu_long.new([[1, 2, 3]]) If you want to create a tensor of the same type and size of another tensor, and fill it with either ones or zeros, `torch.ones_like()` or `torch.zeros_like()` are provided as more convenient functions (which also preserve device):: x_cpu = torch.FloatTensor(1) x_gpu = torch.cuda.FloatTensor(1) y_cpu = torch.ones_like(x_cpu) y_gpu = torch.zeros_like(x_gpu) </ins> Use pinned memory buffers ^^^^^^^^^^^^^^^^^^^^^^^^^"} |
|
{"_id":"doc-en-pytorch-7ae06262e0f1135c2e07df5c166d0feeaecb57db11429bf53e1c2f0b63346a6f","title":"","text":"res2 += i * j self.assertEqual(res1, res2) <ins> # Test 0-strided for tname, _prec in types.items(): v1 = torch.randn(1).type(tname).expand(100) v2 = torch.randn(100).type(tname) res1 = torch.dot(v1, v2) res2 = 0 for i, j in zip(v1, v2): res2 += i * j self.assertEqual(res1, res2) def test_ger(self): types = { 'torch.DoubleTensor': 1e-8, 'torch.FloatTensor': 1e-4, } for tname, _prec in types.items(): v1 = torch.randn(100).type(tname) v2 = torch.randn(100).type(tname) res1 = torch.ger(v1, v2) res2 = torch.zeros(100, 100).type(tname) for i in range(100): for j in range(100): res2[i, j] = v1[i] * v2[j] self.assertEqual(res1, res2) # Test 0-strided for tname, _prec in types.items(): v1 = torch.randn(1).type(tname).expand(100) v2 = torch.randn(100).type(tname) res1 = torch.ger(v1, v2) res2 = torch.zeros(100, 100).type(tname) for i in range(100): for j in range(100): res2[i, j] = v1[i] * v2[j] self.assertEqual(res1, res2) def test_addmv(self): types = { 'torch.DoubleTensor': 1e-8, 'torch.FloatTensor': 1e-4, } for tname, _prec in types.items(): t = torch.randn(10).type(tname) m = torch.randn(10, 100).type(tname) v = torch.randn(100).type(tname) res1 = torch.addmv(t, m, v) res2 = torch.zeros(10).type(tname) res2 += t for i in range(10): for j in range(100): res2[i] += m[i, j] * v[j] self.assertEqual(res1, res2) # Test 0-strided for tname, _prec in types.items(): t = torch.randn(1).type(tname).expand(10) m = torch.randn(10, 1).type(tname).expand(10, 100) v = torch.randn(100).type(tname) res1 = torch.addmv(t, m, v) res2 = torch.zeros(10).type(tname) res2 += t for i in range(10): for j in range(100): res2[i] += m[i, j] * v[j] self.assertEqual(res1, res2) def test_addmm(self): types = { 'torch.DoubleTensor': 1e-8, 'torch.FloatTensor': 1e-4, } for tname, _prec in types.items(): M = torch.randn(10, 25).type(tname) m1 = torch.randn(10, 50).type(tname) m2 = torch.randn(50, 25).type(tname) res1 = torch.addmm(M, m1, m2) res2 = torch.zeros(10, 25).type(tname) res2 += M for i in range(10): for j in range(25): for k in range(50): res2[i, j] += m1[i, k] * m2[k, j] self.assertEqual(res1, res2) # Test 0-strided for tname, _prec in types.items(): M = torch.randn(10, 1).type(tname).expand(10, 25) m1 = torch.randn(10, 1).type(tname).expand(10, 50) m2 = torch.randn(50, 25).type(tname) res1 = torch.addmm(M, m1, m2) res2 = torch.zeros(10, 25).type(tname) res2 += M for i in range(10): for j in range(25): for k in range(50): res2[i, j] += m1[i, k] * m2[k, j] self.assertEqual(res1, res2) </ins> def _testMath(self, torchfn, mathfn): size = (10, 5) # contiguous"} |
|
{"_id":"doc-en-pytorch-0c1d2b21c3aed080ff0bb21e80fe229ef6f87b04f7463a20a1f6ac34395f0a04","title":"","text":"#define THC_GENERIC_FILE \"generic/THCTensorIndex.cu\" #else <ins> // Check tensor dimensions for index operations, and return the slice size. // src can be nullptr in case of indexFill: in that case it is ignored. static ptrdiff_t THCTensor_(getSliceSize)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *index, THCTensor *src) { int dstDims = THCTensor_(nDimension)(state, dst); int srcDims = (src == nullptr) ? dstDims : THCTensor_(nDimension)(state, src); THArgCheck(THCudaLongTensor_nDimension(state, index) == 1, 4, \"expecting vector of indices\"); THArgCheck(dim >= 0 && dim < dstDims, 2, \"Indexing dim is out of bounds\"); ptrdiff_t dstSliceSize = 1; for (int d = 0; d < dstDims; d++) { if (d != dim) { dstSliceSize *= dst->size[d]; } } if (src == nullptr) return dstSliceSize; THArgCheck(dim < srcDims, 3, \"Indexing dim is out of bounds\"); THArgCheck(THCudaLongTensor_nElement(state, index) == src->size[dim], 4, \"length of src.size[dim] is not equal to length of indices\"); ptrdiff_t srcSliceSize = 1; bool mismatch = false; if (dstDims != srcDims) mismatch = true; for (int d = 0; d < srcDims; d++) { if (d != dim) { srcSliceSize *= src->size[d]; if (!mismatch && dst->size[d] != src->size[d]) mismatch = true; } } THArgCheck(dstSliceSize == srcSliceSize, 2, \"Source/destination tensor have different slice sizes (%ld vs %ld)\", dstSliceSize, srcSliceSize); if (mismatch) { static bool warningShown = false; if (!warningShown) { warningShown = true; fprintf(stderr, \"Warning: source/destination slices have same size but different \" \"shape for an index operation. This behavior is deprecated.n\"); } } return dstSliceSize; } </ins> void THCTensor_(indexCopy_long)(THCState *state, THCTensor *dst, int dim, THLongTensor *indices, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));"} |
|
{"_id":"doc-en-pytorch-438f0cd197dc41837843b0b79dcabb3cb7066f26b861da7f1fd39c8db8f72698","title":"","text":"dims = THCudaLongTensor_nDimension(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); <del> ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); int srcDims = THCTensor_(nDimension)(state, src); cudaStream_t stream = THCState_getCurrentStream(state); THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3, \"expecting vector of indices\"); THArgCheck(dim < srcDims, 4, \"Indexing dim is out of bounds\"); THArgCheck(srcDims > 0, 2, \"Source tensor is empty\"); THArgCheck(numIndices == src->size[dim], 4, \"length of src.size[dim] is not equal to length of indices\"); int indContig = THCudaLongTensor_isContiguous(state, indices); </del> // The `src` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. <ins> ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, src); </ins> ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src); int64_t dstCopyDimSize = THCTensor_(size)(state, dst, dim); <del> ptrdiff_t sliceSize = srcTotalSize / numIndices; </del> <ins> ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); cudaStream_t stream = THCState_getCurrentStream(state); int indContig = THCudaLongTensor_isContiguous(state, indices); </ins> int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;"} |
|
{"_id":"doc-en-pytorch-606249d4e5065e174d0bb8445e4f0794e1c65b557690f03981584032238bd0cb","title":"","text":"dims = THCudaLongTensor_nDimension(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); <del> ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); int srcDims = THCTensor_(nDimension)(state, src); cudaStream_t stream = THCState_getCurrentStream(state); THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3, \"expecting vector of indices\"); THArgCheck(dim < srcDims, 4, \"Indexing dim is out of bounds\"); THArgCheck(srcDims > 0, 2, \"Source tensor is empty\"); THArgCheck(numIndices == src->size[dim], 4, \"length of src.size[dim] is not equal to length of indices\"); int indContig = THCudaLongTensor_isContiguous(state, indices); </del> // The `src` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. <ins> ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, src); </ins> ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src); int64_t dstAddDimSize = THCTensor_(size)(state, dst, dim); <del> ptrdiff_t sliceSize = srcTotalSize / numIndices; </del> <ins> ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); cudaStream_t stream = THCState_getCurrentStream(state); int indContig = THCudaLongTensor_isContiguous(state, indices); </ins> int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;"} |
|
{"_id":"doc-en-pytorch-a7f5f31517bc2f93e661073a8055244cb988c558177acffc671e7b7f0c3b5dfc","title":"","text":"dims = THCudaLongTensor_nDimension(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); <del> ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); int srcDims = THCTensor_(nDimension)(state, dst); cudaStream_t stream = THCState_getCurrentStream(state); THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3, \"expecting vector of indices\"); THArgCheck(dim < srcDims, 4, \"Indexing dim is out of bounds\"); THArgCheck(srcDims > 0, 2, \"Source tensor is empty\"); int indContig = THCudaLongTensor_isContiguous(state, indices); </del> // The `src` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. <ins> ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, nullptr); </ins> ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst); int64_t dstFillDimSize = THCTensor_(size)(state, dst, dim); <del> ptrdiff_t sliceSize = dstTotalSize / dstFillDimSize; </del> <ins> ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); cudaStream_t stream = THCState_getCurrentStream(state); int indContig = THCudaLongTensor_isContiguous(state, indices); </ins> int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;"} |
|
{"_id":"doc-en-pytorch-8dc6149b5f5badde19efd573c34248a8d068fdf4c78b544c3b62cf1cae95a6e3","title":"","text":"# use faster C loader if available from yaml import CLoader as YamlLoader except ImportError: <del> from yaml import YamlLoader </del> <ins> from yaml import Loader as YamlLoader </ins> GENERATED_COMMENT = CodeTemplate(\"\"\""} |
|
{"_id":"doc-en-pytorch-98a65139c26c58918173adc25dd43e2b8fe912cd091c33627859786ced17cd5a","title":"","text":"r\"\"\" expm1_() -> Tensor <del> In-place version of :meth:`~Tensor.exp` </del> <ins> In-place version of :meth:`~Tensor.expm1` </ins> \"\"\") add_docstr_all('exponential_',"} |
|
{"_id":"doc-en-pytorch-d5c3f8a043c1a6dc370a73393f9fc9679b408c0c7b294a3db0024c7905e75077","title":"","text":"THLongStorage_free(topKSize); #define RUN_K(INDEX_T, DIM, DIR) <del> gatherTopK<real, INDEX_T, DIM, DIR> </del> <ins> gatherTopK<real, INDEX_T, DIM, DIR> </ins> <<<grid, block, 0, THCState_getCurrentStream(state)>>>( inputInfo, sliceSize, "} |
|
{"_id":"doc-en-pytorch-679d810bd7ed5136accea4c7418b780d7eafc6f32420c98e43f99f94af794701","title":"","text":"} #define RUN_T(INDEX_T) <del> TensorInfo<real, INDEX_T> inputInfo = getTensorInfo<THCTensor, INDEX_T>(state, input); TensorInfo<real, INDEX_T> topKInfo = getTensorInfo<THCTensor, INDEX_T>(state, topK); </del> <ins> TensorInfo<real, INDEX_T> inputInfo = getTensorInfo<THCTensor, INDEX_T>(state, input); TensorInfo<real, INDEX_T> topKInfo = getTensorInfo<THCTensor, INDEX_T>(state, topK); </ins> TensorInfo<int64_t, INDEX_T> indicesInfo = getTensorInfo<THCudaLongTensor, INDEX_T>(state, indices); "} |
|
{"_id":"doc-en-pytorch-bf25f2e78031322135546bbc6eda7e79560aab0a0cb5a330956959ec34d5c1f2","title":"","text":"int collapseIndicesDim = indicesInfo.collapseDims(dim); int64_t inputSlices = 1; <del> int64_t topKSlices = 1; for (int i = 0; i < numDims; ++i) { </del> <ins> for (int i = 0; i < inputInfo.dims; ++i) { </ins> inputSlices *= inputInfo.sizes[i]; <ins> } int64_t topKSlices = 1; for (int i = 0; i < topKInfo.dims; ++i) { </ins> topKSlices *= topKInfo.sizes[i]; } "} |
|
{"_id":"doc-en-pytorch-07ad46ccd3b35b225c93307016f8c2417fcdbe18f312fc8cd9bb83f573139683","title":"","text":"] if WITH_CUDNN: main_libraries += ['cudnn'] <del> library_dirs.append(CUDNN_LIB_DIR) </del> <ins> library_dirs.insert(0, CUDNN_LIB_DIR) </ins> # NOTE: these are at the front, in case there's another cuDNN in CUDA path include_dirs.insert(0, CUDNN_INCLUDE_DIR) if not IS_WINDOWS:"} |
|
{"_id":"doc-en-pytorch-60e3bc443896180daa3916c38d9d90d5b3e0138f03760019aba7beaac719f978","title":"","text":"python setup.py install cd test/ echo \"Ninja version: $(ninja --version)\" <del> sh run_test.sh </del> <ins> sh run_test.sh -- -v </ins> echo \"BUILD PASSED\""} |
|
{"_id":"doc-en-pytorch-a2d50b89d13990a6674095f7da658955f6dca03e30045ecebb0ad411faf8265d","title":"","text":"export PATH=\"$PWD:$PATH\" popd <del> time test/run_test.sh </del> <ins> time test/run_test.sh -- -v </ins> rm -rf ninja"} |
|
{"_id":"doc-en-pytorch-bf19cdd3b1e009d8608c593192d698a31682a4f2612e16012caf069b6a3c0506","title":"","text":"python ..ci_scriptsdelete_image.py 7z x %IMAGE_COMMIT_TAG%.7z <del> sh run_test.sh </del> <ins> sh run_test.sh -- -v </ins> EOL"} |
|
{"_id":"doc-en-pytorch-f25dbdd21ed338103f5ceffb8dc8567343f91e643ea156ccf1b52fe5dd283810","title":"","text":"THCDeviceTensor<THCIndex_t, 1> target, THCDeviceTensor<Dtype, 1> output, Dtype *weights, <ins> int n_classes, </ins> int ignore_index) { CUDA_KERNEL_LOOP(index, batch_size) {"} |
|
{"_id":"doc-en-pytorch-fedda46d10136b30896303304b40903405a0f57d4e0246550f02a9deb34f8d03","title":"","text":"output[index] = ScalarConvert<int, Dtype>::to(0); continue; } <ins> assert(cur_target >= 0 && cur_target < n_classes); </ins> Dtype weight = weights ? weights[cur_target] : ScalarConvert<int, Dtype>::to(1); output[index] = -weight * input[index][cur_target];"} |
|
{"_id":"doc-en-pytorch-38635f5ed81c34c96d88693411b87cb69cd435ddda1898d2b51e2b4838becce1","title":"","text":"THCDeviceTensor<Dtype, 1> gradOutput, THCDeviceTensor<Dtype, 2> gradInput, Dtype *weights, <ins> int n_classes, </ins> int ignore_index) { CUDA_KERNEL_LOOP(index, batch_size) {"} |
|
{"_id":"doc-en-pytorch-e07a9efd2cd7a58c4c39247b1e157c77e8eb7156d8f2882cecd23fd54fd8897e","title":"","text":"if (cur_target == ignore_index) { continue; } <ins> assert(cur_target >= 0 && cur_target < n_classes); </ins> Dtype weight = weights ? weights[cur_target] : ScalarConvert<int, Dtype>::to(1); gradInput[index][cur_target] = -weight * gradOutput[index];"} |
|
{"_id":"doc-en-pytorch-d3fb4ebdce6591812cd12149652cdc7becc9a9ad66902eea4d1a71d24862e604","title":"","text":"toDeviceTensor<THCIndex_t, 1>(state, target), toDeviceTensor<real, 1>(state, output), weights ? THCTensor_(data)(state, weights) : NULL, <ins> n_classes, </ins> ignore_index); <ins> THCudaCheck(cudaGetLastError()); </ins> if (weights) { THCTensor_(free)(state, weights); }"} |
|
{"_id":"doc-en-pytorch-dd77b181434484ed376963610bcc97d0d2d487e868885c180ada5d737f9e88f5","title":"","text":"toDeviceTensor<real, 1>(state, gradOutput), toDeviceTensor<real, 2>(state, gradInput), weights ? THCTensor_(data)(state, weights) : NULL, <ins> n_classes, </ins> ignore_index); <ins> THCudaCheck(cudaGetLastError()); </ins> if (weights) { THCTensor_(free)(state, weights); }"} |
|
{"_id":"doc-en-pytorch-0b1460892c58254a6587f1685bd25c10911face2e6316d8a8407809c162af679","title":"","text":"int batch_size = THTensor_(size)(input, 0); THTensor_(resize1d)(output, batch_size); <ins> int invalid_target = -1; // We cannot throw an exception inside omp parallel </ins> int i; #pragma omp parallel for private(i) for (i = 0; i < batch_size; i++) { int cur_target = THTensor_fastGet1d(target, i) - TH_INDEX_BASE; <del> if (cur_target == ignore_index) { THTensor_fastSet1d(output, i, 0.0f); continue; </del> <ins> if (cur_target >= 0 && cur_target < n_classes) { if (cur_target == ignore_index) { THTensor_fastSet1d(output, i, 0.0f); continue; } real cur_weight = weights ? THTensor_fastGet1d(weights, cur_target) : 1.0f; THTensor_fastSet1d(output, i, -THTensor_fastGet2d(input, i, cur_target) * cur_weight); } else { THAtomicCompareAndSwap(&invalid_target, -1, cur_target); </ins> } <del> real cur_weight = weights ? THTensor_fastGet1d(weights, cur_target) : 1.0f; THTensor_fastSet1d(output, i, -THTensor_fastGet2d(input, i, cur_target) * cur_weight); </del> <ins> } if (invalid_target >= 0) { THError(\"Target %d out of bounds\", invalid_target); </ins> } return;"} |
|
{"_id":"doc-en-pytorch-406d300c4698ef8ebb5337a06de7733923ff9b8c1d75f141fdd19816f61b98ae","title":"","text":"import os.path import torch.nn.modules.activation import torch.autograd <ins> import matplotlib matplotlib.use('Agg') </ins> import pylab"} |
|
{"_id":"doc-en-pytorch-aebacc799c3a81586b0c547a02debca587aaa2d0618d2fe2a2717dd0dbd9fc61","title":"","text":"res2[i] = max(min_val, min(max_val, res2[i])) self.assertEqual(res1, res2) <ins> out = m1.clone() torch.clamp(m1, min=min_val, max=max_val, out=out) self.assertEqual(out, res1) </ins> res1 = torch.clamp(m1, min=min_val) res2 = m1.clone() for i in iter_indices(res2): res2[i] = max(min_val, res2[i]) self.assertEqual(res1, res2) <ins> torch.clamp(m1, min=min_val, out=out) self.assertEqual(out, res1) </ins> res1 = torch.clamp(m1, max=max_val) res2 = m1.clone() for i in iter_indices(res2): res2[i] = min(max_val, res2[i]) self.assertEqual(res1, res2) <ins> torch.clamp(m1, max=max_val, out=out) self.assertEqual(out, res1) </ins> def test_pow(self): # [res] torch.pow([res,] x)"} |
|
{"_id":"doc-en-pytorch-47fdd2074fae54076b9495d25a94a841524a59ccda55df179b17cbd54b477ef8","title":"","text":"} } <del> static Tensor dispatch_clamp(const Tensor & self, Scalar min, Scalar max) { AutoNoGIL no_gil; AutoGPU auto_gpu(self); return self.clamp(min, max); } static Tensor dispatch_clamp_min(const Tensor & self, Scalar min) { AutoNoGIL no_gil; AutoGPU auto_gpu(self); return self.clamp_min(min); } static Tensor dispatch_clamp_max(const Tensor & self, Scalar max) { AutoNoGIL no_gil; AutoGPU auto_gpu(self); return self.clamp_max(max); } </del> // The Python clamp() syntax has to be mapped to one of three C++ functions static PyObject * THPVariable_clamp(PyObject* module, PyObject* args, PyObject* kwargs) { HANDLE_TH_ERRORS static PythonArgParser parser({ <del> \"clamp(Tensor input, Scalar min=None, Scalar max=None)\", </del> <ins> \"clamp(Tensor input, Scalar min=None, Scalar max=None, *, Tensor out=None)\", </ins> }); <del> ParsedArgs<3> parsed_args; </del> <ins> ParsedArgs<4> parsed_args; </ins> auto r = parser.parse(args, kwargs, parsed_args); if (!r.isNone(1) && !r.isNone(2)) { <del> return THPVariable_Wrap(dispatch_clamp(r.tensor(0), r.scalar(1), r.scalar(2))); </del> <ins> if (!r.isNone(3)) { return wrap(dispatch_clamp(r.tensor(0), r.scalar(1), r.scalar(2), r.tensor(3))); } else { return wrap(dispatch_clamp(r.tensor(0), r.scalar(1), r.scalar(2))); } </ins> } else if (!r.isNone(1)) { <del> return THPVariable_Wrap(dispatch_clamp_min(r.tensor(0), r.scalar(1))); </del> <ins> if (!r.isNone(3)) { return wrap(dispatch_clamp_min(r.tensor(0), r.scalar(1), r.tensor(3))); } else { return wrap(dispatch_clamp_min(r.tensor(0), r.scalar(1))); } </ins> } else if (!r.isNone(2)) { <del> return THPVariable_Wrap(dispatch_clamp_max(r.tensor(0), r.scalar(2))); </del> <ins> if (!r.isNone(3)) { return wrap(dispatch_clamp_max(r.tensor(0), r.scalar(2), r.tensor(3))); } else { return wrap(dispatch_clamp_max(r.tensor(0), r.scalar(2))); } </ins> } else { throw std::runtime_error(\"At least one of 'min' or 'max' must not be None\"); } <ins> Py_RETURN_NONE; </ins> END_HANDLE_TH_ERRORS }"} |
|
{"_id":"doc-en-pytorch-9ec1744c3c99ad877b5de782ef7e6f996113b1f13b635199c006a6d8e8a19f56","title":"","text":"} } <ins> // manual dispatch code for clamp inline Tensor dispatch_clamp(const Tensor & self, Scalar min, Scalar max) { AutoNoGIL no_gil; AutoGPU auto_gpu(self); return self.clamp(min, max); } inline Tensor dispatch_clamp_min(const Tensor & self, Scalar min) { AutoNoGIL no_gil; AutoGPU auto_gpu(self); return self.clamp_min(min); } inline Tensor dispatch_clamp_max(const Tensor & self, Scalar max) { AutoNoGIL no_gil; AutoGPU auto_gpu(self); return self.clamp_max(max); } inline Tensor & dispatch_clamp(const Tensor & self, Scalar min, Scalar max, Tensor result) { AutoNoGIL no_gil; AutoGPU auto_gpu(result); return at::clamp_out(result, self, min, max); } inline Tensor & dispatch_clamp_min(const Tensor & self, Scalar min, Tensor result) { AutoNoGIL no_gil; AutoGPU auto_gpu(result); return at::clamp_min_out(result, self, min); } inline Tensor & dispatch_clamp_max(const Tensor & self, Scalar max, Tensor result) { AutoNoGIL no_gil; AutoGPU auto_gpu(result); return at::clamp_max_out(result, self, max); } </ins> ${py_method_dispatch} }} // namespace torch::autograd"} |
|
{"_id":"doc-en-pytorch-85de7ca993fe71820aa6914403ef4020c7c221a26a264b8143092c58d899fe53","title":"","text":"See :func:`torch.ormqr` \"\"\") <ins> add_docstr_all('permute', r\"\"\" permute(*dims) -> Tensor Permute the dimensions of this tensor. Args: *dims (int...): The desired ordering of dimensions Example: >>> x = torch.randn(2, 3, 5) >>> x.size() torch.Size([2, 3, 5]) >>> x.permute(2, 0, 1).size() torch.Size([5, 2, 3]) \"\"\") </ins> add_docstr_all('potrf', r\"\"\" potrf(upper=True) -> Tensor"} |
|
{"_id":"doc-en-pytorch-0cccd73354b58c3b5f70bf8d16e39eb3a4a5ab448c27359da1bd8edfcdaeb323","title":"","text":"endforeach(flag_var) endif() <del> set (CMAKE_CXX_FLAGS_DEBUG \"${CMAKE_CXX_FLAGS_DEBUG} -fno-omit-frame-pointer\") set (CMAKE_LINKER_FLAGS_DEBUG \"${CMAKE_STATIC_LINKER_FLAGS_DEBUG} -fno-omit-frame-pointer\") </del> <ins> set (CMAKE_CXX_FLAGS_DEBUG \"${CMAKE_CXX_FLAGS_DEBUG} -fno-omit-frame-pointer -O0\") set (CMAKE_LINKER_FLAGS_DEBUG \"${CMAKE_STATIC_LINKER_FLAGS_DEBUG} -fno-omit-frame-pointer -O0\") </ins> if (USE_ASAN) set (CMAKE_CXX_FLAGS_DEBUG \"${CMAKE_CXX_FLAGS_DEBUG} -fsanitize=address\") set (CMAKE_LINKER_FLAGS_DEBUG \"${CMAKE_STATIC_LINKER_FLAGS_DEBUG} -fsanitize=address\")"} |
|
{"_id":"doc-en-pytorch-2188b7f64d09abf523179bc9a17360014adcc6578ad04ba793084e0fac9b2123","title":"","text":"def forward(ctx, input, grid, padding_mode='zeros'): ctx.save_for_backward(input, grid) <ins> if input.device != grid.device: raise RuntimeError((\"input (device {}) and grid (device {}) must be on the same device\" + \"for grid_sampler\").format(input.device, grid.device)) </ins> if padding_mode == 'zeros': ctx.padding_mode = MODE_ZEROS elif padding_mode == 'border':"} |
|
{"_id":"doc-en-pytorch-06405422fd745b4c40f12f712d4a5d5c8b0538cc2e3c793902e24fdf9b7b87b3","title":"","text":"# (3) initialize mean square values and square gradient storage if not 'm' in state: <del> state['m'] = x.new().resize_as_(dfdx).fill_(1) </del> <ins> state['m'] = x.new().resize_as_(dfdx).zero_() </ins> state['tmp'] = x.new().resize_as_(dfdx)"} |
|
{"_id":"doc-en-pytorch-d135710ef973222ecdc593d43b4acb154189b4178c2fd4ce3644b483113544c1","title":"","text":"# State initialization if len(state) == 0: state['step'] = 0 <del> state['square_avg'] = grad.new().resize_as_(grad).fill_(1) </del> <ins> state['square_avg'] = grad.new().resize_as_(grad).zero_() </ins> square_avg = state['square_avg'] alpha = group['alpha']"} |
|
{"_id":"doc-en-pytorch-190b03deb889de299e62e3c7d1686cc75b678b77bbeb1bc1001f55660cec4eab","title":"","text":"lib_paths = list(filter(bool, [ os.getenv('CUDNN_LIB_DIR'), os.path.join(CUDA_HOME, 'lib'), <del> os.path.join(CUDA_HOME, 'lib64') </del> <ins> os.path.join(CUDA_HOME, 'lib64'), '/usr/lib/x86_64-linux-gnu/', </ins> ])) include_paths = list(filter(bool, [ os.getenv('CUDNN_INCLUDE_DIR'), os.path.join(CUDA_HOME, 'include'), <ins> '/usr/include/' </ins> ])) for path in lib_paths: if path is None or not os.path.exists(path):"} |
|
{"_id":"doc-en-pytorch-5d81fd04b9a7ebeda53a709b3bed97b75d90041357170141bf5753a3476e6720","title":"","text":"thisdir = path.dirname(__file__) libpaths = ['', path.join(thisdir, '../../lib')] if sys.platform.startswith('linux'): <del> libnames = ['libcudnn.so.5.1.5', 'libcudnn.so.5.1.3', 'libcudnn.so.5.0.5'] </del> <ins> libnames = ['libcudnn.so.5.1.5', 'libcudnn.so.5.1.3', 'libcudnn.so.5.0.5', 'libcudnn.so.5.1.10'] </ins> elif sys.platform == 'darwin': libnames = ['libcudnn.5.dylib'] else:"} |
|
{"_id":"doc-en-pytorch-77152ccdd008189dcd84c7362b1ae0b2b045baede513ba90c4a6ecc096558113","title":"","text":"is_master_only=True, requires=[\"binary_linux_manywheel_3_7m_cu102_devtoolset7_build\"], extra_props={ <del> \"resource_class\": \"gpu.medium\", </del> <ins> \"resource_class\": \"gpu.nvidia.small\", </ins> \"use_cuda_docker_runtime\": miniutils.quote((str(1))), }, ),"} |
|
{"_id":"doc-en-pytorch-e7b2ccc25ed16d72837d1d8a4b9213b79ac856fe2521ce498a0bc8e7772c9b4c","title":"","text":"# binary_linux_libtorch_3.6m_cpu_test: # environment: # BUILD_ENVIRONMENT: \"libtorch 3.6m cpu\" <del> # resource_class: gpu.medium </del> <ins> # resource_class: gpu.nvidia.small </ins> # <<: *binary_linux_test # # binary_linux_libtorch_3.6m_cu90_test: # environment: # BUILD_ENVIRONMENT: \"libtorch 3.6m cu90\" <del> # resource_class: gpu.medium </del> <ins> # resource_class: gpu.nvidia.small </ins> # <<: *binary_linux_test # docker_build_job:"} |
|
{"_id":"doc-en-pytorch-b8f80ee8a686dfcf445ccf14c31883bcdae221a0f6d56bf4ca9043aa64c44e42","title":"","text":"name: binary_linux_manywheel_3_7m_cu102_devtoolset7_test requires: - binary_linux_manywheel_3_7m_cu102_devtoolset7_build <del> resource_class: gpu.medium </del> <ins> resource_class: gpu.nvidia.small </ins> use_cuda_docker_runtime: \"1\" - binary_linux_test: build_environment: libtorch 3.7m cpu devtoolset7"} |
|
{"_id":"doc-en-pytorch-08a68e469bb00cd7c8204be07a4fdd6a0e80b32aa7000b3c8821986a0a33bf72","title":"","text":"# binary_linux_libtorch_3.6m_cpu_test: # environment: # BUILD_ENVIRONMENT: \"libtorch 3.6m cpu\" <del> # resource_class: gpu.medium </del> <ins> # resource_class: gpu.nvidia.small </ins> # <<: *binary_linux_test # # binary_linux_libtorch_3.6m_cu90_test: # environment: # BUILD_ENVIRONMENT: \"libtorch 3.6m cu90\" <del> # resource_class: gpu.medium </del> <ins> # resource_class: gpu.nvidia.small </ins> # <<: *binary_linux_test #"} |
|
{"_id":"doc-en-pytorch-e5ae77a5e08322e65369c99bb0e38344715024cff6a41f3418003b3cb4bc4e1f","title":"","text":"meant to be installed as pip packages) (default: False). relative_to (str, optional): path of the build file. Required when ``package is True``. It's best to use ``__file__`` for this argument. <del> kwargs: additional arguments that are passed to ffi to declar the </del> <ins> kwargs: additional arguments that are passed to ffi to declare the </ins> extension. See `Extension API reference`_ for details. .. _`Extension API reference`: https://docs.python.org/3/distutils/apiref.html#distutils.core.Extension"} |
|
{"_id":"doc-en-pytorch-60372d9aa71641cb924b2436784423b05696bfc1e6c71292d348c5e9861ec9a6","title":"","text":"# test is no more than 4 higher than the 10th available at the # start. This attempts to catch file descriptor leaks, but allows # one-off initialization that may use up a file descriptor <del> available_fds = self._get_next_fds(10) self.test_case.assertLessEqual( available_fds[-1] - self.next_fds[-1], 5) </del> <ins> # TODO: Disabled because this check is too flaky # available_fds = self._get_next_fds(10) # self.test_case.assertLessEqual( # available_fds[-1] - self.next_fds[-1], 5) </ins> self.test_case.assertFalse(self.has_shm_files()) return False"} |
|
|