diff --git a/README.md b/README.md index 6d63685b89c2b76f5e11118ded5969f4ebe71992..4d0561e803c34dc86855ced8fdaa54ca836ef8e2 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,7 @@ --- tags: - - kernel +- kernel --- - -![Status](https://hubwebhook.dholtz.com/shield?repo=kernels-community/activation) - ## Activation -Activation kernels from [vLLM](https://github.com/vllm-project/vllm/blob/main/csrc/activation_kernels.cu). - -Kernel source: https://github.com/huggingface/kernels-community/tree/main/activation - +Activation kernels from [vLLM](https://github.com/vllm-project/vllm/blob/main/csrc/activation_kernels.cu). \ No newline at end of file diff --git a/activation/activation_kernels.cu b/activation/activation_kernels.cu new file mode 100644 index 0000000000000000000000000000000000000000..839dc36ba4e29d34144b42b4f1ed40be03afd5eb --- /dev/null +++ b/activation/activation_kernels.cu @@ -0,0 +1,204 @@ +#include <ATen/cuda/CUDAContext.h> +#include <torch/all.h> +#include <c10/cuda/CUDAGuard.h> + +#include <cmath> + +#include "cuda_compat.h" +#include "dispatch_utils.h" + +namespace vllm { + +// Activation and gating kernel template. +template <typename scalar_t, scalar_t (*ACT_FN)(const scalar_t&)> +__global__ void act_and_mul_kernel( + scalar_t* __restrict__ out, // [..., d] + const scalar_t* __restrict__ input, // [..., 2, d] + const int d) { + const int64_t token_idx = blockIdx.x; + for (int64_t idx = threadIdx.x; idx < d; idx += blockDim.x) { + const scalar_t x = VLLM_LDG(&input[token_idx * 2 * d + idx]); + const scalar_t y = VLLM_LDG(&input[token_idx * 2 * d + d + idx]); + out[token_idx * d + idx] = ACT_FN(x) * y; + } +} + +template <typename T> +__device__ __forceinline__ T silu_kernel(const T& x) { + // x * sigmoid(x) + return (T)(((float)x) / (1.0f + expf((float)-x))); +} + +template <typename T> +__device__ __forceinline__ T gelu_kernel(const T& x) { + // Equivalent to PyTorch GELU with 'none' approximation. + // Refer to: + // https://github.com/pytorch/pytorch/blob/8ac9b20d4b090c213799e81acf48a55ea8d437d6/aten/src/ATen/native/cuda/ActivationGeluKernel.cu#L36-L38 + const float f = (float)x; + constexpr float ALPHA = M_SQRT1_2; + return (T)(f * 0.5f * (1.0f + ::erf(f * ALPHA))); +} + +template <typename T> +__device__ __forceinline__ T gelu_tanh_kernel(const T& x) { + // Equivalent to PyTorch GELU with 'tanh' approximation. + // Refer to: + // https://github.com/pytorch/pytorch/blob/8ac9b20d4b090c213799e81acf48a55ea8d437d6/aten/src/ATen/native/cuda/ActivationGeluKernel.cu#L25-L30 + const float f = (float)x; + constexpr float BETA = M_SQRT2 * M_2_SQRTPI * 0.5f; + constexpr float KAPPA = 0.044715; + float x_cube = f * f * f; + float inner = BETA * (f + KAPPA * x_cube); + return (T)(0.5f * f * (1.0f + ::tanhf(inner))); +} + +} // namespace vllm + +// Launch activation and gating kernel.
+#define LAUNCH_ACTIVATION_GATE_KERNEL(KERNEL) \ + int d = input.size(-1) / 2; \ + int64_t num_tokens = input.numel() / input.size(-1); \ + dim3 grid(num_tokens); \ + dim3 block(std::min(d, 1024)); \ + const at::cuda::OptionalCUDAGuard device_guard(device_of(input)); \ + const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); \ + VLLM_DISPATCH_FLOATING_TYPES( \ + input.scalar_type(), "act_and_mul_kernel", [&] { \ + vllm::act_and_mul_kernel<scalar_t, KERNEL<scalar_t>> \ + <<<grid, block, 0, stream>>>(out.data_ptr<scalar_t>(), \ + input.data_ptr<scalar_t>(), d); \ + }); + +void silu_and_mul(torch::Tensor& out, // [..., d] + torch::Tensor& input) // [..., 2 * d] +{ + LAUNCH_ACTIVATION_GATE_KERNEL(vllm::silu_kernel); +} + +void gelu_and_mul(torch::Tensor& out, // [..., d] + torch::Tensor& input) // [..., 2 * d] +{ + LAUNCH_ACTIVATION_GATE_KERNEL(vllm::gelu_kernel); +} + +void gelu_tanh_and_mul(torch::Tensor& out, // [..., d] + torch::Tensor& input) // [..., 2 * d] +{ + LAUNCH_ACTIVATION_GATE_KERNEL(vllm::gelu_tanh_kernel); +} + +namespace vllm { + +template <typename T> +__device__ __forceinline__ T fatrelu_kernel(const T& x, const float threshold) { + const float f = (float)x; + return (T)(f > threshold ? f : 0.0f); +} + +template <typename scalar_t, scalar_t (*ACT_FN)(const scalar_t&, const float)> +__global__ void act_and_mul_kernel_with_param( + scalar_t* __restrict__ out, const scalar_t* __restrict__ input, const int d, + const float param) { + const int64_t token_idx = blockIdx.x; + for (int64_t idx = threadIdx.x; idx < d; idx += blockDim.x) { + const scalar_t x = VLLM_LDG(&input[token_idx * 2 * d + idx]); + const scalar_t y = VLLM_LDG(&input[token_idx * 2 * d + d + idx]); + out[token_idx * d + idx] = ACT_FN(x, param) * y; + } +} + +} // namespace vllm + +#define LAUNCH_ACTIVATION_GATE_KERNEL_WITH_PARAM(KERNEL, PARAM) \ + int d = input.size(-1) / 2; \ + int64_t num_tokens = input.numel() / input.size(-1); \ + dim3 grid(num_tokens); \ + dim3 block(std::min(d, 1024)); \ + const at::cuda::OptionalCUDAGuard device_guard(device_of(input)); \ + const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); \ + VLLM_DISPATCH_FLOATING_TYPES( \ + input.scalar_type(), "act_and_mul_kernel_with_param", [&] { \ + vllm::act_and_mul_kernel_with_param<scalar_t, KERNEL<scalar_t>> \ + <<<grid, block, 0, stream>>>(out.data_ptr<scalar_t>(), \ + input.data_ptr<scalar_t>(), d, \ + PARAM); \ + }); + +void fatrelu_and_mul(torch::Tensor& out, // [..., d], + torch::Tensor& input, // [..., 2 * d] + double threshold) { + LAUNCH_ACTIVATION_GATE_KERNEL_WITH_PARAM(vllm::fatrelu_kernel, threshold); +} +namespace vllm { + +// Element-wise activation kernel template. +template <typename scalar_t, scalar_t (*ACT_FN)(const scalar_t&)> +__global__ void activation_kernel( + scalar_t* __restrict__ out, // [..., d] + const scalar_t* __restrict__ input, // [..., d] + const int d) { + const int64_t token_idx = blockIdx.x; + for (int64_t idx = threadIdx.x; idx < d; idx += blockDim.x) { + const scalar_t x = VLLM_LDG(&input[token_idx * d + idx]); + out[token_idx * d + idx] = ACT_FN(x); + } +} + +} // namespace vllm + +// Launch element-wise activation kernel.
+#define LAUNCH_ACTIVATION_KERNEL(KERNEL) \ + int d = input.size(-1); \ + int64_t num_tokens = input.numel() / d; \ + dim3 grid(num_tokens); \ + dim3 block(std::min(d, 1024)); \ + const at::cuda::OptionalCUDAGuard device_guard(device_of(input)); \ + const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); \ + VLLM_DISPATCH_FLOATING_TYPES(input.scalar_type(), "activation_kernel", [&] { \ + vllm::activation_kernel<scalar_t, KERNEL<scalar_t>> \ + <<<grid, block, 0, stream>>>(out.data_ptr<scalar_t>(), \ + input.data_ptr<scalar_t>(), d); \ + }); + +namespace vllm { + +template <typename T> +__device__ __forceinline__ T gelu_new_kernel(const T& x) { + const float x3 = (float)(x * x * x); + const T t = (T)tanhf((T)(0.79788456f * (float)(x + (T)(0.044715f * x3)))); + return ((T)0.5) * x * (((T)1.0) + t); +} + +template <typename T> +__device__ __forceinline__ T gelu_fast_kernel(const T& x) { + const float f = (float)x; + const T t = + (T)tanhf(((T)(f * 0.79788456f)) * (((T)1.0) + (T)(0.044715f * f) * x)); + return ((T)0.5) * x * (((T)1.0) + t); +} + +template <typename T> +__device__ __forceinline__ T gelu_quick_kernel(const T& x) { + // x * sigmoid(1.702 * x) + return (T)(((float)x) / (1.0f + expf(-1.702f * (float)x))); +} + +} // namespace vllm + +void gelu_new(torch::Tensor& out, // [..., d] + torch::Tensor& input) // [..., d] +{ + LAUNCH_ACTIVATION_KERNEL(vllm::gelu_new_kernel); +} + +void gelu_fast(torch::Tensor& out, // [..., d] + torch::Tensor& input) // [..., d] +{ + LAUNCH_ACTIVATION_KERNEL(vllm::gelu_fast_kernel); +} + +void gelu_quick(torch::Tensor& out, // [..., d] + torch::Tensor& input) // [..., d] +{ + LAUNCH_ACTIVATION_KERNEL(vllm::gelu_quick_kernel); +} diff --git a/activation/cuda_compat.h b/activation/cuda_compat.h new file mode 100644 index 0000000000000000000000000000000000000000..82e55613d915a4a9dda5d73eea3601ad0ef949c5 --- /dev/null +++ b/activation/cuda_compat.h @@ -0,0 +1,49 @@ +#pragma once + +#ifdef USE_ROCM + #include <hip/hip_runtime.h> +#endif + +#ifndef USE_ROCM + #define WARP_SIZE 32 +#else + #define WARP_SIZE warpSize +#endif + +#ifndef USE_ROCM + #define VLLM_LDG(arg) __ldg(arg) +#else + #define VLLM_LDG(arg) *(arg) +#endif + +#ifndef USE_ROCM + #define VLLM_SHFL_XOR_SYNC(var, lane_mask) \ + __shfl_xor_sync(uint32_t(-1), var, lane_mask) + #define VLLM_SHFL_XOR_SYNC_WIDTH(var, lane_mask, width) \ + __shfl_xor_sync(uint32_t(-1), var, lane_mask, width) +#else + #define VLLM_SHFL_XOR_SYNC(var, lane_mask) __shfl_xor(var, lane_mask) + #define VLLM_SHFL_XOR_SYNC_WIDTH(var, lane_mask, width) \ + __shfl_xor(var, lane_mask, width) +#endif + +#ifndef USE_ROCM + #define VLLM_SHFL_SYNC(var, src_lane) __shfl_sync(uint32_t(-1), var, src_lane) +#else + #define VLLM_SHFL_SYNC(var, src_lane) __shfl(var, src_lane) +#endif + +#ifndef USE_ROCM + #define VLLM_SHFL_DOWN_SYNC(var, lane_delta) \ + __shfl_down_sync(uint32_t(-1), var, lane_delta) +#else + #define VLLM_SHFL_DOWN_SYNC(var, lane_delta) __shfl_down(var, lane_delta) +#endif + +#ifndef USE_ROCM + #define VLLM_DevFuncAttribute_SET_MaxDynamicSharedMemorySize(FUNC, VAL) \ + cudaFuncSetAttribute(FUNC, cudaFuncAttributeMaxDynamicSharedMemorySize, VAL) +#else + #define VLLM_DevFuncAttribute_SET_MaxDynamicSharedMemorySize(FUNC, VAL) \ + hipFuncSetAttribute(FUNC, hipFuncAttributeMaxDynamicSharedMemorySize, VAL) +#endif diff --git a/activation/dispatch_utils.h b/activation/dispatch_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..a634e1c3d488676cf9beb11bc3029915e484b163 --- /dev/null +++ b/activation/dispatch_utils.h @@ -0,0 +1,35 @@ +/* + * Adapted from + * https://github.com/pytorch/pytorch/blob/v2.0.1/aten/src/ATen/Dispatch.h + */
+#pragma once + +#include <torch/all.h> + +#define VLLM_DISPATCH_CASE_FLOATING_TYPES(...) \ + AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::BFloat16, __VA_ARGS__) + +#define VLLM_DISPATCH_FLOATING_TYPES(TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH(TYPE, NAME, VLLM_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__)) + +#define VLLM_DISPATCH_CASE_FLOATING_AND_BYTE_TYPES(...) \ + AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::BFloat16, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::Byte, __VA_ARGS__) + +#define VLLM_DISPATCH_FLOATING_AND_BYTE_TYPES(TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH(TYPE, NAME, \ + VLLM_DISPATCH_CASE_FLOATING_AND_BYTE_TYPES(__VA_ARGS__)) + +#define VLLM_DISPATCH_CASE_INTEGRAL_TYPES(...) \ + AT_DISPATCH_CASE(at::ScalarType::Byte, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::Char, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::Short, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::Int, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::Long, __VA_ARGS__) + +#define VLLM_DISPATCH_INTEGRAL_TYPES(TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH(TYPE, NAME, VLLM_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__)) diff --git a/build.toml b/build.toml new file mode 100644 index 0000000000000000000000000000000000000000..7da9d632a70edb0699eb77f097b9b1a5ae573c48 --- /dev/null +++ b/build.toml @@ -0,0 +1,17 @@ +[general] +name = "activation" + +[torch] +src = [ + "torch-ext/torch_binding.cpp", + "torch-ext/torch_binding.h" +] + +[kernel.activation] +cuda-capabilities = [ "7.0", "7.2", "7.5", "8.0", "8.6", "8.7", "8.9", "9.0" ] +src = [ + "activation/activation_kernels.cu", + "activation/cuda_compat.h", + "activation/dispatch_utils.h", +] +depends = [ "torch" ] diff --git a/build/torch28-cxx11-cu126-aarch64-linux/activation/__init__.py b/build/torch25-cxx11-cu118-x86_64-linux/activation/__init__.py similarity index 90% rename from build/torch28-cxx11-cu126-aarch64-linux/activation/__init__.py rename to build/torch25-cxx11-cu118-x86_64-linux/activation/__init__.py index 1c4f207354093c6ef83eb5d7f3a5a3b22b95d357..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78 100644 --- a/build/torch28-cxx11-cu126-aarch64-linux/activation/__init__.py +++ b/build/torch25-cxx11-cu118-x86_64-linux/activation/__init__.py @@ -10,11 +10,6 @@ def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: return out -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_and_mul(out, x) return out diff --git a/build/torch25-cxx11-cu118-x86_64-linux/activation/_activation_o63kkyjirmkf4.abi3.so b/build/torch25-cxx11-cu118-x86_64-linux/activation/_activation_o63kkyjirmkf4.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..1442eb73f43c4768d2b8f9b916943743256af106 --- /dev/null +++ b/build/torch25-cxx11-cu118-x86_64-linux/activation/_activation_o63kkyjirmkf4.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d50cdabfbed1df74e921ac34ff00bca0555977b14ef8082ddae7b1f30985a494 +size 2370160 diff --git a/build/torch25-cxx11-cu118-x86_64-linux/activation/_ops.py b/build/torch25-cxx11-cu118-x86_64-linux/activation/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..0ee3c40cd38699ce7744be53b0531fe4fa505996 --- /dev/null +++
b/build/torch25-cxx11-cu118-x86_64-linux/activation/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _activation_o63kkyjirmkf4 +ops = torch.ops._activation_o63kkyjirmkf4 + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. + """ + return f"_activation_o63kkyjirmkf4::{op_name}" \ No newline at end of file diff --git a/build/torch28-cxx11-cu126-aarch64-linux/activation/layers.py b/build/torch25-cxx11-cu118-x86_64-linux/activation/layers.py similarity index 51% rename from build/torch28-cxx11-cu126-aarch64-linux/activation/layers.py rename to build/torch25-cxx11-cu118-x86_64-linux/activation/layers.py index 45b31181ffb80509a85d729a7f7ee86fc2cf014a..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f 100644 --- a/build/torch28-cxx11-cu126-aarch64-linux/activation/layers.py +++ b/build/torch25-cxx11-cu118-x86_64-linux/activation/layers.py @@ -5,17 +5,6 @@ from ._ops import ops class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -24,38 +13,7 @@ class SiluAndMul(nn.Module): return out -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -65,8 +23,6 @@ class GeluAndMul(nn.Module): class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -76,19 +32,6 @@ class GeluTanhAndMul(nn.Module): class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. 
- - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - def __init__(self, threshold: float = 0.0): super().__init__() self.threshold = threshold @@ -102,8 +45,6 @@ class FatreluAndMul(nn.Module): class FastGELU(nn.Module): - can_torch_compile: bool = True - def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_fast(out, x) @@ -111,8 +52,6 @@ class FastGELU(nn.Module): class NewGELU(nn.Module): - can_torch_compile: bool = True - def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_new(out, x) @@ -120,8 +59,6 @@ class NewGELU(nn.Module): class QuickGELU(nn.Module): - can_torch_compile: bool = True - def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_quick(out, x) diff --git a/build/torch28-cxx11-cu128-aarch64-linux/activation/__init__.py b/build/torch25-cxx11-cu121-x86_64-linux/activation/__init__.py similarity index 90% rename from build/torch28-cxx11-cu128-aarch64-linux/activation/__init__.py rename to build/torch25-cxx11-cu121-x86_64-linux/activation/__init__.py index 1c4f207354093c6ef83eb5d7f3a5a3b22b95d357..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78 100644 --- a/build/torch28-cxx11-cu128-aarch64-linux/activation/__init__.py +++ b/build/torch25-cxx11-cu121-x86_64-linux/activation/__init__.py @@ -10,11 +10,6 @@ def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: return out -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_and_mul(out, x) return out diff --git a/build/torch25-cxx11-cu121-x86_64-linux/activation/_activation_vrl36m2ejer54.abi3.so b/build/torch25-cxx11-cu121-x86_64-linux/activation/_activation_vrl36m2ejer54.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..e6b6260c4378717d4369cd2577a00350960e317c --- /dev/null +++ b/build/torch25-cxx11-cu121-x86_64-linux/activation/_activation_vrl36m2ejer54.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2bd0709ef09c8f0c18d1dc4a36c8096c59459bece61f5f5dbea95d1e73f54d44 +size 2393264 diff --git a/build/torch25-cxx11-cu121-x86_64-linux/activation/_ops.py b/build/torch25-cxx11-cu121-x86_64-linux/activation/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..cd5eee20b4c9a70e4f59e7198a0a28c5dfa06244 --- /dev/null +++ b/build/torch25-cxx11-cu121-x86_64-linux/activation/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _activation_vrl36m2ejer54 +ops = torch.ops._activation_vrl36m2ejer54 + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. + """ + return f"_activation_vrl36m2ejer54::{op_name}" \ No newline at end of file diff --git a/build/torch28-cxx11-cu128-aarch64-linux/activation/layers.py b/build/torch25-cxx11-cu121-x86_64-linux/activation/layers.py similarity index 51% rename from build/torch28-cxx11-cu128-aarch64-linux/activation/layers.py rename to build/torch25-cxx11-cu121-x86_64-linux/activation/layers.py index 45b31181ffb80509a85d729a7f7ee86fc2cf014a..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f 100644 --- a/build/torch28-cxx11-cu128-aarch64-linux/activation/layers.py +++ b/build/torch25-cxx11-cu121-x86_64-linux/activation/layers.py @@ -5,17 +5,6 @@ from ._ops import ops class SiluAndMul(nn.Module): - """An activation function for SwiGLU. 
- - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -24,38 +13,7 @@ class SiluAndMul(nn.Module): return out -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -65,8 +23,6 @@ class GeluAndMul(nn.Module): class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - def forward(self, x: torch.Tensor): d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -76,19 +32,6 @@ class GeluTanhAndMul(nn.Module): class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. 
- - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - def __init__(self, threshold: float = 0.0): super().__init__() self.threshold = threshold @@ -102,8 +45,6 @@ class FatreluAndMul(nn.Module): class FastGELU(nn.Module): - can_torch_compile: bool = True - def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_fast(out, x) @@ -111,8 +52,6 @@ class FastGELU(nn.Module): class NewGELU(nn.Module): - can_torch_compile: bool = True - def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_new(out, x) @@ -120,8 +59,6 @@ class NewGELU(nn.Module): class QuickGELU(nn.Module): - can_torch_compile: bool = True - def forward(self, x: torch.Tensor) -> torch.Tensor: out = torch.empty_like(x) ops.gelu_quick(out, x) diff --git a/build/torch27-cxx11-cu128-aarch64-linux/activation/__init__.py b/build/torch25-cxx11-cu124-x86_64-linux/activation/__init__.py similarity index 69% rename from build/torch27-cxx11-cu128-aarch64-linux/activation/__init__.py rename to build/torch25-cxx11-cu124-x86_64-linux/activation/__init__.py index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78 100644 --- a/build/torch27-cxx11-cu128-aarch64-linux/activation/__init__.py +++ b/build/torch25-cxx11-cu124-x86_64-linux/activation/__init__.py @@ -10,11 +10,6 @@ def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: return out -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_and_mul(out, x) return out @@ -30,20 +25,6 @@ def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) return out -def gelu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu(out, x) - return out - -def silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu(out, x) - return out - - -def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh(out, x) - return out - - def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_fast(out, x) return out @@ -61,15 +42,11 @@ def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: __all__ = [ "silu_and_mul", - "mul_and_silu", "gelu_and_mul", "gelu_tanh_and_mul", "fatrelu_and_mul", "gelu_fast", "gelu_new", "gelu_quick", - "gelu_tanh", - "silu", - "gelu", "layers", ] diff --git a/build/torch25-cxx11-cu124-x86_64-linux/activation/_activation_va3moa75vw7c2.abi3.so b/build/torch25-cxx11-cu124-x86_64-linux/activation/_activation_va3moa75vw7c2.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..2bfbc3667e483ce8441b310889b63614133f6334 --- /dev/null +++ b/build/torch25-cxx11-cu124-x86_64-linux/activation/_activation_va3moa75vw7c2.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8353447f64e7d2df1a6a341d9c53bced53abef267f079923ae774170d0d57c53 +size 2427936 diff --git a/build/torch25-cxx11-cu124-x86_64-linux/activation/_ops.py b/build/torch25-cxx11-cu124-x86_64-linux/activation/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..05fa036d769ea858865c92b972c68a158899eca1 --- /dev/null +++ b/build/torch25-cxx11-cu124-x86_64-linux/activation/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _activation_va3moa75vw7c2 +ops = torch.ops._activation_va3moa75vw7c2 + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. 
+ """ + return f"_activation_va3moa75vw7c2::{op_name}" \ No newline at end of file diff --git a/build/torch25-cxx11-cu124-x86_64-linux/activation/layers.py b/build/torch25-cxx11-cu124-x86_64-linux/activation/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f --- /dev/null +++ b/build/torch25-cxx11-cu124-x86_64-linux/activation/layers.py @@ -0,0 +1,65 @@ +import torch +import torch.nn as nn + +from ._ops import ops + + +class SiluAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.silu_and_mul(out, x) + return out + + +class GeluAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.gelu_and_mul(out, x) + return out + + +class GeluTanhAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.gelu_tanh_and_mul(out, x) + return out + + +class FatreluAndMul(nn.Module): + def __init__(self, threshold: float = 0.0): + super().__init__() + self.threshold = threshold + + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.fatrelu_and_mul(out, x, self.threshold) + return out + + +class FastGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_fast(out, x) + return out + + +class NewGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_new(out, x) + return out + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_quick(out, x) + return out diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/__init__.py b/build/torch25-cxx98-cu118-x86_64-linux/activation/__init__.py similarity index 69% rename from build/torch27-cxx11-cu128-x86_64-linux/activation/__init__.py rename to build/torch25-cxx98-cu118-x86_64-linux/activation/__init__.py index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78 100644 --- a/build/torch27-cxx11-cu128-x86_64-linux/activation/__init__.py +++ b/build/torch25-cxx98-cu118-x86_64-linux/activation/__init__.py @@ -10,11 +10,6 @@ def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: return out -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_and_mul(out, x) return out @@ -30,20 +25,6 @@ def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) return out -def gelu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu(out, x) - return out - -def silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu(out, x) - return out - - -def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh(out, x) - return out - - def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_fast(out, x) return out @@ -61,15 +42,11 @@ def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: __all__ = [ "silu_and_mul", - "mul_and_silu", "gelu_and_mul", "gelu_tanh_and_mul", "fatrelu_and_mul", "gelu_fast", "gelu_new", 
"gelu_quick", - "gelu_tanh", - "silu", - "gelu", "layers", ] diff --git a/build/torch25-cxx98-cu118-x86_64-linux/activation/_activation_qr3gs3eckeig4.abi3.so b/build/torch25-cxx98-cu118-x86_64-linux/activation/_activation_qr3gs3eckeig4.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..fc35d5f1bdad2b294897ace78c80c2913e8bd793 --- /dev/null +++ b/build/torch25-cxx98-cu118-x86_64-linux/activation/_activation_qr3gs3eckeig4.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df184a6315118d787a1bd6b435cb45f1ca7828445a1f1c0e55c57645cfbba43a +size 2362600 diff --git a/build/torch25-cxx98-cu118-x86_64-linux/activation/_ops.py b/build/torch25-cxx98-cu118-x86_64-linux/activation/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..11bf3a1e13ad4f53348d55ce0b8e727307534d67 --- /dev/null +++ b/build/torch25-cxx98-cu118-x86_64-linux/activation/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _activation_qr3gs3eckeig4 +ops = torch.ops._activation_qr3gs3eckeig4 + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. + """ + return f"_activation_qr3gs3eckeig4::{op_name}" \ No newline at end of file diff --git a/build/torch25-cxx98-cu118-x86_64-linux/activation/layers.py b/build/torch25-cxx98-cu118-x86_64-linux/activation/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f --- /dev/null +++ b/build/torch25-cxx98-cu118-x86_64-linux/activation/layers.py @@ -0,0 +1,65 @@ +import torch +import torch.nn as nn + +from ._ops import ops + + +class SiluAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.silu_and_mul(out, x) + return out + + +class GeluAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.gelu_and_mul(out, x) + return out + + +class GeluTanhAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.gelu_tanh_and_mul(out, x) + return out + + +class FatreluAndMul(nn.Module): + def __init__(self, threshold: float = 0.0): + super().__init__() + self.threshold = threshold + + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.fatrelu_and_mul(out, x, self.threshold) + return out + + +class FastGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_fast(out, x) + return out + + +class NewGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_new(out, x) + return out + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_quick(out, x) + return out diff --git a/build/torch25-cxx98-cu121-x86_64-linux/activation/__init__.py b/build/torch25-cxx98-cu121-x86_64-linux/activation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78 --- /dev/null +++ b/build/torch25-cxx98-cu121-x86_64-linux/activation/__init__.py @@ -0,0 +1,52 @@ +import torch + +from ._ops import ops + +from . 
import layers + + +def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.silu_and_mul(out, x) + return out + + +def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_and_mul(out, x) + return out + + +def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_tanh_and_mul(out, x) + return out + + +def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: + ops.fatrelu_and_mul(out, x, threshold) + return out + + +def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_fast(out, x) + return out + + +def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_new(out, x) + return out + + +def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_quick(out, x) + return out + + +__all__ = [ + "silu_and_mul", + "gelu_and_mul", + "gelu_tanh_and_mul", + "fatrelu_and_mul", + "gelu_fast", + "gelu_new", + "gelu_quick", + "layers", +] diff --git a/build/torch25-cxx98-cu121-x86_64-linux/activation/_activation_p7gbzt25w3zg2.abi3.so b/build/torch25-cxx98-cu121-x86_64-linux/activation/_activation_p7gbzt25w3zg2.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..e24099a45ad0e0ab481769c24c749947c164b07c --- /dev/null +++ b/build/torch25-cxx98-cu121-x86_64-linux/activation/_activation_p7gbzt25w3zg2.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ccb13cfc2e45cf483e8b9f77f1760f28b48bcf185508d51b32d45bc759c4e8bb +size 2385440 diff --git a/build/torch25-cxx98-cu121-x86_64-linux/activation/_ops.py b/build/torch25-cxx98-cu121-x86_64-linux/activation/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..fbe888c4ec71536f08f7de2d823b75d9bbac0173 --- /dev/null +++ b/build/torch25-cxx98-cu121-x86_64-linux/activation/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _activation_p7gbzt25w3zg2 +ops = torch.ops._activation_p7gbzt25w3zg2 + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. 
+ """ + return f"_activation_p7gbzt25w3zg2::{op_name}" \ No newline at end of file diff --git a/build/torch25-cxx98-cu121-x86_64-linux/activation/layers.py b/build/torch25-cxx98-cu121-x86_64-linux/activation/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f --- /dev/null +++ b/build/torch25-cxx98-cu121-x86_64-linux/activation/layers.py @@ -0,0 +1,65 @@ +import torch +import torch.nn as nn + +from ._ops import ops + + +class SiluAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.silu_and_mul(out, x) + return out + + +class GeluAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.gelu_and_mul(out, x) + return out + + +class GeluTanhAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.gelu_tanh_and_mul(out, x) + return out + + +class FatreluAndMul(nn.Module): + def __init__(self, threshold: float = 0.0): + super().__init__() + self.threshold = threshold + + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.fatrelu_and_mul(out, x, self.threshold) + return out + + +class FastGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_fast(out, x) + return out + + +class NewGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_new(out, x) + return out + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_quick(out, x) + return out diff --git a/build/torch25-cxx98-cu124-x86_64-linux/activation/__init__.py b/build/torch25-cxx98-cu124-x86_64-linux/activation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78 --- /dev/null +++ b/build/torch25-cxx98-cu124-x86_64-linux/activation/__init__.py @@ -0,0 +1,52 @@ +import torch + +from ._ops import ops + +from . 
import layers + + +def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.silu_and_mul(out, x) + return out + + +def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_and_mul(out, x) + return out + + +def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_tanh_and_mul(out, x) + return out + + +def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: + ops.fatrelu_and_mul(out, x, threshold) + return out + + +def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_fast(out, x) + return out + + +def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_new(out, x) + return out + + +def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_quick(out, x) + return out + + +__all__ = [ + "silu_and_mul", + "gelu_and_mul", + "gelu_tanh_and_mul", + "fatrelu_and_mul", + "gelu_fast", + "gelu_new", + "gelu_quick", + "layers", +] diff --git a/build/torch25-cxx98-cu124-x86_64-linux/activation/_activation_jg7yaigtn7wco.abi3.so b/build/torch25-cxx98-cu124-x86_64-linux/activation/_activation_jg7yaigtn7wco.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..dc1fd3cc2e28051b8a263a5baf287b794d741f41 --- /dev/null +++ b/build/torch25-cxx98-cu124-x86_64-linux/activation/_activation_jg7yaigtn7wco.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f8048853e8cb06e8574a9a9497800d2be438f7989d79f44dcf2e0ced38a75a9 +size 2420192 diff --git a/build/torch25-cxx98-cu124-x86_64-linux/activation/_ops.py b/build/torch25-cxx98-cu124-x86_64-linux/activation/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..0c022ad4f749236320938c6ae13079a445e860ba --- /dev/null +++ b/build/torch25-cxx98-cu124-x86_64-linux/activation/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _activation_jg7yaigtn7wco +ops = torch.ops._activation_jg7yaigtn7wco + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. 
+ """ + return f"_activation_jg7yaigtn7wco::{op_name}" \ No newline at end of file diff --git a/build/torch25-cxx98-cu124-x86_64-linux/activation/layers.py b/build/torch25-cxx98-cu124-x86_64-linux/activation/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f --- /dev/null +++ b/build/torch25-cxx98-cu124-x86_64-linux/activation/layers.py @@ -0,0 +1,65 @@ +import torch +import torch.nn as nn + +from ._ops import ops + + +class SiluAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.silu_and_mul(out, x) + return out + + +class GeluAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.gelu_and_mul(out, x) + return out + + +class GeluTanhAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.gelu_tanh_and_mul(out, x) + return out + + +class FatreluAndMul(nn.Module): + def __init__(self, threshold: float = 0.0): + super().__init__() + self.threshold = threshold + + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.fatrelu_and_mul(out, x, self.threshold) + return out + + +class FastGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_fast(out, x) + return out + + +class NewGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_new(out, x) + return out + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_quick(out, x) + return out diff --git a/build/torch26-cxx11-cu118-x86_64-linux/activation/__init__.py b/build/torch26-cxx11-cu118-x86_64-linux/activation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78 --- /dev/null +++ b/build/torch26-cxx11-cu118-x86_64-linux/activation/__init__.py @@ -0,0 +1,52 @@ +import torch + +from ._ops import ops + +from . 
import layers + + +def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.silu_and_mul(out, x) + return out + + +def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_and_mul(out, x) + return out + + +def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_tanh_and_mul(out, x) + return out + + +def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: + ops.fatrelu_and_mul(out, x, threshold) + return out + + +def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_fast(out, x) + return out + + +def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_new(out, x) + return out + + +def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_quick(out, x) + return out + + +__all__ = [ + "silu_and_mul", + "gelu_and_mul", + "gelu_tanh_and_mul", + "fatrelu_and_mul", + "gelu_fast", + "gelu_new", + "gelu_quick", + "layers", +] diff --git a/build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_ncisyrun7guwk.abi3.so b/build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_ncisyrun7guwk.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..951cd024fc65dcaf31f0ce123d7c56538db90255 --- /dev/null +++ b/build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_ncisyrun7guwk.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cde5439e78ba0e1aaa1937d798b214b46d38cbab8e4384b93a22239fed1a4dd4 +size 2370264 diff --git a/build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py b/build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..f4538ecbd1302013d2026d413f07fefa1e3ed1ba --- /dev/null +++ b/build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _activation_ncisyrun7guwk +ops = torch.ops._activation_ncisyrun7guwk + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. 
+ """ + return f"_activation_ncisyrun7guwk::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx11-cu118-x86_64-linux/activation/layers.py b/build/torch26-cxx11-cu118-x86_64-linux/activation/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f --- /dev/null +++ b/build/torch26-cxx11-cu118-x86_64-linux/activation/layers.py @@ -0,0 +1,65 @@ +import torch +import torch.nn as nn + +from ._ops import ops + + +class SiluAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.silu_and_mul(out, x) + return out + + +class GeluAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.gelu_and_mul(out, x) + return out + + +class GeluTanhAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.gelu_tanh_and_mul(out, x) + return out + + +class FatreluAndMul(nn.Module): + def __init__(self, threshold: float = 0.0): + super().__init__() + self.threshold = threshold + + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.fatrelu_and_mul(out, x, self.threshold) + return out + + +class FastGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_fast(out, x) + return out + + +class NewGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_new(out, x) + return out + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_quick(out, x) + return out diff --git a/build/torch26-cxx11-cu124-x86_64-linux/activation/__init__.py b/build/torch26-cxx11-cu124-x86_64-linux/activation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78 --- /dev/null +++ b/build/torch26-cxx11-cu124-x86_64-linux/activation/__init__.py @@ -0,0 +1,52 @@ +import torch + +from ._ops import ops + +from . 
import layers + + +def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.silu_and_mul(out, x) + return out + + +def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_and_mul(out, x) + return out + + +def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_tanh_and_mul(out, x) + return out + + +def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: + ops.fatrelu_and_mul(out, x, threshold) + return out + + +def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_fast(out, x) + return out + + +def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_new(out, x) + return out + + +def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_quick(out, x) + return out + + +__all__ = [ + "silu_and_mul", + "gelu_and_mul", + "gelu_tanh_and_mul", + "fatrelu_and_mul", + "gelu_fast", + "gelu_new", + "gelu_quick", + "layers", +] diff --git a/build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_ochhfvlnc3vyc.abi3.so b/build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_ochhfvlnc3vyc.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..7cc13c8b18b95f7a24b708e41f20daa15cf8e5ba --- /dev/null +++ b/build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_ochhfvlnc3vyc.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6bd20d411c51fc8729b15cab6a60c5c9185222474aa035489e1bff299d76682 +size 2428040 diff --git a/build/torch26-cxx11-cu124-x86_64-linux/activation/_ops.py b/build/torch26-cxx11-cu124-x86_64-linux/activation/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..fc135b9b87ed568acd3b7ae002760780202297ab --- /dev/null +++ b/build/torch26-cxx11-cu124-x86_64-linux/activation/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _activation_ochhfvlnc3vyc +ops = torch.ops._activation_ochhfvlnc3vyc + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. 
+ """ + return f"_activation_ochhfvlnc3vyc::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx11-cu124-x86_64-linux/activation/layers.py b/build/torch26-cxx11-cu124-x86_64-linux/activation/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f --- /dev/null +++ b/build/torch26-cxx11-cu124-x86_64-linux/activation/layers.py @@ -0,0 +1,65 @@ +import torch +import torch.nn as nn + +from ._ops import ops + + +class SiluAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.silu_and_mul(out, x) + return out + + +class GeluAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.gelu_and_mul(out, x) + return out + + +class GeluTanhAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.gelu_tanh_and_mul(out, x) + return out + + +class FatreluAndMul(nn.Module): + def __init__(self, threshold: float = 0.0): + super().__init__() + self.threshold = threshold + + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.fatrelu_and_mul(out, x, self.threshold) + return out + + +class FastGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_fast(out, x) + return out + + +class NewGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_new(out, x) + return out + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_quick(out, x) + return out diff --git a/build/torch26-cxx11-cu126-x86_64-linux/activation/__init__.py b/build/torch26-cxx11-cu126-x86_64-linux/activation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78 --- /dev/null +++ b/build/torch26-cxx11-cu126-x86_64-linux/activation/__init__.py @@ -0,0 +1,52 @@ +import torch + +from ._ops import ops + +from . 
import layers + + +def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.silu_and_mul(out, x) + return out + + +def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_and_mul(out, x) + return out + + +def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_tanh_and_mul(out, x) + return out + + +def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: + ops.fatrelu_and_mul(out, x, threshold) + return out + + +def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_fast(out, x) + return out + + +def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_new(out, x) + return out + + +def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_quick(out, x) + return out + + +__all__ = [ + "silu_and_mul", + "gelu_and_mul", + "gelu_tanh_and_mul", + "fatrelu_and_mul", + "gelu_fast", + "gelu_new", + "gelu_quick", + "layers", +] diff --git a/build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_u6vnqubnicksq.abi3.so b/build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_u6vnqubnicksq.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..ed72afe1b709df6b64ae4daf96dfacf5397334e1 --- /dev/null +++ b/build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_u6vnqubnicksq.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41c18b20c2bf8c49d2d3088a9bc1aad4293df0b57eafc9b141a9e8e595fe551a +size 2436672 diff --git a/build/torch26-cxx11-cu126-x86_64-linux/activation/_ops.py b/build/torch26-cxx11-cu126-x86_64-linux/activation/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..03feb54a67ee96a3181145a654e9c1d3432d3c83 --- /dev/null +++ b/build/torch26-cxx11-cu126-x86_64-linux/activation/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _activation_u6vnqubnicksq +ops = torch.ops._activation_u6vnqubnicksq + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. 
+ """ + return f"_activation_u6vnqubnicksq::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx11-cu126-x86_64-linux/activation/layers.py b/build/torch26-cxx11-cu126-x86_64-linux/activation/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f --- /dev/null +++ b/build/torch26-cxx11-cu126-x86_64-linux/activation/layers.py @@ -0,0 +1,65 @@ +import torch +import torch.nn as nn + +from ._ops import ops + + +class SiluAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.silu_and_mul(out, x) + return out + + +class GeluAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.gelu_and_mul(out, x) + return out + + +class GeluTanhAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.gelu_tanh_and_mul(out, x) + return out + + +class FatreluAndMul(nn.Module): + def __init__(self, threshold: float = 0.0): + super().__init__() + self.threshold = threshold + + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.fatrelu_and_mul(out, x, self.threshold) + return out + + +class FastGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_fast(out, x) + return out + + +class NewGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_new(out, x) + return out + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_quick(out, x) + return out diff --git a/build/torch26-cxx98-cu118-x86_64-linux/activation/__init__.py b/build/torch26-cxx98-cu118-x86_64-linux/activation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78 --- /dev/null +++ b/build/torch26-cxx98-cu118-x86_64-linux/activation/__init__.py @@ -0,0 +1,52 @@ +import torch + +from ._ops import ops + +from . 
import layers + + +def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.silu_and_mul(out, x) + return out + + +def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_and_mul(out, x) + return out + + +def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_tanh_and_mul(out, x) + return out + + +def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: + ops.fatrelu_and_mul(out, x, threshold) + return out + + +def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_fast(out, x) + return out + + +def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_new(out, x) + return out + + +def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_quick(out, x) + return out + + +__all__ = [ + "silu_and_mul", + "gelu_and_mul", + "gelu_tanh_and_mul", + "fatrelu_and_mul", + "gelu_fast", + "gelu_new", + "gelu_quick", + "layers", +] diff --git a/build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_2vn6ty3gfqfb6.abi3.so b/build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_2vn6ty3gfqfb6.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..d9c4fba021babdd0966b329da77216730a212c19 --- /dev/null +++ b/build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_2vn6ty3gfqfb6.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cfbcd5da358cd5cb7982d19c8880cf4db6f08b46622a7a953f755ad59e4e1492 +size 2362752 diff --git a/build/torch26-cxx98-cu118-x86_64-linux/activation/_ops.py b/build/torch26-cxx98-cu118-x86_64-linux/activation/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..8ec67ec6be213233dc83cb83dcd9e3d8cade5a98 --- /dev/null +++ b/build/torch26-cxx98-cu118-x86_64-linux/activation/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _activation_2vn6ty3gfqfb6 +ops = torch.ops._activation_2vn6ty3gfqfb6 + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. 
+ """ + return f"_activation_2vn6ty3gfqfb6::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx98-cu118-x86_64-linux/activation/layers.py b/build/torch26-cxx98-cu118-x86_64-linux/activation/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f --- /dev/null +++ b/build/torch26-cxx98-cu118-x86_64-linux/activation/layers.py @@ -0,0 +1,65 @@ +import torch +import torch.nn as nn + +from ._ops import ops + + +class SiluAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.silu_and_mul(out, x) + return out + + +class GeluAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.gelu_and_mul(out, x) + return out + + +class GeluTanhAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.gelu_tanh_and_mul(out, x) + return out + + +class FatreluAndMul(nn.Module): + def __init__(self, threshold: float = 0.0): + super().__init__() + self.threshold = threshold + + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.fatrelu_and_mul(out, x, self.threshold) + return out + + +class FastGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_fast(out, x) + return out + + +class NewGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_new(out, x) + return out + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_quick(out, x) + return out diff --git a/build/torch26-cxx98-cu124-x86_64-linux/activation/__init__.py b/build/torch26-cxx98-cu124-x86_64-linux/activation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78 --- /dev/null +++ b/build/torch26-cxx98-cu124-x86_64-linux/activation/__init__.py @@ -0,0 +1,52 @@ +import torch + +from ._ops import ops + +from . 
import layers + + +def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.silu_and_mul(out, x) + return out + + +def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_and_mul(out, x) + return out + + +def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_tanh_and_mul(out, x) + return out + + +def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: + ops.fatrelu_and_mul(out, x, threshold) + return out + + +def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_fast(out, x) + return out + + +def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_new(out, x) + return out + + +def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_quick(out, x) + return out + + +__all__ = [ + "silu_and_mul", + "gelu_and_mul", + "gelu_tanh_and_mul", + "fatrelu_and_mul", + "gelu_fast", + "gelu_new", + "gelu_quick", + "layers", +] diff --git a/build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_myvteedxdpqc6.abi3.so b/build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_myvteedxdpqc6.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..486879e6269e0c7d0763ac11d82a100dec1b1b91 --- /dev/null +++ b/build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_myvteedxdpqc6.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1bc928823117c800904bcd3492bf1a0c65a32f6d8a842dc039f55e29831ab49 +size 2420344 diff --git a/build/torch26-cxx98-cu124-x86_64-linux/activation/_ops.py b/build/torch26-cxx98-cu124-x86_64-linux/activation/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..0f4e8d31b42ed7be77b8eaef9aa29251327009bf --- /dev/null +++ b/build/torch26-cxx98-cu124-x86_64-linux/activation/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _activation_myvteedxdpqc6 +ops = torch.ops._activation_myvteedxdpqc6 + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. 
+ """ + return f"_activation_myvteedxdpqc6::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx98-cu124-x86_64-linux/activation/layers.py b/build/torch26-cxx98-cu124-x86_64-linux/activation/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f --- /dev/null +++ b/build/torch26-cxx98-cu124-x86_64-linux/activation/layers.py @@ -0,0 +1,65 @@ +import torch +import torch.nn as nn + +from ._ops import ops + + +class SiluAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.silu_and_mul(out, x) + return out + + +class GeluAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.gelu_and_mul(out, x) + return out + + +class GeluTanhAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.gelu_tanh_and_mul(out, x) + return out + + +class FatreluAndMul(nn.Module): + def __init__(self, threshold: float = 0.0): + super().__init__() + self.threshold = threshold + + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.fatrelu_and_mul(out, x, self.threshold) + return out + + +class FastGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_fast(out, x) + return out + + +class NewGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_new(out, x) + return out + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_quick(out, x) + return out diff --git a/build/torch26-cxx98-cu126-x86_64-linux/activation/__init__.py b/build/torch26-cxx98-cu126-x86_64-linux/activation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78 --- /dev/null +++ b/build/torch26-cxx98-cu126-x86_64-linux/activation/__init__.py @@ -0,0 +1,52 @@ +import torch + +from ._ops import ops + +from . 
import layers + + +def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.silu_and_mul(out, x) + return out + + +def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_and_mul(out, x) + return out + + +def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_tanh_and_mul(out, x) + return out + + +def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: + ops.fatrelu_and_mul(out, x, threshold) + return out + + +def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_fast(out, x) + return out + + +def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_new(out, x) + return out + + +def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_quick(out, x) + return out + + +__all__ = [ + "silu_and_mul", + "gelu_and_mul", + "gelu_tanh_and_mul", + "fatrelu_and_mul", + "gelu_fast", + "gelu_new", + "gelu_quick", + "layers", +] diff --git a/build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_rbswus6emrhm2.abi3.so b/build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_rbswus6emrhm2.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..42ad3291140b013999afb683ec84c23804759e46 --- /dev/null +++ b/build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_rbswus6emrhm2.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:474727e434a9cd4ec984a6da7124992ead4ca0fefce9581d0fd503e36c065aed +size 2424888 diff --git a/build/torch26-cxx98-cu126-x86_64-linux/activation/_ops.py b/build/torch26-cxx98-cu126-x86_64-linux/activation/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..c6d4e4c91a867d657f287510c40366bccef86c94 --- /dev/null +++ b/build/torch26-cxx98-cu126-x86_64-linux/activation/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _activation_rbswus6emrhm2 +ops = torch.ops._activation_rbswus6emrhm2 + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. 
+ """ + return f"_activation_rbswus6emrhm2::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx98-cu126-x86_64-linux/activation/layers.py b/build/torch26-cxx98-cu126-x86_64-linux/activation/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f --- /dev/null +++ b/build/torch26-cxx98-cu126-x86_64-linux/activation/layers.py @@ -0,0 +1,65 @@ +import torch +import torch.nn as nn + +from ._ops import ops + + +class SiluAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.silu_and_mul(out, x) + return out + + +class GeluAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.gelu_and_mul(out, x) + return out + + +class GeluTanhAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.gelu_tanh_and_mul(out, x) + return out + + +class FatreluAndMul(nn.Module): + def __init__(self, threshold: float = 0.0): + super().__init__() + self.threshold = threshold + + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.fatrelu_and_mul(out, x, self.threshold) + return out + + +class FastGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_fast(out, x) + return out + + +class NewGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_new(out, x) + return out + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_quick(out, x) + return out diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/__init__.py b/build/torch27-cxx11-cu118-x86_64-linux/activation/__init__.py deleted file mode 100644 index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu118-x86_64-linux/activation/__init__.py +++ /dev/null @@ -1,75 +0,0 @@ -import torch - -from ._ops import ops - -from . 
import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu(out, x) - return out - -def silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu(out, x) - return out - - -def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh(out, x) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "mul_and_silu", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "gelu_tanh", - "silu", - "gelu", - "layers", -] diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index bbf3ad846a76e365312ad965559a177976801396..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc deleted file mode 100644 index 47765ef8e985a500bbb3e25990387a1f1f15c767..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc deleted file mode 100644 index de62862184381714910c79ecdf8db3ca14f8a753..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/_activation_beeaae6.abi3.so b/build/torch27-cxx11-cu118-x86_64-linux/activation/_activation_beeaae6.abi3.so deleted file mode 100755 index c6c9665f880b574481be0f6464ac7637e732df84..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu118-x86_64-linux/activation/_activation_beeaae6.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ce06ec284ecd4ac5423d3822a60cd9eeb686d0054b38d66567de73e1137b0567 -size 2773632 diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/_ops.py b/build/torch27-cxx11-cu118-x86_64-linux/activation/_ops.py deleted file mode 100644 index 4d722bffa37106dd2bfdb75db14408c7eecefcb0..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu118-x86_64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . 
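The functional wrappers in these `__init__.py` files take a caller-allocated `out` tensor, forward to the custom op, and hand `out` back for convenience (the `-> None` annotations notwithstanding). A minimal usage sketch, assuming the package imports as `activation` and a CUDA device is available; the shapes and dtype are arbitrary:

```python
import torch
import activation  # assumed import name for the built package

x = torch.randn(8, 2 * 1024, device="cuda", dtype=torch.bfloat16)
d = x.shape[-1] // 2

# Gated ops write into `out` in place; it must be sized to the gated width d.
out = torch.empty(x.shape[:-1] + (d,), dtype=x.dtype, device=x.device)
activation.silu_and_mul(out, x)

# Element-wise variants keep the input shape.
y = torch.empty_like(x)
activation.gelu_fast(y, x)
```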
import _activation_beeaae6 -ops = torch.ops._activation_beeaae6 - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. - """ - return f"_activation_beeaae6::{op_name}" \ No newline at end of file diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/layers.py b/build/torch27-cxx11-cu118-x86_64-linux/activation/layers.py deleted file mode 100644 index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu118-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,179 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - -class Silu(nn.Module): - """An activation function for SiLU. - - The function computes x -> silu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.silu(out, x) - return out - -class Gelu(nn.Module): - """An activation function for GELU. - - The function computes x -> gelu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu(out, x) - return out - -class GeluTanh(nn.Module): - """An activation function for GELU with `tanh` approximation. - - The function computes x -> gelu_tanh(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu_tanh(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. 
- - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/__init__.py b/build/torch27-cxx11-cu126-x86_64-linux/activation/__init__.py deleted file mode 100644 index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu126-x86_64-linux/activation/__init__.py +++ /dev/null @@ -1,75 +0,0 @@ -import torch - -from ._ops import ops - -from . 
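The `GeluAndMul` / `GeluTanhAndMul` modules removed in this hunk correspond to PyTorch's two GELU formulations (exact erf form vs. tanh approximation), applied to the first half of the input and multiplied by the second half. A plain-PyTorch reference for both gates, for comparison only; the test shape below is an assumption:

```python
import torch
import torch.nn.functional as F

def gelu_and_mul_ref(x: torch.Tensor, approximate: str = "none") -> torch.Tensor:
    # approximate="none" mirrors gelu_and_mul (erf form);
    # approximate="tanh" mirrors gelu_tanh_and_mul.
    d = x.shape[-1] // 2
    return F.gelu(x[..., :d], approximate=approximate) * x[..., d:]

x = torch.randn(2, 16, 2 * 64)
exact_gate = gelu_and_mul_ref(x, "none")
tanh_gate = gelu_and_mul_ref(x, "tanh")
```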
import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu(out, x) - return out - -def silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu(out, x) - return out - - -def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh(out, x) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "mul_and_silu", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "gelu_tanh", - "silu", - "gelu", - "layers", -] diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 29e76b5c619af9b19c5650edcfd4f63c4725d35f..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc deleted file mode 100644 index f54053b63e8c2b7598967b6ca9739ecc85d6142a..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc deleted file mode 100644 index 4d4a3c1172a3a2b4c954199c9762b3251d1c468c..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/_activation_beeaae6.abi3.so b/build/torch27-cxx11-cu126-x86_64-linux/activation/_activation_beeaae6.abi3.so deleted file mode 100755 index e9e9102689a8ddf42f881abedcd19e137f22d5e4..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu126-x86_64-linux/activation/_activation_beeaae6.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a529bd105aca5081398d63329e829b6b159570424cd654d3a9f275ca9a720e82 -size 2852200 diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/_ops.py b/build/torch27-cxx11-cu126-x86_64-linux/activation/_ops.py deleted file mode 100644 index 4d722bffa37106dd2bfdb75db14408c7eecefcb0..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu126-x86_64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . 
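Each build variant ships an `_ops.py` that imports its versioned extension module (loading the shared object registers its operators) and then reaches them through `torch.ops.<namespace>`; `add_op_namespace_prefix` only builds the fully qualified `namespace::op` string. A schematic sketch of that pattern, using one namespace string copied from this diff; how the qualified name is consumed downstream is not shown in these hunks, so the commented-out uses are assumptions:

```python
import torch

NAMESPACE = "_activation_beeaae6"  # versioned extension name from one build variant

def add_op_namespace_prefix(op_name: str) -> str:
    """Build the fully qualified name, e.g. '_activation_beeaae6::silu_and_mul'."""
    return f"{NAMESPACE}::{op_name}"

# After `from . import _activation_beeaae6` has loaded the shared object, the same
# operator is reachable by attribute and by qualified name (requires the built wheel):
# op = getattr(torch.ops, NAMESPACE).silu_and_mul
# qualified = add_op_namespace_prefix("silu_and_mul")  # form expected by torch.library APIs
```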
import _activation_beeaae6 -ops = torch.ops._activation_beeaae6 - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. - """ - return f"_activation_beeaae6::{op_name}" \ No newline at end of file diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/layers.py b/build/torch27-cxx11-cu126-x86_64-linux/activation/layers.py deleted file mode 100644 index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu126-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,179 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - -class Silu(nn.Module): - """An activation function for SiLU. - - The function computes x -> silu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.silu(out, x) - return out - -class Gelu(nn.Module): - """An activation function for GELU. - - The function computes x -> gelu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu(out, x) - return out - -class GeluTanh(nn.Module): - """An activation function for GELU with `tanh` approximation. - - The function computes x -> gelu_tanh(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu_tanh(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. 
- - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 390f279894bed7ce9346ede4953b9ffc9e1b1808..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc deleted file mode 100644 index 86ca448fc1e6e7e119172b94f978b4a88aeda3e1..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc deleted file mode 100644 index cbcd1da77da3529c73226d8ed8decfae8b9e5436..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu128-aarch64-linux/activation/_activation_320b408.abi3.so b/build/torch27-cxx11-cu128-aarch64-linux/activation/_activation_320b408.abi3.so deleted file mode 100644 index 4df8f1606a76b66c06d538cd25db8e894d282405..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu128-aarch64-linux/activation/_activation_320b408.abi3.so +++ 
/dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:34bdeb9ab72686850aef0a16b225b1b956162edb2cf46cba65c5e5b92ae267ae -size 4207000 diff --git a/build/torch27-cxx11-cu128-aarch64-linux/activation/_ops.py b/build/torch27-cxx11-cu128-aarch64-linux/activation/_ops.py deleted file mode 100644 index 0fe83704e6d8850cb94dd0434fb763bff8e7e953..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu128-aarch64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _activation_320b408 -ops = torch.ops._activation_320b408 - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. - """ - return f"_activation_320b408::{op_name}" \ No newline at end of file diff --git a/build/torch27-cxx11-cu128-aarch64-linux/activation/layers.py b/build/torch27-cxx11-cu128-aarch64-linux/activation/layers.py deleted file mode 100644 index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu128-aarch64-linux/activation/layers.py +++ /dev/null @@ -1,179 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - -class Silu(nn.Module): - """An activation function for SiLU. - - The function computes x -> silu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.silu(out, x) - return out - -class Gelu(nn.Module): - """An activation function for GELU. - - The function computes x -> gelu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu(out, x) - return out - -class GeluTanh(nn.Module): - """An activation function for GELU with `tanh` approximation. - - The function computes x -> gelu_tanh(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu_tanh(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. 
- - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 364976ff5017b183a827c0dfcda90becfbab0e7c..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc deleted file mode 100644 index 008e1b91db1ae539587989af1a212f9cd38a1ae2..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc deleted file mode 100644 index d00f03a5b9a4944132d13ac0986acc2c54e0ca3c..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/_activation_beeaae6.abi3.so b/build/torch27-cxx11-cu128-x86_64-linux/activation/_activation_beeaae6.abi3.so deleted file mode 100755 index 6d8adc0f26f3b10cbc1b441b74bc7f49c0ebdaae..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu128-x86_64-linux/activation/_activation_beeaae6.abi3.so +++ /dev/null @@ -1,3 
+0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:0f2cffcb6b5b9a49f03a2df46fc2ad36765676edecb468c233e78e1f5e21e206 -size 4127872 diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/_ops.py b/build/torch27-cxx11-cu128-x86_64-linux/activation/_ops.py deleted file mode 100644 index 4d722bffa37106dd2bfdb75db14408c7eecefcb0..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu128-x86_64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _activation_beeaae6 -ops = torch.ops._activation_beeaae6 - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. - """ - return f"_activation_beeaae6::{op_name}" \ No newline at end of file diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/layers.py b/build/torch27-cxx11-cu128-x86_64-linux/activation/layers.py deleted file mode 100644 index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu128-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,179 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - -class Silu(nn.Module): - """An activation function for SiLU. - - The function computes x -> silu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.silu(out, x) - return out - -class Gelu(nn.Module): - """An activation function for GELU. - - The function computes x -> gelu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu(out, x) - return out - -class GeluTanh(nn.Module): - """An activation function for GELU with `tanh` approximation. - - The function computes x -> gelu_tanh(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu_tanh(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. 
- - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 7aaa364368efe0e765de132c08296d189a969ede..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc deleted file mode 100644 index cc7b128cfd05527bc856b66cdaf7d33691835eae..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc deleted file mode 100644 index e547c241f319a637fa590b09ad35c1592aacce40..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu126-aarch64-linux/activation/_activation_0c3eb4e_dirty.abi3.so b/build/torch28-cxx11-cu126-aarch64-linux/activation/_activation_0c3eb4e_dirty.abi3.so deleted file mode 100755 index 8121d3da5057e1d53e4dee4b60de1e13285bd3e0..0000000000000000000000000000000000000000 --- 
a/build/torch28-cxx11-cu126-aarch64-linux/activation/_activation_0c3eb4e_dirty.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:02b62f5d045f370c3fb7c0e7ef458165feb987fba186b8cb9aee55c735a82e93 -size 2699928 diff --git a/build/torch28-cxx11-cu126-aarch64-linux/activation/_ops.py b/build/torch28-cxx11-cu126-aarch64-linux/activation/_ops.py deleted file mode 100644 index 0f883290f823dd4b9ad1432d6644d25bcd3a4acf..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu126-aarch64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _activation_0c3eb4e_dirty -ops = torch.ops._activation_0c3eb4e_dirty - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. - """ - return f"_activation_0c3eb4e_dirty::{op_name}" \ No newline at end of file diff --git a/build/torch28-cxx11-cu126-x86_64-linux/activation/__init__.py b/build/torch28-cxx11-cu126-x86_64-linux/activation/__init__.py deleted file mode 100644 index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu126-x86_64-linux/activation/__init__.py +++ /dev/null @@ -1,75 +0,0 @@ -import torch - -from ._ops import ops - -from . import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu(out, x) - return out - -def silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu(out, x) - return out - - -def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh(out, x) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "mul_and_silu", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "gelu_tanh", - "silu", - "gelu", - "layers", -] diff --git a/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index e8f8e706b1057711ae9e53bf255aa392d9356d5b..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc deleted file mode 100644 index ca11e4cda13d6d4f0a9f8a37d7188d53380ddde2..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ diff --git 
a/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc deleted file mode 100644 index e906e10360ab9b669e4add9e39cb9ce133ca04f6..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu126-x86_64-linux/activation/_activation_beeaae6.abi3.so b/build/torch28-cxx11-cu126-x86_64-linux/activation/_activation_beeaae6.abi3.so deleted file mode 100755 index 7c3397feac6fa683af5617d944ea5e6f5f42bf1b..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu126-x86_64-linux/activation/_activation_beeaae6.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:972be0b2b7ce4f771028406367437488743dc81d70e6316e7a2694df1422b23d -size 2837192 diff --git a/build/torch28-cxx11-cu126-x86_64-linux/activation/_ops.py b/build/torch28-cxx11-cu126-x86_64-linux/activation/_ops.py deleted file mode 100644 index 4d722bffa37106dd2bfdb75db14408c7eecefcb0..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu126-x86_64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _activation_beeaae6 -ops = torch.ops._activation_beeaae6 - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. - """ - return f"_activation_beeaae6::{op_name}" \ No newline at end of file diff --git a/build/torch28-cxx11-cu126-x86_64-linux/activation/layers.py b/build/torch28-cxx11-cu126-x86_64-linux/activation/layers.py deleted file mode 100644 index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu126-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,179 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - -class Silu(nn.Module): - """An activation function for SiLU. - - The function computes x -> silu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.silu(out, x) - return out - -class Gelu(nn.Module): - """An activation function for GELU. - - The function computes x -> gelu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu(out, x) - return out - -class GeluTanh(nn.Module): - """An activation function for GELU with `tanh` approximation. - - The function computes x -> gelu_tanh(x). 
- - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu_tanh(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. 
- - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index bdb5a121a09f628a672c404f5207f691347f83c5..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc deleted file mode 100644 index 0d8c166048d114380e068ca6448ab46ef96da034..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc deleted file mode 100644 index ea0551b7b1c5e408b9875b62598f6f5f0b489a30..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu128-aarch64-linux/activation/_activation_0c3eb4e_dirty.abi3.so b/build/torch28-cxx11-cu128-aarch64-linux/activation/_activation_0c3eb4e_dirty.abi3.so deleted file mode 100755 index f1d23623c037de97ee0207fe5f750d8ba9863d3c..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu128-aarch64-linux/activation/_activation_0c3eb4e_dirty.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2f7fe0a00eaf2e228f237ee3058ac9eb2c6fbc4927b1276d0f566bb05bb043b9 -size 3683080 diff --git a/build/torch28-cxx11-cu128-aarch64-linux/activation/_ops.py b/build/torch28-cxx11-cu128-aarch64-linux/activation/_ops.py deleted file mode 100644 index 0f883290f823dd4b9ad1432d6644d25bcd3a4acf..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu128-aarch64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _activation_0c3eb4e_dirty -ops = torch.ops._activation_0c3eb4e_dirty - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. 
- """ - return f"_activation_0c3eb4e_dirty::{op_name}" \ No newline at end of file diff --git a/build/torch28-cxx11-cu128-x86_64-linux/activation/__init__.py b/build/torch28-cxx11-cu128-x86_64-linux/activation/__init__.py deleted file mode 100644 index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu128-x86_64-linux/activation/__init__.py +++ /dev/null @@ -1,75 +0,0 @@ -import torch - -from ._ops import ops - -from . import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu(out, x) - return out - -def silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu(out, x) - return out - - -def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh(out, x) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "mul_and_silu", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "gelu_tanh", - "silu", - "gelu", - "layers", -] diff --git a/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index d12dd70b4a1174dc45b09641f8a67395f73f2052..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc deleted file mode 100644 index e5ad25122dbe45d007132c05ad491272043aff5a..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc deleted file mode 100644 index 55353ba18a89c372e3738c44597e1c129e955e3f..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu128-x86_64-linux/activation/_activation_beeaae6.abi3.so b/build/torch28-cxx11-cu128-x86_64-linux/activation/_activation_beeaae6.abi3.so deleted file mode 100755 index f12d8ce6414b9517c65869fe83bb570a87480d74..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu128-x86_64-linux/activation/_activation_beeaae6.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version 
https://git-lfs.github.com/spec/v1 -oid sha256:ec0756eb56dab9c57cc1aa01cfc2301d508fdf11ac4d02d015f7c16dd2246f2f -size 4116960 diff --git a/build/torch28-cxx11-cu128-x86_64-linux/activation/_ops.py b/build/torch28-cxx11-cu128-x86_64-linux/activation/_ops.py deleted file mode 100644 index 4d722bffa37106dd2bfdb75db14408c7eecefcb0..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu128-x86_64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _activation_beeaae6 -ops = torch.ops._activation_beeaae6 - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. - """ - return f"_activation_beeaae6::{op_name}" \ No newline at end of file diff --git a/build/torch28-cxx11-cu128-x86_64-linux/activation/layers.py b/build/torch28-cxx11-cu128-x86_64-linux/activation/layers.py deleted file mode 100644 index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu128-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,179 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - -class Silu(nn.Module): - """An activation function for SiLU. - - The function computes x -> silu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.silu(out, x) - return out - -class Gelu(nn.Module): - """An activation function for GELU. - - The function computes x -> gelu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu(out, x) - return out - -class GeluTanh(nn.Module): - """An activation function for GELU with `tanh` approximation. - - The function computes x -> gelu_tanh(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu_tanh(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. 
- - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/__init__.py b/build/torch28-cxx11-cu129-aarch64-linux/activation/__init__.py deleted file mode 100644 index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu129-aarch64-linux/activation/__init__.py +++ /dev/null @@ -1,75 +0,0 @@ -import torch - -from ._ops import ops - -from . 
import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu(out, x) - return out - -def silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu(out, x) - return out - - -def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh(out, x) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "mul_and_silu", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "gelu_tanh", - "silu", - "gelu", - "layers", -] diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index e53c600baf751d47e3c75f0ea262aaa74cbaa2a0..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc deleted file mode 100644 index cfe526dc3c92a5c7b1a46084e58d4448fc74b15b..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc deleted file mode 100644 index 878be1d140d35a1a92eb1b870cd3ccc0bbb65128..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/_activation_320b408.abi3.so b/build/torch28-cxx11-cu129-aarch64-linux/activation/_activation_320b408.abi3.so deleted file mode 100644 index 485825618d1d0c2e93123fe5197999883b59b748..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu129-aarch64-linux/activation/_activation_320b408.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3af83bae80c8641200010ba586e5a2cac271fa4fcd344e3532ea7d5094fd7c17 -size 4275744 diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/_ops.py b/build/torch28-cxx11-cu129-aarch64-linux/activation/_ops.py deleted file mode 100644 index 0fe83704e6d8850cb94dd0434fb763bff8e7e953..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu129-aarch64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import 
torch -from . import _activation_320b408 -ops = torch.ops._activation_320b408 - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. - """ - return f"_activation_320b408::{op_name}" \ No newline at end of file diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/layers.py b/build/torch28-cxx11-cu129-aarch64-linux/activation/layers.py deleted file mode 100644 index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu129-aarch64-linux/activation/layers.py +++ /dev/null @@ -1,179 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - -class Silu(nn.Module): - """An activation function for SiLU. - - The function computes x -> silu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.silu(out, x) - return out - -class Gelu(nn.Module): - """An activation function for GELU. - - The function computes x -> gelu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu(out, x) - return out - -class GeluTanh(nn.Module): - """An activation function for GELU with `tanh` approximation. - - The function computes x -> gelu_tanh(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu_tanh(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. 
- - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch28-cxx11-cu129-x86_64-linux/activation/__init__.py b/build/torch28-cxx11-cu129-x86_64-linux/activation/__init__.py deleted file mode 100644 index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu129-x86_64-linux/activation/__init__.py +++ /dev/null @@ -1,75 +0,0 @@ -import torch - -from ._ops import ops - -from . 
import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu(out, x) - return out - -def silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu(out, x) - return out - - -def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh(out, x) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "mul_and_silu", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "gelu_tanh", - "silu", - "gelu", - "layers", -] diff --git a/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index cbbd7d5ff58d32b11600b3114e01c9f049ac553a..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc deleted file mode 100644 index 6239d94f12316596571aa36b5f80073c4b3001c4..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc deleted file mode 100644 index 7c61641f68aa6668f378809762977aac8344e655..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu129-x86_64-linux/activation/_activation_beeaae6.abi3.so b/build/torch28-cxx11-cu129-x86_64-linux/activation/_activation_beeaae6.abi3.so deleted file mode 100755 index df6a901f09b0db5c03a0dea245c2500eb9a4b05a..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu129-x86_64-linux/activation/_activation_beeaae6.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:de346c02f046cbb177556580efc9994632adad1439bb90f451f2f690e326c39c -size 4154840 diff --git a/build/torch28-cxx11-cu129-x86_64-linux/activation/_ops.py b/build/torch28-cxx11-cu129-x86_64-linux/activation/_ops.py deleted file mode 100644 index 4d722bffa37106dd2bfdb75db14408c7eecefcb0..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu129-x86_64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . 
import _activation_beeaae6 -ops = torch.ops._activation_beeaae6 - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. - """ - return f"_activation_beeaae6::{op_name}" \ No newline at end of file diff --git a/build/torch28-cxx11-cu129-x86_64-linux/activation/layers.py b/build/torch28-cxx11-cu129-x86_64-linux/activation/layers.py deleted file mode 100644 index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu129-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,179 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - -class Silu(nn.Module): - """An activation function for SiLU. - - The function computes x -> silu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.silu(out, x) - return out - -class Gelu(nn.Module): - """An activation function for GELU. - - The function computes x -> gelu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu(out, x) - return out - -class GeluTanh(nn.Module): - """An activation function for GELU with `tanh` approximation. - - The function computes x -> gelu_tanh(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu_tanh(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. 
- - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch29-cxx11-cu126-aarch64-linux/activation/__init__.py b/build/torch29-cxx11-cu126-aarch64-linux/activation/__init__.py deleted file mode 100644 index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000 --- a/build/torch29-cxx11-cu126-aarch64-linux/activation/__init__.py +++ /dev/null @@ -1,75 +0,0 @@ -import torch - -from ._ops import ops - -from . 
import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu(out, x) - return out - -def silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu(out, x) - return out - - -def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh(out, x) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "mul_and_silu", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "gelu_tanh", - "silu", - "gelu", - "layers", -] diff --git a/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 60dc82724c779cfa41bd9b8dcf39c036e2a50109..0000000000000000000000000000000000000000 Binary files a/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc deleted file mode 100644 index 48cda67561066b31e84ee5ecebcf0ef61e1ad322..0000000000000000000000000000000000000000 Binary files a/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ diff --git a/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc deleted file mode 100644 index 0082ca0b0e28577622a3e430602fabe010369318..0000000000000000000000000000000000000000 Binary files a/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ diff --git a/build/torch29-cxx11-cu126-aarch64-linux/activation/_activation_320b408.abi3.so b/build/torch29-cxx11-cu126-aarch64-linux/activation/_activation_320b408.abi3.so deleted file mode 100644 index 41c75640cfdc7eeff3d57f4a6d403f7e7f10b8d8..0000000000000000000000000000000000000000 --- a/build/torch29-cxx11-cu126-aarch64-linux/activation/_activation_320b408.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f9c24e0eb75a09a9fc19e7096276d560226f198617291681c1a18e94002a629e -size 2963480 diff --git a/build/torch29-cxx11-cu126-aarch64-linux/activation/_ops.py b/build/torch29-cxx11-cu126-aarch64-linux/activation/_ops.py deleted file mode 100644 index 0fe83704e6d8850cb94dd0434fb763bff8e7e953..0000000000000000000000000000000000000000 --- a/build/torch29-cxx11-cu126-aarch64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import 
torch -from . import _activation_320b408 -ops = torch.ops._activation_320b408 - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. - """ - return f"_activation_320b408::{op_name}" \ No newline at end of file diff --git a/build/torch29-cxx11-cu126-aarch64-linux/activation/layers.py b/build/torch29-cxx11-cu126-aarch64-linux/activation/layers.py deleted file mode 100644 index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..0000000000000000000000000000000000000000 --- a/build/torch29-cxx11-cu126-aarch64-linux/activation/layers.py +++ /dev/null @@ -1,179 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - -class Silu(nn.Module): - """An activation function for SiLU. - - The function computes x -> silu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.silu(out, x) - return out - -class Gelu(nn.Module): - """An activation function for GELU. - - The function computes x -> gelu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu(out, x) - return out - -class GeluTanh(nn.Module): - """An activation function for GELU with `tanh` approximation. - - The function computes x -> gelu_tanh(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu_tanh(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. 
- - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch29-cxx11-cu126-x86_64-linux/activation/__init__.py b/build/torch29-cxx11-cu126-x86_64-linux/activation/__init__.py deleted file mode 100644 index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000 --- a/build/torch29-cxx11-cu126-x86_64-linux/activation/__init__.py +++ /dev/null @@ -1,75 +0,0 @@ -import torch - -from ._ops import ops - -from . 
import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu(out, x) - return out - -def silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu(out, x) - return out - - -def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh(out, x) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "mul_and_silu", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "gelu_tanh", - "silu", - "gelu", - "layers", -] diff --git a/build/torch29-cxx11-cu126-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch29-cxx11-cu126-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 8701dcb62a9afdfff0bf2da0b13995a2f4052dc2..0000000000000000000000000000000000000000 Binary files a/build/torch29-cxx11-cu126-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/build/torch29-cxx11-cu126-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch29-cxx11-cu126-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc deleted file mode 100644 index a4a13d5d1bf25ab58915502dc566b8de851bc021..0000000000000000000000000000000000000000 Binary files a/build/torch29-cxx11-cu126-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ diff --git a/build/torch29-cxx11-cu126-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch29-cxx11-cu126-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc deleted file mode 100644 index 3d7a5ecaadd06dac28e818f8290b371c1294f7a4..0000000000000000000000000000000000000000 Binary files a/build/torch29-cxx11-cu126-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ diff --git a/build/torch29-cxx11-cu126-x86_64-linux/activation/_activation_beeaae6.abi3.so b/build/torch29-cxx11-cu126-x86_64-linux/activation/_activation_beeaae6.abi3.so deleted file mode 100755 index 31e749efdff1ee341c214c67049d687123ed5a42..0000000000000000000000000000000000000000 --- a/build/torch29-cxx11-cu126-x86_64-linux/activation/_activation_beeaae6.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:59e915bb752b7105f3c2594ababa4480e8de7408257b07f5897f82012377e8c7 -size 2837168 diff --git a/build/torch29-cxx11-cu126-x86_64-linux/activation/_ops.py b/build/torch29-cxx11-cu126-x86_64-linux/activation/_ops.py deleted file mode 100644 index 4d722bffa37106dd2bfdb75db14408c7eecefcb0..0000000000000000000000000000000000000000 --- a/build/torch29-cxx11-cu126-x86_64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . 
import _activation_beeaae6 -ops = torch.ops._activation_beeaae6 - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. - """ - return f"_activation_beeaae6::{op_name}" \ No newline at end of file diff --git a/build/torch29-cxx11-cu126-x86_64-linux/activation/layers.py b/build/torch29-cxx11-cu126-x86_64-linux/activation/layers.py deleted file mode 100644 index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..0000000000000000000000000000000000000000 --- a/build/torch29-cxx11-cu126-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,179 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - -class Silu(nn.Module): - """An activation function for SiLU. - - The function computes x -> silu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.silu(out, x) - return out - -class Gelu(nn.Module): - """An activation function for GELU. - - The function computes x -> gelu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu(out, x) - return out - -class GeluTanh(nn.Module): - """An activation function for GELU with `tanh` approximation. - - The function computes x -> gelu_tanh(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu_tanh(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. 
- - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch29-cxx11-cu128-aarch64-linux/activation/__init__.py b/build/torch29-cxx11-cu128-aarch64-linux/activation/__init__.py deleted file mode 100644 index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000 --- a/build/torch29-cxx11-cu128-aarch64-linux/activation/__init__.py +++ /dev/null @@ -1,75 +0,0 @@ -import torch - -from ._ops import ops - -from . 
import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu(out, x) - return out - -def silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu(out, x) - return out - - -def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh(out, x) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "mul_and_silu", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "gelu_tanh", - "silu", - "gelu", - "layers", -] diff --git a/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 4d338b4d5170fa0130189f67e65562998f8f42be..0000000000000000000000000000000000000000 Binary files a/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc deleted file mode 100644 index be7ffd679d4afbc36ea076dbc57e3162a60bd409..0000000000000000000000000000000000000000 Binary files a/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ diff --git a/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc deleted file mode 100644 index e50041e74611417f4e4037e568a9e041780a5e32..0000000000000000000000000000000000000000 Binary files a/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ diff --git a/build/torch29-cxx11-cu128-aarch64-linux/activation/_activation_320b408.abi3.so b/build/torch29-cxx11-cu128-aarch64-linux/activation/_activation_320b408.abi3.so deleted file mode 100644 index dc83e4989904884309410757826ec095ea0fdfe4..0000000000000000000000000000000000000000 --- a/build/torch29-cxx11-cu128-aarch64-linux/activation/_activation_320b408.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:08ee3dfa4d481eaf44ac3c11a0843598c05950f779dba66abd468fecb7839b32 -size 4208760 diff --git a/build/torch29-cxx11-cu128-aarch64-linux/activation/_ops.py b/build/torch29-cxx11-cu128-aarch64-linux/activation/_ops.py deleted file mode 100644 index 0fe83704e6d8850cb94dd0434fb763bff8e7e953..0000000000000000000000000000000000000000 --- a/build/torch29-cxx11-cu128-aarch64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import 
torch -from . import _activation_320b408 -ops = torch.ops._activation_320b408 - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. - """ - return f"_activation_320b408::{op_name}" \ No newline at end of file diff --git a/build/torch29-cxx11-cu128-aarch64-linux/activation/layers.py b/build/torch29-cxx11-cu128-aarch64-linux/activation/layers.py deleted file mode 100644 index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..0000000000000000000000000000000000000000 --- a/build/torch29-cxx11-cu128-aarch64-linux/activation/layers.py +++ /dev/null @@ -1,179 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - -class Silu(nn.Module): - """An activation function for SiLU. - - The function computes x -> silu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.silu(out, x) - return out - -class Gelu(nn.Module): - """An activation function for GELU. - - The function computes x -> gelu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu(out, x) - return out - -class GeluTanh(nn.Module): - """An activation function for GELU with `tanh` approximation. - - The function computes x -> gelu_tanh(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu_tanh(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. 
- - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch29-cxx11-cu128-x86_64-linux/activation/__init__.py b/build/torch29-cxx11-cu128-x86_64-linux/activation/__init__.py deleted file mode 100644 index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000 --- a/build/torch29-cxx11-cu128-x86_64-linux/activation/__init__.py +++ /dev/null @@ -1,75 +0,0 @@ -import torch - -from ._ops import ops - -from . 
import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu(out, x) - return out - -def silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu(out, x) - return out - - -def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh(out, x) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "mul_and_silu", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "gelu_tanh", - "silu", - "gelu", - "layers", -] diff --git a/build/torch29-cxx11-cu128-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch29-cxx11-cu128-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index b934f588f1084b4e695f05dd5b505bb9f3b6977a..0000000000000000000000000000000000000000 Binary files a/build/torch29-cxx11-cu128-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/build/torch29-cxx11-cu128-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch29-cxx11-cu128-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc deleted file mode 100644 index 65804eed7cc7204dc308abe7c10470bb29e91534..0000000000000000000000000000000000000000 Binary files a/build/torch29-cxx11-cu128-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ diff --git a/build/torch29-cxx11-cu128-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch29-cxx11-cu128-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc deleted file mode 100644 index d232f4f0d36e80341e80d079349f68ddc9f5a3cc..0000000000000000000000000000000000000000 Binary files a/build/torch29-cxx11-cu128-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ diff --git a/build/torch29-cxx11-cu128-x86_64-linux/activation/_activation_beeaae6.abi3.so b/build/torch29-cxx11-cu128-x86_64-linux/activation/_activation_beeaae6.abi3.so deleted file mode 100755 index 386275e1936b21f67c78effb606db9a1d69f729a..0000000000000000000000000000000000000000 --- a/build/torch29-cxx11-cu128-x86_64-linux/activation/_activation_beeaae6.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:74494aaff73017fd8103b598f6fc8c92085e3dc0be63bda413f658bb7bbfc9b0 -size 4116936 diff --git a/build/torch29-cxx11-cu128-x86_64-linux/activation/_ops.py b/build/torch29-cxx11-cu128-x86_64-linux/activation/_ops.py deleted file mode 100644 index 4d722bffa37106dd2bfdb75db14408c7eecefcb0..0000000000000000000000000000000000000000 --- a/build/torch29-cxx11-cu128-x86_64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . 
import _activation_beeaae6 -ops = torch.ops._activation_beeaae6 - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. - """ - return f"_activation_beeaae6::{op_name}" \ No newline at end of file diff --git a/build/torch29-cxx11-cu128-x86_64-linux/activation/layers.py b/build/torch29-cxx11-cu128-x86_64-linux/activation/layers.py deleted file mode 100644 index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..0000000000000000000000000000000000000000 --- a/build/torch29-cxx11-cu128-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,179 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - -class Silu(nn.Module): - """An activation function for SiLU. - - The function computes x -> silu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.silu(out, x) - return out - -class Gelu(nn.Module): - """An activation function for GELU. - - The function computes x -> gelu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu(out, x) - return out - -class GeluTanh(nn.Module): - """An activation function for GELU with `tanh` approximation. - - The function computes x -> gelu_tanh(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu_tanh(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. 
- - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch29-cxx11-cu130-aarch64-linux/activation/__init__.py b/build/torch29-cxx11-cu130-aarch64-linux/activation/__init__.py deleted file mode 100644 index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000 --- a/build/torch29-cxx11-cu130-aarch64-linux/activation/__init__.py +++ /dev/null @@ -1,75 +0,0 @@ -import torch - -from ._ops import ops - -from . 
import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu(out, x) - return out - -def silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu(out, x) - return out - - -def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh(out, x) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "mul_and_silu", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "gelu_tanh", - "silu", - "gelu", - "layers", -] diff --git a/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 21696c8710d6b717d92ebd34545a9ac97cc44942..0000000000000000000000000000000000000000 Binary files a/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc deleted file mode 100644 index 1856969205a3825653d4be5e4c267a9585ff6594..0000000000000000000000000000000000000000 Binary files a/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ diff --git a/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc deleted file mode 100644 index a8e0f48d49bb34730201d17d0795310d829e20cb..0000000000000000000000000000000000000000 Binary files a/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ diff --git a/build/torch29-cxx11-cu130-aarch64-linux/activation/_activation_320b408.abi3.so b/build/torch29-cxx11-cu130-aarch64-linux/activation/_activation_320b408.abi3.so deleted file mode 100644 index 02267d619c1ad4c0bb7f84b243e5456c6bf7c798..0000000000000000000000000000000000000000 --- a/build/torch29-cxx11-cu130-aarch64-linux/activation/_activation_320b408.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:73748b54059552f5983322f7dedc36ed349b38ad6fb9318301bb4965b1fe49aa -size 4094968 diff --git a/build/torch29-cxx11-cu130-aarch64-linux/activation/_ops.py b/build/torch29-cxx11-cu130-aarch64-linux/activation/_ops.py deleted file mode 100644 index 0fe83704e6d8850cb94dd0434fb763bff8e7e953..0000000000000000000000000000000000000000 --- a/build/torch29-cxx11-cu130-aarch64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import 
torch -from . import _activation_320b408 -ops = torch.ops._activation_320b408 - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. - """ - return f"_activation_320b408::{op_name}" \ No newline at end of file diff --git a/build/torch29-cxx11-cu130-aarch64-linux/activation/layers.py b/build/torch29-cxx11-cu130-aarch64-linux/activation/layers.py deleted file mode 100644 index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..0000000000000000000000000000000000000000 --- a/build/torch29-cxx11-cu130-aarch64-linux/activation/layers.py +++ /dev/null @@ -1,179 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - -class Silu(nn.Module): - """An activation function for SiLU. - - The function computes x -> silu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.silu(out, x) - return out - -class Gelu(nn.Module): - """An activation function for GELU. - - The function computes x -> gelu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu(out, x) - return out - -class GeluTanh(nn.Module): - """An activation function for GELU with `tanh` approximation. - - The function computes x -> gelu_tanh(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu_tanh(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. 
- - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch29-cxx11-cu130-x86_64-linux/activation/__init__.py b/build/torch29-cxx11-cu130-x86_64-linux/activation/__init__.py deleted file mode 100644 index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000 --- a/build/torch29-cxx11-cu130-x86_64-linux/activation/__init__.py +++ /dev/null @@ -1,75 +0,0 @@ -import torch - -from ._ops import ops - -from . 
import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu(out, x) - return out - -def silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu(out, x) - return out - - -def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh(out, x) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "mul_and_silu", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "gelu_tanh", - "silu", - "gelu", - "layers", -] diff --git a/build/torch29-cxx11-cu130-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch29-cxx11-cu130-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index eac32d49e24d1a8671ffcddff8119d7a14e35f3f..0000000000000000000000000000000000000000 Binary files a/build/torch29-cxx11-cu130-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/build/torch29-cxx11-cu130-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch29-cxx11-cu130-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc deleted file mode 100644 index 1f4111d9c64240435bd7d59958c320ea24e2f710..0000000000000000000000000000000000000000 Binary files a/build/torch29-cxx11-cu130-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ diff --git a/build/torch29-cxx11-cu130-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch29-cxx11-cu130-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc deleted file mode 100644 index 40eec88689ee66667ecd946bb43a0cd137b80d38..0000000000000000000000000000000000000000 Binary files a/build/torch29-cxx11-cu130-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ diff --git a/build/torch29-cxx11-cu130-x86_64-linux/activation/_activation_beeaae6.abi3.so b/build/torch29-cxx11-cu130-x86_64-linux/activation/_activation_beeaae6.abi3.so deleted file mode 100755 index 38e458a1206168d344db213c3c06e3cd873a6834..0000000000000000000000000000000000000000 --- a/build/torch29-cxx11-cu130-x86_64-linux/activation/_activation_beeaae6.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:0f5500ae615f8a0abf063368bf22c4d031a2e4a8893817bd3bcaffc321d1622d -size 4019704 diff --git a/build/torch29-cxx11-cu130-x86_64-linux/activation/_ops.py b/build/torch29-cxx11-cu130-x86_64-linux/activation/_ops.py deleted file mode 100644 index 4d722bffa37106dd2bfdb75db14408c7eecefcb0..0000000000000000000000000000000000000000 --- a/build/torch29-cxx11-cu130-x86_64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . 
import _activation_beeaae6 -ops = torch.ops._activation_beeaae6 - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. - """ - return f"_activation_beeaae6::{op_name}" \ No newline at end of file diff --git a/build/torch29-cxx11-cu130-x86_64-linux/activation/layers.py b/build/torch29-cxx11-cu130-x86_64-linux/activation/layers.py deleted file mode 100644 index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..0000000000000000000000000000000000000000 --- a/build/torch29-cxx11-cu130-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,179 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - -class Silu(nn.Module): - """An activation function for SiLU. - - The function computes x -> silu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.silu(out, x) - return out - -class Gelu(nn.Module): - """An activation function for GELU. - - The function computes x -> gelu(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu(out, x) - return out - -class GeluTanh(nn.Module): - """An activation function for GELU with `tanh` approximation. - - The function computes x -> gelu_tanh(x). - - Shapes: - x: (num_tokens, d) or (batch_size, seq_len, d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - out = torch.empty_like(x) - ops.gelu_tanh(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. 
- - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/flake.nix b/flake.nix new file mode 100644 index 0000000000000000000000000000000000000000..54ac44c0698d43fb86a123430f5e9d2e9bcda8ea --- /dev/null +++ b/flake.nix @@ -0,0 +1,17 @@ +{ + description = "Flake for activation kernels"; + + inputs = { + kernel-builder.url = "github:huggingface/kernel-builder"; + }; + + outputs = + { + self, + kernel-builder, + }: + kernel-builder.lib.genFlakeOutputs { + path = ./.; + rev = self.shortRev or self.dirtyShortRev or self.lastModifiedDate; + }; +} diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/kernels/__init__.py b/tests/kernels/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/kernels/allclose_default.py b/tests/kernels/allclose_default.py new file mode 100644 index 0000000000000000000000000000000000000000..80eb1eeb9fb738d70efe28d64df98b2ff7223463 --- /dev/null +++ b/tests/kernels/allclose_default.py @@ -0,0 +1,14 @@ +import torch + +# Reference default values of atol and rtol are from +# https://github.com/pytorch/pytorch/blob/6d96beb6bec24d73ee3f080bac54d2104068f675/test/test_transformers.py#L67 +default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float: 1e-5} +default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float: 1.3e-6} + + +def get_default_atol(output) -> float: + return default_atol[output.dtype] + + +def get_default_rtol(output) -> float: + return default_rtol[output.dtype] diff --git 
a/tests/kernels/test_activation.py b/tests/kernels/test_activation.py new file mode 100644 index 0000000000000000000000000000000000000000..5d6aa773c9abcb5d3c0d61646e465aae9951966d --- /dev/null +++ b/tests/kernels/test_activation.py @@ -0,0 +1,165 @@ +import math +import random +from typing import Type + +import activation +import pytest +import torch +import torch.nn.functional as F + +from .utils import opcheck +from .allclose_default import get_default_atol, get_default_rtol + +DTYPES = [torch.half, torch.bfloat16, torch.float] +NUM_TOKENS = [7, 83, 2048] # Arbitrary values for testing +D = [512, 13824] # Arbitrary values for testing +SEEDS = [0] +CUDA_DEVICES = [f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)] + + +def gelu_fast(x: torch.Tensor) -> torch.Tensor: + return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x))) + + +def gelu_new(x: torch.Tensor) -> torch.Tensor: + c = math.sqrt(2.0 / math.pi) + return 0.5 * x * (1.0 + torch.tanh(c * (x + 0.044715 * torch.pow(x, 3.0)))) + + +def gelu_quick(x: torch.Tensor) -> torch.Tensor: + return x * torch.sigmoid(1.702 * x) + + +def fatrelu_and_mul(x: torch.Tensor, threshold: float) -> torch.Tensor: + d = x.shape[-1] // 2 + x1 = x[..., :d] + x2 = x[..., d:] + x1 = F.threshold(x1, threshold, 0.0) + return x1 * x2 + + +def silu_and_mul(x: torch.Tensor) -> torch.Tensor: + d = x.shape[-1] // 2 + return F.silu(x[..., :d]) * x[..., d:] + + +def gelu_and_mul(x: torch.Tensor, approximate: str) -> torch.Tensor: + d = x.shape[-1] // 2 + return F.gelu(x[..., :d], approximate=approximate) * x[..., d:] + + +@pytest.mark.parametrize("activation_name", ["silu", "gelu", "gelu_tanh", "fatrelu"]) +@pytest.mark.parametrize("num_tokens", NUM_TOKENS) +@pytest.mark.parametrize("d", D) +@pytest.mark.parametrize("dtype", DTYPES) +@pytest.mark.parametrize("seed", SEEDS) +@pytest.mark.parametrize("device", CUDA_DEVICES) +@torch.inference_mode() +def test_act_and_mul( + activation_name: str, + num_tokens: int, + d: int, + dtype: torch.dtype, + seed: int, + device: str, +) -> None: + random.seed(seed) + torch.manual_seed(seed) + torch.set_default_device(device) + x = torch.randn(num_tokens, 2 * d, dtype=dtype) + if activation_name == "silu": + torch_fn = silu_and_mul + fn = activation.silu_and_mul + op = activation.ops.silu_and_mul + layer = activation.layers.SiluAndMul() + elif activation_name == "gelu": + torch_fn = lambda x: gelu_and_mul(x, "none") + fn = activation.gelu_and_mul + op = activation.ops.gelu_and_mul + layer = activation.layers.GeluAndMul() + elif activation_name == "gelu_tanh": + torch_fn = lambda x: gelu_and_mul(x, "tanh") + fn = activation.gelu_tanh_and_mul + op = activation.ops.gelu_tanh_and_mul + layer = activation.layers.GeluTanhAndMul() + elif activation_name == "fatrelu": + threshold = random.uniform(0, 1) + torch_fn = lambda x: fatrelu_and_mul(x, threshold) + fn = lambda out, x: activation.fatrelu_and_mul(out, x, threshold) + op = activation.ops.fatrelu_and_mul + layer = activation.layers.FatreluAndMul(threshold) + + out_shape = x.shape[:-1] + (x.shape[-1] // 2,) + out = torch.empty(out_shape, dtype=x.dtype, device=x.device) + out = fn(out, x) + mod_out = layer(x) + ref_out = torch_fn(x) + + # The SiLU, GELU and FatReLU implementations are equivalent to the native + # PyTorch implementations, so we can do exact comparison. 
+ torch.testing.assert_close(out, ref_out, atol=0.0, rtol=0.0) + torch.testing.assert_close(mod_out, ref_out, atol=0.0, rtol=0.0) + + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + if activation_name == "fatrelu": + opcheck(op, (out, x, threshold)) + else: + opcheck(op, (out, x)) + + +@pytest.mark.parametrize( + "activation_fns", + [ + ( + gelu_fast, + activation.gelu_fast, + activation.ops.gelu_fast, + activation.layers.FastGELU, + ), + ( + gelu_new, + activation.gelu_new, + activation.ops.gelu_new, + activation.layers.NewGELU, + ), + ( + gelu_quick, + activation.gelu_quick, + activation.ops.gelu_quick, + activation.layers.QuickGELU, + ), + ], +) +@pytest.mark.parametrize("num_tokens", NUM_TOKENS) +@pytest.mark.parametrize("d", D) +@pytest.mark.parametrize("dtype", DTYPES) +@pytest.mark.parametrize("seed", SEEDS) +@pytest.mark.parametrize("device", CUDA_DEVICES) +@torch.inference_mode() +def test_activation( + activation_fns, + num_tokens: int, + d: int, + dtype: torch.dtype, + seed: int, + device: str, +) -> None: + torch.manual_seed(seed) + torch.set_default_device(device) + x = torch.randn(num_tokens, d, dtype=dtype) + torch_fn, fn, op, cls = activation_fns + layer = cls() + out = fn(torch.empty_like(x), x) + layer_out = layer(x) + ref_out = torch_fn(x) + torch.testing.assert_close( + out, ref_out, atol=get_default_atol(out), rtol=get_default_rtol(out) + ) + torch.testing.assert_close( + out, layer_out, atol=get_default_atol(out), rtol=get_default_rtol(out) + ) + + out = torch.empty_like(x) + opcheck(op, (out, x)) diff --git a/tests/kernels/utils.py b/tests/kernels/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d24c5babfb9690031844f262212d80274fd478c6 --- /dev/null +++ b/tests/kernels/utils.py @@ -0,0 +1,73 @@ +"""Kernel test utils""" + +import itertools +import random +import unittest +from numbers import Number +from typing import Any, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union + +import pytest +import torch +from torch._prims_common import TensorLikeType + +# For now, disable "test_aot_dispatch_dynamic" since there are some +# bugs related to this test in PyTorch 2.4. +DEFAULT_OPCHECK_TEST_UTILS: Tuple[str, ...] = ( + "test_schema", + "test_autograd_registration", + "test_faketensor", +) + +ALL_OPCHECK_TEST_UTILS: Tuple[str, ...] = ( + "test_schema", + "test_autograd_registration", + "test_faketensor", + "test_aot_dispatch_dynamic", +) + + +# Copied/modified from torch._refs.__init__.py +def fp8_allclose( + a: TensorLikeType, + b: TensorLikeType, + rtol: float = 1e-05, + atol: float = 1e-08, + equal_nan: bool = False, +) -> bool: + """ + Reference implementation of torch.allclose + """ + torch._refs._check_close_args(name="torch.allclose", a=a, b=b, rtol=rtol, atol=atol) + + return bool( + torch.all( + torch.isclose( + a.double(), b.double(), rtol=rtol, atol=atol, equal_nan=equal_nan + ) + ).item() + ) + + +# A special version of op check that has a restricted default set of test_utils +# and a patched version of allclose that supports fp8 types. 
+def opcheck( + op: Union[ + torch._ops.OpOverload, + torch._ops.OpOverloadPacket, + torch._library.custom_ops.CustomOpDef, + ], + args: Tuple[Any, ...], + kwargs: Optional[Dict[str, Any]] = None, + *, + test_utils: Union[str, Sequence[str]] = ALL_OPCHECK_TEST_UTILS, + raise_exception: bool = True, + cond: bool = True +) -> Dict[str, str]: + with unittest.mock.patch("torch.allclose", new=fp8_allclose): + return ( + torch.library.opcheck( + op, args, kwargs, test_utils=test_utils, raise_exception=raise_exception + ) + if cond + else {} + ) diff --git a/torch-ext/activation/__init__.py b/torch-ext/activation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78 --- /dev/null +++ b/torch-ext/activation/__init__.py @@ -0,0 +1,52 @@ +import torch + +from ._ops import ops + +from . import layers + + +def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.silu_and_mul(out, x) + return out + + +def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_and_mul(out, x) + return out + + +def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_tanh_and_mul(out, x) + return out + + +def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: + ops.fatrelu_and_mul(out, x, threshold) + return out + + +def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_fast(out, x) + return out + + +def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_new(out, x) + return out + + +def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_quick(out, x) + return out + + +__all__ = [ + "silu_and_mul", + "gelu_and_mul", + "gelu_tanh_and_mul", + "fatrelu_and_mul", + "gelu_fast", + "gelu_new", + "gelu_quick", + "layers", +] diff --git a/torch-ext/activation/layers.py b/torch-ext/activation/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f --- /dev/null +++ b/torch-ext/activation/layers.py @@ -0,0 +1,65 @@ +import torch +import torch.nn as nn + +from ._ops import ops + + +class SiluAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.silu_and_mul(out, x) + return out + + +class GeluAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.gelu_and_mul(out, x) + return out + + +class GeluTanhAndMul(nn.Module): + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.gelu_tanh_and_mul(out, x) + return out + + +class FatreluAndMul(nn.Module): + def __init__(self, threshold: float = 0.0): + super().__init__() + self.threshold = threshold + + def forward(self, x: torch.Tensor): + d = x.shape[-1] // 2 + output_shape = x.shape[:-1] + (d,) + out = torch.empty(output_shape, dtype=x.dtype, device=x.device) + ops.fatrelu_and_mul(out, x, self.threshold) + return out + + +class FastGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_fast(out, x) + return out + + +class NewGELU(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_new(out, x) + return out + + +class QuickGELU(nn.Module): + def 
forward(self, x: torch.Tensor) -> torch.Tensor: + out = torch.empty_like(x) + ops.gelu_quick(out, x) + return out diff --git a/torch-ext/torch_binding.cpp b/torch-ext/torch_binding.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b6148ecc33137085656b962c0cdc10fd480dc787 --- /dev/null +++ b/torch-ext/torch_binding.cpp @@ -0,0 +1,37 @@ +#include + +#include "registration.h" +#include "torch_binding.h" + +TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) { + // Activation ops + // Activation function used in SwiGLU. + ops.def("silu_and_mul(Tensor! out, Tensor input) -> ()"); + ops.impl("silu_and_mul", torch::kCUDA, &silu_and_mul); + + // Activation function used in GeGLU with `none` approximation. + ops.def("gelu_and_mul(Tensor! out, Tensor input) -> ()"); + ops.impl("gelu_and_mul", torch::kCUDA, &gelu_and_mul); + + // Activation function used in GeGLU with `tanh` approximation. + ops.def("gelu_tanh_and_mul(Tensor! out, Tensor input) -> ()"); + ops.impl("gelu_tanh_and_mul", torch::kCUDA, &gelu_tanh_and_mul); + + // FATReLU implementation. + ops.def("fatrelu_and_mul(Tensor! out, Tensor input, float threshold) -> ()"); + ops.impl("fatrelu_and_mul", torch::kCUDA, &fatrelu_and_mul); + + // GELU implementation used in GPT-2. + ops.def("gelu_new(Tensor! out, Tensor input) -> ()"); + ops.impl("gelu_new", torch::kCUDA, &gelu_new); + + // Approximate GELU implementation. + ops.def("gelu_fast(Tensor! out, Tensor input) -> ()"); + ops.impl("gelu_fast", torch::kCUDA, &gelu_fast); + + // Quick GELU implementation. + ops.def("gelu_quick(Tensor! out, Tensor input) -> ()"); + ops.impl("gelu_quick", torch::kCUDA, &gelu_quick); +} + +REGISTER_EXTENSION(TORCH_EXTENSION_NAME) diff --git a/torch-ext/torch_binding.h b/torch-ext/torch_binding.h new file mode 100644 index 0000000000000000000000000000000000000000..cb163cfc1da061377d077bab6d12a8b048d60fa5 --- /dev/null +++ b/torch-ext/torch_binding.h @@ -0,0 +1,18 @@ +#pragma once + +#include + +void silu_and_mul(torch::Tensor &out, torch::Tensor &input); + +void gelu_and_mul(torch::Tensor &out, torch::Tensor &input); + +void gelu_tanh_and_mul(torch::Tensor &out, torch::Tensor &input); + +void fatrelu_and_mul(torch::Tensor &out, torch::Tensor &input, + double threshold); + +void gelu_new(torch::Tensor &out, torch::Tensor &input); + +void gelu_fast(torch::Tensor &out, torch::Tensor &input); + +void gelu_quick(torch::Tensor &out, torch::Tensor &input);
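
The patch above registers the CUDA ops (`torch_binding.cpp`), exposes thin Python wrappers (`torch-ext/activation/__init__.py`), and exercises them in `tests/kernels/test_activation.py`. For orientation, here is a minimal usage sketch based only on those files; it assumes the built `activation` package is importable and a CUDA device is available, and it is illustrative rather than part of the patch.

```python
# Hypothetical usage sketch for the activation package added in this patch.
# Assumes the compiled extension is on the path and CUDA is available.
import torch
import activation

device = "cuda"
dtype = torch.float16
num_tokens, d = 8, 512

# Gated activations consume [..., 2 * d] and write [..., d]:
#   out = act(x[..., :d]) * x[..., d:]
x = torch.randn(num_tokens, 2 * d, dtype=dtype, device=device)
out = torch.empty(num_tokens, d, dtype=dtype, device=device)
activation.silu_and_mul(out, x)

# The nn.Module wrappers in activation.layers allocate the output themselves
# and call the same underlying op, so the results should match exactly.
silu_mul = activation.layers.SiluAndMul()
assert torch.equal(silu_mul(x), out)

# FATReLU takes an extra threshold argument, mirroring the
# "fatrelu_and_mul(Tensor! out, Tensor input, float threshold)" schema.
activation.fatrelu_and_mul(out, x, threshold=0.5)

# Element-wise GELU variants keep the input shape.
y = torch.randn(num_tokens, d, dtype=dtype, device=device)
out2 = torch.empty_like(y)
activation.gelu_quick(out2, y)
```

The out-parameter style follows the `Tensor! out` mutation annotation used in the op schemas, which is what lets `torch.library.opcheck` (via the `opcheck` helper in `tests/kernels/utils.py`) validate the registrations.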