Add support for Ascend NPU
#8
by statelesshz — opened
- modeling_chatglm.py (+1 −1)
modeling_chatglm.py
CHANGED
@@ -27,7 +27,7 @@ from .configuration_chatglm import ChatGLMConfig


 # flags required to enable jit fusion kernels
-if sys.platform != 'darwin':
+if sys.platform != 'darwin' and torch.cuda.is_available():
     torch._C._jit_set_profiling_mode(False)
     torch._C._jit_set_profiling_executor(False)
     torch._C._jit_override_can_fuse_on_cpu(True)