Fabrice-TIERCELIN committed
Commit d2722cf · verified · 1 Parent(s): da7deef

" instead of '

Files changed (1)
  1. utils/collect_env.py +202 -202
utils/collect_env.py CHANGED
@@ -1,202 +1,202 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-"""This file holding some environment constant for sharing by other files."""
-import os
-import os.path as osp
-import subprocess
-import sys
-from collections import OrderedDict, defaultdict
-
-import numpy as np
-import torch
-
-
-def is_rocm_pytorch() -> bool:
-    """Check whether the PyTorch is compiled on ROCm."""
-    is_rocm = False
-    if TORCH_VERSION != 'parrots':
-        try:
-            from torch.utils.cpp_extension import ROCM_HOME
-            is_rocm = True if ((torch.version.hip is not None) and
-                               (ROCM_HOME is not None)) else False
-        except ImportError:
-            pass
-    return is_rocm
-
-TORCH_VERSION = torch.__version__
-
-def get_build_config():
-    """Obtain the build information of PyTorch or Parrots."""
-    if TORCH_VERSION == 'parrots':
-        from parrots.config import get_build_info
-        return get_build_info()
-    else:
-        return torch.__config__.show()
-
-try:
-    import torch_musa  # noqa: F401
-    IS_MUSA_AVAILABLE = True
-except Exception:
-    IS_MUSA_AVAILABLE = False
-
-def is_musa_available() -> bool:
-    return IS_MUSA_AVAILABLE
-
-def is_cuda_available() -> bool:
-    """Returns True if cuda devices exist."""
-    return torch.cuda.is_available()
-
-def _get_cuda_home():
-    if TORCH_VERSION == 'parrots':
-        from parrots.utils.build_extension import CUDA_HOME
-    else:
-        if is_rocm_pytorch():
-            from torch.utils.cpp_extension import ROCM_HOME
-            CUDA_HOME = ROCM_HOME
-        else:
-            from torch.utils.cpp_extension import CUDA_HOME
-    return CUDA_HOME
-
-
-def _get_musa_home():
-    return os.environ.get('MUSA_HOME')
-
-
-def collect_env():
-    """Collect the information of the running environments.
-
-    Returns:
-        dict: The environment information. The following fields are contained.
-
-            - sys.platform: The variable of ``sys.platform``.
-            - Python: Python version.
-            - CUDA available: Bool, indicating if CUDA is available.
-            - GPU devices: Device type of each GPU.
-            - CUDA_HOME (optional): The env var ``CUDA_HOME``.
-            - NVCC (optional): NVCC version.
-            - GCC: GCC version, "n/a" if GCC is not installed.
-            - MSVC: Microsoft Virtual C++ Compiler version, Windows only.
-            - PyTorch: PyTorch version.
-            - PyTorch compiling details: The output of \
-                ``torch.__config__.show()``.
-            - TorchVision (optional): TorchVision version.
-            - OpenCV (optional): OpenCV version.
-    """
-    from distutils import errors
-
-    env_info = OrderedDict()
-    env_info['sys.platform'] = sys.platform
-    env_info['Python'] = sys.version.replace('\n', '')
-
-    cuda_available = is_cuda_available()
-    musa_available = is_musa_available()
-    env_info['CUDA available'] = cuda_available
-    env_info['MUSA available'] = musa_available
-    env_info['numpy_random_seed'] = np.random.get_state()[1][0]
-
-    if cuda_available:
-        devices = defaultdict(list)
-        for k in range(torch.cuda.device_count()):
-            devices[torch.cuda.get_device_name(k)].append(str(k))
-        for name, device_ids in devices.items():
-            env_info['GPU ' + ','.join(device_ids)] = name
-
-        CUDA_HOME = _get_cuda_home()
-        env_info['CUDA_HOME'] = CUDA_HOME
-
-        if CUDA_HOME is not None and osp.isdir(CUDA_HOME):
-            if CUDA_HOME == '/opt/rocm':
-                try:
-                    nvcc = osp.join(CUDA_HOME, 'hip/bin/hipcc')
-                    nvcc = subprocess.check_output(
-                        f'"{nvcc}" --version', shell=True)
-                    nvcc = nvcc.decode('utf-8').strip()
-                    release = nvcc.rfind('HIP version:')
-                    build = nvcc.rfind('')
-                    nvcc = nvcc[release:build].strip()
-                except subprocess.SubprocessError:
-                    nvcc = 'Not Available'
-            else:
-                try:
-                    nvcc = osp.join(CUDA_HOME, 'bin/nvcc')
-                    nvcc = subprocess.check_output(f'"{nvcc}" -V', shell=True)
-                    nvcc = nvcc.decode('utf-8').strip()
-                    release = nvcc.rfind('Cuda compilation tools')
-                    build = nvcc.rfind('Build ')
-                    nvcc = nvcc[release:build].strip()
-                except subprocess.SubprocessError:
-                    nvcc = 'Not Available'
-            env_info['NVCC'] = nvcc
-    elif musa_available:
-        devices = defaultdict(list)
-        for k in range(torch.musa.device_count()):
-            devices[torch.musa.get_device_name(k)].append(str(k))
-        for name, device_ids in devices.items():
-            env_info['GPU ' + ','.join(device_ids)] = name
-
-        MUSA_HOME = _get_musa_home()
-        env_info['MUSA_HOME'] = MUSA_HOME
-
-        if MUSA_HOME is not None and osp.isdir(MUSA_HOME):
-            try:
-                mcc = osp.join(MUSA_HOME, 'bin/mcc')
-                subprocess.check_output(f'"{mcc}" -v', shell=True)
-            except subprocess.SubprocessError:
-                mcc = 'Not Available'
-            env_info['mcc'] = mcc
-    try:
-        # Check C++ Compiler.
-        # For Unix-like, sysconfig has 'CC' variable like 'gcc -pthread ...',
-        # indicating the compiler used, we use this to get the compiler name
-        import io
-        import sysconfig
-        cc = sysconfig.get_config_var('CC')
-        if cc:
-            cc = osp.basename(cc.split()[0])
-            cc_info = subprocess.check_output(f'{cc} --version', shell=True)
-            env_info['GCC'] = cc_info.decode('utf-8').partition(
-                '\n')[0].strip()
-        else:
-            # on Windows, cl.exe is not in PATH. We need to find the path.
-            # distutils.ccompiler.new_compiler() returns a msvccompiler
-            # object and after initialization, path to cl.exe is found.
-            import locale
-            import os
-            from distutils.ccompiler import new_compiler
-            ccompiler = new_compiler()
-            ccompiler.initialize()
-            cc = subprocess.check_output(
-                f'{ccompiler.cc}', stderr=subprocess.STDOUT, shell=True)
-            encoding = os.device_encoding(
-                sys.stdout.fileno()) or locale.getpreferredencoding()
-            env_info['MSVC'] = cc.decode(encoding).partition('\n')[0].strip()
-            env_info['GCC'] = 'n/a'
-    except (subprocess.CalledProcessError, errors.DistutilsPlatformError):
-        env_info['GCC'] = 'n/a'
-    except io.UnsupportedOperation as e:
-        # JupyterLab on Windows changes sys.stdout, which has no `fileno` attr
-        # Refer to: https://github.com/open-mmlab/mmengine/issues/931
-        # TODO: find a solution to get compiler info in Windows JupyterLab,
-        # while preserving backward-compatibility in other systems.
-        env_info['MSVC'] = f'n/a, reason: {str(e)}'
-
-    env_info['PyTorch'] = torch.__version__
-    env_info['PyTorch compiling details'] = get_build_config()
-
-    try:
-        import torchvision
-        env_info['TorchVision'] = torchvision.__version__
-    except ModuleNotFoundError:
-        pass
-
-    try:
-        import cv2
-        env_info['OpenCV'] = cv2.__version__
-    except ImportError:
-        pass
-
-
-    return env_info
-
-if __name__ == '__main__':
-    for name, val in collect_env().items():
-        print(f'{name}: {val}')
 
+# Copyright (c) OpenMMLab. All rights reserved.
+"""This file holding some environment constant for sharing by other files."""
+import os
+import os.path as osp
+import subprocess
+import sys
+from collections import OrderedDict, defaultdict
+
+import numpy as np
+import torch
+
+
+def is_rocm_pytorch() -> bool:
+    """Check whether the PyTorch is compiled on ROCm."""
+    is_rocm = False
+    if TORCH_VERSION != "parrots":
+        try:
+            from torch.utils.cpp_extension import ROCM_HOME
+            is_rocm = True if ((torch.version.hip is not None) and
+                               (ROCM_HOME is not None)) else False
+        except ImportError:
+            pass
+    return is_rocm
+
+TORCH_VERSION = torch.__version__
+
+def get_build_config():
+    """Obtain the build information of PyTorch or Parrots."""
+    if TORCH_VERSION == "parrots":
+        from parrots.config import get_build_info
+        return get_build_info()
+    else:
+        return torch.__config__.show()
+
+try:
+    import torch_musa  # noqa: F401
+    IS_MUSA_AVAILABLE = True
+except Exception:
+    IS_MUSA_AVAILABLE = False
+
+def is_musa_available() -> bool:
+    return IS_MUSA_AVAILABLE
+
+def is_cuda_available() -> bool:
+    """Returns True if cuda devices exist."""
+    return torch.cuda.is_available()
+
+def _get_cuda_home():
+    if TORCH_VERSION == "parrots":
+        from parrots.utils.build_extension import CUDA_HOME
+    else:
+        if is_rocm_pytorch():
+            from torch.utils.cpp_extension import ROCM_HOME
+            CUDA_HOME = ROCM_HOME
+        else:
+            from torch.utils.cpp_extension import CUDA_HOME
+    return CUDA_HOME
+
+
+def _get_musa_home():
+    return os.environ.get("MUSA_HOME")
+
+
+def collect_env():
+    """Collect the information of the running environments.
+
+    Returns:
+        dict: The environment information. The following fields are contained.
+
+            - sys.platform: The variable of ``sys.platform``.
+            - Python: Python version.
+            - CUDA available: Bool, indicating if CUDA is available.
+            - GPU devices: Device type of each GPU.
+            - CUDA_HOME (optional): The env var ``CUDA_HOME``.
+            - NVCC (optional): NVCC version.
+            - GCC: GCC version, "n/a" if GCC is not installed.
+            - MSVC: Microsoft Virtual C++ Compiler version, Windows only.
+            - PyTorch: PyTorch version.
+            - PyTorch compiling details: The output of \
+                ``torch.__config__.show()``.
+            - TorchVision (optional): TorchVision version.
+            - OpenCV (optional): OpenCV version.
+    """
+    from distutils import errors
+
+    env_info = OrderedDict()
+    env_info["sys.platform"] = sys.platform
+    env_info["Python"] = sys.version.replace("\n", "")
+
+    cuda_available = is_cuda_available()
+    musa_available = is_musa_available()
+    env_info["CUDA available"] = cuda_available
+    env_info["MUSA available"] = musa_available
+    env_info["numpy_random_seed"] = np.random.get_state()[1][0]
+
+    if cuda_available:
+        devices = defaultdict(list)
+        for k in range(torch.cuda.device_count()):
+            devices[torch.cuda.get_device_name(k)].append(str(k))
+        for name, device_ids in devices.items():
+            env_info["GPU " + ",".join(device_ids)] = name
+
+        CUDA_HOME = _get_cuda_home()
+        env_info["CUDA_HOME"] = CUDA_HOME
+
+        if CUDA_HOME is not None and osp.isdir(CUDA_HOME):
+            if CUDA_HOME == "/opt/rocm":
+                try:
+                    nvcc = osp.join(CUDA_HOME, "hip/bin/hipcc")
+                    nvcc = subprocess.check_output(
+                        f"\"{nvcc}\" --version", shell=True)
+                    nvcc = nvcc.decode("utf-8").strip()
+                    release = nvcc.rfind("HIP version:")
+                    build = nvcc.rfind("")
+                    nvcc = nvcc[release:build].strip()
+                except subprocess.SubprocessError:
+                    nvcc = "Not Available"
+            else:
+                try:
+                    nvcc = osp.join(CUDA_HOME, "bin/nvcc")
+                    nvcc = subprocess.check_output(f"\"{nvcc}\" -V", shell=True)
+                    nvcc = nvcc.decode("utf-8").strip()
+                    release = nvcc.rfind("Cuda compilation tools")
+                    build = nvcc.rfind("Build ")
+                    nvcc = nvcc[release:build].strip()
+                except subprocess.SubprocessError:
+                    nvcc = "Not Available"
+            env_info["NVCC"] = nvcc
+    elif musa_available:
+        devices = defaultdict(list)
+        for k in range(torch.musa.device_count()):
+            devices[torch.musa.get_device_name(k)].append(str(k))
+        for name, device_ids in devices.items():
+            env_info["GPU " + ",".join(device_ids)] = name
+
+        MUSA_HOME = _get_musa_home()
+        env_info["MUSA_HOME"] = MUSA_HOME
+
+        if MUSA_HOME is not None and osp.isdir(MUSA_HOME):
+            try:
+                mcc = osp.join(MUSA_HOME, "bin/mcc")
+                subprocess.check_output(f"\"{mcc}\" -v", shell=True)
+            except subprocess.SubprocessError:
+                mcc = "Not Available"
+            env_info["mcc"] = mcc
+    try:
+        # Check C++ Compiler.
+        # For Unix-like, sysconfig has 'CC' variable like 'gcc -pthread ...',
+        # indicating the compiler used, we use this to get the compiler name
+        import io
+        import sysconfig
+        cc = sysconfig.get_config_var("CC")
+        if cc:
+            cc = osp.basename(cc.split()[0])
+            cc_info = subprocess.check_output(f"{cc} --version", shell=True)
+            env_info["GCC"] = cc_info.decode("utf-8").partition(
+                "\n")[0].strip()
+        else:
+            # on Windows, cl.exe is not in PATH. We need to find the path.
+            # distutils.ccompiler.new_compiler() returns a msvccompiler
+            # object and after initialization, path to cl.exe is found.
+            import locale
+            import os
+            from distutils.ccompiler import new_compiler
+            ccompiler = new_compiler()
+            ccompiler.initialize()
+            cc = subprocess.check_output(
+                f"{ccompiler.cc}", stderr=subprocess.STDOUT, shell=True)
+            encoding = os.device_encoding(
+                sys.stdout.fileno()) or locale.getpreferredencoding()
+            env_info["MSVC"] = cc.decode(encoding).partition("\n")[0].strip()
+            env_info["GCC"] = "n/a"
+    except (subprocess.CalledProcessError, errors.DistutilsPlatformError):
+        env_info["GCC"] = "n/a"
+    except io.UnsupportedOperation as e:
+        # JupyterLab on Windows changes sys.stdout, which has no `fileno` attr
+        # Refer to: https://github.com/open-mmlab/mmengine/issues/931
+        # TODO: find a solution to get compiler info in Windows JupyterLab,
+        # while preserving backward-compatibility in other systems.
+        env_info["MSVC"] = f"n/a, reason: {str(e)}"
+
+    env_info["PyTorch"] = torch.__version__
+    env_info["PyTorch compiling details"] = get_build_config()
+
+    try:
+        import torchvision
+        env_info["TorchVision"] = torchvision.__version__
+    except ModuleNotFoundError:
+        pass
+
+    try:
+        import cv2
+        env_info["OpenCV"] = cv2.__version__
+    except ImportError:
+        pass
+
+
+    return env_info
+
+if __name__ == "__main__":
+    for name, val in collect_env().items():
+        print(f"{name}: {val}")
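
For quick verification after this change, the updated module can be exercised directly. A minimal sketch, assuming the repository root is on sys.path so that utils is importable as a package and that PyTorch is installed:

    from utils.collect_env import collect_env

    # Print every collected field, mirroring the script's own __main__ block.
    for name, val in collect_env().items():
        print(f"{name}: {val}")

Running "python utils/collect_env.py" from the repository root prints the same report via the __main__ block.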