Fabrice-TIERCELIN
committed on
" rather than '
hyvideo/modules/fp8_optimization.py
CHANGED
@@ -83,7 +83,7 @@ def convert_fp8_linear(module, dit_weight_path, original_dtype, params_to_keep={
     setattr(module, "fp8_matmul_enabled", True)
 
     # loading fp8 mapping file
-    fp8_map_path = dit_weight_path.replace('.pt', '_map.pt')
+    fp8_map_path = dit_weight_path.replace(".pt", "_map.pt")
     if os.path.exists(fp8_map_path):
         fp8_map = torch.load(fp8_map_path, map_location=lambda storage, loc: storage)
     else:
@@ -91,7 +91,7 @@ def convert_fp8_linear(module, dit_weight_path, original_dtype, params_to_keep={
 
     fp8_layers = []
     for key, layer in module.named_modules():
-        if isinstance(layer, nn.Linear) and ('double_blocks' in key or 'single_blocks' in key):
+        if isinstance(layer, nn.Linear) and ("double_blocks" in key or "single_blocks" in key):
             fp8_layers.append(key)
             original_forward = layer.forward
             layer.weight = torch.nn.Parameter(layer.weight.to(torch.float8_e4m3fn))
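
For context, the function being patched stores the transformer's linear weights in float8_e4m3fn and swaps in a wrapped forward that upcasts at compute time. The sketch below is a minimal, self-contained illustration of that pattern, not the repository's implementation: the helper name wrap_fp8_linear, the bf16 compute dtype, and the omission of per-tensor scaling (the real convert_fp8_linear loads a scale map from fp8_map_path) are assumptions for illustration. It needs a PyTorch build with float8_e4m3fn support.

# Minimal sketch of the fp8 weight-storage pattern touched by this diff.
# Assumptions: no per-tensor scale map, bf16 compute; wrap_fp8_linear is a
# hypothetical helper, not the repo's API.
import torch
import torch.nn as nn

def wrap_fp8_linear(layer: nn.Linear, original_dtype=torch.bfloat16):
    # Keep the weight in float8_e4m3fn to roughly halve memory vs. bf16/fp16.
    layer.weight = nn.Parameter(
        layer.weight.to(torch.float8_e4m3fn), requires_grad=False
    )

    def fp8_forward(x):
        # Upcast at compute time: standard matmul kernels do not take fp8 inputs.
        return nn.functional.linear(x, layer.weight.to(original_dtype), layer.bias)

    layer.forward = fp8_forward
    return layer

lin = wrap_fp8_linear(nn.Linear(16, 8).to(torch.bfloat16))
out = lin(torch.randn(2, 16, dtype=torch.bfloat16))
print(out.shape, lin.weight.dtype)  # torch.Size([2, 8]) torch.float8_e4m3fn

Note that the key filter in the diff ("double_blocks" in key or "single_blocks" in key) restricts this conversion to the DiT transformer blocks; all other layers keep their original dtype.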