{ "_class_name": "FluxTransformer2DModel", "_commit_hash": null, "_diffusers_version": "0.34.0", "_use_default_values": [ "out_channels", "axes_dims_rope" ], "attention_head_dim": 128, "axes_dims_rope": [ 16, 56, 56 ], "guidance_embeds": true, "in_channels": 64, "joint_attention_dim": 4096, "neuron": { "auto_cast": "matmul", "auto_cast_type": "bf16", "compiler_type": "neuronx-cc", "compiler_version": "2.19.8089.0+8ab9f450", "dynamic_batch_size": false, "inline_weights_to_neff": true, "input_names": [ "hidden_states", "encoder_hidden_states", "pooled_projections", "timestep", "image_rotary_emb", "guidance" ], "model_type": "flux-transformer-2d", "optlevel": "2", "output_attentions": false, "output_hidden_states": false, "output_names": [ "out_hidden_states" ], "static_batch_size": 1, "static_encoder_hidden_size": 768, "static_height": 128, "static_num_channels": 64, "static_patch_size": 1, "static_rotary_axes_dim": 128, "static_sequence_length": 512, "static_vae_scale_factor": 8, "static_width": 128, "task": "semantic-segmentation", "tensor_parallel_size": 4 }, "num_attention_heads": 24, "num_layers": 19, "num_single_layers": 38, "out_channels": null, "patch_size": 1, "pooled_projection_dim": 768, "transformers_version": null }