ABDALLALSWAITI committed on
Commit
4d157d7
·
verified ·
1 Parent(s): daa2cf7

Upload FLUX_FP8_Demo.ipynb with huggingface_hub

Browse files
Files changed (1) hide show
  1. FLUX_FP8_Demo.ipynb +53 -1
FLUX_FP8_Demo.ipynb CHANGED
@@ -1 +1,53 @@
1
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": [],
      "gpuType": "T4",
      "machine_shape": "hm",
      "private_outputs": true,
      "accelerator": "GPU"
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "intro"
      },
      "source": [
        "# FLUX.1-dev-ControlNet-Union-Pro-2.0 (FP8 Quantized) Demo\n\n[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/ABDALLALSWAITI/FLUX.1-dev-ControlNet-Union-Pro-2.0-fp8/blob/main/FLUX_FP8_Demo.ipynb)\n\nThis notebook demonstrates how to use the FP8 quantized version of the FLUX.1-dev-ControlNet-Union-Pro-2.0 model. This is a direct quantization of the original model to FP8 format."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "setup"
      },
      "source": [
        "# Install dependencies (%pip targets the running kernel's environment)\n%pip install -q diffusers transformers accelerate controlnet_aux opencv-python\n\n# Clone the repository\n!git clone https://huggingface.co/ABDALLALSWAITI/FLUX.1-dev-ControlNet-Union-Pro-2.0-fp8\n!cp FLUX.1-dev-ControlNet-Union-Pro-2.0-fp8/pipeline_flux_controlnet.py .\n!cp FLUX.1-dev-ControlNet-Union-Pro-2.0-fp8/controlnet_flux.py ."
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "load_model"
      },
      "source": [
        "import torch\nfrom pipeline_flux_controlnet import FluxControlNetPipeline\nfrom controlnet_flux import FluxControlNetModel\n\n# Probe whether this torch build/hardware supports the float8_e4m3fn dtype.\n# Use `except Exception` (not a bare `except:`) so KeyboardInterrupt/SystemExit\n# are not swallowed if the user interrupts the cell.\nfp8_supported = False\ntry:\n    _ = torch.tensor([1.0], dtype=torch.float8_e4m3fn)\n    fp8_supported = True\n    print('FP8 is supported!')\nexcept Exception:\n    print('FP8 not supported, using BF16 instead')\n\n# Load the model\ncontrolnet = FluxControlNetModel.from_pretrained(\n    'FLUX.1-dev-ControlNet-Union-Pro-2.0-fp8',\n    torch_dtype=torch.bfloat16)\n\nprint('Model loaded successfully!')"
      ],
      "execution_count": null,
      "outputs": []
    }
  ]
}