hysts (HF Staff) committed
Commit e9c796e · 1 Parent(s): 86f96a4
.pre-commit-config.yaml ADDED
@@ -0,0 +1,33 @@
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v5.0.0
+    hooks:
+      - id: check-executables-have-shebangs
+      - id: check-json
+      - id: check-merge-conflict
+      - id: check-shebang-scripts-are-executable
+      - id: check-toml
+      - id: check-yaml
+      - id: end-of-file-fixer
+      - id: mixed-line-ending
+        args: ["--fix=lf"]
+      - id: requirements-txt-fixer
+      - id: trailing-whitespace
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.11.10
+    hooks:
+      - id: ruff
+        args: ["--fix"]
+      - id: ruff-format
+  - repo: https://github.com/pre-commit/mirrors-mypy
+    rev: v1.15.0
+    hooks:
+      - id: mypy
+        args: ["--ignore-missing-imports"]
+        additional_dependencies:
+          [
+            "types-python-slugify",
+            "types-pytz",
+            "types-PyYAML",
+            "types-requests",
+          ]
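The hooks above run automatically on `git commit` once registered. A minimal sketch of wiring them up locally, assuming the `pre-commit` CLI is installed (e.g. via `pip install pre-commit`):

```python
# Register the git hook and run every configured hook once over the repo.
# Both subcommands are standard pre-commit CLI commands.
import subprocess

subprocess.run(["pre-commit", "install"], check=True)
subprocess.run(["pre-commit", "run", "--all-files"], check=True)
```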
.python-version ADDED
@@ -0,0 +1 @@
+3.10
.vscode/extensions.json ADDED
@@ -0,0 +1,8 @@
+{
+  "recommendations": [
+    "ms-python.python",
+    "charliermarsh.ruff",
+    "streetsidesoftware.code-spell-checker",
+    "tamasfe.even-better-toml"
+  ]
+}
.vscode/settings.json ADDED
@@ -0,0 +1,17 @@
+{
+  "editor.formatOnSave": true,
+  "files.insertFinalNewline": false,
+  "[python]": {
+    "editor.defaultFormatter": "charliermarsh.ruff",
+    "editor.formatOnType": true,
+    "editor.codeActionsOnSave": {
+      "source.fixAll.ruff": "explicit",
+      "source.organizeImports": "explicit"
+    }
+  },
+  "[jupyter]": {
+    "files.insertFinalNewline": false
+  },
+  "notebook.output.scrolling": true,
+  "notebook.formatOnSave.enabled": true
+}
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🏎️💨
 colorFrom: yellow
 colorTo: pink
 sdk: gradio
-sdk_version: 4.40.0
+sdk_version: 5.30.0
 app_file: app.py
 pinned: false
 license: mit
app.py CHANGED
@@ -1,6 +1,8 @@
+import random
+
 import gradio as gr
 import numpy as np
-import random
+import PIL.Image
 import spaces
 import torch
 from diffusers import DiffusionPipeline
@@ -13,28 +15,52 @@ pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", tor
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 2048
 
-@spaces.GPU()
-def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, progress=gr.Progress(track_tqdm=True)):
+
+@spaces.GPU
+def infer(
+    prompt: str,
+    seed: int = 42,
+    randomize_seed: bool = False,
+    width: int = 1024,
+    height: int = 1024,
+    num_inference_steps: int = 4,
+    progress: gr.Progress = gr.Progress(track_tqdm=True),  # noqa: ARG001, B008
+) -> tuple[PIL.Image.Image, int]:
+    """Generate an image from a prompt using the FLUX.1 [schnell] model.
+
+    Args:
+        prompt: The prompt to generate an image from.
+        seed: The seed to use for the random number generator. Defaults to 42.
+        randomize_seed: Whether to randomize the seed. Defaults to False.
+        width: The width of the image to generate. Defaults to 1024.
+        height: The height of the image to generate. Defaults to 1024.
+        num_inference_steps: The number of inference steps to use. Defaults to 4.
+        progress: The progress bar to use. Defaults to a progress bar that tracks the tqdm progress.
+
+    Returns:
+        A tuple containing the generated image and the seed.
+    """
     if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
+        seed = random.randint(0, MAX_SEED)  # noqa: S311
     generator = torch.Generator().manual_seed(seed)
     image = pipe(
-        prompt = prompt,
-        width = width,
-        height = height,
-        num_inference_steps = num_inference_steps,
-        generator = generator,
-        guidance_scale=0.0
+        prompt=prompt,
+        width=width,
+        height=height,
+        num_inference_steps=num_inference_steps,
+        generator=generator,
+        guidance_scale=0.0,
     ).images[0]
     return image, seed
-
+
+
 examples = [
     "a tiny astronaut hatching from an egg on the moon",
     "a cat holding a sign that says hello world",
     "an anime illustration of a wiener schnitzel",
 ]
 
-css="""
+css = """
 #col-container {
     margin: 0 auto;
     max-width: 520px;
@@ -42,29 +68,23 @@ css="""
 """
 
 with gr.Blocks(css=css) as demo:
-
     with gr.Column(elem_id="col-container"):
-        gr.Markdown(f"""# FLUX.1 [schnell]
+        gr.Markdown("""# FLUX.1 [schnell]
         12B param rectified flow transformer distilled from [FLUX.1 [pro]](https://blackforestlabs.ai/) for 4 step generation
         [[blog](https://blackforestlabs.ai/announcing-black-forest-labs/)] [[model](https://huggingface.co/black-forest-labs/FLUX.1-schnell)]
         """)
-
+
         with gr.Row():
-
             prompt = gr.Text(
                 label="Prompt",
                 show_label=False,
                 max_lines=1,
                 placeholder="Enter your prompt",
-                container=False,
+                submit_btn=True,
             )
-
-            run_button = gr.Button("Run", scale=0)
-
         result = gr.Image(label="Result", show_label=False)
-
+
         with gr.Accordion("Advanced Settings", open=False):
-
             seed = gr.Slider(
                 label="Seed",
                 minimum=0,
@@ -72,11 +92,9 @@ with gr.Blocks(css=css) as demo:
                 step=1,
                 value=0,
             )
-
             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
+
             with gr.Row():
-
                 width = gr.Slider(
                     label="Width",
                     minimum=256,
@@ -84,7 +102,7 @@ with gr.Blocks(css=css) as demo:
                     step=32,
                     value=1024,
                 )
-
+
                 height = gr.Slider(
                     label="Height",
                     minimum=256,
@@ -92,10 +110,8 @@ with gr.Blocks(css=css) as demo:
                     step=32,
                     value=1024,
                 )
-
+
             with gr.Row():
-
-
                 num_inference_steps = gr.Slider(
                     label="Number of inference steps",
                     minimum=1,
@@ -103,20 +119,22 @@ with gr.Blocks(css=css) as demo:
                     step=1,
                     value=4,
                 )
-
+
         gr.Examples(
-            examples = examples,
-            fn = infer,
-            inputs = [prompt],
-            outputs = [result, seed],
-            cache_examples="lazy"
+            examples=examples,
+            fn=infer,
+            inputs=prompt,
+            outputs=[result, seed],
+            cache_examples=True,
+            cache_mode="lazy",
        )
 
-    gr.on(
-        triggers=[run_button.click, prompt.submit],
-        fn = infer,
-        inputs = [prompt, seed, randomize_seed, width, height, num_inference_steps],
-        outputs = [result, seed]
+    prompt.submit(
+        fn=infer,
+        inputs=[prompt, seed, randomize_seed, width, height, num_inference_steps],
+        outputs=[result, seed],
     )
 
-demo.launch()
+
+if __name__ == "__main__":
+    demo.launch(mcp_server=True)
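The `prompt.submit` event above is also exposed as an API endpoint. A hedged sketch of calling it remotely with `gradio_client`; the Space id and the `"/infer"` api_name are assumptions inferred from the function name, so check the Space's "Use via API" page for the exact values:

```python
from gradio_client import Client

# Hypothetical Space id; replace with the actual repo id of this Space.
client = Client("black-forest-labs/FLUX.1-schnell")

# Positional arguments follow the inputs list wired to prompt.submit.
image_path, seed = client.predict(
    "a tiny astronaut hatching from an egg on the moon",  # prompt
    42,      # seed
    True,    # randomize_seed
    1024,    # width
    1024,    # height
    4,       # num_inference_steps
    api_name="/infer",
)
print(image_path, seed)  # gradio_client returns the image as a local file path
```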
pyproject.toml ADDED
@@ -0,0 +1,60 @@
+[project]
+name = "flux-1-schnell"
+version = "0.1.0"
+description = ""
+readme = "README.md"
+requires-python = ">=3.10"
+dependencies = [
+    "accelerate>=1.7.0",
+    "diffusers>=0.33.1",
+    "gradio[mcp]==5.30.0",
+    "hf-transfer>=0.1.9",
+    "hf-xet>=1.1.2",
+    "mcp==1.8.1",
+    "sentencepiece>=0.2.0",
+    "spaces>=0.36.0",
+    "torch==2.5.1",
+    "transformers>=4.52.3",
+    "xformers>=0.0.29.post1",
+]
+
+[tool.ruff]
+line-length = 119
+
+[tool.ruff.lint]
+select = ["ALL"]
+ignore = [
+    "COM812",  # missing-trailing-comma
+    "D203",  # one-blank-line-before-class
+    "D213",  # multi-line-summary-second-line
+    "E501",  # line-too-long
+    "SIM117",  # multiple-with-statements
+    #
+    "D100",  # undocumented-public-module
+    "D101",  # undocumented-public-class
+    "D102",  # undocumented-public-method
+    "D103",  # undocumented-public-function
+    "D104",  # undocumented-public-package
+    "D105",  # undocumented-magic-method
+    "D107",  # undocumented-public-init
+    "EM101",  # raw-string-in-exception
+    "FBT001",  # boolean-type-hint-positional-argument
+    "FBT002",  # boolean-default-value-positional-argument
+    "PD901",  # pandas-df-variable-name
+    "PGH003",  # blanket-type-ignore
+    "PLR0913",  # too-many-arguments
+    "PLR0915",  # too-many-statements
+    "TRY003",  # raise-vanilla-args
+]
+unfixable = [
+    "F401",  # unused-import
+]
+
+[tool.ruff.lint.pydocstyle]
+convention = "google"
+
+[tool.ruff.lint.per-file-ignores]
+"*.ipynb" = ["T201", "T203"]
+
+[tool.ruff.format]
+docstring-code-format = true
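A hedged sketch of checking that the pins in requirements.txt satisfy the constraints declared above, using the `packaging` library (already pinned in requirements.txt). `tomllib` is stdlib on Python 3.11+; on the 3.10 interpreter pinned in `.python-version`, substitute the `tomli` backport:

```python
import tomllib  # on Python 3.10, use: import tomli as tomllib

from packaging.requirements import Requirement

# Parse the declared dependency constraints from pyproject.toml.
with open("pyproject.toml", "rb") as f:
    deps = [Requirement(d) for d in tomllib.load(f)["project"]["dependencies"]]

# Collect the exact pins from requirements.txt, skipping comment lines.
pins: dict[str, str] = {}
with open("requirements.txt") as f:
    for line in f:
        line = line.strip()
        if line and not line.startswith("#"):
            name, _, version = line.partition("==")
            pins[name.lower()] = version

# Report any declared dependency that is missing or out of range.
for dep in deps:
    pinned = pins.get(dep.name.lower())
    if pinned is None:
        print(f"{dep.name}: missing from requirements.txt")
    elif not dep.specifier.contains(pinned):
        print(f"{dep.name}=={pinned} violates {dep.specifier}")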
requirements.txt CHANGED
@@ -1,7 +1,295 @@
-accelerate
-git+https://github.com/huggingface/diffusers.git
-invisible_watermark
-torch
-transformers==4.42.4
-xformers
-sentencepiece
+# This file was autogenerated by uv via the following command:
+#    uv pip compile pyproject.toml -o requirements.txt
+accelerate==1.7.0
+    # via flux-1-schnell (pyproject.toml)
+aiofiles==24.1.0
+    # via gradio
+annotated-types==0.7.0
+    # via pydantic
+anyio==4.9.0
+    # via
+    #   gradio
+    #   httpx
+    #   mcp
+    #   sse-starlette
+    #   starlette
+certifi==2025.4.26
+    # via
+    #   httpcore
+    #   httpx
+    #   requests
+charset-normalizer==3.4.2
+    # via requests
+click==8.1.8
+    # via
+    #   typer
+    #   uvicorn
+diffusers==0.33.1
+    # via flux-1-schnell (pyproject.toml)
+exceptiongroup==1.3.0
+    # via anyio
+fastapi==0.115.12
+    # via gradio
+ffmpy==0.5.0
+    # via gradio
+filelock==3.18.0
+    # via
+    #   diffusers
+    #   huggingface-hub
+    #   torch
+    #   transformers
+    #   triton
+fsspec==2025.5.0
+    # via
+    #   gradio-client
+    #   huggingface-hub
+    #   torch
+gradio==5.30.0
+    # via
+    #   flux-1-schnell (pyproject.toml)
+    #   spaces
+gradio-client==1.10.1
+    # via gradio
+groovy==0.1.2
+    # via gradio
+h11==0.16.0
+    # via
+    #   httpcore
+    #   uvicorn
+hf-transfer==0.1.9
+    # via flux-1-schnell (pyproject.toml)
+hf-xet==1.1.2
+    # via flux-1-schnell (pyproject.toml)
+httpcore==1.0.9
+    # via httpx
+httpx==0.28.1
+    # via
+    #   gradio
+    #   gradio-client
+    #   mcp
+    #   safehttpx
+    #   spaces
+httpx-sse==0.4.0
+    # via mcp
+huggingface-hub==0.31.4
+    # via
+    #   accelerate
+    #   diffusers
+    #   gradio
+    #   gradio-client
+    #   tokenizers
+    #   transformers
+idna==3.10
+    # via
+    #   anyio
+    #   httpx
+    #   requests
+importlib-metadata==8.7.0
+    # via diffusers
+jinja2==3.1.6
+    # via
+    #   gradio
+    #   torch
+markdown-it-py==3.0.0
+    # via rich
+markupsafe==3.0.2
+    # via
+    #   gradio
+    #   jinja2
+mcp==1.8.1
+    # via
+    #   flux-1-schnell (pyproject.toml)
+    #   gradio
+mdurl==0.1.2
+    # via markdown-it-py
+mpmath==1.3.0
+    # via sympy
+networkx==3.4.2
+    # via torch
+numpy==2.2.6
+    # via
+    #   accelerate
+    #   diffusers
+    #   gradio
+    #   pandas
+    #   transformers
+    #   xformers
+nvidia-cublas-cu12==12.4.5.8
+    # via
+    #   nvidia-cudnn-cu12
+    #   nvidia-cusolver-cu12
+    #   torch
+nvidia-cuda-cupti-cu12==12.4.127
+    # via torch
+nvidia-cuda-nvrtc-cu12==12.4.127
+    # via torch
+nvidia-cuda-runtime-cu12==12.4.127
+    # via torch
+nvidia-cudnn-cu12==9.1.0.70
+    # via torch
+nvidia-cufft-cu12==11.2.1.3
+    # via torch
+nvidia-curand-cu12==10.3.5.147
+    # via torch
+nvidia-cusolver-cu12==11.6.1.9
+    # via torch
+nvidia-cusparse-cu12==12.3.1.170
+    # via
+    #   nvidia-cusolver-cu12
+    #   torch
+nvidia-nccl-cu12==2.21.5
+    # via torch
+nvidia-nvjitlink-cu12==12.4.127
+    # via
+    #   nvidia-cusolver-cu12
+    #   nvidia-cusparse-cu12
+    #   torch
+nvidia-nvtx-cu12==12.4.127
+    # via torch
+orjson==3.10.18
+    # via gradio
+packaging==25.0
+    # via
+    #   accelerate
+    #   gradio
+    #   gradio-client
+    #   huggingface-hub
+    #   spaces
+    #   transformers
+pandas==2.2.3
+    # via gradio
+pillow==11.2.1
+    # via
+    #   diffusers
+    #   gradio
+psutil==5.9.8
+    # via
+    #   accelerate
+    #   spaces
+pydantic==2.11.5
+    # via
+    #   fastapi
+    #   gradio
+    #   mcp
+    #   pydantic-settings
+    #   spaces
+pydantic-core==2.33.2
+    # via pydantic
+pydantic-settings==2.9.1
+    # via mcp
+pydub==0.25.1
+    # via gradio
+pygments==2.19.1
+    # via rich
+python-dateutil==2.9.0.post0
+    # via pandas
+python-dotenv==1.1.0
+    # via pydantic-settings
+python-multipart==0.0.20
+    # via
+    #   gradio
+    #   mcp
+pytz==2025.2
+    # via pandas
+pyyaml==6.0.2
+    # via
+    #   accelerate
+    #   gradio
+    #   huggingface-hub
+    #   transformers
+regex==2024.11.6
+    # via
+    #   diffusers
+    #   transformers
+requests==2.32.3
+    # via
+    #   diffusers
+    #   huggingface-hub
+    #   spaces
+    #   transformers
+rich==14.0.0
+    # via typer
+ruff==0.11.11
+    # via gradio
+safehttpx==0.1.6
+    # via gradio
+safetensors==0.5.3
+    # via
+    #   accelerate
+    #   diffusers
+    #   transformers
+semantic-version==2.10.0
+    # via gradio
+sentencepiece==0.2.0
+    # via flux-1-schnell (pyproject.toml)
+shellingham==1.5.4
+    # via typer
+six==1.17.0
+    # via python-dateutil
+sniffio==1.3.1
+    # via anyio
+spaces==0.36.0
+    # via flux-1-schnell (pyproject.toml)
+sse-starlette==2.3.5
+    # via mcp
+starlette==0.46.2
+    # via
+    #   fastapi
+    #   gradio
+    #   mcp
+    #   sse-starlette
+sympy==1.13.1
+    # via torch
+tokenizers==0.21.1
+    # via transformers
+tomlkit==0.13.2
+    # via gradio
+torch==2.5.1
+    # via
+    #   flux-1-schnell (pyproject.toml)
+    #   accelerate
+    #   xformers
+tqdm==4.67.1
+    # via
+    #   huggingface-hub
+    #   transformers
+transformers==4.52.3
+    # via flux-1-schnell (pyproject.toml)
+triton==3.1.0
+    # via torch
+typer==0.15.4
+    # via gradio
+typing-extensions==4.13.2
+    # via
+    #   anyio
+    #   exceptiongroup
+    #   fastapi
+    #   gradio
+    #   gradio-client
+    #   huggingface-hub
+    #   pydantic
+    #   pydantic-core
+    #   rich
+    #   spaces
+    #   torch
+    #   typer
+    #   typing-inspection
+    #   uvicorn
+typing-inspection==0.4.1
+    # via
+    #   pydantic
+    #   pydantic-settings
+tzdata==2025.2
+    # via pandas
+urllib3==2.4.0
+    # via requests
+uvicorn==0.34.2
+    # via
+    #   gradio
+    #   mcp
+websockets==15.0.1
+    # via gradio-client
+xformers==0.0.29.post1
+    # via flux-1-schnell (pyproject.toml)
+zipp==3.21.0
+    # via importlib-metadata
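A minimal sketch of regenerating this file after editing pyproject.toml, using the exact command recorded in the header comment that uv writes into the file; it assumes the `uv` CLI is installed and on PATH:

```python
import subprocess

# Re-resolve pyproject.toml's dependencies into pinned requirements.txt form.
subprocess.run(
    ["uv", "pip", "compile", "pyproject.toml", "-o", "requirements.txt"],
    check=True,
)
```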
uv.lock ADDED
The diff for this file is too large to render. See raw diff