Xianbao QIAN committed · Commit 1ab3250 · 1 Parent(s): a64918d
diffusion_course/unit1/01_introduction_to_diffusers.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
diffusion_course/unit1/01_introduction_to_diffusers_CN.ipynb CHANGED
@@ -541,7 +541,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 1,
+ "execution_count": null,
 "metadata": {
 "colab": {
 "base_uri": "https://localhost:8080/"
@@ -549,19 +549,7 @@
 "id": "-yX-MZhSsxwp",
 "outputId": "f8efea0d-41e6-4674-c09f-d905d6cd05dc"
 },
- "outputs": [
- {
- "ename": "ModuleNotFoundError",
- "evalue": "No module named 'torchvision'",
- "output_type": "error",
- "traceback": [
- "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
- "\u001b[1;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)",
- "Cell \u001b[1;32mIn[1], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mtorchvision\u001b[39;00m\n\u001b[0;32m 2\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdatasets\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m load_dataset\n\u001b[0;32m 3\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtorchvision\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m transforms\n",
- "\u001b[1;31mModuleNotFoundError\u001b[0m: No module named 'torchvision'"
- ]
- }
- ],
+ "outputs": [],
 "source": [
 "import torchvision\n",
 "from datasets import load_dataset\n",
@@ -710,7 +698,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 4,
+ "execution_count": null,
 "metadata": {
 "colab": {
 "base_uri": "https://localhost:8080/",
@@ -719,19 +707,7 @@
 "id": "oP-rFQUzdx9h",
 "outputId": "dea1ec0a-9a08-433a-a8d4-9f8731e2e3ea"
 },
- "outputs": [
- {
- "ename": "NameError",
- "evalue": "name 'plt' is not defined",
- "output_type": "error",
- "traceback": [
- "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
- "\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
- "Cell \u001b[1;32mIn[4], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m \u001b[43mplt\u001b[49m\u001b[38;5;241m.\u001b[39mplot(noise_scheduler\u001b[38;5;241m.\u001b[39malphas_cumprod\u001b[38;5;241m.\u001b[39mcpu() \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39m \u001b[38;5;241m0.5\u001b[39m, label\u001b[38;5;241m=\u001b[39m\u001b[38;5;124mr\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m$\u001b[39m\u001b[38;5;124m{\u001b[39m\u001b[38;5;124m\\\u001b[39m\u001b[38;5;124msqrt\u001b[39m\u001b[38;5;124m{\u001b[39m\u001b[38;5;124m\\\u001b[39m\u001b[38;5;124mbar\u001b[39m\u001b[38;5;124m{\u001b[39m\u001b[38;5;124m\\\u001b[39m\u001b[38;5;124malpha}_t}}$\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m 2\u001b[0m plt\u001b[38;5;241m.\u001b[39mplot((\u001b[38;5;241m1\u001b[39m \u001b[38;5;241m-\u001b[39m noise_scheduler\u001b[38;5;241m.\u001b[39malphas_cumprod\u001b[38;5;241m.\u001b[39mcpu()) \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39m \u001b[38;5;241m0.5\u001b[39m, label\u001b[38;5;241m=\u001b[39m\u001b[38;5;124mr\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m$\u001b[39m\u001b[38;5;124m\\\u001b[39m\u001b[38;5;124msqrt\u001b[39m\u001b[38;5;124m{\u001b[39m\u001b[38;5;124m(1 - \u001b[39m\u001b[38;5;124m\\\u001b[39m\u001b[38;5;124mbar\u001b[39m\u001b[38;5;124m{\u001b[39m\u001b[38;5;124m\\\u001b[39m\u001b[38;5;124malpha}_t)}$\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m 3\u001b[0m plt\u001b[38;5;241m.\u001b[39mlegend(fontsize\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mx-large\u001b[39m\u001b[38;5;124m\"\u001b[39m);\n",
- "\u001b[1;31mNameError\u001b[0m: name 'plt' is not defined"
- ]
- }
- ],
+ "outputs": [],
 "source": [
 "plt.plot(noise_scheduler.alphas_cumprod.cpu() ** 0.5, label=r\"${\\sqrt{\\bar{\\alpha}_t}}$\")\n",
 "plt.plot((1 - noise_scheduler.alphas_cumprod.cpu()) ** 0.5, label=r\"$\\sqrt{(1 - \\bar{\\alpha}_t)}$\")\n",
@@ -854,23 +830,11 @@
 },
 {
 "cell_type": "code",
- "execution_count": 5,
+ "execution_count": null,
 "metadata": {
 "id": "fRGXiotOs4Mc"
 },
- "outputs": [
- {
- "ename": "NameError",
- "evalue": "name 'image_size' is not defined",
- "output_type": "error",
- "traceback": [
- "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
- "\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
- "Cell \u001b[1;32mIn[5], line 5\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdiffusers\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m UNet2DModel\n\u001b[0;32m 3\u001b[0m \u001b[38;5;66;03m# Create a model\u001b[39;00m\n\u001b[0;32m 4\u001b[0m model \u001b[38;5;241m=\u001b[39m UNet2DModel(\n\u001b[1;32m----> 5\u001b[0m sample_size\u001b[38;5;241m=\u001b[39m\u001b[43mimage_size\u001b[49m, \u001b[38;5;66;03m# the target image resolution\u001b[39;00m\n\u001b[0;32m 6\u001b[0m in_channels\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m3\u001b[39m, \u001b[38;5;66;03m# the number of input channels, 3 for RGB images\u001b[39;00m\n\u001b[0;32m 7\u001b[0m out_channels\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m3\u001b[39m, \u001b[38;5;66;03m# the number of output channels\u001b[39;00m\n\u001b[0;32m 8\u001b[0m layers_per_block\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m2\u001b[39m, \u001b[38;5;66;03m# how many ResNet layers to use per UNet block\u001b[39;00m\n\u001b[0;32m 9\u001b[0m block_out_channels\u001b[38;5;241m=\u001b[39m(\u001b[38;5;241m64\u001b[39m, \u001b[38;5;241m128\u001b[39m, \u001b[38;5;241m128\u001b[39m, \u001b[38;5;241m256\u001b[39m), \u001b[38;5;66;03m# More channels -> more parameters\u001b[39;00m\n\u001b[0;32m 10\u001b[0m down_block_types\u001b[38;5;241m=\u001b[39m(\n\u001b[0;32m 11\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mDownBlock2D\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;66;03m# a regular ResNet downsampling block\u001b[39;00m\n\u001b[0;32m 12\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mDownBlock2D\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[0;32m 13\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mAttnDownBlock2D\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;66;03m# a ResNet downsampling block with spatial self-attention\u001b[39;00m\n\u001b[0;32m 14\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mAttnDownBlock2D\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[0;32m 15\u001b[0m ),\n\u001b[0;32m 16\u001b[0m up_block_types\u001b[38;5;241m=\u001b[39m(\n\u001b[0;32m 17\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mAttnUpBlock2D\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[0;32m 18\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mAttnUpBlock2D\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;66;03m# a ResNet upsampling block with spatial self-attention\u001b[39;00m\n\u001b[0;32m 19\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mUpBlock2D\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[0;32m 20\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mUpBlock2D\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;66;03m# a regular ResNet upsampling block\u001b[39;00m\n\u001b[0;32m 21\u001b[0m ),\n\u001b[0;32m 22\u001b[0m )\n\u001b[0;32m 23\u001b[0m model\u001b[38;5;241m.\u001b[39mto(device);\n",
- "\u001b[1;31mNameError\u001b[0m: name 'image_size' is not defined"
- ]
- }
- ],
+ "outputs": [],
 "source": [
 "from diffusers import UNet2DModel\n",
 "\n",
@@ -1838,7 +1802,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
- "version": "3.9.7"
+ "version": "3.9.6 (default, Sep 26 2022, 11:37:49) \n[Clang 14.0.0 (clang-1400.0.29.202)]"
 },
 "vscode": {
 "interpreter": {
diffusion_course/unit1/01_introduction_to_diffusers_CN.md CHANGED
@@ -276,20 +276,6 @@ train_dataloader = torch.utils.data.DataLoader(
 )
 ```
 
-
- ---------------------------------------------------------------------------
-
- ModuleNotFoundError Traceback (most recent call last)
-
- Cell In[1], line 1
- ----> 1 import torchvision
- 2 from datasets import load_dataset
- 3 from torchvision import transforms
-
-
- ModuleNotFoundError: No module named 'torchvision'
-
-
 We can grab a batch of images from it to see what they look like:
 
 
@@ -351,20 +337,6 @@ plt.plot((1 - noise_scheduler.alphas_cumprod.cpu()) ** 0.5, label=r"$\sqrt{(1 -
 plt.legend(fontsize="x-large");
 ```
 
-
- ---------------------------------------------------------------------------
-
- NameError Traceback (most recent call last)
-
- Cell In[4], line 1
- ----> 1 plt.plot(noise_scheduler.alphas_cumprod.cpu() ** 0.5, label=r"${\sqrt{\bar{\alpha}_t}}$")
- 2 plt.plot((1 - noise_scheduler.alphas_cumprod.cpu()) ** 0.5, label=r"$\sqrt{(1 - \bar{\alpha}_t)}$")
- 3 plt.legend(fontsize="x-large");
-
-
- NameError: name 'plt' is not defined
-
-
 **Exercise:** Explore how the curves change when you use a different beta_start; beta_end and beta_schedule can be modified via the commented-out options below:
 
 
@@ -447,39 +419,6 @@ model = UNet2DModel(
 model.to(device);
 ```
 
-
- ---------------------------------------------------------------------------
-
- NameError Traceback (most recent call last)
-
- Cell In[5], line 5
- 1 from diffusers import UNet2DModel
- 3 # Create a model
- 4 model = UNet2DModel(
- ----> 5 sample_size=image_size, # the target image resolution
- 6 in_channels=3, # the number of input channels, 3 for RGB images
- 7 out_channels=3, # the number of output channels
- 8 layers_per_block=2, # how many ResNet layers to use per UNet block
- 9 block_out_channels=(64, 128, 128, 256), # More channels -> more parameters
- 10 down_block_types=(
- 11 "DownBlock2D", # a regular ResNet downsampling block
- 12 "DownBlock2D",
- 13 "AttnDownBlock2D", # a ResNet downsampling block with spatial self-attention
- 14 "AttnDownBlock2D",
- 15 ),
- 16 up_block_types=(
- 17 "AttnUpBlock2D",
- 18 "AttnUpBlock2D", # a ResNet upsampling block with spatial self-attention
- 19 "UpBlock2D",
- 20 "UpBlock2D", # a regular ResNet upsampling block
- 21 ),
- 22 )
- 23 model.to(device);
-
-
- NameError: name 'image_size' is not defined
-
-
 When working with higher-resolution inputs, you may want to use more down/upsampling blocks and keep the attention layers only at the lowest-resolution (bottom) layers to reduce memory usage. We will discuss later how to experiment to find the configuration that works best for your use case.
 
 We can check that the output is the same shape as the input by passing in a batch of data and some random timesteps:
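
The three tracebacks this commit scrubs all point at missing setup rather than bugs in the course code: torchvision was not installed, matplotlib was never imported as plt, and image_size was used before being defined. Here is a minimal sketch of the setup that lets those cells run, ending with the shape check described by the line above; the image_size value and device choice are assumptions, not taken from the diff.

```python
# pip install torch torchvision datasets diffusers matplotlib
import torch
import matplotlib.pyplot as plt  # fixes: NameError: name 'plt' is not defined

from diffusers import UNet2DModel

device = "cuda" if torch.cuda.is_available() else "cpu"
image_size = 32  # assumed value; fixes: NameError: name 'image_size' is not defined

# The model definition exactly as it appears in the removed traceback:
model = UNet2DModel(
    sample_size=image_size,  # the target image resolution
    in_channels=3,           # the number of input channels, 3 for RGB images
    out_channels=3,          # the number of output channels
    layers_per_block=2,      # how many ResNet layers to use per UNet block
    block_out_channels=(64, 128, 128, 256),  # more channels -> more parameters
    down_block_types=(
        "DownBlock2D",       # a regular ResNet downsampling block
        "DownBlock2D",
        "AttnDownBlock2D",   # a downsampling block with spatial self-attention
        "AttnDownBlock2D",
    ),
    up_block_types=(
        "AttnUpBlock2D",
        "AttnUpBlock2D",     # an upsampling block with spatial self-attention
        "UpBlock2D",
        "UpBlock2D",         # a regular ResNet upsampling block
    ),
).to(device)

# Feed a batch of random images and random timesteps; the UNet's output
# should come back with the same shape as its input.
x = torch.randn(4, 3, image_size, image_size, device=device)
t = torch.randint(0, 1000, (4,), device=device)
with torch.no_grad():
    out = model(x, t).sample
print(out.shape)  # torch.Size([4, 3, 32, 32]) -> matches x.shape
```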
 
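The exercise in the diff above asks how the two plotted curves change as beta_start, beta_end, and beta_schedule vary. A short sketch of that experiment using DDPMScheduler from diffusers (the specific values below are illustrative, not taken from the course):

```python
import matplotlib.pyplot as plt
from diffusers import DDPMScheduler

# Vary these arguments and re-plot to see how the noise schedule changes.
noise_scheduler = DDPMScheduler(
    num_train_timesteps=1000,
    beta_start=0.001,        # default is 0.0001; larger values add noise faster
    beta_end=0.02,
    beta_schedule="linear",  # also try "squaredcos_cap_v2"
)

plt.plot(noise_scheduler.alphas_cumprod.cpu() ** 0.5, label=r"${\sqrt{\bar{\alpha}_t}}$")
plt.plot((1 - noise_scheduler.alphas_cumprod.cpu()) ** 0.5, label=r"$\sqrt{(1 - \bar{\alpha}_t)}$")
plt.legend(fontsize="x-large")
plt.show()
```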