---
# Latent-diffusion training configuration (CompVis `ldm`-style layout).
# NOTE(review): reconstructed from a whitespace-mangled single-line file;
# key nesting follows the standard ldm config schema — verify against the
# loader (`OmegaConf.load` / trainer entry point) before relying on it.

model:
  base_learning_rate: 2.0e-05
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    # Noise-schedule endpoints and diffusion length.
    linear_start: 0.0015
    linear_end: 0.0195
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    # Batch keys: image is the denoising target; action_ feeds the
    # conditioning stage (presumably an action/text token stream — confirm
    # against the dataset class).
    first_stage_key: image
    cond_stage_key: action_
    # scheduler_sampling_rate: 0.0
    hybrid_key: c_concat
    image_size: 256
    channels: 3
    cond_stage_trainable: true
    # hybrid = concat conditioning (c_concat) plus cross-attention context.
    conditioning_key: hybrid
    monitor: val/loss_simple_ema

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 256
        # 24 input channels vs 3 output: extra channels carry the
        # c_concat conditioning stacked with the 3-channel latent.
        in_channels: 24
        out_channels: 3
        model_channels: 192
        attention_resolutions:
          - 32
          - 16
          - 8
        num_res_blocks: 2
        channel_mult:
          - 1
          - 2
          - 3
          - 5
        num_head_channels: 32
        # Cross-attention over a 768-dim context (matches cond stage n_embed).
        use_spatial_transformer: true
        transformer_depth: 1
        context_dim: 768

    first_stage_config:
      # Identity first stage: diffusion runs directly in pixel space;
      # embed_dim/n_embed are unused by IdentityFirstStage but kept as-is.
      target: ldm.models.autoencoder.IdentityFirstStage
      params:
        embed_dim: 3
        n_embed: 8192

    cond_stage_config:
      target: ldm.modules.encoders.modules.GPTEmbedder
      params:
        n_embed: 768
        n_layer: 12

data:
  target: data.data_processing.datasets.DataModule
  params:
    # DataLoader settings shared by the splits below.
    batch_size: 16
    num_workers: 1
    wrap: false
    shuffle: true
    drop_last: true
    pin_memory: true
    prefetch_factor: 2
    persistent_workers: true
    train:
      target: data.data_processing.datasets.ActionsData
      params:
        data_csv_path: train_dataset/train_dataset_14frames_firstframe.csv
    # NOTE(review): validation reuses the training CSV — confirm this is
    # intentional (limit_val_batches: 0 below disables validation anyway).
    validation:
      target: data.data_processing.datasets.ActionsData
      params:
        data_csv_path: train_dataset/train_dataset_14frames_firstframe.csv

lightning:
  trainer:
    benchmark: false
    max_epochs: 4
    # Validation disabled entirely.
    limit_val_batches: 0
    accelerator: gpu
    gpus: 1
    # Effective batch size = 16 * 8 = 128 via gradient accumulation.
    accumulate_grad_batches: 8
    gradient_clip_val: 1
    checkpoint_callback: true