Dataset Preview
The full dataset viewer is not available (click to read why). Only showing a preview of the rows.
The dataset generation failed
Error code: DatasetGenerationError Exception: UnicodeDecodeError Message: 'utf-8' codec can't decode byte 0x80 in position 64: invalid start byte Traceback: Traceback (most recent call last): File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1854, in _prepare_split_single for _, table in generator: File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/packaged_modules/text/text.py", line 73, in _generate_tables batch = f.read(self.config.chunksize) File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/utils/file_utils.py", line 826, in read_with_retries out = read(*args, **kwargs) File "/usr/local/lib/python3.9/codecs.py", line 322, in decode (result, consumed) = self._buffer_decode(data, self.errors, final) UnicodeDecodeError: 'utf-8' codec can't decode byte 0x80 in position 64: invalid start byte The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1420, in compute_config_parquet_and_info_response parquet_operations = convert_to_parquet(builder) File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1052, in convert_to_parquet builder.download_and_prepare( File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 924, in download_and_prepare self._download_and_prepare( File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1000, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1741, in _prepare_split for job_id, done, content in self._prepare_split_single( File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1897, in _prepare_split_single raise DatasetGenerationError("An error occurred while generating the 
dataset") from e datasets.exceptions.DatasetGenerationError: An error occurred while generating the dataset
Need help to make the dataset viewer work? Make sure to review how to configure the dataset viewer, and open a discussion for direct support.
Column: text (type: string)
from easydict import EasyDict |
main_config = dict( |
exp_name='data_suz_mt_20250102/ddp_8gpu_nlayer8_18tasks_brf1e-05_tbs256_seed0/acrobot-swingup_seed0', |
env=dict( |
manager=dict( |
episode_num=float('inf'), |
max_retry=1, |
step_timeout=None, |
auto_reset=True, |
reset_timeout=None, |
retry_type='reset', |
retry_waiting_time=0.1, |
shared_memory=False, |
copy_on_get=True, |
context='fork', |
wait_num=float('inf'), |
step_wait_timeout=None, |
connect_timeout=60, |
reset_inplace=False, |
cfg_type='SyncSubprocessEnvManagerDict', |
type='subprocess', |
), |
stop_value=500000, |
n_evaluator_episode=3, |
domain_name='acrobot', |
task_name='swingup', |
frame_skip=2, |
warp_frame=False, |
scale=False, |
clip_rewards=False, |
action_repeat=1, |
frame_stack=1, |
from_pixels=False, |
visualize_reward=False, |
height=84, |
width=84, |
channels_first=True, |
resize=84, |
replay_path=None, |
save_replay_gif=False, |
replay_path_gif=None, |
render_image=False, |
collect_max_episode_steps=1000, |
eval_max_episode_steps=1000, |
cfg_type='DMC2GymEnvDict', |
env_id='acrobot-swingup', |
observation_shape_list=[6, 5, 5, 5, 5, 17, 8, 9, 12, 12, 15, 15, 3, 6, 6, 24, 24, 24], |
action_space_size_list=[1, 1, 1, 1, 1, 6, 2, 2, 2, 2, 4, 4, 1, 2, 2, 6, 6, 6], |
continuous=True, |
collector_env_num=8, |
evaluator_env_num=3, |
game_segment_length=100, |
), |
policy=dict( |
model=dict( |
model_type='mlp', |
continuous_action_space=True, |
observation_shape=[3, 64, 64], |
self_supervised_learning_loss=True, |
categorical_distribution=True, |
image_channel=3, |
frame_stack_num=1, |
num_res_blocks=1, |
num_channels=64, |
support_scale=50, |
bias=True, |
discrete_action_encoding_type='one_hot', |
res_connection_in_dynamics=True, |
norm_type='LN', |
analysis_sim_norm=False, |
analysis_dormant_ratio=False, |
harmony_balance=False, |
learn={'learner': {'hook': {'save_ckpt_after_iter': 10000}}}, |
world_model_cfg={'continuous_action_space': True, 'tokens_per_block': 2, 'max_blocks': 5, 'max_tokens': 10, 'context_length': 4, 'gru_gating': False, 'device': 'cuda', 'analysis_sim_norm': False, 'analysis_dormant_ratio': False, 'action_space_size': 6, 'group_size': 8, 'attention': 'causal', 'num_layers': 8, 'num_heads': 8, 'embed_dim': 768, 'embed_pdrop': 0.1, 'resid_pdrop': 0.1, 'attn_pdrop': 0.1, 'support_size': 101, 'max_cache_size': 5000, 'env_num': 8, 'latent_recon_loss_weight': 0.0, 'perceptual_loss_weight': 0.0, 'policy_entropy_weight': 0.05, 'predict_latent_loss_type': 'group_kl', 'obs_type': 'vector', 'gamma': 1, 'dormant_threshold': 0.025, 'policy_loss_type': 'kl', 'observation_shape_list': [6, 5, 5, 5, 5, 17, 8, 9, 12, 12, 15, 15, 3, 6, 6, 24, 24, 24], 'action_space_size_list': [1, 1, 1, 1, 1, 6, 2, 2, 2, 2, 4, 4, 1, 2, 2, 6, 6, 6], 'num_unroll_steps': 5, 'num_of_sampled_actions': 20, 'sigma_type': 'conditioned', 'fixed_sigma_value': 0.5, 'bound_type': None, 'model_type': 'mlp', 'norm_type': 'LN', 'task_num': 18, 'use_normal_head': True, 'use_softmoe_head': False, 'use_moe_head': False, 'num_experts_in_moe_head': 4, 'moe_in_transformer': False, 'multiplication_moe_in_transformer': False, 'num_experts_of_moe_in_transformer': 4}, |
observation_shape_list=[6, 5, 5, 5, 5, 17, 8, 9, 12, 12, 15, 15, 3, 6, 6, 24, 24, 24], |
action_space_size_list=[1, 1, 1, 1, 1, 6, 2, 2, 2, 2, 4, 4, 1, 2, 2, 6, 6, 6], |
num_of_sampled_actions=20, |
), |
learn=dict( |
learner=dict( |
train_iterations=1000000000, |
dataloader=dict( |
num_workers=0, |
), |
log_policy=True, |
hook=dict( |
load_ckpt_before_run='', |
log_show_after_iter=100, |
save_ckpt_after_iter=1000000, |
save_ckpt_after_run=True, |
), |
cfg_type='BaseLearnerDict', |
), |
), |
collect=dict( |
collector=dict( |
deepcopy_obs=False, |
transform_obs=False, |
collect_print_freq=100, |
End of preview.
No dataset card yet
Downloads last month: 16