# eDriveMORL/train.py
import os

import minari
import numpy as np

import d3rlpy
import d3rlpy.dataset
from d3rlpy.dataset import MDPDataset
from d3rlpy.algos import SACConfig, TD3PlusBCConfig, IQLConfig, CQLConfig, BCQConfig, CalQLConfig, AWACConfig, \
    ReBRACConfig, TACRConfig, PLASConfig, PRDCConfig, BEARConfig, DecisionTransformerConfig

from fcev import FCEVEnv, load_drive_cycle


def load_minari_as_d3rlpy(name="fcev-mpc-v1", num=None):
"""Load Minari dataset with a custom reward function.
Args:
name (str): Dataset name.
num (int, optional): Number of episodes to sample.
beta (float): Logistic function slope.
c (float): Offset for logistic transformation.
Returns:
MDPDataset: Dataset with custom rewards.
"""
    dataset = minari.load_dataset(name)
    episodes = dataset.sample_episodes(num) if num else dataset.sample_episodes(dataset.total_episodes)
    all_obs = []
    all_actions = []
    all_rewards = []
    all_terminals = []
    all_timeouts = []
    for ep in episodes:
        # Minari stores one more observation than actions (the final
        # next-observation), so trim every array to n transitions.
        n = len(ep.actions)
        obs = ep.observations[:n]
        actions = ep.actions[:n]
        rewards = ep.rewards[:n]
        terminals = ep.terminations[:n]
        # Keep truncations as timeouts so episodes that end with the drive
        # cycle (rather than a terminal state) are still split correctly.
        timeouts = ep.truncations[:n]
        all_obs.append(obs)
        all_actions.append(actions)
        all_rewards.append(rewards)
        all_terminals.append(terminals)
        all_timeouts.append(timeouts)
    obs = np.vstack(all_obs)
    act = np.vstack(all_actions)
    reward = np.hstack(all_rewards)
    terminal = np.hstack(all_terminals)
    timeout = np.hstack(all_timeouts)
    return MDPDataset(
        observations=obs,
        actions=act,
        rewards=reward,
        terminals=terminal,
        timeouts=timeout,
    )
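

def logistic_reward_shaping(rewards, beta=1.0, c=0.0):
    """Optional logistic reshaping of rewards before training.

    Illustrative sketch only: the logistic form 1 / (1 + exp(-beta * (r - c)))
    and the default values of ``beta`` and ``c`` are assumptions, not a
    confirmed part of the eDriveMORL reward design. If reward reshaping is
    desired, apply this to the reward array before building the MDPDataset.
    """
    rewards = np.asarray(rewards, dtype=np.float64)
    return 1.0 / (1.0 + np.exp(-beta * (rewards - c)))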
# Choose which recorded dataset to train on
env_name = "fcev-mpc-v1"
# env_name = "fcev-rule-v1"

# Convert the Minari recording to a d3rlpy MDPDataset
dataset = load_minari_as_d3rlpy(env_name)

# Save the dataset to disk as HDF5
os.makedirs("datasets", exist_ok=True)
with open(f"datasets/{env_name}.h5", "w+b") as f:
    dataset.dump(f)

# Reload the dataset as a d3rlpy ReplayBuffer
with open(f"datasets/{env_name}.h5", "rb") as f:
    dataset = d3rlpy.dataset.ReplayBuffer.load(f, d3rlpy.dataset.InfiniteBuffer())
# dataset = d3rlpy.datasets.get_minari("fcev-mpc-v1")
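# Quick sanity check on the reloaded buffer (assumes the d3rlpy 2.x ReplayBuffer
# API, which exposes `episodes` and `transition_count`; adjust if your version differs)
print(f"Loaded {len(dataset.episodes)} episodes, {dataset.transition_count} transitions")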
# Select and build algorithm
# algo = SACConfig(compile_graph=True).create()
# algo = TD3PlusBCConfig(compile_graph=True).create()
algo = CQLConfig(compile_graph=True).create()
# algo = BCQConfig(compile_graph=True).create()
# algo = IQLConfig(compile_graph=True).create()
# algo = CalQLConfig(compile_graph=True).create()
# algo = DecisionTransformerConfig(compile_graph=True).create()
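# Hyperparameters can be tuned through the config; the values below are an
# illustrative sketch only, not settings validated for the FCEV task:
# algo = CQLConfig(
#     actor_learning_rate=1e-4,
#     critic_learning_rate=3e-4,
#     conservative_weight=5.0,
#     compile_graph=True,
# ).create(device="cuda:0")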
# Build the networks against the FCEV environment (infers observation/action shapes)
algo.build_with_env(env=FCEVEnv(load_drive_cycle("CLTC-P-PartI.csv")))
# Combine file, TensorBoard, and Weights & Biases logging adapters
logger_adapter = d3rlpy.logging.CombineAdapterFactory([
d3rlpy.logging.FileAdapterFactory(root_dir="d3rlpy_logs"),
d3rlpy.logging.TensorboardAdapterFactory(root_dir="tensorboard_logs"),
d3rlpy.logging.WanDBAdapterFactory()
])
# Train the algorithm offline and route metrics to the combined logger
algo.fit(
    dataset,
    n_steps=10000,
    n_steps_per_epoch=1000,
    logger_adapter=logger_adapter,
)
# Alternative: TD3+BC with custom hyperparameters (d3rlpy 2.x config API)
# algo = TD3PlusBCConfig(actor_learning_rate=1e-4, alpha=2.5).create()
# algo.fit(dataset, n_steps=200000, n_steps_per_epoch=1000)  # roughly 200 epochs
# algo.save_model("td3bc_model.d3")