import os

import minari
import numpy as np

import d3rlpy.dataset
import d3rlpy.logging
from d3rlpy.dataset import MDPDataset
from d3rlpy.algos import (
    SACConfig, TD3PlusBCConfig, IQLConfig, CQLConfig, BCQConfig, CalQLConfig,
    AWACConfig, ReBRACConfig, TACRConfig, PLASConfig, PRDCConfig, BEARConfig,
    DecisionTransformerConfig, CalQL,
)

# FCEVEnv and the drive-cycle loader come from the local fcev package.
from fcev import FCEVEnv, load_drive_cycle


def load_minari_as_d3rlpy(name="fcev-mpc-v1", num=None):
    """Load a Minari dataset and convert it to a d3rlpy MDPDataset.

    Args:
        name (str): Minari dataset name.
        num (int, optional): Number of episodes to sample. If None, every
            episode in the dataset is used.

    Returns:
        MDPDataset: Episodes concatenated into flat observation, action,
        reward and terminal arrays.
    """
    dataset = minari.load_dataset(name)
    episodes = dataset.sample_episodes(num) if num else dataset.sample_episodes(dataset.total_episodes)

    all_obs = []
    all_actions = []
    all_rewards = []
    all_terminals = []

    for ep in episodes:
        # Minari stores one more observation than actions (the final state),
        # so drop the last observation and truncate everything to the action count.
        obs = ep.observations[:-1]
        actions = ep.actions
        rewards = ep.rewards
        terminals = ep.terminations

        n = len(actions)
        obs = obs[:n]
        actions = actions[:n]
        rewards = rewards[:n]
        terminals = terminals[:n]

        all_obs.append(obs)
        all_actions.append(actions)
        all_rewards.append(rewards)
        all_terminals.append(terminals)

    obs = np.vstack(all_obs)
    act = np.vstack(all_actions)
    reward = np.hstack(all_rewards)
    terminal = np.hstack(all_terminals)

    return MDPDataset(
        observations=obs,
        actions=act,
        rewards=reward,
        terminals=terminal,
    )

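# Note on episode endings: Minari episodes also carry `truncations` (time-limit
# endings), and d3rlpy's MDPDataset accepts an optional `timeouts` array alongside
# `terminals`. A possible refinement of the conversion above would stack
# ep.truncations the same way ep.terminations is stacked and pass it as
# `timeouts=...` in the MDPDataset constructor.
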

env_name = "fcev-mpc-v1"

dataset = load_minari_as_d3rlpy(env_name)

# ReplayBuffer.dump expects an open binary file handle rather than a path,
# and the target directory has to exist before writing the HDF5 snapshot.
os.makedirs("datasets", exist_ok=True)
with open(f"datasets/{env_name}.h5", "w+b") as f:
    dataset.dump(f)

# Reload the dump as a d3rlpy ReplayBuffer backed by an InfiniteBuffer.
with open(f"datasets/{env_name}.h5", "rb") as f:
    dataset = d3rlpy.dataset.ReplayBuffer.load(f, d3rlpy.dataset.InfiniteBuffer())
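
# Optional sanity check on the round-tripped buffer, assuming the standard
# d3rlpy ReplayBuffer attributes `episodes` and `transition_count`.
print(f"Loaded {len(dataset.episodes)} episodes / {dataset.transition_count} transitions")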

# Offline RL algorithm: Conservative Q-Learning (CQL) with graph compilation enabled.
algo = CQLConfig(compile_graph=True).create()
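# Any of the other imported configs can be swapped in here the same way, e.g.
# IQLConfig(compile_graph=True).create() or TD3PlusBCConfig(compile_graph=True).create();
# the rest of the script stays unchanged.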

# Build the networks against the FCEV environment's observation and action
# spaces, using the CLTC-P Part I drive cycle.
algo.build_with_env(env=FCEVEnv(load_drive_cycle("CLTC-P-PartI.csv")))

# Log training metrics to local files, TensorBoard and Weights & Biases at once.
logger_adapter = d3rlpy.logging.CombineAdapterFactory([
    d3rlpy.logging.FileAdapterFactory(root_dir="d3rlpy_logs"),
    d3rlpy.logging.TensorboardAdapterFactory(root_dir="tensorboard_logs"),
    d3rlpy.logging.WanDBAdapterFactory(),
])

# Pass the combined adapter to fit(); otherwise d3rlpy falls back to its default
# file logger and the TensorBoard/W&B adapters above are never used.
algo.fit(
    dataset,
    n_steps=10000,
    n_steps_per_epoch=1000,
    logger_adapter=logger_adapter,
)
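
# A minimal post-training sketch: save the learned policy and roll it out on the
# FCEV environment. This assumes FCEVEnv follows the Gymnasium API
# (reset() -> (obs, info), step() -> (obs, reward, terminated, truncated, info));
# adjust if the fcev package uses a different interface.
algo.save(f"d3rlpy_logs/cql_{env_name}.d3")

env = FCEVEnv(load_drive_cycle("CLTC-P-PartI.csv"))
obs, info = env.reset()
total_reward = 0.0
done = False
while not done:
    # predict() expects a batch dimension, so wrap the single observation.
    action = algo.predict(np.expand_dims(obs, axis=0))[0]
    obs, reward, terminated, truncated, info = env.step(action)
    total_reward += float(reward)
    done = terminated or truncated
print(f"Evaluation return on CLTC-P Part I: {total_reward:.2f}")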