# eDriveMORL / run.py
# Uploaded by TJIET via huggingface_hub (commit bcba0e4, verified)
import wandb
import d3rlpy
import argparse
import traceback
from d3rlpy.dataset import ReplayBuffer, InfiniteBuffer
from d3rlpy.preprocessing import StandardObservationScaler
from d3rlpy.logging import CombineAdapterFactory, FileAdapterFactory, TensorboardAdapterFactory
from fcev import FCEVEnv, load_drive_cycle
from d3rlpy.algos import (
TD3PlusBCConfig, IQLConfig, CQLConfig, BCQConfig,
CalQLConfig, AWACConfig, ReBRACConfig, TACRConfig,
PLASConfig, PRDCConfig, BEARConfig
)
from typing import Any, Optional
from d3rlpy.logging import WanDBAdapter
from d3rlpy.logging.logger import (
AlgProtocol,
LoggerAdapter,
LoggerAdapterFactory,
SaveProtocol,
)
# ---------- WandB Logger Factory ----------
class GWanDBAdapterFactory(LoggerAdapterFactory):
    r"""WandB Logger Adapter Factory class.

    Creates :class:`WanDBAdapter` instances for experiment tracking.

    Args:
        project (Optional[str], optional): The name of the WandB project.
            Defaults to None.
        experiment_name (Optional[str], optional): Fixed experiment name.
            When given, it overrides the name d3rlpy passes to
            :meth:`create`. Defaults to None.
    """

    _project: Optional[str]
    _experiment_name: Optional[str]

    def __init__(
        self,
        project: Optional[str] = None,
        experiment_name: Optional[str] = None,
    ) -> None:
        self._project = project
        # Bug fix: experiment_name was previously accepted but never stored,
        # so the "--algo-run" name passed by train() was silently dropped.
        self._experiment_name = experiment_name

    def create(
        self, algo: AlgProtocol, experiment_name: str, n_steps_per_epoch: int
    ) -> LoggerAdapter:
        return WanDBAdapter(
            algo=algo,
            # Prefer the name fixed at construction time, if any.
            experiment_name=self._experiment_name or experiment_name,
            n_steps_per_epoch=n_steps_per_epoch,
            project=self._project,
        )
# ---------- Algorithm Config Dictionary ----------
def get_algo_configs():
    """Return a mapping of algorithm name -> pre-built d3rlpy config.

    Every algorithm uses the same encoder settings (batch norm enabled,
    dropout 0.2) for both actor and critic. All algorithms except ReBRAC
    also standardize observations. Fresh factory/scaler objects are built
    per config so no state is shared between algorithms.

    Returns:
        dict[str, object]: Algorithm name mapped to its config instance.
    """

    def _encoder():
        # One fresh factory instance per actor/critic per algorithm.
        return d3rlpy.models.DefaultEncoderFactory(
            use_batch_norm=True, dropout_rate=0.2
        )

    def _common():
        # Shared kwargs used by every config except ReBRAC.
        return dict(
            actor_encoder_factory=_encoder(),
            critic_encoder_factory=_encoder(),
            observation_scaler=StandardObservationScaler(),
        )

    algo_configs = {
        "TD3PlusBC": TD3PlusBCConfig(**_common()),
        "IQL": IQLConfig(**_common()),
        "CQL": CQLConfig(**_common()),
        "BCQ": BCQConfig(**_common()),
        "CalQL": CalQLConfig(**_common()),
        "AWAC": AWACConfig(**_common()),
        # NOTE(review): ReBRAC deviates from the others — it uses a quantile
        # Q-function and has NO observation scaler. Preserved as-is; confirm
        # this asymmetry is intentional.
        "ReBRAC": ReBRACConfig(
            actor_encoder_factory=_encoder(),
            critic_encoder_factory=_encoder(),
            q_func_factory=d3rlpy.models.QRQFunctionFactory(),
        ),
        "TACR": TACRConfig(**_common()),
        "PLAS": PLASConfig(**_common()),
        "PRDC": PRDCConfig(**_common()),
        "BEAR": BEARConfig(**_common()),
    }
    return algo_configs
# ---------- Training Function ----------
def train(args):
    """Train the selected offline RL algorithm on the FCEV dataset.

    Args:
        args: Parsed CLI namespace with fields ``algo``, ``dataset_path``,
            ``drive_cycle``, ``n_steps``, ``n_steps_per_epoch``, ``device``,
            ``wandb`` and ``wandb_project``.

    Raises:
        ValueError: If ``args.algo`` is not a supported algorithm name.
    """
    algo_configs = get_algo_configs()
    if args.algo not in algo_configs:
        raise ValueError(f"Unsupported algorithm: {args.algo}")

    # Load the offline dataset into an unbounded replay buffer.
    with open(args.dataset_path, "rb") as f:
        dataset = ReplayBuffer.load(f, InfiniteBuffer())

    # The environment is only used for online evaluation during training.
    env = FCEVEnv(load_drive_cycle(args.drive_cycle))

    config = algo_configs[args.algo]
    algo = config.create(device=args.device)

    # Always log to file + tensorboard; WandB logging is opt-in via --wandb.
    logger_adapters = [
        FileAdapterFactory(root_dir=f"d3rlpy_logs/{args.algo}"),
        TensorboardAdapterFactory(root_dir=f"tensorboard_logs/{args.algo}"),
    ]
    if args.wandb:
        logger_adapters.append(
            GWanDBAdapterFactory(
                experiment_name=f"{args.algo}-run", project=args.wandb_project
            )
        )
    logger_adapter = CombineAdapterFactory(logger_adapters)

    try:
        print(f"\n🚀 Starting training: {args.algo}")
        algo.fit(
            dataset,
            n_steps=args.n_steps,
            n_steps_per_epoch=args.n_steps_per_epoch,
            logger_adapter=logger_adapter,
            evaluators={
                'init_value': d3rlpy.metrics.InitialStateValueEstimationEvaluator(),
                'soft_opc': d3rlpy.metrics.SoftOPCEvaluator(return_threshold=100),
                'action': d3rlpy.metrics.ContinuousActionDiffEvaluator(),
                'environment': d3rlpy.metrics.EnvironmentEvaluator(env),
                'Advantage': d3rlpy.metrics.DiscountedSumOfAdvantageEvaluator()
            },
        )
        print(f"\n✅ Training finished for: {args.algo}")
    except Exception:
        # Top-level boundary: report the failure but fall through so the
        # WandB run is still closed cleanly.
        print(f"\n❌ Training failed: {args.algo}")
        print(traceback.format_exc())
    finally:
        # Bug fix: only finish the WandB run when WandB logging was actually
        # enabled, and do it even if fit() raises.
        if args.wandb:
            wandb.finish()
# ---------- Main CLI ----------
if __name__ == "__main__":
    # Command-line entry point: parse options and launch offline training.
    parser = argparse.ArgumentParser(description="Offline RL training for FCEV")
    add = parser.add_argument
    add(
        "--algo",
        type=str,
        default="AWAC",
        choices=list(get_algo_configs().keys()),
        help="Name of the offline RL algorithm",
    )
    add(
        "--dataset-path",
        type=str,
        default="datasets/fcev-mpc-v1.h5",
        help="Path to the .h5 dataset file",
    )
    add(
        "--drive-cycle",
        type=str,
        default="CLTC-P-PartI.csv",
        help="Path to the drive cycle CSV file",
    )
    add(
        "--n-steps",
        type=int,
        default=10000,
        help="Total number of training steps",
    )
    add(
        "--n-steps-per-epoch",
        type=int,
        default=100,
        help="Steps per epoch",
    )
    add(
        "--device",
        type=str,
        default="cuda:0",
        help="Training device (e.g., 'cpu', 'cuda:0')",
    )
    add(
        "--wandb",
        action="store_true",
        help="Enable WandB logging",
    )
    add(
        "--wandb-project",
        type=str,
        default="fcev-offline-benchmark",
        help="WandB project name (used only if --wandb is enabled)",
    )
    train(parser.parse_args())