Francesco Capuano commited on
Commit
1a48c91
·
1 Parent(s): 006f8db

add: app demo

Browse files
Files changed (3) hide show
  1. app.py +239 -0
  2. copy.md +109 -0
  3. requirements.txt +8 -0
app.py ADDED
@@ -0,0 +1,239 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import matplotlib
2
+ matplotlib.use('Agg')
3
+
4
+ import gradio as gr
5
+ import gymnasium as gym
6
+ from stable_baselines3 import SAC
7
+ from stable_baselines3.common.vec_env import VecFrameStack, DummyVecEnv
8
+ import os
9
+
10
+ from huggingface_hub import hf_hub_download
11
+
12
+ import gym_laser # Registers env name for gym.make()
13
+
14
# Pre-trained model configurations (TODO: add models by hosting them on huggingface)
# Maps dropdown labels to Hub model ids. None means "random policy";
# "upload" is a placeholder for the custom-upload flow (note that
# load_selected_model branches on the label string, not on this value).
PRETRAINED_MODELS = {
    "Random Policy": None,
    "Upload Custom Model": "upload",
    "SAC-UDR(1.5,2.5)": "sac-udr-narrow",
    "SAC-UDR(1.0,9.0)": "sac-udr-wide-extra",
}

MAX_STEPS = 100_000  # large number for continuous simulation
23
+
24
def get_model_path(model_id):
    """Return the local archive path for a pre-trained policy id."""
    return "pretrained-policies/{}.zip".format(model_id)
27
+
28
+
29
def load_pretrained_model(model_id):
    """Fetch a pre-trained SAC policy from the Hugging Face Hub and load it.

    The checkpoint is expected at repo ``fracapuano/<model_id>`` under the
    filename ``<model_id>.zip``.
    """
    repo = f"fracapuano/{model_id}"
    archive = f"{model_id}.zip"
    checkpoint_path = hf_hub_download(repo_id=repo, filename=archive)
    return SAC.load(checkpoint_path)
35
+
36
+
37
def make_env_fn():
    """Factory producing one LaserEnv instance that renders rgb_array frames."""
    env_id = "LaserEnv"  # registered by the gym_laser import at file top
    return gym.make(env_id, render_mode="rgb_array")
40
+
41
+
42
def initialize_environment():
    """Create the vectorized, frame-stacked laser environment on app load.

    Returns:
        dict | None: Simulation state with keys ``env``, ``obs``, ``model``,
        ``step_num``, ``current_b_integral`` and ``model_filename``;
        ``None`` if environment creation fails.
    """
    try:
        # Single env wrapped in DummyVecEnv so SB3 policies trained on
        # vectorized observations can predict against it.
        env = DummyVecEnv([make_env_fn])
        # Stack the last 5 observations -- presumably matches the
        # training-time wrapper configuration (TODO confirm).
        env = VecFrameStack(env, n_stack=5)
        obs = env.reset()
        return {
            "env": env,
            "obs": obs,
            "model": None,  # None => act with a random policy
            "step_num": 0,
            "current_b_integral": 2.0,  # live-tunable nonlinearity parameter
            "model_filename": "Random Policy",  # default model display name
        }
    except Exception as e:
        # Bug fix: the original returned a 2-tuple (None, "Error: ...") here
        # while the success path returned a single dict. This function is
        # wired to exactly one Gradio output ([sim_state]), so the arity must
        # be consistent: log the failure and return None.
        print(f"Error initializing environment: {e}")
        return None
59
+
60
+
61
def load_selected_model(state, model_selection, uploaded_file):
    """Load a model (random / uploaded / pre-trained) and reset the episode.

    Wired to exactly two Gradio outputs, (sim_state, model_uploader), so
    every path must return exactly two values. Bug fix: the original error
    paths returned three values (state, message, gr.update()), which
    mismatches the declared outputs; errors are now logged instead.

    Args:
        state: Simulation state dict from initialize_environment (or None
            while the page is still loading).
        model_selection: Label chosen in the model dropdown.
        uploaded_file: File handle from the UploadButton (may be None).

    Returns:
        tuple: (possibly-updated state, gr.update() for the upload button).
    """
    if state is None:
        return state, gr.update()

    try:
        if model_selection == "Random Policy":
            state["model"] = None
            state["model_filename"] = "Random Policy"
        elif model_selection == "Upload Custom Model":
            if uploaded_file is None:
                # Nothing uploaded yet: keep the current model and wait for
                # the upload event to re-trigger this handler.
                print("Please upload a model file.")
                return state, gr.update()
            state["model"] = SAC.load(uploaded_file.name)
            state["model_filename"] = os.path.basename(uploaded_file.name)
        else:
            model_id = PRETRAINED_MODELS[model_selection]
            state["model"] = load_pretrained_model(model_id)
            state["model_filename"] = model_selection

        # Restart the episode so the new policy starts from a fresh state.
        state["obs"] = state["env"].reset()
        state["step_num"] = 0
        return state, gr.update()

    except Exception as e:
        print(f"Error loading model: {e}")
        return state, gr.update()
97
+
98
def update_b_integral(state, b_integral):
    """Record the slider's B-integral value in the shared state.

    The running simulation generator reads this value on every step, so no
    restart is needed. A missing state (page still loading) passes through
    untouched.
    """
    if state is None:
        return state
    state["current_b_integral"] = b_integral
    return state
103
+
104
+
105
def run_continuous_simulation(state):
    """Generator that steps the environment, yielding (state, frame) pairs.

    Wired to two Gradio outputs, [sim_state, image_display], so every yield
    must produce exactly two values. Bug fix: the original "not ready"
    branch yielded three values (state, None, message), which mismatches
    the declared outputs. Also removed the unused ``model_filename`` local.

    The active model and B-integral are re-read from ``state`` on every
    step, so dropdown and slider changes take effect live without
    restarting the episode.
    """
    if not state or "env" not in state:
        yield state, None  # environment not ready; nothing to render
        return

    env = state["env"]
    obs = state["obs"]
    step_num = state.get("step_num", 0)

    # Run for a large number of steps to simulate "always-on".
    for _ in range(MAX_STEPS):
        model = state.get("model")
        current_b = state.get("current_b_integral", 2.0)

        # Push the live slider value into the underlying laser simulation.
        env.envs[0].unwrapped.laser.B = float(current_b)

        if model:
            action, _ = model.predict(obs, deterministic=True)
        else:
            # Random policy; reshape to (1, action_dim) for the VecEnv API.
            action = env.action_space.sample().reshape(1, -1)

        obs, _, done, _ = env.step(action)
        frame = env.render()

        if done[0]:
            obs = env.reset()
            step_num = 0
        else:
            step_num += 1

        state["obs"] = obs
        state["step_num"] = step_num

        yield state, frame
142
+
143
+
144
# Gradio UI: a "Demo" tab with a live-updating simulation and an "About" tab
# rendered from copy.md. Event wiring below is order-dependent.
with gr.Blocks(css="body {zoom: 90%}") as demo:
    gr.Markdown("# Shaping Laser Pulses with Reinforcement Learning")

    with gr.Tab("Demo"):
        # Per-session simulation state (env, obs, model, counters).
        sim_state = gr.State()

        with gr.Row():
            b_slider = gr.Slider(
                minimum=0,
                maximum=10,
                step=0.5,
                value=2.0,
                label="B-integral",
                info="Adjust nonlinearity live during simulation.",
            )

        with gr.Row():
            image_display = gr.Image(label="Environment Render", interactive=False, height=360)

        with gr.Row():
            with gr.Column():
                model_selector = gr.Dropdown(
                    choices=list(PRETRAINED_MODELS.keys()),
                    value="Random Policy",
                    label="Model Selection",
                    info="Choose a pre-trained model or upload your own"
                )

        with gr.Row():
            with gr.Column(scale=1):
                model_uploader = gr.UploadButton(
                    "Upload Model (.zip)",
                    file_types=['.zip'],
                    elem_id="model-upload",
                    visible=False  # Initially hidden
                )

        # Show/hide upload button based on selection
        def update_upload_visibility(selection):
            return gr.update(visible=(selection == "Upload Custom Model"))

        # NOTE(review): model_selector gets TWO .change handlers (this one
        # and model_change_event below); both fire on each change -- confirm
        # this ordering is intended.
        model_selector.change(
            fn=update_upload_visibility,
            inputs=[model_selector],
            outputs=[model_uploader]
        )

        # On page load, initialize and start the continuous simulation
        init_event = demo.load(
            fn=initialize_environment,
            inputs=None,
            outputs=[sim_state]
        )

        # run_continuous_simulation is a generator, so Gradio streams each
        # yielded (state, frame) pair to the outputs.
        continuous_event = init_event.then(
            fn=run_continuous_simulation,
            inputs=[sim_state],
            outputs=[sim_state, image_display]
        )

        # When model selection changes, load the selected model, cancelling
        # the running stream first, then restart the simulation.
        model_change_event = model_selector.change(
            fn=load_selected_model,
            inputs=[sim_state, model_selector, model_uploader],
            outputs=[sim_state, model_uploader],
            cancels=[continuous_event]
        ).then(
            fn=run_continuous_simulation,
            inputs=[sim_state],
            outputs=[sim_state, image_display]
        )

        # When a custom model is uploaded, load it (same cancel/restart flow)
        model_upload_event = model_uploader.upload(
            fn=load_selected_model,
            inputs=[sim_state, model_selector, model_uploader],
            outputs=[sim_state, model_uploader],
            cancels=[continuous_event]
        ).then(
            fn=run_continuous_simulation,
            inputs=[sim_state],
            outputs=[sim_state, image_display]
        )

        # When B-integral slider changes, just update the value in state
        # (no restart needed: the simulation loop re-reads it every step)
        b_slider.change(
            fn=update_b_integral,
            inputs=[sim_state, b_slider],
            outputs=[sim_state]
        )

    with gr.Tab("About"):
        # Render the project write-up shipped alongside the app.
        with open("copy.md", "r") as f:
            gr.Markdown(f.read())

demo.launch()
copy.md ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Table of Contents
2
+ - [TL;DR](#tl-dr)
3
+ - [Shaping Laser Pulses](#shaping-laser-pulses)
4
+ - [Automated approaches](#automated-approaches)
5
+ - [BO's limitations](#bos-limitations)
6
+ - [RL to the rescue](#rl-to-the-rescue)
7
+
8
+
9
+ ## TL; DR:
10
+ We train a Reinforcement Learning agent to **optimally shape laser pulses** from readily-available diagnostics images, across a range of dynamics parameters for intensity maximization.
11
+ Our method **(1) completely bypasses imprecise reconstructions** of ultra-fast laser pulses, **(2) can learn to be robust to varying dynamics** and **(3) prevents erratic behavior** at test-time by training in coarse simulation only.
12
+
13
+ <div align="center">
14
+ <img src="https://huggingface.co/datasets/fracapuano/rlaser-assets/resolve/main/assets/Figure1_and_CPA.png" alt="Phase changes animation">
15
+ <p> (A) Schematic representation of the RL pipeline for pulse shaping in HPL systems. (B) Illustration of the process of linear and non-linear phase accumulation taking place along the pump-chain of laser systems.</p>
16
+ </div>
17
+
18
+ By opportunely controlling the phase imposed at the stretcher, one can benefit from both energy and duration gains, for maximal peak intensity.
19
+
20
+ ---
21
+
22
+ ## Shaping Laser Pulses
23
+
24
+ Ultra-fast light-matter interactions, such as laser-plasma physics and nonlinear optics, require precise shaping of the temporal pulse profile.
25
+ Optimizing such profiles is one of the most critical tasks to establish control over these interactions.
26
+ Typically, the highest intensities conveyed by laser pulses are achieved by compressing a pulse to its transform-limited (TL) pulse shape, while some interactions may require arbitrary temporal shapes different from the TL profile (mainly to protect the system from potential damage).
27
+
28
+
29
+ <div align="center">
30
+ <img src="https://huggingface.co/datasets/fracapuano/rlaser-assets/resolve/main/assets/phase.gif" alt="Phase changes animation">
31
+ <p>Changes in the spectral phase applied on the input spectrum (left) have a direct impact on the temporal profile (right).</p>
32
+ </div>
33
+
34
+ In this work, we shape laser pulses by varying the GDD, TOD and FOD coefficients, effectively tuning the spectral phase applied to minimize temporal pulse duration.
35
+
36
+ <!-- add link to space demo -->
37
+
38
+ ## Automated approaches
39
+
40
+ The most common automated laser pulse shape optimization approaches mainly employ black-box algorithms, such as Bayesian Optimization (BO) and Evolutionary Strategies (ES). These algorithms are typically used in a closed feedback loop between the pulse shaper and various measurement devices.
41
+
42
+ For pulse duration minimization, numerical methods including BO and ES require precise temporal shape reconstruction, to measure the loss against a target temporal profile, or obtain derived metrics such as duration at full-width half-max, or peak intensity value.
43
+
44
+ Recently, approaches based on BO have gained popularity because of their broad applicability and sample efficiency over ES, often requiring a fraction of the function evaluations to obtain comparable performance.
45
+ Indeed, in automated pulse shaping, each function evaluation requires one (or more) real-world laser bursts. Therefore, methods that directly optimize real-world operational hardware are evaluated based on their efficiency in terms of number of the required interactions.
46
+
47
+ ### BO's limitations
48
+
49
+ While effective, BO suffers from limitations related to (1) the need to perform precise pulse reconstruction (2) machine-safety and (3) transferability. To a large extent, these limitations are only more significant for other methods such as ES.
50
+
51
+ #### 1. Imprecise pulse reconstruction
52
+ BO requires accurate measurements of the current pulse shape to guide optimization. However, real-world pulse reconstruction techniques can be **noisy or imprecise**, leading to poor state estimation, and increasingly high risk of applying suboptimal controls.
53
+
54
+ <div align="center">
55
+ <img src="https://huggingface.co/datasets/fracapuano/rlaser-assets/resolve/main/assets/reconstructing_frog.png" alt="Phase changes animation" width="70%">
56
+ <p>Temporal profiles with temporal-domain reconstructed phase (top) versus diagnostic measures of the burst status (bottom), in the form of FROG traces. Image source: Zahavy et al., 2018.</p>
57
+ </div>
58
+
59
+ #### 2. Dependency on the dynamics
60
+ BO typically optimizes for specific system parameters and **doesn't generalize well when laser dynamics change**. Each new experimental setup or parameter regime may require re-optimizing the process from scratch!
61
+
62
+ This follows from standard BO optimizing a typically-scalar loss function under stationarity assumptions, which can prove rather problematic in the context of pulse-shaping. This follows from the fact that day-to-day changes in the experimental setup can quite reasonably result in non-stationarity: **the same control, when applied in different experimental conditions, can yield significantly different results**.
63
+
64
+ <div align="center">
65
+ <img src="https://huggingface.co/datasets/fracapuano/rlaser-assets/resolve/main/assets/B_integral.png" alt="Phase changes animation" width="70%">
66
+ <p>Impact of experimental conditions only, in this case a non-linearity parameter known as "B-integral", on the end-result of applying the same control.</p>
67
+ </div>
68
+
69
+ #### 3. Erratic exploration
70
+
71
+ BO can endanger the system by applying **abrupt controls at initialization**. Controls are applied as temperature gradients on a gated optical fiber, and as such successive controls cannot typically vary significantly, because the one-step temperature difference cannot change arbitrarily.
72
+
73
+ <div align="center" style="display: flex; justify-content: center; gap: 20px;">
74
+ <div>
75
+ <img src="https://huggingface.co/datasets/fracapuano/rlaser-assets/resolve/main/assets/pulses_anim.gif" alt="BO temporal profile">
76
+ </div>
77
+ <div>
78
+ <img src="https://huggingface.co/datasets/fracapuano/rlaser-assets/resolve/main/assets/control_anim.gif" alt="BO exploration">
79
+ </div>
80
+ </div>
81
+ <p>BO, (left) temporal profile obtained probing points from the parameters space and (right) BO, evolution of the probed points as the parameters space is explored.</p>
82
+
83
+ ## RL to the rescue
84
+
85
+ In this work, we address all these limitations by **(1) learning policies directly from readily-available images**, capable of **(2) working across varying dynamics**, and **(3) trained in coarse simulation to prevent erratic-behavior** at test time.
86
+
87
+ First, (1) we train our RL agent directly from readily available diagnostic measurements in the form of 64x64 images. This means we can **entirely bypass the reconstruction noise** arising from numerical methods for temporal pulse-shape reconstruction, learning straight from single-channel images.
88
+
89
+ <div align="center">
90
+ <img src="https://huggingface.co/datasets/fracapuano/rlaser-assets/resolve/main/assets/Figure1.png" width="50%">
91
+ <p>Control is applied directly from images, thus learning to adjust to unmodeled changes in the environment. </p>
92
+ </div>
93
+
94
+ Further, (2) by training on diverse scenarios, RL can develop both **safe and general control strategies** adaptive to a range of different dynamics. In turn, this makes it possible to run and live-update control policies across experimental conditions.
95
+ <div align="center">
96
+ <img src="https://huggingface.co/datasets/fracapuano/rlaser-assets/resolve/main/assets/udr_vs_doraemon_average.png" width="50%">
97
+ <p>We can retain high level of performance (>70%) even for larger---above 5, fictional---levels of non-linearity in the systems. This shows we can retain performance by applying a proper randomization technique.</p>
98
+ </div>
99
+
100
+ Lastly, (3) by learning in a coarse simulation, we can **drastically limit the number of interactions at test time**, preventing erratic behavior which would endanger the system's safety.
101
+
102
+ <div align="center">
103
+ <img src="https://huggingface.co/datasets/fracapuano/rlaser-assets/resolve/main/assets/machinesafety.png" width="50%">
104
+ <p> Controls applied (BO vs RL). As it samples from an iteratively-refined surrogate model of the objective function, BO explores much more erratically than RL.</p>
105
+ </div>
106
+
107
+ In conclusion, we demonstrate that deep reinforcement learning can master laser pulse shaping by learning **robust policies from raw diagnostics**, paving the way towards **autonomous control of complex physical systems**.
108
+
109
+ If you're interested in learning more, check out [our latest paper](https://huggingface.co/papers/2503.00499), our [simulator's code](https://github.com/fracapuano/gym-laser), and try out the [live demo](https://huggingface.co/spaces/fracapuano/RLaser).
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ --extra-index-url https://test.pypi.org/simple/
2
+
3
+ gradio==5.38.0
4
+ gym_laser==0.1.0
5
+ gymnasium==1.0.0
6
+ huggingface_hub==0.33.4
7
+ matplotlib==3.10.3
8
+ stable_baselines3==2.5.0