|
import gradio as gr |
|
import pandas as pd |
|
import plotly.express as px |
|
import plotly.graph_objects as go |
|
from scipy.stats import zscore |
|
import scipy.interpolate |
|
import numpy as np |
|
import plotly.figure_factory as ff |
|
|
|
|
|
df = pd.read_parquet("hf://datasets/derek-thomas/classification-ie-optimization/data/train-00000-of-00001.parquet") |
|
|
|
|
|
df['image'] = df['image'].map({ |
|
'michaelf34/infinity:0.0.75-trt-onnx': 'trt-onnx', |
|
'michaelf34/infinity:0.0.75': 'default' |
|
}) |
|
|
|
best_config = df[['1B_cost', 'hw_type', 'image', 'batch_size', 'vus']].sort_values(by='1B_cost').head(n=1)
|
best_config['1B_cost'] = best_config['1B_cost'].round(2) |
|
|
|
def count_failed_requests(df):
    """Total failed requests across all runs; should be 0 if every request succeeded."""
    return (df.total_requests - df.successful_requests).sum()
|
|
|
def top_outliers(df):
    """Return the 5 runs whose accuracy deviates most from the mean accuracy."""
    # Work on a copy so we don't silently add helper columns to the shared dataframe.
    df = df.copy()

    df['absolute_deviation'] = abs(df['accuracy_percentage'] - df['accuracy_percentage'].mean())
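    # The z-score below is the same deviation expressed in units of standard
    # deviations: (value - mean) / std.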
|
|
|
|
|
df['z_score'] = zscore(df['accuracy_percentage']) |
|
|
|
|
|
top_outliers = df.nlargest(5, 'absolute_deviation')[['hw_type', 'batch_size', 'vus', 'total_requests', 'accuracy_percentage', 'absolute_deviation', 'z_score']] |
|
top_outliers['accuracy_percentage'] = top_outliers['accuracy_percentage'].round(2) |
|
top_outliers['absolute_deviation'] = top_outliers['absolute_deviation'].round(2) |
|
top_outliers['z_score'] = top_outliers['z_score'].round(2) |
|
return top_outliers |
|
|
|
def best_image_by_cost_savings(df): |
|
|
|
grouped = df.groupby(["vus", "batch_size", "hw_type"]) |
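    # A cost comparison only makes sense where both images were benchmarked for
    # the same (VUs, batch size, hardware) combination, so drop single-image groups.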
|
|
|
|
|
valid_groups = grouped.filter(lambda x: x["image"].nunique() > 1) |
|
|
|
|
|
def compute_best_image(group): |
|
"""Find the best (cheapest) image in each (VUs, batch_size, hw_type) group.""" |
|
group = group.sort_values("1B_cost", ascending=True) |
|
|
|
best_image = group.iloc[0]["image"] if not group.empty else None |
|
cost_max = group["1B_cost"].max() |
|
group["better_image"] = best_image |
|
group["cost_savings_percent"] = ( |
|
100 * (cost_max - group["1B_cost"]) / cost_max if cost_max > 0 else 0 |
|
) |
|
|
|
return group |
|
|
|
cost_diffs = valid_groups.groupby(["vus", "batch_size", "hw_type"]).apply(compute_best_image).reset_index(drop=True) |
|
|
|
|
|
unique_images = cost_diffs["better_image"].unique() |
|
colors = px.colors.qualitative.Set1 |
|
|
|
|
|
color_map = {image: colors[i % len(colors)] for i, image in enumerate(unique_images)} |
|
|
|
|
|
figs = [] |
|
for hw in cost_diffs["hw_type"].unique(): |
|
subset = cost_diffs[cost_diffs["hw_type"] == hw] |
|
|
|
fig = px.scatter( |
|
subset, |
|
x="batch_size", |
|
y="vus", |
|
color="better_image", |
|
size="cost_savings_percent", |
|
title=f"Best Image by Cost Savings - HW Type: {hw}<br><sup>Color = Best Image (Consistent). Size = Savings % of cheaper image</sup>", |
|
labels={"batch_size": "Batch Size (log)", "vus": "VUs (log)", "better_image": "Cheaper Image"}, |
|
hover_data=["1B_cost", "cost_savings_percent"], |
|
log_x=True, |
|
log_y=True, |
|
color_discrete_map=color_map, |
|
category_orders={"better_image": sorted(unique_images)} |
|
|
|
) |
|
figs.append(fig) |
|
return figs |
|
|
|
|
|
def plot_accuracy_distribution(df): |
|
mean_accuracy = df["accuracy_percentage"].mean() |
|
std_dev_accuracy = df["accuracy_percentage"].std() |
|
|
|
|
|
fig = ff.create_distplot([df['accuracy_percentage']], ['Accuracy Percentage'], show_hist=False, show_rug=True) |
|
|
|
|
|
fig.add_trace(go.Scatter(x=[mean_accuracy, mean_accuracy], y=[0, 1], |
|
mode="lines", name="Mean", line=dict(color="red", dash="dash"))) |
|
|
|
fig.add_trace(go.Scatter(x=[mean_accuracy - std_dev_accuracy, mean_accuracy - std_dev_accuracy], |
|
y=[0, 1], |
|
mode="lines", name="Mean - 1 Std Dev", line=dict(color="blue", dash="dot"))) |
|
|
|
fig.add_trace(go.Scatter(x=[mean_accuracy + std_dev_accuracy, mean_accuracy + std_dev_accuracy], |
|
y=[0, 1], |
|
mode="lines", name="Mean + 1 Std Dev", line=dict(color="blue", dash="dot"))) |
|
|
|
|
|
fig.update_layout(title="Density Plot of Accuracy Percentage", |
|
xaxis_title="Accuracy Percentage", |
|
yaxis_title="Density", |
|
showlegend=True) |
|
return fig |
|
|
|
def plot_cost_vs_latency(df): |
|
|
|
bottom_100 = df.nsmallest(100, "1B_cost").copy() |
|
bottom_100["1B_cost"] = bottom_100["1B_cost"].round(2) |
|
bottom_100["throughput_req_per_sec"] = bottom_100["throughput_req_per_sec"].round(2) |
|
bottom_100["avg_latency_ms"] = bottom_100["avg_latency_ms"].round(3) |
|
|
|
|
|
bottom_100["hw_image_combo"] = bottom_100["hw_type"] + " | " + bottom_100["image"] |
|
|
|
|
|
global_min = bottom_100.nsmallest(1, "1B_cost") |
|
|
|
|
|
    def pareto_efficient(df, x_col, y_col):
        """Return the Pareto front: points that no other point beats on both axes."""
        # Scan in order of increasing latency; a point joins the front whenever
        # it sets a new running minimum on cost.
        sorted_df = df.sort_values(by=[x_col, y_col])
        pareto_points = []
        min_cost = np.inf

        for _, row in sorted_df.iterrows():
            if row[y_col] < min_cost:
                pareto_points.append(row)
                min_cost = row[y_col]

        return pd.DataFrame(pareto_points)
|
|
|
|
|
pareto_front = pareto_efficient(bottom_100, "avg_latency_ms", "1B_cost") |
|
|
|
|
|
fig = px.scatter( |
|
bottom_100, |
|
x="avg_latency_ms", |
|
y="1B_cost", |
|
symbol="hw_image_combo", |
|
color="batch_size", |
|
color_continuous_scale="viridis", |
|
opacity=0.7, |
|
title="1B Requests Cost/day vs. Latency<br><sup>Pareto-efficient points and global min highlighted</sup>", |
|
labels={ |
|
"avg_latency_ms": "Average Latency (ms)", |
|
"1B_cost": "Daily Cost ($)", |
|
"hw_image_combo": "Hardware | Image", |
|
"batch_size": "Batch Size", |
|
}, |
|
hover_data=["vus", "batch_size", "throughput_req_per_sec"] |
|
) |
|
|
|
|
|
fig.add_trace( |
|
go.Scatter( |
|
x=global_min["avg_latency_ms"], |
|
y=global_min["1B_cost"], |
|
mode="markers", |
|
marker=dict(size=12, color="red", symbol="star", line=dict(width=2, color="black")), |
|
name="Global Min Cost", |
|
hovertemplate="Latency: %{x} ms<br>Cost: $%{y}<br>Batch Size: %{text}<br>VUs: %{customdata[0]}<br>Throughput: %{customdata[1]} req/sec", |
|
text=global_min["batch_size"], |
|
customdata=global_min[["vus", "throughput_req_per_sec"]].values, |
|
showlegend=False |
|
) |
|
) |
|
|
|
|
|
fig.add_trace( |
|
go.Scatter( |
|
x=pareto_front["avg_latency_ms"], |
|
y=pareto_front["1B_cost"], |
|
mode="lines+markers", |
|
line=dict(color="red", width=2, dash="dash"), |
|
marker=dict(size=6, color="red"), |
|
name="Pareto Front", |
|
hovertemplate="Latency: %{x} ms<br>Cost: $%{y}<br>Batch Size: %{text}<br>VUs: %{customdata[0]}<br>Throughput: %{customdata[1]} req/sec", |
|
text=pareto_front["batch_size"], |
|
customdata=pareto_front[["vus", "throughput_req_per_sec"]].values, |
|
showlegend=False |
|
) |
|
) |
|
|
|
|
|
fig.update_layout( |
|
title_x=0.5, |
|
legend=dict( |
|
x=1, |
|
y=1.2, |
|
title="Hardware | Image" |
|
) |
|
    )
|
return fig |
|
|
|
def plot_cost_vs_vus_batch(df, hw=None, img=None): |
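    """Contour plot of interpolated `1B_cost` over the (VUs, batch size) plane."""
    # Contour levels are cost percentiles, so each band covers roughly the same
    # number of observed runs rather than an even slice of the cost range.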
|
|
|
percentiles = np.linspace(0, 100, 40) |
|
cost_bins = np.percentile(df["1B_cost"], percentiles) |
|
|
|
|
|
grid_x_real, grid_y_real = np.meshgrid( |
|
np.linspace(df["vus"].min(), df["vus"].max(), 100), |
|
np.linspace(df["batch_size"].min(), df["batch_size"].max(), 100) |
|
) |
|
|
|
|
|
grid_z_real = scipy.interpolate.griddata( |
|
(df["vus"], df["batch_size"]), |
|
df["1B_cost"], |
|
(grid_x_real, grid_y_real), |
|
method='linear' |
|
) |
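    # Linear griddata returns NaN outside the convex hull of the measured
    # (vus, batch_size) points, so the contour is simply blank out there.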
|
|
|
|
|
lowest_cost_points = df.nsmallest(1, "1B_cost") |
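
    # Also mark the cheapest run within each batch size; idxmin returns the
    # index of the per-group cost minimum.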
|
|
|
|
|
min_per_batch = df.loc[df.groupby("batch_size")["1B_cost"].idxmin()] |
|
|
|
|
|
fig = go.Figure() |
|
|
|
|
|
fig.add_trace( |
|
go.Contour( |
|
z=grid_z_real, |
|
x=np.linspace(df["vus"].min(), df["vus"].max(), 100), |
|
y=np.linspace(df["batch_size"].min(), df["batch_size"].max(), 100), |
|
colorscale="viridis_r", |
|
contours=dict( |
|
start=cost_bins[0], |
|
end=cost_bins[-1], |
|
size=np.diff(cost_bins).mean(), |
|
showlabels=True |
|
), |
|
colorbar=dict(title="Cost (1B Requests)"), |
|
hovertemplate="VUs: %{x}<br>Batch Size: %{y}<br>Cost: %{z}", |
|
opacity=0.8 |
|
) |
|
) |
|
|
|
|
|
fig.add_trace( |
|
go.Scatter( |
|
x=df["vus"], |
|
y=df["batch_size"], |
|
mode="markers", |
|
marker=dict(size=3, color="white", line=dict(width=0.5, color="black")), |
|
name="Real Data Points", |
|
hovertemplate="VUs: %{x}<br>Batch Size: %{y}<br>Cost: %{text}", |
|
text=df["1B_cost"].round(2), |
|
showlegend=False |
|
) |
|
) |
|
|
|
|
|
fig.add_trace( |
|
go.Scatter( |
|
x=lowest_cost_points["vus"], |
|
y=lowest_cost_points["batch_size"], |
|
mode="markers+text", |
|
marker=dict(size=10, color="red", symbol="star", line=dict(width=1.5, color="black")), |
|
name="Lowest Cost Point", |
|
hovertemplate="VUs: %{x}<br>Batch Size: %{y}<br>Cost: %{text}", |
|
text=lowest_cost_points["1B_cost"].round(2), |
|
textposition="top center", |
|
showlegend=False |
|
) |
|
) |
|
|
|
|
|
fig.add_trace( |
|
go.Scatter( |
|
x=min_per_batch["vus"], |
|
y=min_per_batch["batch_size"], |
|
mode="markers", |
|
marker=dict(size=6, color="red", line=dict(width=0.5, color="black")), |
|
name="Min Cost per Batch Size", |
|
hovertemplate="VUs: %{x}<br>Batch Size: %{y}<br>Cost: %{text}", |
|
text=min_per_batch["1B_cost"].round(2), |
|
showlegend=False |
|
) |
|
) |
|
|
|
    fig.update_layout(
        xaxis_title="VUs",
        yaxis_title="Batch Size",
        xaxis_type="log",
        yaxis_type="log",
    )

    title_filtered = f"Contour Plot of Cost vs VUs and Batch Size ({hw}, Image: {img})<br><sup>The lowest-cost point per batch size is highlighted in red</sup>"
    title_all = "Contour Plot of Cost vs VUs and Batch Size<br><sup>The lowest-cost point per batch size is highlighted in red</sup>"
    fig.update_layout(
        title={
            "text": title_filtered if hw else title_all,
            "x": 0.5,
            "y": 0.85,
        }
    )
|
|
|
return fig |
|
|
|
monotonic_md = """## Did we try enough VUs? |
|
How do we know that we tried enough VUs? What if we tried a higher amount of VUs and throughput kept increasing? If thats the case then we would see a monotonicly increasing relationship between VUs and Throughput and we would need to run more tests. Lets check this out! |
|
|
|
We can check by: |
|
1. **Grouping data** by `hw_type` and `batch_size` to match how we generated the experiments |
|
1. **Sorting the data** by `vus` within each group to ensure we get the data in the correct order for our check |
|
1. **Check for monotonic increase** in `throughput_req_per_sec` flag the groups that always increase throughput as VUs increase |
|
|
|
But how do we **know**? We can use the slider to check what would have happened if we had not tried past a certain amount. Lets say that if we tried 256 instead of our actual 1024 we would have left some potential on the table, we can simulate this by filtering our runs. |
|
|
|
### Verification |
|
1. Put the slider at `256` and see that there are a number of scenarios where we should have checked for a higher VU count |
|
1. Put the slider at `1024 and verify that there are no scenarios shown |
|
""" |
|
|
|
def filter_dataframe(df, vus_filter): |
|
return df[df['vus'] <= vus_filter] |
|
|
|
def get_monotonic_dataframe(df, vus_filter):
    """Return the (hw_type, image, batch_size) groups whose throughput is still
    monotonically increasing at the VU cap, i.e. candidates for more testing."""
    df_filtered = filter_dataframe(df, vus_filter)
    grouped = df_filtered.groupby(['hw_type', 'image', 'batch_size'])
|
|
|
monotonic_series = {} |
|
for (hw_type, image, batch_size), group in grouped: |
|
group_sorted = group.sort_values('vus').reset_index(drop=True) |
|
if group_sorted['throughput_req_per_sec'].is_monotonic_increasing: |
|
monotonic_series[(hw_type, image, batch_size)] = group_sorted[['vus', 'throughput_req_per_sec']] |
|
|
|
if not monotonic_series: |
|
return pd.DataFrame(columns=['hw_type', 'image', 'batch_size']) |
|
|
|
results_df = pd.DataFrame([(hw_type, image, batch_size) for (hw_type, image, batch_size) in monotonic_series.keys()], |
|
columns=['hw_type', 'image', 'batch_size']) |
|
return results_df |
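
# Example usage (hypothetical value): simulate having stopped the VU sweep at
# 256. Any rows returned are configurations whose throughput was still climbing
# at that cap, i.e. we should have benchmarked higher VU counts for them.
# still_rising = get_monotonic_dataframe(df, vus_filter=256)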
|
|
|
|
|
enough_vus_md = """## Did we try enough VUs? |
|
How do we know that we tried enough VUs? What if we had tried a higher number of VUs and throughput kept increasing? If that's the case, we would see a monotonically increasing relationship between VUs and throughput, and we would need to run more tests. Let's check this out!
|
|
|
We can check by: |
|
1. **Grouping data** by `hw_type`, `image`, and `batch_size` to match how we generated the experiments
1. **Sorting the data** by `vus` within each group so the check sees the runs in the correct order
1. **Checking for a monotonic increase** in `throughput_req_per_sec` and flagging the groups whose throughput always rises as VUs increase (see the sketch below)
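
A minimal sketch of that check in pandas (the app's `get_monotonic_dataframe` helper implements the same test and returns the qualifying groups):

```python
rising = (
    df.sort_values("vus")
      .groupby(["hw_type", "image", "batch_size"])["throughput_req_per_sec"]
      .apply(lambda s: s.is_monotonic_increasing)
)
```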
|
|
|
But how do we **know**? We can use the slider to check what would have happened if we had not tested past a certain point. Say we had stopped at 256 VUs instead of our actual 1024; we might have left some potential on the table. We can simulate this by filtering our runs.
|
|
|
### Verification |
|
1. Put the slider at `256` and see that there are a number of scenarios where we should have checked for a higher VU count |
|
1. Put the slider at `1024` and verify that there are no scenarios shown
|
""" |
|
|
|
accuracy_md1 = """ |
|
## Are we Accurate Enough? |
|
We shouldn't expect to see significant changes in accuracy. We should see a pretty tight distribution, but there might be some deviation, since lower-VU runs won't get through as many of our `10_000` samples as the higher-VU runs did.
|
""" |
|
accuracy_md2 = """ |
|
Here we can see some deviation with a large z-score, but overall it's not that large an absolute deviation. These also occur when we have relatively low `total_requests`, which makes sense.
|
|
|
We should worry more if we see major `absolute_deviation` with higher `total_requests`. We can see those values here: |
|
""" |
|
|
|
best_image_by_cost_savings_md = """## Best Image by Cost Savings |
|
|
|
### Chart |
|
- Color = Best (cheapest) image for that `vus`/`batch_size`/GPU combination
- Size = % cost savings vs. the worst (most expensive) image in that group (computed as shown below)
- Small dots don't mean much; large dots do
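
Concretely, the per-group savings behind each dot's size (from `compute_best_image` in the app code):

```python
cost_max = group["1B_cost"].max()
group["cost_savings_percent"] = 100 * (cost_max - group["1B_cost"]) / cost_max
```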
|
|
|
|
|
### Analysis |
|
We can see that `trt-onnx` is quite a bit stronger on `nvidia-l4`. There are no significant red dots.
|
|
|
#### `nvidia-l4` |
|
- `trt-onnx` (blue) dominates most points, indicating it's typically the cheaper choice |
|
- At larger batch sizes (right side) and higher VUs (upper part of the chart), you often see big blue bubbles, suggesting `trt-onnx` can save a significant percentage versus `default` |
|
- A few red points (i.e., `default` cheaper) appear at lower batch sizes, but they're less frequent and often show smaller savings differences |
|
|
|
#### `nvidia-t4` |
|
- There's more of a mix: some points favor `default` and others favor `trt-onnx` |
|
- You can see some large red bubbles, meaning `default` can occasionally produce big savings under certain (VUs, batch_size) conditions |
|
- However, `trt-onnx` is still cheaper in many scenarios, especially toward higher batch sizes |
|
|
|
### Takeaways |
|
If you have the time/budget, it's better to analyze both; you can see that they are close at times. But if you only have the time/budget for one, consider the `nvidia-l4` at the current cost ratio.
|
""" |
|
|
|
cost_vs_latency_md = """## 1B Requests Cost vs. Latency |
|
|
|
This scatter plot visualizes the relationship between **average latency (ms)** and **cost per billion requests per day** for different **hardware types (`hw_type`)** and **container images (`image`)**.
|
|
|
### How to Read the Chart: |
|
- **Point Symbols**: Represent different **hardware + image** configurations. |
|
- **Color Gradient**: Represents batch size, helping to see cost trends across different batch sizes. |
|
- **Hover Data**: Displays additional details like **VUs, batch size, and throughput per second**. |
|
|
|
### Key Features: |
|
- **Global Minimum Cost (Red Star)**: Marks the configuration with the lowest cost. |
|
- **Pareto Front (Red Dashed Line + Points)**: Highlights the most efficient configurations, minimizing both cost and latency. These configurations offer the best trade-offs. |
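
A point is Pareto-efficient when no other configuration beats it on both axes at once. The front is traced by scanning points in order of increasing latency and keeping every new running cost minimum; condensed from the `pareto_efficient` helper that draws it:

```python
front, min_cost = [], float("inf")
for _, row in df.sort_values(["avg_latency_ms", "1B_cost"]).iterrows():
    if row["1B_cost"] < min_cost:  # strictly cheaper than every faster point
        front.append(row)
        min_cost = row["1B_cost"]
```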
|
|
|
### How to Use: |
|
- Find the **lowest-cost, low-latency configurations** by looking at points near the bottom-left. |
|
- Use the **Pareto front** to identify cost-effective configurations. |
|
- Compare different **hardware and container image combinations** to optimize your setup.

This visualization helps in selecting the configuration that best balances **performance (low latency)** and **cost efficiency**.
|
""" |
|
|
|
contour_md = """## Cost vs VUs and Batch Size Contour Plots |
|
|
|
These contour plots visualize the cost per billion requests per day (`1B_cost`) as a function of **VUs (Virtual Users)** and **Batch Size** for different hardware configurations (`hw_type`) and container images (`image`).

The white dots are real measured points; the surface in between is interpolated.
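
The interpolation itself is a single call to `scipy.interpolate.griddata` (variable names abbreviated from `plot_cost_vs_vus_batch`):

```python
grid_z = scipy.interpolate.griddata(
    (df["vus"], df["batch_size"]),  # measured coordinates
    df["1B_cost"],                  # measured costs
    (grid_x, grid_y),               # dense 100x100 grid to fill in
    method="linear",
)
```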
|
|
|
### How to Read the Charts: |
|
- **Color Gradient**: Shows the cost levels, with darker colors representing higher costs and lighter colors representing lower costs. |
|
- **Contour Lines**: Represent cost levels, helping identify cost-effective regions. |
|
- **White Dots**: Represent real data points used to generate the interpolated surface. |
|
- **Red Stars**: Highlight the lowest cost point in the dataset. |
|
- **Small Red Dots**: Indicate the lowest cost for each batch size. |
|
- **Tight clusters of contour lines**: Indicate that cost changes rapidly with small adjustments to batch size or VUs.
|
|
|
### How to Use: |
|
- Identify the **lowest cost configurations** (red stars and dots). |
|
- Observe how **cost changes** with batch size and VUs to optimize your setup. |
|
- Compare different hardware types (`hw_type`) and container images (`image`) to find the best-performing configuration.
|
|
|
### Analysis |
|
Overall we can see that `nvidia-t4`s are more expensive for this cost ratio and task. We should consider using the `nvidia-l4`.
|
|
|
| GPU | Image | Batch Size | VUs | Min Cost | |
|
|-----------|------------|------------|-----|----------| |
|
| nvidia-t4 | `trt-onnx` | 512 | 48 | $611.07 | |
|
| nvidia-t4 | `default` | 32 | 32 | $622.81 | |
|
| nvidia-l4 | `trt-onnx` | 64 | 448 | $255.07 | |
|
| nvidia-l4 | `default` | 64 | 448 | $253.82 | |
|
|
|
We can see a clear winner with `nvidia-l4` over `nvidia-t4` at this cost ratio. But surprisingly we see `default` slightly outperform `trt-onnx`. |
|
I think we should be careful not to overfit. These numbers can vary per run, but it's good to know that each image can be competitive.
|
|
|
#### `nvidia-t4` |
|
- Here we can see that `trt-onnx` and `default` both perform similarly, with `trt-onnx` having a slight edge.
|
- `trt-onnx` has a lower overall cost band (~611–659) than `default` (~623–676) |
|
|
|
#### `nvidia-l4` |
|
- `trt-onnx` has a broad area of relatively low cost and hits a very low floor (~255) |
|
- This is great since it shows that we get consistently good results! |
|
- `default` can also dip into the mid-200s in certain spots, but it has bigger, more expensive areas, especially at lower VUs and batch sizes.
|
- This means we need to spend time to optimize it |
|
|
|
### Conclusion |
|
If I have time, I might analyze the `nvidia-l4` with `trt-onnx` across some different runs. Despite being `$1.25` more expensive per 1B requests, it's a safer, more consistent bet IMO.
|
""" |
|
|
|
with gr.Blocks() as demo: |
|
with gr.Sidebar(): |
|
gr.Markdown(""" |
|
# Classification Optimization |
|
|
|
## Sanity Check Charts: |
|
- **No Failed Requests**: Verify that all requests were successful. |
|
- **Monotonic Series**: Ensure that we tried enough VUs.
|
- **Accuracy Distribution**: Evaluate the consistency of accuracy across runs. |
|
## Cost Analysis Charts |
|
- **Best Image by Cost Savings**: Identify the best image based on cost savings.
|
- **Cost vs Latency**: Identify optimal configurations balancing cost and latency. |
|
- **Cost vs VUs & Batch**: Analyze cost trends based on VUs and batch size. |
|
""") |
|
gr.Markdown("## Best Config") |
|
gr.HTML(best_config.transpose().to_html(header=False)) |
|
with gr.Tab("Cost Analysis"): |
|
with gr.Tab("Cost vs VUs & Batch"): |
|
gr.Markdown(contour_md) |
|
for hw in df["hw_type"].unique(): |
|
for img in df["image"].unique(): |
|
df_hw_img = df[(df["hw_type"] == hw) & (df["image"] == img) & (df["vus"] > 20)].copy() |
|
gr.Plot(plot_cost_vs_vus_batch(df_hw_img, hw=hw, img=img)) |
|
with gr.Tab("Best Image by Cost Savings"): |
|
gr.Markdown(best_image_by_cost_savings_md) |
|
for fig in best_image_by_cost_savings(df): |
|
gr.Plot(fig) |
|
with gr.Tab("Cost vs Latency"): |
|
gr.Markdown(cost_vs_latency_md) |
|
gr.Plot(plot_cost_vs_latency(df)) |
|
with gr.Tab("Sanity Checks"): |
|
|
|
with gr.Tab("Failed Requests"): |
|
gr.Markdown("### Failed Requests Check\nIf all requests were successful, the result should be 0.") |
|
            gr.Text(value=str(count_failed_requests(df)), interactive=False)
|
|
|
with gr.Tab("Monotonic Series"): |
|
gr.Markdown(enough_vus_md) |
|
vus_slider = gr.Slider(minimum=0, maximum=df['vus'].max(), value=1024, label="VUs Filter") |
|
|
|
|
|
@gr.render(inputs=vus_slider) |
|
def plot_monotonic_series(vus_filter): |
|
gr.Markdown("### Monotonic Series Dataframe") |
|
gr.Dataframe(value=get_monotonic_dataframe(df, vus_filter)) |
|
df_filtered = filter_dataframe(df, vus_filter) |
|
grouped = df_filtered.groupby(['hw_type', 'image', 'batch_size']) |
|
|
|
monotonic_series = {} |
|
for (hw_type, image, batch_size), group in grouped: |
|
group_sorted = group.sort_values('vus').reset_index(drop=True) |
|
if group_sorted['throughput_req_per_sec'].is_monotonic_increasing: |
|
monotonic_series[(hw_type, image, batch_size)] = group_sorted[['vus', 'throughput_req_per_sec']] |
|
|
|
if not monotonic_series: |
|
gr.Markdown("### No monotonically increasing series found.") |
|
else: |
|
gr.Markdown("### Plots of Monotonic Series") |
|
for (hw_type, image, batch_size), data in monotonic_series.items(): |
|
fig = px.line(data, x='vus', y='throughput_req_per_sec', markers=True, |
|
title=f'Throughput Trend for HW: {hw_type}, Image: {image}, Batch: {batch_size}') |
|
gr.Plot(fig) |
|
|
|
with gr.Tab("Accuracy Distribution"): |
|
gr.Markdown(accuracy_md1) |
|
gr.Plot(plot_accuracy_distribution(df)) |
|
gr.Markdown(accuracy_md2) |
|
gr.Dataframe(top_outliers(df)) |
|
|
|
demo.launch() |
|
|