# method_comparison/app.py
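
# This excerpt assumes the usual module-level setup of app.py, not shown here: the gradio import below and,
# defined or imported elsewhere in the module, the helpers used inside build_app (get_model_ids, filter_data,
# parse_and_filter, compute_pareto_frontier, generate_pareto_plot, compute_pareto_summary, export_csv), as well
# as `metric_preferences`, assumed to be a dict mapping each metric name to whether higher or lower is better.
import gradio as gr
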
def build_app(df):
    with gr.Blocks(theme=gr.themes.Soft()) as demo:
        gr.Markdown("# PEFT method comparison")
        gr.Markdown(
            "Find more information [on the PEFT GitHub repo](https://github.com/huggingface/peft/tree/main/method_comparison)"
        )
        # Hidden state to store the current filter query.
        filter_state = gr.State("")

        gr.Markdown("## Choose the task and base model")
        with gr.Row():
            task_dropdown = gr.Dropdown(
                label="Select Task",
                choices=sorted(df["task_name"].unique()),
                value=sorted(df["task_name"].unique())[0],
            )
            model_dropdown = gr.Dropdown(
                label="Select Model ID", choices=get_model_ids(sorted(df["task_name"].unique())[0], df)
            )
        data_table = gr.DataFrame(label="Results", value=df, interactive=False)

        with gr.Row():
            filter_textbox = gr.Textbox(
                label="Filter DataFrame",
                placeholder="Enter filter (e.g. peft_type == 'LORA')",
                interactive=True,
            )
            apply_filter_button = gr.Button("Apply Filter")
            reset_filter_button = gr.Button("Reset Filter")

        gr.Markdown("## Pareto plot")
        gr.Markdown(
            "Select two metrics to plot the Pareto frontier. This shows the best PEFT methods along these axes and "
            "the trade-offs between them. PEFT methods on the Pareto frontier are shown in color; all other "
            "methods are inferior with regard to these two metrics. Hover over a point to show details."
        )
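
        # Note (assumed convention): a run is on the Pareto frontier if no other run is at least as good on both
        # selected metrics, in the direction given by `metric_preferences` (e.g. lower cuda_memory_max, higher
        # test_accuracy), and strictly better on at least one. compute_pareto_frontier is expected to return
        # exactly those non-dominated runs.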
        with gr.Row():
            x_default = (
                "cuda_memory_max" if "cuda_memory_max" in metric_preferences else list(metric_preferences.keys())[0]
            )
            y_default = (
                "test_accuracy" if "test_accuracy" in metric_preferences else list(metric_preferences.keys())[1]
            )
            metric_x_dropdown = gr.Dropdown(
                label="1st metric for Pareto plot",
                choices=list(metric_preferences.keys()),
                value=x_default,
            )
            metric_y_dropdown = gr.Dropdown(
                label="2nd metric for Pareto plot",
                choices=list(metric_preferences.keys()),
                value=y_default,
            )
        pareto_plot = gr.Plot(label="Pareto Frontier Plot")
        summary_box = gr.Textbox(label="Summary Statistics", lines=6)
        csv_output = gr.File(label="Export Filtered Data as CSV")
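
        # The helpers used below are defined elsewhere in the module (not shown here). Judging from the call
        # sites, their rough contracts are assumed to be (a sketch, not the authoritative signatures):
        #   get_model_ids(task_name, df)                     -> list of model IDs available for that task
        #   filter_data(task_name, model_id, df)             -> DataFrame restricted to the task/model
        #   parse_and_filter(df, query)                      -> boolean mask selecting rows matching `query`
        #   compute_pareto_frontier(df, metric_x, metric_y)  -> DataFrame of non-dominated rows
        #   generate_pareto_plot(df, metric_x, metric_y)     -> figure object for gr.Plot
        #   compute_pareto_summary(df, pareto_df, x, y)      -> summary string for the textbox
        #   export_csv(df)                                   -> file path (or file object) for gr.File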
        def update_on_task(task_name, current_filter):
            new_models = get_model_ids(task_name, df)
            filtered = filter_data(task_name, new_models[0] if new_models else "", df)
            if current_filter.strip():
                try:
                    mask = parse_and_filter(filtered, current_filter)
                    df_queried = filtered[mask]
                    if not df_queried.empty:
                        filtered = df_queried
                except Exception:
                    # Invalid filter query: ignore it and fall back to the unfiltered data.
                    pass
            return gr.update(choices=new_models, value=new_models[0] if new_models else None), filtered

        task_dropdown.change(
            fn=update_on_task, inputs=[task_dropdown, filter_state], outputs=[model_dropdown, data_table]
        )

        def update_on_model(task_name, model_id, current_filter):
            filtered = filter_data(task_name, model_id, df)
            if current_filter.strip():
                try:
                    mask = parse_and_filter(filtered, current_filter)
                    filtered = filtered[mask]
                except Exception:
                    pass
            return filtered

        model_dropdown.change(
            fn=update_on_model, inputs=[task_dropdown, model_dropdown, filter_state], outputs=data_table
        )

        def update_pareto_plot_and_summary(task_name, model_id, metric_x, metric_y, current_filter):
            filtered = filter_data(task_name, model_id, df)
            if current_filter.strip():
                try:
                    mask = parse_and_filter(filtered, current_filter)
                    filtered = filtered[mask]
                except Exception as e:
                    return generate_pareto_plot(filtered, metric_x, metric_y), f"Filter error: {e}"
            pareto_df = compute_pareto_frontier(filtered, metric_x, metric_y)
            fig = generate_pareto_plot(filtered, metric_x, metric_y)
            summary = compute_pareto_summary(filtered, pareto_df, metric_x, metric_y)
            return fig, summary

        for comp in [model_dropdown, metric_x_dropdown, metric_y_dropdown]:
            comp.change(
                fn=update_pareto_plot_and_summary,
                inputs=[task_dropdown, model_dropdown, metric_x_dropdown, metric_y_dropdown, filter_state],
                outputs=[pareto_plot, summary_box],
            )
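
        # The filter string is assumed to be a pandas-query-style boolean expression over the result columns,
        # evaluated by parse_and_filter. Illustrative examples (the numeric threshold is made up):
        #   peft_type == 'LORA'
        #   peft_type == 'LORA' and cuda_memory_max < 20_000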
        def apply_filter(filter_query, task_name, model_id, metric_x, metric_y):
            filtered = filter_data(task_name, model_id, df)
            if filter_query.strip():
                try:
                    mask = parse_and_filter(filtered, filter_query)
                    filtered = filtered[mask]
                except Exception as e:
                    # Still update the table and plot, but surface the filter error in the summary box.
                    return (
                        filter_query,
                        filtered,
                        generate_pareto_plot(filtered, metric_x, metric_y),
                        f"Filter error: {e}",
                    )
            pareto_df = compute_pareto_frontier(filtered, metric_x, metric_y)
            fig = generate_pareto_plot(filtered, metric_x, metric_y)
            summary = compute_pareto_summary(filtered, pareto_df, metric_x, metric_y)
            return filter_query, filtered, fig, summary

        apply_filter_button.click(
            fn=apply_filter,
            inputs=[filter_textbox, task_dropdown, model_dropdown, metric_x_dropdown, metric_y_dropdown],
            outputs=[filter_state, data_table, pareto_plot, summary_box],
        )

        def reset_filter(task_name, model_id, metric_x, metric_y):
            filtered = filter_data(task_name, model_id, df)
            pareto_df = compute_pareto_frontier(filtered, metric_x, metric_y)
            fig = generate_pareto_plot(filtered, metric_x, metric_y)
            summary = compute_pareto_summary(filtered, pareto_df, metric_x, metric_y)
            # Return empty strings to clear the filter state and textbox.
            return "", "", filtered, fig, summary

        reset_filter_button.click(
            fn=reset_filter,
            inputs=[task_dropdown, model_dropdown, metric_x_dropdown, metric_y_dropdown],
            outputs=[filter_state, filter_textbox, data_table, pareto_plot, summary_box],
        )

        gr.Markdown("## Export data")
        # Export button for CSV download.
        export_button = gr.Button("Export Filtered Data")
        export_button.click(
            fn=lambda task, model: export_csv(filter_data(task, model, df)),
            inputs=[task_dropdown, model_dropdown],
            outputs=csv_output,
        )

        # Populate the plot and summary once when the page loads.
        demo.load(
            fn=update_pareto_plot_and_summary,
            inputs=[task_dropdown, model_dropdown, metric_x_dropdown, metric_y_dropdown, filter_state],
            outputs=[pareto_plot, summary_box],
        )

    return demo
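

# Minimal usage sketch (assumption, not necessarily the repo's actual entry point): build the app from a
# results DataFrame and serve it. The CSV path is a hypothetical placeholder for wherever the benchmark
# results are actually loaded from.
if __name__ == "__main__":
    import pandas as pd

    results = pd.read_csv("results.csv")  # hypothetical path; the real app assembles `df` differently
    build_app(results).launch()  # gr.Blocks.launch() starts the local Gradio server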