Minor UI tweaks #37

Merged
3 commits merged on Jun 11, 2025
57 changes: 57 additions & 0 deletions examples/deploy-on-spaces.py
@@ -0,0 +1,57 @@
import random

from tqdm import tqdm

import trackio as wandb

project_id = random.randint(10000, 99999)

wandb.init(
    project=f"fake-training-{project_id}",
    name="test-run",
    config=dict(
        epochs=5,
        learning_rate=0.001,
        batch_size=32,
    ),
    space_id=f"abidlabs/trackio-{project_id}",
)

EPOCHS = 5
NUM_TRAIN_BATCHES = 100
NUM_VAL_BATCHES = 20

for epoch in range(EPOCHS):
    train_loss = 0
    train_accuracy = 0
    val_loss = 0
    val_accuracy = 0

    for _ in tqdm(range(NUM_TRAIN_BATCHES), desc=f"Epoch {epoch + 1} - Training"):
        loss = random.uniform(0.2, 1.0)
        accuracy = random.uniform(0.6, 0.95)
        train_loss += loss
        train_accuracy += accuracy

    for _ in tqdm(range(NUM_VAL_BATCHES), desc=f"Epoch {epoch + 1} - Validation"):
        loss = random.uniform(0.2, 0.9)
        accuracy = random.uniform(0.65, 0.98)
        val_loss += loss
        val_accuracy += accuracy

    train_loss /= NUM_TRAIN_BATCHES
    train_accuracy /= NUM_TRAIN_BATCHES
    val_loss /= NUM_VAL_BATCHES
    val_accuracy /= NUM_VAL_BATCHES

    wandb.log(
        {
            "epoch": epoch + 1,
            "train_loss": train_loss,
            "train_accuracy": train_accuracy,
            "val_loss": val_loss,
            "val_accuracy": val_accuracy,
        }
    )

wandb.finish()
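
For reference, the new example above exercises only three trackio calls through the wandb alias: init, log, and finish. A minimal local sketch using the same calls; the project name is made up, and the assumption that omitting space_id keeps the run local rather than deploying a Hugging Face Space is mine, not something this diff states:

import random

import trackio

# Hypothetical minimal run: same three calls as the example above, but
# without `space_id` (assumed to keep the dashboard local instead of
# creating a Space).
trackio.init(
    project="fake-training-local",  # made-up project name
    name="local-run",
    config=dict(epochs=1, learning_rate=0.001, batch_size=32),
)

for step in range(10):
    trackio.log({"step": step, "loss": random.uniform(0.2, 1.0)})

trackio.finish()
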
4 changes: 3 additions & 1 deletion examples/fake-training.py
@@ -1,11 +1,12 @@
import random
import time

from tqdm import tqdm

import trackio as wandb

wandb.init(
    project="fake-training",
    project=f"fake-training-{random.randint(10000, 99999)}",
    name="test-run",
    config=dict(
        epochs=5,
@@ -50,5 +51,6 @@
            "val_accuracy": val_accuracy,
        }
    )
    time.sleep(1)

wandb.finish()
36 changes: 30 additions & 6 deletions trackio/ui.py
@@ -197,11 +197,9 @@ def configure(request: gr.Request):
run_cb = gr.CheckboxGroup(
    label="Runs", choices=[], interactive=True, elem_id="run-cb"
)

with gr.Sidebar(position="right", open=False) as settings_sidebar:
    gr.Markdown("### ⚙️ Settings")
    realtime_cb = gr.Checkbox(label="Refresh realtime", value=True)
    smoothing_cb = gr.Checkbox(label="Smoothing", value=True)
    gr.HTML("<hr>")
    realtime_cb = gr.Checkbox(label="Refresh metrics realtime", value=True)
    smoothing_cb = gr.Checkbox(label="Smooth metrics", value=True)

timer = gr.Timer(value=1)
metrics_subset = gr.State([])
@@ -255,19 +253,45 @@ def configure(request: gr.Request):
)

x_lim = gr.State(None)
last_step = gr.State(None)

def update_x_lim(select_data: gr.SelectData):
    return select_data.index

def update_last_step(project, runs):
    """Update the last step from all runs to detect when new data is available."""
    if not project or not runs:
        return None

    max_step = 0
    for run in runs:
        metrics = SQLiteStorage.get_metrics(project, run)
        if metrics:
            df = pd.DataFrame(metrics)
            if "step" not in df.columns:
                df["step"] = range(len(df))
            if not df.empty:
                max_step = max(max_step, df["step"].max())

    return max_step if max_step > 0 else None

timer.tick(
    fn=update_last_step,
    inputs=[project_dd, run_cb],
    outputs=last_step,
    show_progress="hidden",
)

@gr.render(
    triggers=[
        demo.load,
        run_cb.change,
        timer.tick,
        last_step.change,
        smoothing_cb.change,
        x_lim.change,
    ],
    inputs=[project_dd, run_cb, smoothing_cb, metrics_subset, x_lim],
    show_progress="hidden",
)
def update_dashboard(project, runs, smoothing, metrics_subset, x_lim_value):
    dfs = []
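
The ui.py change above follows a polling pattern that is easy to miss in diff form: a gr.Timer ticks every second but only refreshes a lightweight gr.State (last_step) from storage, and the expensive @gr.render dashboard can then be keyed off last_step.change so it rebuilds only when new data has actually arrived. A stripped-down sketch of the same pattern outside trackio; the component names and the fake poll function below are illustrative, not taken from this PR:

import random

import gradio as gr

with gr.Blocks() as demo:
    timer = gr.Timer(value=1)   # poll once per second
    last_step = gr.State(None)  # latest step seen in "storage"

    def poll_last_step(current):
        # Stand-in for reading the newest step from storage; new data
        # only appears occasionally, so most ticks return the same value.
        return current if random.random() < 0.7 else random.randint(0, 1000)

    # The timer only updates the lightweight State...
    timer.tick(
        fn=poll_last_step,
        inputs=last_step,
        outputs=last_step,
        show_progress="hidden",
    )

    # ...and the heavier re-render runs only when that State actually changes.
    @gr.render(triggers=[demo.load, last_step.change], inputs=[last_step])
    def dashboard(step):
        gr.Markdown(f"Dashboard rebuilt; last step = {step}")

if __name__ == "__main__":
    demo.launch()
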