Fix module caching #1029

Workflow file for this run

name: e2e-test

env:
  AWS_ACCESS_KEY_ID: ${{ vars.LPD_PERF_AWS_ACCESS_KEY_ID }}
  AWS_SECRET_ACCESS_KEY: ${{ secrets.LPD_PERF_AWS_SECRET_ACCESS_KEY }}
  AWS_BUCKET: ${{ vars.LPD_PERF_AWS_BUCKET }}
  AWS_REGION: ${{ vars.LPD_PERF_AWS_REGION }}
  LIGHTPANDA_DISABLE_TELEMETRY: true
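# NOTE: the AWS_* variables above are exposed workflow-wide; presumably they
# are consumed when benchmark results are published (assumption — no upload
# step is visible in this file).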
on:
  push:
    branches:
      - main
    paths:
      - "build.zig"
      - "src/**/*.zig"
      - "src/*.zig"
      - "vendor/zig-js-runtime"
      - ".github/**"
      - "vendor/**"
  pull_request:
    # By default GH triggers on the types opened, synchronize and reopened.
    # See https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request
    # Since we skip the job when the PR is in draft state, we also list
    # ready_for_review to force CI to run when the PR is marked ready for
    # review without any other change.
    # See https://github.com/orgs/community/discussions/25722#discussioncomment-3248917
    types: [opened, synchronize, reopened, ready_for_review]
    paths:
      - ".github/**"
      - "build.zig"
      - "src/**/*.zig"
      - "src/*.zig"
      - "vendor/**"
  # Allows you to run this workflow manually from the Actions tab.
  workflow_dispatch:
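# Pipeline: zig-build-release produces the release binary, which the
# puppeteer-perf, demo-scripts and cdp-and-hyperfine-bench jobs download and
# exercise; perf-fmt finally formats and publishes the benchmark results.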
jobs:
  zig-build-release:
    name: zig build release
    runs-on: ubuntu-latest
    timeout-minutes: 15
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
          # Fetch submodules recursively, to also get zig-js-runtime's own submodules.
          submodules: recursive
      - uses: ./.github/actions/install
      - name: zig build release
        run: zig build -Doptimize=ReleaseFast -Dcpu=x86_64 -Dgit_commit=$(git rev-parse --short ${{ github.sha }})
      - name: upload artifact
        uses: actions/upload-artifact@v4
        with:
          name: lightpanda-build-release
          path: |
            zig-out/bin/lightpanda
          retention-days: 1
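  # Regression thresholds used below: MAX_MEMORY is the allowed peak resident
  # set size (VmHWM) in kB (30000 kB ≈ 30 MB); MAX_AVG_DURATION is the allowed
  # average puppeteer run duration in ms.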
  puppeteer-perf:
    name: puppeteer-perf
    needs: zig-build-release
    env:
      MAX_MEMORY: 30000
      MAX_AVG_DURATION: 24
      LIGHTPANDA_DISABLE_TELEMETRY: true
    runs-on: ubuntu-latest
    timeout-minutes: 15
    steps:
      - uses: actions/checkout@v4
        with:
          repository: 'lightpanda-io/demo'
          fetch-depth: 0
      - run: npm install
      - name: download artifact
        uses: actions/download-artifact@v4
        with:
          name: lightpanda-build-release
      - run: chmod a+x ./lightpanda
      - name: run puppeteer
        run: |
          python3 -m http.server 1234 -d ./public & echo $! > PYTHON.pid
          ./lightpanda serve & echo $! > LPD.pid
          RUNS=100 npm run bench-puppeteer-cdp > puppeteer.out || exit 1
          grep VmHWM "/proc/$(cat LPD.pid)/status" | grep -oP '\d+' > LPD.VmHWM
          kill $(cat LPD.pid) $(cat PYTHON.pid)
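      # VmHWM ("high water mark") is the kernel's record of the process's peak
      # resident set size, reported in kB in /proc/<pid>/status; it has to be
      # read before the server process is killed.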
      - name: puppeteer result
        run: cat puppeteer.out
      - name: memory regression
        run: |
          export LPD_VmHWM=$(cat LPD.VmHWM)
          echo "Peak resident set size: $LPD_VmHWM"
          test "$LPD_VmHWM" -le "$MAX_MEMORY"
      - name: duration regression
        run: |
          export PUPPETEER_AVG_DURATION=$(grep 'avg run' puppeteer.out | sed 's/avg run duration (ms) //')
          echo "puppeteer avg duration: $PUPPETEER_AVG_DURATION"
          test "$PUPPETEER_AVG_DURATION" -le "$MAX_AVG_DURATION"
  demo-scripts:
    name: demo-scripts
    needs: zig-build-release
    runs-on: ubuntu-latest
    timeout-minutes: 15
    steps:
      - uses: actions/checkout@v4
        with:
          repository: 'lightpanda-io/demo'
          fetch-depth: 0
      - run: npm install
      - name: download artifact
        uses: actions/download-artifact@v4
        with:
          name: lightpanda-build-release
      - run: chmod a+x ./lightpanda
      - name: run end to end tests
        run: |
          ./lightpanda serve & echo $! > LPD.pid
          go run runner/main.go --verbose
          kill $(cat LPD.pid)
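  # cdp-and-hyperfine-bench repeats the puppeteer benchmark and adds a
  # hyperfine run on a dedicated self-hosted runner, so the numbers are
  # comparable across runs; it only executes on pushes, never on pull requests.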
  cdp-and-hyperfine-bench:
    name: cdp-and-hyperfine-bench
    needs: zig-build-release
    # Don't execute on PRs.
    if: github.event_name != 'pull_request'
    # Use a self-hosted runner.
    runs-on: lpd-bench-hetzner
    timeout-minutes: 15
    steps:
      - uses: actions/checkout@v4
        with:
          repository: 'lightpanda-io/demo'
          fetch-depth: 0
      - run: npm install
      - name: download artifact
        uses: actions/download-artifact@v4
        with:
          name: lightpanda-build-release
      - run: chmod a+x ./lightpanda
      - name: start http
        run: |
          go run ws/main.go & echo $! > WS.pid
          sleep 2
      - name: run puppeteer
        run: |
          ./lightpanda serve & echo $! > LPD.pid
          sleep 2
          RUNS=100 npm run bench-puppeteer-cdp > puppeteer.out || exit 1
          grep VmHWM "/proc/$(cat LPD.pid)/status" | grep -oP '\d+' > LPD.VmHWM
          kill $(cat LPD.pid)
      - name: puppeteer result
        run: cat puppeteer.out
      - name: json output
        run: |
          export AVG_DURATION=$(grep 'avg run' puppeteer.out | sed 's/avg run duration (ms) //')
          export TOTAL_DURATION=$(grep 'total duration' puppeteer.out | sed 's/total duration (ms) //')
          export LPD_VmHWM=$(cat LPD.VmHWM)
          echo "{\"duration_total\":${TOTAL_DURATION},\"duration_avg\":${AVG_DURATION},\"mem_peak\":${LPD_VmHWM}}" > bench.json
          cat bench.json
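      # bench.json ends up with the following shape (illustrative values):
      #   {"duration_total":2400,"duration_avg":24,"mem_peak":28000}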
      - name: run hyperfine
        run: |
          hyperfine --export-json=hyperfine.json --warmup 3 --runs 20 --shell=none "./lightpanda --dump http://127.0.0.1:1234/campfire-commerce/"
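      # hyperfine benchmarks the --dump mode against the page served locally on
      # port 1234 (started in the "start http" step) and writes its
      # measurements (mean, stddev, per-run times) to hyperfine.json.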
      - name: stop http
        run: kill $(cat WS.pid)
      - name: write commit
        run: |
          echo "${{ github.sha }}" > commit.txt
      - name: upload artifact
        uses: actions/upload-artifact@v4
        with:
          name: bench-results
          path: |
            bench.json
            hyperfine.json
            commit.txt
          retention-days: 10
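  # perf-fmt downloads the bench-results artifact and formats/sends the two
  # JSON reports via the perf-fmt container image.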
  perf-fmt:
    name: perf-fmt
    needs: cdp-and-hyperfine-bench
    # Don't execute on PRs.
    if: github.event_name != 'pull_request'
    runs-on: ubuntu-latest
    timeout-minutes: 15
    container:
      image: ghcr.io/lightpanda-io/perf-fmt:latest
      credentials:
        username: ${{ github.actor }}
        password: ${{ secrets.GITHUB_TOKEN }}
    steps:
      - name: download artifact
        uses: actions/download-artifact@v4
        with:
          name: bench-results
      - name: format and send cdp json result
        run: /perf-fmt cdp ${{ github.sha }} bench.json
      - name: format and send hyperfine json result
        run: /perf-fmt hyperfine ${{ github.sha }} hyperfine.json