perf(term): add benchmarks
Adds benchmarks that run [alacritty/vtebench](https://github.com/alacritty/vtebench) workloads in the terminal, using code based on [godotengine/godot-benchmarks](https://github.com/godotengine/godot-benchmarks) to measure the average GPU and CPU time spent per frame. Benchmark results are tracked in CI with [github-action-benchmark](https://github.com/benchmark-action/github-action-benchmark) and published to https://lihop.github.io/godot-xterm/dev/bench/.
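The measurement approach is the same one used in `benchmark/benchmark.gd` below: enable per-viewport render-time measurement, accumulate the reported CPU and GPU times every frame, then divide by the number of frames captured. A minimal self-contained sketch of the pattern (Godot 4 GDScript; the node structure and names here are illustrative only):

```gdscript
extends Node

var _frames := 0
var _cpu_ms := 0.0  # Accumulated CPU render time, in milliseconds.
var _gpu_ms := 0.0  # Accumulated GPU render time, in milliseconds.


func _ready():
    # Ask the RenderingServer to record per-frame render times for the root viewport.
    RenderingServer.viewport_set_measure_render_time(get_tree().root.get_viewport_rid(), true)


func _process(_delta):
    # Accumulate the measured render times once per frame.
    var rid := get_tree().root.get_viewport_rid()
    _frames += 1
    _cpu_ms += RenderingServer.viewport_get_measured_render_time_cpu(rid)
    _gpu_ms += RenderingServer.viewport_get_measured_render_time_gpu(rid)


func averages() -> Dictionary:
    # Average milliseconds spent per frame; smaller is better.
    return {
        cpu = _cpu_ms / max(1, _frames),
        gpu = _gpu_ms / max(1, _frames),
    }
```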
Parent: 8b33818751
Commit: 9569c9e489

10 changed files with 326 additions and 0 deletions
.gitattributes (+1 line)

```diff
@@ -17,6 +17,7 @@
 # Files to exclude from asset-lib download.
 /addons/gd-plug export-ignore
 /default_env.tres export-ignore
+/benchmark export-ignore
 /docs export-ignore
 /.env.example export-ignore
 /examples export-ignore
```
.github/workflows/main.yml (+105 lines)

```diff
@@ -312,6 +312,111 @@ jobs:
           name: failed-screenshots
           path: test/visual_regression/screenshots
 
+  benchmark:
+    name: Benchmark (${{matrix.benchmark}})
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        benchmark:
+          [
+            editor_launch,
+            cursor_motion,
+            dense_cells,
+            light_cells,
+            scrolling,
+            scrolling_bottom_region,
+            scrolling_bottom_small_region,
+            scrolling_fullscreen,
+            scrolling_top_region,
+            scrolling_top_small_region,
+            unicode,
+          ]
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          submodules: recursive
+      - name: Setup Godot
+        uses: lihop/setup-godot@v2
+        with:
+          version: "4.2.2-stable"
+      - name: Install just
+        uses: taiki-e/install-action@just
+      - name: Update gdextension file
+        run: | # Use release builds as the build job finishes sooner.
+          sed -i 's/template_debug/template_release/g' addons/godot_xterm/native/godot-xterm.gdextension
+      - name: Import assets
+        shell: bash
+        run: godot --editor --headless --quit-after 100 || true
+      - name: Wait for build
+        uses: fountainhead/action-wait-for-check@v1.2.0
+        with:
+          token: ${{ secrets.GITHUB_TOKEN }}
+          checkName: "Build (linux, x86_64, release) #${{ github.run_number }}"
+          ref: ${{ github.event.pull_request.head.sha || github.sha }}
+      - name: Install binary build artifacts
+        uses: actions/download-artifact@v4
+        with:
+          path: addons/godot_xterm/native/bin
+          merge-multiple: true
+      - name: Benchmark
+        shell: bash
+        run: just bench ${{matrix.benchmark}}
+      - name: Upload results
+        uses: actions/upload-artifact@v4
+        with:
+          name: benchmark-results-${{ matrix.benchmark }}
+          path: benchmark/results/*.json
+
+  process-benchmarks:
+    name: Process Benchmarks
+    runs-on: ubuntu-latest
+    needs: [benchmark]
+    permissions:
+      deployments: write
+      contents: write
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/upload-artifact/merge@v4
+        with:
+          name: benchmark-results
+          pattern: "benchmark-results-*"
+          delete-merged: true
+      - uses: actions/download-artifact@v4
+        with:
+          name: benchmark-results
+          path: benchmark/results/
+      - name: Merge results
+        run: jq -s '[.[][]]' benchmark/results/*.json > benchmark/results/all.json
+      - name: Download previous benchmark data
+        uses: actions/cache@v4
+        with:
+          path: ./cache
+          key: ${{runner.os}}-benchmark
+      - name: Store benchmark result
+        uses: benchmark-action/github-action-benchmark@v1
+        if: github.ref != 'refs/heads/main'
+        with:
+          tool: "customSmallerIsBetter"
+          output-file-path: benchmark/results/all.json
+          external-data-json-path: ./cache/benchmark-data.json
+          alert-threshold: "20%"
+          fail-threshold: "200%"
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          comment-on-alert: true
+          summary-always: true
+      - name: Publish benchmark results
+        if: github.ref == 'refs/heads/main'
+        uses: benchmark-action/github-action-benchmark@v1
+        with:
+          name: "GodotXterm Benchmarks"
+          tool: "customSmallerIsBetter"
+          output-file-path: benchmark/results/all.json
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          gh-pages-branch: stable
+          benchmark-data-dir-path: docs/dev/bench
+          auto-push: true
+
   merge-artifacts:
     name: Merge Artifacts
     runs-on: ubuntu-latest
```
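The two github-action-benchmark steps split the work by branch: on refs other than `main`, results are compared against the cached history (`external-data-json-path`), posting a comment when a benchmark regresses past the 20% alert threshold and failing the job past 200%; on `main`, results are instead pushed to the `stable` branch under `docs/dev/bench`, which is what serves the published chart page linked in the commit message.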
.gitmodules (+3 lines)

```diff
@@ -10,3 +10,6 @@
 [submodule "addons/godot_xterm/native/thirdparty/node-pty"]
 	path = addons/godot_xterm/native/thirdparty/node-pty
 	url = https://github.com/microsoft/node-pty
+[submodule "benchmark/vtebench"]
+	path = benchmark/vtebench
+	url = git@github.com:alacritty/vtebench
```
Justfile (+9 lines)

```diff
@@ -32,3 +32,12 @@ test-visual:
 
 uninstall:
     {{godot}} --headless -s plug.gd uninstall
+
+bench name="":
+    @if [ "{{name}}" = "editor_launch" ]; then \
+        ./benchmark/editor_launch.sh {{godot}}; \
+    elif [ -n "{{name}}" ]; then \
+        {{godot}} --windowed --resolution 800x600 --position 0,0 benchmark/benchmark.tscn -- --benchmark={{name}}; \
+    else \
+        ls -1 benchmark/vtebench/benchmarks | xargs -I {} just bench {} && just bench editor_launch; \
+    fi
```
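With this recipe, `just bench` with no argument enumerates `benchmark/vtebench/benchmarks` and runs each workload in turn, finishing with the editor-launch benchmark; `just bench <name>` runs a single workload in a windowed 800x600 instance. The `editor_launch` case is handled by the shell script rather than the benchmark scene, since it times the editor process itself.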
benchmark/benchmark.gd (new file, 124 lines)

```gdscript
extends Control


class Results:
    var render_cpu := 0.0
    var render_gpu := 0.0
    var vtebench := {value = 0.0, range = ""}


var terminal_exit_code := -1


func _ready():
    var timeout := 120
    var benchmark := ""

    var args = OS.get_cmdline_user_args()
    for arg in args:
        if arg.begins_with("--benchmark"):
            benchmark = arg.split("=")[1]

    if benchmark.is_empty():
        _quit_with_error("No benchmark specified")

    RenderingServer.viewport_set_measure_render_time(get_tree().root.get_viewport_rid(), true)

    var results := Results.new()
    var begin_time := Time.get_ticks_usec()
    var frames_captured := 0

    $Terminal.run_benchmark(benchmark)
    await $Terminal.started

    # Accumulate measured render times each frame until the terminal process exits.
    while terminal_exit_code == -1:
        await get_tree().process_frame

        if Time.get_ticks_usec() - begin_time > (timeout * 1e6):
            _quit_with_error("Benchmark took longer than %ss to run" % timeout)

        frames_captured += 1
        results.render_cpu += (
            RenderingServer.viewport_get_measured_render_time_cpu(
                get_tree().root.get_viewport_rid()
            )
            + RenderingServer.get_frame_setup_time_cpu()
        )
        results.render_gpu += RenderingServer.viewport_get_measured_render_time_gpu(
            get_tree().root.get_viewport_rid()
        )

    if terminal_exit_code != 0:
        _quit_with_error("Terminal exited with error code: %d" % terminal_exit_code)

    # Convert the accumulated totals into per-frame averages.
    results.render_cpu /= float(max(1, frames_captured))
    results.render_gpu /= float(max(1, frames_captured))

    results.vtebench = _process_dat_results("res://benchmark/results/%s.dat" % benchmark)

    var json_results = JSON.stringify(
        [
            {
                name = benchmark,
                unit = "milliseconds",
                value = _round(results.vtebench.value),
                range = results.vtebench.range,
            },
            {
                name = "%s - render cpu" % benchmark,
                unit = "milliseconds",
                value = _round(results.render_cpu),
            },
            {
                name = "%s - render gpu" % benchmark,
                unit = "milliseconds",
                value = _round(results.render_gpu),
            }
        ],
        " "
    )

    var file = FileAccess.open("res://benchmark/results/%s.json" % benchmark, FileAccess.WRITE)
    file.store_string(json_results)

    print(json_results)
    get_tree().quit(terminal_exit_code)


func _on_terminal_exited(exit_code: int):
    terminal_exit_code = exit_code


func _round(val: float, sig_figs := 4) -> float:
    return snapped(val, pow(10, floor(log(val) / log(10)) - sig_figs + 1))


func _process_dat_results(path: String) -> Dictionary:
    var file := FileAccess.open(path, FileAccess.READ)
    var samples := []

    file.get_line()  # Skip the first 'header' line.
    while !file.eof_reached():
        var line := file.get_line().strip_edges()
        if line.is_valid_float():
            samples.append(line.to_float())

    if samples.size() < 2:
        _quit_with_error("Not enough samples")

    var avg: float = (samples.reduce(func(acc, n): return acc + n, 0)) / samples.size()

    # Sample variance with Bessel's correction (divide by n - 1).
    var variance := 0.0
    for sample in samples:
        variance += pow(sample - avg, 2)
    variance /= (samples.size() - 1)

    return {value = avg, range = "± %.2f" % _round(sqrt(variance))}


func _quit_with_error(error_msg: String):
    await get_tree().process_frame
    push_error(error_msg)
    get_tree().quit(1)
```
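Two details worth noting in `benchmark.gd`: `_round` snaps a value to four significant figures (for 123.456, `floor(log(123.456) / log(10)) - 4 + 1` is -1, so the value is snapped to the nearest 0.1, giving 123.5), and the reported `range` is the sample standard deviation of the vtebench timings, i.e. the square root of the Bessel-corrected variance computed in `_process_dat_results`.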
benchmark/benchmark.tscn (new file, 26 lines)

```
[gd_scene load_steps=5 format=3 uid="uid://b2axn64mqnt8n"]

[ext_resource type="Script" path="res://benchmark/benchmark.gd" id="1_tmqb5"]
[ext_resource type="PackedScene" uid="uid://cysad55lwtnc6" path="res://examples/terminal/terminal.tscn" id="2_3raq0"]
[ext_resource type="Script" path="res://benchmark/terminal_benchmark.gd" id="3_8t8od"]
[ext_resource type="FontVariation" uid="uid://ckq73bs2fwsie" path="res://themes/fonts/regular.tres" id="3_hnrrm"]

[node name="Benchmark" type="Control"]
layout_mode = 3
anchors_preset = 15
anchor_right = 1.0
anchor_bottom = 1.0
grow_horizontal = 2
grow_vertical = 2
script = ExtResource("1_tmqb5")

[node name="Terminal" parent="." instance=ExtResource("2_3raq0")]
layout_mode = 1
focus_mode = 0
theme_override_fonts/normal_font = ExtResource("3_hnrrm")
script = ExtResource("3_8t8od")

[connection signal="exited" from="Terminal" to="." method="_on_terminal_exited"]
[connection signal="data_received" from="Terminal/PTY" to="Terminal" method="_on_pty_data_received"]

[editable path="Terminal"]
```
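The scene instances the example terminal with `terminal_benchmark.gd` attached and wires two signals: the terminal's `exited` signal to `_on_terminal_exited` on the benchmark root, and the PTY's `data_received` signal to `_on_pty_data_received` on the terminal. These two connections provide the exit and start detection used by the scripts above and below.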
benchmark/editor_launch.sh (new executable file, 24 lines)

```bash
#!/usr/bin/env bash

set -e

godot=${1:-godot}

if ! command -v $godot &> /dev/null; then
    echo "Error: '$godot' command not found. Please provide a valid path to the Godot executable."
    exit 1
fi

results_file=benchmark/results/editor_launch.json
value=$({ time -p $godot --editor --quit; } 2>&1 | tail -n3 | head -n1 | cut -d' ' -f2)
cat <<EOF > $results_file
[
  {
    "name": "editor_launch",
    "unit": "seconds",
    "value": $value
  }
]
EOF
cat $results_file
```
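`time -p` prints `real`, `user`, and `sys` lines to stderr, so the pipeline redirects stderr into the pipe, keeps the last three lines, takes the first of them (`real`), and cuts its second field to get the wall-clock launch time in seconds.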
benchmark/results/.gitignore (new file, 2 lines)

```
*
!.gitignore
```
benchmark/terminal_benchmark.gd (new file, 31 lines)

```gdscript
extends "res://examples/terminal/terminal.gd"

signal started
signal exited(exit_code: int)

var vtebench_dir := ProjectSettings.globalize_path("res://benchmark/vtebench")


func _ready():
    pty.connect("exited", self._on_exit)


func run_benchmark(benchmark):
    pty.fork(
        "cargo",
        ["run", "--", "-b", "benchmarks/%s" % benchmark, "--dat", "../results/%s.dat" % benchmark],
        vtebench_dir,
        87,
        29
    )


func _on_exit(exit_code, _signal):
    exited.emit(exit_code)


func _on_pty_data_received(data: PackedByteArray):
    # Listen for the reset sequence (\x1bc), to determine that the benchmark has started.
    if data.slice(0, 2) == PackedByteArray([27, 99]):
        $PTY.disconnect("data_received", _on_pty_data_received)
        started.emit()
```
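`\x1bc` (ESC `c`, bytes 27 and 99) is the RIS "Reset to Initial State" control sequence, which vtebench emits at the start of a run; its arrival on the PTY is therefore used as the signal that measurement can begin, after which the handler disconnects itself.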
benchmark/vtebench (new submodule)

```
Subproject commit c75155bfc252227c0efc101c1971df3e327c71c4
```