Revive the CTS job #7675

Draft: wants to merge 2 commits into base: trunk

87 changes: 87 additions & 0 deletions .github/workflows/cts.yml
@@ -0,0 +1,87 @@
name: CTS

on:
  pull_request:
    types: [labeled, opened, synchronize]
  workflow_dispatch:

env:
  CARGO_INCREMENTAL: false
  CARGO_TERM_COLOR: always
  RUST_BACKTRACE: full
  MSRV: "1.84"

jobs:
  cts:
    # For pull requests, only run if the "PR: run CTS" label has been added.
    if: "github.event_name == 'workflow_dispatch' || (github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 'PR: run CTS'))"
Contributor Author commented:

I tried a bunch of versions of this, and GitHub kept reporting a YAML syntax error. I'm not sure whether this version is correct, or whether it's just interpreted as a non-empty string that always evaluates to true.


    strategy:
      fail-fast: false
      matrix:
        include:
          # Windows
          - name: Windows x86_64
            os: windows-2022
            target: x86_64-pc-windows-msvc
            backends: dx12

          # Linux
          #- name: Linux x86_64
          #  os: ubuntu-20.04
          #  target: x86_64-unknown-linux-gnu
          #  backends: vulkan # gl

    name: CTS ${{ matrix.name }}
    runs-on: ${{ matrix.os }}

    steps:
      - name: checkout repo
        uses: actions/checkout@v4
        with:
          path: wgpu

      - name: checkout cts
        run: |
          git clone https://github.com/gpuweb/cts.git
          cd cts
          git checkout $(cat ../wgpu/cts_runner/revision.txt)

      - name: Install Repo MSRV toolchain
        run: |
          rustup toolchain install ${{ env.MSRV }} --no-self-update --profile=minimal --target ${{ matrix.target }}
          rustup override set ${{ env.MSRV }}
          cargo -V

      - name: caching
        uses: Swatinem/rust-cache@v2
        with:
          key: cts-b # suffix for cache busting
          workspaces: wgpu

      # We enable line numbers for panics, but that's it
      - name: disable debug
        shell: bash
        run: |
          mkdir -p wgpu/.cargo
          echo """[profile.dev]
          debug = 1" > wgpu/.cargo/config.toml

      - name: build CTS runner
        run: |
          cargo build --manifest-path wgpu/cts_runner/Cargo.toml

      - name: run CTS
        shell: bash
        run: |
          cd cts;
          for backend in ${{ matrix.backends }}; do
            echo "======= CTS TESTS $backend ======";
            grep -v '^//' ../wgpu/cts_runner/test.lst | while IFS=$' \t\r\n' read test; do
              echo "=== Running $test ===";
              DENO_WEBGPU_BACKEND=$backend cargo run --manifest-path ../wgpu/cts_runner/Cargo.toml --frozen -- ./tools/run_deno --verbose "$test";
            done
          done
          echo;
          echo "Note: Summary reflects only the last test suite, not the entire run."
40 changes: 20 additions & 20 deletions Cargo.lock

Some generated files are not rendered by default.

10 changes: 5 additions & 5 deletions Cargo.toml
@@ -230,11 +230,11 @@ web-sys = { version = "0.3.77", default-features = false }
web-time = "1"

# deno dependencies
deno_console = "0.190.0"
deno_core = "0.336.0"
deno_url = "0.190.0"
deno_web = "0.221.0"
deno_webidl = "0.190.0"
deno_console = "0.192.0"
deno_core = "0.338.0"
deno_url = "0.192.0"
deno_web = "0.224.0"
deno_webidl = "0.192.0"
deno_webgpu = { version = "0.157.0", path = "./deno_webgpu" }
deno_unsync = "0.4.2"
deno_error = "0.5.5"
1 change: 1 addition & 0 deletions cts_runner/src/bootstrap.js
@@ -29,6 +29,7 @@ import * as performance from "ext:deno_web/15_performance.js";
import { loadWebGPU } from "ext:deno_webgpu/00_init.js";
import * as imageData from "ext:deno_web/16_image_data.js";
const webgpu = loadWebGPU();
+webgpu.initGPU();

// imports needed to pass module evaluation
import "ext:deno_url/01_urlpattern.js";
14 changes: 8 additions & 6 deletions deno_webgpu/adapter.rs
@@ -11,7 +11,6 @@ use deno_core::v8;
use deno_core::GarbageCollected;
use deno_core::OpState;
use deno_core::WebIDL;
-use tokio::sync::Mutex;

use super::device::GPUDevice;
use crate::webidl::features_to_feature_names;
@@ -128,7 +127,9 @@ impl GPUAdapter {
let required_limits =
serde_json::from_value(serde_json::to_value(descriptor.required_limits)?)?;

-let webgpu_trace = std::env::var_os("DENO_WEBGPU_TRACE").unwrap();
+let trace = std::env::var_os("DENO_WEBGPU_TRACE")
+    .map(|path| wgpu_types::Trace::Directory(std::path::PathBuf::from(path)))
+    .unwrap_or_default();

let wgpu_descriptor = wgpu_types::DeviceDescriptor {
label: crate::transform_label(descriptor.label.clone()),
@@ -137,18 +138,19 @@
),
required_limits,
memory_hints: Default::default(),
-trace: wgpu_types::Trace::Directory(std::path::PathBuf::from(webgpu_trace)),
+trace,
};

let (device, queue) =
self.instance
.adapter_request_device(self.id, &wgpu_descriptor, None, None)?;

-let (lost_sender, lost_receiver) = tokio::sync::oneshot::channel();
let (uncaptured_sender, mut uncaptured_receiver) = tokio::sync::mpsc::unbounded_channel();
let (uncaptured_sender_is_closed_sender, mut uncaptured_sender_is_closed_receiver) =
tokio::sync::oneshot::channel::<()>();

+let resolver = v8::PromiseResolver::new(scope).unwrap();
+let promise = resolver.get_promise(scope);
let device = GPUDevice {
instance: self.instance.clone(),
id: device,
@@ -157,12 +159,12 @@
queue_obj: SameObject::new(),
adapter_info: self.info.clone(),
error_handler: Arc::new(super::error::DeviceErrorHandler::new(
-lost_sender,
+v8::Global::new(scope, resolver),
uncaptured_sender,
uncaptured_sender_is_closed_sender,
)),
adapter: self.id,
-lost_receiver: Mutex::new(Some(lost_receiver)),
+lost_promise: v8::Global::new(scope, promise),
limits: SameObject::new(),
features: SameObject::new(),
};
15 changes: 5 additions & 10 deletions deno_webgpu/device.rs
@@ -50,7 +50,7 @@ pub struct GPUDevice {
pub queue_obj: SameObject<GPUQueue>,

pub error_handler: super::error::ErrorHandler,
-pub lost_receiver: tokio::sync::Mutex<Option<tokio::sync::oneshot::Receiver<()>>>,
+pub lost_promise: v8::Global<v8::Promise>,
}

impl Drop for GPUDevice {
@@ -118,6 +118,7 @@ impl GPUDevice {
fn queue(&self, scope: &mut v8::HandleScope) -> v8::Global<v8::Object> {
self.queue_obj.get(scope, |_| GPUQueue {
id: self.queue,
+device: self.id,
error_handler: self.error_handler.clone(),
instance: self.instance.clone(),
label: self.label.clone(),
@@ -560,16 +561,10 @@
}
}

-// TODO(@crowlKats): support returning same promise
-#[async_method]
#[getter]
-#[cppgc]
-async fn lost(&self) -> GPUDeviceLostInfo {
-if let Some(lost_receiver) = self.lost_receiver.lock().await.take() {
-let _ = lost_receiver.await;
-}
-
-GPUDeviceLostInfo
+#[global]
+fn lost(&self) -> v8::Global<v8::Promise> {
+self.lost_promise.clone()
}

#[required(1)]
12 changes: 7 additions & 5 deletions deno_webgpu/error.rs
@@ -5,6 +5,8 @@ use std::fmt::Formatter;
use std::sync::Mutex;
use std::sync::OnceLock;

+use deno_core::v8;

use wgpu_core::binding_model::CreateBindGroupError;
use wgpu_core::binding_model::CreateBindGroupLayoutError;
use wgpu_core::binding_model::CreatePipelineLayoutError;
@@ -35,7 +37,7 @@ pub type ErrorHandler = std::sync::Arc<DeviceErrorHandler>;

pub struct DeviceErrorHandler {
pub is_lost: OnceLock<()>,
-lost_sender: Mutex<Option<tokio::sync::oneshot::Sender<()>>>,
+lost_resolver: Mutex<Option<v8::Global<v8::PromiseResolver>>>,
uncaptured_sender_is_closed: Mutex<Option<tokio::sync::oneshot::Sender<()>>>,

pub uncaptured_sender: tokio::sync::mpsc::UnboundedSender<GPUError>,
@@ -53,13 +55,13 @@ impl Drop for DeviceErrorHandler {

impl DeviceErrorHandler {
pub fn new(
-lost_sender: tokio::sync::oneshot::Sender<()>,
+lost_resolver: v8::Global<v8::PromiseResolver>,
uncaptured_sender: tokio::sync::mpsc::UnboundedSender<GPUError>,
uncaptured_sender_is_closed: tokio::sync::oneshot::Sender<()>,
) -> Self {
Self {
is_lost: Default::default(),
-lost_sender: Mutex::new(Some(lost_sender)),
+lost_resolver: Mutex::new(Some(lost_resolver)),
uncaptured_sender,
uncaptured_sender_is_closed: Mutex::new(Some(uncaptured_sender_is_closed)),
scopes: Mutex::new(vec![]),
@@ -80,8 +82,8 @@ impl DeviceErrorHandler {
if matches!(err, GPUError::Lost) {
let _ = self.is_lost.set(());

-if let Some(sender) = self.lost_sender.lock().unwrap().take() {
-let _ = sender.send(());
+if let Some(_resolver) = self.lost_resolver.lock().unwrap().take() {
+// TODO: Need a scope to call resolver here.
}
return;
}
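
For reference, the stored resolver can only be resolved once a V8 scope is available (for example inside an op that receives a mutable v8::HandleScope, or via JsRuntime::handle_scope on the event loop). The sketch below shows roughly what that would look like; the helper name resolve_lost_promise and resolving with undefined are illustrative assumptions, not part of this PR, which would presumably construct a GPUDeviceLostInfo object instead.

use deno_core::v8;

// Hypothetical helper, not part of this PR: resolve the stored "lost"
// promise once a V8 scope is available.
fn resolve_lost_promise(
    scope: &mut v8::HandleScope,
    lost_resolver: &std::sync::Mutex<Option<v8::Global<v8::PromiseResolver>>>,
) {
    if let Some(resolver) = lost_resolver.lock().unwrap().take() {
        // Rehydrate the global handle in the current scope.
        let resolver = v8::Local::new(scope, resolver);
        // Resolve with `undefined` as a placeholder; a real implementation
        // would pass a GPUDeviceLostInfo value here instead.
        let undefined = v8::undefined(scope);
        resolver.resolve(scope, undefined.into());
    }
}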