diff --git a/.github/workflows/self-push.yml b/.github/workflows/self-push.yml
index 9119e3940d2..7080c6aa78d 100644
--- a/.github/workflows/self-push.yml
+++ b/.github/workflows/self-push.yml
@@ -4,7 +4,7 @@ on:
   push:
     branches:
       - master
-      - model-templates
+      - ci_*
     paths:
       - "src/**"
       - "tests/**"
diff --git a/.github/workflows/self-scheduled.yml b/.github/workflows/self-scheduled.yml
index 7373fb1567b..d1a435bc173 100644
--- a/.github/workflows/self-scheduled.yml
+++ b/.github/workflows/self-scheduled.yml
@@ -6,9 +6,6 @@ name: Self-hosted runner (scheduled)
 
 on:
-  push:
-    branches:
-      - ci_*
   repository_dispatch:
   schedule:
     - cron: "0 0 * * *"
 
diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py
index dc8dc075b3b..007cb472671 100755
--- a/tests/test_modeling_common.py
+++ b/tests/test_modeling_common.py
@@ -1141,22 +1141,22 @@ class ModelTesterMixin:
         for model_class in self.all_parallelizable_model_classes:
             inputs_dict = self._prepare_for_class(inputs_dict, model_class)
 
-            model = model_class(config)
-            output = model(**inputs_dict)
-
-            model.parallelize()
-
-            def cast_to_gpu(dictionary):
+            def cast_to_device(dictionary, device):
                 output = {}
                 for k, v in dictionary.items():
                     if isinstance(v, torch.Tensor):
-                        output[k] = v.to("cuda:0")
+                        output[k] = v.to(device)
                     else:
                         output[k] = v
                 return output
 
-            parallel_output = model(**cast_to_gpu(inputs_dict))
+            model = model_class(config)
+            output = model(**cast_to_device(inputs_dict, "cpu"))
+
+            model.parallelize()
+
+            parallel_output = model(**cast_to_device(inputs_dict, "cuda:0"))
 
             for value, parallel_value in zip(output, parallel_output):
                 if isinstance(value, torch.Tensor):
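
Note on the test change above: the patch captures a baseline forward pass on CPU before model.parallelize() is called, then compares it against the output of the parallelized model fed cuda:0 inputs, with the new cast_to_device helper handling both placements. A minimal, self-contained sketch of that comparison pattern follows; TinyModel and its parallelize() are hypothetical stand-ins (parallelize() here just moves the module to cuda:0 so the sketch runs on a single GPU), not the library's real parallelizable models.

# Sketch of the CPU-baseline-then-parallel comparison pattern (assumed toy model).
import torch
from torch import nn


def cast_to_device(dictionary, device):
    # Move only tensor values; pass non-tensor entries through unchanged.
    return {k: v.to(device) if isinstance(v, torch.Tensor) else v for k, v in dictionary.items()}


class TinyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(4, 4)

    def forward(self, input_ids):
        # Return a tuple so the output can be zipped like the real test does.
        return (self.proj(input_ids),)

    def parallelize(self):
        # Hypothetical stand-in for a real model-parallel API:
        # simply places the whole module on the first GPU.
        self.to("cuda:0")


if torch.cuda.is_available():
    inputs_dict = {"input_ids": torch.randn(2, 4)}
    model = TinyModel()

    # Baseline forward pass on CPU, taken before any device placement.
    output = model(**cast_to_device(inputs_dict, "cpu"))

    model.parallelize()
    parallel_output = model(**cast_to_device(inputs_dict, "cuda:0"))

    for value, parallel_value in zip(output, parallel_output):
        # Loose tolerance: CPU and GPU matmul kernels can differ slightly.
        assert torch.allclose(value, parallel_value.to("cpu"), atol=1e-5)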