diff --git a/test/distributed/test_store.py b/test/distributed/test_store.py
index e557a4835962..757ae7afbe83 100644
--- a/test/distributed/test_store.py
+++ b/test/distributed/test_store.py
@@ -1178,6 +1178,6 @@ class TestClientProtocol(TestCase):
 if __name__ == "__main__":
     if device_type != "cpu":
         assert not torch.get_device_module()._initialized, (
-            "test_distributed must not have initialized {device_type} context on main process"
+            f"test_distributed must not have initialized {device_type} context on main process"
         )
     run_tests()
diff --git a/test/distributed/test_symmetric_memory.py b/test/distributed/test_symmetric_memory.py
index 191e0f50d56b..57c7175daaba 100644
--- a/test/distributed/test_symmetric_memory.py
+++ b/test/distributed/test_symmetric_memory.py
@@ -451,7 +451,7 @@ class AsyncTPTest(MultiProcContinuousTest):
         elif gather_dim == 1:
             leading_dims = (BATCH, M // self.world_size)
         else:
-            raise AssertionError("Invalid scale_mode: {scale_mode}")
+            raise AssertionError(f"Invalid scale_mode: {scale_mode}")
 
         torch.manual_seed(42 + rank)
diff --git a/test/quantization/core/test_workflow_module.py b/test/quantization/core/test_workflow_module.py
index 329266d7c63b..4a2d0ecda4ae 100644
--- a/test/quantization/core/test_workflow_module.py
+++ b/test/quantization/core/test_workflow_module.py
@@ -97,7 +97,7 @@ class TestObserver(QuantizationTestCase):
                                        reduce_range=reduce_range)]
 
         def _get_ref_params(reduce_range, qscheme, dtype, input_scale, min_val, max_val):
-            assert dtype in _INT_DTYPES, "Not supported dtype: {dtype}, supported dtypes are {_INT_DTYPES}"
+            assert dtype in _INT_DTYPES, f"Not supported dtype: {dtype}, supported dtypes are {_INT_DTYPES}"
             eps = torch.tensor([tolerance])
             if dtype in [torch.qint8, torch.int8]:
                 if reduce_range:
diff --git a/test/quantization/pt2e/test_numeric_debugger.py b/test/quantization/pt2e/test_numeric_debugger.py
index a0269bf635b9..cb4aaf86a85c 100644
--- a/test/quantization/pt2e/test_numeric_debugger.py
+++ b/test/quantization/pt2e/test_numeric_debugger.py
@@ -82,7 +82,7 @@ class TestNumericDebugger(TestCase):
                     prev_decomp_op_to_debug_handle_map[prev_decomp_op] == debug_handle
                 ), f"Node {node} has different debug handle {debug_handle}"
-                "than previous node sharing the same decomp op {prev_decomp_op}"
+                f"than previous node sharing the same decomp op {prev_decomp_op}"
 
         bfs_trace_with_node_process(
             model, _extract_debug_handles_with_prev_decomp_op_from_node
diff --git a/test/test_sparse_csr.py b/test/test_sparse_csr.py
index ff3470b6852c..90bd85e230f6 100644
--- a/test/test_sparse_csr.py
+++ b/test/test_sparse_csr.py
@@ -2702,7 +2702,7 @@ class TestSparseCSR(TestCase):
             # Sparse CSR only supports 2D tensors as inputs
             # Fail early to prevent silent success with this test
             if sample.input.ndim != 2:
-                raise ValueError("Expected 2D tensor but got tensor with dimension: {sample.input.ndim}.")
+                raise ValueError(f"Expected 2D tensor but got tensor with dimension: {sample.input.ndim}.")
 
             sample.input = sample.input.to_sparse_csr()
             expect = op(sample.input, *sample.args, **sample.kwargs)
@@ -2726,7 +2726,7 @@ class TestSparseCSR(TestCase):
             # Sparse CSR only supports 2D tensors as inputs
             # Fail early to prevent silent success with this test
             if sample.input.ndim != 2:
-                raise ValueError("Expected 2D tensor but got tensor with dimension: {sample.input.ndim}.")
+                raise ValueError(f"Expected 2D tensor but got tensor with dimension: {sample.input.ndim}.")
 
             sample.input = sample.input.to_sparse_csr()
             expect = op(sample.input, *sample.args, **sample.kwargs)
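
Note (not part of the patch): every hunk above adds a missing `f` prefix to a string that already contains `{...}` placeholders. A minimal standalone sketch of the bug class being fixed, using placeholder values rather than the real observer dtypes: without the prefix, Python keeps the braces as literal text, so the assertion or exception message never shows the offending value.

```python
dtype = "float64"
_INT_DTYPES = ("int8", "uint8")

# Without the f prefix, "{dtype}" stays as literal text in the message.
plain = "Not supported dtype: {dtype}, supported dtypes are {_INT_DTYPES}"
# With the f prefix, the current values are interpolated, as the patch intends.
fixed = f"Not supported dtype: {dtype}, supported dtypes are {_INT_DTYPES}"

print(plain)  # Not supported dtype: {dtype}, supported dtypes are {_INT_DTYPES}
print(fixed)  # Not supported dtype: float64, supported dtypes are ('int8', 'uint8')
```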