diff --git a/mxrec_add_ons/rec_for_torch/operators/hstu_dense_forward/op_host/tiling_policy.cpp b/mxrec_add_ons/rec_for_torch/operators/hstu_dense_forward/op_host/tiling_policy.cpp
index 320487da9126a198e0c374481a2b1a2d530b1e4d..51b9e07d06a3fd480cbf0cdf9bb6ae47fb117be1 100644
--- a/mxrec_add_ons/rec_for_torch/operators/hstu_dense_forward/op_host/tiling_policy.cpp
+++ b/mxrec_add_ons/rec_for_torch/operators/hstu_dense_forward/op_host/tiling_policy.cpp
@@ -31,7 +31,7 @@ ShapeRange::ShapeRange(int64_t lbound, int64_t ubound, int64_t mutiple, const ch
 bool ShapeRange::Check(int64_t val) const
 {
     OPS_CHECK((val < lbound || val > ubound || val % mutiple != 0),
-              OPS_LOG_E("%s must meet range[%lld %lld] and mutiple of [%lld]. but get value %lld\n", name, lbound,
+              OPS_LOG_E("", "%s must meet range[%lld %lld] and mutiple of [%lld]. but get value %lld\n", name, lbound,
                         ubound, mutiple, val), return false);
     return true;
 }
diff --git a/mxrec_add_ons/rec_for_torch/torch_plugin/torch_demo/hstu_dense/test_hstu_dense_forward_demo.py b/mxrec_add_ons/rec_for_torch/torch_plugin/torch_demo/hstu_dense/test_hstu_dense_forward_demo.py
index 6603996675a143b7abae8de410ebbbe62b3a96f4..194d6d2970717a7e643bf450ffa9e36ef82333ae 100644
--- a/mxrec_add_ons/rec_for_torch/torch_plugin/torch_demo/hstu_dense/test_hstu_dense_forward_demo.py
+++ b/mxrec_add_ons/rec_for_torch/torch_plugin/torch_demo/hstu_dense/test_hstu_dense_forward_demo.py
@@ -239,6 +239,34 @@ class TestHstuJaggedDemo:
     def test_hstu_dens_forward_2048bs(self, head_num, max_seq_len, head_dim, enable_bias, mask_type, silu_scale,
                                       data_type):
         self.execute(2048, max_seq_len, head_num, head_dim, enable_bias, mask_type, silu_scale, data_type)
+
+    @pytest.mark.parametrize("head_num", [255])
+    @pytest.mark.parametrize("max_seq_len", [16])
+    @pytest.mark.parametrize("head_dim", [256])
+    @pytest.mark.parametrize("enable_bias", [True])
+    @pytest.mark.parametrize("mask_type", [mask_custom])
+    @pytest.mark.parametrize("silu_scale", [1 / 1024])
+    @pytest.mark.parametrize("data_type", [torch.bfloat16])
+    @pytest.mark.skipif(get_chip(), reason="This test case is Skipped for Ascend310P.")
+    def test_hstu_dens_forward_head_num_255(self, head_num, max_seq_len, head_dim, enable_bias, mask_type, silu_scale,
+                                            data_type):
+        with pytest.raises(RuntimeError) as e_info:
+            self.execute(20, max_seq_len, head_num, head_dim, enable_bias, mask_type, silu_scale, data_type)
+        assert "head num must meet range[2 8] and mutiple of [2]. but get value 255" in str(e_info.value)
+
+    @pytest.mark.parametrize("head_num", [2])
+    @pytest.mark.parametrize("max_seq_len", [16])
+    @pytest.mark.parametrize("head_dim", [255])
+    @pytest.mark.parametrize("enable_bias", [True])
+    @pytest.mark.parametrize("mask_type", [mask_custom])
+    @pytest.mark.parametrize("silu_scale", [1 / 1024])
+    @pytest.mark.parametrize("data_type", [torch.bfloat16])
+    @pytest.mark.skipif(get_chip(), reason="This test case is Skipped for Ascend310P.")
+    def test_hstu_dens_forward_head_dim_255(self, head_num, max_seq_len, head_dim, enable_bias, mask_type, silu_scale,
+                                            data_type):
+        with pytest.raises(RuntimeError) as e_info:
+            self.execute(20, max_seq_len, head_num, head_dim, enable_bias, mask_type, silu_scale, data_type)
+        assert "dim size must meet range[16 512] and mutiple of [16]. but get value 255" in str(e_info.value)
 
 
 class TestHstuNormalDemo: