Skip to content
This repository was archived by the owner on Apr 28, 2023. It is now read-only.

Commit 59c3629

Browse files
Update C++ tests to follow TC coding guide
1 parent 2cd79b8 commit 59c3629

13 files changed: +266 additions, −268 deletions

test/test_autotuner.cc

Lines changed: 20 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -96,13 +96,14 @@ TEST_F(ATenCompilationUnitTest, LayerNorm) {
9696
std::vector<at::Tensor> outputs;
9797

9898
static constexpr auto TC = R"TC(
99-
def layernorm(float(T, B, C) I) -> (O, mean, centered, var) {
100-
mean(t, b) +=! I(t, b, c) / C
101-
centered(t, b, c) = I(t, b, c) - mean(t, b)
102-
var(t, b) +=! centered(t, b, c) * centered(t, b, c)
103-
var(t, b) = (var(t, b)) / C
104-
O(t, b, c) = centered(t, b, c) / rsqrt(var(t, b))
105-
}
99+
def layernorm(float(T, B, C) I) -> (O, mean, centered, var) {
100+
mean(t, b) +=! I(t, b, c) / C
101+
centered(t, b, c) = I(t, b, c) - mean(t, b)
102+
103+
var(t, b) +=! centered(t, b, c) * centered(t, b, c)
104+
var(t, b) = var(t, b) / C
105+
O(t, b, c) = centered(t, b, c) / rsqrt(var(t, b))
106+
}
106107
)TC";
107108
auto options = tc::CudaMappingOptions::makeNaiveCudaMappingOptions();
108109
auto name = "layernorm";
@@ -119,9 +120,9 @@ TEST_F(ATenCompilationUnitTest, MatmulA) {
119120
std::vector<at::Tensor> outputs;
120121

121122
static constexpr auto TC = R"TC(
122-
def matmul(float(M,N) A, float(N,K) B) -> (output) {
123-
output(i, j) +=! A(i, kk) * B(kk, j)
124-
}
123+
def matmul(float(M,N) A, float(N,K) B) -> (output) {
124+
output(m, k) +=! A(m, r_n) * B(r_n, k)
125+
}
125126
)TC";
126127
auto options = tc::CudaMappingOptions::makeNaiveCudaMappingOptions();
127128
auto name = "matmul";
@@ -138,9 +139,9 @@ TEST_F(ATenCompilationUnitTest, MatmulB) {
138139
std::vector<at::Tensor> outputs;
139140

140141
static constexpr auto TC = R"TC(
141-
def matmul(float(M,N) A, float(N,K) B) -> (output) {
142-
output(i, j) +=! A(i, kk) * B(kk, j)
143-
}
142+
def matmul(float(M,N) A, float(N,K) B) -> (output) {
143+
output(m, k) +=! A(m, r_n) * B(r_n, k)
144+
}
144145
)TC";
145146
auto options = tc::CudaMappingOptions::makeNaiveCudaMappingOptions();
146147
auto name = "matmul";
@@ -157,9 +158,9 @@ TEST_F(ATenCompilationUnitTest, MatmulC) {
157158
std::vector<at::Tensor> outputs;
158159

159160
static constexpr auto TC = R"TC(
160-
def matmul(float(M,N) A, float(N,K) B) -> (output) {
161-
output(i, j) +=! A(i, kk) * B(kk, j)
162-
}
161+
def matmul(float(M,N) A, float(N,K) B) -> (output) {
162+
output(m, k) +=! A(m, r_n) * B(r_n, k)
163+
}
163164
)TC";
164165
auto options = tc::CudaMappingOptions::makeNaiveCudaMappingOptions();
165166
auto name = "matmul";
@@ -176,9 +177,9 @@ TEST_F(ATenCompilationUnitTest, TensorDot) {
176177
std::vector<at::Tensor> outputs;
177178

178179
static constexpr auto TC = R"TC(
179-
def tensordot(float(N, C1, C2, H, W) I0, float(N, C2, C3, H, W) I1) -> (O) {
180-
O(n, c1, c3, h, w) +=! I0(n, c1, c2, h, w) * I1(n, c2, c3, h, w)
181-
}
180+
def tensordot(float(N, C1, C2, H, W) I0, float(N, C2, C3, H, W) I1) -> (O) {
181+
O(n, c1, c3, h, w) +=! I0(n, c1, r_c2, h, w) * I1(n, r_c2, c3, h, w)
182+
}
182183
)TC";
183184
auto options = tc::CudaMappingOptions::makeConvolutionCudaMappingOptions();
184185
auto name = "tensordot";

test/test_autotuner_utility.cc

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -64,9 +64,9 @@ TEST(RestoreCandidates, NoCache) {
6464
}
6565

6666
static constexpr auto tc_ = R"(
67-
def matmul(float(M,N) A, float(N,K) B) -> (output) {
68-
output(m, k) +=! A(m, nn) * B(nn, k)
69-
})";
67+
def matmul(float(M,N) A, float(N,K) B) -> (output) {
68+
output(m, k) +=! A(m, r_n) * B(r_n, k)
69+
})";
7070

7171
void EnableCaches() {
7272
tc::CudaCache::enableCache();

test/test_caffe2.cc

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,7 @@ TEST_F(Caffe2CopyTest, DISABLED_TcCopyOp_Gradient1D) {
7676
auto AddInput =
7777
TestHarness::AddDeterministicallyRandomInput<float, CUDAContext>;
7878
AddInput(w, {M}, "I");
79-
AddInput(w, {M}, "O_grad");
79+
AddInput(w, {M}, "g_O");
8080
};
8181
OperatorDef def =
8282
TestHarness::ConfigureCUDA("TcCopyOp", {"I"}, {"O"}, {strategyArg});
@@ -99,7 +99,7 @@ TEST_F(Caffe2CopyTest, DISABLED_TcCopyOp_Gradient2D) {
9999
auto AddInput =
100100
TestHarness::AddDeterministicallyRandomInput<float, CUDAContext>;
101101
AddInput(w, {M, N}, "I");
102-
AddInput(w, {M, N}, "O_grad");
102+
AddInput(w, {M, N}, "g_O");
103103
};
104104
OperatorDef def =
105105
TestHarness::ConfigureCUDA("TcCopyOp", {"I"}, {"O"}, {strategyArg});
@@ -122,7 +122,7 @@ TEST_F(Caffe2CopyTest, DISABLED_TcCopyOp_Gradient3D) {
122122
auto AddInput =
123123
TestHarness::AddDeterministicallyRandomInput<float, CUDAContext>;
124124
AddInput(w, {M, N, P}, "I");
125-
AddInput(w, {M, N, P}, "O_grad");
125+
AddInput(w, {M, N, P}, "g_O");
126126
};
127127
OperatorDef def =
128128
TestHarness::ConfigureCUDA("TcCopyOp", {"I"}, {"O"}, {strategyArg});
@@ -145,7 +145,7 @@ TEST_F(Caffe2CopyTest, DISABLED_TcCopyOp_Gradient4D) {
145145
auto AddInput =
146146
TestHarness::AddDeterministicallyRandomInput<float, CUDAContext>;
147147
AddInput(w, {M, N, P, Q}, "I");
148-
AddInput(w, {M, N, P, Q}, "O_grad");
148+
AddInput(w, {M, N, P, Q}, "g_O");
149149
};
150150
OperatorDef def =
151151
TestHarness::ConfigureCUDA("TcCopyOp", {"I"}, {"O"}, {strategyArg});
@@ -168,7 +168,7 @@ TEST_F(Caffe2CopyTest, DISABLED_TcCopyOp_Gradient5D) {
168168
auto AddInput =
169169
TestHarness::AddDeterministicallyRandomInput<float, CUDAContext>;
170170
AddInput(w, {M, N, P, Q, R}, "I");
171-
AddInput(w, {M, N, P, Q, R}, "O_grad");
171+
AddInput(w, {M, N, P, Q, R}, "g_O");
172172
};
173173
OperatorDef def =
174174
TestHarness::ConfigureCUDA("TcCopyOp", {"I"}, {"O"}, {strategyArg});
@@ -201,7 +201,7 @@ TEST_F(Caffe2Test, DISABLED_TcMatMulOp_Gradient) {
201201
TestHarness::AddDeterministicallyRandomInput<float, CUDAContext>;
202202
AddInput(w, {M, K}, "I");
203203
AddInput(w, {K, N}, "W");
204-
AddInput(w, {M, N}, "O_grad");
204+
AddInput(w, {M, N}, "g_O");
205205
};
206206

207207
CudaMappingOptions options = tc::makeBaseCliStrategy()
@@ -572,7 +572,7 @@ TEST_F(Caffe2Test, DISABLED_TcConvolutionOp_Gradient) {
572572
AddInput(w, {NN, C, H, W}, "I");
573573
AddInput(w, {F, C, KH, KW}, "filter");
574574
AddInput(w, {F}, "bias");
575-
AddInput(w, {NN, F, H - KH + 1, W - KW + 1}, "H_grad");
575+
AddInput(w, {NN, F, H - KH + 1, W - KW + 1}, "g_H");
576576
};
577577

578578
CudaMappingOptions options =

test/test_compilation_cache.cc

Lines changed: 13 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -996,7 +996,7 @@ TEST(
996996
* std::vector<at::Tensor> inputs_;
997997
* int M;
998998
* static constexpr auto tc_ = R"(
999-
* def fcrelu(float(B,M) I, float(N,M) W1, float(N) B1) -> (O1) {
999+
* def fcrelu(float(B,M) I, float(N,M) W1, float(N) B1) -> (O1) {
10001000
* O1(b, n) += I(b, m) * W1(n, m)
10011001
* O1(b, n) = O1(b, n) + B1(n)
10021002
* O1(b, n) = fmax(O1(b, n), 0)
@@ -1026,9 +1026,9 @@ class MatMulTester {
10261026
std::vector<at::Tensor> inputs_;
10271027
int M;
10281028
static constexpr auto tc_ = R"(
1029-
def matmul(float(M,N) A, float(N,K) B) -> (output) {
1030-
output(m, k) +=! A(m, nn) * B(nn, k)
1031-
})";
1029+
def matmul(float(M,N) A, float(N,K) B) -> (output) {
1030+
output(m, k) +=! A(m, r_n) * B(r_n, k)
1031+
})";
10321032
};
10331033

10341034
class ConvolutionTester {
@@ -1061,11 +1061,12 @@ class ConvolutionTester {
10611061
int KH;
10621062
int KW;
10631063
static constexpr auto tc_ = R"(
1064-
def convolution(float(N,C,H,W) I, float(O,C,KH,KW) W1, float(O) B)
1065-
-> (tmp, O1) {
1066-
tmp(n, o, h, w) +=! I(n, c, h + kh, w + kw) * W1(o, c, kh, kw)
1067-
O1(n, o, h, w) = tmp(n, o, h, w) + B(o)
1068-
})";
1064+
def convolution(float(N,C,H,W) I, float(O,C,KH,KW) W1, float(O) B)
1065+
-> (tmp, O1)
1066+
{
1067+
tmp(n, o, h, w) +=! I(n, r_c, h + r_kh, w + r_kw) * W1(o, r_c, r_kh, r_kw)
1068+
O1(n, o, h, w) = tmp(n, o, h, w) + B(o)
1069+
})";
10691070
};
10701071

10711072
class CompilationCacheTest : public ::testing::Test {
@@ -1357,9 +1358,9 @@ TEST_F(CompilationCacheTest, Serialization) {
13571358

13581359
TEST(CompilationCache, ManualInjection) {
13591360
static constexpr auto tc = R"(
1360-
def add(float(N) A, float(N) B) -> (output) {
1361-
output(i) = A(i) + B(i)
1362-
})";
1361+
def add(float(N) A, float(N) B) -> (output) {
1362+
output(n) = A(n) + B(n)
1363+
})";
13631364

13641365
tc::ManualCudaCache::enableCache();
13651366
tc::ATenCompilationUnit<tc::CudaTcExecutor> atCompl;

0 commit comments

Comments (0)