This repository was archived by the owner on Apr 28, 2023. It is now read-only.

Commit 60fcf52

Update docs to match TC coding guide
1 parent f3db396 commit 60fcf52

12 files changed: +136 -132 lines changed

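In short, the hunks below rewrite the TC snippets in the docs to follow the TC coding guide: reduction (contracted) indices are prefixed with r_, gradient tensors are prefixed with g_, and the matmul example now contracts the shared K dimension of float(M,K) A and float(N,K) B (that is, it computes A times the transpose of B). A minimal before/after sketch of the matmul definition, copied from the hunks below:

    # Before: unmarked reduction index "kk", output indexed (i, j)
    old_lang = """
    def matmul(float(M,N) A, float(N,K) B) -> (output) {
        output(i, j) +=! A(i, kk) * B(kk, j)
    }
    """

    # After: reduction index spelled r_k, operands laid out as float(M,K) and float(N,K)
    new_lang = """
    def matmul(float(M,K) A, float(N,K) B) -> (output) {
        output(m, n) +=! A(m, r_k) * B(n, r_k)
    }
    """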

docs/source/framework/caffe2_integration/integration_with_example.rst

Lines changed: 3 additions & 3 deletions
@@ -50,8 +50,8 @@ For demonstration purpose, we will pick a simple example for :code:`matmul` laye
    dyndep.InitOpsLibrary(os.path.join(os.environ.get("CONDA_PREFIX"), "lib/libtc_c2.so"))

    lang = """
-   def matmul(float(M,N) A, float(N,K) B) -> (output) {
-       output(i, j) +=! A(i, kk) * B(kk, j)
+   def matmul(float(M,K) A, float(N,K) B) -> (output) {
+       output(m, n) +=! A(m, r_k) * B(n, r_k)
    }
    """
    mat1, mat2 = np.random.rand(100, 400), np.random.rand(400, 500)
@@ -68,4 +68,4 @@ Future
 ------

 The integration with Caffe2 is very basic at the moment. We do not provide autotuner
-support for Caffe2 and welcome contributions from community.
+support for Caffe2 at the moment and welcome contributions from the community.
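For reference, the updated matmul definition contracts the shared K dimension of both operands, so it computes A times the transpose of B. A minimal NumPy sketch of that computation (NumPy is used here only to illustrate the indexing convention; it is not part of the Caffe2 integration):

    import numpy as np

    # output(m, n) +=! A(m, r_k) * B(n, r_k) sums over r_k, i.e. output = A @ B.T
    M, K, N = 100, 400, 500
    A = np.random.rand(M, K)   # float(M,K) A
    B = np.random.rand(N, K)   # float(N,K) B
    output = A.dot(B.T)        # shape (M, N)
    assert output.shape == (M, N)

Note that the unchanged context line above keeps mat2 with shape (400, 500); under the new float(N,K) layout the second operand would be shaped (500, 400), so that line appears to still assume the old layout.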

docs/source/framework/pytorch_integration/autograd_with_tc.rst

Lines changed: 18 additions & 18 deletions
@@ -27,11 +27,11 @@ Examples
    from torch.nn.parameter import Parameter
    CONV_LANG = """
    def convolution(float(N,C,H,W) I, float(M,C,KH,KW) W1) -> (O) {{
-       O(n, m, h, w) +=! I(n, c, {sh} * h + kh, {sw} * w + kw) * W1(m, c, kh, kw)
+       O(n, m, h, w) +=! I(n, r_c, {sh} * h + r_kh, {sw} * w + r_kw) * W1(m, r_c, r_kh, r_kw)
    }}
-   def convolution_grad(float(N,C,H,W) I, float(M,C,KH,KW) W1, float(N,M,H,W) O_grad) -> (I_grad, W1_grad) {{
-       I_grad(n, c, h, w) +=! O_grad(n, m, {sh} * h - kh, {sw} * w - kw) * W1(m, c, kh, kw)
-       W1_grad(m, c, kh, kw) +=! O_grad(n, m, {sh} * h - kh, {sw} * w - kw) * I(n, c, h, w)
+   def convolution_grad(float(N,C,H,W) I, float(M,C,KH,KW) W1, float(N,M,H,W) g_O) -> (g_I, g_W1) {{
+       g_I(n, c, h, w) +=! g_O( n, r_m, {sh} * h - r_kh, {sw} * w - r_kw) * W1(r_m, c, r_kh, r_kw)
+       g_W1(m, c, kh, kw) +=! g_O(r_n, m, {sh} * r_h - kh, {sw} * r_w - kw) * I(r_n, c, r_h, r_w)
    }}
    """
    N, C, H, W, O, kH, kW, sH, sW = 32, 4, 56, 56, 16, 1, 1, 1, 1
@@ -66,11 +66,11 @@ them, the example for that would be:
    from torch.nn.parameter import Parameter
    CONV_LANG = """
    def convolution(float(N,C,H,W) I, float(M,C,KH,KW) W1) -> (O) {{
-       O(n, m, h, w) +=! I(n, c, {sh} * h + kh, {sw} * w + kw) * W1(m, c, kh, kw)
+       O(n, m, h, w) +=! I(n, r_c, {sh} * h + r_kh, {sw} * w + r_kw) * W1(m, r_c, r_kh, r_kw)
    }}
-   def convolution_grad(float(N,C,H,W) I, float(M,C,KH,KW) W1, float(N,M,H,W) O_grad) -> (I_grad, W1_grad) {{
-       I_grad(n, c, h, w) +=! O_grad(n, m, {sh} * h - kh, {sw} * w - kw) * W1(m, c, kh, kw)
-       W1_grad(m, c, kh, kw) +=! O_grad(n, m, {sh} * h - kh, {sw} * w - kw) * I(n, c, h, w)
+   def convolution_grad(float(N,C,H,W) I, float(M,C,KH,KW) W1, float(N,M,H,W) g_O) -> (g_I, g_W1) {{
+       g_I(n, c, h, w) +=! g_O( n, r_m, {sh} * h - r_kh, {sw} * w - r_kw) * W1(r_m, c, r_kh, r_kw)
+       g_W1(m, c, kh, kw) +=! g_O(r_n, m, {sh} * r_h - kh, {sw} * r_w - kw) * I(r_n, c, r_h, r_w)
    }}
    """
    N, C, H, W, O, kH, kW, sH, sW = 32, 4, 56, 56, 16, 1, 1, 1, 1
@@ -100,11 +100,11 @@ Let's see how to cache options to file when we tune a training layer.
    import torch
    CONV_LANG = """
    def convolution(float(N,C,H,W) I, float(M,C,KH,KW) W1) -> (O) {{
-       O(n, m, h, w) +=! I(n, c, {sh} * h + kh, {sw} * w + kw) * W1(m, c, kh, kw)
+       O(n, m, h, w) +=! I(n, r_c, {sh} * h + r_kh, {sw} * w + r_kw) * W1(m, r_c, r_kh, r_kw)
    }}
-   def convolution_grad(float(N,C,H,W) I, float(M,C,KH,KW) W1, float(N,M,H,W) O_grad) -> (I_grad, W1_grad) {{
-       I_grad(n, c, h, w) +=! O_grad(n, m, {sh} * h - kh, {sw} * w - kw) * W1(m, c, kh, kw)
-       W1_grad(m, c, kh, kw) +=! O_grad(n, m, {sh} * h - kh, {sw} * w - kw) * I(n, c, h, w)
+   def convolution_grad(float(N,C,H,W) I, float(M,C,KH,KW) W1, float(N,M,H,W) g_O) -> (g_I, g_W1) {{
+       g_I(n, c, h, w) +=! g_O( n, r_m, {sh} * h - r_kh, {sw} * w - r_kw) * W1(r_m, c, r_kh, r_kw)
+       g_W1(m, c, kh, kw) +=! g_O(r_n, m, {sh} * r_h - kh, {sw} * r_w - kw) * I(r_n, c, r_h, r_w)
    }}
    """
    N, C, H, W, O, kH, kW, sH, sW = 32, 4, 56, 56, 16, 1, 1, 1, 1
@@ -133,14 +133,14 @@ the example below for how to use it:
    import torch
    LANG = """
    def convolution(float(N, C, H, W) I, float(M, C, KH, KW) W1, float(M) B) -> (tmp, O) {
-       tmp(n, m, h, w) +=! I(n, c, h + kh, w + kw) * W1(m, c, kh, kw)
+       tmp(n, m, h, w) +=! I(n, r_c, h + r_kh, w + r_kw) * W1(m, r_c, r_kh, r_kw)
        O(n, m, h, w) = tmp(n, m, h, w) + B(m)
    }
-   def convolution_grad(float(N, C, H, W) I, float(M, C, KH, KW) W1, float(M) B, float(N, M, H, W) O_grad)
-   -> (I_grad, W1_grad, B_grad) {
-       I_grad(n, c, h, w) +=! O_grad(n, m, h - kh, w - kw) * W1(m, c, kh, kw)
-       W1_grad(m, c, kh, kw) +=! O_grad(n, m, h - kh, w - kw) * I(n, c, h, w)
-       B_grad(m) +=! O_grad(n, m, h, w)
+   def convolution_grad(float(N, C, H, W) I, float(M, C, KH, KW) W1, float(M) B, float(N, M, H, W) g_O)
+   -> (g_I, g_W1, g_B) {
+       g_I(n, c, h, w) +=! g_O( n, r_m, h - r_kh, w - r_kw) * W1(r_m, c, r_kh, r_kw)
+       g_W1(m, c, kh, kw) +=! g_O(r_n, m, r_h - kh, r_w - kw) * I(r_n, c, r_h, r_w)
+       g_B(m) +=! g_O(n, m, h, w)
    }
    """
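A minimal sketch of how the renamed training definitions would be wired up. The doubled braces {{ }} in CONV_LANG are str.format escapes, so the stride placeholders {sh} and {sw} can be substituted before the TC is defined; the tc.define(training=True, backward=...) call shape and the CUDA input setup are assumptions based on what autograd_with_tc.rst describes, not part of this diff:

    import torch
    from torch.nn.parameter import Parameter
    import tensor_comprehensions as tc

    CONV_LANG = """
    def convolution(float(N,C,H,W) I, float(M,C,KH,KW) W1) -> (O) {{
        O(n, m, h, w) +=! I(n, r_c, {sh} * h + r_kh, {sw} * w + r_kw) * W1(m, r_c, r_kh, r_kw)
    }}
    def convolution_grad(float(N,C,H,W) I, float(M,C,KH,KW) W1, float(N,M,H,W) g_O) -> (g_I, g_W1) {{
        g_I(n, c, h, w) +=! g_O( n, r_m, {sh} * h - r_kh, {sw} * w - r_kw) * W1(r_m, c, r_kh, r_kw)
        g_W1(m, c, kh, kw) +=! g_O(r_n, m, {sh} * r_h - kh, {sw} * r_w - kw) * I(r_n, c, r_h, r_w)
    }}
    """
    N, C, H, W, M, kH, kW, sH, sW = 32, 4, 56, 56, 16, 1, 1, 1, 1
    # {{ }} becomes { } after format(), leaving a plain TC string with the strides filled in
    lang = CONV_LANG.format(sh=sH, sw=sW)
    # assumed keyword names for the training entry point
    convolution = tc.define(lang, training=True, name="convolution", backward="convolution_grad")
    I = Parameter(torch.randn(N, C, H, W).cuda())
    W1 = Parameter(torch.randn(M, C, kH, kW).cuda())
    out = convolution(I, W1)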

docs/source/framework/pytorch_integration/autotuning_layers.rst

Lines changed: 14 additions & 14 deletions
@@ -24,8 +24,8 @@ An example demonstrating each step above is:
    import tensor_comprehensions as tc
    import torch
    lang = """
-   def matmul(float(M,N) A, float(N,K) B) -> (output) {
-       output(i, j) +=! A(i, kk) * B(kk, j)
+   def matmul(float(M,K) A, float(N,K) B) -> (output) {
+       output(m, n) +=! A(m, r_k) * B(n, r_k)
    }
    """
    matmul = tc.define(lang, name="matmul")
@@ -108,8 +108,8 @@ An example for how to pass options:
    import tensor_comprehensions as tc
    import torch
    lang = """
-   def matmul(float(M,N) A, float(N,K) B) -> (output) {
-       output(i, j) +=! A(i, kk) * B(kk, j)
+   def matmul(float(M,K) A, float(N,K) B) -> (output) {
+       output(m, n) +=! A(m, r_k) * B(n, r_k)
    }
    """
    matmul = tc.define(lang, name="matmul")
@@ -134,8 +134,8 @@ argument to the autotuning call. There are two ways of caching the tuned options
    import tensor_comprehensions as tc
    import torch
    lang = """
-   def matmul(float(M,N) A, float(N,K) B) -> (output) {
-       output(i, j) +=! A(i, kk) * B(kk, j)
+   def matmul(float(M,K) A, float(N,K) B) -> (output) {
+       output(m, n) +=! A(m, r_k) * B(n, r_k)
    }
    """
    matmul = tc.define(lang, name="matmul")
@@ -151,8 +151,8 @@ argument to the autotuning call. There are two ways of caching the tuned options
    import tensor_comprehensions as tc
    import torch
    lang = """
-   def matmul(float(M,N) A, float(N,K) B) -> (output) {
-       output(i, j) +=! A(i, kk) * B(kk, j)
+   def matmul(float(M,K) A, float(N,K) B) -> (output) {
+       output(m, n) +=! A(m, r_k) * B(n, r_k)
    }
    """
    matmul = tc.define(lang, name="matmul")
@@ -182,8 +182,8 @@ For example:
    import tensor_comprehensions as tc
    import torch
    lang = """
-   def matmul(float(M,N) A, float(N,K) B) -> (output) {
-       output(i, j) +=! A(i, kk) * B(kk, j)
+   def matmul(float(M,K) A, float(N,K) B) -> (output) {
+       output(m, n) +=! A(m, r_k) * B(n, r_k)
    }
    """
    matmul = tc.define(lang, name="matmul")
@@ -207,8 +207,8 @@ For example:

    import tensor_comprehensions as tc
    lang = """
-   def matmul(float(M,N) A, float(N,K) B) -> (output) {
-       output(i, j) +=! A(i, kk) * B(kk, j)
+   def matmul(float(M,K) A, float(N,K) B) -> (output) {
+       output(m, n) +=! A(m, r_k) * B(n, r_k)
    }
    """
    matmul = tc.define(lang, name="matmul")
@@ -237,8 +237,8 @@ Below is example describing the above usage:
    import tensor_comprehensions as tc
    cache = "{}/matmul_3_4_5".format(PATH_PREFIX)
    lang = """
-   def matmul(float(M,N) A, float(N,K) B) -> (output) {
-       output(i, j) +=! A(i, kk) * B(kk, j)
+   def matmul(float(M,K) A, float(N,K) B) -> (output) {
+       output(m, n) +=! A(m, r_k) * B(n, r_k)
    }
    """
    matmul = tc.define(lang, name="matmul")
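A sketch of exercising the updated definition with the cache file named above. The autotune call shape, the PATH_PREFIX value, and the CUDA input setup are assumptions in the spirit of autotuning_layers.rst, not taken from this diff; note that with the float(N,K) layout the second operand for a matmul_3_4_5 cache is a 5x4 tensor:

    import torch
    import tensor_comprehensions as tc

    PATH_PREFIX = "/tmp"  # hypothetical location for the options cache
    lang = """
    def matmul(float(M,K) A, float(N,K) B) -> (output) {
        output(m, n) +=! A(m, r_k) * B(n, r_k)
    }
    """
    matmul = tc.define(lang, name="matmul")
    mat1 = torch.randn(3, 4).cuda()   # float(M,K) A with M=3, K=4
    mat2 = torch.randn(5, 4).cuda()   # float(N,K) B with N=5, K=4
    # assumed call shape: tune on concrete inputs and cache the best options to a file
    matmul.autotune(mat1, mat2, cache="{}/matmul_3_4_5".format(PATH_PREFIX))
    out = matmul(mat1, mat2)          # out has shape (3, 5)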

docs/source/framework/pytorch_integration/frequently_asked_questions.rst

Lines changed: 11 additions & 12 deletions
@@ -20,10 +20,10 @@ as input not output.
 .. code::

     def softmax(float(N, D) I) -> (O, maxVal, expDistance) {
-        maxVal(n) max= I(n, d)
-        expDistance(n, d) = exp(I(n, d) - maxVal(n))
-        expSum(n) +=! expDistance(n, d)
-        O(n, d) = expDistance(n, d) / expSum(n)
+        maxVal(n) max=! I(n, d)
+        expDistance(n, d) = exp(I(n, d) - maxVal(n))
+        expSum(n) +=! expDistance(n, d)
+        O(n, d) = expDistance(n, d) / expSum(n)
     }

 **Valid TC**
@@ -33,15 +33,15 @@ The correct TC would be:
 .. code::

     def softmax(float(N, D) I) -> (O, maxVal, expDistance, expSum) {
-        maxVal(n) max= I(n, d)
-        expDistance(n, d) = exp(I(n, d) - maxVal(n))
-        expSum(n) +=! expDistance(n, d)
-        O(n, d) = expDistance(n, d) / expSum(n)
+        maxVal(n) max=! I(n, d)
+        expDistance(n, d) = exp(I(n, d) - maxVal(n))
+        expSum(n) +=! expDistance(n, d)
+        O(n, d) = expDistance(n, d) / expSum(n)
     }

 Can I re-use a temporary variable?
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-You can as long as the tensor dependencies are strictly DAG. For example:
+You can as long as the tensor dependencies form a DAG. For example:

 **Invalid**

@@ -54,19 +54,18 @@ You can as long as the tensor dependencies are strictly DAG. For example:
         O(n, d) = O(n, d) / tmp(n)
     }

-This TC is invalid because :code:`tmp` and :code:`O(n, d)` have cyclic dependency.
+This TC is invalid because :code:`tmp` and :code:`O(n, d)` have a cyclic dependency.

 **Valid**

 .. code::

     def softmax(float(N, D) I) -> (O, expsum, maxVal) {
-        maxVal(n) max= I(n, d)
+        maxVal(n) max=! I(n, d)
         expsum(n) +=! exp(I(n, d) - maxVal(n))
         O(n, d) = exp(I(n, d) - maxVal(n)) / expsum(n)
     }

-
 Autotuner
 ---------
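As a sanity check on the valid softmax above, here is a NumPy sketch of the same three statements; NumPy is used only for illustration and is not part of the FAQ:

    import numpy as np

    I = np.random.rand(8, 16)                                 # float(N, D) I
    maxVal = I.max(axis=1, keepdims=True)                     # maxVal(n) max=! I(n, d)
    expsum = np.exp(I - maxVal).sum(axis=1, keepdims=True)    # expsum(n) +=! exp(I(n, d) - maxVal(n))
    O = np.exp(I - maxVal) / expsum                           # O(n, d) = exp(I(n, d) - maxVal(n)) / expsum(n)
    assert np.allclose(O.sum(axis=1), 1.0)                    # rows of a softmax sum to one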

docs/source/framework/pytorch_integration/getting_started.rst

Lines changed: 2 additions & 2 deletions
@@ -70,8 +70,8 @@ For demonstration purpose, we will pick a simple example for :code:`matmul` laye
    import tensor_comprehensions as tc
    import torch
    lang = """
-   def matmul(float(M,N) A, float(N,K) B) -> (output) {
-       output(i, j) +=! A(i, kk) * B(kk, j)
+   def matmul(float(M,K) A, float(N,K) B) -> (output) {
+       output(m, n) +=! A(m, r_k) * B(n, r_k)
    }
    """
    matmul = tc.define(lang, name="matmul")
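A minimal end-to-end sketch of running the updated definition; the torch.randn/.cuda() input setup is an assumption in the spirit of getting_started.rst, and the second operand is laid out as float(N,K):

    import torch
    import tensor_comprehensions as tc

    lang = """
    def matmul(float(M,K) A, float(N,K) B) -> (output) {
        output(m, n) +=! A(m, r_k) * B(n, r_k)
    }
    """
    matmul = tc.define(lang, name="matmul")
    A = torch.randn(100, 400).cuda()   # float(M,K)
    B = torch.randn(500, 400).cuda()   # float(N,K)
    out = matmul(A, B)                 # shape (100, 500), equivalent to A @ B.t()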
