Skip to content

Commit 05d29a9

Browse files
authored
[doc] update examples.md and installation.md (#1702)
* correct AOT device type
* cosmetic changes
1 parent ba674f4 commit 05d29a9

File tree

2 files changed

+13
-13
lines changed

2 files changed

+13
-13
lines changed

docs/tutorials/examples.md

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -180,7 +180,7 @@ data = data.to(memory_format=torch.channels_last)
180180
model = model.to("xpu")
181181
data = data.to("xpu")
182182
model = torch.xpu.optimize(model, dtype=torch.float32)
183-
######################################################
183+
#################### code changes ####################
184184
185185
with torch.no_grad():
186186
model(data)
@@ -207,7 +207,7 @@ data = torch.randint(vocab_size, size=[batch_size, seq_length])
207207
model = model.to("xpu")
208208
data = data.to("xpu")
209209
model = torch.xpu.optimize(model, dtype=torch.float32)
210-
######################################################
210+
#################### code changes ####################
211211
212212
with torch.no_grad():
213213
model(data)
@@ -237,7 +237,7 @@ data = data.to(memory_format=torch.channels_last)
237237
model = model.to("xpu")
238238
data = data.to("xpu")
239239
model = torch.xpu.optimize(model, dtype=torch.float32)
240-
######################################################
240+
#################### code changes ####################
241241
242242
with torch.no_grad():
243243
d = torch.rand(1, 3, 224, 224)
@@ -271,7 +271,7 @@ data = torch.randint(vocab_size, size=[batch_size, seq_length])
271271
model = model.to("xpu")
272272
data = data.to("xpu")
273273
model = torch.xpu.optimize(model, dtype=torch.float32)
274-
######################################################
274+
#################### code changes ####################
275275
276276
with torch.no_grad():
277277
d = torch.randint(vocab_size, size=[batch_size, seq_length])
@@ -311,7 +311,7 @@ data = data.to(memory_format=torch.channels_last)
311311
model = model.to("xpu")
312312
data = data.to("xpu")
313313
model = torch.xpu.optimize(model, dtype=torch.bfloat16)
314-
######################################################
314+
#################### code changes ####################
315315
316316
with torch.no_grad():
317317
################################# code changes ######################################
@@ -341,7 +341,7 @@ data = torch.randint(vocab_size, size=[batch_size, seq_length])
341341
model = model.to("xpu")
342342
data = data.to("xpu")
343343
model = torch.xpu.optimize(model, dtype=torch.bfloat16)
344-
######################################################
344+
#################### code changes ####################
345345
346346
with torch.no_grad():
347347
################################# code changes ######################################
@@ -374,7 +374,7 @@ data = data.to(memory_format=torch.channels_last)
374374
model = model.to("xpu")
375375
data = data.to("xpu")
376376
model = torch.xpu.optimize(model, dtype=torch.bfloat16)
377-
######################################################
377+
#################### code changes ####################
378378
379379
with torch.no_grad():
380380
d = torch.rand(1, 3, 224, 224)
@@ -408,7 +408,7 @@ data = torch.randint(vocab_size, size=[batch_size, seq_length])
408408
model = model.to("xpu")
409409
data = data.to("xpu")
410410
model = torch.xpu.optimize(model, dtype=torch.bfloat16)
411-
######################################################
411+
#################### code changes ####################
412412
413413
with torch.no_grad():
414414
d = torch.randint(vocab_size, size=[batch_size, seq_length])
@@ -448,7 +448,7 @@ data = data.to(memory_format=torch.channels_last)
448448
model = model.to("xpu")
449449
data = data.to("xpu")
450450
model = torch.xpu.optimize(model, dtype=torch.float16)
451-
######################################################
451+
#################### code changes ####################
452452
453453
with torch.no_grad():
454454
################################# code changes ######################################
@@ -478,7 +478,7 @@ data = torch.randint(vocab_size, size=[batch_size, seq_length])
478478
model = model.to("xpu")
479479
data = data.to("xpu")
480480
model = torch.xpu.optimize(model, dtype=torch.float16)
481-
######################################################
481+
#################### code changes ####################
482482
483483
with torch.no_grad():
484484
################################# code changes ######################################
@@ -511,7 +511,7 @@ data = data.to(memory_format=torch.channels_last)
511511
model = model.to("xpu")
512512
data = data.to("xpu")
513513
model = torch.xpu.optimize(model, dtype=torch.float16)
514-
######################################################
514+
#################### code changes ####################
515515
516516
with torch.no_grad():
517517
d = torch.rand(1, 3, 224, 224)
@@ -545,7 +545,7 @@ data = torch.randint(vocab_size, size=[batch_size, seq_length])
545545
model = model.to("xpu")
546546
data = data.to("xpu")
547547
model = torch.xpu.optimize(model, dtype=torch.float16)
548-
######################################################
548+
#################### code changes ####################
549549
550550
with torch.no_grad():
551551
d = torch.randint(vocab_size, size=[batch_size, seq_length])

docs/tutorials/installation.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -94,7 +94,7 @@ $ pip install dist/*.whl
9494
Please refer to [AOT documentation](./AOT.md) for how to configure `USE_AOT_DEVLIST`.
9595

9696
```bash
97-
$ export USE_AOT_DEVLIST='dg2-g10-c0'
97+
$ export USE_AOT_DEVLIST='ats-m150'
9898
```
9999

100100
### Install Intel® Extension for PyTorch\*:

0 commit comments

Comments (0)