Skip to content

Commit 99e3f0c

Browse files
authored
2.5.10-xpu dockerfile updates (#5086)
* update layers for 2.5.10 xpu
* add pti and ocloc
* update docker and validate RC2
* validate on patched wheels
* remove copy
* remove env var
1 parent b41bb09 commit 99e3f0c

File tree

3 files changed

+58
-58
lines changed

3 files changed

+58
-58
lines changed

docker/Dockerfile.prebuilt

Lines changed: 16 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ ARG DEBIAN_FRONTEND=noninteractive
2323
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
2424

2525
RUN apt-get update && \
26-
apt-get install -y --no-install-recommends --fix-missing \
26+
apt-get install -y --no-install-recommends \
2727
apt-utils \
2828
build-essential \
2929
ca-certificates \
@@ -40,50 +40,37 @@ RUN apt-get update && \
4040
rm -rf /var/lib/apt/lists/*
4141

4242
RUN wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | \
43-
gpg --dearmor --output /usr/share/keyrings/intel-graphics.gpg
44-
RUN echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy/lts/2350 unified" | \
43+
gpg --dearmor --yes --output /usr/share/keyrings/intel-graphics.gpg
44+
RUN echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy unified" | \
4545
tee /etc/apt/sources.list.d/intel-gpu-jammy.list
4646

4747
ARG ICD_VER
48-
ARG LEVEL_ZERO_GPU_VER
48+
ARG OCLOC_VER
4949
ARG LEVEL_ZERO_VER
5050
ARG LEVEL_ZERO_DEV_VER
51+
ARG XPU_SMI_VER
5152

5253
RUN apt-get update && \
53-
apt-get install -y --no-install-recommends --fix-missing \
54-
intel-opencl-icd=${ICD_VER} \
55-
intel-level-zero-gpu=${LEVEL_ZERO_GPU_VER} \
56-
level-zero=${LEVEL_ZERO_VER} \
57-
level-zero-dev=${LEVEL_ZERO_DEV_VER} && \
54+
apt-get install -y --no-install-recommends \
55+
intel-opencl-icd=${ICD_VER} \
56+
intel-ocloc=${OCLOC_VER} \
57+
libze1=${LEVEL_ZERO_VER} \
58+
libze-dev=${LEVEL_ZERO_DEV_VER} \
59+
xpu-smi=${XPU_SMI_VER} && \
5860
apt-get clean && \
5961
rm -rf /var/lib/apt/lists/*
6062

61-
RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB \
62-
| gpg --dearmor | tee /usr/share/keyrings/oneapi-archive-keyring.gpg > /dev/null && \
63-
echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main" \
64-
| tee /etc/apt/sources.list.d/oneAPI.list
65-
66-
ARG DPCPP_VER
67-
ARG MKL_VER
68-
ARG CCL_VER
69-
70-
RUN apt-get update && \
71-
apt-get install -y --no-install-recommends --fix-missing \
72-
intel-oneapi-runtime-dpcpp-cpp=${DPCPP_VER} \
73-
intel-oneapi-runtime-mkl=${MKL_VER} \
74-
intel-oneapi-runtime-ccl=${CCL_VER} && \
75-
apt-get clean && \
76-
rm -rf /var/lib/apt/lists/*
63+
RUN rm -rf /etc/apt/sources.list.d/intel-gpu-jammy.list
7764

7865
ARG PYTHON
7966
RUN apt-get update && apt install -y software-properties-common
8067
RUN add-apt-repository -y ppa:deadsnakes/ppa
8168

8269
RUN apt-cache policy $PYTHON && apt-get update && apt-get install -y \
83-
--no-install-recommends --fix-missing $PYTHON
70+
--no-install-recommends $PYTHON
8471

85-
RUN apt-get update && apt-get install -y --no-install-recommends --fix-missing \
86-
${PYTHON} lib${PYTHON} python3-pip ${PYTHON}-distutils && \
72+
RUN apt-get update && apt-get install -y --no-install-recommends \
73+
${PYTHON} lib${PYTHON} python3-pip ${PYTHON}-distutils && \
8774
apt-get clean && \
8875
rm -rf /var/lib/apt/lists/*
8976

@@ -107,7 +94,7 @@ ARG TORCHVISION_WHL_URL
10794
ARG TORCHAUDIO_WHL_URL
10895
ARG ONECCL_BIND_PT_WHL_URL
10996

110-
RUN python -m pip install numpy
97+
RUN python -m pip install numpy
11198

11299
RUN python -m pip install torch==${TORCH_VERSION} --extra-index-url ${TORCH_WHL_URL} && \
113100
python -m pip install intel_extension_for_pytorch==${IPEX_VERSION} --extra-index-url ${IPEX_WHL_URL} && \

docker/README.md

Lines changed: 31 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -33,18 +33,20 @@ Alternatively, `./build.sh` script has docker build command to install prebuilt
3333
To pull docker images use the following command:
3434

3535
```bash
36-
docker pull intel/intel-extension-for-pytorch:xpu
36+
docker pull intel/intel-extension-for-pytorch:2.5.10-xpu
3737
```
38+
3839
### Running container:
3940

4041
Run the following commands to start Intel® Extension for PyTorch\* GPU container. You can use `-v` option to mount your
4142
local directory into the container. The `-v` argument can be omitted if you do not need
4243
access to a local directory in the container. Pass the video and render groups to your
4344
docker container so that the GPU is accessible.
4445

46+
```bash
47+
IMAGE_NAME=intel/intel-extension-for-pytorch:2.5.10-xpu
4548
```
46-
IMAGE_NAME=intel/intel-extension-for-pytorch:xpu
47-
```
49+
4850
```bash
4951
docker run --rm \
5052
-v <your-local-dir>:/workspace \
@@ -58,44 +60,57 @@ docker run --rm \
5860

5961
#### Verify if XPU is accessible from PyTorch:
6062
You are inside the container now. Run the following command to verify XPU is visible to PyTorch:
63+
6164
```bash
6265
python -c "import torch;print(torch.device('xpu'))"
6366
```
67+
6468
Sample output looks like below:
65-
```
69+
70+
```bash
6671
xpu
6772
```
73+
6874
Then, verify that the XPU device is available to Intel® Extension for PyTorch\*:
75+
6976
```bash
70-
python -c "import intel_extension_for_pytorch as ipex;print(ipex.xpu.is_available())"
77+
python -c "import torch;import intel_extension_for_pytorch as ipex;print(torch.xpu.has_xpu())"
7178
```
79+
7280
Sample output looks like below:
73-
```
81+
82+
```bash
7483
True
7584
```
85+
7686
Use the following command to check whether MKL is enabled as default:
87+
7788
```bash
78-
python -c "import intel_extension_for_pytorch as ipex;print(ipex.xpu.has_onemkl())"
89+
python -c "import torch;import intel_extension_for_pytorch as ipex;print(torch.xpu.has_onemkl())"
7990
```
91+
8092
Sample output looks like below:
81-
```
93+
94+
```bash
8295
True
8396
```
97+
8498
Finally, use the following command to show detailed info of detected device:
99+
85100
```bash
86-
python -c "import torch; import intel_extension_for_pytorch as ipex; print(torch.__version__); print(ipex.__version__); [print(f'[{i}]: {ipex.xpu.get_device_properties(i)}') for i in range(ipex.xpu.device_count())];"
101+
python -c "import torch; import intel_extension_for_pytorch as ipex; print(torch.__version__); print(ipex.__version__); [print(f'[{i}]: {torch.xpu.get_device_properties(i)}') for i in range(torch.xpu.device_count())];"
87102
```
88103

89104
Sample output looks like below:
105+
106+
```bash
107+
2.5.1+cxx11.abi
108+
2.5.10+xpu
109+
[0]: _XpuDeviceProperties(name='Intel(R) Data Center GPU Max 1550', platform_name='Intel(R) Level-Zero', type='gpu', driver_version='1.3.30049', total_memory=65536MB, max_compute_units=448, gpu_eu_count=448, gpu_subslice_count=56, max_work_group_size=1024, max_num_sub_groups=64, sub_group_sizes=[16 32], has_fp16=1, has_fp64=1, has_atomic64=1)
110+
[1]: _XpuDeviceProperties(name='Intel(R) Data Center GPU Max 1550', platform_name='Intel(R) Level-Zero', type='gpu', driver_version='1.3.30049', total_memory=65536MB, max_compute_units=448, gpu_eu_count=448, gpu_subslice_count=56, max_work_group_size=1024, max_num_sub_groups=64, sub_group_sizes=[16 32], has_fp16=1, has_fp64=1, has_atomic64=1)
90111
```
91-
2.1.0.post2+cxx11.abi
92-
2.1.30+xpu
93-
[0]: _DeviceProperties(name='Intel(R) Data Center GPU Max 1550', platform_name='Intel(R) Level-Zero', dev_type='gpu', driver_version='1.3.27642', has_fp64=1, total_memory=65536MB, max_compute_units=448, gpu_eu_count=448)
94-
[1]: _DeviceProperties(name='Intel(R) Data Center GPU Max 1550', platform_name='Intel(R) Level-Zero', dev_type='gpu', driver_version='1.3.27642', has_fp64=1, total_memory=65536MB, max_compute_units=448, gpu_eu_count=448)
95-
```
112+
96113
#### Running your own script
97114

98115
Now you are inside container with Python 3.10, PyTorch, and Intel® Extension for PyTorch\* preinstalled. You can run your own script
99116
to run on Intel GPU.
100-
101-

docker/build.sh

Lines changed: 11 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -1,25 +1,23 @@
11
#!/bin/bash
22

33
if [[ ${IMAGE_TYPE} = "xpu" ]]; then
4-
IMAGE_NAME=intel/intel-extension-for-pytorch:2.1.30-$IMAGE_TYPE
4+
IMAGE_NAME=intel/intel-extension-for-pytorch:2.5.10-$IMAGE_TYPE
55
docker build --build-arg http_proxy=$http_proxy \
66
--build-arg https_proxy=$https_proxy \
77
--build-arg no_proxy=" " \
88
--build-arg NO_PROXY=" " \
99
--build-arg UBUNTU_VERSION=22.04 \
1010
--build-arg PYTHON=python3.10 \
11-
--build-arg ICD_VER=23.43.27642.40-803~22.04 \
12-
--build-arg LEVEL_ZERO_GPU_VER=1.3.27642.40-803~22.04 \
13-
--build-arg LEVEL_ZERO_VER=1.14.0-744~22.04 \
14-
--build-arg LEVEL_ZERO_DEV_VER=1.14.0-744~22.04 \
15-
--build-arg DPCPP_VER=2024.1.0-963 \
16-
--build-arg MKL_VER=2024.1.0-691 \
17-
--build-arg CCL_VER=2021.12.0-309 \
18-
--build-arg TORCH_VERSION=2.1.0.post2+cxx11.abi \
19-
--build-arg IPEX_VERSION=2.1.30+xpu \
20-
--build-arg TORCHVISION_VERSION=0.16.0.post2+cxx11.abi \
21-
--build-arg TORCHAUDIO_VERSION=2.1.0.post2+cxx11.abi \
22-
--build-arg ONECCL_BIND_PT_VERSION=2.1.300+xpu \
11+
--build-arg ICD_VER=24.39.31294.20-1032~22.04 \
12+
--build-arg OCLOC_VER=24.39.31294.21-1032~22.04 \
13+
--build-arg LEVEL_ZERO_VER=1.17.44.0-1022~22.04 \
14+
--build-arg LEVEL_ZERO_DEV_VER=1.17.44.0-1022~22.04 \
15+
--build-arg XPU_SMI_VER=1.2.39-66~22.04 \
16+
--build-arg TORCH_VERSION=2.5.1+cxx11.abi \
17+
--build-arg IPEX_VERSION=2.5.10+xpu \
18+
--build-arg TORCHVISION_VERSION=0.20.1+cxx11.abi \
19+
--build-arg TORCHAUDIO_VERSION=2.5.1+cxx11.abi \
20+
--build-arg ONECCL_BIND_PT_VERSION=2.5.0+xpu \
2321
--build-arg TORCH_WHL_URL=https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ \
2422
--build-arg IPEX_WHL_URL=https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ \
2523
--build-arg TORCHVISION_WHL_URL=https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ \

0 commit comments

Comments (0)