# Dockerfile.rocm
# BSD 3-Clause License
# Copyright 2023 Advanced Micro Devices, Inc.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# VERSION is an optional suffix appended to the rocm/pytorch repository name
# (e.g. "-nightly"); TAG selects the image tag.
ARG VERSION=""
ARG TAG=latest
FROM rocm/pytorch${VERSION}:${TAG}
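# Example build command (a sketch; "-nightly" is one possible VERSION suffix,
# any available rocm/pytorch tag works for TAG, and the image name is
# arbitrary):
#   docker build -f Dockerfile.rocm \
#     --build-arg VERSION=-nightly --build-arg TAG=latest \
#     -t flash-attention-rocm .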
USER root
COPY ./ /workspace/flash-attention
WORKDIR /workspace/flash-attention
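# The build context is expected to be a flash-attention checkout; its git
# submodules are fetched by the next step.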
# Initialize git submodules, then apply the bundled hipify_patch.patch to
# PyTorch's hipify utility (hipify_python.py), which translates CUDA sources
# to HIP.
RUN git submodule update --init \
&& PYTHON_SITE_PACKAGES=$(python -c 'import site; print(site.getsitepackages()[0])') \
&& echo "Patching ${PYTHON_SITE_PACKAGES}/torch/utils/hipify/hipify_python.py" \
&& patch "${PYTHON_SITE_PACKAGES}/torch/utils/hipify/hipify_python.py" hipify_patch.patch
# Optional: uncomment the block below to compile Flash Attention when the
# image is built. GPU_ARCHS selects the target AMD GPU architectures and
# MAX_JOBS caps parallel build jobs.
# ARG GPU_ARCHS=native
# ARG MAX_JOBS=-1
# RUN echo "GPU_ARCHS is set to: ${GPU_ARCHS}"
# RUN echo "Ninja building with MAX_JOBS=${MAX_JOBS}"
# # Build Flash Attention for ROCm
# RUN GPU_ARCHS=${GPU_ARCHS} MAX_JOBS=${MAX_JOBS} pip install .
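# A minimal sketch of building inside a running container instead (assumption:
# gfx90a targets MI200-series GPUs; use the architecture reported by rocminfo
# for your device):
#   docker run -it --device=/dev/kfd --device=/dev/dri flash-attention-rocm
#   GPU_ARCHS=gfx90a MAX_JOBS=8 pip install .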