# PyTorch Models: export torchvision classification/segmentation models to ONNX.
# NOTE(review): tag was unpinned (implicit :latest); pinned to match the 1.8.1
# generation used by the HRNET stage below — confirm this tag suits the export script.
FROM pytorch/pytorch:1.8.1-cuda11.1-cudnn8-runtime AS PT
RUN pip3 install --no-cache-dir scipy
# wget fetches the UNet test image; imagemagick provides `convert` for resizing.
RUN apt-get update && apt-get install -y --no-install-recommends \
        imagemagick \
        wget \
    && rm -rf /var/lib/apt/lists/*
COPY scripts/get_cls_model_from_pytorch.py /

WORKDIR /models/vision/classification/alexnet
RUN /get_cls_model_from_pytorch.py alexnet

WORKDIR /models/vision/classification/densenet
RUN /get_cls_model_from_pytorch.py densenet121

WORKDIR /models/vision/classification/googlenet
RUN /get_cls_model_from_pytorch.py googlenet

# inception_v3 takes an explicit output name and a 299x299 input size
# (presumably because it differs from the 224 default — see the script).
WORKDIR /models/vision/classification/inception
RUN /get_cls_model_from_pytorch.py inception_v3 inception_v3.onnx 299

WORKDIR /models/vision/classification/mobilenet
RUN /get_cls_model_from_pytorch.py mobilenet_v2

WORKDIR /models/vision/classification/squeezenet
RUN /get_cls_model_from_pytorch.py squeezenet1_0
RUN /get_cls_model_from_pytorch.py squeezenet1_1

WORKDIR /models/vision/classification/vgg
RUN /get_cls_model_from_pytorch.py vgg16
RUN /get_cls_model_from_pytorch.py vgg19

# UNet: export the model, then fetch and resize a sample image.
COPY scripts/get_unet.py /tmp
WORKDIR /models/vision/segmentation/unet
RUN python3 /tmp/get_unet.py
# Fetch, resize, and delete the original in ONE layer so 0test.png never
# persists in an intermediate layer (a later `rm` would not shrink the image).
RUN wget https://github.com/zhixuhao/unet/raw/master/img/0test.png \
    && convert 0test.png -resize 256x256 test.jpg \
    && rm 0test.png
# HRNet: build the pose-estimation model and export it to ONNX.
FROM pytorch/pytorch:1.8.1-cuda11.1-cudnn8-devel AS HRNET
RUN apt-get update && apt-get install -y --no-install-recommends \
        gcc \
        git \
        libgeos-dev \
        libglib2.0-dev \
        libsm6 \
        libxext6 \
        libxrender-dev \
        wget \
    && rm -rf /var/lib/apt/lists/*
# BUG FIX: 'yacs>=0.1.5' must be quoted — unquoted, the shell parses '>=0.1.5'
# as an output redirection (to a file named "=0.1.5"), silently dropping the
# version constraint.
RUN pip3 install --no-cache-dir EasyDict==1.7 opencv-python==3.4.8.29 \
        shapely==1.6.4 Cython pandas pyyaml json_tricks scikit-image \
        'yacs>=0.1.5' tensorboardX==1.6 pycocotools gdown
WORKDIR /tmp
RUN mkdir -p images annot /models/vision/pose_estimation
RUN git clone --depth=1 https://github.com/leoxiaobin/deep-high-resolution-net.pytorch
# Pretrained weights live on Google Drive; gdown handles the confirm token.
RUN python3 -c "import gdown; gdown.download('https://drive.google.com/uc?id=1_wn2ifmoQprBrFvUCDedjPON4Y6jsN-v', '/tmp/pose_hrnet_w32_256x256.pth')"
RUN wget 'https://upload-images.jianshu.io/upload_images/1877813-ff9b9c6b0e013006.jpg?imageMogr2/auto-orient/strip|imageView2/2/w/1200/format/webp' \
        -O images/005808361.jpg
# Build the NMS extension in place; `cd` kept in one RUN so the working
# directory of later instructions is unaffected.
RUN cd deep-high-resolution-net.pytorch/lib/nms && \
    python3 setup_linux.py build_ext --inplace
ENV PYTHONPATH=/tmp/deep-high-resolution-net.pytorch/lib
COPY scripts/hrnet/hrnet_cfg.yaml scripts/hrnet/get_hrnet.py ./
COPY scripts/hrnet/test.json annot
RUN python3 get_hrnet.py --cfg hrnet_cfg.yaml TEST.MODEL_FILE pose_hrnet_w32_256x256.pth
RUN mv pose_hrnet_w32_256x256.onnx input.in output.in /models/vision/pose_estimation

# YOLOv5 (same stage, reusing the PyTorch install above)
WORKDIR /tmp
# BUG FIX: `apt install` ran against a stale package index from an earlier
# layer; always pair update+install in one layer (and use apt-get, not apt).
RUN apt-get update && apt-get install -y --no-install-recommends \
        libgl1-mesa-glx \
        mesa-utils \
    && rm -rf /var/lib/apt/lists/*
RUN git clone https://github.com/ultralytics/yolov5
WORKDIR /tmp/yolov5
# Pin to a known-good revision of the export script.
RUN git checkout -b tmp c03d590
RUN pip3 install --no-cache-dir -r requirements.txt # base requirements
# BUG FIX: quote the '>=' specifiers — unquoted they are shell redirections,
# which silently discarded the coremltools/onnx version constraints.
RUN pip3 install --no-cache-dir 'coremltools>=4.1' 'onnx>=1.8.1' scikit-learn==0.19.2
RUN PYTHONPATH=/tmp/yolov5 python3 models/export.py --weights yolov5l.pt \
        --img 640 --batch 1
WORKDIR /models/vision/detection/yolo
RUN mv /tmp/yolov5/yolov5l.onnx .
# ONNX models: fetch selected models from the onnx/models LFS repository.
FROM alpine/git:v2.30.1 AS ONNX
RUN apk add --no-cache git-lfs
WORKDIR /tmp
RUN git clone --depth=1 https://github.com/onnx/models
WORKDIR /tmp/models
# Pull only the files we need: -I is the include filter, -X="" clears the
# default exclude filter so the include actually takes effect.
RUN git-lfs pull -X="" -I="caffenet-3.onnx"
RUN git-lfs pull -X="" -I="yolov3-10.onnx"
RUN git-lfs pull -X="" -I="inception-v1-9.onnx"
RUN git-lfs pull -X="" -I="resnet18-v1-7.onnx"
RUN git-lfs pull -X="" -I="resnet50-v2-7.onnx"
RUN git-lfs pull -X="" -I="resnet101-v2-7.onnx"
RUN git-lfs pull -X="" -I="shufflenet-9.onnx"
# BUG FIX: was misspelled "bertsqad-10.onnx" — the include filter matched
# nothing, so the later mv shipped an un-downloaded LFS pointer stub instead
# of the real model.
RUN git-lfs pull -X="" -I="bertsquad-10.onnx"

WORKDIR /models/vision/classification/caffenet
RUN mv /tmp/models/vision/classification/caffenet/model/caffenet-3.onnx .

WORKDIR /models/vision/classification/inception
RUN mv /tmp/models/vision/classification/inception_and_googlenet/inception_v1/model/inception-v1-9.onnx .

WORKDIR /models/vision/classification/resnet
RUN mv /tmp/models/vision/classification/resnet/model/resnet50-v2-7.onnx .
RUN mv /tmp/models/vision/classification/resnet/model/resnet18-v1-7.onnx .
RUN mv /tmp/models/vision/classification/resnet/model/resnet101-v2-7.onnx .

WORKDIR /models/vision/classification/shufflenet
RUN mv /tmp/models/vision/classification/shufflenet/model/shufflenet-9.onnx .

WORKDIR /models/vision/detection/yolo
RUN mv /tmp/models/vision/object_detection_segmentation/yolov3/model/yolov3-10.onnx .

WORKDIR /models/text/comprehension/bert
RUN mv /tmp/models/text/machine_comprehension/bert-squad/model/bertsquad-10.onnx .

# The onnx repo's backend node tests double as unit-test fixtures.
WORKDIR /tmp
RUN git clone --depth=1 -b rel-1.8.0 https://github.com/onnx/onnx.git
RUN mv onnx/onnx/backend/test/data/node /unittests
# MNIST Simple TF Model: train a small model and stage it with MNIST test data.
FROM tensorflow/tensorflow:1.14.0 AS TF
# BUG FIX: `apt-get install` without a paired `apt-get update` in the same
# layer runs against a stale (possibly empty) package index.
RUN apt-get update && apt-get install -y --no-install-recommends wget \
    && rm -rf /var/lib/apt/lists/*
# Fail pipelines when wget (left side of the pipe) fails, not just gunzip.
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
# Absolute WORKDIR (was relative "mnist_simple", which only resolved to
# /mnist_simple because the default working directory is "/").
WORKDIR /mnist_simple
COPY scripts/mnist_simple_train.py .
RUN python mnist_simple_train.py
WORKDIR /models/vision/classification/mnist_simple
RUN mv /mnist_simple/mnist_simple.pb .
# MNIST test set, via the web.archive.org mirror of yann.lecun.com.
RUN wget -qO- https://web.archive.org/web/20160828233817/http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz | gunzip -c > test_image
RUN wget -qO- https://web.archive.org/web/20160828233817/http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz | gunzip -c > test_label
# Data: download sample/test images and the CaffeNet reference model.
# Stage names are case-insensitive; normalized to DATA to match the
# `COPY --from=DATA` in the final stage.
FROM curlimages/curl:7.76.0 AS DATA
# WORKDIR creates missing directories, so no explicit `mkdir -p` is needed.
WORKDIR /tmp/models/vision/test_images
# -f makes curl fail on HTTP errors instead of silently saving an HTML error
# page under the image filename; -sSL keeps output quiet but follows redirects.
RUN curl -fsSL -o plane.jpg http://images.cocodataset.org/test2017/000000030207.jpg
RUN curl -fsSL -o food.jpg http://images.cocodataset.org/test2017/000000228503.jpg
RUN curl -fsSL -o sport.jpg http://images.cocodataset.org/test2017/000000133861.jpg
RUN curl -fsSL -o dog.jpg https://raw.githubusercontent.com/pytorch/hub/master/images/dog.jpg
WORKDIR /tmp/models/detection
RUN curl -fsSL -o test.jpg http://images.cocodataset.org/test2017/000000133861.jpg
WORKDIR /tmp/models/vision/classification/caffenet
RUN curl -fsSL -o caffenet.prototxt https://raw.githubusercontent.com/BVLC/caffe/master/models/bvlc_reference_caffenet/deploy.prototxt
RUN curl -fsSL -o caffenet.caffemodel http://dl.caffe.berkeleyvision.org/bvlc_reference_caffenet.caffemodel
WORKDIR /tmp/models/vision/detection/yolo
RUN curl -fsSL -o test.jpg http://images.cocodataset.org/test2017/000000133861.jpg
# Collect all Models & Data into one final image.
# Pin the base tag: a bare `alpine` floats with :latest and breaks
# reproducibility of the collected image.
FROM alpine:3.13
RUN apk add --no-cache tree
COPY --from=PT /models /models
COPY --from=ONNX /models /models
COPY --from=HRNET /models /models
COPY --from=TF /models /models
COPY --from=DATA /tmp/models /models
COPY --from=ONNX /unittests /unittests
# Exec form: tree becomes PID 1 and receives signals directly.
ENTRYPOINT ["tree", "/models"]