#!/bin/bash
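# run_train.sh, from a fork of kenshohara/3D-ResNets-PyTorch.
# Fine-tunes a 3D ResNet-34 (700-class pretrained weights, r3d34_K_200ep.pth)
# on the makehuman video dataset, then runs top-5 inference and a top-1
# accuracy evaluation, logging parameters and per-phase timings as it goes.
#
# Optional hardening (an addition, not in the original script): uncomment to
# abort on the first failing command instead of running on after an error.
# set -euo pipefail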
#root_path=$SGE_LOCALDIR/data
root_path=data
video_path=makehuman_videos/jpg
annotation_path=makehuman.json
result_path=results
model_path=models
log_path=logs
dataset=makehuman
#n_classes=2535
n_pretrain_classes=700
pretrain_path=models/r3d34_K_200ep.pth
ft_begin_module=fc
model=resnet
model_depth=34
batch_size=128
n_threads=4
checkpoint=5
# resume_path=$root_path/results/save_200.pth
resume_path=$result_path/save_200.pth
output_topk=5
inference_batch_size=1
dataset_src=data.tar.bz2
pretrain_src=pretrain/r3d34_K_200ep.pth
timestamp=$(date +%s)
result_dst=$result_path/result_$timestamp
log_dst=$log_path/log_$timestamp
# mocap_labels=$HOME/repos/mogen/utils/mocap_labels.json
# blacklist=$HOME/repos/mogen/utils/blacklist.txt
# make/check directory for result/log
mkdir -p $result_dst
mkdir -p $log_path
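# guard (added): fail fast if the dataset archive is missing, since every
# later step depends on the unpacked data
if [ ! -f "$dataset_src" ]; then
    echo "dataset archive not found: $dataset_src" >&2
    exit 1
fi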
# unpack
#tar -jxvf $dataset_src -C $SGE_LOCALDIR > /dev/null
tar -jxvf $dataset_src
n_classes=$(python3 util_scripts/makehuman_json.py --root $root_path --get_n_classes)
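# sanity check (added): everything below assumes the helper printed a class
# count; abort if it came back empty
if [ -z "$n_classes" ]; then
    echo "failed to determine n_classes from $root_path" >&2
    exit 1
fi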
# (optional) copy mocap_labels into the data root when the variable is set
if [ -n "$mocap_labels" ]; then
    cp "$mocap_labels" $root_path
fi
# (optional) copy blacklist into the data root when the variable is set
if [ -n "$blacklist" ]; then
    cp "$blacklist" $root_path
fi
# generate annotation file
python3 util_scripts/makehuman_json.py --root $root_path
# cp $root_path/$annotation_path $result_dst
mkdir -p $root_path/$result_path
mkdir -p $root_path/$model_path
cp $pretrain_src $root_path/$model_path
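# sanity check (added, assuming main.py resolves --pretrain_path under
# --root_path, as the copy above suggests): the weights must exist before
# fine-tuning starts
if [ ! -f "$root_path/$pretrain_path" ]; then
    echo "pretrained model not found: $root_path/$pretrain_path" >&2
    exit 1
fi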
# show params
echo "timestamp: $timestamp" > $log_dst
echo "" >> $log_dst
echo "dataset" >> $log_dst
echo "-------" >> $log_dst
echo "kind: $dataset" >> $log_dst
echo "src: $dataset_src" >> $log_dst
echo "pretrain model: $pretrain_src" >> $log_dst
echo "" >> $log_dst
echo "network" >> $log_dst
echo "-------" >> $log_dst
echo "model: $model" >> $log_dst
echo "model depth: $model_depth" >> $log_dst
echo "batch size: $batch_size" >> $log_dst
echo "# of classes: $n_classes" >> $log_dst
echo "# of pretrain classes: $n_pretrain_classes" >> $log_dst
echo "" >> $log_dst
echo "result" >> $log_dst
echo "------" >> $log_dst
echo $result_dst >> $log_dst
echo "" >> $log_dst
echo "time elapsed (sec)" >> $log_dst
echo "------------------" >> $log_dst
echo "unpack: $SECONDS" >> $log_dst
# training from scratch (kept for reference; the fine-tuning run below is used)
# python3 main.py \
# --root_path $root_path \
# --video_path $video_path \
# --annotation_path $annotation_path \
# --result_path $result_path \
# --dataset $dataset \
# --n_classes $n_classes \
# --model $model \
# --model_depth $model_depth \
# --batch_size $batch_size \
# --n_threads $n_threads \
# --checkpoint $checkpoint
# fine-tuning
python3 main.py \
--root_path $root_path \
--video_path $video_path \
--annotation_path $annotation_path \
--result_path $result_path \
--dataset $dataset \
--n_classes $n_classes \
--n_pretrain_classes $n_pretrain_classes \
--pretrain_path $pretrain_path \
--ft_begin_module $ft_begin_module \
--model $model \
--model_depth $model_depth \
--batch_size $batch_size \
--n_threads $n_threads \
--checkpoint $checkpoint #--no_cuda
echo "eval 1: $SECONDS" >> $log_dst
# eval top5 prob
python3 main.py \
--root_path $root_path \
--video_path $video_path \
--annotation_path $annotation_path \
--result_path $result_path \
--dataset $dataset \
--resume_path $resume_path \
--n_classes $n_classes \
--model $model \
--model_depth $model_depth \
--n_threads $n_threads \
--no_train \
--no_val \
--inference \
--output_topk $output_topk \
--inference_batch_size $inference_batch_size #--no_cuda
echo "eval 2: $SECONDS" >> $log_dst
# eval top1 accuracy
python3 -m util_scripts.eval_accuracy \
$root_path/$annotation_path \
$root_path/$result_path/val.json \
--subset validation \
-k 1 \
--ignore \
--save
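# timing line (added for symmetry with the other logged phases)
echo "accuracy eval: $SECONDS" >> $log_dst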
# echo "copy result: $SECONDS" >> $log_dst
# copy LOCALDIR to HOME
# cp $root_path/$result_path/* $result_dst
# cp $root_path/$annotation_path $result_dst
echo "total: $SECONDS" >> $log_dst
echo "complete!"