#!/bin/bash
# old_run.sh
cuda_device=2
# Dataset and model selection; use one of the following supported pairs:
# dataset=cifar10
# model=resnet18
# dataset=cifar100
# model=wrn28
# dataset=esc
# model=escnet
dataset=flash
model=flashnet
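# Optional sanity check (a minimal sketch, not part of the original script):
# fail fast if the dataset/model pair is not one of the combinations listed above.
case "${dataset}-${model}" in
    cifar10-resnet18|cifar100-wrn28|esc-escnet|flash-flashnet) ;;
    *) echo "Unsupported dataset/model pair: ${dataset}/${model}" >&2; exit 1 ;;
esac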
# Pass the parameter below to skip pre-training and load a teacher model instead; if omitted, pre-training will be performed:
# -lm ${teacher} \
# Prune ratio and number of partitions; use one of the following pairs:
yaml_version=0 # 0 means no YAML file is used; num_partitions is set to an integer (2, 4, or 8)
# prune_ratio=0.5
# num_partitions=2
prune_ratio=0.75
num_partitions=4
# prune_ratio=0.875
# num_partitions=8
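# The pairs above follow prune_ratio = (num_partitions - 1) / num_partitions.
# Optional consistency check (a sketch, not in the original script); awk is used
# because bash arithmetic is integer-only:
expected_pr=$(awk -v np="${num_partitions}" 'BEGIN { printf "%g", (np - 1) / np }')
if [ "${prune_ratio}" != "${expected_pr}" ]; then
    echo "Warning: prune_ratio=${prune_ratio} does not match (np-1)/np=${expected_pr}" >&2
fi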
##### YAML-file versions are no longer used; num_partitions is an integer instead #####
# # For YAML file selection, the parameter is '-np config/${model}-$2.yaml':
# yaml_version=1
# prune_ratio=0.75
# num_partitions=config/${model}-v${yaml_version}.yaml
teacher=${dataset}-${model}.pt
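# Optional guard (a sketch, not part of the original script): if you uncomment
# '-lm ${teacher}' above, check that the teacher checkpoint actually exists.
if [ ! -f "${teacher}" ]; then
    echo "Note: teacher checkpoint ${teacher} not found; pre-training will run." >&2
fi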
# Arguments: $1 = device, $2 = yaml version tag, $3 = value passed to -lcm,
# $4 = prune ratio, $5 = sparsity type. Note that $2 is only used by the
# commented-out save_name variant below.
prune_finetune() {
    st=$5
    save_name=${dataset}-${model}-$5-np${num_partitions}-pr$4-lcm$3
    # save_name=${dataset}-${model}-$5-np$2-vers${yaml_version}-pr$4-lcm$3-lm${teacher}
    mkdir -p logs  # ensure the log directory exists before redirecting into it
    python -m source.core.run_partition -cfg config/${dataset}.yaml \
        -mf ${save_name}.pt \
        --device $1 \
        -np ${num_partitions} \
        -st ${st} \
        -pfl -lcm $3 -pr $4 -co \
        -lr 0.01 \
        -ep 1 \
        -ree 1 \
        -relr 0.001 \
        >logs/${save_name}.out
}
prune_finetune "cuda:${cuda_device}" v${yaml_version} 0.001 ${prune_ratio} kernel
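# A sketch (not in the original script) of how one might sweep all three
# prune_ratio/num_partitions pairs in a single run; uncomment to use:
# for pair in "0.5 2" "0.75 4" "0.875 8"; do
#     read -r prune_ratio num_partitions <<< "${pair}"
#     prune_finetune "cuda:${cuda_device}" v${yaml_version} 0.001 ${prune_ratio} kernel
# done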