-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathBeta.py
98 lines (88 loc) · 4.53 KB
/
Beta.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
import logging, argparse
import pickle
import numpy as np
from ProbGenerate import Problem, Path
from distributedSolver import Learning
if __name__ == '__main__':
    # Monte-Carlo evaluation of the MAP estimator's relative error, given a
    # rate allocation previously computed by one of the distributed solvers.
    parser = argparse.ArgumentParser(description='Estimate model through MAP',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--graph_type', default="geant", type=str, help='Graph type',
                        choices=['erdos_renyi', 'balanced_tree', 'hypercube', "cicular_ladder", "cycle", "grid_2d",
                                 'lollipop', 'expander', 'hypercube', 'star', 'barabasi_albert', 'watts_strogatz',
                                 'regular', 'powerlaw_tree', 'small_world', 'geant', 'abilene', 'dtelekom',
                                 'servicenetwork', 'ToyExample2'])
    parser.add_argument('--types', default=3, type=int, help='Number of types')
    parser.add_argument('--learners', default=3, type=int, help='Number of learner')
    parser.add_argument('--sources', default=3, type=int, help='Number of nodes generating data')
    parser.add_argument('--solver', type=str, help='solver type',
                        choices=['DFW', 'FW', 'DPGA', 'PGA', 'DMaxFair', 'MaxFair', 'DMaxTP', 'MaxTP'])
    parser.add_argument('--max_datarate', default=8, type=float, help="Maximum data rate of each sources")
    parser.add_argument('--stepsize', default=0.01, type=float, help="stepsize for FW")
    parser.add_argument('--random_seed', default=19930101, type=int, help='Random seed')
    parser.add_argument('--debug_level', default='DEBUG', type=str, help='Debug Level',
                        choices=['INFO', 'DEBUG', 'WARNING', 'ERROR'])
    args = parser.parse_args()

    np.random.seed(args.random_seed + 2023)

    # Resolve the logging-level constant by name. getattr() is the safe,
    # idiomatic replacement for the original eval("logging." + ...).
    args.debug_level = getattr(logging, args.debug_level)
    logging.basicConfig(level=args.debug_level)

    # Load the solver output (rate allocation) written by distributedSolver.
    fname = 'Result_15_{}/Result_{}_{}learners_{}sources_{}types_{}stepsize'.format(
        args.solver, args.graph_type, args.learners, args.sources, args.types, args.stepsize)
    logging.info('Read data from ' + fname)
    with open(fname, 'rb') as f:
        results = pickle.load(f)

    # Load the matching problem instance (graph, priors, learner set, ...).
    fname = 'Problem_15/Problem_{}_{}learners_{}sources_{}types'.format(
        args.graph_type, args.learners, args.sources, args.types)
    logging.info('Read data from ' + fname)
    with open(fname, 'rb') as f:
        P = pickle.load(f)

    learning = Learning(P)
    mean = P.prior['mean']          # per-learner prior means
    covariance = P.prior['cov']     # per-learner prior covariances
    learners = P.learners

    # Monte-Carlo sample sizes: N3 draws of the ground-truth beta,
    # N1 draws of sample counts, N2 feature-set draws per count.
    N1 = 50
    N2 = 50
    N3 = 20

    # results[1] holds the rate allocation used to generate samples;
    # presumably results[0] is the objective trace — TODO confirm in solver.
    result = results[1]

    dist = 0
    for l in learners:
        # Per-learner invariants hoisted out of the N3*N1*N2 sampling loops:
        # the prior-covariance inverse and the prior term of the MAP formula.
        cov_inv = np.linalg.inv(covariance[l])
        prior_term = np.dot(cov_inv, mean[l])
        # NOTE: 'noice' (sic) is the key written by ProbGenerate; do not "fix" it.
        noise = P.prior['noice'][l]

        norm = 0
        for k in range(N3):
            # Draw a ground-truth beta from the learner's Gaussian prior,
            # shaped as a column vector for the linear-model products below.
            beta = np.random.multivariate_normal(mean[l].reshape(len(mean[l])), covariance[l])
            beta = beta.reshape((len(beta), 1))
            for j in range(N1):
                n = learning.generate_sample1(result, l)
                for i in range(N2):
                    features = learning.generate_sample2(n)
                    # Accumulate the Gaussian-posterior sufficient statistics:
                    # a = sum x x^T / sigma_s,  b = sum x y / sigma_s.
                    a = 0
                    b = 0
                    for s in features:
                        for feature in features[s]:
                            a += np.dot(feature, feature.transpose()) / noise[s]
                            # Noisy linear observation: y = x^T beta + eps.
                            y = np.dot(feature.transpose(), beta) + np.random.normal(0, noise[s])
                            b += feature * y / noise[s]
                    # MAP estimate: (a + Sigma^-1)^-1 (Sigma^-1 mu + b).
                    temp1 = np.linalg.inv(a + cov_inv)
                    temp2 = prior_term + b
                    map_l = np.dot(temp1, temp2)
                    # Relative estimation error for this sample.
                    norm += np.linalg.norm(map_l - beta) / np.linalg.norm(beta)
        norm = norm / N1 / N2 / N3
        logging.debug(norm)
        dist += norm

    # Average relative MAP error over all learners.
    distance = dist / len(learners)
    print(distance)

    fname = 'Result_15_{}/beta_{}_{}learners_{}sources_{}types_{}stepsize'.format(
        args.solver, args.graph_type, args.learners, args.sources, args.types, args.stepsize)
    logging.info('Save in ' + fname)
    with open(fname, 'wb') as f:
        pickle.dump(distance, f)