EPAMP_unoffload_test.py
# pylint: disable=C0103, C0301
import logging
import sys
import argparse
import numpy as np
import networkx as nx
import utils
from numpy import inf
from computeNc import computeNc
from buildFci import buildFci
from EPAMP_unoffload_from_void import unoffload
import random
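
# Repository-local dependencies: utils, computeNc, buildFci and EPAMP_unoffload_from_void
# are assumed to live next to this script (they are not PyPI packages).
# Example invocation:  python EPAMP_unoffload_test.py --loglevel debug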
def main():
    # small simulation to test the unoffload function
    RTT = 0.106 # RTT edge-cloud
    M = 100 # n. microservices
    delay_increase_target = 0.03 # maximum delay increase tolerated by unoffloading
    lambda_val = 20 # requests per second
    Ne = 1e9 # bitrate cloud-edge
    S_edge_b = np.zeros(M) # initial state
    S_edge_b[M-1] = 1 # last component represents the user and must be set to one
    Cost_cpu_edge = 1.3 # cost of CPU at the edge
    Cost_mem_edge = 1.3 # cost of memory at the edge
    Cost_cpu_cloud = 1 # cost of CPU at the cloud
    Cost_mem_cloud = 1 # cost of memory at the cloud
    max_n_parents = 3 # max number of parent microservices per node (plain variable, avoids shadowing the random module)
    Fcm_range_min = 0.1 # min value of microservice call frequency
    Fcm_range_max = 0.5 # max value of microservice call frequency
    Acpu_quota = 0.5 # CPU quota
    Acpu_range_min = 1 # min value of requested CPU quota per instance-set
    Acpu_range_max = 32 # max value of requested CPU quota per instance-set
    Rs_range_min = 1000 # min value of response size in bytes
    Rs_range_max = 50000 # max value of response size in bytes
    Rs = np.random.randint(Rs_range_min,Rs_range_max,M) # random response size in bytes
    Rs[M-1] = 0 # the user (istio ingress) has no response size
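    # Convention used throughout: index M-1 is the user / istio ingress, which
    # stays at the edge, originates the requests (Fcm[M-1,0]=1 below) and has
    # zero response size and zero CPU request.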
    # build dependency graph
    Fcm = np.zeros([M,M]) # microservice call frequency matrix
    for i in range(1,M-1):
        n_parent = np.random.randint(1,max_n_parents)
        for j in range(n_parent):
            a = np.random.randint(i)
            Fcm[a,i] = 1
    # set random values for the microservice call frequency matrix
    for i in range(0,M-1):
        for j in range(0,M-1):
            Fcm[i,j] = np.random.uniform(Fcm_range_min,Fcm_range_max) if Fcm[i,j]>0 else 0
    Fcm[M-1,0] = 1 # the user calls microservice 0 (the ingress microservice)
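    # All edges added above go from a lower-indexed to a higher-indexed
    # microservice, and no edge points into M-1, so the call graph (including
    # the user->ingress edge) is a DAG; every simple path starting at the user
    # M-1 is enumerated below and binary-encoded over the M microservices.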
    # deploy x dependency paths at random on the edge
    G = nx.DiGraph(Fcm) # Create microservice dependency graph
    dependency_paths_b = np.empty((0,M), int) # Storage of binary-based (b) encoded dependency paths
    for ms in range(M-1):
        paths_n = list(nx.all_simple_paths(G, source=M-1, target=ms))
        for path_n in paths_n:
            path_b = np.zeros((1,M),int)
            path_b[0,path_n] = 1 # Binary-based (b) encoding of the dependency path
            dependency_paths_b = np.append(dependency_paths_b,path_b,axis=0)
    l = len(dependency_paths_b)
    x = 10
    random_values = np.random.choice(range(l), size=x, replace=False)
    for j in random_values:
        S_edge_b = np.minimum(S_edge_b + dependency_paths_b[j],1)
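    # Full state layout (inferred from the void-state construction below): the
    # first M entries of a 2M-long vector refer to the cloud instance-sets, the
    # last M entries to the edge instance-sets of the same microservices.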
    S_b = np.concatenate((np.ones(M), S_edge_b)) # (2*M,) full state
    S_b[M-1] = 0 # the user (istio ingress) is not deployed in the cloud
    # set random values for CPU and memory requests
    Acpu_void = np.random.randint(Acpu_range_min,Acpu_range_max+1,size=M) * Acpu_quota
    Acpu_void[M-1] = 0 # user has no CPU request
    Acpu_void = np.concatenate((Acpu_void, np.zeros(M))) # (2*M,) vector of CPU requests for the void state
    Amem_void = np.zeros(2*M)
    Qcpu = np.ones(2*M) # CPU quantum in cpu sec
    Qmem = np.zeros(2*M) # Memory quantum in bytes
    S_b_void = np.concatenate((np.ones(M), np.zeros(M))) # (2*M,) state with no instance-set in the edge
    S_b_void[M-1] = 0 # user is not in the cloud
    S_b_void[2*M-1] = 1 # user is at the edge
    Fci_void = np.matrix(buildFci(S_b_void, Fcm, M)) # instance-set call frequency matrix of the void state
    Nci_void = computeNc(Fci_void, M, 2) # number of instance calls per user request in the void state
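    # Assumption: buildFci maps the M x M microservice call frequencies onto the
    # 2M cloud/edge instance-sets of the given placement, and computeNc derives
    # the average number of calls each instance-set receives per user request.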
    # compute Acpu and Amem for the current state
    # assumption is that cloud resources are reduced proportionally to the reduction of the number of times instances are called
    Fci = np.matrix(buildFci(S_b, Fcm, M)) # instance-set call frequency matrix of the current state
    Nci = computeNc(Fci, M, 2) # number of instance calls per user request in the current state
    Acpu = Acpu_void.copy()
    Amem = Amem_void.copy()
    utils.computeResourceShift(Acpu,Amem,Nci,Acpu_void,Amem_void,Nci_void)
    Cost_edge = utils.computeCost(Acpu, Amem, Qcpu, Qmem, Cost_cpu_edge, Cost_mem_edge, Cost_cpu_cloud, Cost_mem_cloud)[0]
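    # Assumption: utils.computeResourceShift rescales the CPU/memory vectors in
    # place according to the ratio Nci/Nci_void, and utils.computeCost returns a
    # tuple whose first element is taken here as the edge cost of the placement.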
    # internal delays set to zero
    Di = np.zeros(2*M)
    # parameters for the unoffload function
    params = {
        'S_edge_b': S_edge_b,
        'Acpu': Acpu,
        'Amem': Amem,
        'Qcpu': Qcpu,
        'Qmem': Qmem,
        'Fcm': Fcm,
        'M': M,
        'lambd': lambda_val,
        'Rs': Rs,
        'Di': Di,
        'delay_increase_target': delay_increase_target,
        'RTT': RTT,
        'Ne': Ne,
        'Cost_cpu_edge': Cost_cpu_edge,
        'Cost_mem_edge': Cost_mem_edge,
        'locked': None,
        'dependency_paths_b': None,
        'u_limit': 2,
        'max_added_dp': 1000000,
        'min_added_dp': -1,
    }
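    # 'locked' and 'dependency_paths_b' are left as None: presumably no
    # microservice placement is locked and unoffload enumerates the candidate
    # dependency paths itself (assumption based on the parameter names).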
    # Call the unoffload function
    result_list = unoffload(params)
    result = result_list[1]
    print(f"Initial config:\n {np.argwhere(S_edge_b==1).squeeze()}, Cost: {Cost_edge}")
    print(f"Result for unoffload:\n {np.argwhere(result['S_edge_b']==1).squeeze()}, Cost: {result['Cost']}, delay increase: {result['delay_increase']}, cost decrease: {result['cost_decrease']}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument( '-log',
                         '--loglevel',
                         default='warning',
                         help='Provide logging level. Example --loglevel debug, default=warning' )
    args = parser.parse_args()
    logging.basicConfig(stream=sys.stdout, level=args.loglevel.upper(), format='%(asctime)s EPAMP unoffload %(levelname)s %(message)s')
    logging.info('Logging now setup.')
    seed = 150271
    np.random.seed(seed)
    random.seed(seed)
    main()