@@ -138,6 +138,9 @@ function setup_master() {
138
138
/^ApiserverToken:/ {token=$2} \
139
139
/^ApiserverDiscoveryToken:/ {token_hash=$2} \
140
140
END {print "sudo kubeadm join " ip ":" port " --token " token " --discovery-token-ca-cert-hash " token_hash}' \' ' ~/vhive/scripts/masterKey.yaml' )
141
+
142
+ server_exec $MASTER_NODE "kubectl taint nodes \$(hostname) node-role.kubernetes.io/control-plane-"
143
+ server_exec $MASTER_NODE "kubectl label nodes \$(hostname) loader-nodetype=master"
141
144
}
142
145
143
146
function setup_vhive_firecracker_daemon() {
@@ -166,9 +169,19 @@ function setup_workers() {
166
169
167
170
if [ "$2" = "MASTER" ]; then
168
171
server_exec $node "sudo ${MASTER_LOGIN_TOKEN}"
172
+ server_exec $node "kubectl taint nodes \$(hostname) node-role.kubernetes.io/control-plane-"
173
+ server_exec $node "kubectl label nodes \$(hostname) loader-nodetype=master"
169
174
echo "Backup master node $node has joined the cluster."
170
175
else
171
176
server_exec $node "sudo ${LOGIN_TOKEN}"
177
+
178
+ if [ "$3" = "LOADER" ]; then
179
+ # First node after the control plane nodes
180
+ server_exec $node "kubectl label nodes \$(hostname) loader-nodetype=monitoring" < /dev/null
181
+ else
182
+ server_exec $node "kubectl label nodes \$(hostname) loader-nodetype=worker" < /dev/null
183
+ fi
184
+
172
185
echo "Worker node $node has joined the cluster."
173
186
fi
174
187
@@ -194,12 +207,16 @@ function setup_workers() {
194
207
for node in "$@"
195
208
do
196
209
# Set up API Server load balancer arguments - Less than because 1 CP is the "main" master node already
197
- HA_SETTING=""
210
+ HA_SETTING="OTHER"
211
+ LOADER_NODE="OTHER"
212
+
198
213
if [ "$NODE_COUNTER" -lt $CONTROL_PLANE_REPLICAS ]; then
199
214
HA_SETTING="MASTER"
215
+ elif [ "$NODE_COUNTER" -eq $CONTROL_PLANE_REPLICAS ]; then
216
+ LOADER_NODE="LOADER"
200
217
fi
201
218
202
- internal_setup "$node" $HA_SETTING &
219
+ internal_setup "$node" "$HA_SETTING" "$LOADER_NODE" &
203
220
let NODE_COUNTER++
204
221
done
205
222
@@ -274,15 +291,17 @@ function copy_k8s_certificates() {
274
291
shift # make argument list only contain worker nodes (drops master node)
275
292
276
293
setup_master
294
+
295
+ # Copy API server certificates from master to each worker node
296
+ copy_k8s_certificates "$@"
297
+
298
+ # Join cluster
277
299
setup_workers "$@"
278
300
279
301
if [ $PODS_PER_NODE -gt 240 ]; then
280
302
extend_CIDR "$@"
281
303
fi
282
304
283
- # Untaint master to schedule knative control plane there
284
- server_exec $MASTER_NODE "kubectl taint nodes \$(hostname) node-role.kubernetes.io/control-plane-"
285
-
286
305
# Notify the master that all nodes have joined the cluster
287
306
server_exec $MASTER_NODE 'tmux send -t master "y" ENTER'
288
307
@@ -292,18 +311,8 @@ function copy_k8s_certificates() {
292
311
namespace_info=$(server_exec $MASTER_NODE "kubectl get namespaces")
293
312
done
294
313
295
- echo "Master node $MASTER_NODE finalized."
296
-
297
- # Copy API server certificates from master to each worker node
298
- copy_k8s_certificates "$@"
299
-
300
314
server_exec $MASTER_NODE 'cd loader; bash scripts/setup/patch_init_scale.sh'
301
315
302
- source $DIR/label.sh
303
-
304
- # Force placement of metrics collectors and instrumentation on the loader node and control plane on master
305
- label_nodes $MASTER_NODE $1 # loader node is second on the list, becoming first after arg shift
306
-
307
316
# patch knative to accept nodeselector
308
317
server_exec $MASTER_NODE "cd loader; kubectl patch configmap config-features -n knative-serving -p '{\"data\": {\"kubernetes.podspec-nodeselector\": \"enabled\"}}'"
309
318
0 commit comments