acme.sh (TLS certificates)
wget -O - https://get.acme.sh | sh -s email=my@example.com
./acme.sh
./acme.sh --issue -d ar.d.ca -w /home/www/d/d
cd /root/.acme.sh/
./acme.sh --issue --dns -d admin.d.ca --yes-I-know-dns-manual-mode-enough-go-ahead-please
dig TXT _acme-challenge.admin.d.ca
./acme.sh --renew -d admin.d.ca --yes-I-know-dns-manual-mode-enough-go-ahead-please
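Once the TXT record verifies, a typical follow-up (not shown above) is to copy the issued certificate into place with acme.sh's --install-cert; the paths and reload command below are placeholders:
./acme.sh --install-cert -d admin.d.ca \
  --key-file /path/to/private.key \
  --fullchain-file /path/to/public.crt \
  --reloadcmd "systemctl reload httpd"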
kubectl #3: node labels and nodeSelector / nodeName
kubectl get nodes --show-labels
kubectl label nodes <your-node-name> nodegroup=w45
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    env: test
spec:
  containers:
  - name: nginx
    image: nginx
    imagePullPolicy: IfNotPresent
  nodeSelector:
    nodegroup: w45
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    env: test
spec:
  containers:
  - name: nginx
    image: nginx
    imagePullPolicy: IfNotPresent
  nodeName: worker3.domain.com
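To confirm the scheduling actually worked, apply the manifest and check which node the pod landed on (the file name here is a placeholder):
kubectl apply -f nginx-pod.yaml
kubectl get pod nginx -o wide   # NODE column should show a node labeled nodegroup=w45, or worker3.domain.com when nodeName is used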
spec:
  containers:
  - image: hieuvpn/lap:10
    imagePullPolicy: IfNotPresent
    name: lap
    ports:
    - containerPort: 80
      protocol: TCP
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /var/www/html
      name: 100mb
  dnsPolicy: ClusterFirst
  nodeSelector:
    supportnodes: w45
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  terminationGracePeriodSeconds: 30
  volumes:
  - hostPath:
      path: /_STORAGE/support-desk
      type: ""
    name: 100mb
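A quick check that the hostPath volume is mounted and the pod landed on a supportnodes=w45 node (<lap-pod> is a placeholder for the actual pod name):
kubectl get pod <lap-pod> -o wide
kubectl exec <lap-pod> -- ls /var/www/html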
Node.js server (mail form / webhook relay)
mkdir mailform
mv mailform.js mailform
cd mailform/
yum install npm
npm init -y
npm install express express-rate-limit body-parser
node mailform.js
while true; do date; node mailform.js ; sleep 1; done
const express = require('express');
const rateLimit = require('express-rate-limit');
const bodyParser = require('body-parser');
const https = require('https');
const fs = require('fs');

const app = express();

// TLS key/certificate for the HTTPS server
const options = {
  key: fs.readFileSync('private.key'),
  cert: fs.readFileSync('public.crt')
};

// Apply rate limiting middleware
const limiter = rateLimit({
  windowMs: 5 * 60 * 1000, // 5 minutes
  max: 2, // limit each IP to 2 requests per windowMs
  message: 'Too many requests from this IP, please try again later.'
});
app.use(limiter);
app.use(bodyParser.json());

app.options('*', (req, res) => {
  res.status(200).end();
});

// Your webhook endpoint
app.get('/bm/:email', (req, res) => {
  // Handle webhook logic here
  const requestBody = req.params.email;
  console.log('Received webhook request:', requestBody);

  // Options for the POST request
  const postOptions = {
    hostname: 'hostname.com', // Replace with your hostname
    port: 443, // Replace with your port number
    path: '/hooks/catch/y/x/', // '/your/post/endpoint', // Replace with your endpoint path
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Content-Length': Buffer.byteLength(JSON.stringify({ email: requestBody }))
    }
  };

  const zap = https.request(postOptions, (zres) => {
    console.log(`statusCode: ${zres.statusCode}`);
    zres.on('data', (chunk) => {
      console.log('Zapier Response body:', chunk.toString());
    });
  });

  // Send a custom response
  res.status(200).json({ message: 'in : ' + requestBody });

  // Handle request errors
  zap.on('error', (error) => {
    console.error('Error sending zapier request:', error);
  });

  // Write data to request body
  zap.write(JSON.stringify({ email: requestBody }));
  // End the request
  zap.end();
});
//--------END GET--------------

app.post('/contact', (req, res) => {
  // Handle webhook logic here
  let requestBody = ''; // Assuming JSON request body
  req.on('data', chunk => {
    requestBody += chunk;
  });
  req.on('end', () => {
    console.log('Received contact request:', requestBody);

    // Options for the POST request
    const postOptions = {
      hostname: 'hostname.com', // Replace with your hostname
      port: 443, // Replace with your port number
      path: '/hooks/catch/a/b/', // Replace with your endpoint path
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Content-Length': Buffer.byteLength(requestBody)
      }
    };

    const zap = https.request(postOptions, (zres) => {
      console.log(`statusCode: ${zres.statusCode}`);
      zres.on('data', (chunk) => {
        console.log('Zapier Response body:', chunk.toString());
      });
    });

    // Send a custom response
    res.status(200).json({ message: 'zap contact : ' + requestBody });

    // Handle request errors
    zap.on('error', (error) => {
      console.error('Error sending zapier request:', error);
    });

    // Write data to request body
    zap.write(requestBody);
    // End the request
    zap.end();
  });
});
//=======END POST=========

const server = https.createServer(options, app);

// Start the server
const port = process.env.PORT || 3000;
server.listen(port, () => {
  console.log(`Server is running on port ${port}`);
});
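A quick smoke test of both endpoints from the same host (assuming the server is listening on port 3000; -k skips certificate-hostname verification). The rate limiter only allows 2 requests per IP every 5 minutes, and because app.use(bodyParser.json()) consumes application/json bodies before the raw req.on('data') reader in /contact runs, the POST test below deliberately uses a non-JSON content type:
curl -k https://localhost:3000/bm/user@example.com
curl -k -X POST https://localhost:3000/contact -H 'Content-Type: text/plain' --data '{"email":"user@example.com"}'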
kubectl #2: container args, ConfigMap env, and PVC volume
spec:
  containers:
  - args:
    - -c
    - rm -rf /var/www/html/* ; cp -r /_100MB/www/* /var/www/html ; echo "ErrorDocument 404 /index.html" >> /etc/httpd/conf/httpd.conf ; /usr/sbin/httpd ; sleep 5 ; tail -f /var/log/httpd/access_log
    command:
    - /bin/bash
    image: hieuvpn/lap:10
    imagePullPolicy: IfNotPresent
    name: lap
    env:
    - name: RITRUSTEDORIGINS
      value: https://redis-prod.example.org
    envFrom:
    - configMapRef:
        name: myconfigmap
    ports:
    - containerPort: 80
      protocol: TCP
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /_100MB
      name: 100mb
      subPath: support-desk
------------
  terminationGracePeriodSeconds: 30
  volumes:
  - name: 100mb
    persistentVolumeClaim:
      claimName: v100
------------------
apiVersion: v1
kind: ConfigMap
metadata:
  name: myconfigmap
data:
  VARIABLE1: value1
  VARIABLE2: value2
  VARIABLE3: value3
-------------------
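To verify the ConfigMap variables reach the container through envFrom (file and pod names are placeholders):
kubectl apply -f myconfigmap.yaml
kubectl exec <lap-pod> -- env | grep VARIABLE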
sudo /bin/kubectl exec `sudo /bin/kubectl get pods | grep test-app | head -n 1 | cut -d' ' -f1` -- sh -c "rm -rf /_100MB/support-desk/www/*"
sudo /bin/kubectl cp /tmp/www `sudo /bin/kubectl get pods | grep test-app | head -n 1 | cut -d' ' -f1`:/_100MB/support-desk/
sudo /bin/kubectl rollout restart deployment support-desk
Ingress-Nginx Controller
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.2/deploy/static/provider/cloud/deploy.yaml
kubectl create ingress support-desk --class=nginx --rule="support.helpusdefend.org/*=support-desk:8090"
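To check that the controller pods are up and the rule was created:
kubectl get pods -n ingress-nginx
kubectl get ingress support-desk
kubectl describe ingress support-desk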
Redis Cluster
#
# Redis Cluster service
#
apiVersion: v1
kind: Service
metadata:
  name: redis-cluster
  labels:
    app: redis-cluster
    environment: dev
spec:
  publishNotReadyAddresses: true
  ports:
  - port: 6379
    targetPort: 6379
    name: client
  - port: 16379
    targetPort: 16379
    name: gossip
  clusterIP: None
  #type: ClusterIP
  selector:
    app: redis-cluster
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: redis-cluster-pdb
spec:
  selector:
    matchLabels:
      app: redis-cluster
  maxUnavailable: 0
---
#
# Redis configuration file for clustered mode
#
apiVersion: v1
kind: ConfigMap
metadata:
  name: redis-cluster-config
  labels:
    app: redis-cluster
data:
  redis.conf: |+
    cluster-enabled yes
    cluster-require-full-coverage no
    cluster-node-timeout 15000
    cluster-config-file nodes.conf
    cluster-migration-barrier 1
    appendonly yes
    # Other cluster members need to be able to connect
    protected-mode no
    requirepass "xxxxxx"
    masterauth "xxxxxx"
  #
  # A script to bootstrap Stateful Set members as they initialize
  #
  bootstrap-pod.sh: |+
    #!/bin/sh
    set -ex
    # Find which member of the Stateful Set this pod is running
    # e.g. "redis-cluster-0" -> "0"
    PET_ORDINAL=$(cat /etc/podinfo/pod_name | rev | cut -d- -f1)
    MY_SHARD=$(($PET_ORDINAL % $NUM_SHARDS))
    redis-server /conf/redis.conf &
    # TODO: Wait until redis-server process is ready
    sleep 1
    if [ $PET_ORDINAL -lt $NUM_SHARDS ]; then
      # Set up primary nodes. Divide slots into equal(ish) contiguous blocks
      NUM_SLOTS=$(( 16384 / $NUM_SHARDS ))
      REMAINDER=$(( 16384 % $NUM_SHARDS ))
      START_SLOT=$(( $NUM_SLOTS * $MY_SHARD + ($MY_SHARD < $REMAINDER ? $MY_SHARD : $REMAINDER) ))
      END_SLOT=$(( $NUM_SLOTS * ($MY_SHARD+1) + ($MY_SHARD+1 < $REMAINDER ? $MY_SHARD+1 : $REMAINDER) - 1 ))
      PEER_IP=$(perl -MSocket -e "print inet_ntoa(scalar(gethostbyname(\"redis-cluster-0.redis-cluster.$POD_NAMESPACE.svc.cluster.local\")))")
      redis-cli --pass "xxxxxx" cluster meet $PEER_IP 6379
      redis-cli --pass "xxxxxx" cluster addslots $(seq $START_SLOT $END_SLOT)
    else
      # Set up a replica
      PEER_IP=$(perl -MSocket -e "print inet_ntoa(scalar(gethostbyname(\"redis-cluster-$MY_SHARD.redis-cluster.$POD_NAMESPACE.svc.cluster.local\")))")
      redis-cli --pass "xxxxxx" --cluster add-node localhost:6379 $PEER_IP:6379 --cluster-slave
    fi
    wait
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: redis-cluster
spec:
  podManagementPolicy: OrderedReady # default
  serviceName: redis-cluster
  replicas: 6
  selector:
    matchLabels:
      app: redis-cluster # has to match .spec.template.metadata.labels
  template:
    metadata:
      labels:
        app: redis-cluster
      name: redis-cluster
    spec:
      # affinity: # Ensure that each Redis instance is provisioned on a different k8s node
      #   podAntiAffinity:
      #     requiredDuringSchedulingIgnoredDuringExecution:
      #     - labelSelector:
      #         matchExpressions:
      #         - key: "app"
      #           operator: In
      #           values:
      #           - redis-cluster
      #       topologyKey: "kubernetes.io/hostname"
      terminationGracePeriodSeconds: 10
      containers:
      - name: redis-cluster
        image: redis:6.2.6
        ports:
        - containerPort: 6379
          name: client
        - containerPort: 16379
          name: gossip
        command:
        - sh
        args:
        - /conf/bootstrap-pod.sh
        # Ensure that Redis is online before initializing the next node.
        # TODO: Test that the cluster node is init'd properly.
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "redis-cli -h $(hostname) ping"
          initialDelaySeconds: 5
          timeoutSeconds: 5
        securityContext:
          capabilities:
            add:
            - IPC_LOCK
        # Mark a node as down if Redis server stops running
        livenessProbe:
          exec:
            command:
            - sh
            - -c
            - "redis-cli -h $(hostname) ping"
          initialDelaySeconds: 20
          periodSeconds: 3
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: NUM_SHARDS
          value: "3" # If you modify this value, make sure there are at least 2 times the number of replicas
        volumeMounts:
        - name: conf
          mountPath: /conf
          readOnly: false
        - name: podinfo
          mountPath: /etc/podinfo
          readOnly: false
      initContainers:
      # Wait for the redis-cluster service to exist. We need it to resolve the hostnames of our nodes
      - name: init-redis-cluster
        image: busybox:1.28
        command: ['sh', '-c', "until nslookup redis-cluster.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for redis-cluster; sleep 2; done"]
      volumes:
      # Insert our pre-baked Redis configuration file into /conf/redis.conf
      - name: conf
        configMap:
          name: redis-cluster-config
          items:
          - key: redis.conf
            path: redis.conf
          - key: bootstrap-pod.sh # TODO: Move this or extract it into its own Docker image
            path: bootstrap-pod.sh
      # The init container will use this info to find cluster peers
      - name: podinfo
        downwardAPI:
          items:
          - path: "labels"
            fieldRef:
              fieldPath: metadata.labels
          - path: "annotations"
            fieldRef:
              fieldPath: metadata.annotations
          - path: "pod_name"
            fieldRef:
              fieldPath: metadata.name
          - path: "pod_namespace"
            fieldRef:
              fieldPath: metadata.namespace
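Once all six pods are Running, one way to confirm the cluster actually formed (using the password from redis.conf above):
kubectl exec -it redis-cluster-0 -- redis-cli -a xxxxxx cluster info
kubectl exec -it redis-cluster-0 -- redis-cli -a xxxxxx cluster nodes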
---REDISINSIGHT----
apiVersion: v1
kind: Service
metadata:
  # name should not be 'redisinsight', since the service would create
  # environment variables that conflict with the redisinsight application's
  # environment variables `RI_APP_HOST` and `RI_APP_PORT`
  name: redisinsight-service
spec:
  type: LoadBalancer
  ports:
  - port: 80
    targetPort: 5540
  selector:
    app: redisinsight
---
# RedisInsight deployment with name 'redisinsight'
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redisinsight # deployment name
  labels:
    app: redisinsight # deployment label
spec:
  replicas: 1 # a single replica pod
  selector:
    matchLabels:
      app: redisinsight # which pods the deployment manages, as defined by the pod template
  template: # pod template
    metadata:
      labels:
        app: redisinsight # label for pod/s
    spec:
      containers:
      - name: redisinsight # Container name (DNS_LABEL, unique)
        image: redis/redisinsight:latest # repo/image
        imagePullPolicy: IfNotPresent # Installs the latest RedisInsight version
        volumeMounts:
        - name: redisinsight # Pod volumes to mount into the container's filesystem. Cannot be updated.
          mountPath: /data
        ports:
        - containerPort: 5540 # exposed container port and protocol
          protocol: TCP
      volumes:
      - name: redisinsight
        emptyDir: {}
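If the LoadBalancer address is not available yet, a port-forward is enough for a quick look (local port 8080 is arbitrary):
kubectl port-forward service/redisinsight-service 8080:80
# then browse to http://localhost:8080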
[root@k8s-ca-master2 k8s_HA_Centos7]# cat redis-fail.sh
#!/bin/bash
echo -n 0
/usr/bin/redis-cli -h redis-cluster -a xxxxxx cluster nodes | grep -E "(disconnected|fail|noaddr)" | wc -l
[root@k8s-ca-master2 k8s_HA_Centos7]# cat redis-nodes.sh
#!/bin/bash
echo -n 0
/usr/bin/redis-cli -h redis-cluster -a xxxxxx cluster nodes | wc -l
Add to /etc/snmp/snmpd.conf:
exec redis-nodes /bin/bash /etc/snmp/redis-nodes.sh
exec redis-fail /bin/bash /etc/snmp/redis-fail.sh
In graph_image.php, replace 1600000 with 2600000.
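To confirm snmpd is running the two exec scripts, walk the UCD extension table (the 'public' community string is an assumption):
snmpwalk -v2c -c public localhost .1.3.6.1.4.1.2021.8   # UCD-SNMP-MIB::extTable, where exec results appear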
tc qdisc add dev ens33 root netem delay 50ms
tc qdisc del dev ens33 root
Examples
Emulating wide area network delays
This is the simplest example, it just adds a fixed amount of delay to all packets going out of the local Ethernet.
# tc qdisc add dev eth0 root netem delay 100ms
Now a simple ping test to host on the local network should show an increase of 100 milliseconds. The delay is limited by the clock resolution of the kernel (Hz). On most 2.4 systems, the system clock runs at 100 Hz which allows delays in increments of 10 ms. On 2.6, the value is a configuration parameter from 1000 to 100 Hz.
Later examples just change parameters without reloading the qdisc.
Real wide area networks show variability, so it is possible to add random variation.
# tc qdisc change dev eth0 root netem delay 100ms 10ms
This causes the added delay to be 100 ± 10 ms. Network delay variation isn’t purely random, so to emulate that there is a correlation value as well.
# tc qdisc change dev eth0 root netem delay 100ms 10ms 25%
This causes the added delay to be 100 ± 10 ms with the next random element depending 25% on the last one. This isn’t true statistical correlation, but an approximation.
Delay distribution
Typically, the delay in a network is not uniform. It is more common to use something like a normal distribution to describe the variation in delay. The netem discipline can take a table to specify a non-uniform distribution.
# tc qdisc change dev eth0 root netem delay 100ms 20ms distribution normal
The actual tables (normal, pareto, paretonormal) are generated as part of the iproute2 compilation and placed in /usr/lib/tc; so it is possible with some effort to make your own distribution based on experimental data.
Packet loss
Random packet loss is specified in the ‘tc’ command in percent. The smallest possible non-zero value is 2^-32 = 0.0000000232%.
# tc qdisc change dev eth0 root netem loss 0.1%
This causes 1/10th of a percent (i.e. 1 out of 1000) of packets to be randomly dropped.
An optional correlation may also be added. This causes the random number generator to be less random and can be used to emulate packet burst losses.
# tc qdisc change dev eth0 root netem loss 0.3% 25%
This will cause 0.3% of packets to be lost, and each successive probability depends by a quarter on the last one.
Prob(n) = 0.25 × Prob(n-1) + 0.75 × Random
Note that you should use tc qdisc add if you have no rules for that interface, or tc qdisc change if you already have rules for that interface. Attempting to use tc qdisc change on an interface with no rules will give the error "RTNETLINK answers: No such file or directory".
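To see whether a qdisc is already attached (and therefore whether add or change applies):
tc qdisc show dev eth0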
SoftEther VPN
==============vpnserver1=============
======= secureNAT 172.16.101.254, static route table 172.16.99.0/255.255.255.0/172.16.101.1,...
======= bridge VPN hub to tap device 'vpn'
ifconfig eth0 100.100.100.101/24 up
/root/vpnserver/vpnserver start
sleep 10
ifconfig tap_vpn 172.16.101.1/24 up
ip tunnel add tun1 mode ipip remote 100.100.100.201 local 100.100.100.101
ip tunnel add tun2 mode ipip remote 100.100.100.216 local 100.100.100.101
ifconfig tun1 172.16.201.3/31 up
ifconfig tun2 172.16.201.5/31 up
ip route add 172.16.99.1 via 172.16.201.2 dev tun1
ip route add 172.16.99.101 via 172.16.201.4 dev tun2
============vpnserver2=================
=========== secureNAT 172.16.102.254, static route table 172.16.99.0/255.255.255.0/172.16.102.1,...
======= bridge VPN hub to tap device 'vpn'
ifconfig eth0 100.100.100.102/24 up
/root/vpnserver/vpnserver start
sleep 10
ifconfig tap_vpn 172.16.102.1/24 up
ip tunnel add tun1 mode ipip remote 100.100.100.201 local 100.100.100.102
ip tunnel add tun2 mode ipip remote 100.100.100.216 local 100.100.100.102
ifconfig tun1 172.16.202.3/31 up
ifconfig tun2 172.16.202.5/31 up
ip route add 172.16.99.1 via 172.16.202.2 dev tun1
ip route add 172.16.99.101 via 172.16.202.4 dev tun2
echo 1 > /proc/sys/net/ipv4/ip_forward
================ worker6 =========
ifconfig eth0 100.100.100.216/24 up
echo 1 > /proc/sys/net/ipv4/ip_forward
ip tunnel add tun1 mode ipip remote 100.100.100.101 local 100.100.100.216
ip tunnel add tun2 mode ipip remote 100.100.100.102 local 100.100.100.216
ifconfig tun1 172.16.201.4/31 up
ifconfig tun2 172.16.202.4/31 up
ip route add 172.16.101.0/24 via 172.16.201.5 dev tun1
ip route add 172.16.102.0/24 via 172.16.202.5 dev tun2
ip addr add 172.16.99.101/32 dev lo
=============== master 1 ===============
ifconfig eth0 100.100.100.201/24 up
echo 1 > /proc/sys/net/ipv4/ip_forward
ip tunnel add tun1 mode ipip remote 100.100.100.101 local 100.100.100.201
ip tunnel add tun2 mode ipip remote 100.100.100.102 local 100.100.100.201
ifconfig tun1 172.16.201.2/31 up
ifconfig tun2 172.16.202.2/31 up
ip route add 172.16.101.0/24 via 172.16.201.3 dev tun1
ip route add 172.16.102.0/24 via 172.16.202.3 dev tun2
ip addr add 172.16.99.1/32 dev lo
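A quick sanity check from master 1 that the IPIP tunnels and routes work (addresses taken from the tables above; assumes the SoftEther hub bridges are up):
ip route get 172.16.101.254
ping -c 3 172.16.101.1   # tap address on vpnserver1, via tun1
ping -c 3 172.16.102.1   # tap address on vpnserver2, via tun2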
Linux folder monitoring (inotify)
yum -y install inotify-tools
[root@worker-3 ~]# cat watch.sh
#!/bin/bash
TARGET=/var/log/pods/
inotifywait -m -e create -e moved_to --format "%f" $TARGET \
| while read FILENAME
do
echo Detected path $TARGET file $FILENAME
sleep 5
SUBDIR="`ls ${TARGET}${FILENAME}`"
POD="`echo $FILENAME | cut -d'_' -f 2`"
tail -f ${TARGET}$FILENAME/$SUBDIR/0.log | logger -t "$POD" &
done
[root@worker-3 ~]# cat dwatch.sh
#!/bin/bash
TARGET=/var/log/pods/
inotifywait -m -e delete --format "%f" $TARGET \
| while read FILENAME
do
echo Deleted path $TARGET file $FILENAME
kill `ps ax | grep tail | grep $FILENAME | cut -b 1-6`
done
[root@worker-3 ~]#
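To keep both watchers running after logout, a minimal approach is nohup (a systemd unit would be cleaner):
nohup /root/watch.sh  > /var/log/watch.log  2>&1 &
nohup /root/dwatch.sh > /var/log/dwatch.log 2>&1 &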
Kubernetes multi-master install menu (dialog script)
#!/bin/bash
if [ ! -f /usr/bin/dialog ]; then
yum -y install dialog
fi
HEIGHT=20
WIDTH=85
CHOICE_HEIGHT=14
BACKTITLE="DEFEND Test Lab"
TITLE="Install Kubernetes - Multi-master with HAProxy - Ubuntu 18.04 LTS"
MENU="Choose one of the following options:"
IP_LBS=`cat txt |head -n 1 | tail -n 1`
IP_MASTER1=`cat txt |head -n 2 | tail -n 1`
IP_MASTER2=`cat txt |head -n 3 | tail -n 1`
IP_MASTER3=`cat txt |head -n 4 | tail -n 1`
JOIN_TOKEN=`cat txt |head -n 5 | tail -n 1`
while [ "$CHOICE" != "8" ]; do
CHOICE=$(dialog --clear \
--backtitle "$BACKTITLE" \
--title "$TITLE" \
--menu "$MENU" \
$HEIGHT $WIDTH $CHOICE_HEIGHT \
1 "Cluster Configuration"\
2 "Setup Load Balancer"\
3 "Install Primary Controller node"\
4 "Get Cluster Join Command" \
5 "Install Secondary Controller node"\
6 "Install Worker node"\
7 "Install Test HTTP deployment/service"\
8 "Exit" 2>&1 >/dev/tty)
clear
case $CHOICE in
1)
dialog --form "Enter IP Address" 12 60 8 \
"LBS: " 1 1 "$IP_LBS" 1 15 16 0 \
"Master1:" 2 1 "$IP_MASTER1" 2 15 16 0 \
"Master2:" 3 1 "$IP_MASTER2" 3 15 16 0 \
"Master3:" 4 1 "$IP_MASTER3" 4 15 16 0 \
"JoinToken:" 5 1 "$JOIN_TOKEN" 5 15 500 0 \
2> txt
IP_LBS=`cat txt |head -n 1 | tail -n 1`
IP_MASTER1=`cat txt |head -n 2 | tail -n 1`
IP_MASTER2=`cat txt |head -n 3 | tail -n 1`
IP_MASTER3=`cat txt |head -n 4 | tail -n 1`
JOIN_TOKEN=`cat txt |head -n 5 | tail -n 1`
;;
2)
cat << EOF
GCP : Network Services - Create Network LoadBalancer - TCP Load Balancing -
Start Configuration - External - Single region only - Target Pool
Backend configuration - backends Existing Instances - create a health check
Firewall allow ingress TCP/6443 from 130.211.0.0/22 35.191.0.0/16
AWS : TBD
EOF
read -p '[======== Go Back ======] press enter key'
;;
3)
cat common.sh > master1.sh
cat << EOF >> master1.sh
cat << M1EOFM1 > kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: stable
apiServer:
certSANs:
- "$IP_LBS"
controlPlaneEndpoint: "$IP_LBS:6443"
M1EOFM1
kubeadm config images pull
kubeadm init --config=kubeadm-config.yaml
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
sleep 10
kubectl get node
kubectl apply -f https://github.com/weaveworks/weave/releases/download/v2.8.1/weave-daemonset-k8s.yaml
yum -y install wget
wget 'https://github.com/derailed/k9s/releases/download/v0.25.18/k9s_Linux_x86_64.tar.gz'
tar -xvf k9s_Linux_x86_64.tar.gz
chmod +x k9s
mv k9s /usr/bin
k9s
EOF
ssh $IP_MASTER1 '/bin/sh -s' < master1.sh
read -p '[======== Go Back ======] press enter key'
;;
4)
echo '/usr/bin/kubeadm token create --print-join-command' > get_token.sh
JOIN_TOKEN=`ssh $IP_MASTER1 '/bin/sh -s' < get_token.sh`
echo $JOIN_TOKEN
cat << EOF > txt
$IP_LBS
$IP_MASTER1
$IP_MASTER2
$IP_MASTER3
$JOIN_TOKEN
EOF
read -p '[======== Go Back ======] press enter key'
;;
5)
if [ ! -f /usr/bin/sshpass ]; then
yum -y install sshpass
fi
read -p "Enter ssh password for Secondary Controller : " PASSWORD
cat << EOF > copy.sh
USER=root
if [ ! -f /usr/bin/sshpass ]; then
yum -y install sshpass
fi
MASTER_NODE_IPS="$IP_MASTER2 $IP_MASTER3" # the secondary controller (master2 and master3) IPs
for host in \${MASTER_NODE_IPS}; do
sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/pki/ca.crt "\${USER}"@\$host:
sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/pki/ca.key "\${USER}"@\$host:
sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/pki/sa.key "\${USER}"@\$host:
sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/pki/sa.pub "\${USER}"@\$host:
sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/pki/front-proxy-ca.crt "\${USER}"@\$host:
sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/pki/front-proxy-ca.key "\${USER}"@\$host:
sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/pki/etcd/ca.crt "\${USER}"@\$host:etcd-ca.crt
sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/pki/etcd/ca.key "\${USER}"@\$host:etcd-ca.key
sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/admin.conf "\${USER}"@\$host:
done
EOF
sshpass -p "$PASSWORD" ssh -o StrictHostKeyChecking=no $IP_MASTER1 '/bin/sh -s' < copy.sh
cat common.sh > move.sh
cat << EOF >> move.sh
USER=root
mkdir -p /etc/kubernetes/pki/etcd
mv /\${USER}/ca.crt /etc/kubernetes/pki/
mv /\${USER}/ca.key /etc/kubernetes/pki/
mv /\${USER}/sa.pub /etc/kubernetes/pki/
mv /\${USER}/sa.key /etc/kubernetes/pki/
mv /\${USER}/front-proxy-ca.crt /etc/kubernetes/pki/
mv /\${USER}/front-proxy-ca.key /etc/kubernetes/pki/
mv /\${USER}/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt
mv /\${USER}/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key
mv /\${USER}/admin.conf /etc/kubernetes/admin.conf
$JOIN_TOKEN --control-plane
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
sleep 10
kubectl get node
yum -y install wget
wget 'https://github.com/derailed/k9s/releases/download/v0.25.18/k9s_Linux_x86_64.tar.gz'
tar -xvf k9s_Linux_x86_64.tar.gz
chmod +x k9s
mv k9s /usr/bin
EOF
sshpass -p "$PASSWORD" ssh -o StrictHostKeyChecking=no $IP_MASTER2 '/bin/sh -s' < move.sh
sshpass -p "$PASSWORD" ssh -o StrictHostKeyChecking=no $IP_MASTER3 '/bin/sh -s' < move.sh
read -p '[======== Go Back ======] press enter key'
;;
6)
read -p "Enter worker node IP: " wip
cat common.sh > worker.sh
cat << EOF >> worker.sh
$JOIN_TOKEN
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
sleep 10
kubectl get node
yum -y install wget
wget 'https://github.com/derailed/k9s/releases/download/v0.25.18/k9s_Linux_x86_64.tar.gz'
tar -xvf k9s_Linux_x86_64.tar.gz
chmod +x k9s
mv k9s /usr/bin
EOF
ssh $wip '/bin/sh -s' < worker.sh
read -p '[======== Go Back ======] press enter key'
;;
7)
cat << EOF > deploy.sh
cat << DEPEOF > test-app.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: test
name: test-app
namespace: default
spec:
minReadySeconds: 5
progressDeadlineSeconds: 600
replicas: 4
revisionHistoryLimit: 10
selector:
matchLabels:
app: test
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app: test
spec:
containers:
- image: hieuvpn/lap:6
imagePullPolicy: Always
name: lap
ports:
- containerPort: 80
protocol: TCP
resources:
limits:
cpu: 200m
requests:
cpu: 50m
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
DEPEOF
cat << DEPEOF > test-svc.yaml
apiVersion: v1
kind: Service
metadata:
labels:
app: test
name: test
namespace: default
spec:
allocateLoadBalancerNodePorts: true
externalIPs:
- $IP_MASTER1
- $IP_MASTER2
- $IP_MASTER3
ports:
- nodePort: 30380
port: 8080
protocol: TCP
targetPort: 80
selector:
app: test
sessionAffinity: None
type: LoadBalancer
DEPEOF
kubectl apply -f test-app.yaml
kubectl apply -f test-svc.yaml
EOF
ssh $IP_MASTER1 '/bin/sh -s' < deploy.sh
read -p '[======== Go Back ======] press enter key'
;;
esac
done
common.sh
#!/bin/bash
echo "Installing Docker..."
#yum -y update
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum -y install docker-ce
containerd config default > /etc/containerd/config.toml
systemctl restart containerd
systemctl enable --now docker
echo "Check Docker Status"
systemctl status docker
echo "Install kubelet kubeadm kubectl"
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubelet kubeadm kubectl
yum install -y nfs-utils
systemctl enable --now kubelet
systemctl status kubelet
sed -i '/swap/d' /etc/fstab
swapoff -a
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
systemctl stop firewalld
systemctl disable firewalld
#echo "Reboot OS in 10 seconds..."
/usr/bin/sleep 5
#reboot
kubeadm config images pull
#docker pull mysql:8.0.28
#docker pull bitnami/phpmyadmin
***** Manual install kubeadm
Installing kubeadm | Kubernetes: https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/
****Remove master node from etcd
$ kubectl exec etcd-<nodeNameMasterNode> -n kube-system -- etcdctl --cacert /etc/kubernetes/pki/etcd/ca.crt --cert /etc/kubernetes/pki/etcd/peer.crt --key /etc/kubernetes/pki/etcd/peer.key member list
1863b58e85c8a808, started, nodeNameMaster1, https://IP1:2380, https://IP1:2379, false
676d4bfab319fa22, started, nodeNameMaster2, https://IP2:2380, https://IP2:2379, false
b0c50c50d563ed51, started, nodeNameMaster3, https://IP3:2380, https://IP3:2379, false
$ kubectl exec etcd-nodeNameMaster1 -n kube-system -- etcdctl --cacert /etc/kubernetes/pki/etcd/ca.crt --cert /etc/kubernetes/pki/etcd/peer.crt --key /etc/kubernetes/pki/etcd/peer.key member remove b0c50c50d563ed51
Member b0c50c50d563ed51 removed from cluster d1e1de99e3d19634
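After the etcd member is removed, the usual cleanup is to delete the node object and reset kubeadm state on the removed host (run kubeadm reset on that node itself):
kubectl delete node nodeNameMaster3
# on the removed node:
kubeadm reset -f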