Archive for February, 2023

3
Feb

Kubernetes MultiMasters

   Posted by: admin    in Mẹo vặt của hiếu râu

#!/bin/bash
# Interactive installer menu for a multi-master Kubernetes cluster
# fronted by an external TCP load balancer. Requires dialog(1) for the
# TUI menu; install it on first run if missing.

# Use command -v instead of testing a hard-coded path so alternate
# install locations (e.g. /usr/local/bin) are detected too.
if ! command -v dialog >/dev/null 2>&1; then
   yum -y install dialog
fi

# ---- dialog(1) menu geometry and labels ------------------------------
# Window size and number of visible menu entries.
HEIGHT=20
WIDTH=85
CHOICE_HEIGHT=14

# Text shown around the menu.
BACKTITLE='DEFEND Test Lab'
TITLE='Install Kubernetes - Multi-master with HAProxy - Ubuntu 18.04 LTS'
MENU='Choose one of the following options:'

            # Load persisted cluster settings from ./txt, one value per
            # line: LB IP, three master IPs, kubeadm join command.
            # sed -n 'Np' extracts line N with a single process, replacing
            # the former backticked `cat txt | head -n N | tail -n 1`
            # chains (three processes per field). Errors are silenced so
            # a missing state file just yields empty values, as before.
            load_cluster_state() {
                local state_file=${1:-txt}
                IP_LBS=$(sed -n '1p' "$state_file" 2>/dev/null)
                IP_MASTER1=$(sed -n '2p' "$state_file" 2>/dev/null)
                IP_MASTER2=$(sed -n '3p' "$state_file" 2>/dev/null)
                IP_MASTER3=$(sed -n '4p' "$state_file" 2>/dev/null)
                JOIN_TOKEN=$(sed -n '5p' "$state_file" 2>/dev/null)
            }
            load_cluster_state txt

while [ "$CHOICE" != "8" ]; do

CHOICE=$(dialog --clear \
                --backtitle "$BACKTITLE" \
                --title "$TITLE" \
                --menu "$MENU" \
                $HEIGHT $WIDTH $CHOICE_HEIGHT \
         1 "Cluster Configuration"\
         2 "Setup Load Balancer"\
         3 "Install Primary Controller node"\
         4 "Get Cluster Join Command" \
         5 "Install Secondary Controller node"\
         6 "Install Worker node"\
         7 "Install Test HTTP deployment/service"\
         8 "Exit" 2>&1 >/dev/tty)
clear
case $CHOICE in
        1)
            # Collect/modify the cluster addresses via a dialog form.
            # The form writes its five fields, one per line, to ./txt so
            # the values survive across runs of this script.
            dialog --form "Enter IP Address" 12 60 8 \
                    "LBS: " 1 1 "$IP_LBS" 1 15 16 0 \
                    "Master1:" 2 1 "$IP_MASTER1" 2 15 16 0 \
                    "Master2:" 3 1 "$IP_MASTER2" 3 15 16 0 \
                    "Master3:" 4 1 "$IP_MASTER3" 4 15 16 0 \
                    "JoinToken:" 5 1 "$JOIN_TOKEN" 5 15 500 0 \
                    2> txt

            # Re-read the saved state. sed -n 'Np' extracts line N with
            # one process, replacing the old backticked
            # `cat txt | head -n N | tail -n 1` pipelines.
            IP_LBS=$(sed -n '1p' txt)
            IP_MASTER1=$(sed -n '2p' txt)
            IP_MASTER2=$(sed -n '3p' txt)
            IP_MASTER3=$(sed -n '4p' txt)
            JOIN_TOKEN=$(sed -n '5p' txt)

            ;;
        2)
            # Print manual instructions for creating the external TCP
            # load balancer on port 6443 (GCP steps only; AWS is TBD).
            cat << EOF
GPC : Network Services - Create Network LoadBalancer - TCP Load Balancing -
      Start Configuration - External - Single region only - Target Pool 
      Backend configuration - backends Existing Instances - create a health check
      Firewall allow ingress TCP/6443 from 130.211.0.0/22 35.191.0.0/16
AWS : TBD
EOF
            # Pause so the user can read the output before the menu redraws.
            read -p '[======== Go Back ======] press enter key'
        ;;

     3)
             # Build master1.sh = common.sh prerequisites + primary
             # control-plane bootstrap, then stream it to master1 via ssh.
             cat common.sh > master1.sh
             # The outer heredoc is intentionally UNQUOTED so $IP_LBS
             # expands now (locally) into kubeadm-config.yaml.
             # BUG FIX: $HOME, $(id -u) and $(id -g) previously expanded
             # on THIS machine while generating the script; they are now
             # backslash-escaped so they are evaluated on master1.
             cat << EOF >> master1.sh
cat << M1EOFM1 > kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: stable
apiServer:
  certSANs:
  - "$IP_LBS"
controlPlaneEndpoint: "$IP_LBS:6443"
M1EOFM1
kubeadm config images pull
kubeadm init --config=kubeadm-config.yaml
mkdir -p \$HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf \$HOME/.kube/config
sudo chown \$(id -u):\$(id -g) \$HOME/.kube/config
sleep 10
kubectl get node
kubectl apply -f https://github.com/weaveworks/weave/releases/download/v2.8.1/weave-daemonset-k8s.yaml
yum -y install wget
wget 'https://github.com/derailed/k9s/releases/download/v0.25.18/k9s_Linux_x86_64.tar.gz'
tar -xvf k9s_Linux_x86_64.tar.gz
chmod +x k9s
mv k9s /usr/bin
k9s
EOF
            ssh "$IP_MASTER1" '/bin/sh -s' < master1.sh
            read -p '[======== Go Back ======] press enter key'
        ;;

     4)
            # Fetch a fresh join command from master1 and persist all
            # cluster state back to ./txt (same layout option 1 reads).
            echo '/usr/bin/kubeadm token create --print-join-command' > get_token.sh
            JOIN_TOKEN=$(ssh "$IP_MASTER1" '/bin/sh -s' < get_token.sh)
            echo "$JOIN_TOKEN"
            # BUG FIX: line 1 of txt must be the load-balancer IP. The
            # original wrote the undefined $IP_HAPROXY here, which left
            # the first line empty and blanked IP_LBS on the next reload;
            # the variable used everywhere else is IP_LBS.
            cat << EOF > txt
$IP_LBS
$IP_MASTER1
$IP_MASTER2
$IP_MASTER3
$JOIN_TOKEN
EOF
            read -p '[======== Go Back ======] press enter key'
        ;;

      5)
            if ! command -v sshpass >/dev/null 2>&1; then
              yum -y install sshpass
            fi
            # -s keeps the typed password off the screen. NOTE(review):
            # sshpass still exposes it in `ps` argv on this host —
            # key-based auth would be safer; confirm acceptable for lab.
            read -s -p "Enter ssh password for Secondary Controller : " PASSWORD
            echo

            # copy.sh runs ON master1: it fans the CA / service-account /
            # etcd certs and admin.conf out to the secondary controllers.
            # Unescaped $PASSWORD / $IP_MASTER2 / $IP_MASTER3 expand now
            # (locally); escaped \$USER / \$host expand on master1.
            cat << EOF > copy.sh
USER=root
if [ ! -f /usr/bin/sshpass ]; then
      yum -y install sshpass
fi
MASTER_NODE_IPS="$IP_MASTER2 $IP_MASTER3" # apply the correct master1 and master2 ips
for host in \${MASTER_NODE_IPS}; do
   sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/pki/ca.crt "\${USER}"@\$host:
   sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/pki/ca.key "\${USER}"@\$host:
   sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/pki/sa.key "\${USER}"@\$host:
   sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/pki/sa.pub "\${USER}"@\$host:
   sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/pki/front-proxy-ca.crt "\${USER}"@\$host:
   sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/pki/front-proxy-ca.key "\${USER}"@\$host:
   sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/pki/etcd/ca.crt "\${USER}"@\$host:etcd-ca.crt
   sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/pki/etcd/ca.key "\${USER}"@\$host:etcd-ca.key
   sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/admin.conf "\${USER}"@\$host:
done
EOF
            sshpass -p "$PASSWORD" ssh -o StrictHostKeyChecking=no "$IP_MASTER1" '/bin/sh -s' < copy.sh
            # move.sh runs ON each secondary controller: prerequisites
            # (common.sh), move the copied certs into place, then join as
            # an additional control-plane node.
            cat common.sh > move.sh
            # BUG FIX: $HOME, $(id -u) and $(id -g) previously expanded
            # on THIS machine while generating move.sh; they are now
            # escaped so they are evaluated on the remote controller.
            cat << EOF >> move.sh
USER=root
mkdir -p /etc/kubernetes/pki/etcd
mv /\${USER}/ca.crt /etc/kubernetes/pki/
mv /\${USER}/ca.key /etc/kubernetes/pki/
mv /\${USER}/sa.pub /etc/kubernetes/pki/
mv /\${USER}/sa.key /etc/kubernetes/pki/
mv /\${USER}/front-proxy-ca.crt /etc/kubernetes/pki/
mv /\${USER}/front-proxy-ca.key /etc/kubernetes/pki/
mv /\${USER}/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt
mv /\${USER}/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key
mv /\${USER}/admin.conf /etc/kubernetes/admin.conf

$JOIN_TOKEN --control-plane
mkdir -p \$HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf \$HOME/.kube/config
sudo chown \$(id -u):\$(id -g) \$HOME/.kube/config
sleep 10
kubectl get node
yum -y install wget
wget 'https://github.com/derailed/k9s/releases/download/v0.25.18/k9s_Linux_x86_64.tar.gz'
tar -xvf k9s_Linux_x86_64.tar.gz
chmod +x k9s
mv k9s /usr/bin

EOF
            sshpass -p "$PASSWORD" ssh -o StrictHostKeyChecking=no "$IP_MASTER2" '/bin/sh -s' < move.sh
            sshpass -p "$PASSWORD" ssh -o StrictHostKeyChecking=no "$IP_MASTER3" '/bin/sh -s' < move.sh
            read -p '[======== Go Back ======] press enter key'
        ;;
      
      6)
            read -p "Enter worker node IP: " wip

            # worker.sh = common.sh prerequisites + kubeadm join.
            cat common.sh > worker.sh
            # $JOIN_TOKEN expands now (locally). BUG FIX: $HOME and
            # $(id -u)/$(id -g) previously expanded on THIS machine while
            # generating worker.sh; they are now escaped so they are
            # evaluated on the worker node itself.
            cat << EOF >> worker.sh

$JOIN_TOKEN 
mkdir -p \$HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf \$HOME/.kube/config
sudo chown \$(id -u):\$(id -g) \$HOME/.kube/config
sleep 10
kubectl get node
yum -y install wget
wget 'https://github.com/derailed/k9s/releases/download/v0.25.18/k9s_Linux_x86_64.tar.gz'
tar -xvf k9s_Linux_x86_64.tar.gz
chmod +x k9s
mv k9s /usr/bin

EOF
            ssh "$wip" '/bin/sh -s' < worker.sh
            read -p '[======== Go Back ======] press enter key'
        ;;

      7)
            # Generate deploy.sh and run it on master1: a 4-replica test
            # Deployment (hieuvpn/lap:6) plus a LoadBalancer Service
            # exposing it on port 8080 / nodePort 30380, with the three
            # master IPs as externalIPs. The heredoc is unquoted so
            # $IP_MASTER1..3 expand locally into the generated manifests.
            cat << EOF > deploy.sh
cat << DEPEOF > test-app.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: test
  name: test-app
  namespace: default
spec:
  minReadySeconds: 5
  progressDeadlineSeconds: 600
  replicas: 4
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: test
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: test
    spec:
      containers:
      - image: hieuvpn/lap:6
        imagePullPolicy: Always
        name: lap
        ports:
        - containerPort: 80
          protocol: TCP
        resources:
          limits:
            cpu: 200m
          requests:
            cpu: 50m
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30

DEPEOF

cat << DEPEOF > test-svc.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: test
  name: test
  namespace: default
spec:
  allocateLoadBalancerNodePorts: true
  externalIPs:
  - $IP_MASTER1
  - $IP_MASTER2
  - $IP_MASTER3
  ports:
  - nodePort: 30380
    port: 8080
    protocol: TCP
    targetPort: 80
  selector:
    app: test
  sessionAffinity: None
  type: LoadBalancer

DEPEOF
    kubectl apply -f test-app.yaml
    kubectl apply -f test-svc.yaml
EOF
            # Run the generated script on the primary controller, which
            # has the admin kubeconfig.
            ssh $IP_MASTER1 '/bin/sh -s' < deploy.sh
            read -p '[======== Go Back ======] press enter key'

        ;;

esac
done
common.sh (the prerequisite script prepended to every node install above):
#!/bin/bash
# common.sh — per-node prerequisites shared by every control-plane and
# worker install: container runtime, kubeadm/kubelet/kubectl packages,
# and the kernel/OS settings Kubernetes requires.

            echo "Installing Docker..."
                #yum -y update
                yum install -y yum-utils device-mapper-persistent-data lvm2
                yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
                yum -y install docker-ce
                # Regenerate a default containerd config; assumes the
                # docker-ce install created /etc/containerd — TODO confirm.
                containerd config default > /etc/containerd/config.toml
                systemctl restart containerd
                systemctl enable --now docker
            echo "Check Docker Status"
                systemctl status docker

            echo "Install kubelet kubeadm kubectl"
                # NOTE(review): gpgcheck/repo_gpgcheck are disabled, so
                # package signatures are NOT verified — confirm this is
                # acceptable for the lab environment.
                cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF

                yum install -y kubelet kubeadm kubectl
                yum install -y nfs-utils
                systemctl enable --now kubelet
                systemctl status kubelet

                # Kubernetes requires swap off; also drop it from fstab
                # so the setting survives reboots.
                sed -i '/swap/d' /etc/fstab
                swapoff -a

                # Bridged traffic must traverse iptables and IP
                # forwarding must be enabled for pod networking.
                cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
                sysctl --system

                # SELinux to permissive and firewall fully off — typical
                # for a throwaway test lab, not for production.
                setenforce 0
                sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

                systemctl stop firewalld
                systemctl disable firewalld
                #echo "Reboot OS in 10 seconds..."
                /usr/bin/sleep 5
                #reboot

                # Pre-pull control-plane images so kubeadm init/join is faster.
                kubeadm config images pull
        #docker pull mysql:8.0.28
        #docker pull bitnami/phpmyadmin
3
Feb

Dockerfile

   Posted by: admin    in Mẹo vặt của hiếu râu

Build and publish the image from the Dockerfile with:

docker build -f Dockerfile ./
docker login
docker tag c2740dd0aeea hieuvpn/lap:5
docker push hieuvpn/lap:5
docker run -ti -p 80:80 --rm c2740dd0aeea

==========Dockerfile==========

# Apache + PHP demo image used by the test Deployment above.
FROM jdeathe/centos-ssh
RUN yum -y install httpd php mod_php
COPY ./index.php /var/www/html/index.php
# One chained RUN instead of five single-command layers: same final
# filesystem, fewer image layers. The counter file/dir are left
# world-writable because index.php (running as the apache user)
# increments them on every request.
RUN touch /var/www/html/counter \
 && chmod 666 /var/www/html/counter \
 && echo "KeepAlive Off" >> /etc/httpd/conf/httpd.conf \
 && mkdir -p /_100MB/test/counter \
 && chmod 777 /_100MB/test/counter
EXPOSE 80
# Start httpd, give it a moment to open its log, then tail the access
# log so the container keeps a long-running foreground process.
CMD ["sh","-c","/usr/sbin/httpd ; sleep 5; tail -f /var/log/httpd/access_log"]

=========index.php=============

<?php
// Demo page: shows which backend pod served the request and keeps two
// hit counters (one baked into the image, one per-host on a shared volume).
// SECURITY FIX: the original echoed $_GET['refresh'] into the meta tag
// unescaped (HTML/attribute injection) and raised undefined-index
// notices; cast to int and fall back to a 2-second refresh.
$refresh = isset($_GET['refresh']) ? (int)$_GET['refresh'] : 2;
if ($refresh < 1) { $refresh = 2; }
?>
<head>
 <meta http-equiv="refresh" content="<?php echo $refresh; ?>">
</head>
<?php
// Derive a banner color from the server IP so each pod renders a
// visually distinct hue.
$ip = explode('.',$_SERVER["SERVER_ADDR"]);
echo "<div style=\"width: 20%; background-color:#".dechex($ip[1]%16*16*256*256+$ip[2]%16*16*256+$ip[3]%8*32)."\">";
echo ($ip[1] % 16 * 16)." ".($ip[2]%16*16)." ".($ip[3]%8*32)."<BR>";
echo "</div>";

echo "<H3> server IP = ". $_SERVER["SERVER_ADDR"]." hostname: ".getenv("HOSTNAME");
echo "<H3> remote IP = ". $_SERVER["REMOTE_ADDR"];
echo "<HR>".date("Y-m-d H:i:s");
// Per-pod hit counter; the (int) cast replaces the old `$i * 1` trick.
// NOTE(review): read-increment-write is not atomic, so concurrent
// requests can lose counts — acceptable for a demo page.
$i = (int)file_get_contents("./counter");
$i++;
file_put_contents("./counter",$i);
// Mirror the count onto the shared volume, keyed by pod hostname.
file_put_contents("/_100MB/test/counter/".getenv("HOSTNAME"),$i);
echo "<HR>Counter: $i <HR>";
// List every pod's counter from the shared volume.
exec('for i in `ls /_100MB/test/counter/`; do echo -n $i" : "; cat /_100MB/test/counter/$i; echo ; done;',$out);
echo implode("<BR>",$out);
// !empty() avoids an undefined-index notice when ?info= is absent.
if (!empty($_GET['info'])) phpinfo();


3
Feb

kubectl

   Posted by: admin    in Mẹo vặt của hiếu râu

Create a deployment and expose service port

kubectl create deployment json-server --image=hieuvpn/lap:8 -r 2
kubectl expose deployment json-server --type=LoadBalancer --name=json-server --port=8092

#allow pods run on master-node
kubectl taint nodes --all node-role.kubernetes.io/master-
kubectl taint nodes --all  node-role.kubernetes.io/control-plane-
#kubectl taint node master-node node-role.kubernetes.io/master=:NoSchedule

expose port 8080

‘ name: json-server
‘ ports:
‘ - containerPort: 80
‘   protocol: TCP
‘ resources: {}

Limit resources

‘ resources:
‘  limits:
‘   cpu: 200m
‘   memory: 300M
‘  requests:
‘   cpu: 50m
‘   memory: 200M

Volume mount

‘ resources: {}
‘ volumeMounts:
‘ - mountPath: /data
‘   name: jsrv
‘   subPath: json-server

‘ terminationGracePeriodSeconds: 30
‘ volumes:
‘ - name: jsrv
‘     persistentVolumeClaim:
‘       claimName: json-server

Liveness Probe

‘        resources: {}
‘        livenessProbe:
‘          failureThreshold: 3
‘          httpGet:
‘            path: /livez
‘            port: https
‘            scheme: HTTPS
‘          periodSeconds: 10
‘          successThreshold: 1
‘          timeoutSeconds: 1
‘        readinessProbe:
‘          failureThreshold: 3
‘          exec:
‘            command:
‘            - /bin/bash
‘            - -c
‘            - /ready.sh

‘          initialDelaySeconds: 20
‘          periodSeconds: 10
‘          successThreshold: 1
‘          timeoutSeconds: 1

Public IP

clusterIPs:
- 10.110.15.54
externalIPs:
- 69.30.241.22

Longhorn

curl -sSfL https://raw.githubusercontent.com/longhorn/longhorn/v1.4.0/scripts/environment_check.sh | bash
#kernel version (uname -r) must be the same on all nodes
kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.4.0/deploy/prerequisite/longhorn-iscsi-installation.yaml
kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.4.0/deploy/prerequisite/longhorn-nfs-installation.yaml
kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.4.0/deploy/longhorn.yaml
web portal = service longhorn-frontend
Create Volume, PV/PVC, mount /dev/sdb /mnt/longhorn
MetricServer

wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml -O metrics-server-components.yaml

wget  https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/high-availability.yaml -O metrics-server-ha.yaml

kubectl apply -f  metrics-server-ha.yaml

edit the deployment, add - --kubelet-insecure-tls
Execute pod command
kubectl exec mysql-set-0 -- sh -c "mysql --defaults-extra-file=/etc/mysql/conf.d/my.key <  /etc/mysql/conf.d/init.sql"
kubectl exec mysql-set-0 -i -- bash < mysql-set0.cmd

Restart Pod
kubectl delete pod  mysql-set-0
Restart Deployment
kubectl rollout restart deployment json-server


Docker rmi --prune

crictl -r /run/containerd/containerd.sock rmi --prune

docker system prune --volumes

Copy file from host to pods

kubectl cp /tmp/foo <some-namespace>/<some-pod>:/tmp/bar