3
Feb

Kubernetes MultiMasters

   Posted by: admin   in Mẹo vặt của hiếu râu

#!/bin/bash

if [ ! -f /usr/bin/dialog ]; then
   yum -y install dialog
fi

HEIGHT=20
WIDTH=85
CHOICE_HEIGHT=14
BACKTITLE="DEFEND Test Lab"
TITLE="Install Kubernetes - Multi-master with HAProxy - Ubuntu 18.04 LTS"
MENU="Choose one of the following options:"

            IP_LBS=`cat txt |head -n 1 | tail -n 1`
            IP_MASTER1=`cat txt |head -n 2 | tail -n 1`
            IP_MASTER2=`cat txt |head -n 3 | tail -n 1`
            IP_MASTER3=`cat txt |head -n 4 | tail -n 1`
            JOIN_TOKEN=`cat txt |head -n 5 | tail -n 1`

while [ "$CHOICE" != "8" ]; do

CHOICE=$(dialog --clear \
                --backtitle "$BACKTITLE" \
                --title "$TITLE" \
                --menu "$MENU" \
                $HEIGHT $WIDTH $CHOICE_HEIGHT \
         1 "Cluster Configuration"\
         2 "Setup Load Balancer"\
         3 "Install Primary Controller node"\
         4 "Get Cluster Join Command" \
         5 "Install Secondary Controller node"\
         6 "Install Worker node"\
         7 "Install Test HTTP deployment/service"\
         8 "Exit" 2>&1 >/dev/tty)
clear
case $CHOICE in
        1)

            dialog --form "Enter IP Address" 12 60 8 \
                    "LBS: " 1 1 "$IP_LBS" 1 15 16 0 \
                    "Master1:" 2 1 "$IP_MASTER1" 2 15 16 0 \
                    "Master2:" 3 1 "$IP_MASTER2" 3 15 16 0 \
                    "Master3:" 4 1 "$IP_MASTER3" 4 15 16 0 \
                    "JoinToken:" 5 1 "$JOIN_TOKEN" 5 15 500 0 \
                    2> txt

            IP_LBS=`cat txt |head -n 1 | tail -n 1`
            IP_MASTER1=`cat txt |head -n 2 | tail -n 1`
            IP_MASTER2=`cat txt |head -n 3 | tail -n 1`
            IP_MASTER3=`cat txt |head -n 4 | tail -n 1`
            JOIN_TOKEN=`cat txt |head -n 5 | tail -n 1`

            ;;
        2)
            cat << EOF
GPC : Network Services - Create Network LoadBalancer - TCP Load Balancing -
      Start Configuration - External - Single region only - Target Pool 
      Backend configuration - backends Existing Instances - create a health check
      Firewall allow ingress TCP/6443 from 130.211.0.0/22 35.191.0.0/16
AWS : TBD
EOF
            read -p '[======== Go Back ======] press enter key'
        ;;

     3)
             cat common.sh > master1.sh
             cat << EOF >> master1.sh
cat << M1EOFM1 > kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: stable
apiServer:
  certSANs:
  - "$IP_LBS"
controlPlaneEndpoint: "$IP_LBS:6443"
M1EOFM1
kubeadm config images pull
kubeadm init --config=kubeadm-config.yaml
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
sleep 10
kubectl get node
kubectl apply -f https://github.com/weaveworks/weave/releases/download/v2.8.1/weave-daemonset-k8s.yaml
yum -y install wget
wget 'https://github.com/derailed/k9s/releases/download/v0.25.18/k9s_Linux_x86_64.tar.gz'
tar -xvf k9s_Linux_x86_64.tar.gz
chmod +x k9s
mv k9s /usr/bin
k9s
EOF
            ssh $IP_MASTER1 '/bin/sh -s' < master1.sh
            read -p '[======== Go Back ======] press enter key'
        ;;

     4)
            echo '/usr/bin/kubeadm token create --print-join-command' > get_token.sh
            JOIN_TOKEN=`ssh $IP_MASTER1 '/bin/sh -s' < get_token.sh`
            echo $JOIN_TOKEN
            cat << EOF > txt
$IP_LBS
$IP_MASTER1
$IP_MASTER2
$IP_MASTER3
$JOIN_TOKEN
EOF
            read -p '[======== Go Back ======] press enter key'
        ;;

      5)
            if [ ! -f /usr/bin/sshpass ]; then
              yum -y install sshpass
            fi
            read -p "Enter ssh password for Secondary Controller : " PASSWORD

            cat << EOF > copy.sh
USER=root
if [ ! -f /usr/bin/sshpass ]; then
      yum -y install sshpass
fi
MASTER_NODE_IPS="$IP_MASTER2 $IP_MASTER3" # the secondary control-plane nodes (master2 and master3)
for host in \${MASTER_NODE_IPS}; do
   sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/pki/ca.crt "\${USER}"@\$host:
   sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/pki/ca.key "\${USER}"@\$host:
   sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/pki/sa.key "\${USER}"@\$host:
   sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/pki/sa.pub "\${USER}"@\$host:
   sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/pki/front-proxy-ca.crt "\${USER}"@\$host:
   sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/pki/front-proxy-ca.key "\${USER}"@\$host:
   sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/pki/etcd/ca.crt "\${USER}"@\$host:etcd-ca.crt
   sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/pki/etcd/ca.key "\${USER}"@\$host:etcd-ca.key
   sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /etc/kubernetes/admin.conf "\${USER}"@\$host:
done
EOF
            sshpass -p "$PASSWORD" ssh -o StrictHostKeyChecking=no $IP_MASTER1 '/bin/sh -s' < copy.sh
            cat common.sh > move.sh
            cat << EOF >> move.sh
USER=root
mkdir -p /etc/kubernetes/pki/etcd
mv /\${USER}/ca.crt /etc/kubernetes/pki/
mv /\${USER}/ca.key /etc/kubernetes/pki/
mv /\${USER}/sa.pub /etc/kubernetes/pki/
mv /\${USER}/sa.key /etc/kubernetes/pki/
mv /\${USER}/front-proxy-ca.crt /etc/kubernetes/pki/
mv /\${USER}/front-proxy-ca.key /etc/kubernetes/pki/
mv /\${USER}/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt
mv /\${USER}/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key
mv /\${USER}/admin.conf /etc/kubernetes/admin.conf

$JOIN_TOKEN --control-plane
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
sleep 10
kubectl get node
yum -y install wget
wget 'https://github.com/derailed/k9s/releases/download/v0.25.18/k9s_Linux_x86_64.tar.gz'
tar -xvf k9s_Linux_x86_64.tar.gz
chmod +x k9s
mv k9s /usr/bin

EOF
            sshpass -p "$PASSWORD" ssh -o StrictHostKeyChecking=no $IP_MASTER2 '/bin/sh -s' < move.sh
            sshpass -p "$PASSWORD" ssh -o StrictHostKeyChecking=no $IP_MASTER3 '/bin/sh -s' < move.sh
            read -p '[======== Go Back ======] press enter key'
        ;;
      
      6)
            read -p "Enter worker node IP: " wip

            cat common.sh > worker.sh
            cat << EOF >> worker.sh

$JOIN_TOKEN 
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
sleep 10
kubectl get node
yum -y install wget
wget 'https://github.com/derailed/k9s/releases/download/v0.25.18/k9s_Linux_x86_64.tar.gz'
tar -xvf k9s_Linux_x86_64.tar.gz
chmod +x k9s
mv k9s /usr/bin

EOF
            ssh $wip '/bin/sh -s' < worker.sh
            read -p '[======== Go Back ======] press enter key'
        ;;

      7)
            cat << EOF > deploy.sh
cat << DEPEOF > test-app.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: test
  name: test-app
  namespace: default
spec:
  minReadySeconds: 5
  progressDeadlineSeconds: 600
  replicas: 4
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: test
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: test
    spec:
      containers:
      - image: hieuvpn/lap:6
        imagePullPolicy: Always
        name: lap
        ports:
        - containerPort: 80
          protocol: TCP
        resources:
          limits:
            cpu: 200m
          requests:
            cpu: 50m
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30

DEPEOF

cat << DEPEOF > test-svc.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: test
  name: test
  namespace: default
spec:
  allocateLoadBalancerNodePorts: true
  externalIPs:
  - $IP_MASTER1
  - $IP_MASTER2
  - $IP_MASTER3
  ports:
  - nodePort: 30380
    port: 8080
    protocol: TCP
    targetPort: 80
  selector:
    app: test
  sessionAffinity: None
  type: LoadBalancer

DEPEOF
    kubectl apply -f test-app.yaml
    kubectl apply -f test-svc.yaml
EOF
            ssh $IP_MASTER1 '/bin/sh -s' < deploy.sh
            read -p '[======== Go Back ======] press enter key'

        ;;

esac
done
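
Option 2 above only prints the GCP console steps; if the load balancer is a plain HAProxy VM instead, a minimal sketch (an assumption, not part of the original script) could be run on the LBS host, with the placeholder master IPs replaced by the values from option 1:

yum -y install haproxy
cat << 'HAEOF' >> /etc/haproxy/haproxy.cfg

# TCP pass-through to the three kube-apiservers (replace the placeholder IPs)
frontend k8s-api
    bind *:6443
    mode tcp
    default_backend k8s-masters
backend k8s-masters
    mode tcp
    balance roundrobin
    option tcp-check
    server master1 IP_MASTER1:6443 check
    server master2 IP_MASTER2:6443 check
    server master3 IP_MASTER3:6443 check
HAEOF
systemctl enable --now haproxy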
common.sh
#!/bin/bash

            echo "Installing Docker..."
                #yum -y update
                yum install -y yum-utils device-mapper-persistent-data lvm2
                yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
                yum -y install docker-ce
                containerd config default > /etc/containerd/config.toml
                systemctl restart containerd
                systemctl enable --now docker
            echo "Check Docker Status"
                systemctl status docker

            echo "Install kubelet kubeadm kubectl"
                cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF

                yum install -y kubelet kubeadm kubectl
                yum install -y nfs-utils
                systemctl enable --now kubelet
                systemctl status kubelet

                sed -i '/swap/d' /etc/fstab
                swapoff -a

                cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
                sysctl --system

                setenforce 0
                sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

                systemctl stop firewalld
                systemctl disable firewalld
                #echo "Reboot OS in 10 seconds..."
                /usr/bin/sleep 5
                #reboot

                kubeadm config images pull
        #docker pull mysql:8.0.28
        #docker pull bitnami/phpmyadmin
3
Feb

Dockerfile

   Posted by: admin   in Mẹo vặt của hiếu râu

docker build -f Dockerfile ./
docker login
docker tag c2740dd0aeea hieuvpn/lap:5
docker push hieuvpn/lap:5
docker run -ti -p 80:80 --rm c2740dd0aeea

FROM jdeathe/centos-ssh
RUN yum -y install httpd php mod_php
COPY ./index.php /var/www/html/index.php
RUN touch /var/www/html/counter
RUN chmod 666 /var/www/html/counter
RUN echo "KeepAlive Off" >> /etc/httpd/conf/httpd.conf
RUN mkdir -p /_100MB/test/counter
RUN chmod 777 /_100MB/test/counter
EXPOSE 80
CMD ["sh","-c","/usr/sbin/httpd ; sleep 5; tail -f /var/log/httpd/access_log"]
index.php

<?php $refresh=$_GET['refresh']?$_GET['refresh']:2; ?>
<head>
 <meta http-equiv="refresh" content="<?php echo $refresh; ?>">
</head>
<?php
$ip = explode('.',$_SERVER["SERVER_ADDR"]);
echo "<div style=\"width: 20%; background-color:#".dechex($ip[1]%16*16*256*256+$ip[2]%16*16*256+$ip[3]%8*32)."\">";
echo ($ip[1] % 16 * 16)." ".($ip[2]%16*16)." ".($ip[3]%8*32)."<BR>";
echo "</div>";

echo "<H3> server IP = ". $_SERVER["SERVER_ADDR"]." hostname: ".getenv("HOSTNAME");
echo "<H3> remote IP = ". $_SERVER["REMOTE_ADDR"];
echo "<HR>".date("Y-m-d H:i:s");
$i = file_get_contents("./counter");
$i = $i * 1;
$i++;
file_put_contents("./counter",$i);
file_put_contents("/_100MB/test/counter/".getenv("HOSTNAME"),$i);
echo "<HR>Counter: $i <HR>";
exec('for i in `ls /_100MB/test/counter/`; do echo -n $i" : "; cat /_100MB/test/counter/$i; echo ; done;',$out);
echo implode("<BR>",$out);
if ($_GET['info']) phpinfo();


3
Feb

kubectl

   Posted by: admin   in Mẹo vặt của hiếu râu

Create a deployment and expose service port

kubectl create deployment json-server --image=hieuvpn/lap:8 -r 2
kubectl expose deployment json-server --type=LoadBalancer --name=json-server --port=8092

#allow pods run on master-node
kubectl taint nodes --all node-role.kubernetes.io/master-
kubectl taint nodes --all  node-role.kubernetes.io/control-plane-
#kubectl taint node master-node node-role.kubernetes.io/master=:NoSchedule

expose port 8080

name: json-server
ports:
- containerPort: 80
  protocol: TCP
resources: {}

Limit resources

resources:
  limits:
    cpu: 200m
    memory: 300M
  requests:
    cpu: 50m
    memory: 200M

Volume mount

resources: {}
volumeMounts:
- mountPath: /data
  name: jsrv
  subPath: json-server

terminationGracePeriodSeconds: 30
volumes:
- name: jsrv
  persistentVolumeClaim:
    claimName: json-server

Liveness Probe

resources: {}
livenessProbe:
  failureThreshold: 3
  httpGet:
    path: /livez
    port: https
    scheme: HTTPS
  periodSeconds: 10
  successThreshold: 1
  timeoutSeconds: 1
readinessProbe:
  failureThreshold: 3
  httpGet:
    path: /readyz
    port: https
    scheme: HTTPS
  initialDelaySeconds: 20
  periodSeconds: 10
  successThreshold: 1
  timeoutSeconds: 1

Public IP

clusterIPs:
- 10.110.15.54
externalIPs:
- 69.30.241.22
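
The same externalIPs can also be patched onto a running service instead of editing its YAML; a sketch (service name and address taken from the examples above, adjust to your own):

kubectl patch svc json-server -p '{"spec":{"externalIPs":["69.30.241.22"]}}'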

Longhorn

curl -sSfL https://raw.githubusercontent.com/longhorn/longhorn/v1.4.0/scripts/environment_check.sh | bash
kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.4.0/deploy/prerequisite/longhorn-iscsi-installation.yaml
kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.4.0/deploy/prerequisite/longhorn-nfs-installation.yaml
kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.4.0/deploy/longhorn.yaml
web portal = service longhorn-frontend
Create Volume, PV/PVC, mount /dev/sdb /mnt/longhorn
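
To reach the portal and provision a test volume without the UI, a sketch (assumes the default longhorn StorageClass created by longhorn.yaml; names are examples):

# forward the web portal to localhost:8080
kubectl -n longhorn-system port-forward svc/longhorn-frontend 8080:80

# dynamically provision a Longhorn-backed PVC
cat << EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: longhorn-test-pvc
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: longhorn
  resources:
    requests:
      storage: 5Gi
EOF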
Metrics Server

wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml -O metrics-server-components.yaml
wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/high-availability.yaml -O metrics-server-ha.yaml
kubectl apply -f metrics-server-ha.yaml

Edit the metrics-server deployment and add - --kubelet-insecure-tls to the container args.
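
The same edit can be done non-interactively with a JSON patch; a sketch (assumes the stock metrics-server deployment in kube-system):

kubectl -n kube-system patch deployment metrics-server --type=json \
  -p '[{"op":"add","path":"/spec/template/spec/containers/0/args/-","value":"--kubelet-insecure-tls"}]'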
Execute pod command
kubectl exec mysql-set-0 -- sh -c "mysql --defaults-extra-file=/etc/mysql/conf.d/my.key <  /etc/mysql/conf.d/init.sql"
kubectl exec mysql-set-0 -i -- bash < mysql-set0.cmd

Restart Pod
kubectl delete pod  mysql-set-0
Restart Deployment
kubectl rollout restart deployment json-server
10
Jan

gitlab CICD

   Posted by: admin   in Lăng nhăng lít nhít

.gitlab-ci.yml

stages:
  - build
  - deploy_to_cluster
  - rollback_on_failure
  - cleanup

variables:
  PIPELINE_ID: "pipeline_id"
  GIT_STRATEGY: clone
 
build:
  stage: build
  tags:
    - build
  only:
    - master
  script:
    - |
        echo "Build"
        sudo /bin/docker image build -t $CI_REGISTRY/jason/$CI_PROJECT_NAME/frontend_harry:$CI_PIPELINE_ID -f Dockerfile .
        sudo /bin/docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
        sudo /bin/docker push $CI_REGISTRY/jason/$CI_PROJECT_NAME/frontend_harry:$CI_PIPELINE_ID
        cat data/db.json > /home/gitlab-runner/db.json
        sudo /bin/docker kill json-server || echo "no json-server running"
        echo "restart json-server..."
        sudo /bin/docker run --rm -d -p 127.0.0.1:8091:80 -v /home/gitlab-runner/db.json:/data/db.json --name json-server clue/json-server

kubernetes_deploy:
  stage: deploy_to_cluster
  tags:
    - build
  only:
    - master
  timeout: 30m
  script:
    - |
        echo "Deploy revision $CI_PIPELINE_ID @ $CI_REGISTRY to Kubernetes cluster"
        echo "`date`" > build_time
        sudo /bin/kubectl get secret/regcred || sudo /bin/kubectl create secret docker-registry regcred --docker-server="$CI_REGISTRY" --docker-username="k8s_git_runner" --docker-password="$k8s_git_runner" --docker-email="harry@helpusdefend.com"
        sudo /bin/kubectl get service frontend-harry || sudo /bin/kubectl apply -f k8s/frontend_harry.svc.yaml
        sudo /bin/kubectl get deploy frontend-harry-app || sudo /bin/kubectl apply -f k8s/frontend_harry-app.yaml
        sudo /bin/kubectl set image deployment frontend-harry-app frontend-harry=$CI_REGISTRY/jason/$CI_PROJECT_NAME/frontend_harry:$CI_PIPELINE_ID --record
        echo "Deploy $CI_REGISTRY/jason/$CI_PROJECT_NAME/frontend_harry:$CI_PIPELINE_ID" > imagetag.txt
        sudo /bin/kubectl rollout history deploy frontend-harry-app
        sudo /bin/kubectl rollout status --timeout=5m deploy frontend-harry-app

rollback_to_previous_version:
  stage: rollback_on_failure
  when: on_failure
  tags:
    - build
  only:
    - master
  script:
    - |
        echo "rollback to previous version"
        cat imagetag.txt && sudo /bin/kubectl rollout undo deploy frontend-harry-app --to-revision=`sudo /bin/kubectl rollout history deploy/frontend-harry-app | tail -n 3 | head -n 1 | cut -f 1 -d' '`
        
cleanup_build:
  stage: cleanup
  when: always
  tags:
    - build
  only:
    - master
  script:
    - |
        echo "cleanup"
        rm -f build_time
        rm -f imagetag.txt

frontend-harry.svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: frontend-harry
  labels:
    app: frontend-harry
spec:
  type: LoadBalancer
  selector:
    app: frontend-harry
  ports:
    - port: 9080
      targetPort: 8000

frontend-harry-app.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: frontend-harry-app
  labels:
    app: frontend-harry
spec:
  replicas: 3
  selector:
    matchLabels:
      app: frontend-harry
  minReadySeconds: 5
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate      
  template:
    metadata:
      labels:
        app: frontend-harry
    spec:
      containers:
        - image: c..com:5050/jason/front-end-/frontend_harry:latest
          name: frontend-harry
          ports:
          - containerPort: 8000
      imagePullSecrets:
      - name: regcred

gitlab-runner ALL=(ALL) NOPASSWD: /bin/yum, /bin/docker, /bin/pip3, /bin/docker-compose, /bin/kubectl, /bin/k9s

wget https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.rpm.sh
sh script.rpm.sh
yum -y install gitlab-runner
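
Installing the package is not enough on its own; the runner still has to be registered against the GitLab instance. A sketch (URL, token and description are placeholders; the build tag matches the tags: used in .gitlab-ci.yml above):

gitlab-runner register --non-interactive \
  --url https://gitlab.example.com/ \
  --registration-token <RUNNER_TOKEN> \
  --executor shell \
  --tag-list build \
  --description "shell build runner"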

gitlab menu -> /settings/repository/deploy-tokens/read_registry

squid.conf

# INSERT YOUR OWN RULE(S) HERE TO ALLOW ACCESS FROM YOUR CLIENTS #
# Example rule allowing access from your local networks.
# Adapt localnet in the ACL section to list your (internal) IP networks
# from where browsing should be allowed
# Squid normally listens to port 3128
http_port 80 accel defaultsite=46.17.175.144 vhost

cache_peer 10.109.76.54 parent 9080 0 no-query originserver name=harry_frontend
cache_peer 127.0.0.1 parent 8091 0 no-query originserver name=json_server
cache_peer 192.168.5.5 parent 8091 0 no-query originserver name=json_server2
cache_peer 10.100.43.53 parent 8888 0 no-query originserver name=anna_api

acl sites_harry_frontend dstdomain frontend.helpusdefend.org www.helpusdefend.org
acl sites_json_server dstdomain json-server.helpusdefend.org
acl sites_anna_api dstdomain anna.api.helpusdefend.org

cache_peer_access harry_frontend allow sites_harry_frontend
cache_peer_access json_server allow sites_json_server
cache_peer_access json_server2 allow sites_json_server
cache_peer_access anna_api allow sites_anna_api

http_access allow sites_harry_frontend
http_access allow sites_json_server
http_access allow sites_anna_api
http_access allow localnet
http_access allow localhost

# And finally deny all other access to this proxy
http_access deny all

# Squid normally listens to port 3128
#http_port 3128

15
Dec

OCR pytesseract and google OCR

   Posted by: admin   in Mẹo vặt của hiếu râu

yum install gcc openssl-devel bzip2-devel libffi-devel zlib-devel xz-devel
wget https://www.python.org/ftp/python/3.7.11/Python-3.7.11.tgz
tar -xvf Python-3.7.11.tgz
cd Python-3.7.11
./configure --enable-optimizations
make altinstall
yum install -y https://repo.ius.io/ius-release-el7.rpm
yum install -y python36u python36u-libs python36u-devel python36u-pip
yum install epel-release
python3.7 --version
python3.7 -m pip
python3.7 -m ensurepip
pip3.7 install pytesseract
pip3.7 install tox
wget --no-check-certificate https://download.opensuse.org/repositories/home:/Alexander_Pozdnyakov/CentOS_7/x86_64/tesseract-4.1.3+git4271-3.1.x86_64.rpm
wget --no-check-certificate https://download.opensuse.org/repositories/home:/Alexander_Pozdnyakov/CentOS_7/noarch/tesseract-langpack-eng-4.00~git30-5.5.noarch.rpm
rpm -ivh tesseract-langpack-eng-4.00~git30-5.5.noarch.rpm
wget --no-check-certificate https://download.opensuse.org/repositories/home:/Alexander_Pozdnyakov/CentOS_7/noarch/tesseract-langpack-osd-4.00~git30-5.5.noarch.rpm
rpm --nodeps -ivh tesseract-langpack-osd-4.00~git30-5.5.noarch.rpm
rpm -ivh tesseract-4.1.3+git4271-3.1.x86_64.rpm
wget --no-check-certificate https://download.opensuse.org/repositories/home:/Alexander_Pozdnyakov/CentOS_7/x86_64/leptonica-1.76.0-2.5.x86_64.rpm
yum install libjpeg
yum install libtiff
yum install libwebp
rpm -ivh leptonica-1.76.0-2.5.x86_64.rpm
rpm -ivh tesseract-4.1.3+git4271-3.1.x86_64.rpm

[root@centos7-min OCR]# tox
ROOT: No tox.ini or setup.cfg or pyproject.toml found, assuming empty tox.ini at /root/OCR
py: OK (0.05 seconds)
congratulations :) (0.15 seconds)
[root@centos7-min OCR]#
=====================
https://github.com/tesseract-ocr/tesseract
https://tesseract-ocr.github.io/tessdoc/Installation.html
https://download.opensuse.org/repositories/home:/Alexander_Pozdnyakov/CentOS_7/
# Import modules
from PIL import Image
import pytesseract
# Include tesseract executable in your path
#pytesseract.pytesseract.tesseract_cmd = r"./Tesseract-OCR/tesseract.exe"
pytesseract.pytesseract.tesseract_cmd = r"/usr/bin/tesseract"
# Create an image object of PIL library
image = Image.open('example.jpg')
# pass image into pytesseract module
# pytesseract is trained in many languages
image_to_text = pytesseract.image_to_string(image, lang='eng')
# Print the text
print(image_to_text)
=========== Dockerfile =================
FROM jdeathe/centos-ssh
RUN yum -y install httpd php mod_php
COPY ./index.php /var/www/html/index.php
RUN touch /var/www/html/counter
RUN chmod 666 /var/www/html/counter
RUN echo "KeepAlive Off" >> /etc/httpd/conf/httpd.conf
RUN mkdir -p /_100MB/facecrop
RUN chmod 777 /_100MB/facecrop
WORKDIR /opt
RUN yum install -y  gcc openssl-devel bzip2-devel libffi-devel zlib-devel xz-devel
RUN yum -y install wget
RUN cd /opt
RUN wget https://www.python.org/ftp/python/3.7.11/Python-3.7.11.tgz
RUN tar -xvf Python-3.7.11.tgz
RUN cd /opt/Python-3.7.11
WORKDIR /opt/Python-3.7.11
RUN chmod 755 configure
RUN ./configure --enable-optimizations
RUN make altinstall
#RUN yum install -y https://repo.ius.io/ius-release-el7.rpm
#RUN yum install -y python36u python36u-libs python36u-devel python36u-pip
#RUN yum install epel-release
RUN python3.7 -m ensurepip
RUN pip3.7 install pytesseract
RUN pip3.7 install tox
RUN wget --no-check-certificate https://download.opensuse.org/repositories/home:/Alexander_Pozdnyakov/CentOS_7/x86_64/tesseract-4.1.3+git4271-3.1.x86_64.rpm
RUN wget --no-check-certificate https://download.opensuse.org/repositories/home:/Alexander_Pozdnyakov/CentOS_7/noarch/tesseract-langpack-eng-4.00~git30-5.5.noarch.rpm
RUN rpm -ivh tesseract-langpack-eng-4.00~git30-5.5.noarch.rpm
RUN wget --no-check-certificate https://download.opensuse.org/repositories/home:/Alexander_Pozdnyakov/CentOS_7/noarch/tesseract-langpack-osd-4.00~git30-5.5.noarch.rpm
RUN rpm --nodeps -ivh tesseract-langpack-osd-4.00~git30-5.5.noarch.rpm
RUN wget --no-check-certificate https://download.opensuse.org/repositories/home:/Alexander_Pozdnyakov/CentOS_7/x86_64/leptonica-1.76.0-2.5.x86_64.rpm
RUN yum -y install libjpeg libtiff libwebp libpng
RUN rpm -ivh leptonica-1.76.0-2.5.x86_64.rpm
RUN rpm -ivh tesseract-4.1.3+git4271-3.1.x86_64.rpm
RUN yum -y install libglvnd-devel
RUN pip3.7 install opencv-python==4.6.0.66
WORKDIR /opt
COPY . /opt
EXPOSE 80
CMD ["sh","-c","python3.7 detect.py --image harry.jpg"]
#"/usr/sbin/httpd ; sleep 5; tail -f /var/log/httpd/access_log"]

3
Dec

AWS Lambda - Layer

   Posted by: admin   in Mẹo vặt của hiếu râu, python

mkdir lambda-layer
cd lambda-layer

cat requirements.txt
pgpy

docker run -v "$PWD":/var/task "lambci/lambda:build-python3.8" /bin/sh \
  -c "pip install -r requirements.txt -t python/lib/python3.8/site-packages/; exit"
zip -r -9 layer.zip python
In the AWS Lambda console, create a layer from layer.zip (linux x86_64, Python 3.8).
Then create the function and attach the custom layer to it.
https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html
https://docs.aws.amazon.com/lambda/latest/dg/python-package.html
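
The two console steps can also be scripted with the AWS CLI; a sketch (layer and function names are placeholders, the layer ARN comes from the first command's output):

aws lambda publish-layer-version --layer-name pgpy-layer \
  --zip-file fileb://layer.zip --compatible-runtimes python3.8

aws lambda update-function-configuration --function-name encrypt-s3-images \
  --layers arn:aws:lambda:us-east-1:123456789012:layer:pgpy-layer:1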
import json, boto3,os, sys, uuid, pgpy

s3_client = boto3.client("s3")
S3_BUCKET = 'defend-ai-images'
S3_ENCRYPTED = 'defend-encrypted-images'
S3_PREFIX = 'my'

def lambda_handler(event, context):
    response = s3_client.list_objects_v2(
        Bucket=S3_BUCKET,)
    keyCount = response["KeyCount"]
    if keyCount==0:
        return keyCount
    s3_files = response["Contents"]

    pub_blob="""-----BEGIN PGP PUBLIC KEY BLOCK-----

xsFNBGOI6lMBEACYAJgGRJaIXz1XWimgsuCLwXV2l3aPYCIFv4dRg6deLKJH/bVW
R35eileDuA8e3ynvlsBHwiJL+l2gxDMUI03iL+DxXN7OqpR43Hxh/E8MyPHVsi19
OB6XDtkJ8LdFGAZqBP+E6tctw02nn7D4PoGdDa7LSDFnjivBXLQcT5omH+7ftbkI
cUTQWGg4dTHtSOIR8IvCm9r8oF4lDAjiwIIQn1NPTfYsYy+IS+nJ9uzUBCr9PCuK
6SJmnw+ikd+nuhl/ljJiWRL0JcJrKxk7W1UuoqVITmNsrtVDCjF0tKZEy/uIDrYH
9d9KMKcN0zR/dZZZrPVtl5cWg0lBkAplk800nx56z6MHQs0SDOI/bLssqeP0auml
rnTxFpCaQXSgFsE9AvjE8zX807gok7D4tpjFIWPA6Ns9W8wUoNbu2mgiJzYhZCQW
lqJ+N0FuJ1oTllyne+YVzWe/MydU6e9N6e1y6NkrKkqUHMG6b8igePoX3ug2z2MZ
XRnSMVkcq50+VV3c6ymBL8GtqNgYf7e3njfQicnGfniG1tE8iPcy8gcKOgjO8mZe
50eyWHRR1kFMWsbVthQByzPnkfAK4dCb7tcZ6SHruuU9iF+6fvKLxsERC9jsWIqm
8zJ1Q4ZsxL/NNXRaCED4DHWrjiAZBN+QTsluZQnah6rattXl0/CQJflVJwARAQAB
zTBIYXJyeSAoSGFycnlzIHRlc3Qga2V5KSA8aGFycnlAaGVscHVzZGVmZW5kLmNv
bT7CwYoEEwEIADQFAmOI6q0CGw4ECwkIBwUVCAkKCwUWAgMBAAIeARYhBBXleiAB
F1iZVTFWE9uksLIDVhvQAAoJENuksLIDVhvQ+GkQAIfDEYgvQMWRSWdJQ7LRAiHr
7OJY21CCJhUZdfJnNqVmcuK3n9n7m+qRpb8FjrKPgXjspGzUHbGpVnRurBOLRFFE
71V56xMmBzsR2Ku47pxoif3ZBZjOHE8DplMTP62uAcBjMaHpkHCjZh1FCzj7syEZ
khtO+Zhc6WCIZBzRPSoS7EBRG8ayxlox09PnDOnWNIanA/fHrY9RsGA36XToamQ+
0Kv8H2i+kfPVFxIWFAJea+Oaud1igzOATcu5NmYcF9Cc17ELy1JMYnxxoCZIAZs3
mLPhv+7BqWiflIuxx7hKPlJ1N4s7dsJNE+emMuXf/N9r8p9hOnCfscrW1qxsv+if
WekfuAmjs/E29pA1MVe3Rqrf65aQcjXPynRQbbIdVo0Ln9mkJL+6SkTV/Nl014YU
w33+Tuep7MFERRuBBGQ1RMgRqSd8T5f4DEnlwVkEvd5F2ZafjfWf01SZ2q5Dmowa
nS/MOtOTMYSCn3SlQyCMKDdbJ6pMe1GYWfPUIU2EEG/JtBuiCtjw1m/8EpJIoGo6
4DgxPjQEQR1Dqz0ZZ/fQii+F6EFrVGsEOBZgnMYod9m4ONxU1zDJAxvihb0gi0hm
rv+u4/VyE7nrsFxDMFN6lzQd6psSe5m++ENjzTNA/pN1B7HtLlTc/ioL6qaCMDg0
tRbz8aGnRHl9XWWIPPsW
=28Qw
-----END PGP PUBLIC KEY BLOCK-----
"""
    pubkey, _ = pgpy.PGPKey.from_blob(pub_blob)
    s3_jpgs = filter(lambda obj: obj['Key'].endswith('.jpg'), s3_files)
    s3 = boto3.resource("s3")
    for s3_file in s3_jpgs:
        file_content = s3_client.get_object(
            Bucket=S3_BUCKET, Key=s3_file["Key"])["Body"].read()
        message = pgpy.PGPMessage.new(file_content)
        encrypt_content = pubkey.encrypt(message)
        output_file_name=s3_file["Key"]
        s3.meta.client.put_object(Body=bytes(encrypt_content), Bucket=S3_ENCRYPTED, Key=output_file_name)
        s3.meta.client.delete_object(Bucket=S3_BUCKET, Key=s3_file["Key"])

    return keyCount

pip3 install pgpy

[root@master-node pgp]# cat keygen.py
from pgpy.constants import PubKeyAlgorithm, KeyFlags, HashAlgorithm, SymmetricKeyAlgorithm, CompressionAlgorithm
import pgpy

key = pgpy.PGPKey.new(PubKeyAlgorithm.RSAEncryptOrSign, 4096)
uid = pgpy.PGPUID.new('Harry', comment='Harrys test key', email='harry@——.com')
key.add_uid(uid, usage={KeyFlags.Sign, KeyFlags.EncryptCommunications, KeyFlags.EncryptStorage},
            hashes=[HashAlgorithm.SHA256, HashAlgorithm.SHA384, HashAlgorithm.SHA512, HashAlgorithm.SHA224],
            ciphers=[SymmetricKeyAlgorithm.AES256, SymmetricKeyAlgorithm.AES192, SymmetricKeyAlgorithm.AES128],
            compression=[CompressionAlgorithm.ZLIB, CompressionAlgorithm.BZ2, CompressionAlgorithm.ZIP, CompressionAlgorithm.Uncompressed])
print(key)
print(key.pubkey)

==================================
[root@master-node pgp]# cat encrypt.py
import sys
import pgpy
pubkey, _ = pgpy.PGPKey.from_file("pgpkey.pub")
message = pgpy.PGPMessage.new(sys.argv[1], file=True)
encrypted_message = pubkey.encrypt(message)
fo=sys.argv[2]
f2=open(fo,"w")
f2.write(str(encrypted_message))
f2.close()
=====================================
[root@master-node pgp]# cat decrypt.py
import sys
import pgpy
key, _ = pgpy.PGPKey.from_file("pgpkey")
#f1=open(sys.argv[1],"rb")
#message=f1.read()
#f1.close()
message = pgpy.PGPMessage.from_file(sys.argv[1])
decrypted_message = key.decrypt(message).message
fo=sys.argv[2]
f2=open(fo,"wb")
f2.write(bytes(decrypted_message))
f2.close()
=========================================

pip3 install pyopenssl

pip3 install cryptography

============================================

[root@master-node encryption]# cat keygen.py
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
# Generate the RSA private key
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
)
pem = key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
)
print(pem)
public_key = key.public_key()
pem = public_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
print(pem)
===========================================
[root@master-node encryption]# cat encrypt.py
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa, padding
from cryptography.hazmat.primitives.serialization import load_pem_public_key
import sys
public_pem_data=b'-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1ikdHKIUkRKKmxm5OMmH\nX8T2mx05ggDD7oEqZBd8x9lrzLOmADPoYT/qZGpFkXu6ys9IWlIqGp96qRsXQaRA\nO5EJziNdrCpMYGZFX5cxc8hnVh15h8DrvWj7pKoNmWkZhLiQ+vFsWLq3m41omQi/\ndhNMybPLsLGqS7EOO17z1VifSp33XOXLNZkrU3otItoqPPNq6nAXuINXZsPTdRY7\nk/ERmEXU2l6+GMKWnesRWm7txJSTgdpH8hjfoSZmJTHy7+uZqTdHC3PpCojZeIRw\ndgOEErYnKEHMQ6/4DV0a0tF5BzwXhrolSYkWmpt65pblbLQAzgR0KA91F8iJHp5Y\ncQIDAQAB\n-----END PUBLIC KEY-----\n'
key = load_pem_public_key(public_pem_data)
fi=open(sys.argv[1],'rb')
message = fi.read()
fi.close()
ciphertext = key.encrypt(
    message,
    padding.OAEP(
        mgf=padding.MGF1(algorithm=hashes.SHA256()),
        algorithm=hashes.SHA256(),
        label=None
    )
)
fo=open(sys.argv[2],'wb')
fo.write(ciphertext)
fo.close()
===========================================
[root@master-node encryption]# cat decrypt.py
import sys
from cryptography.hazmat.primitives.serialization import load_pem_private_key
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa, padding
pem_data=b'-----BEGIN RSA PRIVATE KEY-----\nMIIEogIAdf8a1kupHcqgVHzcBlgBfRDBr\nEQyKr9JWXzLTwgbpft/7qvOkv4T0pOzhWBvKJaKvm1sY+4l+Z1g=\n-----END RSA PRIVATE KEY-----\n'

fi=open(sys.argv[1],'rb')
ciphertext=fi.read()
fi.close()
key = load_pem_private_key(pem_data, password=None)
plaintext = key.decrypt(
    ciphertext,
    padding.OAEP(
        mgf=padding.MGF1(algorithm=hashes.SHA256()),
        algorithm=hashes.SHA256(),
        label=None
    )
)
print(plaintext)
fo=open(sys.argv[2],'wb')
fo.write(plaintext)
fo.close()
==========================================
https://www.misterpki.com/python-public-private-key-encryption/
19
Oct

pv - Progress monitoring

   Posted by: admin   in Mẹo vặt của hiếu râu

  • To turn on the display bar, use the -p option.
  • To view the elapsed time, use the --timer option.
  • To turn on the ETA timer, which tries to guess how long the operation will take to complete, use the --eta option. The guess is based on previous transfer rates and the total data size.
  • To turn on a rate counter, use the --rate option.
  • To display the total amount of data transferred so far, use the --bytes option.
  • To display progress as an integer percentage instead of the visual bar, use the -n option. This is useful when combining pv with the dialog command to show progress in a dialog box.
copy file
# pv source > /tmp/dest
zip a file
# pv /var/log/syslog | zip > syslog.zip
tar dir
# tar -czf - ./Downloads/ | (pv -p --timer --rate --bytes > backup.tgz)
tar with dialog
# tar -czf - ./Documents/ | (pv -n > backup.tgz) 2>&1 | dialog --gauge "Progress" 10 70
26
Aug

iSCSI targetcli

   Posted by: admin   in Linux nông dân

#yum install targetcli
#targetcli
/> ls
o- / [...]
  o- backstores [...]
  | o- block [Storage Objects: 1]
  | | o- dev_sdb [/dev/sdb (60.0GiB) write-thru activated]
  | |   o- alua [ALUA Groups: 1]
  | |     o- default_tg_pt_gp [ALUA state: Active/optimized]
  | o- fileio [Storage Objects: 0]
  | o- pscsi [Storage Objects: 0]
  | o- ramdisk [Storage Objects: 0]
  o- iscsi [Targets: 1]
  | o- iqn.2003-01.org.linux-iscsi.iscsid.x8664:sn.bfc48a1cbef2 [TPGs: 1]
  |   o- tpg1 [no-gen-acls, no-auth]
  |     o- acls [ACLs: 1]
  |     | o- iqn.1998-01.com.vmware:3a4fc8a8-5206-a0e0-146c-500100030000-057c77a5 [Mapped LUNs: 1]
  |     |   o- mapped_lun1 [lun1 block/dev_sdb (rw)]
  |     o- luns [LUNs: 1]
  |     | o- lun1 [block/dev_sdb (/dev/sdb) (default_tg_pt_gp)]
  |     o- portals [Portals: 1]
  |       o- 0.0.0.0:3260 [OK]
  o- loopback [Targets: 0]
/>

To create a block backstore from the targetcli shell:

/> cd /backstores/block
/backstores/block> create name=LUN_1 dev=/dev/xvdb

To create a fileio backstore from the targetcli shell:

/> cd /backstores/fileio
/backstores/fileio> create name=LUN_3 /root/disk1.img 5G

To create an iSCSI target

/> cd /iscsi
/iscsi> create

cd tpg1/luns
tpg1/luns> create /backstores/block/LUN_1 lun1

ACLs
cd tpg1/acls
create iqn.1998-01.com.vmware:3a4fc8a8-5206-a0e0-146c-500100030000-057c77a5

/>saveconfig
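
On the initiator (client) side, the exported LUN can then be discovered and logged in to; a sketch, assuming the target portal is reachable at 192.168.1.10:

# yum install iscsi-initiator-utils
# iscsiadm -m discovery -t sendtargets -p 192.168.1.10
# iscsiadm -m node --login
# lsblk    (the LUN appears as a new block device)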