카타코더 Logging with EFK

1 개요

카타코더 Logging with EFK

2 Your Kubernetes Cluster

root@master:~# kubectl version
Client Version: version.Info{Major:"1", Minor:"14", GitVersion:"v1.14.0", GitCommit:"641856db18352033a0d96dbc99153fa3b27298e5", GitTreeState:"clean", BuildDate:"2019-03-25T15:53:57Z", GoVersion:"go1.12.1", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"14", GitVersion:"v1.14.0", GitCommit:"641856db18352033a0d96dbc99153fa3b27298e5", GitTreeState:"clean", BuildDate:"2019-03-25T15:45:25Z", GoVersion:"go1.12.1", Compiler:"gc", Platform:"linux/amd64"}
root@master:~# kubectl cluster-info
Kubernetes master is running at https://172.17.0.124:6443
dash-kubernetes-dashboard is running at https://172.17.0.124:6443/api/v1/namespaces/kube-system/services/dash-kubernetes-dashboard:http/proxy
KubeDNS is running at https://172.17.0.124:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
root@master:~# kubectl get nodes
NAME     STATUS   ROLES    AGE   VERSION
master   Ready    master   10m   v1.14.0
node01   Ready    <none>   10m   v1.14.0
root@master:~# kubectl get deployments,pods,services
NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
service/kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   10m
root@master:~# helm version
Client: &version.Version{SemVer:"v2.13.1", GitCommit:"618447cbf203d147601b4b9bd7f8c37a5d39fbb4", GitTreeState:"clean"}
Server: &version.Version{SemVer:"v2.13.1", GitCommit:"618447cbf203d147601b4b9bd7f8c37a5d39fbb4", GitTreeState:"clean"}
root@master:~# export TOKEN=$(kubectl describe secret $(kubectl get secret | awk '/^dashboard-token-/{print $1}') | awk '$1=="token:"{print $2}') &&
> echo -e "\n--- Copy and paste this token for dashboard access --\n$TOKEN\n---"

--- Copy and paste this token for dashboard access --
eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6ImRlZmF1bHQtdG9rZW4tanZmZzQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGVmYXVsdCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjVmZmI1NjVkLTljYjQtMTFlOS04YmJhLTAyNDJhYzExMDA3YyIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpkZWZhdWx0OmRlZmF1bHQifQ.FX7YSw-OVkD7eSzfwkuEtCofwSABDHQxBQFweC-F9_ZdnuWifrwdEZUzsqVnOYekhrqcljzty-qekiQqwJwEXTfOxZZTbBkj67Uy5Lum0knUiDVkjFdG9V3SL13AMhVLW05RRzMAh1hxKIJZL9c6A5F8gZ00pdJYkM2hsuda7k-g3gJVMPGHYfsKFiQIgboKBlRdqPmpGzs7c11QYWfKEUJTycvzPIi7-8Y5FjKbHFTzGJ9jKoQFGCvYP2c2xkIj1E-LgU0scy-IAwOUKv2SNJBaxDRKbNb0VnQ29tUTcXw2pGeD4oY-4FgH9UAVDLM7VLtMMvn5L6UlEXQpIWmB4g
---

3 Deploy ElasticSearch

root@master:~# mkdir -p /mnt/data/efk-master && kubectl create -f pv-master.yaml
persistentvolume/efk-master-volume created
pv-master.yaml
kind: PersistentVolume
apiVersion: v1
metadata:
  name: efk-master-volume
  labels:
    type: local
spec:
  storageClassName: elasticsearch-master
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/mnt/data/efk-master"
root@master:~# mkdir -p /mnt/data/efk-data && kubectl create -f pv-data.yaml
persistentvolume/efk-data-volume created
pv-data.yaml
kind: PersistentVolume
apiVersion: v1
metadata:
  name: efk-data-volume
  labels:
    type: local
spec:
  storageClassName: elasticsearch-data
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/mnt/data/efk-data"
root@master:~# helm install stable/elasticsearch --name=elasticsearch --namespace=logs \
> --set client.replicas=1 \
> --set master.replicas=1 \
> --set cluster.env.MINIMUM_MASTER_NODES=1 \
> --set cluster.env.RECOVER_AFTER_MASTER_NODES=1 \
> --set cluster.env.EXPECTED_MASTER_NODES=1 \
> --set data.replicas=1 \
> --set data.heapSize=300m \
> --set master.persistence.storageClass=elasticsearch-master \
> --set master.persistence.size=5Gi \
> --set data.persistence.storageClass=elasticsearch-data \
> --set data.persistence.size=5Gi
NAME:   elasticsearch
LAST DEPLOYED: Tue Jul  2 10:48:30 2019
NAMESPACE: logs
STATUS: DEPLOYED

RESOURCES:
==> v1/ConfigMap
NAME                DATA  AGE
elasticsearch       4     2s
elasticsearch-test  1     2s

==> v1/Pod(related)
NAME                                   READY  STATUS    RESTARTS  AGE
elasticsearch-client-5ff64d4bc4-22gxt  0/1    Init:0/1  0         1s
elasticsearch-data-0                   0/1    Pending   0         1s
elasticsearch-master-0                 0/1    Pending   0         1s

==> v1/Service
NAME                     TYPE       CLUSTER-IP     EXTERNAL-IP  PORT(S)   AGE
elasticsearch-client     ClusterIP  10.106.170.20  <none>       9200/TCP  1s
elasticsearch-discovery  ClusterIP  None           <none>       9300/TCP  1s

==> v1/ServiceAccount
NAME                  SECRETS  AGE
elasticsearch-client  1        2s
elasticsearch-data    1        1s
elasticsearch-master  1        1s

==> v1beta1/Deployment
NAME                  READY  UP-TO-DATE  AVAILABLE  AGE
elasticsearch-client  0/1    1           0          1s

==> v1beta1/StatefulSet
NAME                  READY  AGE
elasticsearch-data    0/1    1s
elasticsearch-master  0/1    1s


NOTES:
The elasticsearch cluster has been installed.

Elasticsearch can be accessed:

  * Within your cluster, at the following DNS name at port 9200:

    elasticsearch-client.logs.svc

  * From outside the cluster, run these commands in the same shell:

    export POD_NAME=$(kubectl get pods --namespace logs -l "app=elasticsearch,component=client,release=elasticsearch" -o jsonpath="{.items[0].metadata.name}")
    echo "Visit http://127.0.0.1:9200 to use Elasticsearch"
    kubectl port-forward --namespace logs $POD_NAME 9200:9200

4 Deploy Fluent Bit

root@master:~# helm install stable/fluent-bit --name=fluent-bit --namespace=logs --set backend.type=es --set backend.es.host=elasticsearch-client
NAME:   fluent-bit
LAST DEPLOYED: Tue Jul  2 10:49:03 2019
NAMESPACE: logs
STATUS: DEPLOYED

RESOURCES:
==> v1/ClusterRole
NAME        AGE
fluent-bit  1s

==> v1/ClusterRoleBinding
NAME        AGE
fluent-bit  1s

==> v1/ConfigMap
NAME               DATA  AGE
fluent-bit-config  6     1s
fluent-bit-test    2     1s

==> v1/Pod(related)
NAME              READY  STATUS             RESTARTS  AGE
fluent-bit-45rm4  0/1    ContainerCreating  0         1s

==> v1/Secret
NAME                      TYPE    DATA  AGE
fluent-bit-es-tls-secret  Opaque  1     1s

==> v1/ServiceAccount
NAME        SECRETS  AGE
fluent-bit  1        1s

==> v1beta1/DaemonSet
NAME        DESIRED  CURRENT  READY  UP-TO-DATE  AVAILABLE  NODE SELECTOR  AGE
fluent-bit  1        1        0      1           0          <none>         1s


NOTES:
fluent-bit is now running.

It will forward all container logs to the svc named elasticsearch-client on port: 9200

5 Deploy Kibana

root@master:~# helm install stable/kibana --name=kibana --namespace=logs --set env.ELASTICSEARCH_HOSTS=http://elasticsearch-client:9200 --set service.type=NodePort --set service.nodePort=31000
NAME:   kibana
LAST DEPLOYED: Tue Jul  2 10:49:58 2019
NAMESPACE: logs
STATUS: DEPLOYED

RESOURCES:
==> v1/ConfigMap
NAME         DATA  AGE
kibana       1     1s
kibana-test  1     1s

==> v1/Pod(related)
NAME                     READY  STATUS             RESTARTS  AGE
kibana-6765bdc769-tgfzw  0/1    ContainerCreating  0         1s

==> v1/Service
NAME    TYPE      CLUSTER-IP    EXTERNAL-IP  PORT(S)        AGE
kibana  NodePort  10.105.71.25  <none>       443:31000/TCP  1s

==> v1beta1/Deployment
NAME    READY  UP-TO-DATE  AVAILABLE  AGE
kibana  0/1    1           0          1s


NOTES:
To verify that kibana has started, run:

  kubectl --namespace=logs get pods -l "app=kibana"

Kibana can be accessed:

  * From outside the cluster, run these commands in the same shell:

    export NODE_PORT=$(kubectl get --namespace logs -o jsonpath="{.spec.ports[0].nodePort}" services kibana)
    export NODE_IP=$(kubectl get nodes --namespace logs -o jsonpath="{.items[0].status.addresses[0].address}")
    echo http://$NODE_IP:$NODE_PORT

6 Verify Running Stack

root@master:~# kubectl get deployments,pods,services --namespace=logs
NAME                                         READY   UP-TO-DATE   AVAILABLE   AGE
deployment.extensions/elasticsearch-client   1/1     1            1           3m20s
deployment.extensions/kibana                 1/1     1            1           112s

NAME                                        READY   STATUS    RESTARTS   AGE
pod/elasticsearch-client-5ff64d4bc4-22gxt   1/1     Running   0          3m20s
pod/elasticsearch-data-0                    1/1     Running   0          3m20s
pod/elasticsearch-master-0                  1/1     Running   0          3m20s
pod/fluent-bit-45rm4                        1/1     Running   0          2m48s
pod/kibana-6765bdc769-tgfzw                 1/1     Running   0          112s

NAME                              TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
service/elasticsearch-client      ClusterIP   10.106.170.20   <none>        9200/TCP        3m20s
service/elasticsearch-discovery   ClusterIP   None            <none>        9300/TCP        3m20s
service/kibana                    NodePort    10.105.71.25    <none>        443:31000/TCP   112s

7 Generate Log Events

root@master:~# kubectl run random-logger --image=chentex/random-logger
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/random-logger created
root@master:~# kubectl logs deployment/random-logger
2019-07-02T10:52:30+0000 ERROR something happened in this execution.
2019-07-02T10:52:34+0000 DEBUG first loop completed.
2019-07-02T10:52:35+0000 DEBUG first loop completed.
2019-07-02T10:52:38+0000 WARN variable not in use.
2019-07-02T10:52:41+0000 DEBUG first loop completed.
2019-07-02T10:52:44+0000 INFO takes the value and converts it to string.
2019-07-02T10:52:45+0000 INFO takes the value and converts it to string.
2019-07-02T10:52:49+0000 WARN variable not in use.

8 View Log Events

9 같이 보기

문서 댓글 ({{ doc_comments.length }})
{{ comment.name }} {{ comment.created | snstime }}