EFK

Preparing the configuration files

        Extract the downloaded kubernetes-server-linux-amd64.tar.gz, then extract the kubernetes-src.tar.gz found inside it. The EFK manifests live under kubernetes/cluster/addons/fluentd-elasticsearch.
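
        A typical extraction sequence looks like this (assuming the tarball was downloaded to the current directory):

$ tar -zxvf kubernetes-server-linux-amd64.tar.gz
$ cd kubernetes
$ tar -zxvf kubernetes-src.tar.gz
# the es-*, fluentd-es-* and kibana-* manifests are now under:
$ ls cluster/addons/fluentd-elasticsearch/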

        By default Elasticsearch stores its data inside the container, which means on the node's local disk. We cannot reserve a large amount of space on every node, as that would waste resources, so we prepare external NFS storage instead and mount it through PVs. The data then lives on the NFS server, which keeps the logs Elasticsearch collects available even if a pod is rescheduled.
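
        The StorageClass below uses the provisioner name fuseim.pri/ifs, the conventional name used by the external nfs-client-provisioner, so this walkthrough assumes that provisioner is already running in the cluster and pointed at your NFS server. A minimal sketch of such a deployment follows; the server address 192.168.1.100 and export path /data/nfs are placeholders, and the ServiceAccount plus its RBAC objects are omitted:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  namespace: kube-system
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
      - name: nfs-client-provisioner
        image: quay.io/external_storage/nfs-client-provisioner:latest
        env:
        - name: PROVISIONER_NAME
          value: fuseim.pri/ifs        # must match the StorageClass provisioner below
        - name: NFS_SERVER
          value: 192.168.1.100         # placeholder: your NFS server
        - name: NFS_PATH
          value: /data/nfs             # placeholder: your exported directory
        volumeMounts:
        - name: nfs-client-root
          mountPath: /persistentvolumes
      volumes:
      - name: nfs-client-root
        nfs:
          server: 192.168.1.100        # placeholder: your NFS server
          path: /data/nfs              # placeholder: your exported directory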

Create the storage class

$ cat > pvc.yaml <<EOF
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: es-nfs-data
provisioner: fuseim.pri/ifs
EOF

$ kubectl apply -f pvc.yaml
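
The StorageClass should be registered before you continue; kubectl should show something along these lines:

$ kubectl get storageclass es-nfs-data
NAME          PROVISIONER      AGE
es-nfs-data   fuseim.pri/ifs   5s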

Modify the configuration files

  • es-statefulset.yaml

    # RBAC authn and authz
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: elasticsearch-logging
      namespace: kube-system
      labels:
        k8s-app: elasticsearch-logging
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: elasticsearch-logging
      labels:
        k8s-app: elasticsearch-logging
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
    rules:
    - apiGroups:
      - ""
      resources:
      - "services"
      - "namespaces"
      - "endpoints"
      verbs:
      - "get"
    ---
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      namespace: kube-system
      name: elasticsearch-logging
      labels:
        k8s-app: elasticsearch-logging
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
    subjects:
    - kind: ServiceAccount
      name: elasticsearch-logging
      namespace: kube-system
      apiGroup: ""
    roleRef:
      kind: ClusterRole
      name: elasticsearch-logging
      apiGroup: ""
    ---
    # Elasticsearch deployment itself
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: elasticsearch-logging
      namespace: kube-system
      labels:
        k8s-app: elasticsearch-logging
        version: v6.6.1
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
    spec:
      serviceName: elasticsearch-logging
      replicas: 2
      selector:
        matchLabels:
          k8s-app: elasticsearch-logging
          version: v6.6.1
      template:
        metadata:
          labels:
            k8s-app: elasticsearch-logging
            version: v6.6.1
            kubernetes.io/cluster-service: "true"
        spec:
          serviceAccountName: elasticsearch-logging
          containers:
          - image: elasticsearch:6.6.1
            name: elasticsearch-logging
            resources:
              # need more cpu upon initialization, therefore burstable class
              limits:
                cpu: 1000m
              requests:
                cpu: 100m
            ports:
            - containerPort: 9200
              name: db
              protocol: TCP
            - containerPort: 9300
              name: transport
              protocol: TCP
            volumeMounts:
            - name: elasticsearch-logging
              mountPath: /data
            env:
            - name: "NAMESPACE"
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          # Elasticsearch requires vm.max_map_count to be at least 262144.
          # If your OS already sets up this number to a higher value, feel free
          # to remove this init container.
          initContainers:
          - image: alpine:3.6
            command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
            name: elasticsearch-logging-init
            securityContext:
              privileged: true
      volumeClaimTemplates:
      - metadata:
          name: elasticsearch-logging
        spec:
          accessModes: [ "ReadWriteMany" ]
          storageClassName: "es-nfs-data"
          resources:
            requests:
              storage: 30Gi
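
    The privileged init container exists because Elasticsearch refuses to start unless vm.max_map_count is at least 262144. If you would rather set it on the nodes themselves and drop the init container, something like the following on each node works:

    $ sysctl -w vm.max_map_count=262144
    $ echo 'vm.max_map_count=262144' >> /etc/sysctl.conf   # persist across reboots
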
  • fluentd-es-ds.yaml

    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: fluentd-es
      namespace: kube-system
      labels:
        k8s-app: fluentd-es
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: fluentd-es
      labels:
        k8s-app: fluentd-es
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
    rules:
    - apiGroups:
      - ""
      resources:
      - "namespaces"
      - "pods"
      verbs:
      - "get"
      - "watch"
      - "list"
    ---
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: fluentd-es
      labels:
        k8s-app: fluentd-es
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
    subjects:
    - kind: ServiceAccount
      name: fluentd-es
      namespace: kube-system
      apiGroup: ""
    roleRef:
      kind: ClusterRole
      name: fluentd-es
      apiGroup: ""
    ---
    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
      name: fluentd-es-v2.4.0
      namespace: kube-system
      labels:
        k8s-app: fluentd-es
        version: v2.4.0
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
    spec:
      selector:
        matchLabels:
          k8s-app: fluentd-es
          version: v2.4.0
      template:
        metadata:
          labels:
            k8s-app: fluentd-es
            kubernetes.io/cluster-service: "true"
            version: v2.4.0
          # This annotation ensures that fluentd does not get evicted if the node
          # supports critical pod annotation based priority scheme.
          # Note that this does not guarantee admission on the nodes (#40573).
          annotations:
            scheduler.alpha.kubernetes.io/critical-pod: ''
            seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
        spec:
          priorityClassName: system-node-critical
          serviceAccountName: fluentd-es
          containers:
          - name: fluentd-es
            image: docker.io/xxlaila/fluentd-elasticsearch:v2.4.0
            env:
            - name: FLUENTD_ARGS
              value: --no-supervisor -q
            resources:
              limits:
                memory: 500Mi
              requests:
                cpu: 100m
                memory: 200Mi
            volumeMounts:
            - name: varlog
              mountPath: /var/log
            - name: varlibdockercontainers
              mountPath: /var/lib/docker/containers
              readOnly: true
            - name: config-volume
              mountPath: /etc/fluent/config.d
          terminationGracePeriodSeconds: 30
          volumes:
          - name: varlog
            hostPath:
              path: /var/log
          - name: varlibdockercontainers
            hostPath:
              path: /var/lib/docker/containers
          - name: config-volume
            configMap:
              name: fluentd-es-config-v0.2.0
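
    The DaemonSet mounts a ConfigMap named fluentd-es-config-v0.2.0, which is defined in fluentd-es-configmap.yaml in the same addon directory, so that file must be applied along with the DaemonSet (the kubectl apply -f ./ step below covers this). To double-check it exists:

    $ kubectl get configmap fluentd-es-config-v0.2.0 -n kube-system
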
  • kibana-deployment.yaml
    Comment out the two lines below; if they are left in, opening Kibana returns {"statusCode":404,"error":"Not Found","message":"Not Found"}. Following the usual workaround, the configuration looks like this after the edit:

    # - name: SERVER_BASEPATH
    #   value: /api/v1/namespaces/kube-system/services/kibana-logging/proxy

Apply the manifests

$ kubectl apply -f ./
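
This applies every manifest in the addon directory at once, including es-service.yaml, fluentd-es-configmap.yaml, kibana-deployment.yaml, and kibana-service.yaml, which are not reproduced in full above but are required for the stack to work.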

Verify the deployment

  • Check the pods

    $ kubectl get pods -n kube-system | egrep "kibana|elasticsearch|fluentd"
    elasticsearch-logging-0           1/1   Running   0   65m
    elasticsearch-logging-1           1/1   Running   0   61m
    fluentd-es-v2.4.0-4fp28           1/1   Running   0   30m
    fluentd-es-v2.4.0-b7k67           1/1   Running   0   30m
    fluentd-es-v2.4.0-f8jzp           1/1   Running   0   30m
    fluentd-es-v2.4.0-shwzm           1/1   Running   0   30m
    fluentd-es-v2.4.0-ww8r8           1/1   Running   0   30m
    kibana-logging-57b55f58bc-xh5lp   1/1   Running   0   6m35s
  • Check the services

    $ kubectl get svc -n kube-system | egrep "kibana|elasticsearch"
    elasticsearch-logging   ClusterIP   10.254.30.110   <none>   9200/TCP   9s
    kibana-logging          ClusterIP   10.254.188.5    <none>   5601/TCP   16h
  • Check the PVs and PVCs

    $ kubectl get pv,pvc -n kube-system
    NAME                                                        CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                                                        STORAGECLASS   REASON   AGE
    persistentvolume/pvc-65fdd14e-dffc-11e9-bc90-fa163e5af833   30Gi       RWX            Delete           Bound    kube-system/elasticsearch-logging-elasticsearch-logging-0   es-nfs-data             21m
    persistentvolume/pvc-fe818f55-dffc-11e9-bc90-fa163e5af833   30Gi       RWX            Delete           Bound    kube-system/elasticsearch-logging-elasticsearch-logging-1   es-nfs-data             16m

    NAME                                                                  STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
    persistentvolumeclaim/elasticsearch-logging-elasticsearch-logging-0   Bound    pvc-65fdd14e-dffc-11e9-bc90-fa163e5af833   30Gi       RWX            es-nfs-data    21m
    persistentvolumeclaim/elasticsearch-logging-elasticsearch-logging-1   Bound    pvc-fe818f55-dffc-11e9-bc90-fa163e5af833   30Gi       RWX            es-nfs-data    17m
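
Before exposing the cluster it is worth a quick health check. One way (a sketch using a plain port-forward of the service; any client that can reach port 9200 works) is:

$ kubectl port-forward -n kube-system svc/elasticsearch-logging 9200:9200 &
$ curl -s http://localhost:9200/_cluster/health?pretty

On the NFS side, the nfs-client provisioner puts each volume in its own directory under the export, named after the namespace, PVC, and PV, which is where the index data listed above actually lives.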

Set up web access

  • kibana-Ingress.yaml

    $ cat > kibana-Ingress.yaml <<EOF
    ---
    apiVersion: extensions/v1beta1
    kind: Ingress
    metadata:
      name: kibana-web-ui
      namespace: kube-system
      annotations:
        kubernetes.io/ingress.class: traefik
    spec:
      rules:
      - host: kibana.xxlaila.cn
        http:
          paths:
          - path: /
            backend:
              serviceName: kibana-logging
              servicePort: 5601
    EOF
  • es-Ingress.yaml

    $ cat > es-Ingress.yaml <<EOF
    ---
    apiVersion: extensions/v1beta1
    kind: Ingress
    metadata:
      name: es-web-ui
      namespace: kube-system
      annotations:
        kubernetes.io/ingress.class: traefik
    spec:
      rules:
      - host: es.xxlaila.cn
        http:
          paths:
          - path: /
            backend:
              serviceName: elasticsearch-logging
              servicePort: 9200
    EOF
  • Apply the Ingress manifests

    $ kubectl apply -f es-Ingress.yaml -f kibana-Ingress.yaml
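
    For the two hostnames to reach Traefik they must resolve to a node where the ingress controller listens; for a quick test an /etc/hosts entry on your workstation is enough (the address is a placeholder for your ingress node's IP):

    192.168.1.200  es.xxlaila.cn  kibana.xxlaila.cn
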
  • Access Elasticsearch in the browser

  • Access Kibana in the browser
    Create an index pattern. By default one index per day is created automatically in Elasticsearch (the addon's stock Fluentd config names them logstash-YYYY.MM.DD); here I group them by month in Kibana instead, e.g. with a pattern such as logstash-2019.09.*.
