Deploying an Elasticsearch Cluster on K8S with the Operator Approach

0 References

  • https://github.com/elastic/cloud-on-k8s/tree/main/deploy
  • https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-install-helm.html#k8s-install-helm-global
  • https://github.com/elastic/cloud-on-k8s/blob/main/deploy/eck-elasticsearch/examples/hot-warm-cold.yaml
  • https://www.se7enshare.cn/shi-yong-eck-zai-kubernetes-ji-qun-zhong-guan-li-elastic-stack/

1 Operator Deployment

# Add the Elastic Helm repo and pull the eck-operator chart locally
helm repo add elastic https://helm.elastic.co
helm pull elastic/eck-operator

# Mirror the operator image into the private registry
docker pull docker.elastic.co/eck/eck-operator:2.6.1
docker tag docker.elastic.co/eck/eck-operator:2.6.1 harbor.leadchina.cn/eck/eck-operator:2.6.1
docker push harbor.leadchina.cn/eck/eck-operator:2.6.1
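
The install below passes a custom values.yaml so the chart uses the mirrored image. A minimal sketch of that override, assuming the eck-operator chart exposes the usual image.repository / image.tag keys (verify against the chart's bundled values.yaml):

# values.yaml (illustrative): point the operator at the image mirrored above
image:
  repository: harbor.leadchina.cn/eck/eck-operator
  tag: 2.6.1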

# Default online install:
# helm install elastic-operator elastic/eck-operator -n elastic-system --create-namespace

# Install from the unpacked local chart with custom values, then check the operator logs
helm install -f values.yaml elastic-operator ./ -n elastic-system --create-namespace
kubectl logs -n elastic-system sts/elastic-operator
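
Before creating any Elasticsearch resources, it is worth confirming that the operator is running and that its CRDs are registered; a quick check:

kubectl get pods -n elastic-system          # the elastic-operator-0 pod should be Running
kubectl get crd | grep k8s.elastic.co       # Elasticsearch/Kibana CRDs installed by the chart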

2 Elasticsearch Cluster Deployment

See the example in the official repository: https://github.com/elastic/cloud-on-k8s/blob/main/deploy/eck-elasticsearch/examples/hot-warm-cold.yaml

# Resource manifest for the es1 cluster
# This creates a 3-node ES cluster named es1 in Kubernetes and automatically creates the related Service resources
[root@salt-master-50 ~/elasticsearch-operator]# cat quickstart.yaml 
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: es1
  namespace: default
spec:
  version: 7.14.1
  image: harbor.leadchina.cn/elasticsearch/elasticsearch:7.14.1
  nodeSets:
  - name: dn
    config:
      node.master: true
      node.data: true
    count: 3
    volumeClaimTemplates:
    - metadata:
        name: elasticsearch-data 
      spec:
        accessModes:
        - ReadWriteOnce
        resources:
          requests:
            storage: 5Gi
        storageClassName: csi-rbd-sc
    podTemplate:
      spec:
        initContainers:
        - name: sysctl
          securityContext:
            privileged: true
            runAsUser: 0
          command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
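
Applying the manifest is all that is needed; ECK then creates the StatefulSet, Services, and the elastic user secret. A minimal sketch, assuming the file is saved as quickstart.yaml as shown above:

kubectl apply -f quickstart.yaml
# HEALTH turns green and PHASE becomes Ready once the cluster has formed
kubectl get elasticsearch es1 -w
kubectl get pods -l elasticsearch.k8s.elastic.co/cluster-name=es1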

# Wait a few minutes for the ES cluster to finish initializing
# Test access to the ES cluster (ECK enables TLS with a self-signed certificate by default, hence https and -k)
INSTANCE='es1'
PASSWORD=$(kubectl get secret ${INSTANCE}-es-elastic-user -o go-template='{{.data.elastic | base64decode}}')
echo "PASSWORD: ${PASSWORD}"
echo curl -k -u "elastic:${PASSWORD}" -XGET "https://${INSTANCE}-es-http.default.svc.cluster.local:9200/_cluster/state/nodes?pretty"

# Log in to any pod of the ES cluster and check the node status with the command below
curl -k -u "elastic:q1R0F2eF2IzEpP99903NqC9d" -XGET "https://es1-es-http.default.svc.cluster.local:9200/_cluster/state/nodes?pretty"
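
To test from a workstation outside the cluster, the HTTP Service can also be port-forwarded; a short sketch (PASSWORD is the variable retrieved above):

kubectl port-forward service/es1-es-http 9200 &
curl -k -u "elastic:${PASSWORD}" "https://localhost:9200/_cat/nodes?v"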

3 Kibana Instance Deployment

[root@salt-master-50 ~/elasticsearch-operator]# cat kibana-hw.yaml 
apiVersion: kibana.k8s.elastic.co/v1
kind: Kibana
metadata:
  name: my-kibana-hw
spec:
  version: 7.14.1
  image: harbor.leadchina.cn/kibana/kibana:7.14.1
  count: 1
  elasticsearchRef:
    name: hw-es
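
Once the hw-es cluster from section 4 exists, applying this manifest is enough; ECK exposes Kibana through a ClusterIP Service named my-kibana-hw-kb-http on port 5601. A minimal sketch for applying and reaching it (Kibana's self-signed TLS is left at its default here):

kubectl apply -f kibana-hw.yaml
kubectl get kibana my-kibana-hw
# Open https://localhost:5601 and log in as the elastic user
kubectl port-forward service/my-kibana-hw-kb-http 5601 &
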
# Deploy cerebro, a monitoring and management tool for ES clusters; it can show cluster state and issue ES API requests
[root@salt-master-50 ~/elasticsearch-operator]# cat cerebro.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cerebro
  labels:
    app: cerebro
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cerebro
  template:
    metadata:
      labels:
        app: cerebro
    spec:
      containers:
      - name: cerebro
        image: harbor.leadchina.cn/lmenezes/cerebro:0.9.4
        ports:
        - containerPort: 9000
              
# NodePort Service for cerebro
[root@salt-master-50 ~/elasticsearch-operator]# cat cerebro-svc.yaml 
apiVersion: v1
kind: Service
metadata:
  labels:
    app: cerebro-svc
  name: cerebro-svc
  namespace: default
spec:
  ports:
  - nodePort: 32090
    port: 9000
    protocol: TCP
    targetPort: 9000
  selector:
    app: cerebro
  sessionAffinity: None
  type: NodePort
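
With the NodePort Service above, cerebro is reachable on port 32090 of any cluster node; in its connect dialog, point it at the ES HTTP endpoint. A short sketch (the node IP is illustrative):

# cerebro UI:  http://<any-node-ip>:32090
# Node address to enter in cerebro:  http://hw-es-es-http.default.svc.cluster.local:9200
# Credentials: user "elastic", password from the hw-es secret:
kubectl get secret hw-es-es-elastic-user -o go-template='{{.data.elastic | base64decode}}'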


# You can log in through the Kibana web UI, or point an ES monitoring tool at http://hw-es-es-http.default.svc.cluster.local:9200 using the username elastic
INSTANCE='hw-es'
# The password is retrieved with:
kubectl get secret ${INSTANCE}-es-elastic-user -o go-template='{{.data.elastic | base64decode}}'

4 Hot-Warm Cluster Deployment

Reference: https://www.se7enshare.cn/shi-yong-eck-zai-kubernetes-ji-qun-zhong-guan-li-elastic-stack/

[root@salt-master-50 ~/elasticsearch-operator]# cat hot-warm.yaml 
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: hw-es
spec:
  # Disable HTTPS
  http:
    tls:
      selfSignedCertificate:
        disabled: true
  version: 7.14.1
  image: harbor.leadchina.cn/elasticsearch/elasticsearch:7.14.1
  nodeSets:
  # master nodes
  - name: master
    count: 3
    config:
      node.roles: ["master"]
    podTemplate:
      spec:
        initContainers:
        - name: sysctl
          securityContext:
            privileged: true
            runAsUser: 0
          command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
        containers:
        - name: elasticsearch
          resources:
            # limit resource usage
            limits:
              memory: 1Gi
              cpu: 1
    volumeClaimTemplates:
    - metadata:
        name: elasticsearch-data
      spec:
        accessModes:
        - ReadWriteOnce
        resources:
          requests:
            storage: 20Gi
        storageClassName: csi-rbd-sc
  # hot nodes
  - name: hot
    count: 3
    config:
      node.attr.data: hot  # node attribute used for shard allocation
      node.roles: ["data","ingest"]
    podTemplate:
      spec:
        initContainers:
        - name: sysctl
          securityContext:
            privileged: true
            runAsUser: 0
          command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
        containers:
        - name: elasticsearch
          resources:
            limits:
              memory: 2Gi
              cpu: 2
    volumeClaimTemplates:
    - metadata:
        name: elasticsearch-data
      spec:
        accessModes:
        - ReadWriteOnce
        resources:
          requests:
            storage: 20Gi
        storageClassName: csi-rbd-sc
  # warm nodes
  - name: warm
    count: 3
    config:
      node.attr.data: warm  # node attribute used for shard allocation
      node.roles: ["data","ingest"]
    podTemplate:
      spec:
        initContainers:
        - name: sysctl
          securityContext:
            privileged: true
            runAsUser: 0
          command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
        containers:
        - name: elasticsearch
          resources:
            limits:
              memory: 2Gi
              cpu: 1
    volumeClaimTemplates:
    - metadata:
        name: elasticsearch-data
      spec:
        accessModes:
        - ReadWriteOnce
        resources:
          requests:
            storage: 60Gi
        storageClassName: csi-rbd-sc
  # cold nodes
  - name: cold
    count: 3
    config:
      node.attr.data: cold 
      node.roles: ["data","ingest"]
    podTemplate:
      spec:
        initContainers:
        - name: sysctl
          securityContext:
            privileged: true
            runAsUser: 0
          command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
        containers:
        - name: elasticsearch
          resources:
            limits:
              memory: 2Gi
              cpu: 1
    volumeClaimTemplates:
    - metadata:
        name: elasticsearch-data
      spec:
        accessModes:
        - ReadWriteOnce
        resources:
          requests:
            storage: 100Gi
        storageClassName: managed-nfs-storage
[root@salt-master-50 ~/elasticsearch-operator]# 
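
The node.attr.data attribute set on each node set only takes effect once indices are told where to allocate. A minimal sketch of pinning new indices to the hot tier with shard allocation filtering via an index template (the template name, index pattern, and ${PASSWORD} are illustrative); a full hot-warm-cold setup would add ILM policies to move indices to warm/cold over time:

curl -u "elastic:${PASSWORD}" -H 'Content-Type: application/json' \
  -XPUT "http://hw-es-es-http.default.svc.cluster.local:9200/_index_template/logs-hot" -d '
{
  "index_patterns": ["logs-*"],
  "template": {
    "settings": {
      "index.routing.allocation.require.data": "hot",
      "number_of_shards": 3,
      "number_of_replicas": 1
    }
  }
}'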

5 Monitoring and Management Tool Configuration

Consider using Prometheus to collect metrics from each node of the ES cluster, visualizing them with Grafana dashboards, and sending alert notifications through the Alertmanager component.
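
One common approach (not prescribed here) is to run the prometheus-community elasticsearch_exporter alongside the cluster and let Prometheus scrape its metrics port 9114; a minimal Deployment sketch, assuming the hw-es cluster with TLS disabled and the hw-es-es-elastic-user secret created by ECK:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: es-exporter
  labels:
    app: es-exporter
spec:
  replicas: 1
  selector:
    matchLabels:
      app: es-exporter
  template:
    metadata:
      labels:
        app: es-exporter
    spec:
      containers:
      - name: exporter
        image: quay.io/prometheuscommunity/elasticsearch-exporter:v1.5.0
        args:
        # es.uri points at the in-cluster HTTP Service; the password comes from the ECK secret via env
        - --es.uri=http://elastic:$(ES_PASSWORD)@hw-es-es-http.default.svc.cluster.local:9200
        env:
        - name: ES_PASSWORD
          valueFrom:
            secretKeyRef:
              name: hw-es-es-elastic-user
              key: elastic
        ports:
        - containerPort: 9114   # metrics endpoint scraped by Prometheus
          name: metrics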