Platform Setup Notes - Building a Highly Available k8s Cluster for the External Test Environment

Preface

This is a quick-reference companion to earlier articles, recorded mainly so the environment can be rebuilt quickly later.

Preparation

| Server  | IP             | OS         | Role                                                       |
| ------- | -------------- | ---------- | ---------------------------------------------------------- |
| master1 | 192.168.31.100 | CentOS 7.6 | k8s master node 1, ceph-node, ceph-osd, ceph-mds, ceph-mgr |
| node1   | 192.168.31.101 | CentOS 7.6 | k8s worker node 1, ceph-node, ceph-osd, ceph-mds           |
| node2   | 192.168.31.102 | CentOS 7.6 | k8s worker node 2, ceph-node, ceph-osd, ceph-mds           |

Server preparation

  • Make sure master1, node1, and node2 can all reach each other

  • Configure hostname and hosts

    Set the hostname (run the matching command on each node):

    hostnamectl set-hostname master1
    hostnamectl set-hostname node1
    hostnamectl set-hostname node2

    Configure /etc/hosts on every machine:

    cat >/etc/hosts <<EOF
    192.168.31.100 master1
    192.168.31.101 node1
    192.168.31.102 node2
    EOF

  • Disable firewalld, SELinux, and swap

    systemctl stop firewalld
    systemctl disable firewalld
    setenforce 0
    sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
    swapoff -a
    sed -i 's/.*swap.*/#&/' /etc/fstab

  • Configure kernel parameters (sysctl)

    k8s sysctl settings:

    cat << EOF > /etc/sysctl.d/k8s.conf
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    vm.swappiness=0
    EOF

    ceph sysctl settings:

    cat > /etc/sysctl.d/ceph.conf <<EOF
    net.ipv4.ip_forward = 1
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    EOF
    sysctl --system
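
    The net.bridge.* keys above only take effect when the br_netfilter module is loaded; one way (an addition to these notes) to load it now and on every boot:

    modprobe br_netfilter
    echo "br_netfilter" > /etc/modules-load.d/br_netfilter.conf
    sysctl --system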

Step 1 - Switch yum to domestic (China mirror) repos

root@ALL:
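
  • A minimal sketch of the repo setup, assuming Aliyun mirrors for the CentOS base, docker-ce, kubernetes, and ceph (nautilus) repos; adjust the URLs to whichever domestic mirrors you actually use:

    # Base repo from the Aliyun mirror
    curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
    # docker-ce repo
    yum install -y yum-utils
    yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    # kubernetes repo
    cat > /etc/yum.repos.d/kubernetes.repo << EOF
    [kubernetes]
    name=Kubernetes
    baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
    enabled=1
    gpgcheck=0
    EOF
    # ceph nautilus repo (needed for the ceph steps later)
    cat > /etc/yum.repos.d/ceph.repo << EOF
    [ceph]
    name=Ceph
    baseurl=https://mirrors.aliyun.com/ceph/rpm-nautilus/el7/\$basearch
    enabled=1
    gpgcheck=0
    [ceph-noarch]
    name=Ceph noarch
    baseurl=https://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch
    enabled=1
    gpgcheck=0
    EOF
    yum clean all && yum makecache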

Step 3 - Set up haproxy + keepalived on the master1 node

root@master1:

  • Install the required packages

    yum install -y haproxy keepalived net-tools nmap-ncat

  • Configure haproxy

    Append the kube-apiserver listener and a stats page to /etc/haproxy/haproxy.cfg:

    cat >> /etc/haproxy/haproxy.cfg << EOF
    #---------------------------------------------------------------------
    # kube-api-server-listen
    #---------------------------------------------------------------------
    listen kube-api-lb
        bind 0.0.0.0:10443
        mode tcp
        balance roundrobin
        server master1 192.168.31.100:6443 weight 1 maxconn 10000 check inter 10s

    #---------------------------------------------------------------------
    # kube-api-haproxy-stats
    #---------------------------------------------------------------------
    listen admin_stats
        bind 0.0.0.0:8099
        mode http
        option httplog
        maxconn 10
        stats refresh 30s
        stats uri /stats
    EOF

  • Configure keepalived

    mkdir -p /etc/keepalived/scripts/
    cat > /etc/keepalived/scripts/haproxy_check.sh << \EOF
    #!/bin/bash
    if [ $(ps -C haproxy --no-header | wc -l) -eq 0 ]
    then
        systemctl start haproxy
        sleep 3
        if [ $(ps -C haproxy --no-header | wc -l) -eq 0 ]
        then
            systemctl stop keepalived
        fi
    fi
    EOF
    cat > /etc/keepalived/scripts/notifi_master.sh << \EOF
    #!/bin/bash
    VIP=192.xx.xx.xx
    GATEWAY=192.xx.xx.xx
    /sbin/arping -I eth0 -c 5 -s $VIP $GATEWAY &>/dev/null
    EOF
    chmod +x /etc/keepalived/scripts/haproxy_check.sh /etc/keepalived/scripts/notifi_master.sh
    cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.backup

  • master1 acts as the MASTER node here (a second master node, if added, would take the BACKUP role).
    Write the primary keepalived.conf on master1:

    cat << EOF > /etc/keepalived/keepalived.conf
    ! Configuration File for keepalived

    global_defs {
        router_id Haproxy-Master
        script_user root
        enable_script_security
        vrrp_skip_check_adv_addr
        vrrp_iptables
        vrrp_garp_interval 0
        vrrp_gna_interval 0
        # vrrp_strict
    }

    vrrp_script chk_haproxy {
        script "/etc/keepalived/scripts/haproxy_check.sh"
        interval 5
        fall 2
    }

    vrrp_instance haproxy {
        state MASTER
        interface ens33
        virtual_router_id 51
        priority 100
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 5e97s45a2
        }
        unicast_src_ip 192.168.31.100
        unicast_peer {
            192.168.31.101
            192.168.31.102
        }
        track_script {
            chk_haproxy
        }
        virtual_ipaddress {
            192.168.31.150
        }
        notify_master "/etc/keepalived/scripts/notifi_master.sh"
    }

    EOF

  • Start keepalived

    systemctl stop NetworkManager
    systemctl start keepalived
    systemctl enable keepalived
    netstat -ntplu|grep 10443
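
    A quick sanity check (optional) that the VIP has been claimed and haproxy answers on 10443; nc comes from the nmap-ncat package installed above:

    ip addr show ens33 | grep 192.168.31.150
    nc -zv 192.168.31.150 10443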

Step 4 - Deploy the k8s cluster

root@ALL:

  • Install kubelet, kubeadm, and kubectl

    yum install -y kubelet-1.17.2 kubeadm-1.17.2 kubectl-1.17.2
    systemctl enable kubelet

root@master1:

  • Initialize master1 with kubeadm
    --pod-network-cidr is the default flannel network range

    kubeadm init --kubernetes-version=1.17.2 \
        --apiserver-advertise-address=192.168.31.100 \
        --image-repository registry.aliyuncs.com/google_containers \
        --service-cidr=10.1.0.0/16 \
        --control-plane-endpoint "192.168.31.150:10443" \
        --upload-certs \
        --pod-network-cidr=10.244.0.0/16

  • Record the join commands (and tokens) that kubeadm prints for adding master nodes and worker nodes

  • Configure the kubectl tool

    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
    kubectl get nodes
    kubectl get cs

  • Deploy the flannel network
    Create the file flanneld.yaml

    Create the directory:

    mkdir -p yaml/flanneld

    Write the file:

    cat << EOF > yaml/flanneld/flanneld.yaml

    apiVersion: policy/v1beta1
    kind: PodSecurityPolicy
    metadata:
    name: psp.flannel.unprivileged
    annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
    spec:
    privileged: false
    volumes:
    - configMap
    - secret
    - emptyDir
    - hostPath
    allowedHostPaths:
    - pathPrefix: "/etc/cni/net.d"
    - pathPrefix: "/etc/kube-flannel"
    - pathPrefix: "/run/flannel"
    readOnlyRootFilesystem: false
    # Users and groups
    runAsUser:
    rule: RunAsAny
    supplementalGroups:
    rule: RunAsAny
    fsGroup:
    rule: RunAsAny
    # Privilege Escalation
    allowPrivilegeEscalation: false
    defaultAllowPrivilegeEscalation: false
    # Capabilities
    allowedCapabilities: ['NET_ADMIN']
    defaultAddCapabilities: []
    requiredDropCapabilities: []
    # Host namespaces
    hostPID: false
    hostIPC: false
    hostNetwork: true
    hostPorts:
    - min: 0
      max: 65535
    # SELinux
    seLinux:
      # SELinux is unused in CaaSP
      rule: 'RunAsAny'
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1beta1
    metadata:
    name: flannel
    rules:
    - apiGroups: ['extensions']
      resources: ['podsecuritypolicies']
      verbs: ['use']
      resourceNames: ['psp.flannel.unprivileged']
    - apiGroups:
      - ""
      resources:
      - pods
      verbs:
      - get
    - apiGroups:
      - ""
      resources:
      - nodes
      verbs:
      - list
      - watch
    - apiGroups:
      - ""
      resources:
      - nodes/status
      verbs:
      - patch
    ---
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1beta1
    metadata:
    name: flannel
    roleRef:
    apiGroup: rbac.authorization.k8s.io
    kind: ClusterRole
    name: flannel
    subjects:
    - kind: ServiceAccount
      name: flannel
      namespace: kube-system
    ---
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: flannel
      namespace: kube-system
    ---
    kind: ConfigMap
    apiVersion: v1
    metadata:
    name: kube-flannel-cfg
    namespace: kube-system
    labels:
    tier: node
    app: flannel
    data:
    cni-conf.json: |
    {
    "name": "cbr0",
    "cniVersion": "0.3.1",
    "plugins": [
    {
    "type": "flannel",
    "delegate": {
    "hairpinMode": true,
    "isDefaultGateway": true
    }
    },
    {
    "type": "portmap",
    "capabilities": {
    "portMappings": true
    }
    }
    ]
    }
    net-conf.json: |
    {
    "Network": "10.244.0.0/16",
    "Backend": {
    "Type": "vxlan"
    }

    }

    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
    name: kube-flannel-ds-amd64
    namespace: kube-system
    labels:
    tier: node
    app: flannel
    spec:
    selector:
    matchLabels:
    app: flannel
    template:
    metadata:
    labels:
    tier: node
    app: flannel
    spec:
    affinity:
    nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
    nodeSelectorTerms:
    - matchExpressions:
    - key: beta.kubernetes.io/os
    operator: In
    values:
    - linux
    - key: beta.kubernetes.io/arch
    operator: In
    values:
    - amd64
    hostNetwork: true
    tolerations:
    - operator: Exists
    effect: NoSchedule
    serviceAccountName: flannel
    initContainers:
    - name: install-cni
    image: quay.mirrors.ustc.edu.cn/coreos/flannel:v0.11.0-amd64
    command:
    - cp
    args:
    - -f
    - /etc/kube-flannel/cni-conf.json
    - /etc/cni/net.d/10-flannel.conflist
    volumeMounts:
    - name: cni
    mountPath: /etc/cni/net.d
    - name: flannel-cfg
    mountPath: /etc/kube-flannel/
    containers:
    - name: kube-flannel
    image: quay.mirrors.ustc.edu.cn/coreos/flannel:v0.11.0-amd64
    command:
    - /opt/bin/flanneld
    args:
    - --ip-masq
    - --kube-subnet-mgr
    resources:
    requests:
    cpu: "100m"
    memory: "50Mi"
    limits:
    cpu: "100m"
    memory: "50Mi"
    securityContext:
    privileged: false
    capabilities:
    add: ["NET_ADMIN"]
    env:
    - name: POD_NAME
    valueFrom:
    fieldRef:
    fieldPath: metadata.name
    - name: POD_NAMESPACE
    valueFrom:
    fieldRef:
    fieldPath: metadata.namespace
    volumeMounts:
    - name: run
    mountPath: /run/flannel
    - name: flannel-cfg
    mountPath: /etc/kube-flannel/
    volumes:
    - name: run
    hostPath:
    path: /run/flannel
    - name: cni
    hostPath:
    path: /etc/cni/net.d
    - name: flannel-cfg
    configMap:

    name: kube-flannel-cfg

    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
    name: kube-flannel-ds-arm64
    namespace: kube-system
    labels:
    tier: node
    app: flannel
    spec:
    selector:
    matchLabels:
    app: flannel
    template:
    metadata:
    labels:
    tier: node
    app: flannel
    spec:
    affinity:
    nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
    nodeSelectorTerms:
    - matchExpressions:
    - key: beta.kubernetes.io/os
    operator: In
    values:
    - linux
    - key: beta.kubernetes.io/arch
    operator: In
    values:
    - arm64
    hostNetwork: true
    tolerations:
    - operator: Exists
    effect: NoSchedule
    serviceAccountName: flannel
    initContainers:
    - name: install-cni
    image: quay.mirrors.ustc.edu.cn/coreos/flannel:v0.11.0-arm64
    command:
    - cp
    args:
    - -f
    - /etc/kube-flannel/cni-conf.json
    - /etc/cni/net.d/10-flannel.conflist
    volumeMounts:
    - name: cni
    mountPath: /etc/cni/net.d
    - name: flannel-cfg
    mountPath: /etc/kube-flannel/
    containers:
    - name: kube-flannel
    image: quay.mirrors.ustc.edu.cn/coreos/flannel:v0.11.0-arm64
    command:
    - /opt/bin/flanneld
    args:
    - --ip-masq
    - --kube-subnet-mgr
    resources:
    requests:
    cpu: "100m"
    memory: "50Mi"
    limits:
    cpu: "100m"
    memory: "50Mi"
    securityContext:
    privileged: false
    capabilities:
    add: ["NET_ADMIN"]
    env:
    - name: POD_NAME
    valueFrom:
    fieldRef:
    fieldPath: metadata.name
    - name: POD_NAMESPACE
    valueFrom:
    fieldRef:
    fieldPath: metadata.namespace
    volumeMounts:
    - name: run
    mountPath: /run/flannel
    - name: flannel-cfg
    mountPath: /etc/kube-flannel/
    volumes:
    - name: run
    hostPath:
    path: /run/flannel
    - name: cni
    hostPath:
    path: /etc/cni/net.d
    - name: flannel-cfg
    configMap:

    name: kube-flannel-cfg

    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
    name: kube-flannel-ds-arm
    namespace: kube-system
    labels:
    tier: node
    app: flannel
    spec:
    selector:
    matchLabels:
    app: flannel
    template:
    metadata:
    labels:
    tier: node
    app: flannel
    spec:
    affinity:
    nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
    nodeSelectorTerms:
    - matchExpressions:
    - key: beta.kubernetes.io/os
    operator: In
    values:
    - linux
    - key: beta.kubernetes.io/arch
    operator: In
    values:
    - arm
    hostNetwork: true
    tolerations:
    - operator: Exists
    effect: NoSchedule
    serviceAccountName: flannel
    initContainers:
    - name: install-cni
    image: quay.mirrors.ustc.edu.cn/coreos/flannel:v0.11.0-arm
    command:
    - cp
    args:
    - -f
    - /etc/kube-flannel/cni-conf.json
    - /etc/cni/net.d/10-flannel.conflist
    volumeMounts:
    - name: cni
    mountPath: /etc/cni/net.d
    - name: flannel-cfg
    mountPath: /etc/kube-flannel/
    containers:
    - name: kube-flannel
    image: quay.mirrors.ustc.edu.cn/coreos/flannel:v0.11.0-arm
    command:
    - /opt/bin/flanneld
    args:
    - --ip-masq
    - --kube-subnet-mgr
    resources:
    requests:
    cpu: "100m"
    memory: "50Mi"
    limits:
    cpu: "100m"
    memory: "50Mi"
    securityContext:
    privileged: false
    capabilities:
    add: ["NET_ADMIN"]
    env:
    - name: POD_NAME
    valueFrom:
    fieldRef:
    fieldPath: metadata.name
    - name: POD_NAMESPACE
    valueFrom:
    fieldRef:
    fieldPath: metadata.namespace
    volumeMounts:
    - name: run
    mountPath: /run/flannel
    - name: flannel-cfg
    mountPath: /etc/kube-flannel/
    volumes:
    - name: run
    hostPath:
    path: /run/flannel
    - name: cni
    hostPath:
    path: /etc/cni/net.d
    - name: flannel-cfg
    configMap:

    name: kube-flannel-cfg

    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
    name: kube-flannel-ds-ppc64le
    namespace: kube-system
    labels:
    tier: node
    app: flannel
    spec:
    selector:
    matchLabels:
    app: flannel
    template:
    metadata:
    labels:
    tier: node
    app: flannel
    spec:
    affinity:
    nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
    nodeSelectorTerms:
    - matchExpressions:
    - key: beta.kubernetes.io/os
    operator: In
    values:
    - linux
    - key: beta.kubernetes.io/arch
    operator: In
    values:
    - ppc64le
    hostNetwork: true
    tolerations:
    - operator: Exists
    effect: NoSchedule
    serviceAccountName: flannel
    initContainers:
    - name: install-cni
    image: quay.mirrors.ustc.edu.cn/coreos/flannel:v0.11.0-ppc64le
    command:
    - cp
    args:
    - -f
    - /etc/kube-flannel/cni-conf.json
    - /etc/cni/net.d/10-flannel.conflist
    volumeMounts:
    - name: cni
    mountPath: /etc/cni/net.d
    - name: flannel-cfg
    mountPath: /etc/kube-flannel/
    containers:
    - name: kube-flannel
    image: quay.mirrors.ustc.edu.cn/coreos/flannel:v0.11.0-ppc64le
    command:
    - /opt/bin/flanneld
    args:
    - --ip-masq
    - --kube-subnet-mgr
    resources:
    requests:
    cpu: "100m"
    memory: "50Mi"
    limits:
    cpu: "100m"
    memory: "50Mi"
    securityContext:
    privileged: false
    capabilities:
    add: ["NET_ADMIN"]
    env:
    - name: POD_NAME
    valueFrom:
    fieldRef:
    fieldPath: metadata.name
    - name: POD_NAMESPACE
    valueFrom:
    fieldRef:
    fieldPath: metadata.namespace
    volumeMounts:
    - name: run
    mountPath: /run/flannel
    - name: flannel-cfg
    mountPath: /etc/kube-flannel/
    volumes:
    - name: run
    hostPath:
    path: /run/flannel
    - name: cni
    hostPath:
    path: /etc/cni/net.d
    - name: flannel-cfg
    configMap:

    name: kube-flannel-cfg

    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
    name: kube-flannel-ds-s390x
    namespace: kube-system
    labels:
    tier: node
    app: flannel
    spec:
    selector:
    matchLabels:
    app: flannel
    template:
    metadata:
    labels:
    tier: node
    app: flannel
    spec:
    affinity:
    nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
    nodeSelectorTerms:
    - matchExpressions:
    - key: beta.kubernetes.io/os
    operator: In
    values:
    - linux
    - key: beta.kubernetes.io/arch
    operator: In
    values:
    - s390x
    hostNetwork: true
    tolerations:
    - operator: Exists
    effect: NoSchedule
    serviceAccountName: flannel
    initContainers:
    - name: install-cni
    image: quay.mirrors.ustc.edu.cn/coreos/flannel:v0.11.0-s390x
    command:
    - cp
    args:
    - -f
    - /etc/kube-flannel/cni-conf.json
    - /etc/cni/net.d/10-flannel.conflist
    volumeMounts:
    - name: cni
    mountPath: /etc/cni/net.d
    - name: flannel-cfg
    mountPath: /etc/kube-flannel/
    containers:
    - name: kube-flannel
    image: quay.mirrors.ustc.edu.cn/coreos/flannel:v0.11.0-s390x
    command:
    - /opt/bin/flanneld
    args:
    - --ip-masq
    - --kube-subnet-mgr
    resources:
    requests:
    cpu: "100m"
    memory: "50Mi"
    limits:
    cpu: "100m"
    memory: "50Mi"
    securityContext:
    privileged: false
    capabilities:
    add: ["NET_ADMIN"]
    env:
    - name: POD_NAME
    valueFrom:
    fieldRef:
    fieldPath: metadata.name
    - name: POD_NAMESPACE
    valueFrom:
    fieldRef:
    fieldPath: metadata.namespace
    volumeMounts:
    - name: run
    mountPath: /run/flannel
    - name: flannel-cfg
    mountPath: /etc/kube-flannel/
    volumes:
    - name: run
    hostPath:
    path: /run/flannel
    - name: cni
    hostPath:
    path: /etc/cni/net.d
    - name: flannel-cfg
    configMap:
    name: kube-flannel-cfg
    EOF

  • Create the flannel network

    kubectl create -f yaml/flanneld/flanneld.yaml

    Check the cluster status:

    kubectl get nodes
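
    The node turns Ready once the flannel pods are running; they can be watched with:

    kubectl -n kube-system get pods -o wide | grep flannel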

root@node1,node2:

  • Join node1 and node2 with the worker join command recorded earlier, then check from master1:

    kubectl get pod,svc,cs,node -Ao wide
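
    The worker join command has roughly the shape below (token and hash are placeholders; use the values kubeadm actually printed). If they were lost, a fresh join command can be printed on master1:

    # on master1: print a fresh worker join command
    kubeadm token create --print-join-command
    # general shape of the command run on node1/node2
    kubeadm join 192.168.31.150:10443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>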

Step 5 - Deploy ingress

root@master1:

  • Generate the nginx-ingress deployment file

    Create the directory:

    mkdir yaml/nginx-ingress

    Write the file:

    cat << EOF > yaml/nginx-ingress/deployment.yaml
    apiVersion: v1
    kind: Namespace
    metadata:
    name: ingress-nginx
    labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx


    kind: ConfigMap
    apiVersion: v1
    metadata:
    name: nginx-configuration
    namespace: ingress-nginx
    labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx


    kind: ConfigMap
    apiVersion: v1
    metadata:
    name: tcp-services
    namespace: ingress-nginx
    labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx


    kind: ConfigMap
    apiVersion: v1
    metadata:
    name: udp-services
    namespace: ingress-nginx
    labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx


    apiVersion: v1
    kind: ServiceAccount
    metadata:
    name: nginx-ingress-serviceaccount
    namespace: ingress-nginx
    labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx


    apiVersion: rbac.authorization.k8s.io/v1beta1
    kind: ClusterRole
    metadata:
    name: nginx-ingress-clusterrole
    labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    rules:
    - apiGroups:
      - ""
      resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - secrets
      verbs:
      - list
      - watch
    - apiGroups:
      - ""
      resources:
      - nodes
      verbs:
      - get
    - apiGroups:
      - ""
      resources:
      - services
      verbs:
      - get
      - list
      - watch
    - apiGroups:
      - ""
      resources:
      - events
      verbs:
      - create
      - patch
    - apiGroups:
      - "extensions"
      - "networking.k8s.io"
      resources:
      - ingresses
      verbs:
      - get
      - list
      - watch
    - apiGroups:
      - "extensions"
      - "networking.k8s.io"
      resources:
      - ingresses/status
      verbs:
      - update
    ---
    apiVersion: rbac.authorization.k8s.io/v1beta1
    kind: Role
    metadata:
    name: nginx-ingress-role
    namespace: ingress-nginx
    labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    rules:
    - apiGroups:
      - ""
      resources:
      - configmaps
      - pods
      - secrets
      - namespaces
      verbs:
      - get
    - apiGroups:
      - ""
      resources:
      - configmaps
      resourceNames:
      # Defaults to "<election-id>-<ingress-class>"
      # Here: "<ingress-controller-leader>-<nginx>"
      # This has to be adapted if you change either parameter
      # when launching the nginx-ingress-controller.
      - "ingress-controller-leader-nginx"
      verbs:
      - get
      - update
    - apiGroups:
      - ""
      resources:
      - configmaps
      verbs:
      - create
    - apiGroups:
      - ""
      resources:
      - endpoints
      verbs:
      - get
    ---
    apiVersion: rbac.authorization.k8s.io/v1beta1
    kind: RoleBinding
    metadata:
    name: nginx-ingress-role-nisa-binding
    namespace: ingress-nginx
    labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    roleRef:
    apiGroup: rbac.authorization.k8s.io
    kind: Role
    name: nginx-ingress-role
    subjects:
    - kind: ServiceAccount
      name: nginx-ingress-serviceaccount
      namespace: ingress-nginx
    ---
    apiVersion: rbac.authorization.k8s.io/v1beta1
    kind: ClusterRoleBinding
    metadata:
    name: nginx-ingress-clusterrole-nisa-binding
    labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    roleRef:
    apiGroup: rbac.authorization.k8s.io
    kind: ClusterRole
    name: nginx-ingress-clusterrole
    subjects:
    - kind: ServiceAccount
      name: nginx-ingress-serviceaccount
      namespace: ingress-nginx
    ---
    apiVersion: apps/v1
    kind: Deployment
    metadata:
    name: nginx-ingress-controller
    namespace: ingress-nginx
    labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    spec:
    replicas: 1
    selector:
    matchLabels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    template:
    metadata:
    labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    annotations:
    prometheus.io/port: "10254"
    prometheus.io/scrape: "true"
    spec:
    # wait up to five minutes for the drain of connections
    terminationGracePeriodSeconds: 300
    serviceAccountName: nginx-ingress-serviceaccount
    nodeSelector:
    kubernetes.io/os: linux
    containers:
    - name: nginx-ingress-controller
    image: quay.mirrors.ustc.edu.cn/kubernetes-ingress-controller/nginx-ingress-controller:0.26.1
    args:
    - /nginx-ingress-controller
    - --configmap=$(POD_NAMESPACE)/nginx-configuration
    - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
    - --udp-services-configmap=$(POD_NAMESPACE)/udp-services
    - --publish-service=$(POD_NAMESPACE)/ingress-nginx
    - --annotations-prefix=nginx.ingress.kubernetes.io
    securityContext:
    allowPrivilegeEscalation: true
    capabilities:
    drop:
    - ALL
    add:
    - NET_BIND_SERVICE
    # www-data -> 33
    runAsUser: 33
    env:
    - name: POD_NAME
    valueFrom:
    fieldRef:
    fieldPath: metadata.name
    - name: POD_NAMESPACE
    valueFrom:
    fieldRef:
    fieldPath: metadata.namespace
    ports:
    - name: http
    containerPort: 80
    protocol: TCP
    - name: https
    containerPort: 443
    protocol: TCP
    livenessProbe:
    failureThreshold: 3
    httpGet:
    path: /healthz
    port: 10254
    scheme: HTTP
    initialDelaySeconds: 10
    periodSeconds: 10
    successThreshold: 1
    timeoutSeconds: 10
    readinessProbe:
    failureThreshold: 3
    httpGet:
    path: /healthz
    port: 10254
    scheme: HTTP
    periodSeconds: 10
    successThreshold: 1
    timeoutSeconds: 10
    lifecycle:
    preStop:
    exec:
    command:
    - /wait-shutdown


    apiVersion: v1
    kind: Service
    metadata:
    name: ingress-nginx
    namespace: ingress-nginx
    labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    spec:
    type: NodePort
    ports:
    - name: http
    port: 80
    targetPort: 80
    protocol: TCP
    nodePort: 30080
    - name: https
    port: 443
    targetPort: 443
    protocol: TCP
    nodePort: 30443
    selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx


    EOF

    Deploy nginx-ingress:

    kubectl apply -f yaml/nginx-ingress/deployment.yaml
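
    A quick check that the controller pod is running and the NodePort service (30080/30443) exists:

    kubectl get pods -n ingress-nginx -o wide
    kubectl get svc -n ingress-nginx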

Step 6 - Deploy the dashboard

root@master1:

  • Generate dashboard.yaml

    Create the directory:

    mkdir yaml/kube-dashboard

    Write the file:

    cat << \EOF > yaml/kube-dashboard/deploy.yaml

    # Copyright 2017 The Kubernetes Authors.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.

    apiVersion: v1
    kind: Namespace
    metadata:
    name: kubernetes-dashboard


    apiVersion: v1
    kind: ServiceAccount
    metadata:
    labels:
    k8s-app: kubernetes-dashboard
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard


    kind: Service
    apiVersion: v1
    metadata:
    labels:
    k8s-app: kubernetes-dashboard
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard
    spec:
    ports:
    - port: 443
    targetPort: 8443
    selector:
    k8s-app: kubernetes-dashboard


    apiVersion: v1
    kind: Secret
    metadata:
    labels:
    k8s-app: kubernetes-dashboard
    name: kubernetes-dashboard-certs
    namespace: kubernetes-dashboard
    type: Opaque


    apiVersion: v1
    kind: Secret
    metadata:
    labels:
    k8s-app: kubernetes-dashboard
    name: kubernetes-dashboard-csrf
    namespace: kubernetes-dashboard
    type: Opaque
    data:
    csrf: ""


    apiVersion: v1
    kind: Secret
    metadata:
    labels:
    k8s-app: kubernetes-dashboard
    name: kubernetes-dashboard-key-holder
    namespace: kubernetes-dashboard
    type: Opaque


    kind: ConfigMap
    apiVersion: v1
    metadata:
    labels:
    k8s-app: kubernetes-dashboard
    name: kubernetes-dashboard-settings
    namespace: kubernetes-dashboard


    kind: Role
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
    labels:
    k8s-app: kubernetes-dashboard
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard
    rules:
    # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
    - apiGroups: [""]
      resources: ["secrets"]
      resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
      verbs: ["get", "update", "delete"]
    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
    - apiGroups: [""]
      resources: ["configmaps"]
      resourceNames: ["kubernetes-dashboard-settings"]
      verbs: ["get", "update"]
    # Allow Dashboard to get metrics.
    - apiGroups: [""]
      resources: ["services"]
      resourceNames: ["heapster", "dashboard-metrics-scraper"]
      verbs: ["proxy"]
    - apiGroups: [""]
      resources: ["services/proxy"]
      resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
      verbs: ["get"]
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
    labels:
    k8s-app: kubernetes-dashboard
    name: kubernetes-dashboard
    rules:
    # Allow Metrics Scraper to get metrics from the Metrics server
    - apiGroups: ["metrics.k8s.io"]
      resources: ["pods", "nodes"]
      verbs: ["get", "list", "watch"]
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: RoleBinding
    metadata:
    labels:
    k8s-app: kubernetes-dashboard
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard
    roleRef:
    apiGroup: rbac.authorization.k8s.io
    kind: Role
    name: kubernetes-dashboard
    subjects:
    - kind: ServiceAccount
      name: kubernetes-dashboard
      namespace: kubernetes-dashboard
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
    name: kubernetes-dashboard
    roleRef:
    apiGroup: rbac.authorization.k8s.io
    kind: ClusterRole
    name: kubernetes-dashboard
    subjects:
    - kind: ServiceAccount
      name: kubernetes-dashboard
      namespace: kubernetes-dashboard
    ---
    kind: Deployment
    apiVersion: apps/v1
    metadata:
    labels:
    k8s-app: kubernetes-dashboard
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard
    spec:
    replicas: 1
    revisionHistoryLimit: 10
    selector:
    matchLabels:
    k8s-app: kubernetes-dashboard
    template:
    metadata:
    labels:
    k8s-app: kubernetes-dashboard
    spec:
    containers:
    - name: kubernetes-dashboard
    image: kubernetesui/dashboard:v2.0.0-rc3
    imagePullPolicy: Always
    ports:
    - containerPort: 8443
    protocol: TCP
    args:
    - --auto-generate-certificates
    - --namespace=kubernetes-dashboard
    # Uncomment the following line to manually specify Kubernetes API server Host
    # If not specified, Dashboard will attempt to auto discover the API server and connect
    # to it. Uncomment only if the default does not work.
    # - --apiserver-host=http://my-address:port
    volumeMounts:
    - name: kubernetes-dashboard-certs
    mountPath: /certs
    # Create on-disk volume to store exec logs
    - mountPath: /tmp
    name: tmp-volume
    livenessProbe:
    httpGet:
    scheme: HTTPS
    path: /
    port: 8443
    initialDelaySeconds: 30
    timeoutSeconds: 30
    securityContext:
    allowPrivilegeEscalation: false
    readOnlyRootFilesystem: true
    runAsUser: 1001
    runAsGroup: 2001
    volumes:
    - name: kubernetes-dashboard-certs
    secret:
    secretName: kubernetes-dashboard-certs
    - name: tmp-volume
    emptyDir: {}
    serviceAccountName: kubernetes-dashboard
    nodeSelector:
    "beta.kubernetes.io/os": linux
    # Comment the following tolerations if Dashboard must not be deployed on master
    tolerations:
    - key: node-role.kubernetes.io/master
    effect: NoSchedule


    kind: Service
    apiVersion: v1
    metadata:
    labels:
    k8s-app: dashboard-metrics-scraper
    name: dashboard-metrics-scraper
    namespace: kubernetes-dashboard
    spec:
    ports:
    - port: 8000
    targetPort: 8000
    selector:
    k8s-app: dashboard-metrics-scraper


    kind: Deployment
    apiVersion: apps/v1
    metadata:
    labels:
    k8s-app: dashboard-metrics-scraper
    name: dashboard-metrics-scraper
    namespace: kubernetes-dashboard
    spec:
    replicas: 1
    revisionHistoryLimit: 10
    selector:
    matchLabels:
    k8s-app: dashboard-metrics-scraper
    template:
    metadata:
    labels:
    k8s-app: dashboard-metrics-scraper
    annotations:
    seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
    containers:
    - name: dashboard-metrics-scraper
    image: kubernetesui/metrics-scraper:v1.0.1
    ports:
    - containerPort: 8000
    protocol: TCP
    livenessProbe:
    httpGet:
    scheme: HTTP
    path: /
    port: 8000
    initialDelaySeconds: 30
    timeoutSeconds: 30
    volumeMounts:
    - mountPath: /tmp
    name: tmp-volume
    securityContext:
    allowPrivilegeEscalation: false
    readOnlyRootFilesystem: true
    runAsUser: 1001
    runAsGroup: 2001
    serviceAccountName: kubernetes-dashboard
    nodeSelector:
    "beta.kubernetes.io/os": linux
    # Comment the following tolerations if Dashboard must not be deployed on master
    tolerations:
    - key: node-role.kubernetes.io/master
    effect: NoSchedule
    volumes:
    - name: tmp-volume
    emptyDir: {}
    EOF

  • Deploy the dashboard

    kubectl apply -f yaml/kube-dashboard/deploy.yaml

  • Replace the expired certificate

    # Generate a certificate
    mkdir -p key/kube-dashboard-key
    openssl req -new -nodes -x509 \
        -subj "/O=IT/CN=kube-dashboard.com" -days 3650 \
        -keyout key/kube-dashboard-key/tls.key \
        -out key/kube-dashboard-key/tls.crt \
        -extensions v3_ca
    # Delete the old certificate secret
    kubectl delete secret kubernetes-dashboard-certs -n kubernetes-dashboard
    # Create the new certificate secret
    kubectl create secret tls kubernetes-dashboard-certs --cert='key/kube-dashboard-key/tls.crt' --key='key/kube-dashboard-key/tls.key' -n kubernetes-dashboard
    # Restart the pod
    kubectl delete pod $(kubectl -n kubernetes-dashboard get pod | awk '/kubernetes-dashboard/{print $1}') -n kubernetes-dashboard

  • Configure the ingress

    Generate a certificate for the ingress:

    mkdir -p key/nginx-ingress/kubernetes-dashboard
    openssl req -new -nodes -x509 \
        -subj "/O=IT/CN=kubernetes-dashboard.com" -days 3650 \
        -keyout key/nginx-ingress/kubernetes-dashboard/tls.key \
        -out key/nginx-ingress/kubernetes-dashboard/tls.crt \
        -extensions v3_ca

    Create the ingress tls secret:

    kubectl create secret tls ingress-kubernetes-dashboard-certs --cert='key/nginx-ingress/kubernetes-dashboard/tls.crt' --key='key/nginx-ingress/kubernetes-dashboard/tls.key' -n kubernetes-dashboard

    Write the ingress rule:

    cat << EOF > yaml/nginx-ingress/kubernetes-dashboard-ingress.yaml
    apiVersion: extensions/v1beta1
    kind: Ingress
    metadata:
      name: ingress-kubernetes-dashboard-tls
      namespace: kubernetes-dashboard
      annotations:
        nginx.ingress.kubernetes.io/ingress.class: nginx
        nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
        nginx.ingress.kubernetes.io/ssl-passthrough: "true"
        nginx.ingress.kubernetes.io/rewrite-target: /\$1
    spec:
      tls:
      - secretName: ingress-kubernetes-dashboard-certs
      rules:
      - http:
          paths:
          - path: /dashboard/(.*)
            backend:
              serviceName: kubernetes-dashboard
              servicePort: 443
    EOF

    Apply the ingress rule:

    kubectl apply -f yaml/nginx-ingress/kubernetes-dashboard-ingress.yaml

  • Create an account for access

    kubectl create serviceaccount dashboard-admin -n kubernetes-dashboard
    kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:dashboard-admin

    Get the account token:

    kubectl describe secrets -n kubernetes-dashboard $(kubectl -n kubernetes-dashboard get secret | awk '/dashboard-admin/{print $1}')

  • Log in to the dashboard
    Open https://192.168.31.150:30443/dashboard/ to access the dashboard

Step 7 - Set up ceph

root@ALL:

  • Create the myceph user

    useradd -d /home/myceph -m myceph
    passwd myceph

  • Grant sudo privileges to myceph

    sed -i "/## Allow root to run any commands anywhere /a\\myceph ALL = (ALL) ALL" /etc/sudoers
    echo "myceph ALL = (root) NOPASSWD:ALL" | tee /etc/sudoers.d/myceph
    sudo chmod 0440 /etc/sudoers.d/myceph

  • Install the ceph packages on all nodes

    yum install -y ceph-14.2.4-0.el7 ceph-radosgw-14.2.4-0.el7 yum-plugin-priorities

myceph@master1:

  • Generate an rsa key for passwordless login

    Generate the key:

    ssh-keygen -t rsa

    Copy it to each node:

    ssh-copy-id -i .ssh/id_rsa.pub myceph@node1
    ssh-copy-id -i .ssh/id_rsa.pub myceph@node2

  • Configure ssh config

    cat > ~/.ssh/config <<EOF
    Host ceph-node1
    Hostname master1
    User myceph

    Host ceph-node2
    Hostname node1
    User myceph

    Host ceph-node3
    Hostname node2
    User myceph
    EOF

    Set permissions on the file:

    sudo chmod 600 ~/.ssh/config
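
    Optionally verify the aliases and passwordless login:

    ssh ceph-node2 hostname
    ssh ceph-node3 hostname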

  • Install ceph-deploy

    sudo yum install -y python-backports python-execnet python-ipaddress python-remoto python-setuptools
    sudo yum install ceph-deploy -y

  • Build the ceph cluster

    mkdir my-cluster
    cd my-cluster

    Create the cluster with the mon nodes:

    ceph-deploy new master1 node1 node2

  • Edit ceph.conf and append the public_network setting as the last line

    vi ceph.conf

ceph.conf:

[global]
fsid = b764d705-101f-4041-bbc1-871bbe4a8277
mon_initial_members = master1, node1, node2
mon_host = 192.168.31.100,192.168.31.101,192.168.31.102
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
public_network = 192.168.31.0/24
  • Initialize mon, mgr, osd, and mds

    Deploy the monitors:

    ceph-deploy mon create-initial

    Push the admin keyring to the nodes:

    ceph-deploy admin master1 node1 node2

    Add a mgr:

    ceph-deploy mgr create master1

    Add the osds:

    ceph-deploy osd create --data /dev/sdb master1
    ceph-deploy osd create --data /dev/sdb node1
    ceph-deploy osd create --data /dev/sdb node2

    Add the mds daemons:

    ceph-deploy mds create master1 node1 node2
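
    At this point the cluster should report as healthy; a quick check (sudo because the admin keyring is root-owned):

    sudo ceph -s
    sudo ceph osd tree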

root@node2:

  • Enable the dashboard

    yum install -y ceph-mgr-dashboard-14.2.4-0.el7
    ceph mgr module enable dashboard

    Generate a certificate:

    mkdir -p key/ceph-dashboard
    openssl req -new -nodes -x509 \
        -subj "/O=IT/CN=ceph-mgr-dashboard" -days 3650 \
        -keyout key/ceph-dashboard/tls.key -out key/ceph-dashboard/tls.crt -extensions v3_ca
    ceph dashboard set-ssl-certificate -i key/ceph-dashboard/tls.crt
    ceph dashboard set-ssl-certificate-key -i key/ceph-dashboard/tls.key

    Set the listen address and ports:

    ceph config set mgr mgr/dashboard/server_addr 192.168.31.100
    ceph config set mgr mgr/dashboard/server_port 7000
    ceph config set mgr mgr/dashboard/ssl_server_port 8443

    Create a dashboard user:

    ceph dashboard ac-user-create admin admin administrator

    Show the dashboard URL:

    ceph mgr services

  • Log in to the ceph dashboard
    Open https://192.168.31.100:8443/ and log in with user admin, password admin

Step 8 - Create a CephFS filesystem and expose it through a StorageClass

root@node2:

  • Create the cephfs

    Create the pools cephfs needs:

    ceph osd pool create fs_kube_data 64
    ceph osd pool create fs_kube_metadata 64
    ceph fs new cephfs fs_kube_metadata fs_kube_data
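
    Verify that the filesystem exists and an mds has gone active:

    ceph fs ls
    ceph mds stat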

  • Test-mount cephfs

    yum -y install ceph-fuse-14.2.4-0.el7
    mkdir /mnt/cephfs

    Mount cephfs under /mnt/cephfs:

    ceph-fuse -m 192.168.31.100:6789 /mnt/cephfs

    Check that the mount succeeded:

    df -h |grep ceph-fuse

root@master1:

  • Create the admin secret

    ceph auth get-key client.admin > /tmp/secret
    kubectl create ns ceph
    kubectl create secret generic ceph-secret-admin --from-file=/tmp/secret --namespace=ceph

  • Create the cephfs-provisioner manifest

    Create the directory:

    mkdir -p yaml/cephfs

    Write the file:

    cat << EOF > yaml/cephfs/provisioner.yaml

    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
    name: cephfs-provisioner
    namespace: ceph
    rules:
    - apiGroups: [""]
      resources: ["persistentvolumes"]
      verbs: ["get", "list", "watch", "create", "delete"]
    - apiGroups: [""]
      resources: ["persistentvolumeclaims"]
      verbs: ["get", "list", "watch", "update"]
    - apiGroups: ["storage.k8s.io"]
      resources: ["storageclasses"]
      verbs: ["get", "list", "watch"]
    - apiGroups: [""]
      resources: ["events"]
      verbs: ["create", "update", "patch"]
    - apiGroups: [""]
      resources: ["services"]
      resourceNames: ["kube-dns","coredns"]
      verbs: ["list", "get"]
    - apiGroups: [""]
      resources: ["secrets"]
      verbs: ["get", "create", "delete"]
    ---
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
    name: cephfs-provisioner
    subjects:
    - kind: ServiceAccount
      name: cephfs-provisioner
      namespace: ceph
    roleRef:
      kind: ClusterRole
      name: cephfs-provisioner
      apiGroup: rbac.authorization.k8s.io
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: Role
    metadata:
    name: cephfs-provisioner
    namespace: ceph
    rules:
    - apiGroups: [""]
      resources: ["secrets"]
      verbs: ["create", "get", "delete"]
    - apiGroups: [""]
      resources: ["endpoints"]
      verbs: ["get", "list", "watch", "create", "update", "patch"]
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: RoleBinding
    metadata:
    name: cephfs-provisioner
    namespace: ceph
    roleRef:
    apiGroup: rbac.authorization.k8s.io
    kind: Role
    name: cephfs-provisioner
    subjects:
    - kind: ServiceAccount
      name: cephfs-provisioner
    ---
    apiVersion: v1
    kind: ServiceAccount
    metadata:
    name: cephfs-provisioner
    namespace: ceph


    apiVersion: apps/v1
    kind: Deployment
    metadata:
    name: cephfs-provisioner
    namespace: ceph
    spec:
    replicas: 1
    selector:
    matchLabels:
    app: cephfs-provisioner
    strategy:
    type: Recreate
    template:
    metadata:
    labels:
    app: cephfs-provisioner
    spec:
    containers:
    - name: cephfs-provisioner
    image: quay.azk8s.cn/external_storage/cephfs-provisioner:latest
    env:
    - name: PROVISIONER_NAME
    value: ceph.com/cephfs
    - name: PROVISIONER_SECRET_NAMESPACE
    value: ceph
    command:
    - "/usr/local/bin/cephfs-provisioner"
    args:
    - "-id=cephfs-provisioner-1"
    serviceAccount: cephfs-provisioner


    EOF

  • Create the cephfs-provisioner

    kubectl create -f yaml/cephfs/provisioner.yaml

    Check that it was created successfully:

    kubectl get pod -n=ceph -o wide

  • Create the storageclass file and test files

    Create the storageclass file:

    cat << EOF > yaml/cephfs/storageclass.yaml
    kind: StorageClass
    apiVersion: storage.k8s.io/v1
    metadata:
      name: cephfs
    provisioner: ceph.com/cephfs
    parameters:
      monitors: 192.168.31.100:6789,192.168.31.101:6789,192.168.31.102:6789
      adminId: admin
      adminSecretName: ceph-secret-admin
      adminSecretNamespace: "ceph"
      claimRoot: /pvc-volumes
    EOF

    Create a test pvc:

    cat << EOF > yaml/cephfs/test-pvc.yaml

    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: test-pvc
      namespace: ceph
    spec:
      storageClassName: cephfs
      accessModes:
      - ReadWriteMany
      resources:
        requests:
          storage: 5Gi
    EOF

    Create a pod to test mounting the pvc:

    cat << EOF > yaml/cephfs/test-pod.yaml

    kind: Pod
    apiVersion: v1
    metadata:
      name: test-pod
      namespace: ceph
    spec:
      containers:
      - name: test-pod
        image: registry.aliyuncs.com/google_containers/busybox:1.24
        command:
        - "/bin/sh"
        args:
        - "-c"
        - "touch /mnt/SUCCESS && exit 0 || exit 1"
        volumeMounts:
        - name: pvc
          mountPath: "/mnt"
          subPath: "test-pod"
      restartPolicy: "Never"
      volumes:
      - name: pvc
        persistentVolumeClaim:
          claimName: test-pvc
    EOF
  • Deploy the storageclass and the test pvc/pod, and check the result

    kubectl apply -f yaml/cephfs/storageclass.yaml
    kubectl apply -f yaml/cephfs/test-pvc.yaml

    Wait for those to finish creating, then run the test pod:

    kubectl apply -f yaml/cephfs/test-pod.yaml

    Check whether everything was created successfully:

    kubectl describe pvc -n=ceph
    kubectl describe pod/test-pod -n=ceph

Step 9 - Create Ceph RBD storage and expose it through a StorageClass

root@node2:

  • Create the ceph pool and enable it

    Create the pool:

    ceph osd pool create rbd_kube 8 8

    Enable the pool application:

    ceph osd pool application enable rbd_kube mon

    Set read/write permissions for the kube client:

    ceph auth add client.kube mon 'allow r' osd 'allow rwx pool=rbd_kube'
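
    Confirm the client.kube key and caps were created; this key is what the kube secret in the next step is built from:

    ceph auth get client.kube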

root@master1:

  • Create the ceph secret

    ceph auth get-key client.kube > /tmp/kube-secret
    kubectl create secret generic ceph-rbd-secret --from-file=/tmp/kube-secret --namespace=ceph --type=kubernetes.io/rbd

  • Create the rbd-provisioner manifest

    Create the directory:

    mkdir -p yaml/ceph-rbd

    cat << EOF > yaml/ceph-rbd/provisioner.yaml

    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
    name: rbd-provisioner
    namespace: ceph
    rules:
    - apiGroups: [""]
      resources: ["persistentvolumes"]
      verbs: ["get", "list", "watch", "create", "delete"]
    - apiGroups: [""]
      resources: ["persistentvolumeclaims"]
      verbs: ["get", "list", "watch", "update"]
    - apiGroups: ["storage.k8s.io"]
      resources: ["storageclasses"]
      verbs: ["get", "list", "watch"]
    - apiGroups: [""]
      resources: ["events"]
      verbs: ["create", "update", "patch"]
    - apiGroups: [""]
      resources: ["services"]
      resourceNames: ["kube-dns","coredns"]
      verbs: ["list", "get"]
    - apiGroups: [""]
      resources: ["endpoints"]
      verbs: ["get", "list", "watch", "create", "update", "patch"]
    - apiGroups: [""]
      resources: ["secrets"]
      verbs: ["get", "create", "delete"]
    ---
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
    name: rbd-provisioner
    subjects:
    - kind: ServiceAccount
      name: rbd-provisioner
      namespace: ceph
    roleRef:
      kind: ClusterRole
      name: rbd-provisioner
      apiGroup: rbac.authorization.k8s.io
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: Role
    metadata:
    name: rbd-provisioner
    namespace: ceph
    rules:
    - apiGroups: [""]
      resources: ["secrets"]
      verbs: ["get"]
    - apiGroups: [""]
      resources: ["endpoints"]
      verbs: ["get", "list", "watch", "create", "update", "patch"]
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: RoleBinding
    metadata:
    name: rbd-provisioner
    namespace: ceph
    roleRef:
    apiGroup: rbac.authorization.k8s.io
    kind: Role
    name: rbd-provisioner
    subjects:
    - kind: ServiceAccount
      name: rbd-provisioner
      namespace: default
    ---
    apiVersion: v1
    kind: ServiceAccount
    metadata:
    name: rbd-provisioner
    namespace: ceph


    apiVersion: apps/v1
    kind: Deployment
    metadata:
    name: rbd-provisioner
    namespace: ceph
    spec:
    replicas: 1
    selector:
    matchLabels:
    app: rbd-provisioner
    strategy:
    type: Recreate
    template:
    metadata:
    labels:
    app: rbd-provisioner
    spec:
    containers:
    - name: rbd-provisioner
    image: quay.azk8s.cn/external_storage/rbd-provisioner:latest
    env:
    - name: PROVISIONER_NAME
    value: ceph.com/rbd
    serviceAccount: rbd-provisioner


    EOF

  • Deploy the rbd-provisioner

    kubectl create -f yaml/ceph-rbd/provisioner.yaml

    Check that it deployed successfully:

    kubectl get pod -n=ceph -o wide

  • Create the storageclass file and test files

    cat << EOF > yaml/ceph-rbd/storageclass.yaml

    kind: StorageClass
    apiVersion: storage.k8s.io/v1
    metadata:
      name: rbd
    provisioner: ceph.com/rbd
    parameters:
      monitors: 192.168.31.100:6789,192.168.31.101:6789,192.168.31.102:6789
      pool: rbd_kube
      adminId: admin
      adminSecretNamespace: ceph
      adminSecretName: ceph-secret-admin
      userId: kube
      userSecretNamespace: ceph
      userSecretName: ceph-rbd-secret
      imageFormat: "2"
      imageFeatures: layering
    allowVolumeExpansion: true

    EOF

    Create a test pvc:

    cat << EOF > yaml/ceph-rbd/test-pvc.yaml

    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: test-pvc-rbd
    spec:
      accessModes:
      - ReadWriteOnce
      storageClassName: rbd
      resources:
        requests:
          storage: 5Gi

    EOF

    Create a test pod:

    cat << EOF > yaml/ceph-rbd/test-pod.yaml

    kind: Pod
    apiVersion: v1
    metadata:
      name: test-pod
    spec:
      containers:
      - name: test-pod
        image: registry.aliyuncs.com/google_containers/busybox:1.24
        command:
        - "/bin/sh"
        args:
        - "-c"
        - "touch /mnt/SUCCESS && exit 0 || exit 1"
        volumeMounts:
        - name: pvc
          mountPath: "/mnt"
      restartPolicy: "Never"
      volumes:
      - name: pvc
        persistentVolumeClaim:
          claimName: test-pvc-rbd

    EOF

  • Test mounting the pvc from a pod (RBD)

    kubectl create -f yaml/ceph-rbd/storageclass.yaml
    kubectl create -f yaml/ceph-rbd/test-pvc.yaml

    Wait for those to finish creating, then:

    kubectl create -f yaml/ceph-rbd/test-pod.yaml

    Check whether everything was created successfully:

    kubectl describe pvc -n=ceph
    kubectl describe pod/test-pod -n=ceph