«   2025/01   »
1 2 3 4
5 6 7 8 9 10 11
12 13 14 15 16 17 18
19 20 21 22 23 24 25
26 27 28 29 30 31
Tags more
Archives
Today
Total
01-15 22:22
관리 메뉴

+1-1+1-1+1-1+1-1...

쿠버네티스 설치 실습 본문

Linux/Docker & Kubernetes

쿠버네티스 설치 실습

투명인간 2021. 2. 3. 13:02
728x90

설치 환경 : Centos 7.X

Master 서버 > 192.168.60.17 k8s-master

Node 1 서버 > 192.168.60.18 k8s-node1

Node 2 서버 > 192.168.60.19 k8s-node2

 

1. 사전 환경 구성

1) SELinux 설정

쿠버네티스가 Pod Network에 필요한 호스트 파일 시스템에 액세스가 가능하도록 설정

 

[root@localhost ~]# setenforce 0

 

[root@localhost ~]# sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

 

[root@localhost ~]# reboot

 

[root@localhost ~]# sestatus

SELinux status:                 enabled

SELinuxfs mount:                /sys/fs/selinux

SELinux root directory:         /etc/selinux

Loaded policy name:             targeted

Current mode:                   permissive

Mode from config file:          permissive

Policy MLS status:              enabled

Policy deny_unknown status:     allowed

Max kernel policy version:      28

 

2) 방화벽 해제

[root@localhost ~]#  systemctl stop firewalld && systemctl disable firewalld

Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.

Removed symlink /etc/systemd/system/basic.target.wants/firewalld.service.

 

3) Swap 기능 비활성화 - 사용시 고려사항이 있음 일단 비활성 처리

참고 사이트 : https://github.com/kubernetes/kubernetes/issues/53533

[root@localhost ~]# swapoff -a && sed -i '/ swap / s/^/#/' /etc/fstab

[root@localhost ~]# cat /etc/fstab

#

# /etc/fstab

# Created by anaconda on Sun Jan 31 18:17:13 2021

#

# Accessible filesystems, by reference, are maintained under '/dev/disk'

# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info

#

/dev/mapper/rhel-root   /                       xfs     defaults        0 0

UUID=f86f6225-9bb5-410e-8182-f6691211a934 /boot                   xfs     defaults        0 0

/dev/mapper/rhel-home   /home                   xfs     defaults        0 0

#/dev/mapper/rhel-swap   swap                    swap    defaults        0 0

 

4) iptables 커널 옵션 활성화

RHEL, CentOS7 사용시 iptables가 무시되서 트래픽이 잘못 라우팅되는 문제가 발생함

 

[root@localhost ~]# cat << EOF > /etc/sysctl.d/k8s.conf

> net.bridge.bridge-nf-call-ip6tables = 1

> net.bridge.bridge-nf-call-iptables = 1

> EOF

[root@localhost ~]# sysctl --system

* Applying /usr/lib/sysctl.d/00-system.conf ...

* Applying /usr/lib/sysctl.d/50-default.conf ...

kernel.sysrq = 16

kernel.core_uses_pid = 1

net.ipv4.conf.default.rp_filter = 1

net.ipv4.conf.all.rp_filter = 1

net.ipv4.conf.default.accept_source_route = 0

net.ipv4.conf.all.accept_source_route = 0

net.ipv4.conf.default.promote_secondaries = 1

net.ipv4.conf.all.promote_secondaries = 1

fs.protected_hardlinks = 1

fs.protected_symlinks = 1

* Applying /etc/sysctl.d/99-sysctl.conf ...

* Applying /etc/sysctl.d/k8s.conf ...

* Applying /etc/sysctl.conf ...

 

5) 쿠버네티스 YUM Repository 설정

[root@localhost ~]# cat << EOF > /etc/yum.repos.d/kubernetes.repo

> [kubernetes]

> name=kubernetes

> baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64

> enabled=1

> gpgcheck=1

> repo_gpgcheck=1

> gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg http://packages.cloud.google.com/yum/doc/rpm-package-key.gpg

> EOF

 

6) Linux (Centos) Update

! RHEL Subscription 미등록 이슈로 업데이트가 되지 않음 - 일단 아래 skip 옵션 줘서 설치해봄

안되면 CentOS로 깔아서 다시 해야지..ㅜㅜ

[root@localhost ~]# yum update

 

7) 호스트 파일 수정

[root@localhost yum.repos.d]# cat << EOF > /etc/hosts

> 192.168.60.17 k8s-master

> 192.168.60.18 k8s-node1

> 192.168.60.19 k8s-node2

> EOF

 

2. 설치 (Master, Node1, Node2)

1) Docker 설치

설치에 필요한 패키지 설치

yum install -y yum-utils device-mapper-persistent-data lvm2

 

도커 설치를 위한 저장소 설정

yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

 

도커 패키지 설치

! RHEL Subscription 미등록 이슈로 docker.repo 파일에 $releasever 값이 안나와 설치가 실패함....

CentOS 설치 후 성공

yum update && yum install docker-ce-18.06.2.ce

 

도커 관련 예제 파일 생성

[root@localhost ~]# mkdir /etc/docker

[root@localhost ~]# cat << EOF > /etc/docker/daemon.json

> {

>  "exec-opts": ["native.cgroupdriver=systemd"],

>  "log-driver": "json-file",

>  "log-opts": {

>    "max-size": "100m"

>  },

>  "storage-driver": "overlay2",

>  "storage-opts": [

>    "overlay2.override_kernel_check=true"

>   ]

>  }

> EOF

[root@localhost ~]# cat /etc/docker/daemon.json

{

"exec-opts": ["native.cgroupdriver=systemd"],

"log-driver": "json-file",

"log-opts": {

   "max-size": "100m"

},

"storage-driver": "overlay2",

"storage-opts": [

   "overlay2.override_kernel_check=true"

  ]

}

 

[root@localhost /]# mkdir -p /etc/systemd/system/docker.service.d

[root@localhost /]# cd /etc/systemd/system/docker.service.d

[root@localhost docker.service.d]#

 

2) Kubernetes 설치

[root@localhost /]# yum install -y --disableexcludes=kubernetes kubeadm-1.15.5-0.x86_64 kubectl-1.15.5-0.x86_64 kubelet-1.15.5-0.x86_64

 

3. Node 설정

1) 서버 호스트명 변경

Node1번 서버 호스트 이름 변경

[root@localhost /]# hostnamectl set-hostname k8s-node1

Node2번 서버 호스트 이름 변경

[root@localhost /]# hostnamectl set-hostname k8s-node2

 

4. Initialize Master and Join Node (Master 서버)

1) 도커 및 쿠버네티스 실행

# 변경한 설정 파일을 반영

daemon-reload

       Reload systemd manager configuration. This will rerun all generators (see systemd.generator(7)), reload

       all unit files, and recreate the entire dependency tree. While the daemon is being reloaded, all sockets

       systemd listens on behalf of user configuration will stay accessible.

       This command should not be confused with the reload command.

[root@localhost /]# systemctl daemon-reload

# 도커 서비스 등록

[root@localhost /]# systemctl enable --now docker

Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.

# docker 정상 설치 확인 테스트

[root@localhost /]# docker run hello-world

Hello from Docker!

This message shows that your installation appears to be working correctly.

To generate this message, Docker took the following steps:

1. The Docker client contacted the Docker daemon.

2. The Docker daemon pulled the "hello-world" image from the Docker Hub.

    (amd64)

3. The Docker daemon created a new container from that image which runs the

    executable that produces the output you are currently reading.

4. The Docker daemon streamed that output to the Docker client, which sent it

    to your terminal.

To try something more ambitious, you can run an Ubuntu container with:

$ docker run -it ubuntu bash

Share images, automate workflows, and more with a free Docker ID:

https://hub.docker.com/

For more examples and ideas, visit:

https://docs.docker.com/get-started/

 

#쿠버네티스 실행

[root@localhost /]# systemctl enable --now kubelet

Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.

 

2) 쿠버네티스 초기화 명령 실행

참고사이트 : https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/

'--pod-network-cidr' – Pod IP가 자동 생성할 네트워크 대역 정의

'--service-cidr' – 서비스의 IP가 자동 생성할 네트워크 대역 정의

[root@localhost /]# kubeadm init --pod-network-cidr=20.96.0.0/12

I0202 23:33:36.269148   25341 version.go:248] remote version is much newer: v1.20.2; falling back to: stable-1.15

[init] Using Kubernetes version: v1.15.12

[preflight] Running pre-flight checks

        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/

[preflight] Pulling images required for setting up a Kubernetes cluster

[preflight] This might take a minute or two, depending on the speed of your internet connection

[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'

[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"

[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"

[kubelet-start] Activating the kubelet service

[certs] Using certificateDir folder "/etc/kubernetes/pki"

[certs] Generating "front-proxy-ca" certificate and key

[certs] Generating "front-proxy-client" certificate and key

[certs] Generating "etcd/ca" certificate and key

[certs] Generating "apiserver-etcd-client" certificate and key

[certs] Generating "etcd/server" certificate and key

[certs] etcd/server serving cert is signed for DNS names [localhost.localdomain localhost] and IPs [10.0.2.16 127.0.0.1 ::1]

[certs] Generating "etcd/healthcheck-client" certificate and key

[certs] Generating "etcd/peer" certificate and key

[certs] etcd/peer serving cert is signed for DNS names [localhost.localdomain localhost] and IPs [10.0.2.16 127.0.0.1 ::1]

[certs] Generating "ca" certificate and key

[certs] Generating "apiserver" certificate and key

[certs] apiserver serving cert is signed for DNS names [localhost.localdomain kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.2.16]

[certs] Generating "apiserver-kubelet-client" certificate and key

[certs] Generating "sa" key and public key

[kubeconfig] Using kubeconfig folder "/etc/kubernetes"

[kubeconfig] Writing "admin.conf" kubeconfig file

[kubeconfig] Writing "kubelet.conf" kubeconfig file

[kubeconfig] Writing "controller-manager.conf" kubeconfig file

[kubeconfig] Writing "scheduler.conf" kubeconfig file

[control-plane] Using manifest folder "/etc/kubernetes/manifests"

[control-plane] Creating static Pod manifest for "kube-apiserver"

[control-plane] Creating static Pod manifest for "kube-controller-manager"

[control-plane] Creating static Pod manifest for "kube-scheduler"

[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"

[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s

[apiclient] All control plane components are healthy after 19.002741 seconds

[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace

[kubelet] Creating a ConfigMap "kubelet-config-1.15" in namespace kube-system with the configuration for the kubelets in the cluster

[upload-certs] Skipping phase. Please see --upload-certs

[mark-control-plane] Marking the node localhost.localdomain as control-plane by adding the label "node-role.kubernetes.io/master=''"

[mark-control-plane] Marking the node localhost.localdomain as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]

[bootstrap-token] Using token: 2qmh69.duwivjjhoz9picge

[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles

[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials

[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token

[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster

[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace

[addons] Applied essential addon: CoreDNS

[addons] Applied essential addon: kube-proxy

 

Your Kubernetes control-plane has initialized successfully! # 초기화 성공 확인 메시지

 

To start using your cluster, you need to run the following as a regular user:

 

  mkdir -p $HOME/.kube

  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

  sudo chown $(id -u):$(id -g) $HOME/.kube/config

 

You should now deploy a pod network to the cluster.

Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:

  https://kubernetes.io/docs/concepts/cluster-administration/addons/

 

Then you can join any number of worker nodes by running the following on each as root:

 

kubeadm join 10.0.2.16:6443 --token 2qmh69.duwivjjhoz9picge \

    --discovery-token-ca-cert-hash sha256:a01814b80844813fe31c0e4c7d906fc93b332f0d89c264d39112b5113bd402e8

-> 해당 내용을 복사해서 별도로 저장해두자

-> join 명령 구문을 generate 할때  enp0s3 인터페이스의 IP를 읽어왔음…-> 쿠버네티스 인프라가 공유하는 네트워크는 192.168.60.0/24 대역으로 변경해서 Join 해보자…

 

3) 환경 변수 설정

[root@localhost /]# mkdir -p $HOME/.kube

[root@localhost /]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

[root@localhost /]# chown $(id -u):$(id -g) $HOME/.kube/config

 

4) Kubectl 자동 완성 기능 설치

Kubectl 사용 시 [tab] 버튼을 이용 명령어 리스트를 자동 조회하도록 설치함

[root@localhost /]# yum install bash-completion -y

[root@localhost /]# source <(kubectl completion bash)

[root@localhost /]# echo "source <(kubectl completion bash)" >> ~/.bashrc

 

5. Node 서버 설정

Docker, 쿠버네티스 서비스 설정 및 실행

[root@localhost /]# systemctl daemon-reload

[root@localhost /]# systemctl enable --now docker

Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.

[root@localhost /]# systemctl enable --now kubelet

Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.

 

마스터 서버에 Node 서버 조인 하기

Master init 시 복사했었던 내용 붙여 넣기 ( 192.168.60.0/24 대역 IP로 변경해서 했더니 실패 ㅜㅜ)

그래서 다시 원래 대로 복사 실행

[root@localhost docker.service.d]# kubeadm join 10.0.2.16:6443 --token 2qmh69.duwivjjhoz9picge \

>     --discovery-token-ca-cert-hash sha256:a01814b80844813fe31c0e4c7d906fc93b332f0d89c264d39112b5113bd402e8

[preflight] Running pre-flight checks

        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/

[preflight] Reading configuration from the cluster...

[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'

[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.15" ConfigMap in the kube-system namespace

[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"

[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"

[kubelet-start] Activating the kubelet service

[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

 

This node has joined the cluster:

* Certificate signing request was sent to apiserver and a response was received.

* The Kubelet was informed of the new secure connection details.

 

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

 

마스터 서버에서 Node 조인 상태 확인

[root@localhost /]# kubectl get nodes

NAME                    STATUS     ROLES    AGE   VERSION

k8s-node1               NotReady   <none>   10m   v1.15.5

k8s-node2               NotReady   <none>   82s   v1.15.5

localhost.localdomain   NotReady   master   41m   v1.15.5

 

6. Plugin 설치

1) 쿠버네티스 클러스터 네트워킹 관련 플러그인 Calico 설치 및 설정하기

[root@localhost /]# curl -O https://docs.projectcalico.org/v3.9/manifests/calico.yaml

  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current

                                 Dload  Upload   Total   Spent    Left  Speed

100 20674  100 20674    0     0   7698      0  0:00:02  0:00:02 --:--:--  7696

설정 파일 IP 대역 변경 및 적용

[root@localhost /]# sed s/192.168.0.0\\/16/20.96.0.0\\/12/g -i calico.yaml

[root@localhost /]# kubectl apply -f calico.yaml

configmap/calico-config created

customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created

customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created

customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created

customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created

customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created

customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created

customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created

customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created

customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created

customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created

customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created

customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created

customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created

customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created

clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created

clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created

clusterrole.rbac.authorization.k8s.io/calico-node created

clusterrolebinding.rbac.authorization.k8s.io/calico-node created

daemonset.apps/calico-node created

serviceaccount/calico-node created

deployment.apps/calico-kube-controllers created

serviceaccount/calico-kube-controllers created

 

calico coredns 관련 Pod status Running인지 확인

[root@localhost /]# kubectl get pods --all-namespaces

NAMESPACE     NAME                                            READY   STATUS              RESTARTS   AGE

kube-system   calico-kube-controllers-75dbcbbf8b-4fxls        0/1     ContainerCreating   0          82s

kube-system   calico-node-8h5s9                               0/1     Running             0          82s

kube-system   calico-node-j448w                               1/1     Running             0          82s

kube-system   calico-node-m5rsc                               1/1     Running             0          82s

kube-system   coredns-5c98db65d4-7k5rn                        1/1     Running             0          50m

kube-system   coredns-5c98db65d4-kcs4b                        1/1     Running             0          50m

kube-system   etcd-localhost.localdomain                      1/1     Running             0          50m

kube-system   kube-apiserver-localhost.localdomain            1/1     Running             0          50m

kube-system   kube-controller-manager-localhost.localdomain   1/1     Running             0          50m

kube-system   kube-proxy-2klgn                                1/1     Running             0          20m

kube-system   kube-proxy-jz26z                                1/1     Running             0          11m

kube-system   kube-proxy-l9j7c                                1/1     Running             0          50m

kube-system   kube-scheduler-localhost.localdomain            1/1     Running             0          49m

 

2) Dashboard 설치하기

[root@localhost /]# kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml

secret/kubernetes-dashboard-certs created

serviceaccount/kubernetes-dashboard created

role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created

rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created

deployment.apps/kubernetes-dashboard created

service/kubernetes-dashboard created

 

권한 해지 설정 > 접속 인증시 Skip 설정 아래 명령을 통해 수정 모드로 진입

노란색 문구 아래에 –enable-skip-login 옵션(붉은색 표시부분) 추가

[root@localhost /]# kubectl -n kube-system edit deployments.apps kubernetes-dashboard

# Please edit the object below. Lines beginning with a '#' will be ignored,

# and an empty file will abort the edit. If an error occurs while saving this file will be

# reopened with the relevant failures.

#

apiVersion: apps/v1

kind: Deployment

metadata:

  annotations:

    deployment.kubernetes.io/revision: "1"

    kubectl.kubernetes.io/last-applied-configuration: |

      {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"k8s-app":"kubernetes-dashboard"},"name":"kubernetes-dashboard","namespace":"kube-system"},"spec":{"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"k8s-app":"kubernetes-dashboard"}},"template":{"metadata":{"labels":{"k8s-app":"kubernetes-dashboard"}},"spec":{"containers":[{"args":["--auto-generate-certificates"],"image":"k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1","livenessProbe":{"httpGet":{"path":"/","port":8443,"scheme":"HTTPS"},"initialDelaySeconds":30,"timeoutSeconds":30},"name":"kubernetes-dashboard","ports":[{"containerPort":8443,"protocol":"TCP"}],"volumeMounts":[{"mountPath":"/certs","name":"kubernetes-dashboard-certs"},{"mountPath":"/tmp","name":"tmp-volume"}]}],"serviceAccountName":"kubernetes-dashboard","tolerations":[{"effect":"NoSchedule","key":"node-role.kubernetes.io/master"}],"volumes":[{"name":"kubernetes-dashboard-certs","secret":{"secretName":"kubernetes-dashboard-certs"}},{"emptyDir":{},"name":"tmp-volume"}]}}}}

  creationTimestamp: "2021-02-02T15:30:06Z"

  generation: 1

  labels:

    k8s-app: kubernetes-dashboard

  name: kubernetes-dashboard

  namespace: kube-system

  resourceVersion: "5158"

  selfLink: /apis/apps/v1/namespaces/kube-system/deployments/kubernetes-dashboard

  uid: 01c6d729-74c9-4029-a119-013ddb265b0f

spec:

  progressDeadlineSeconds: 600

  replicas: 1

  revisionHistoryLimit: 10

  selector:

    matchLabels:

      k8s-app: kubernetes-dashboard

  strategy:

    rollingUpdate:

      maxSurge: 25%

      maxUnavailable: 25%

    type: RollingUpdate

  template:

    metadata:

      creationTimestamp: null

      labels:

        k8s-app: kubernetes-dashboard

spec:

      containers:

      - args:

        - --auto-generate-certificates

- --enable-skip-login

        image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1

        imagePullPolicy: IfNotPresent

        livenessProbe:

          failureThreshold: 3

          httpGet:

            path: /

            port: 8443

            scheme: HTTPS

          initialDelaySeconds: 30

          periodSeconds: 10

          successThreshold: 1

          timeoutSeconds: 30

        name: kubernetes-dashboard

        ports:

        - containerPort: 8443"/tmp/kubectl-edit-ijy6p.yaml" 101L, 4023C

 

Dashboard Admin 권한 부여하기

[root@localhost /]# cat << EOF | kubectl create -f -

> apiVersion: rbac.authorization.k8s.io/v1beta1

> kind: ClusterRoleBinding

> metadata:

>  name: kubernetes-dashboard

>  labels:

>    k8s-app: kubernetes-dashboard

> roleRef:

>   apiGroup: rbac.authorization.k8s.io

>   kind: ClusterRole

>   name: Cluster-admin

> subjects:

> - kind: ServiceAccount

>   name: kubernetes-dashboard

>   namespace: kube-system

> EOF

clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created

 

백그라운드로 proxy 띄우기

--address 에 자신의 Host IP 입력

[root@localhost /]# nohup kubectl proxy --port=8001 --address=192.168.60.17 --accept-hosts='^*$' >/dev/null 2>&1 &

[1] 14238

사이트 확인

http://192.168.60.17:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login

 

그러나 막상 화면을 들어가면 다음과 같이 에러남 ㅜㅜ;; 뭐가 잘못되었을까…

 

 

에러 확인 결과

Dashboard Admin 권한 부여하기 과정에 오타 (빨간색 대문자 C)

[root@localhost /]# cat << EOF | kubectl create -f -

> apiVersion: rbac.authorization.k8s.io/v1beta1

> kind: ClusterRoleBinding

> metadata:

>  name: kubernetes-dashboard

>  labels:

>    k8s-app: kubernetes-dashboard

> roleRef:

>   apiGroup: rbac.authorization.k8s.io

>   kind: ClusterRole

>   name: Cluster-admin

> subjects:

> - kind: ServiceAccount

>   name: kubernetes-dashboard

>   namespace: kube-system

> EOF

clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created

 

기존 설정 삭제 후 재등록

[root@localhost /]# cat << EOF | kubectl delete -f -

> apiVersion: rbac.authorization.k8s.io/v1beta1

> kind: ClusterRoleBinding

> metadata:

>  name: kubernetes-dashboard

>  labels:

>    k8s-app: kubernetes-dashboard

> roleRef:

>   apiGroup: rbac.authorization.k8s.io

>   kind: ClusterRole

>   name: Cluster-admin

> subjects:

> - kind: ServiceAccount

>   name: kubernetes-dashboard

>   namespace: kube-system

> EOF

 

[root@localhost /]# cat << EOF | kubectl create -f -

> apiVersion: rbac.authorization.k8s.io/v1beta1

> kind: ClusterRoleBinding

> metadata:

>  name: kubernetes-dashboard

>  labels:

>    k8s-app: kubernetes-dashboard

> roleRef:

>   apiGroup: rbac.authorization.k8s.io

>   kind: ClusterRole

>   name: cluster-admin

> subjects:

> - kind: ServiceAccount

>   name: kubernetes-dashboard

>   namespace: kube-system

> EOF

clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created

 

결과 확인

 

 성공!!

 

 

반응형

'Linux > Docker & Kubernetes' 카테고리의 다른 글

Docker for Beginner  (0) 2021.02.25
Docker 시작하기  (0) 2021.02.22
Kubernetes for Beginner  (0) 2021.02.21
쿠버네티스...  (0) 2021.02.03