Monday, July 24, 2017

Architecture of OpenStack L3 Router HA

l3routerha

On each of the two network nodes we can see the same virtual router, qrouter-f8d376d2-6d8a-4ec1-9939-4c6ec25f8cec:

[root@openstackcontroller13 ~]# ip netns list|grep f8d376d2-6d8a-4ec1-9939-4c6ec25f8cec
qrouter-f8d376d2-6d8a-4ec1-9939-4c6ec25f8cec
[root@openstackcontroller13 ~]# ip netns exec qrouter-f8d376d2-6d8a-4ec1-9939-4c6ec25f8cec bash
[root@openstackcontroller13 ~]# ifconfig
ha-880fa0e2-8d: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 169.254.192.4  netmask 255.255.192.0  broadcast 169.254.255.255
        inet6 fe80::f816:3eff:fe90:adf0  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:90:ad:f0  txqueuelen 0  (Ethernet)
        RX packets 609493  bytes 32927812 (31.4 MiB)
        RX errors 0  dropped 43  overruns 0  frame 0
        TX packets 304608  bytes 16449072 (15.6 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 0  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

qg-3674d949-4c: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 10.89.151.168  netmask 255.255.0.0  broadcast 0.0.0.0
        inet6 fe80::f816:3eff:fe8e:b815  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:8e:b8:15  txqueuelen 0  (Ethernet)
        RX packets 68441559  bytes 19571436387 (18.2 GiB)
        RX errors 0  dropped 2251  overruns 0  frame 0
        TX packets 55319  bytes 5194356 (4.9 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

qr-352424b9-3e: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 192.168.20.1  netmask 255.255.255.0  broadcast 0.0.0.0
        inet6 fe80::f816:3eff:fe12:5526  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:12:55:26  txqueuelen 0  (Ethernet)
        RX packets 3675  bytes 366823 (358.2 KiB)
        RX errors 0  dropped 13  overruns 0  frame 0
        TX packets 1394  bytes 132232 (129.1 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

[root@openstackcontroller12 ~]# ip netns exec qrouter-f8d376d2-6d8a-4ec1-9939-4c6ec25f8cec bash
[root@openstackcontroller12 ~]# ifconfig
ha-71d6264d-9d: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 169.254.192.3  netmask 255.255.192.0  broadcast 169.254.255.255
        inet6 fe80::f816:3eff:fee7:4c03  prefixlen 64  scopeid 0x20<link>
        ether fa:16:3e:e7:4c:03  txqueuelen 0  (Ethernet)
        RX packets 800714  bytes 43265351 (41.2 MiB)
        RX errors 0  dropped 31  overruns 0  frame 0
        TX packets 12  bytes 1008 (1008.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 0  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

qg-3674d949-4c: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        ether fa:16:3e:8e:b8:15  txqueuelen 0  (Ethernet)
        RX packets 58884872  bytes 18031883270 (16.7 GiB)
        RX errors 0  dropped 2002  overruns 0  frame 0
        TX packets 1  bytes 110 (110.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

qr-352424b9-3e: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        ether fa:16:3e:12:55:26  txqueuelen 0  (Ethernet)
        RX packets 500  bytes 57320 (55.9 KiB)
        RX errors 0  dropped 17  overruns 0  frame 0
        TX packets 1  bytes 110 (110.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

Now let's look at keepalived:

ps aux|grep keepalived |grep f8d376d2-6d8a-4ec1-9939-4c6ec25f8cec

root     39958  0.0  0.0 111636  1364 ?        Ss   Oct26   0:23 keepalived -P -f /var/lib/neutron/ha_confs/f8d376d2-6d8a-4ec1-9939-4c6ec25f8cec/keepalived.conf -p /var/lib/neutron/ha_confs/f8d376d2-6d8a-4ec1-9939-4c6ec25f8cec.pid -r /var/lib/neutron/ha_confs/f8d376d2-6d8a-4ec1-9939-4c6ec25f8cec.pid-vrrp

The process above runs inside the network namespace. However, network namespaces do not isolate processes, so every process remains visible from anywhere on the host. See also my earlier study of network namespaces:

http://gogosatellite.blogspot.tw/2016/06/playing-openvswitch-and-namespace-veth.html
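The process table is global: even though keepalived was started inside the qrouter namespace, it shows up in the host's ps output above. To confirm which network namespace a given PID is attached to, compare namespace inodes (a small sketch; PID 39958 is taken from the ps output above, substitute your own):

# Net namespace of the keepalived process:
readlink /proc/39958/ns/net
# Net namespace of the qrouter namespace itself:
ip netns exec qrouter-f8d376d2-6d8a-4ec1-9939-4c6ec25f8cec readlink /proc/self/ns/net

If the two net:[...] inodes match, the process belongs to that router's namespace.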

The keepalived configuration for this router on the backup node, /var/lib/neutron/ha_confs/f8d376d2-6d8a-4ec1-9939-4c6ec25f8cec/keepalived.conf:

vrrp_instance VR_2 {
    state BACKUP
    interface ha-71d6264d-9d
    virtual_router_id 2
    priority 50
    garp_master_delay 60
    nopreempt
    advert_int 2
    track_interface {
        ha-71d6264d-9d
    }
    virtual_ipaddress {
        169.254.0.2/24 dev ha-71d6264d-9d
    }
    virtual_ipaddress_excluded {
        10.89.151.168/16 dev qg-3674d949-4c
        192.168.20.1/24 dev qr-352424b9-3e
        fe80::f816:3eff:fe12:5526/64 dev qr-352424b9-3e scope link
        fe80::f816:3eff:fe8e:b815/64 dev qg-3674d949-4c scope link
    }
    virtual_routes {
        0.0.0.0/0 via 10.89.1.254 dev qg-3674d949-4c
    }
}

The explanation from the official wiki:

https://wiki.openstack.org/wiki/Neutron/L3HighAvailability_VRRP

global_defs {
    router_id ${VR_ID}
}
vrrp_sync_group VG${VR_GROUP_ID} {
    group {
        VI_HA
    }
    % if NOTIFY_SCRIPT:
    notify_master ${NOTIFY_SCRIPT}
    % endif
}

vrrp_instance VI_HA {
    % if TYPE == 'MASTER':
    state MASTER
    % else:
    state SLAVE
    % endif
    interface ${L3_AGENT.get_ha_device_name(TRACK_PORT_ID)}
    virtual_router_id ${VR_ID}
    priority ${PRIORITY}
    track_interface {
        ${L3_AGENT.get_ha_device_name(TRACK_PORT_ID)}
    }
    virtual_ipaddress {
        % if EXTERNAL_PORT:
        ${EXTERNAL_PORT['ip_cidr']} dev ${L3_AGENT.get_external_device_name(EXTERNAL_PORT['id'])}
        % if FLOATING_IPS:
        ${FLOATING_IPS[0]['floating_ip_address']}/32 dev ${L3_AGENT.get_external_device_name(EXTERNAL_PORT['id'])}
        % endif
        % endif

        % if INTERNAL_PORTS:
        ${INTERNAL_PORTS[0]['ip_cidr']} dev ${L3_AGENT.get_internal_device_name(INTERNAL_PORTS[0]['id'])}
        % endif
    }
    virtual_ipaddress_excluded {
        % if EXTERNAL_PORT:
        % for FLOATING_IP in FLOATING_IPS[1:]:
        ${FLOATING_IP['floating_ip_address']}/32 dev ${L3_AGENT.get_external_device_name(EXTERNAL_PORT['id'])}
        % endfor
        % endif

        % for INTERNAL_PORT in INTERNAL_PORTS[1:]:
        ${INTERNAL_PORT['ip_cidr']} dev ${L3_AGENT.get_internal_device_name(INTERNAL_PORT['id'])}
        % endfor
    }

    % if EXTERNAL_PORT:
    virtual_routes {
        0.0.0.0/0 via ${EXTERNAL_PORT['ip_cidr'].split('/')[0]} dev ${L3_AGENT.get_external_device_name(EXTERNAL_PORT['id'])}
    }
    % endif
}

virtual_ipaddress defines the VIP, while virtual_ipaddress_excluded defines the IP addresses of the network devices inside the namespace. The standby router carries none of these addresses until a failover occurs. The configuration also covers the MAC addresses, which are identical on both nodes, as seen in the ifconfig output above.
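To see which node currently holds the VRRP master role, compare the addresses inside the qrouter namespace on both network nodes (a sketch using the addresses from the config above): only the master carries the VIP and the qg-/qr- addresses, as on openstackcontroller13, while the backup's interfaces stay bare, as on openstackcontroller12.

# Run on each network node; only the active router shows these addresses:
ip netns exec qrouter-f8d376d2-6d8a-4ec1-9939-4c6ec25f8cec ip addr | grep -E '169.254.0.2|10.89.151.168|192.168.20.1'

# Neutron can also report which L3 agent hosts the active instance
# (an ha_state column is shown for HA routers, depending on client version):
neutron l3-agent-list-hosting-router f8d376d2-6d8a-4ec1-9939-4c6ec25f8cec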

Friday, July 21, 2017

How to Setup High Availability Kubernetes

hakubernetes

Setup Kubernetes HA

Environment

Kubernetes Version 1.6.

We have two servers:

Master1: 172.16.155.158
Master2: 172.16.155.165
Etcd: 172.16.155.158

Here we only have a single etcd instance; the focus is on how to set up two Kubernetes masters, not on etcd clustering.

There are two ways to connect to the two API servers:

1. How a container connects to the Kubernetes API server -> the kubernetes.default service
2. How a minion node connects to the Kubernetes API server -> a load balancer in front of the host IPs

We first discuss how to set up the kubernetes.default service so that every container can reach an API server, then how a minion connects to the API servers through a real load balancer, and finally how this works when a container is launched.
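As a quick sanity check of these two paths once the masters are up (a sketch; /version is served by the API server, 172.18.0.1 is the cluster IP of kubernetes.default in this setup, and the load balancer or VIP you put in front of the two host IPs is your own choice):

# Path 1: from inside a pod, the kubernetes.default service answers on 443:
curl -k https://kubernetes.default.svc.cluster.local/version

# Path 2: from a minion, each master answers directly on its host IP; the
# minion-side kubelet/kube-proxy would point at a load balancer over these two:
curl http://172.16.155.158:8080/version
curl http://172.16.155.165:8080/version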

Master1 Settings

root@kuberm:~/kube1.6config/deploy/webscale# cat /lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
User=root
ExecStart=/opt/bin/kube-apiserver \
 --insecure-bind-address=0.0.0.0 \
 --insecure-port=8080 \
 --etcd-servers=http://172.16.155.158:2379\
 --logtostderr=false \
 --allow-privileged=false \
 --service-cluster-ip-range=172.18.0.0/16 \
 --admission-control=NamespaceLifecycle,ServiceAccount,LimitRanger,SecurityContextDeny,ResourceQuota \
 --service-node-port-range=30000-32767 \
 --advertise-address=172.16.155.158 \
 --v=6 \
 --storage-backend="etcd2" \
 --log-dir="/var/log/kubernetes" \
 --client-ca-file=/srv/kubernetes/ca.crt \
 --tls-private-key-file=/srv/kubernetes/server.key \
 --tls-cert-file=/srv/kubernetes/server.cert \
 --service_account_key_file=/srv/kubernetes/server.key \
 --secure-port=6443 \
 --apiserver-count=2

Restart=on-failure
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

Note that --advertise-address=172.16.155.158 is critical: this is the host IP the API server advertises to the cluster, and it is why kubernetes.default routes to the host IPs to reach the API servers.

The kubernetes service lives at a cluster IP reachable from containers, and traffic to it is routed to the host IP addresses advertised in the config files:

root@kuberm:~/kube1.6config/deploy/webscale# kubectl get endpoints kubernetes
NAME         ENDPOINTS                                 AGE
kubernetes   172.16.155.158:6443,172.16.155.165:6443   34d
root@kuberm:~/kube1.6config/deploy/webscale# cat /lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
User=root
ExecStart=/opt/bin/kube-controller-manager \
  --master=172.16.155.158:8080 \
  --root-ca-file=/srv/kubernetes/ca.crt \
  --service-account-private-key-file=/srv/kubernetes/server.key \
  --logtostderr=false \
  --log-dir="/var/log/kubernetes" \
  --v=3 \
  --leader-elect=true
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
root@kuberm:~/kube1.6config/deploy/webscale# cat /lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
User=root
ExecStart=/opt/bin/kube-scheduler \
  --logtostderr=true \
  --master=172.16.155.158:8080 \
  --leader-elect=true
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
root@kuberm:~/kube1.6config/deploy/webscale# cat /lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
ExecStart=/opt/bin/kube-proxy  \
  --hostname-override=172.16.155.158 \
  --master=http://172.16.155.158:8080 \
  --logtostderr=true
Restart=on-failure

[Install]
WantedBy=multi-user.target
root@kuberm:~/kube1.6config/deploy/webscale# cat /lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
ExecStart=/opt/bin/kubelet \
  --hostname-override=172.16.155.158 \
  --api-servers=http://172.16.155.158:8080 \
  --register-node=true \
  --logtostderr=false \
  --log-dir="/var/log/kubernetes" \
  --v=3 \
  --cluster_dns=172.18.0.5 \
  --cluster_domain=cluster.local
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target

Master2 Settings

root@kuberm2:~# cat /lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
User=root
ExecStart=/opt/bin/kube-apiserver \
 --insecure-bind-address=0.0.0.0 \
 --insecure-port=8080 \
 --etcd-servers=http://172.16.155.158:2379\
 --logtostderr=false \
 --allow-privileged=true \
 --service-cluster-ip-range=172.18.0.0/16 \
 --admission-control=NamespaceLifecycle,ServiceAccount,LimitRanger,SecurityContextDeny,ResourceQuota \
 --service-node-port-range=30000-32767 \
 --advertise-address=172.16.155.165 \
 --v=6 \
 --storage-backend="etcd2" \
 --log-dir="/var/log/kubernetes" \
 --client-ca-file=/srv/kubernetes/ca.crt \
 --tls-private-key-file=/srv/kubernetes/server.key \
 --tls-cert-file=/srv/kubernetes/server.cert \
 --service_account_key_file=/srv/kubernetes/server.key \
 --secure-port=6443 \
 --apiserver-count=2

Restart=on-failure
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
root@kuberm2:~# cat /lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
User=root
ExecStart=/opt/bin/kube-controller-manager \
  --master=172.16.155.165:8080 \
  --root-ca-file=/srv/kubernetes/ca.crt \
  --service-account-private-key-file=/srv/kubernetes/server.key \
  --logtostderr=false \
  --log-dir="/var/log/kubernetes" \
  --v=3 \
  --leader-elect=true

Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
root@kuberm2:~# cat /lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
User=root
ExecStart=/opt/bin/kube-scheduler \
  --logtostderr=true \
  --master=172.16.155.165:8080 \
  --leader-elect=true
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

Check Result

Let's check the API server service that containers connect to, namely kubernetes.default.
It should list both API servers as endpoints behind the kube-proxy load balancer.

root@kuberm:~/kube1.6config/deploy/webscale# kubectl get svc kubernetes
NAME         CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   172.18.0.1   <none>        443/TCP   34d
root@kuberm:~/kube1.6config/deploy/webscale# kubectl get endpoints kubernetes
NAME         ENDPOINTS                                 AGE
kubernetes   172.16.155.158:6443,172.16.155.165:6443   34d
root@kuberm:~/kube1.6config/deploy/webscale# kubectl get svc kubernetes -o yaml
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: 2017-05-03T05:08:44Z
  labels:
    component: apiserver
    provider: kubernetes
  name: kubernetes
  namespace: default
  resourceVersion: "397592"
  selfLink: /api/v1/namespaces/default/services/kubernetes
  uid: 94c098f6-2fbe-11e7-9a3a-000c295cb5bb
spec:
  clusterIP: 172.18.0.1
  ports:
  - name: https
    port: 443
    protocol: TCP
    targetPort: 6443
  sessionAffinity: ClientIP
  type: ClusterIP
status:
  loadBalancer: {}
root@kuberm:~/kube1.6config/deploy/webscale# /opt/bin/etcdctl get /registry/services/endpoints/default/kubernetes
{"kind":"Endpoints","apiVersion":"v1","metadata":{"name":"kubernetes","namespace":"default","selfLink":"/api/v1/namespaces/default/endpoints/kubernetes","uid":"94c24aba-2fbe-11e7-9a3a-000c295cb5bb","creationTimestamp":"2017-05-03T05:08:44Z"},"subsets":[{"addresses":[{"ip":"172.16.155.158"},{"ip":"172.16.155.165"}],"ports":[{"name":"https","port":6443,"protocol":"TCP"}]}]}

Check multiple kube-scheduler and kube-controller

With --leader-elect=true configured for both kube-scheduler and kube-controller-manager, you will see that both processes keep running, but only one of them is actually doing work. The logs make this clear: only the elected leader is active, even though the other process still exists.
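You can also ask the API directly which instance currently holds the lock; in this version the leader election record is kept as an annotation on an Endpoints object in kube-system (a sketch):

# holderIdentity inside the control-plane.alpha.kubernetes.io/leader
# annotation names the current leader (kuberm or kuberm2):
kubectl -n kube-system get endpoints kube-scheduler -o yaml | grep holderIdentity
kubectl -n kube-system get endpoints kube-controller-manager -o yaml | grep holderIdentity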

Slave Of Kube-Scheduler

ube-scheduler 200 OK in 1 milliseconds
I0607 11:00:05.179156   17298 leaderelection.go:248] lock is held by kuberm and has not yet expired
I0607 11:00:05.179162   17298 leaderelection.go:185] failed to acquire lease kube-system/kube-scheduler
I0607 11:00:09.334061   17298 round_trippers.go:417] GET http://172.16.155.165:8080/api/v1/namespaces/kube-system/endpoints/kube-scheduler 200 OK in 2 milliseconds
I0607 11:00:09.334184   17298 leaderelection.go:248] lock is held by kuberm and has not yet expired
I0607 11:00:09.334191   17298 leaderelection.go:185] failed to acquire lease kube-system/kube-scheduler
I0607 11:00:12.110069   17298 round_trippers.go:417] GET http://172.16.155.165:8080/api/v1/namespaces/kube-system/endpoints/kube-scheduler 200 OK in 2 milliseconds
I0607 11:00:12.110194   17298 leaderelection.go:248] lock is held by kuberm and has not yet expired
I0607 11:00:12.110201   17298 leaderelection.go:185] failed to acquire lease kube-system/kube-scheduler

Master of Kube-Scheduler

I0607 11:00:53.679760   18257 round_trippers.go:417] GET http://172.16.155.158:8080/api/v1/namespaces/kube-system/endpoints/kube-scheduler 200 OK in 1 milliseconds
I0607 11:00:53.684782   18257 round_trippers.go:417] PUT http://172.16.155.158:8080/api/v1/namespaces/kube-system/endpoints/kube-scheduler 200 OK in 4 milliseconds
I0607 11:00:53.684914   18257 leaderelection.go:204] succesfully renewed lease kube-system/kube-scheduler
I0607 11:00:55.686845   18257 round_trippers.go:417] GET http://172.16.155.158:8080/api/v1/namespaces/kube-system/endpoints/kube-scheduler 200 OK in 1 milliseconds
I0607 11:00:55.693945   18257 round_trippers.go:417] PUT http://172.16.155.158:8080/api/v1/namespaces/kube-system/endpoints/kube-scheduler 200 OK in 6 milliseconds
I0607 11:00:55.694060   18257 leaderelection.go:204] succesfully renewed lease kube-system/kube-scheduler

Master of Kube-controller

607 11:03:56.630699   17442 nodecontroller.go:1044] node kubermnode2 hasn't been updated for 8m30.112359775s. Last OutOfDisk is: &NodeCondition{Type:OutOfDisk,Status:Unknown,LastHeartbeatTime:2017-05-12 18:15:25 +0800 CST,LastTransitionTime:2017-06-06 14:35:21 +0800 CST,Reason:NodeStatusUnknown,Message:Kubelet stopped posting node status.,}
I0607 11:03:56.630725   17442 nodecontroller.go:1044] node kubermnode2 hasn't been updated for 8m30.112386319s. Last MemoryPressure is: &NodeCondition{Type:MemoryPressure,Status:Unknown,LastHeartbeatTime:2017-05-12 18:15:25 +0800 CST,LastTransitionTime:2017-06-06 14:35:21 +0800 CST,Reason:NodeStatusUnknown,Message:Kubelet stopped posting node status.,}
I0607 11:03:56.630739   17442 nodecontroller.go:1044] node kubermnode2 hasn't been updated for 8m30.112399821s. Last DiskPressure is: &NodeCondition{Type:DiskPressure,Status:Unknown,LastHeartbeatTime:2017-05-12 18:15:25 +0800 CST,LastTransitionTime:2017-06-06 14:35:21 +0800 CST,Reason:NodeStatusUnknown,Message:Kubelet stopped posting node status.,}

Slave of kube-controller

ube-controller-manager 200 OK in 1 milliseconds
I0607 11:04:32.485502   17291 leaderelection.go:248] lock is held by kuberm and has not yet expired
I0607 11:04:32.485506   17291 leaderelection.go:185] failed to acquire lease kube-system/kube-controller-manager
I0607 11:04:36.263032   17291 round_trippers.go:417] GET http://172.16.155.165:8080/api/v1/namespaces/kube-system/endpoints/kube-controller-manager 200 OK in 1 milliseconds
I0607 11:04:36.263122   17291 leaderelection.go:248] lock is held by kuberm and has not yet expired
I0607 11:04:36.263125   17291 leaderelection.go:185] failed to acquire lease kube-system/kube-controller-manager

Failover of Kube-Scheduler

Stop the kube-scheduler on the master; on the slave you will see the failover after about 10 seconds. You can try the same with kube-controller-manager; the two components fail over independently.
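One way to drive and watch this failover (a sketch; the service name follows the systemd units above):

# On the current leader (kuberm), stop the scheduler:
systemctl stop kube-scheduler

# On the other master, watch the leader record flip to kuberm2 within the
# lease duration (roughly 10-15 seconds):
watch -n 2 "kubectl -n kube-system get endpoints kube-scheduler -o yaml | grep holderIdentity"

The standby's log then shows it acquiring the lease: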

I0607 11:27:38.747508    1487 leaderelection.go:248] lock is held by kuberm and has not yet expired
I0607 11:27:38.747513    1487 leaderelection.go:185] failed to acquire lease kube-system/kube-scheduler
I0607 11:27:41.168987    1487 round_trippers.go:417] GET http://172.16.155.165:8080/api/v1/namespaces/kube-system/endpoints/kube-scheduler 200 OK in 3 milliseconds
I0607 11:27:41.176275    1487 round_trippers.go:417] PUT http://172.16.155.165:8080/api/v1/namespaces/kube-system/endpoints/kube-scheduler 200 OK in 4 milliseconds
I0607 11:27:41.176565    1487 leaderelection.go:189] successfully acquired lease kube-system/kube-scheduler
I0607 11:27:41.179086    1487 event.go:217] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"kube-scheduler", UID:"758d2812-4b2d-11e7-9e10-000c295cb5bb", APIVersion:"v1", ResourceVersion:"558042", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' kuberm2 became leader
I0607 11:27:41.181071    1487 round_trippers.go:417] POST http://172.16.155.165:8080/api/v1/namespaces/kube-system/events 201 Created in 2 milliseconds
I0607 11:27:43.178612    1487 round_trippers.go:417] GET http://172.16.155.165:8080/api/v1/namespaces/kube-system/endpoints/kube-scheduler 200 OK in 1 milliseconds
I0607 11:27:43.182060    1487 round_trippers.go:417] PUT http://172.16.155.165:8080/api/v1/namespaces/kube-system/endpoints/kube-scheduler 200 OK in 3 milliseconds
I0607 11:27:43.182111    1487 leaderelection.go:204] succesfully renewed lease kube-system/kube-scheduler