Friday, March 30, 2018

Set Up Kubernetes 1.6

kubernetes

In Ubuntu 14.04

Starting cluster using provider: ubuntu
... calling verify-prereqs
Identity added: /root/.ssh/id_rsa (/root/.ssh/id_rsa)
... calling kube-up
FLANNEL_NET
172.16.0.0/16
Deploying master and minion on machine 127.0.0.1

config-default.sh                                                                                                                                  100% 3034     3.0KB/s   00:00
util.sh                                                                                                                                            100%   13KB  13.4KB/s   00:00
kube-controller-manager.conf                                                                                                                       100%  746     0.7KB/s   00:00
etcd.conf                                                                                                                                          100%  576     0.6KB/s   00:00
flanneld.conf                                                                                                                                      100%  569     0.6KB/s   00:00
kube-scheduler.conf                                                                                                                                100%  676     0.7KB/s   00:00
kube-apiserver.conf                                                                                                                                100%  676     0.7KB/s   00:00
kube-apiserver                                                                                                                                     100% 2358     2.3KB/s   00:00
etcd                                                                                                                                               100% 2073     2.0KB/s   00:00
kube-controller-manager                                                                                                                            100% 2672     2.6KB/s   00:00
flanneld                                                                                                                                           100% 2159     2.1KB/s   00:00
kube-scheduler                                                                                                                                     100% 2360     2.3KB/s   00:00
reconfDocker.sh                                                                                                                                    100% 1493     1.5KB/s   00:00
etcd.conf                                                                                                                                          100%  576     0.6KB/s   00:00
flanneld.conf                                                                                                                                      100%  569     0.6KB/s   00:00
kube-proxy.conf                                                                                                                                    100%  648     0.6KB/s   00:00
kubelet.conf                                                                                                                                       100%  634     0.6KB/s   00:00
etcd                                                                                                                                               100% 2073     2.0KB/s   00:00
kubelet                                                                                                                                            100% 2162     2.1KB/s   00:00
kube-proxy                                                                                                                                         100% 2230     2.2KB/s   00:00
flanneld                                                                                                                                           100% 2159     2.1KB/s   00:00
kube-apiserver                                                                                                                                     100%   34MB  33.7MB/s   00:01
etcd                                                                                                                                               100% 6494KB   6.3MB/s   00:00
kube-controller-manager                                                                                                                            100%   26MB  26.2MB/s   00:00
etcdctl                                                                                                                                            100% 6041KB   5.9MB/s   00:00
flanneld                                                                                                                                           100% 8695KB   8.5MB/s   00:00
kube-scheduler                                                                                                                                     100%   17MB  17.0MB/s   00:00
etcd                                                                                                                                               100% 6494KB   6.3MB/s   00:01
etcdctl                                                                                                                                            100% 6041KB   5.9MB/s   00:00
kubelet                                                                                                                                            100%   33MB  33.2MB/s   00:00
kube-proxy                                                                                                                                         100%   17MB  16.8MB/s   00:00
flanneld
sudo: unable to resolve host kuber
sudo: unable to resolve host kuber

sudo: unable to resolve host kuber
sudo: unable to resolve host kuber
sudo: unable to resolve host kuber
sudo: unable to resolve host kuber
sudo: unable to resolve host kuber
etcd start/running, process 4453
sudo: unable to resolve host kuber
Connection to 127.0.0.1 closed.
Validating master
Validating root@127.0.0.1

Kubernetes cluster is running.  The master is running at:

  http://127.0.0.1

FLANNEL_NET
172.16.0.0/16
Using master 127.0.0.1
Wrote config for ubuntu to /root/.kube/config
... calling validate-cluster
Found 1 nodes.
        NAME        LABELS                             STATUS
     1  127.0.0.1   kubernetes.io/hostname=127.0.0.1   Ready
Validate output:
NAME                 STATUS    MESSAGE   ERROR
controller-manager   Healthy   ok        nil
scheduler            Healthy   ok        nil
etcd-0               Healthy   {"action":"get","node":{"dir":true,"nodes":[{"key":"/registry","dir":true,"modifiedIndex":3,"createdIndex":3}]}}
                     nil
Cluster validation succeeded
Done, listing cluster services:

Kubernetes master is running at http://127.0.0.1:8080
root@kuber:~/kubernetes/cluster# ps aux|grep kube
root       4464  2.3  2.9 379528 30040 ?        Ssl  16:20   0:01 /opt/bin/kubelet --address=0.0.0.0 --port=10250 --hostname_override=127.0.0.1 --api_servers=http://127.0.0.1:8080 --logtostderr=true --cluster_dns=192.168.3.10 --cluster_domain=cluster.local
root       4465  0.4  1.1  16276 11900 ?        Ssl  16:20   0:00 /opt/bin/kube-scheduler --logtostderr=true --master=127.0.0.1:8080
root       4469  5.2  4.3  52164 43332 ?        Ssl  16:20   0:02 /opt/bin/kube-apiserver --address=0.0.0.0 --port=8080 --etcd_servers=http://127.0.0.1:4001 --logtostderr=true --service-cluster-ip-range=192.168.3.0/24 --admission_control=NamespaceLifecycle,NamespaceAutoProvision,LimitRanger,ServiceAccount,ResourceQuota --service_account_key_file=/tmp/kube-serviceaccount.key --service_account_lookup=false
root       4470  0.8  1.9  25556 19332 ?        Ssl  16:20   0:00 /opt/bin/kube-controller-manager --master=127.0.0.1:8080 --service_account_private_key_file=/tmp/kube-serviceaccount.key --logtostderr=true
root       4471  0.2  1.5 203044 15940 ?        Ssl  16:20   0:00 /opt/bin/kube-proxy --master=http://127.0.0.1:8080 --logtostderr=true
root       5079  0.0  0.2  10476  2188 pts/3    S+   16:21   0:00 grep --color=auto kube
ps aux|grep etcd

/opt/bin/etcd -name infra0 -initial-advertise-peer-urls http://127.0.0.1:2380 -listen-peer-urls http://127.0.0.1:2380 -initial-cluster-token etcd-cluster-1 -initial-cluster infra0=http://127.0.0.1:2380 -initial-cluster-state new
root@kuber:~/kubernetes/cluster# export PATH=$PATH:~/kubernetes/cluster/ubuntu/binaries
root@kuber:~/kubernetes/cluster# kubectl get nodes
NAME        LABELS                             STATUS
127.0.0.1   kubernetes.io/hostname=127.0.0.1   Ready

Adding Node (Minion)

Follow the standard installation and then reboot; no systemd/upstart scripts are created during installation, so the services are launched manually below.

At Node Server.

/opt/bin/kubelet --address=0.0.0.0 --port=10250 --hostname_override=kubernode --api_servers=http://192.168.2.161:8080 --logtostderr=true --cluster_dns=192.168.3.10 --cluster_domain=cluster.local &

/opt/bin/kube-proxy --master=http://192.168.2.161:8080 --logtostderr=true

where 192.168.2.161 is the master(api_server) IP.
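A quick way to confirm the new node has registered is to query the master API server (a small sketch; it assumes kubectl is available on that machine, or you can hit the REST API directly):

kubectl -s http://192.168.2.161:8080 get nodes

# or query the API server directly
curl http://192.168.2.161:8080/api/v1/nodes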

At Master

root@kuber:~/kubernetes/cluster# kubectl get node
NAME        LABELS                             STATUS
127.0.0.1   kubernetes.io/hostname=127.0.0.1   Ready
kubernode   kubernetes.io/hostname=kubernode   Ready
root@kuber:~/kubernetes/cluster# kubectl get node
NAME        LABELS                             STATUS
127.0.0.1   kubernetes.io/hostname=127.0.0.1   Ready
kubernode   kubernetes.io/hostname=kubernode   Ready

Launch Container Again

At Master

kubectl run wordpress1 --image=tutum/wordpress --port=80 --hostport=81

After scheduling, the container may be launched on the new node we just added.

In Node

root@kubernode:~# docker ps
CONTAINER ID        IMAGE                                  COMMAND             CREATED             STATUS              PORTS                NAMES
cb68476d1d40        tutum/wordpress:latest                 "/run.sh"           2 minutes ago       Up 2 minutes                             k8s_wordpress1.a6871d09_wordpress1-idcqp_default_2ba3e9ba-03a1-11e7-9227-000c293a2024_efa9455d
3726852dba12        gcr.io/google_containers/pause:0.8.0   "/pause"            4 minutes ago       Up 4 minutes        0.0.0.0:81->80/tcp   k8s_POD.9a88e88a_wordpress1-idcqp_default_2ba3e9ba-03a1-11e7-9227-000c293a2024_81d9e905

You will see wordpress1 running on the new node kubernode, which proves this approach works.
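Because the pod publishes host port 81 (0.0.0.0:81->80/tcp above), you can also reach WordPress on that node directly; the node IP below is a placeholder:

curl -I http://<kubernode-ip>:81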

Adding More Nodes (New Ways with a few steps)

On the new node, follow the installation but stop after ./build.sh. After that you will see the binaries directory. Since this machine runs only as a minion node, we just copy the minion binaries to /opt/bin for convenience.

cp binaries/minion/* /opt/bin/. -rf

There is no need to run the kube-up script on the node.

Then, execute the binary

/opt/bin/kubelet --address=0.0.0.0 --port=10250 --hostname_override=$HOSTNAME --api_servers=http://192.168.2.161:8080 --logtostderr=true --cluster_dns=192.168.3.10 --cluster_domain=cluster.local &

/opt/bin/kube-proxy --master=http://192.168.2.161:8080 --logtostderr=true

At Master

root@kuber:~/kubernetes/cluster# kubectl get nodes
NAME         LABELS                              STATUS
127.0.0.1    kubernetes.io/hostname=127.0.0.1    Ready
kubernode    kubernetes.io/hostname=kubernode    Ready
kubernode1   kubernetes.io/hostname=kubernode1   Ready

You will see that the two minions have joined the cluster.

At Master, launch container

kubectl run wordpress1 --image=tutum/wordpress --port=80 --hostport=81

At the new node you will see the container running on kubernode1; it takes some time to start, so wait for it.

root@kubernode1:~# docker ps
CONTAINER ID        IMAGE                                  COMMAND             CREATED             STATUS              PORTS                NAMES
76feb9f0b977        tutum/wordpress:latest                 "/run.sh"           36 seconds ago      Up 35 seconds                            k8s_wordpress2.a96c1d0a_wordpress2-exiea_default_1c777366-03a5-11e7-bf81-000c293a2024_ac68b30b
39016c54169c        gcr.io/google_containers/pause:0.8.0   "/pause"            6 minutes ago       Up 6 minutes        0.0.0.0:81->80/tcp   k8s_POD.9a88e88a_wordpress2-exiea_default_1c777366-03a5-11e7-bf81-000c293a2024_4f5c468d

At Master

root@kuber:~/kubernetes/cluster# kubectl get pods
NAME               READY     REASON    RESTARTS   AGE
wordpress-bzzqt    1/1       Running   0          1h
wordpress1-idcqp   1/1       Running   0          35m
wordpress2-exiea   1/1       Running   0          7m

Remove Node

After shutting down kubernode1, the master reflects the new node status after about one minute.

root@kuber:~/kubernetes/cluster# kubectl get nodes
NAME         LABELS                              STATUS
127.0.0.1    kubernetes.io/hostname=127.0.0.1    Ready
kubernode    kubernetes.io/hostname=kubernode    Ready
kubernode1   kubernetes.io/hostname=kubernode1   NotReady
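If the node is gone for good, you can also remove its node object from the API server so it no longer lingers as NotReady (a small sketch):

kubectl delete node kubernode1
kubectl get nodes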

Delete

Delete Pods

kubectl delete pods --all
kubectl delete pods name

After you execute this, the pod is deleted and the ReplicationController starts a new pod soon after; the service keeps working automatically.

In other words, you cannot get rid of pods by deleting them directly; you have to delete the RC. Remember the structure is RC -> Pod -> Service, so delete the RC.

kubectl get rc
kubectl delete rc wordpress

Now the pods are deleted for good, which is a nice design.
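You can confirm that both the RC and its pods are gone:

kubectl get rc
kubectl get pods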

Attach Label to Node

kubectl label nodes <node-name> <label-key>=<label-value>
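For example, marking kubernode1 with a hypothetical disktype label and then filtering on it:

kubectl label nodes kubernode1 disktype=ssd   # disktype=ssd is just an example label
kubectl get nodes -l disktype=ssd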

Failover

Shutdown Node

All of its pods will be rescheduled to another node automatically.

Pending Pods

If a pod cannot be scheduled because there is no suitable node (for example a host-port conflict), it stays Pending; once you add a new node it is scheduled there automatically.
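While a pod is stuck, kubectl describe usually shows the reason it cannot be scheduled (the pod name below is a placeholder):

kubectl get pods
kubectl describe pod <pending-pod-name>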

Create by File

A guestbook deployment with 3 frontends, 1 redis-master (write) and 2 redis-slaves (read).

root@kuber:~/deploy/phpredis# cat redis-master-controller.yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: redis-master
  labels:
    name: redis-master
spec:
  replicas: 1
  selector:
    name: redis-master
  template:
    metadata:
      labels:
        name: redis-master
    spec:
      containers:
      - name: master
        image: kubeguide/redis-master
        ports:
        - containerPort: 6379
root@kuber:~/deploy/phpredis# cat redis-master-service.yaml
apiVersion: v1
kind: Service
metadata:
    name: redis-master
    labels:
      name: redis-master
spec:
  ports:
    - port: 6379
      targetPort: 6379
  selector:
    name: redis-master

root@kuber:~/deploy/phpredis# cat redis-slave-controller.yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: redis-slave
  labels:
    name: redis-slave
spec:
  replicas: 2
  selector:
    name: redis-slave
  template:
    metadata:
      labels:
        name: redis-slave
    spec:
      containers:
      - name: slave
        image: kubeguide/guestbook-redis-slave
        env:
        - name: GET_HOSTS_FROM
          value: env
        ports:
        - containerPort: 6379

        
        
root@kuber:~/deploy/phpredis# cat redis-slave-service.yaml
apiVersion: v1
kind: Service
metadata:
    name: redis-slave
    labels:
      name: redis-slave
spec:
  ports:
    - port: 6379
  selector:
    name: redis-slave
root@kuber:~/deploy/phpredis# cat frontend-controller.yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: frontend
  labels:
    name: frontend
spec:
  replicas: 3
  selector:
    name: frontend
  template:
    metadata:
      labels:
        name: frontend
    spec:
      containers:
      - name: slave
        image: kubeguide/guestbook-php-frontend
        env:
        - name: GET_HOSTS_FROM
          value: env
        ports:
        - containerPort: 80

root@kuber:~/deploy/phpredis# cat frontend-service.yaml
apiVersion: v1
kind: Service
metadata:
    name: frontend
    labels:
      name: frontend
spec:
  type: NodePort
  ports:
    - port: 80
      nodePort: 30001
  selector:
    name: frontend
kubectl create -f redis-master-controller.yaml
kubectl create -f redis-master-service.yaml
kubectl create -f redis-slave-controller.yaml
kubectl create -f redis-slave-service.yaml
kubectl create -f frontend-controller.yaml
kubectl create -f frontend-service.yaml
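After creating everything, a quick check that all RCs, pods, and services came up:

kubectl get rc
kubectl get pods
kubectl get services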

Since we are running on a Mac with VMware, you can access it from the Mac via a browser.

http://192.168.2.161:30001
and 
http://192.168.2.162:30001
and
http://192.168.2.163:30001

You can also access it with curl from a node that runs a frontend container. Note that port 30001 is bound on each node running a frontend container; here that is 192.168.2.161, 192.168.2.162 and 192.168.2.163, and the placement is chosen randomly by the k8s scheduler.

Login to frontend container

root@kuber:~/deploy/phpredis# docker ps
CONTAINER ID        IMAGE                                     COMMAND                CREATED             STATUS              PORTS               NAMES
cfa2c3eb75cf        kubeguide/guestbook-php-frontend:latest   "apache2-foreground"   34 minutes ago      Up 34 minutes                           k8s_slave.e2fe3ce6_frontend-bq1f8_default_9503f391-053b-11e7-86d2-000c293a2024_76982035
4dd3e4bc6409        gcr.io/google_containers/pause:0.8.0      "/pause"               36 minutes ago      Up 36 minutes                           k8s_POD.ef28e851_frontend-bq1f8_default_9503f391-053b-11e7-86d2-000c293a2024_e13e09d6


root@kuber:~/deploy/phpredis# docker exec -i -t cfa2c3eb75cf bash

Let's look at the environment settings. They mean you don't have to worry about IPs: with GET_HOSTS_FROM=env you can just use variables such as REDIS_MASTER_PORT_6379_TCP_ADDR as your communication endpoint.

root@frontend-bq1f8:/var/www/html# env
REDIS_SLAVE_PORT_6379_TCP=tcp://192.168.3.2:6379
FRONTEND_PORT_80_TCP_ADDR=192.168.3.104
REDIS_SLAVE_SERVICE_HOST=192.168.3.2
HOSTNAME=frontend-bq1f8
KUBERNETES_PORT=tcp://192.168.3.1:443
KUBERNETES_PORT_443_TCP_PORT=443
PHP_INI_DIR=/usr/local/etc/php
REDIS_SLAVE_PORT=tcp://192.168.3.2:6379
FRONTEND_PORT_80_TCP_PORT=80
KUBERNETES_SERVICE_PORT=443
KUBERNETES_SERVICE_HOST=192.168.3.1
GET_HOSTS_FROM=env
FRONTEND_PORT_80_TCP_PROTO=tcp
REDIS_MASTER_PORT_6379_TCP_ADDR=192.168.3.98
REDIS_MASTER_PORT_6379_TCP=tcp://192.168.3.98:6379
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
REDIS_SLAVE_PORT_6379_TCP_PROTO=tcp
GPG_KEYS=0BD78B5F97500D450838F95DFE857D9A90D90EC1 6E4F6AB321FDC07F2C332E3AC2BF0BC433CFC8B3
REDIS_MASTER_SERVICE_PORT=6379
PWD=/var/www/html
REDIS_SLAVE_SERVICE_PORT=6379
FRONTEND_PORT=tcp://192.168.3.104:80
FRONTEND_SERVICE_PORT=80
REDIS_MASTER_SERVICE_HOST=192.168.3.98
SHLVL=1
HOME=/root
REDIS_SLAVE_PORT_6379_TCP_ADDR=192.168.3.2
FRONTEND_SERVICE_HOST=192.168.3.104
KUBERNETES_PORT_443_TCP_PROTO=tcp
REDIS_MASTER_PORT_6379_TCP_PORT=6379
FRONTEND_PORT_80_TCP=tcp://192.168.3.104:80
REDIS_MASTER_PORT_6379_TCP_PROTO=tcp
REDIS_SLAVE_PORT_6379_TCP_PORT=6379
PHP_EXTRA_BUILD_DEPS=apache2-dev
REDIS_MASTER_PORT=tcp://192.168.3.98:6379
KUBERNETES_PORT_443_TCP_ADDR=192.168.3.1
KUBERNETES_PORT_443_TCP=tcp://192.168.3.1:443
PHP_VERSION=5.6.12
PHP_EXTRA_CONFIGURE_ARGS=--with-apxs2

Let's look at the code.

root@frontend-bq1f8:/var/www/html# cat guestbook.php
<?

set_include_path('.:/usr/local/lib/php');

error_reporting(E_ALL);
ini_set('display_errors', 1);

require 'Predis/Autoloader.php';

Predis\Autoloader::register();

if (isset($_GET['cmd']) === true) {
  $host = 'redis-master';
  if (getenv('GET_HOSTS_FROM') == 'env') {
    $host = getenv('REDIS_MASTER_SERVICE_HOST');
  }

No hard-coded IP; it just uses the env settings.

Adding LoadBalancer

kubectl create -f lb-service.yaml
{
    "kind": "Service",
    "apiVersion": "v1",
    "metadata": {
        "name": "my-service"
    },
    "spec": {
        "selector": {
            "name": "frontend"
        },
        "ports": [
            {
                "protocol": "TCP",
                "port": 80,
                "targetPort": 80,
                "nodePort": 30002
            }
        ],
        "clusterIP": "192.168.3.105",
        "loadBalancerIP": "192.168.2.128",
        "type": "LoadBalancer"
    },
    "status": {
        "loadBalancer": {
            "ingress": [
                {
                    "ip": "192.168.2.128"
                }
            ]
        }
    }
}

where the selector (name=frontend) picks the web-service pods, and nodePort is the port number exposed on the LoadBalancer IP (the OA IP). Finally, note that Kubernetes will not allocate the LoadBalancer IP for you here, so attach it to an interface manually:

ifconfig eth0:11 192.168.2.128

try it

curl http://192.168.2.128:30002

Modify Container networking

With the installation above, the networking already works; the only thing left to change is the nameserver.

root@frontend-bq1f8:/var/www/html# echo "nameserver 8.8.8.8" >> /etc/resolv.conf
root@frontend-bq1f8:/var/www/html# ping www.google.com
PING www.google.com (64.233.187.103): 56 data bytes
64 bytes from 64.233.187.103: icmp_seq=0 ttl=127 time=17.823 ms
64 bytes from 64.233.187.103: icmp_seq=1 ttl=127 time=11.412 ms

Check Iptables

root@kuber:~/deploy/phpredis# iptables -L -t nat
Chain PREROUTING (policy ACCEPT)
target     prot opt source               destination
KUBE-PORTALS-CONTAINER  all  --  anywhere             anywhere             /* handle ClusterIPs; NOTE: this must be before the NodePort rules */
DOCKER     all  --  anywhere             anywhere             ADDRTYPE match dst-type LOCAL
KUBE-NODEPORT-CONTAINER  all  --  anywhere             anywhere             ADDRTYPE match dst-type LOCAL /* handle service NodePorts; NOTE: this must be the last rule in the chain */

Chain INPUT (policy ACCEPT)
target     prot opt source               destination

Chain OUTPUT (policy ACCEPT)
target     prot opt source               destination
KUBE-PORTALS-HOST  all  --  anywhere             anywhere             /* handle ClusterIPs; NOTE: this must be before the NodePort rules */
DOCKER     all  --  anywhere            !127.0.0.0/8          ADDRTYPE match dst-type LOCAL
KUBE-NODEPORT-HOST  all  --  anywhere             anywhere             ADDRTYPE match dst-type LOCAL /* handle service NodePorts; NOTE: this must be the last rule in the chain */

Chain POSTROUTING (policy ACCEPT)
target     prot opt source               destination
MASQUERADE  all  --  172.17.0.0/16        anywhere
MASQUERADE  tcp  --  172.17.0.4           172.17.0.4           tcp dpt:81

Chain DOCKER (2 references)
target     prot opt source               destination
DNAT       tcp  --  anywhere             anywhere             tcp dpt:89 to:172.17.0.4:81

Chain KUBE-NODEPORT-CONTAINER (1 references)
target     prot opt source               destination
REDIRECT   tcp  --  anywhere             anywhere             /* default/frontend: */ tcp dpt:30001 redir ports 38790
REDIRECT   tcp  --  anywhere             anywhere             /* default/my-service: */ tcp dpt:30002 redir ports 47418

Chain KUBE-NODEPORT-HOST (1 references)
target     prot opt source               destination
DNAT       tcp  --  anywhere             anywhere             /* default/frontend: */ tcp dpt:30001 to:192.168.2.161:38790
DNAT       tcp  --  anywhere             anywhere             /* default/my-service: */ tcp dpt:30002 to:192.168.2.161:47418

Chain KUBE-PORTALS-CONTAINER (1 references)
target     prot opt source               destination
REDIRECT   tcp  --  anywhere             192.168.3.104        /* default/frontend: */ tcp dpt:http redir ports 38790
REDIRECT   tcp  --  anywhere             192.168.3.1          /* default/kubernetes: */ tcp dpt:https redir ports 56476
REDIRECT   tcp  --  anywhere             192.168.3.105        /* default/my-service: */ tcp dpt:http redir ports 47418
REDIRECT   tcp  --  anywhere             192.168.3.98         /* default/redis-master: */ tcp dpt:6379 redir ports 34987
REDIRECT   tcp  --  anywhere             192.168.3.2          /* default/redis-slave: */ tcp dpt:6379 redir ports 56210

Chain KUBE-PORTALS-HOST (1 references)
target     prot opt source               destination
DNAT       tcp  --  anywhere             192.168.3.104        /* default/frontend: */ tcp dpt:http to:192.168.2.161:38790
DNAT       tcp  --  anywhere             192.168.3.1          /* default/kubernetes: */ tcp dpt:https to:192.168.2.161:56476
DNAT       tcp  --  anywhere             192.168.3.105        /* default/my-service: */ tcp dpt:http to:192.168.2.161:47418
DNAT       tcp  --  anywhere             192.168.3.98         /* default/redis-master: */ tcp dpt:6379 to:192.168.2.161:34987
DNAT       tcp  --  anywhere             192.168.3.2          /* default/redis-slave: */ tcp dpt:6379 to:192.168.2.161:56210

Networking seems wrong

root@kuber:~/deploy/phpredis# docker inspect 6ab98a408322 |grep IPAddress
        "IPAddress": "172.17.0.2",
        
root@kuber:~/deploy/phpredis# kubectl describe svc redis-master
W0310 14:30:20.928608   48930 request.go:302] field selector: v1 - events - involvedObject.name - redis-master: need to check if this is versioned correctly.
W0310 14:30:20.929393   48930 request.go:302] field selector: v1 - events - involvedObject.namespace - default: need to check if this is versioned correctly.
W0310 14:30:20.929811   48930 request.go:302] field selector: v1 - events - involvedObject.kind - Service: need to check if this is versioned correctly.
W0310 14:30:20.930247   48930 request.go:302] field selector: v1 - events - involvedObject.uid - 97350d7d-053a-11e7-bf70-000c293a2024: need to check if this is versioned correctly.
Name:           redis-master
Labels:         name=redis-master
Selector:       name=redis-master
Type:           ClusterIP
IP:         192.168.3.98
Port:           <unnamed>   6379/TCP
Endpoints:      172.17.0.2:6379

according to

https://read01.com/QMQJmE.html

More detailed information

root@kuber:~/deploy/phpredis# kubectl get services
NAME           LABELS                                    SELECTOR            IP(S)           PORT(S)
frontend       name=frontend                             name=frontend       192.168.3.104   80/TCP
kubernetes     component=apiserver,provider=kubernetes   <none>              192.168.3.1     443/TCP
my-service     <none>                                    name=frontend       192.168.3.105   80/TCP
redis-master   name=redis-master                         name=redis-master   192.168.3.98    6379/TCP
redis-slave    name=redis-slave                          name=redis-slave    192.168.3.2     6379/TCP

Networking Setting

As shown above, we got the wrong result: the service endpoint is a container IP (172.17.0.2) that other nodes cannot reach yet. Let's look at how to set up the network first.

Setup Docker Bridge

On each node, we have to reconfigure the docker0 bridge through /etc/default/docker.

root@kuber:~/deploy/phpredis# cat /etc/default/docker
DOCKER_OPTS="--bip=172.17.42.1/24"

We can set it as follows and reboot each system; per-node /etc/default/docker entries are sketched after the list.

In Master: Docker0: 172.17.42.1/24, Server IP: 172.16.155.130, hostname: kuber
In Node1: Docker0: 172.17.43.1/24, Server IP: 172.16.155.131, hostname: kubernode1
In Node2: Docker0: 172.17.44.1/24, Server IP: 172.16.155.132, hostname: kubernode2
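For example, the corresponding /etc/default/docker entries on the two nodes would be (assuming the addressing above):

# on kubernode1
DOCKER_OPTS="--bip=172.17.43.1/24"

# on kubernode2
DOCKER_OPTS="--bip=172.17.44.1/24"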

Setup Static Route

However, containers on different nodes cannot reach each other yet, so we have to set up static routes for cross-node container communication.

root@kuber:~/deploy/phpredis# cat /etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).

# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
auto eth0
iface eth0 inet dhcp
up ip route add 172.17.43.0/24 via 172.16.155.131 dev eth0 ||true
up ip route add 172.17.44.0/24 via 172.16.155.132 dev eth0 ||true

Here the other nodes have docker0 subnets 172.17.43.0/24 and 172.17.44.0/24 with node IPs 172.16.155.131 and 172.16.155.132, and the traffic goes out through eth0.

We need to set this up on all nodes.

In kubernode1

root@kubernode1:~# cat /etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).

# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
auto eth0
iface eth0 inet dhcp
up ip route add 172.17.42.0/24 via 172.16.155.130 dev eth0 ||true
up ip route add 172.17.44.0/24 via 172.16.155.132 dev eth0 ||true

In kubernode2

root@kubernode2:~# cat /etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).

# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
auto eth0
iface eth0 inet dhcp
up ip route add 172.17.42.0/24 via 172.16.155.130 dev eth0 ||true
up ip route add 172.17.43.0/24 via 172.16.155.131 dev eth0 ||true

Reboot the systems and relaunch all Kubernetes processes. Now the system works well and containers can reach each other through both container IPs and service IPs.

Admittedly this is not a great architecture, since you need to set up static routes on every server. Using flannel instead avoids the static route setup.

More

Deploying master and node on machine localhost
saltbase/salt/generate-cert/make-ca-cert.sh: No such file or directory
easy-rsa.tar.gz                                                                                                                                    100%   42KB  42.4KB/s   00:00
config-default.sh                                                                                                                                  100% 6271     6.1KB/s   00:00
util.sh                                                                                                                                            100%   29KB  29.0KB/s   00:00
kubelet.conf                                                                                                                                       100%  645     0.6KB/s   00:00
kube-proxy.conf                                                                                                                                    100%  688     0.7KB/s   00:00
kube-proxy                                                                                                                                         100% 2233     2.2KB/s   00:00
kubelet                                                                                                                                            100% 2158     2.1KB/s   00:00
kube-controller-manager.conf                                                                                                                       100%  761     0.7KB/s   00:00
etcd.conf                                                                                                                                          100%  707     0.7KB/s   00:00
kube-apiserver.conf                                                                                                                                100%  682     0.7KB/s   00:00
kube-scheduler.conf                                                                                                                                100%  682     0.7KB/s   00:00
kube-apiserver                                                                                                                                     100% 2358     2.3KB/s   00:00
kube-controller-manager                                                                                                                            100% 2672     2.6KB/s   00:00
kube-scheduler                                                                                                                                     100% 2360     2.3KB/s   00:00
etcd                                                                                                                                               100% 2073     2.0KB/s   00:00
reconfDocker.sh                                                                                                                                    100% 2183     2.1KB/s   00:00
etcdctl                                                                                                                                            100%   14MB  13.7MB/s   00:00
kube-apiserver                                                                                                                                     100%  119MB  59.5MB/s   00:02
kube-controller-manager                                                                                                                            100%   97MB  96.9MB/s   00:01
flanneld                                                                                                                                           100%   16MB  15.8MB/s   00:01
kube-scheduler                                                                                                                                     100%   50MB  50.4MB/s   00:01
etcd                                                                                                                                               100%   16MB   8.0MB/s   00:02
kube-proxy                                                                                                                                         100%   44MB  43.6MB/s   00:01
kubelet                                                                                                                                            100%  103MB  17.2MB/s   00:06
flanneld

/opt/etcd/config/etcd.conf

ETCD_DATA_DIR=/var/lib/etcd
ETCD_NAME=u16kuber
ETCD_INITIAL_CLUSTER=u16kuber=http://127.0.0.1:2380
ETCD_INITIAL_CLUSTER_STATE=new
ETCD_LISTEN_PEER_URLS=http://127.0.0.1:2380
ETCD_INITIAL_ADVERTISE_PEER_URLS=http://127.0.0.1:2380
ETCD_ADVERTISE_CLIENT_URLS=http://127.0.0.1:2379
ETCD_LISTEN_CLIENT_URLS=http://192.168.2.129:2379,http://127.0.0.1:2379,http://127.0.0.1:4001,http://192.168.2.129:4001
GOMAXPROCS=1

We add both 4001 and 2379 as client ports; kube-apiserver connects on 4001 if you launched the cluster with kube-up.sh.
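You can verify that etcd answers on both client ports before wiring up the API server (a quick check; etcd exposes a /version endpoint):

curl http://127.0.0.1:4001/version
curl http://127.0.0.1:2379/version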

In Ubuntu 14.04, edit the config /etc/default/etcd instead:

ETCD_OPTS=" -name infra --listen-client-urls=http://127.0.0.1:4001,http://127.0.0.1:2379 -advertise-client-urls http://127.0.0.1:2379"

All the kube configs are located in /etc/default as well.

root@kuber154:~# cat /etc/default/docker
DOCKER_OPTS=" -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock --bip=${FLANNEL_SUBNET} --mtu=1472"
root@kuber154:~# cat /run/flannel/subnet.env
FLANNEL_SUBNET=172.16.60.1/24
FLANNEL_MTU=1472
FLANNEL_IPMASQ=true

Install Docker Registry

apt-get install build-essential python-dev libevent-dev python-pip libssl-dev liblzma-dev libffi-dev
git clone https://github.com/docker/docker-registry.git -b 0.8
python setup.py install

Set up Docker to treat the registry as insecure on every server and client.

root@dockerregistry:~# cat /etc/default/docker
# Docker Upstart and SysVinit configuration file

# Customize location of Docker binary (especially for development testing).
#DOCKER="/usr/local/bin/docker"

# Use DOCKER_OPTS to modify the daemon startup options.
#DOCKER_OPTS="--dns 8.8.8.8 --dns 8.8.4.4"

# If you need Docker to use an HTTP proxy, it can also be specified here.
#export http_proxy="http://127.0.0.1:3128/"

# This is also a handy place to tweak where Docker's temporary files go.
#export TMPDIR="/mnt/bigdrive/docker-tmp"
#DOCKER_OPTS="--insecure-registry 172.16.155.136:5000"
DOCKER_OPTS="--insecure-registry 172.16.155.136:5000 --bip=172.17.42.1/24"

Running the registry

We run the registry with a restart policy so it survives Docker daemon restarts, and mount a host directory in place of the default /tmp storage so the data persists.

docker run -d -p 5000:5000 -v /opt/registry:/tmp/registry --restart always registry
docker ps -a  # shows containers in every state; without -a only running containers are listed

Build the first image

docker pull ubuntu 
docker tag ubuntu 172.16.155.136:5000/testubuntu2
docker push 172.16.155.136:5000/testubuntu2

Check the repository with the registry API; do NOT rely on the docker images command, it seems to lag behind.

root@dockerhub:~# curl http://172.16.155.136:5000/v2/_catalog

Pull and Run in Local Machine

docker pull 172.16.155.136:5000/testubuntu2

docker run -t -i 172.16.155.136:5000/testubuntu2 /bin/bash

Show the repository images and tags. You will not see the image with docker images right away, only after a while .... (strange)

root@dockerhub:~# curl http://172.16.155.136:5000/v2/_catalog
{"repositories":["newubuntu","nutest","testubuntu2"]}

root@dockerhub:~# curl http://172.16.155.136:5000/v2/nutest/tags/list
{"name":"nutest","tags":["latest"]}

Standard operations for a remote repository

In 172.16.155.137

How to pull, run and store an image

docker pull 172.16.155.136:5000/testubuntu2
docker run -it 172.16.155.136:5000/testubuntu2 bash
touch iamclient2
docker commit 13931c68a3a9 172.16.155.136:5000/newcommit
docker push 172.16.155.136:5000/newcommit

Check the repository

root@dockerhub:~# curl http://172.16.155.136:5000/v2/_catalog

Creating a Docker Image

After we have an image with a run script at ./run.sh, we can execute it with a docker command:

docker run -td 172.16.155.136:5000/uwebserver bash ./run.sh

Or we can bake the run script into the image itself:

docker commit --change='CMD ["bash", "./run.sh"]' 919eac1f9753 172.16.155.136:5000/uwebserverv5
docker push 172.16.155.136:5000/uwebserverv5

So we can run the image directly without specifying a command. Note that 919eac1f9753 is a running container, not an image.

docker run -td 172.16.155.136:5000/uwebserverv5

root@dockerhubc:~# docker ps
CONTAINER ID        IMAGE                                     COMMAND                CREATED             STATUS              PORTS               NAMES
242a3a43d2d5        172.16.155.136:5000/uwebserverv5:latest   "bash ./run.sh"        2 minutes ago       Up 2 minutes                            focused_turing

You can see the command is the one we defined, bash ./run.sh. The -td flags run it detached in the background with a TTY.

Try it by connecting to the container:

curl http://containerip:8080

You can also hack the container's stored JSON config file located at:

root@dockerhub:~# vim /var/lib/docker/containers/25335c116bcf9e0ae891da0265f41a551b645dd0f82d8513fcc67f223497ab41/config.json

A simple Python web server for testing. It returns the container hostname, so you can tell which container is serving the request, which is especially useful when testing a load balancer.

pyweb.py

#!/usr/bin/python
import sys
import BaseHTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
import socket
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer

HandlerClass = SimpleHTTPRequestHandler
ServerClass  = BaseHTTPServer.HTTPServer
Protocol     = "HTTP/1.0"


class myHandler(BaseHTTPRequestHandler):

   #Handler for the GET requests
   def do_GET(self):
        self.send_response(200)
        self.send_header('Content-type','text/html')
        self.end_headers()
        # Send the html message
        host = socket.gethostname()
        self.wfile.write("%s\n "%host)
        return



if sys.argv[1:]:
  port = int(sys.argv[1])
else:
  port = 8000
server_address = ('0.0.0.0', port)

HandlerClass.protocol_version = Protocol
#httpd = ServerClass(server_address, HandlerClass)
httpd = ServerClass(server_address, myHandler)

sa = httpd.socket.getsockname()
s = socket.gethostname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
print sa,s
httpd.serve_forever()
chmod a+x pyweb.py
docker commit --change='CMD ["./run.py"]' 242a3a43d2d5 172.16.155.136:5000/uwebserverv6

Here we change the CMD to the python web file.
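Before pushing, you can sanity-check the script locally (it is Python 2; the port argument is optional and defaults to 8000):

python pyweb.py 8000 &
sleep 1
curl http://localhost:8000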

docker push 172.16.155.136:5000/uwebserverv6
docker run -td  172.16.155.136:5000/uwebserverv6

try it

curl http://xxxx:8000

Next, we launch a container from the private registry and trace its service network path.

Launch a Container through private Docker Registry

Here is the webserver-controller.yaml.

apiVersion: v1
kind: ReplicationController
metadata:
  name: webserver
  labels:
    name: webserver
spec:
  replicas: 2
  selector:
    name: webserver
  template:
    metadata:
      labels:
        name: webserver
    spec:
      containers:
      - name: webserver
        image: 172.16.155.136:5000/uwebserverv6
        env:
        - name: GET_HOSTS_FROM
          value: env
        ports:
        - containerPort: 8000
kubectl create -f webserver-controller.yaml

To trace the webserver service IP:

root@kuber:~/deploy/webscale# kubectl get service
NAME           LABELS                                    SELECTOR            IP(S)          PORT(S)
frontend       name=frontend                             name=frontend       192.168.3.25   80/TCP
kubernetes     component=apiserver,provider=kubernetes   <none>              192.168.3.1    443/TCP
my-service     <none>                                    name=frontend       192.168.3.33   8033/TCP
redis-master   name=redis-master                         name=redis-master   192.168.3.42   6379/TCP
redis-slave    name=redis-slave                          name=redis-slave    192.168.3.52   6379/TCP
webserver      name=webserver                            name=webserver      192.168.3.29   8000/TCP


root@kuber:~/deploy/webscale# iptables-save |grep 29
-A KUBE-PORTALS-CONTAINER -d 192.168.3.29/32 -p tcp -m comment --comment "default/webserver:" -m tcp --dport 8000 -j REDIRECT --to-ports 33589
-A KUBE-PORTALS-HOST -d 192.168.3.29/32 -p tcp -m comment --comment "default/webserver:" -m tcp --dport 8000 -j DNAT --to-destination 172.16.155.130:33589


root@kuber:~/deploy/webscale# lsof|grep 33589
kube-prox  2054             root   14u     IPv6              72930       0t0        TCP *:33589 (LISTEN)
kube-prox  2054  2065       root   14u     IPv6              72930       0t0        TCP *:33589 (LISTEN)

test result

root@kuber:~/deploy/webscale# curl http://192.168.3.29:8000
webserver-sttr2

root@kuber:~/deploy/webscale# curl http://192.168.3.29:8000
webserver-3rbsd

Since we deployed 2 replicas, the service load-balances the web server requests through kube-proxy.
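A simple way to watch the load balancing is to hit the service IP a few times; the returned hostnames should alternate between the two pods (using the service IP from above):

for i in 1 2 3 4; do curl -s http://192.168.3.29:8000; done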

Kubernetes Namespace

root@kuber:~/deploy/namespace# cat createnamespace.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: inq
root@kuber:~/deploy/namespace# kubectl create -f createnamespace.yaml


root@kuber:~/deploy/namespace# kubectl get namespace
NAME      LABELS    STATUS
default   <none>    Active
inq       <none>    Active

Use the same YAML file; the namespace is not specified in the YAML itself but passed on the command line.

root@kuber:~/deploy/namespace# kubectl --namespace=inq create -f webserver-controller.yaml
root@kuber:~/deploy/namespace# kubectl --namespace=inq get pod
NAME              READY     REASON    RESTARTS   AGE
webserver-3ooh1   1/1       Running   0          1m
webserver-wtpb8   1/1       Running   0          1m

You will not see these pods with a plain kubectl get pod, since that command looks at the default namespace.

root@kuber:~/deploy/namespace# kubectl get namespace
NAME      LABELS    STATUS
default   <none>    Active
inq       <none>    Active
root@kuber:~/deploy/namespace# kubectl --namespace=inq create -f webserver-service.yaml

root@kuber:~/deploy/namespace# kubectl --namespace=inq get service
NAME        LABELS           SELECTOR         IP(S)          PORT(S)
webserver   name=webserver   name=webserver   192.168.3.59   8000/TCP

The Kubernetes namespace is just an abstraction layer over RCs, pods, and services; it is NOT a Linux namespace, as you can verify below, so do not confuse the two.

root@kubernode1:~# ip netns
root@kubernode1:~#

The great thing is that SkyDNS supports Kubernetes namespaces.

Multiple Containers in One Pod

apiVersion: v1
kind: ReplicationController
metadata:
  name: multiserver
  labels:
    name: multiserver
spec:
  replicas: 2
  selector:
    name: multiserver
  template:
    metadata:
      labels:
        name: multiserver
    spec:
      containers:
      - name: pythonserver
        image: 172.16.155.136:5000/uwebserverv6
        env:
        - name: GET_HOSTS_FROM
          value: env
        ports:
        - containerPort: 8000
      - name: phpserver
        image: kubeguide/guestbook-php-frontend
        env:
        - name: GET_HOSTS_FROM
          value: env
        ports:
        - containerPort: 80

The difference is the additional - name: phpserver container section.

root@kuber:~/deploy/multiplecontainer# kubectl get pod
NAME                 READY     REASON    RESTARTS   AGE
multiserver-avtsz    2/2       Running   0          2m
multiserver-dx1k3    2/2       Running   0          2m

You will see multiple containers on the host.

root@kuber:~/deploy/multiplecontainer# docker ps
CONTAINER ID        IMAGE                                     COMMAND                CREATED             STATUS              PORTS               NAMES
58f45d61c57c        kubeguide/guestbook-php-frontend:latest   "apache2-foreground"   6 minutes ago       Up 6 minutes                            k8s_phpserver.dc7e3eaa_multiserver-avtsz_default_20595e72-0882-11e7-be78-000c29d96baa_80e20899
9fe54ffe77d2        172.16.155.136:5000/uwebserverv6:latest   "./run.py"             6 minutes ago       Up 6 minutes                            k8s_pythonserver.e3143c8f_multiserver-avtsz_default_20595e72-0882-11e7-be78-000c29d96baa_a76ecb02

Let's log in to one of the containers.

root@kuber:~/deploy/multiplecontainer# docker exec -it 58f45d61c57c bash

Within a single pod, you can access the other containers through localhost.

root@multiserver-avtsz:/var/www/html# curl http://localhost:80
<html ng-app="redis">
  <head>
    <title>Guestbook</title>
    
    
root@multiserver-avtsz:/var/www/html# curl http://localhost:8000
multiserver-avtsz

Share Same Pod IP

root@multiserver-avtsz:/# ip addr list
7: eth0: <BROADCAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
    link/ether 02:42:ac:11:2a:03 brd ff:ff:ff:ff:ff:ff
    inet 172.17.42.3/24 scope global eth0
    
root@multiserver-avtsz:/var/www/html# ip addr list
7: eth0: <BROADCAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
    link/ether 02:42:ac:11:2a:03 brd ff:ff:ff:ff:ff:ff
    inet 172.17.42.3/24 scope global eth0

Two containers in one pod share the same IP and even the same MAC address.

Adding google_containers/pause to private registry

root@dockerhub:~# docker pull gcr.io/google_containers/pause
root@dockerhub:~# docker tag gcr.io/google_containers/pause 172.16.155.136:5000/google_containers/pause:latest
root@dockerhub:~# docker push 172.16.155.136:5000/google_containers/pause:latest

root@dockerhub:~# curl http://172.16.155.136:5000/v2/_catalog
{"repositories":["google_containers/pause","newubuntudatacommit","ubuntu","uwebserver","uwebserverv5","uwebserverv6"]}

Add it to the kubelet config (not done yet) in /etc/default/kubelet or to the process run.sh:

KUBELET_ARGS="--pod_infra_container_image=172.16.155.136:5000/google_containers/pause:latest"

How to provide Service

Method 1

Add a network node to the system, but run only kube-proxy plus routing or flannel on it.
You will not see this network node when you type kubectl get nodes, since we did not run kubelet on it. On the network node, we can check the iptables rules:

root@kubernetwork:~# iptables-save
# Generated by iptables-save v1.4.21 on Tue Mar 14 16:53:27 2017
*nat
:PREROUTING ACCEPT [0:0]
:INPUT ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
:POSTROUTING ACCEPT [0:0]
:DOCKER - [0:0]
:KUBE-NODEPORT-CONTAINER - [0:0]
:KUBE-NODEPORT-HOST - [0:0]
:KUBE-PORTALS-CONTAINER - [0:0]
:KUBE-PORTALS-HOST - [0:0]
-A PREROUTING -m comment --comment "handle ClusterIPs; NOTE: this must be before the NodePort rules" -j KUBE-PORTALS-CONTAINER
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
-A PREROUTING -m addrtype --dst-type LOCAL -m comment --comment "handle service NodePorts; NOTE: this must be the last rule in the chain" -j KUBE-NODEPORT-CONTAINER
-A OUTPUT -m comment --comment "handle ClusterIPs; NOTE: this must be before the NodePort rules" -j KUBE-PORTALS-HOST
-A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER
-A OUTPUT -m addrtype --dst-type LOCAL -m comment --comment "handle service NodePorts; NOTE: this must be the last rule in the chain" -j KUBE-NODEPORT-HOST
-A POSTROUTING -s 172.17.55.0/24 ! -o docker0 -j MASQUERADE
-A KUBE-NODEPORT-CONTAINER -p tcp -m comment --comment "default/frontend:" -m tcp --dport 30001 -j REDIRECT --to-ports 55539
-A KUBE-NODEPORT-HOST -p tcp -m comment --comment "default/frontend:" -m tcp --dport 30001 -j DNAT --to-destination 172.16.155.138:55539
-A KUBE-PORTALS-CONTAINER -d 192.168.3.1/32 -p tcp -m comment --comment "default/kubernetes:" -m tcp --dport 443 -j REDIRECT --to-ports 47032
-A KUBE-PORTALS-CONTAINER -d 192.168.3.2/32 -p tcp -m comment --comment "default/redis-master:" -m tcp --dport 6379 -j REDIRECT --to-ports 46541
-A KUBE-PORTALS-CONTAINER -d 192.168.3.14/32 -p tcp -m comment --comment "default/redis-slave:" -m tcp --dport 6379 -j REDIRECT --to-ports 43424
-A KUBE-PORTALS-CONTAINER -d 192.168.3.39/32 -p tcp -m comment --comment "default/frontend:" -m tcp --dport 80 -j REDIRECT --to-ports 55539
-A KUBE-PORTALS-HOST -d 192.168.3.1/32 -p tcp -m comment --comment "default/kubernetes:" -m tcp --dport 443 -j DNAT --to-destination 172.16.155.138:47032
-A KUBE-PORTALS-HOST -d 192.168.3.2/32 -p tcp -m comment --comment "default/redis-master:" -m tcp --dport 6379 -j DNAT --to-destination 172.16.155.138:46541
-A KUBE-PORTALS-HOST -d 192.168.3.14/32 -p tcp -m comment --comment "default/redis-slave:" -m tcp --dport 6379 -j DNAT --to-destination 172.16.155.138:43424
-A KUBE-PORTALS-HOST -d 192.168.3.39/32 -p tcp -m comment --comment "default/frontend:" -m tcp --dport 80 -j DNAT --to-destination 172.16.155.138:55539
COMMIT
# Completed on Tue Mar 14 16:53:27 2017
# Generated by iptables-save v1.4.21 on Tue Mar 14 16:53:27 2017
*filter
:INPUT ACCEPT [1600:143533]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [1480:179463]
:DOCKER - [0:0]
-A FORWARD -o docker0 -j DOCKER
-A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -i docker0 ! -o docker0 -j ACCEPT
-A FORWARD -i docker0 -o docker0 -j ACCEPT
COMMIT
# Completed on Tue Mar 14 16:53:27 2017

That dump contains all the iptables rules for the services.

Find the frontend service IP:

root@kuber:~/deploy/phpredis# kubectl get service
NAME           LABELS                                    SELECTOR            IP(S)          PORT(S)
frontend       name=frontend                             name=frontend       192.168.3.39   80/TCP

Try connecting to the frontend service IP from the network node:

curl http://192.168.3.39:80

Now you can attach an OA IP to the kubernetwork node and NAT it externally to a public IP. We could then run a per-tenant load balancer that routes to different backends depending on the URL path; that is how the service is exposed.

Method 2

iptables-save
...
..
KUBE-PORTALS-CONTAINER -d 192.168.3.33/32 -p tcp -m comment --comment "default/my-service:" -m tcp --dport 8033 -j REDIRECT --to-ports 48686

where 192.168.3.33/32 is the frontend load-balancer service IP and port 48686 is opened by kube-proxy. So we can use hostIP:48686 to access the service directly without mapping an OA IP again, but note that the port is assigned dynamically.
It is a more convenient solution, even for a demo.

You can now connect to http://172.16.155.130:48686 from a browser on the OA network.
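Because kube-proxy reassigns that port whenever it restarts, you can look up the current value at any time with the same iptables query:

iptables-save | grep "default/my-service"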

Container Environment Setting

All the cluster services are assigned to the container as Linux environment variables, if you set env in the YAML file.

If you did not set env in the YAML file:

apiVersion: v1
kind: ReplicationController
metadata:
  name: redis-master
  labels:
    name: redis-master
spec:
  replicas: 1
  selector:
    name: redis-master
  template:
    metadata:
      labels:
        name: redis-master
    spec:
      containers:
      - name: master
        image: kubeguide/redis-master
        ports:
        - containerPort: 6379
[ root@redis-master-nqv02:/data ]$ env
GIT_PS1_SHOWDIRTYSTATE=1
GREP_COLOR=1;31
HOSTNAME=redis-master-nqv02
KUBERNETES_PORT_443_TCP_PORT=443
KUBERNETES_PORT=tcp://192.168.3.1:443
CLICOLOR=1
KUBERNETES_SERVICE_PORT=443
KUBERNETES_SERVICE_HOST=192.168.3.1
LS_COLORS=di=34:ln=35:so=32:pi=33:ex=1;40:bd=34;40:cd=34;40:su=0;40:sg=0;40:tw=0;40:ow=0;40:
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
PWD=/data
PS1=\[\033[40m\]\[\033[34m\][ \u@\H:\[\033[36m\]\w$(__git_ps1 " \[\033[35m\]{\[\033[32m\]%s\[\033[35m\]}")\[\033[34m\] ]$\[\033[0m\]
SHLVL=1
HOME=/root
GREP_OPTIONS=--color=auto
KUBERNETES_PORT_443_TCP_PROTO=tcp
KUBERNETES_PORT_443_TCP_ADDR=192.168.3.1
KUBERNETES_PORT_443_TCP=tcp://192.168.3.1:443
_=/usr/bin/env

If you did set env in the YAML file:

apiVersion: v1
kind: ReplicationController
metadata:
  name: frontend
  labels:
    name: frontend
spec:
  replicas: 3
  selector:
    name: frontend
  template:
    metadata:
      labels:
        name: frontend
    spec:
      containers:
      - name: slave
        image: kubeguide/guestbook-php-frontend
        env:
        - name: GET_HOSTS_FROM
          value: env
        ports:
        - containerPort: 80

You will see the service IPs of all services reflected in the container env (tenant based).


root@kubernode2:~# docker exec -it 34736e01497c bash
root@frontend-laatp:/var/www/html# env
REDIS_SLAVE_PORT_6379_TCP=tcp://192.168.3.14:6379
REDIS_SLAVE_SERVICE_HOST=192.168.3.14
HOSTNAME=frontend-laatp
KUBERNETES_PORT_443_TCP_PORT=443
KUBERNETES_PORT=tcp://192.168.3.1:443
PHP_INI_DIR=/usr/local/etc/php
REDIS_SLAVE_PORT=tcp://192.168.3.14:6379
KUBERNETES_SERVICE_PORT=443
KUBERNETES_SERVICE_HOST=192.168.3.1
GET_HOSTS_FROM=env
REDIS_MASTER_PORT_6379_TCP_ADDR=192.168.3.2
REDIS_MASTER_PORT_6379_TCP=tcp://192.168.3.2:6379
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
REDIS_SLAVE_PORT_6379_TCP_PROTO=tcp
GPG_KEYS=0BD78B5F97500D450838F95DFE857D9A90D90EC1 6E4F6AB321FDC07F2C332E3AC2BF0BC433CFC8B3
REDIS_MASTER_SERVICE_PORT=6379
PWD=/var/www/html
REDIS_SLAVE_SERVICE_PORT=6379
REDIS_MASTER_SERVICE_HOST=192.168.3.2
SHLVL=1
HOME=/root
REDIS_SLAVE_PORT_6379_TCP_ADDR=192.168.3.14
KUBERNETES_PORT_443_TCP_PROTO=tcp
REDIS_MASTER_PORT_6379_TCP_PORT=6379
REDIS_SLAVE_PORT_6379_TCP_PORT=6379
REDIS_MASTER_PORT_6379_TCP_PROTO=tcp
PHP_EXTRA_BUILD_DEPS=apache2-dev
REDIS_MASTER_PORT=tcp://192.168.3.2:6379
KUBERNETES_PORT_443_TCP_ADDR=192.168.3.1
KUBERNETES_PORT_443_TCP=tcp://192.168.3.1:443
PHP_VERSION=5.6.12
PHP_EXTRA_CONFIGURE_ARGS=--with-apxs2
_=/usr/bin/env

Difference Between NodePort and LoadBalancer

root@kuber:~/deploy/webscale# iptables-save |grep 30001
-A KUBE-NODEPORT-CONTAINER -p tcp -m comment --comment "default/frontend:" -m tcp --dport 30001 -j REDIRECT --to-ports 37688
-A KUBE-NODEPORT-HOST -p tcp -m comment --comment "default/frontend:" -m tcp --dport 30001 -j DNAT --to-destination 172.16.155.130:37688

root@kuber:~/deploy/webscale# iptables-save |grep 8033
-A KUBE-PORTALS-CONTAINER -d 192.168.3.33/32 -p tcp -m comment --comment "default/my-service:" -m tcp --dport 8033 -j REDIRECT --to-ports 57959
-A KUBE-PORTALS-HOST -d 192.168.3.33/32 -p tcp -m comment --comment "default/my-service:" -m tcp --dport 8033 -j DNAT --to-destination 172.16.155.130:57959

30001 is the NodePort; 192.168.3.33:8033 is the load-balancer service. You can connect to any host at host:30001 to reach the container in a browser, but you cannot connect directly to 192.168.3.33:8033 since that is a proxy-aware virtual IP, and you cannot use hostip:8033 either. The -d 192.168.3.33/32 match is the key to why host:30001 works and host:8033 does not: -d is the destination match, so only traffic whose destination is 192.168.3.33 with destination port 8033 is routed to the proxy, while for 30001 any destination IP with destination port 30001 is routed to the proxy.

For multi-tenant setups I suggest the load-balancer mode, since we can assign one LB IP per tenant to avoid port conflicts; each tenant gets its own LB IP and can use any port on it to expose services.

Flannel Setup

In Minion

root@kubernode1:~/flannel-0.4.0# cat /etc/default/flanneld

FLANNEL_OPTS="-etcd-endpoints=http://172.16.155.130:4001 -etcd-prefix="/coreos.com/network""
root@kubernode1:~/flannel-0.4.0# ifconfig
docker0   Link encap:Ethernet  HWaddr 56:84:7a:fe:97:99
          inet addr:172.16.15.1  Bcast:0.0.0.0  Mask:255.255.255.0
          UP BROADCAST MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)

eth0      Link encap:Ethernet  HWaddr 00:0c:29:70:0f:e0
          inet addr:172.16.155.131  Bcast:172.16.155.255  Mask:255.255.255.0
          inet6 addr: fe80::20c:29ff:fe70:fe0/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:6626 errors:0 dropped:0 overruns:0 frame:0
          TX packets:4443 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:541646 (541.6 KB)  TX bytes:1175519 (1.1 MB)

flannel0  Link encap:UNSPEC  HWaddr 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00
          inet addr:172.16.70.0  P-t-P:172.16.70.0  Mask:255.255.0.0
          UP POINTOPOINT RUNNING  MTU:1472  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:500
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)

You should see the flannel0 interface there. If you cannot see flannel0, check the etcd endpoint setting and make sure etcd is remotely accessible:

root@kubernode1:~/flannel-0.4.0# curl http://172.16.155.130:4001/v2/keys/coreos.com/network
{"action":"get","node":{"key":"/coreos.com/network","dir":true,"nodes":[{"key":"/coreos.com/network/config","value":"{\"Network\":\"172.16.0.0/16\"}","modifiedIndex":78694,"createdIndex":78694},{"key":"/coreos.com/network/subnets","dir":true,"modifiedIndex":28260,"createdIndex":28260}],"modifiedIndex":28255,"createdIndex":28255}}

If not, make sure the etcd config file is correct and that etcd can be reached.

ETCD_OPTS="-name infra0 -initial-advertise-peer-urls http://127.0.0.1:2380   -listen-peer-urls http://127.0.0.1:2380   -initial-cluster-token etcd-cluster-1   -initial-cluster infra0=http://127.0.0.1:2380 -advertise-client-urls http://172.16.155.131:2379,http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379,http://0.0.0.0:4001 -initial-cluster-state new"

make sure these two options are set

-advertise-client-urls http://172.16.155.131:2379,http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379,http://0.0.0.0:4001

They enable remote access; otherwise etcd only accepts connections from localhost. Here peer refers to the etcd cluster members and client to remote clients.

flannel config setting

FLANNEL_OPTS="-etcd-endpoints=http://172.16.155.130:4001 -etcd-prefix="/coreos.com/network""

How to start k8s

In Master

service docker stop
service etcd start
service flannel start
service docker start

Note that flannel strongly depends on etcd, hence the ordering.

root@kuber:~# cat /run/flannel/subnet.env
FLANNEL_SUBNET=172.16.14.1/24
FLANNEL_MTU=1472
FLANNEL_IPMASQ=false

In runmaster.sh

/opt/bin/kubelet --address=0.0.0.0 --port=10250 --hostname_override=127.0.0.1 --api_servers=http://127.0.0.1:8080 --logtostderr=true --cluster_dns=8.8.8.8 --cluster_domain=cluster.local &
/opt/bin/kube-scheduler --logtostderr=true --master=127.0.0.1:8080 &
/opt/bin/kube-apiserver --address=0.0.0.0 --port=8080 --etcd_servers=http://127.0.0.1:4001 --logtostderr=true --service-cluster-ip-range=192.168.3.0/26 --admission_control=NamespaceLifecycle,NamespaceAutoProvision,LimitRanger,ServiceAccount,ResourceQuota --service_account_key_file=/tmp/kube-serviceaccount.key --service_account_lookup=false &
/opt/bin/kube-controller-manager --master=127.0.0.1:8080 --service_account_private_key_file=/tmp/kube-serviceaccount.key --logtostderr=true &
/opt/bin/kube-proxy --master=http://127.0.0.1:8080 --logtostderr=true &


/opt/bin/kubelet --address=0.0.0.0 --port=10250 --hostname_override=127.0.0.1 --api_servers=http://127.0.0.1:8080 --logtostderr=true --cluster_dns=8.8.8.8 --cluster_domain=cluster.local
/opt/bin/kube-scheduler --logtostderr=true --master=127.0.0.1:8080
/opt/bin/kube-apiserver --address=0.0.0.0 --port=8080 --etcd_servers=http://127.0.0.1:4001 --logtostderr=true --service-cluster-ip-range=192.168.3.0/26 --admission_control=NamespaceLifecycle,NamespaceAutoProvision,LimitRanger,ServiceAccount,ResourceQuota --service_account_key_file=/opt/kube/kube-serviceaccount.key --service_account_lookup=false
root       1745  1.0  1.8  25556 18572 ?        Ssl  09:35   0:00 /opt/bin/kube-controller-manager --master=127.0.0.1:8080 --service_account_private_key_file=/opt/kube/kube-serviceaccount.key --logtostderr=true
root       1746  0.1  1.3 201956 13380 ?        Ssl  09:35   0:00 /opt/bin/kube-proxy --master=http://127.0.0.1:8080 --logtostderr=true


make sure the remote connection works

curl http://172.16.155.130:4001/v2/keys/coreos.com/network

set up flannel subnet

/opt/bin/etcdctl set /coreos.com/network/config '{"Network":"172.16.0.0/16"}'

get the current running subnets

/opt/bin/etcdctl ls /coreos.com/network/subnets

Clear subnet setting

/opt/bin/etcdctl rm /coreos.com/network/subnets/172.16.91.0-24

RESET Flannel Network

service docker stop
service flanneld stop
/opt/bin/etcdctl ls /coreos.com/network/subnets
/opt/bin/etcdctl rm  /coreos.com/network/subnets/172.16.14.0-24
/opt/bin/etcdctl rm  /coreos.com/network/subnets/172.16.37.0-24
/opt/bin/etcdctl set /coreos.com/network/config '{"Network":"172.20.0.0/16"}'
service flanneld start  # it will auto-generate a subnet
/opt/bin/etcdctl ls /coreos.com/network/subnets
ifconfig
edit /etc/default/docker and set --bip=172.20.73.1/24 in DOCKER_OPTS  # the value depends on the new flannel subnet
service docker start
ifconfig
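
Instead of hard-coding the --bip value, you can derive it from /run/flannel/subnet.env, which flanneld regenerates after the reset. This is a small sketch, not part of the original steps, and it assumes Docker reads DOCKER_OPTS from /etc/default/docker:

. /run/flannel/subnet.env
echo "DOCKER_OPTS=\"--bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU}\"" >> /etc/default/docker
service docker start
ifconfig docker0   # docker0 should now sit inside the flannel subnet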

After setting up flannel, you can now run the kubernetes processes.

bash runmaster.sh

VXLAN Setting

/opt/bin/etcdctl set /coreos.com/network/config '{"Network":"172.20.0.0/16","Backend": { "Type": "vxlan", "VNI": 1 } }'

DNS

In Master

kubectl create -f dns-controller.yaml --namespace=kube-system

apiVersion: v1
kind: ReplicationController
metadata:
  name: kube-dns-v8
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    version: v8
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
      k8s-app: kube-dns
      version: v8
  template:
    metadata:
      labels:
        k8s-app: kube-dns
        version: v8
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: etcd
        image: gcr.io/google_containers/etcd:2.0.9
        resources:
          limits:
            cpu: 100m
            memory: 50Mi
        command:
        - /usr/local/bin/etcd
        - -data-dir
        - /var/etcd/data
        - -listen-client-urls
        - http://127.0.0.1:2379,http://127.0.0.1:4001
        - -advertise-client-urls
        - http://127.0.0.1:2379,http://127.0.0.1:4001
        - -initial-cluster-token
        - skydns-etcd
        volumeMounts:
        - name: etcd-storage
          mountPath: /var/etcd/data
      - name: kube2sky
        image: gcr.io/google_containers/kube2sky:1.11
        resources:
          limits:
            cpu: 100m
            memory: 50Mi
        args:
        - --kube_master_url=http://172.16.155.130:8080
        - -domain=cluster.local
      - name: skydns
        image: gcr.io/google_containers/skydns:2015-03-11-001
        resources:
          limits:
            cpu: 100m
            memory: 50Mi
        args:
          - -machines=http://localhost:4001
          - -addr=0.0.0.0:53
          - -domain=cluster.local
        ports:
          - containerPort: 53
            name: dns
            protocol: UDP
          - containerPort: 53
            name: dns-tcp
            protocol: TCP
      volumes:
      - name: etcd-storage
        emptyDir: {}
      dnsPolicy: Default

kubectl create -f dns-service.yaml --namespace=kube-system

apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 192.168.3.55
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP

runmaster.sh

/opt/bin/kube-apiserver --address=0.0.0.0 --port=8080 --etcd_servers=http://127.0.0.1:4001 --logtostderr=true --service-cluster-ip-range=192.168.3.0/26 --admission_control=NamespaceLifecycle,NamespaceAutoProvision,LimitRanger,ServiceAccount,ResourceQuota --service_account_key_file=/opt/kube/kube-serviceaccount.key --service_account_lookup=false &
sleep 5
/opt/bin/kube-controller-manager --master=127.0.0.1:8080 --service_account_private_key_file=/opt/kube/kube-serviceaccount.key --logtostderr=true &
sleep 5
/opt/bin/kube-scheduler --logtostderr=true --master=127.0.0.1:8080 &
sleep 5
/opt/bin/kube-proxy --master=http://127.0.0.1:8080 --logtostderr=true &
sleep 5
/opt/bin/kubelet --address=0.0.0.0 --port=10250 --hostname_override=127.0.0.1 --api_servers=http://127.0.0.1:8080 --logtostderr=true --cluster-dns=192.168.3.55 --cluster_domain=cluster.local &

In Node

run.sh

/opt/bin/kubelet --address=0.0.0.0 --port=10250 --hostname_override=$HOSTNAME --api_servers=http://172.16.155.130:8080 --logtostderr=true --cluster_dns=192.168.3.55 --cluster_domain=cluster.local &

/opt/bin/kube-proxy --master=http://172.16.155.130:8080 --logtostderr=true &

You can log in to a container and use the nslookup command.

root@webserver-cgmd1:/# nslookup inq/webserver
Server:     192.168.3.55
Address:    192.168.3.55#53

Name:   inq/webserver.svc.cluster.local
Address: 192.168.3.11

New Docker Installation

sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
sudo bash -c "echo deb https://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list"
sudo apt-get update
apt-get install -y lxc-docker

Adding Env to RC

In the yaml file we can add a DEMO_GREETING key to the container env. This helps us deploy a cluster with pre-defined parameters, such as a member service IP or DNS name.

env:
- name: GET_HOSTS_FROM
  value: env
- name: DEMO_GREETING
  value: "Hello from the environment"

Adding a volume from the host: reference the volume by name in volumeMounts first, then define that name under volumes. Note that volumes sits at the same level as containers.

      containers:
      - name: webserver
        image: 172.16.155.136:5000/uwebserverv6
        env:
        - name: GET_HOSTS_FROM
          value: env
        - name: DEMO_GREETING
          value: "Hello from the environment"
        ports:
        - containerPort: 8000
        volumeMounts:
        - name: hddstorage
          mountPath: /var/etcd/data

      volumes:
      - name: hddstorage
        hostPath:
          path: /data
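
A simple way to confirm the hostPath mapping (pod name again illustrative) is to write a file inside the container and look for it under /data on the node running the pod:

kubectl exec webserver-xxxxx -- sh -c 'echo hello > /var/etcd/data/test.txt'
# then, on the node hosting the pod:
cat /data/test.txt   # prints hello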

Combination

  • pass variable
  • use host storage
apiVersion: v1
kind: ReplicationController
metadata:
  name: webserver
  labels:
    name: webserver
spec:
  replicas: 2
  selector:
    name: webserver
  template:
    metadata:
      labels:
        name: webserver
    spec:
      containers:
      - name: webserver
        image: 172.16.155.136:5000/uwebserverv6
        env:
        - name: GET_HOSTS_FROM
          value: env
        - name: DEMO_GREETING
          value: "Hello from the environment"
        ports:
        - containerPort: 8000
        volumeMounts:
        - name: hddstorage
          mountPath: /var/etcd/data

      volumes:
      - name: hddstorage
        hostPath:
          path: /data

DNS And Namespace

The DNS is great: every tenant can use the plain domain name webserver, defined by the name of its Service. You don't need to worry about qualifying the name, such as webserver.{{Tenant Name}}, unless you want to hack around it.

The following is from experience.

Suppose you have three users: inq, inq1 and default, and each of them launches a container c1 behind a service with the DNS name webserver. Log in to inq's container and run curl http://webserver:8000. What happens? The request goes to inq's container, NOT inq1's NOR default's. So it really does have multi-tenancy, but without "good" isolation, since you can still run curl http://webserver.default:8000 or curl http://webserver.inq1:8000 to reach another tenant's services.
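
From inside inq's container the same behaviour can be checked with nslookup; the service and namespace names are the ones from this example:

nslookup webserver                          # resolves to inq's own service
nslookup webserver.default                  # resolves to default's service
nslookup webserver.inq1                     # resolves to inq1's service
nslookup webserver.inq1.svc.cluster.local   # fully qualified form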

Rolling-Update

Method 1

webserver-controller.yaml

apiVersion: v1
kind: ReplicationController
metadata:
  name: webserver-v1
  labels:
    name: webserver
    version: v1
spec:
  replicas: 2
  selector:
    name: webserver
    version: v1
  template:
    metadata:
      labels:
        name: webserver
        version: v1
    spec:
      containers:
      - name: webserver
        image: 172.16.155.136:5000/uwebserverv6
        env:
        - name: GET_HOSTS_FROM
          value: env
        - name: mongocluster
          value: "mongodb01, mongodb02"
        ports:
        - containerPort: 8000
        volumeMounts:
        - name: hddstorage
          mountPath: /var/etcd/data

      volumes:
      - name: hddstorage
        hostPath:
          path: /data

launch the controller

kubectl create -f webserver-controller.yaml

launch service

webserver-service.yaml

apiVersion: v1
kind: Service
metadata:
    name: webserver
    labels:
      name: webserver
spec:
  type: NodePort
  ports:
    - port: 8000
      nodePort: 31800
  selector:
    name: webserver

execute it.

kubectl create -f webserver-service.yaml

upgrade to another yaml

webserver-controller-up.yaml

apiVersion: v1
kind: ReplicationController
metadata:
  name: webserver-v2
  labels:
    name: webserver
    version: v2
spec:
  replicas: 2
  selector:
    name: webserver
    version: v2
  template:
    metadata:
      labels:
        name: webserver
        version: v2
    spec:
      containers:
      - name: webserver
        image: 172.16.155.136:5000/uwebserverv6
        env:
        - name: GET_HOSTS_FROM
          value: env
        - name: mongocluster
          value: "mongodb01, mongodb02, mongodb03"
        ports:
        - containerPort: 8000
        volumeMounts:
        - name: hddstorage
          mountPath: /var/etcd/data

      volumes:
      - name: hddstorage
        hostPath:
          path: /data

rolling update now

kubectl rolling-update webserver-v1  -f webserver-controller-up.yaml

You can start checking the result; you will see the rolling upgrade happen behind the same service IP.

 root@kuber:~/deploy/envadd# curl http://192.168.3.45:8000
webserver-v1-i0b0g
 root@kuber:~/deploy/envadd# curl http://192.168.3.45:8000
webserver-v2-9h0t3

Note that the selector, webserver, is the same among webserver-service.yaml, webserver-controller.yaml, and webserver-controller-up.yaml.

Method 2

kubectl rolling-update webserver --image=172.16.155.136:5000/uwebserverv7

The webserver name is the same as before, but --image is new.
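
Either way, you can watch the update from another terminal; these are just convenience checks, not part of the original steps:

kubectl get rc                   # old and new controllers exist during the update
kubectl get po -w                # pods are replaced one by one
kubectl describe svc webserver   # endpoints change behind the same service IP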

Docker Troubleshooting

root@dockerhubc:~# docker exec -it a1135e3c72e2 sh
FATA[0000] Error response from daemon: Container a1135e3c72e2 is not running

root@dockerhubc:~# docker start a1135e3c72e2
add-apt-repository cannot be found

apt-get install python-software-properties
apt-get install software-properties-common

Problem

After my setup, the following error appears:

E0319 06:30:30.290806    2975 server.go:144] Starting health server failed: listen tcp 127.0.0.1:10249: bind: address already in use

MongoDB Cluster

Install Mongodb in Ubuntu 16.04

apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv EA312927
echo "deb http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.2 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.2.list

apt-get update

apt-get install -y mongodb-org --force-yes

create running directory

sudo mkdir -p /var/lib/mongodb/rs-a
sudo chown -R mongodb:mongodb /var/lib/mongodb/rs-a

/etc/hosts

root@mongodb01:~# cat /etc/hosts
127.0.0.1   localhost

# The following lines are desirable for IPv6 capable hosts
::1     localhost ip6-localhost ip6-loopback
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
172.16.155.149 mongodb01
172.16.155.150 mongodb02

/etc/mongod.conf

storage:
  dbPath: /var/lib/mongodb/rs-a
  journal:
    enabled: true

systemLog:
  destination: file
  logAppend: true
  path: /var/log/mongodb/mongod.log

# network interfaces
net:
  port: 27017
  bindIp: 0.0.0.0



replication:
   replSetName: rs-a

restart Service

root@mongodb01:~# service mongod restart

The running process looks like this; it helps us run it in a container later.

root@mongodb01:~# ps axu|grep mongo
mongodb    6838  0.9  9.6 655852 96592 ?        Ssl  16:52   0:09 /usr/bin/mongod --quiet --config /etc/mongod.conf

Cluster Mongodb

You can execute commands directly from the console without logging in to the mongo CLI, which makes scripting easy.

root@mongodb01:~# mongo mongodb01:27017 --eval 'rs.initiate()'
root@mongodb01:~# mongo mongodb01:27017 --eval 'rs.add("mongodb02:27017")'
root@mongodb01:~# mongo mongodb01:27017 --eval 'rs.remove("mongodb02:27017")'
root@mongodb01:~# mongo mongodb01:27017 --eval 'rs.status()'

Using SSH

Take care of the quoting: when going through SSH, the inner double quotes have to be escaped as \".

root@mongodb02:~# ssh root@mongodb01 "mongo mongodb01:27017 --eval 'rs.initiate()' --quiet"
root@mongodb02:~# ssh root@mongodb01 "mongo mongodb01:27017 --eval 'rs.add(\"mongodb02:27017\")' --quiet"

root@mongodb02:~# ssh root@mongodb01 "mongo mongodb01:27017 --eval 'rs.remove(\"mongodb02:27017\")' --quiet"

You have to talk to the PRIMARY to run replica-set commands. You can find out which member is the PRIMARY with the following command, parsing members.stateStr:

root@mongodb01:~# mongo mongodb01:27017 --eval 'rs.initiate();rs.status()' --quiet
{
    "set" : "rs-a",
    "date" : ISODate("2017-03-27T09:19:49.585Z"),
    "myState" : 1,
    "term" : NumberLong(1),
    "heartbeatIntervalMillis" : NumberLong(2000),
    "members" : [
        {
            "_id" : 0,
            "name" : "mongodb01:27017",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",
            "uptime" : 1634,
            "optime" : {
                "ts" : Timestamp(1490606344, 1),
                "t" : NumberLong(1)
            },
            "optimeDate" : ISODate("2017-03-27T09:19:04Z"),
            "electionTime" : Timestamp(1490604895, 2),
            "electionDate" : ISODate("2017-03-27T08:54:55Z"),
            "configVersion" : 10,
            "self" : true
        },
        {
            "_id" : 1,
            "name" : "mongodb02:27017",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 40,
            "optime" : {
                "ts" : Timestamp(1490606344, 1),
                "t" : NumberLong(1)
            },
            "optimeDate" : ISODate("2017-03-27T09:19:04Z"),
            "lastHeartbeat" : ISODate("2017-03-27T09:19:48.985Z"),
            "lastHeartbeatRecv" : ISODate("2017-03-27T09:19:48.998Z"),
            "pingMs" : NumberLong(0),
            "configVersion" : 10
        }
    ],
    "ok" : 1
}

Add --quiet to remove unnecessary messages so the output can be decoded as JSON.

More for mongo status

You can access an attribute directly, as follows.

root@e7ad0459ca95:/# ssh root@172.17.0.2 "mongo 172.17.0.3:27017 --eval 'rs.status().members[0].stateStr' --quiet"

PRIMARY
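
If a script only needs to know which member is currently the primary, a small loop over the hosts (hostnames as configured in /etc/hosts above) is enough; this is a sketch, not from the original post:

for h in mongodb01 mongodb02; do
  if [ "$(mongo $h:27017 --eval 'db.isMaster().ismaster' --quiet)" = "true" ]; then
    echo "$h is PRIMARY"
  fi
done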

No need to switch to the admin database or to create an openssl keyfile.

http://gogosatellite.blogspot.tw/search?q=mongodb

Playing With 16.04

systemd

cat /lib/systemd/system/mongodb.service

[Unit]
Description=MongoDB Database Service
Wants=network.target
After=network.target

[Service]
ExecStart=/usr/bin/mongod --config /etc/mongod.conf
ExecReload=/bin/kill -HUP $MAINPID
Restart=always
User=mongodb
Group=mongodb
StandardOutput=syslog
StandardError=syslog

[Install]
WantedBy=multi-user.target
sudo systemctl start mongodb
systemctl stop mongodb
systemctl enable mongodb.service
systemctl daemon-reload
root@mongodb001:~# journalctl -u mongodb.service  --since today

Install MongoDB 3.0 on Ubuntu 16.04

apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv EA312927
echo "deb http://repo.mongodb.org/apt/ubuntu trusty/mongodb-org/3.0 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.0.list
apt-get update
apt-get install mongodb-org

However, the package targets Ubuntu Trusty, so it still ships an upstart script, and we need to convert it to systemd as discussed above.

MongoDB Operation

rs-a:PRIMARY> db.runCommand({isMaster:1})
{
    "hosts" : [
        "e73f1e761874:27017",
        "a61d2871e2a4:27017"
    ],
    "setName" : "rs-a",
    "setVersion" : 10,
    "ismaster" : true,
    "secondary" : false,
    "primary" : "e73f1e761874:27017",
    "me" : "e73f1e761874:27017",
    "electionId" : ObjectId("7fffffff0000000000000004"),
    "maxBsonObjectSize" : 16777216,
    "maxMessageSizeBytes" : 48000000,
    "maxWriteBatchSize" : 1000,
    "localTime" : ISODate("2017-03-29T05:12:40.090Z"),
    "maxWireVersion" : 4,
    "minWireVersion" : 0,
    "ok" : 1
}

How to generate a file from a template embedded in Golang code

package main

import (
    "fmt"
    "html/template"
    "os"
)

type Person struct {
    UserName string
}

func main() {
    //type 1
    var tmpl = `
    123
    aaaa aaa   aaa aa  %s aaaaaa
    bbbb bbb   bbb bb     bbbbbb
    cccc ccc   ccc cc     cccccc
    `
    names := []string{"john", "jim"}
    for _, v := range names {
        fmt.Printf(tmpl, v)
        fmt.Printf("\n")
    }
    //type 2
    fmt.Println("-------------")

    t := template.New("fieldname example")
    t, _ = t.Parse("hello {{.UserName}}!")
    p := Person{UserName: "Astaxie"}
    t.Execute(os.Stdout, p)
    fmt.Println("-------------")
    //type 3
    var tmpl2 = `
hello {{.UserName}}! what is that!!
  is that good to drink ?
    `
    tt := template.New("fieldname example")
    tt, _ = tt.Parse(tmpl2)
    pp := Person{UserName: "Astaxie"}
    tt.Execute(os.Stdout, pp)

}

The output is:

root@golang:~/golang/template# go run t.go

    123
    aaaa aaa   aaa aa  john aaaaaa
    bbbb bbb   bbb bb     bbbbbb
    cccc ccc   ccc cc     cccccc


    123
    aaaa aaa   aaa aa  jim aaaaaa
    bbbb bbb   bbb bb     bbbbbb
    cccc ccc   ccc cc     cccccc

-------------
hello Astaxie!-------------

hello Astaxie! what is that!!
  is that good to drink ?

where type 3 is our choice.

Docker Etcd

docker run -p 2379:2379 --name etcd quay.io/coreos/etcd:v3.0.16 /usr/local/bin/etcd -advertise-client-urls http://0.0.0.0:2379 -listen-client-urls http://0.0.0.0:2379

So great.
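
A quick check that the container answers on the published port (not in the original; plain HTTP and the v2 keys API assumed):

curl http://127.0.0.1:2379/version
curl http://127.0.0.1:2379/v2/keys/test -XPUT -d value="hello"
curl http://127.0.0.1:2379/v2/keys/test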

Install kubernetes 1.6

make quick-release

Upgrade docker to 1.21.1

Put the following line in /etc/apt/sources.list: deb http://cz.archive.ubuntu.com/ubuntu trusty main

apt-get -y update
apt-get -y install apt-transport-https ca-certificates
apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
echo "deb https://apt.dockerproject.org/repo ubuntu-trusty experimental" > /etc/apt/sources.list.d/docker.list
apt-get -y update
apt-get purge lxc-docker
apt-cache policy docker-engine
apt-get -y install linux-image-extra-$(uname -r)
apt-get install libsystemd-journal0
vim /etc/apt/sources.list
apt-get update
apt-get install libsystemd-journal0
apt-get -y install docker-engine
docker --version

ETCD3

ETCDCTL_API=3 /opt/bin/etcdctl --endpoints=127.0.0.1:2379  --insecure-skip-tls-verify=true get /haha
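
A minimal round trip with the same flags, first putting a value and then reading it back:

ETCDCTL_API=3 /opt/bin/etcdctl --endpoints=127.0.0.1:2379 --insecure-skip-tls-verify=true put /haha hello
ETCDCTL_API=3 /opt/bin/etcdctl --endpoints=127.0.0.1:2379 --insecure-skip-tls-verify=true get /haha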

To watch log

journalctl -u kube-apiserver.service -f

-f follows the latest messages.

Install Kube-DNS in K8S 1.6

Thanks to this thread:

https://github.com/kubernetes/kubernetes/issues/27442

first of all, create openssl.conf

vim /etc/kubernetes/openssl.conf

[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]

[v3_req]

# Extensions to add to a certificate request
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names

#List of Subject Alternate Names the server will be known as - list out short and FQDN versions
[alt_names]
DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.cluster.local
DNS.5 = huff-kubmst01
DNS.6 = huff-kubmst01.huffnet.org
DNS.7 = kubmst01
DNS.8 = kubmst01.huffnet.org
IP.1 = 172.18.0.1
#IP.1 = 172.16.155.157
#Add more IPs if your server will serve on multiple IPs like IP.2, IP.3, etc

You need this openssl config to make SkyDNS run well. 172.18.0.1 is the kubernetes service IP, defined by --service-cluster-ip-range=172.18.0.0/16 on the api-server, and you can get it with

root@kuber16master:~/dns# kubectl get svc
NAME         CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   172.18.0.1   <none>        443/TCP   1d

write it as a script.

mkdir /etc/kubernetes/certs
sleep 3
chmod 744 /etc/kubernetes/certs
sleep 3
openssl genrsa -out /etc/kubernetes/certs/ca.key 2048
sleep 3
openssl req -x509 -new -nodes -key /etc/kubernetes/certs/ca.key -subj "/CN=kubernetes" -days 10000 -out /etc/kubernetes/certs/ca.crt -config /etc/kubernetes/openssl.conf
sleep 3
openssl genrsa -out /etc/kubernetes/certs/server.key 2048
sleep 3
openssl req -new -key /etc/kubernetes/certs/server.key -subj "/CN='kubernetes'" -out /etc/kubernetes/certs/server.csr
sleep 3
openssl x509 -req -in /etc/kubernetes/certs/server.csr -CA /etc/kubernetes/certs/ca.crt -CAkey /etc/kubernetes/certs/ca.key -CAcreateserial -out /etc/kubernetes/certs/server.cert -days 10000 -extensions v3_req -extfile /etc/kubernetes/openssl.conf
sleep 3
rm /srv/kubernetes/* -rf
sleep 3
cp /etc/kubernetes/certs/* /srv/kubernetes/. -rf
sleep 3
chmod 440 /srv/kubernetes/*

Note that CN=kubernetes and the IP entry IP.1=172.18.0.1 are necessary, or SkyDNS will fail. Then execute the script.
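
Before wiring the certificates into the apiserver, it is worth a quick sanity check (not in the original) that the generated server certificate really carries the DNS names and the 172.18.0.1 service IP:

openssl x509 -in /srv/kubernetes/server.cert -noout -text | grep -A1 "Subject Alternative Name"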

vi /lib/systemd/system/kube-apiserver.service

[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
User=root
ExecStart=/opt/bin/kube-apiserver \
 --insecure-bind-address=0.0.0.0 \
 --insecure-port=8080 \
 --etcd-servers=http://172.16.155.157:2379\
 --logtostderr=false \
 --allow-privileged=false \
 --service-cluster-ip-range=172.18.0.0/16 \
 --admission-control=NamespaceLifecycle,ServiceAccount,LimitRanger,SecurityContextDeny,ResourceQuota \
 --service-node-port-range=30000-32767 \
 --advertise-address=172.16.155.157 \
 --v=6 \
 --storage-backend="etcd2" \
 --log-dir="/var/log/kubernetes" \
 --client-ca-file=/srv/kubernetes/ca.crt \
 --tls-private-key-file=/srv/kubernetes/server.key \
 --tls-cert-file=/srv/kubernetes/server.cert \
 --service_account_key_file=/srv/kubernetes/server.key \
 --secure-port=6443

Restart=on-failure
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

vi /lib/systemd/system/kube-controller-manager.service

[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
User=root
ExecStart=/opt/bin/kube-controller-manager \
  --master=172.16.155.157:8080 \
  --root-ca-file=/srv/kubernetes/ca.crt \
  --service-account-private-key-file=/srv/kubernetes/server.key \
  --logtostderr=false \
  --log-dir="/var/log/kubernetes" \
  --v=3
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

vi /lib/systemd/system/kube-scheduler.service

[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
User=root
ExecStart=/opt/bin/kube-scheduler \
  --logtostderr=true \
  --master=172.16.155.157:8080
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

vi /lib/systemd/system/kube-proxy.service

[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
ExecStart=/opt/bin/kube-proxy  \
  --hostname-override=172.16.155.157 \
  --master=http://172.16.155.157:8080 \
  --logtostderr=true
Restart=on-failure

[Install]
WantedBy=multi-user.target

vi /lib/systemd/system/kubelet.service

[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
ExecStart=/opt/bin/kubelet \
  --hostname-override=172.16.155.157 \
  --api-servers=http://172.16.155.157:8080 \
  --register-node=true \
  --logtostderr=false \
  --log-dir="/var/log/kubernetes" \
  --v=3 \
  --cluster_dns=172.18.0.5 \
  --cluster_domain=cluster.local
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target

Here we set the DNS IP to 172.18.0.5; remember this IP, it will appear again when setting up SkyDNS. 172.16.155.157 is the host IP.

After that, restart all the kube processes.

systemctl stop kube-apiserver.service kube-controller-manager.service kube-scheduler.service kube-proxy.service kubelet.service
systemctl start kube-apiserver.service kube-controller-manager.service kube-scheduler.service kube-proxy.service kubelet.service
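
Later, once any pod is running, you can confirm that the kubelet injects this DNS server into it (the pod name below is illustrative):

kubectl exec mypod -- cat /etc/resolv.conf   # replace mypod with a real pod name
# expect a "nameserver 172.18.0.5" line and a search path ending in cluster.local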

Running SkyDNS

You can find the sample files in the k8s 1.6 source tree:

kubernetes/cluster/addons/dns

Now create the ServiceAccount and ConfigMap:

kubectl create -f kubedns-cm.yaml -n kube-system
kubectl create -f kubedns-sa.yaml -n kube-system

where

root@kuber16master:~/dns# cat kubedns-cm.yaml


apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
root@kuber16master:~/dns# cat kubedns-sa.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile

kubedns-controller.yaml

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      volumes:
      - name: kube-dns-config
        configMap:
          name: kube-dns
          optional: true
      containers:
      - name: kubedns
        image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.1
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthcheck/kubedns
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --domain=cluster.local
        - --dns-port=10053
        - --config-dir=/kube-dns-config
        - --v=2
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
        - containerPort: 10055
          name: metrics
          protocol: TCP
        volumeMounts:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
        image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.1
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - -v=2
        - -logtostderr
        - -configDir=/etc/k8s/dns/dnsmasq-nanny
        - -restartDnsmasq=true
        - --
        - -k
        - --cache-size=1000
        - --log-facility=-
        - --server=/cluster.local/127.0.0.1#10053
        - --server=/in-addr.arpa/127.0.0.1#10053
        - --server=/ip6.arpa/127.0.0.1#10053
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
          requests:
            cpu: 150m
            memory: 20Mi
        volumeMounts:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
        image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.1
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A
        ports:
        - containerPort: 10054
          name: metrics
          protocol: TCP
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
      dnsPolicy: Default  # Don't use cluster DNS.
      serviceAccountName: kube-dns
kubectl create -f kubedns-controller.yaml -n kube-system

kubedns-svc.yaml

apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 172.18.0.5
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP

We just modify $DOMAIN_IP in the sample file.

kubectl create -f kubedns-svc.yaml -n kube-system
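
A quick functional test of the DNS service (assuming a busybox image can be pulled; this throwaway pod is not part of the original steps):

kubectl run -i -t dnstest --image=busybox --restart=Never -- nslookup kubernetes.default
# kubernetes.default should resolve to 172.18.0.1 via the 172.18.0.5 nameserver
kubectl delete po dnstest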

Debug

  1. Make sure kube-dns is running well.
root@kuber16master:~/dns# docker ps
CONTAINER ID        IMAGE                                                                                                                          COMMAND                  CREATED             STATUS              PORTS               NAMES
0fe040f82081        gcr.io/google_containers/k8s-dns-sidecar-amd64@sha256:d33a91a5d65c223f410891001cd379ac734d036429e033865d700a4176e944b0         "/sidecar --v=2 --log"   54 minutes ago      Up 54 minutes                           k8s_sidecar_kube-dns-215283998-ntvdg_kube-system_98116b9a-1a6b-11e7-aaeb-000c29ac079e_0
f518b378a3bd        gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64@sha256:89c9a1d3cfbf370a9c1a949f39f92c1dc2dbe8c3e6cc1802b7f2b48e4dfe9a9e   "/dnsmasq-nanny -v=2 "   54 minutes ago      Up 54 minutes                           k8s_dnsmasq_kube-dns-215283998-ntvdg_kube-system_98116b9a-1a6b-11e7-aaeb-000c29ac079e_0
6caeaf2cdd45        gcr.io/google_containers/k8s-dns-kube-dns-amd64@sha256:33914315e600dfb756e550828307dfa2b21fb6db24fe3fe495e33d1022f9245d        "/kube-dns --domain=c"   54 minutes ago      Up 54 minutes                           k8s_kubedns_kube-dns-215283998-ntvdg_kube-system_98116b9a-1a6b-11e7-aaeb-000c29ac079e_0
05dc4974adbb        gcr.io/google_containers/pause-amd64:3.0                                                                                       "/pause"                 54 minutes ago      Up 54 minutes                           k8s_POD_kube-dns-215283998-ntvdg_kube-system_98116b9a-1a6b-11e7-aaeb-000c29ac079e_0

You will see kube-dns running well if your CA is set up correctly.

root@kuber16master:~/dns# docker logs 6caeaf2cdd45 -f
I0406 01:51:48.101334       1 dns.go:49] version: v1.5.2-beta.0+$Format:%h$
I0406 01:51:48.102463       1 server.go:70] Using configuration read from directory: /kube-dns-config%!(EXTRA time.Duration=10s)
I0406 01:51:48.102529       1 server.go:112] FLAG: --alsologtostderr="false"
I0406 01:51:48.102553       1 server.go:112] FLAG: --config-dir="/kube-dns-config"
I0406 01:51:48.102581       1 server.go:112] FLAG: --config-map=""
I0406 01:51:48.102594       1 server.go:112] FLAG: --config-map-namespace="kube-system"
I0406 01:51:48.102610       1 server.go:112] FLAG: --config-period="10s"
I0406 01:51:48.102625       1 server.go:112] FLAG: --dns-bind-address="0.0.0.0"
I0406 01:51:48.102645       1 server.go:112] FLAG: --dns-port="10053"
I0406 01:51:48.102660       1 server.go:112] FLAG: --domain="cluster.local."
I0406 01:51:48.102674       1 server.go:112] FLAG: --federations=""
I0406 01:51:48.102687       1 server.go:112] FLAG: --healthz-port="8081"
I0406 01:51:48.102704       1 server.go:112] FLAG: --initial-sync-timeout="1m0s"
I0406 01:51:48.102724       1 server.go:112] FLAG: --kube-master-url=""
I0406 01:51:48.102738       1 server.go:112] FLAG: --kubecfg-file=""
I0406 01:51:48.102750       1 server.go:112] FLAG: --log-backtrace-at=":0"
I0406 01:51:48.102763       1 server.go:112] FLAG: --log-dir=""
I0406 01:51:48.102775       1 server.go:112] FLAG: --log-flush-frequency="5s"
I0406 01:51:48.102794       1 server.go:112] FLAG: --logtostderr="true"
I0406 01:51:48.102808       1 server.go:112] FLAG: --nameservers=""
I0406 01:51:48.102819       1 server.go:112] FLAG: --stderrthreshold="2"
I0406 01:51:48.102830       1 server.go:112] FLAG: --v="2"
I0406 01:51:48.102841       1 server.go:112] FLAG: --version="false"
I0406 01:51:48.102855       1 server.go:112] FLAG: --vmodule=""
I0406 01:51:48.102897       1 server.go:175] Starting SkyDNS server (0.0.0.0:10053)
I0406 01:51:48.105859       1 server.go:197] Skydns metrics enabled (/metrics:10055)
I0406 01:51:48.105894       1 dns.go:147] Starting endpointsController
I0406 01:51:48.105917       1 dns.go:150] Starting serviceController
I0406 01:51:48.107424       1 logs.go:41] skydns: ready for queries on cluster.local. for tcp://0.0.0.0:10053 [rcache 0]
I0406 01:51:48.107464       1 logs.go:41] skydns: ready for queries on cluster.local. for udp://0.0.0.0:10053 [rcache 0]
  2. SkyDNS combines the concepts of secrets and serviceaccounts; you can use the following command to check it.
root@kuber16master:~/dns# kubectl get po kube-dns-215283998-ntvdg -n kube-system -o yaml
apiVersion: v1
kind: Pod
metadata:
  annotations:
    kubernetes.io/created-by: |
      {"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"ReplicaSet","namespace":"kube-system","name":"kube-dns-215283998","uid":"980db9b4-1a6b-11e7-aaeb-000c29ac079e","apiVersion":"extensions","resourceVersion":"43419"}}
    scheduler.alpha.kubernetes.io/critical-pod: ""
  creationTimestamp: 2017-04-06T01:51:47Z
  generateName: kube-dns-215283998-
  labels:
    k8s-app: kube-dns
    pod-template-hash: "215283998"
  name: kube-dns-215283998-ntvdg
  namespace: kube-system
  ownerReferences:
  - apiVersion: extensions/v1beta1
    blockOwnerDeletion: true
    controller: true
    kind: ReplicaSet
    name: kube-dns-215283998
    uid: 980db9b4-1a6b-11e7-aaeb-000c29ac079e
  resourceVersion: "43448"
  selfLink: /api/v1/namespaces/kube-system/pods/kube-dns-215283998-ntvdg
  uid: 98116b9a-1a6b-11e7-aaeb-000c29ac079e
spec:
  containers:
  - args:
    - --domain=cluster.local
    - --dns-port=10053
    - --config-dir=/kube-dns-config
    - --v=2
    env:
    - name: PROMETHEUS_PORT
      value: "10055"
    image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.1
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 5
      httpGet:
        path: /healthcheck/kubedns
        port: 10054
        scheme: HTTP
      initialDelaySeconds: 60
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 5
    name: kubedns
    ports:
    - containerPort: 10053
      name: dns-local
      protocol: UDP
    - containerPort: 10053
      name: dns-tcp-local
      protocol: TCP
    - containerPort: 10055
      name: metrics
      protocol: TCP
    readinessProbe:
      failureThreshold: 3
      httpGet:
        path: /readiness
        port: 8081
        scheme: HTTP
      initialDelaySeconds: 3
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 5
    resources:
      limits:
        memory: 170Mi
      requests:
        cpu: 100m
        memory: 70Mi
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /kube-dns-config
      name: kube-dns-config
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-dns-token-gg40v

The k8s automatically injects a CA into the container at

/var/run/secrets/kubernetes.io/serviceaccount

You can check kube-dns-token-gg40v; the token is automatically generated by k8s when you set up kube-apiserver as above, with ServiceAccount in --admission-control.

Create An APP to access k8s with CA

Create a ConfigMap named kube-dsd under the kube-system namespace

root@kuber16master:~/testsecrets# cat kubedsd-cm.yaml

apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dsd
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
kubectl create -f kubedsd-cm.yaml -n kube-system

Create Service Account

root@kuber16master:~/testsecrets# cat kubedsd-sa.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dsd
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
kubectl create -f kubedsd-sa.yaml -n kube-system

Setup Controller

root@kuber16master:~/testsecrets# cat kubedsd-controller.yaml

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-dsd
  namespace: kube-system
  labels:
    k8s-app: kube-dsd
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:

  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dsd
  template:
    metadata:
      labels:
        k8s-app: kube-dsd
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      volumes:
      - name: kube-dsd-config
        configMap:
          name: kube-dsd
          optional: true
      containers:
      - name: kubedns
        image: 172.16.155.136:5000/uwebserverv6
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        ports:
        - containerPort: 8000
          name: dsd-local
          protocol: TCP
        volumeMounts:
        - name: kube-dsd-config
          mountPath: /kube-dsd-config
      dnsPolicy: Default  # Don't use cluster DNS.
      serviceAccountName: kube-dsd
kubectl create -f kubedsd-controller.yaml -n kube-system

where /kube-dsd-config is the main setting that enables the injection, and you can see

root@kuber16master:~/testsecrets# /opt/bin/etcdctl ls /registry/secrets/kube-system
/registry/secrets/kube-system/kube-dsd-token-w66gc

The k8s automatically generates the kube-dsd token on the master and injects it into the container.

Connect to the container; now we are ready to access the k8s API.

TOKEN=`cat /var/run/secrets/kubernetes.io/serviceaccount/token`

curl -v -k -H "Authorization: Bearer $TOKEN" https://172.18.0.1:443/api/v1/endpoints

Now you can access the K8S API through a secure process. (The Bearer keyword is hardcoded in the k8s code.)
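
Instead of -k you can also validate the apiserver with the injected CA file from the same mount; a small sketch:

CA=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
curl --cacert $CA -H "Authorization: Bearer $TOKEN" https://172.18.0.1:443/api/v1/namespaces
# validation succeeds because the server certificate's SAN list includes 172.18.0.1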

Why is that?

k8s automatically injects the CA and token into the container, based on our previous settings.

kubectl get po kube-dsd-4112003190-vpvf4 -n kube-system -o yaml

.
.
    volumeMounts:
    - mountPath: /kube-dsd-config
      name: kube-dsd-config
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-dsd-token-w66gc
      readOnly: true

Note that we set --admission-control=NamespaceLifecycle,ServiceAccount,LimitRanger,SecurityContextDeny,ResourceQuota and quite a lot of SSL settings to enable the service account.

StatefulSet (PetSet)

PetSet was replaced by StatefulSet after k8s 1.5; don't use PetSet in Kubernetes 1.6. The following is an example.

apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: nginx
spec:
  serviceName: "nginx"
  replicas: 2
  template:
    metadata:
      labels:
        app: nginx
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
    spec:
      containers:
      - name: nginx
        image: 172.16.155.136:5000/uwebserverv6
        ports:
        - containerPort: 8000
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
  - port: 8000
    name: nginx
  clusterIP: None
  selector:
    app: nginx
        

Take care with the network setting: a StatefulSet has its own Service, and the Service must define clusterIP (here clusterIP: None so that the pod names are resolvable through DNS), or you will get error messages like the following.

-A KUBE-SERVICES -d 172.18.6.120/32 -p tcp -m comment --comment "default/nginx: cluster IP" -m tcp --dport 8000 -j KUBE-SVC-4N57TFCL4MD7ZTDA
-A KUBE-SERVICES -d 172.18.6.120/32 -p tcp -m comment --comment "default/nginx: has no endpoints" -m tcp --dport 8000 -j REJECT --reject-with icmp-port-unreachable

And the DNS also records nginx as the service name.

root@webserver2-7qdb5:/# curl http://nginx:8000
nginx-1
 root@webserver2-7qdb5:/# curl http://nginx:8000
nginx-0

Accessing a specific container in a StatefulSet

Since the StatefulSet describes a cluster, requests normally go to the clustered containers as a group. But you can also connect to a specific stateful container via {podname}.{servicename}; here the StatefulSet name equals the service name (nginx). BUT you have to set clusterIP: None.

curl http://nginx-1.nginx:8000

Now you can reach a specific container via nginx-1.nginx, or the whole set via nginx for load balancing.

Set ClusterIP:None for DNS PodName access

apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
  - port: 8000
    name: nginx
  clusterIP: None
  selector:
    app: nginx

If you set clusterIP to something other than None, the DNS pod names are not resolvable:

root@webserver2-7qdb5:/# curl http://nginx-0.nginx:8000
curl: (6) Could not resolve host: nginx-0.nginx

StatefulSet Behavior

  • Shut down one server so that one clustered container goes offline. k8s will not create a replacement container.

After the shutdown, you will see the status become Unknown after 5+ minutes.

root@kuberm:~/kube1.6config/deploy/statefuleset# kubectl get po
NAME      READY     STATUS    RESTARTS   AGE
nginx-0   1/1       Unknown   0          8m
nginx-1   1/1       Running   0          8m
root@kuberm:~/kube1.6config/deploy/statefuleset# kubectl describe po nginx-0
Name:               nginx-0

Conditions:
  Type      Status
  Initialized   True
  Ready     False
  PodScheduled  True

The Ready condition becomes False quickly (< 2 mins).

In most situations we do not want a new container to be created in this state.

  • When the host comes back, you will see nginx-0 come back as well.
root@kuberm:~/kube1.6config/deploy/statefuleset# kubectl get po
NAME      READY     STATUS    RESTARTS   AGE
nginx-0   1/1       Running   0          10s
nginx-1   1/1       Running   0          12m
  • host down and adding a new cluster member
root@kuberm:~/kube1.6config/deploy/statefuleset# kubectl get po
NAME      READY     STATUS    RESTARTS   AGE
nginx-0   1/1       Unknown   0          10m
nginx-1   1/1       Running   0          23m
root@kuberm:~/kube1.6config/deploy/statefuleset# kubectl get statefulsets  nginx
NAME      DESIRED   CURRENT   AGE
nginx     2         1         23m
kubectl scale statefulsets nginx --replicas=3

However, this does not work, since k8s freezes the cluster when the cluster status is not healthy.

  • kube scaling

You must scale it in an all-healthy state.

kubectl scale statefulsets nginx --replicas=3
  • Freeze State, Cannot ScaleUp and ScaleDown

A big problem: if any part of the system is unhealthy, scaling does not work.
As in our previous example, nginx-0 is in the Unknown status, so the system freezes in this state (cannot add and cannot delete). We have to recover nginx-0 first with the following command.

kubectl delete pods nginx-0 --grace-period=0 --force

The --grace-period=0 --force flags are necessary here.

root@kuberm:~/kube1.6config/deploy/statefuleset# kubectl get po
NAME      READY     STATUS    RESTARTS   AGE
nginx-0   1/1       Running   0          4m
nginx-1   1/1       Running   0          57m

Now you can scale it up and down. It's a very good concept for avoiding clustering problems: you fix it first (delete), then change it (scale).

After the original host comes back, the nginx-0 container will not move back to it; it stays on the current host. (great!!!)

  • delete statefulset
root@kuberm:~/kube1.6config/deploy/statefuleset# kubectl get statefulsets
NAME      DESIRED   CURRENT   AGE
nginx     2         2         2h
kubectl delete statefulsets nginx
  • It satisfies my cluster concept, so we can now use it for clustered applications.

StatefulSet Combines with PersistentVolume

We can create persistent volumes (pv) for the user, which are then scheduled to containers.

create pv.yaml

kind: PersistentVolume
apiVersion: v1
metadata:
  name: pv0001
  labels:
    foo: foo
    bar: bar
spec:
  accessModes:
    - ReadWriteOnce
  capacity:
    storage: 5Gi
  hostPath:
    path: /data/pv0001/
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: pv0002
  labels:
    foo: foo
    bar: bar
spec:
  accessModes:
    - ReadWriteOnce
  capacity:
    storage: 5Gi
  hostPath:
    path: /data/pv0002/

Create pv.

kubectl create -f pv.yaml

Check pv.

root@kuberm:~/kube1.6config/deploy/statefulesetStorage# kubectl get pv
NAME      CAPACITY   ACCESSMODES   RECLAIMPOLICY   STATUS      CLAIM                                       STORAGECLASS   REASON    AGE
pv0001    5Gi        RWO           Retain          Available
pv0002    5Gi        RWO           Retain          Available

Here the directories for pv0001 and pv0002 will not be created on a physical node until a container is scheduled to that node.

The following is the StatefulSet and Persistent-Store yaml file.

apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: nginxstor
spec:
  serviceName: "nginxstor"
  replicas: 2
  template:
    metadata:
      labels:
        app: nginxstor
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
    spec:
      containers:
      - name: nginxstor
        image: 172.16.155.136:5000/uwebserverv6
        ports:
          - containerPort: 8000
        volumeMounts:
          - name: db-persistent-storage
            mountPath: /data/db
  volumeClaimTemplates:
  - metadata:
      name: db-persistent-storage
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi

---
apiVersion: v1
kind: Service
metadata:
  name: nginxstor
  labels:
    app: nginxstor
spec:
  ports:
  - port: 8000
    name: nginxstor
  clusterIP: None
  selector:
    app: nginxstor

Note the volumeClaimTemplates section.

Each StatefulSet container is bound to one of the pvs and scheduled to a certain host; the volume is then created on that host and the container started there. To see the persistentVolumes and containers:

root@kuberm:~/kube1.6config/deploy/statefulesetStorage# kubectl get pvc
NAME                                STATUS    VOLUME    CAPACITY   ACCESSMODES   STORAGECLASS   AGE
db-persistent-storage-nginxstor-0   Bound     pv0001    5Gi        RWO                          28m
db-persistent-storage-nginxstor-1   Bound     pv0002    5Gi        RWO                          22m
db-persistent-storage-nginxstor-2   Bound     pv0003    5Gi        RWO                          18m

If you don't have enough persistentVolumes (here 2), the third container will fail, since no persistent volume is available. This suits a StatefulSet well, since StatefulSet pods stick to their nodes and do not move around arbitrarily when a node fails or a container crashes.

delete pvc, and pv

kubectl delete pvc db-persistent-storage-nginxstor-1
kubectl delete pv pv0001

You must delete them separately, or the old status will be kept if you reuse the same name.

Persistent Volume Scheduler

root@kuberm:~/kube1.6config/deploy/statefulesetStorage# kubectl get pv
NAME      CAPACITY   ACCESSMODES   RECLAIMPOLICY   STATUS      CLAIM     STORAGECLASS   REASON    AGE
pv0001    1Gi        RWO           Retain          Available                                      5s
pv0002    5Gi        RWO           Retain          Available                                      4s
pv0003    5Gi        RWO           Retain          Available                                      7s
pv0004    5Gi        RWO           Retain          Available                                      7s

As configured, we created 3 pvs with 5G and 1 with 1G.

Now we launch 3 containers, each requesting 2G, using a StatefulSet.

root@kuberm:~/kube1.6config/deploy/statefulesetStorage# kubectl get pv
NAME      CAPACITY   ACCESSMODES   RECLAIMPOLICY   STATUS      CLAIM                                       STORAGECLASS   REASON    AGE
pv0001    1Gi        RWO           Retain          Available                                                                        4m
pv0002    5Gi        RWO           Retain          Bound       default/db-persistent-storage-nginxstor-0                            4m
pv0003    5Gi        RWO           Retain          Bound       default/db-persistent-storage-nginxstor-1                            4m
pv0004    5Gi        RWO           Retain          Bound       default/db-persistent-storage-nginxstor-2                            4m

You will see that pv0001 (1G) is filtered out by the scheduler.

now we resize it to 4

kubectl scale statefulsets nginxstor --replicas=4
root@kuberm:~/kube1.6config/deploy/statefulesetStorage# kubectl get statefulsets
NAME        DESIRED   CURRENT   AGE
nginxstor   4         3         3m

The new container will fail, since no suitable pv is available.

if you create more pv

root@kuberm:~/kube1.6config/deploy/statefulesetStorage# kubectl get pv
NAME      CAPACITY   ACCESSMODES   RECLAIMPOLICY   STATUS      CLAIM                                       STORAGECLASS   REASON    AGE
pv0001    1Gi        RWO           Retain          Available                                                                        7m
pv0002    5Gi        RWO           Retain          Bound       default/db-persistent-storage-nginxstor-0                            7m
pv0003    5Gi        RWO           Retain          Bound       default/db-persistent-storage-nginxstor-1                            7m
pv0004    5Gi        RWO           Retain          Bound       default/db-persistent-storage-nginxstor-2                            7m
pv0005    5Gi        RWO           Retain          Bound       default/db-persistent-storage-nginxstor-3                            14s
pv0006    5Gi        RWO           Retain          Available                                                                        14s

The cluster automatically comes up. (Great, kubernetes!)

root@kuberm:~/kube1.6config/deploy/statefulesetStorage# kubectl get statefulsets
NAME        DESIRED   CURRENT   AGE
nginxstor   4         4         6m

Remember: if you want to resize the cluster, you must make the cluster healthy first, so every unhealthy condition must be handled before scaling. In this situation we added the pv and the cluster became healthy automatically, which is consistent with the design philosophy of k8s. (Really beautiful)

Problems

But after resizing the cluster down to 2, the claims (and their pvs) are still there and will not be cleaned up; that's the problem.

root@kuberm:~/kube1.6config/deploy/statefulesetStorage# kubectl get pvc
NAME                                STATUS    VOLUME    CAPACITY   ACCESSMODES   STORAGECLASS   AGE
db-persistent-storage-nginxstor-0   Bound     pv0002    5Gi        RWO                          16m
db-persistent-storage-nginxstor-1   Bound     pv0003    5Gi        RWO                          16m
db-persistent-storage-nginxstor-2   Bound     pv0004    5Gi        RWO                          16m
db-persistent-storage-nginxstor-3   Bound     pv0005    5Gi        RWO                          13m

Set up PVC

In the section above we introduced pv, where the container chooses a persistent volume by requested size and k8s does the scheduling.

If we need to choose a specific persistent volume, how do we do it? Let's start with PVC.

vim pvc.yaml

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pvc0001
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi
kubectl create -f pvc.yaml
persistentvolumeclaim "pvc0001" created
root@kuberm:~/kube1.6config/deploy/statefulesetStorage# kubectl get pvc
NAME                                STATUS    VOLUME    CAPACITY   ACCESSMODES   STORAGECLASS   AGE
db-persistent-storage-nginxstor-0   Bound     pv0002    5Gi        RWO                          40m
db-persistent-storage-nginxstor-1   Bound     pv0003    5Gi        RWO                          40m
db-persistent-storage-nginxstor-2   Bound     pv0004    5Gi        RWO                          40m
db-persistent-storage-nginxstor-3   Bound     pv0005    5Gi        RWO                          37m
pvc0001                             Bound     pv0006    5Gi        RWO                          3s

Now you have pvc0001.

Let's start a container with a pvc, choosing pvc0001 as our pvc.

apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: nginxstor1
spec:
  serviceName: "nginxstor1"
  replicas: 2
  template:
    metadata:
      labels:
        app: nginxstor1
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
    spec:
      containers:
      - name: nginxstor1
        image: 172.16.155.136:5000/uwebserverv6
        ports:
          - containerPort: 8000
        volumeMounts:
          - name: db-persistent-storage1
            mountPath: /data/db
      volumes:
        - name: db-persistent-storage1
          persistentVolumeClaim:
            claimName: pvc0001

And now the volume has been attached to the container.

kubectl get pv
NAME      CAPACITY   ACCESSMODES   RECLAIMPOLICY   STATUS      CLAIM                                       STORAGECLASS   REASON    AGE
pv0006    5Gi        RWO           Retain          Bound       default/pvc0001
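
You can also verify the binding and the mount from the claim and pod side; a quick sketch (the pod name nginxstor1-0 assumes the usual StatefulSet <name>-<ordinal> naming):

kubectl get pvc pvc0001
kubectl describe pod nginxstor1-0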

Persistent Volume with Label Scheduling

To create the persistent volumes (admin):

pv.yaml

kind: PersistentVolume
apiVersion: v1
metadata:
  name: pv0001
  labels:
    type: fast
spec:
  accessModes:
    - ReadWriteOnce
  capacity:
    storage: 5Gi
  hostPath:
    path: /data/pv0001/
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: pv0002
  labels:
    type: local
spec:
  accessModes:
    - ReadWriteOnce
  capacity:
    storage: 5Gi
  hostPath:
    path: /data/pv0002/
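
A sketch of creating the two volumes and checking their labels (kubectl get supports --show-labels):

kubectl create -f pv.yaml
kubectl get pv --show-labels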

To create a container that selects its persistent volume by label:

apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: nginxstor
spec:
  serviceName: "nginxstor"
  replicas: 2
  template:
    metadata:
      labels:
        app: nginxstor
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
    spec:
      containers:
      - name: nginxstor
        image: 172.16.155.136:5000/uwebserverv6
        ports:
          - containerPort: 8000
        volumeMounts:
          - name: db-persistent-storage
            mountPath: /data/db
  volumeClaimTemplates:
  - metadata:
      name: db-persistent-storage
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 2Gi
      selector:
        matchLabels:
          type: fast

Note the selector: we use type: fast as our scheduling filter. We run 2 containers but have only one persistent volume labeled type: fast, so one container is launched and the other stays Pending until we create another persistent volume with type: fast.
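
A minimal sketch of such an additional volume (the name and hostPath are assumptions; the type: fast label is what the selector matches):

kind: PersistentVolume
apiVersion: v1
metadata:
  name: pv0003
  labels:
    type: fast
spec:
  accessModes:
    - ReadWriteOnce
  capacity:
    storage: 5Gi
  hostPath:
    path: /data/pv0003/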

Affinity and Anti-affinity

HardPodAffinity: RequiredDuringScheduling
SoftPodAffinity: PreferredDuringScheduling

apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: nginxstor
spec:
  serviceName: "nginxstor"
  replicas: 5
  template:
    metadata:
      labels:
        app: nginxstor
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
                - key: app
                  operator: In
                  values:
                  - nginxstor
            topologyKey: kubernetes.io/hostname
      containers:
      - name: nginxstor
        image: 172.16.155.136:5000/uwebserverv6
        ports:
          - containerPort: 8000

---
apiVersion: v1
kind: Service
metadata:
  name: nginxstor
  labels:
    app: nginxstor
spec:
  ports:
  - port: 8000
    name: nginxstor
  clusterIP: None
  selector:
    app: nginxstor

If we want to launch multiple containers and ensure that no two of them run on the same node, we can use the above YAML file.

In the above YAML file we set the affinity section:

      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
                - key: app
                  operator: In
                  values:
                  - nginxstor
            topologyKey: kubernetes.io/hostname

The key and value come from the labels (app: nginxstor) in the YAML file, and the topologyKey can be obtained from:

root@kuberm:~/kube1.6config/deploy/statefulesetStorage# kubectl describe node kubermnode1
Name:           kubermnode1
Role:
Labels:         beta.kubernetes.io/arch=amd64
            beta.kubernetes.io/os=linux
            kubernetes.io/hostname=kubermnode1

Only the key is needed for topologyKey; kubernetes.io/hostname is a label that Kubernetes sets on every node by default.

If we have only 3 nodes and create 5 containers, you will see:

root@kuberm:~/kube1.6config/deploy/statefulesetStorage# kubectl get po  -w
NAME          READY     STATUS    RESTARTS   AGE
nginxstor-0   1/1       Running   0          5m
nginxstor-1   1/1       Running   0         4m
nginxstor-2   1/1       Running   0         4m
nginxstor-3   0/1       Pending   0         4m

The fourth container will stay Pending until we add a new node to the cluster (and because a StatefulSet creates its pods in order, the fifth pod is not even created until the fourth is Running).
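
If you would rather let the extra pods be scheduled anyway when no free node is left, the soft variant can be used instead. A sketch of the affinity block with preferredDuringSchedulingIgnoredDuringExecution (the weight value is just an example):

      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values:
                  - nginxstor
              topologyKey: kubernetes.io/hostname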

Nested JSON Parser

For Go, it is not easy to parse a deeply nested result like the one below using json.Unmarshal.

{
  "kind": "StatefulSetList",
  "apiVersion": "apps/v1beta1",
  "metadata": {
    "selfLink": "/apis/apps/v1beta1/namespaces/inq/statefulsets",
    "resourceVersion": "213660"
  },
  "items": [
    {
      "metadata": {
        "name": "mongodb",
        "namespace": "inq",
        "selfLink": "/apis/apps/v1beta1/namespaces/inq/statefulsets/mongodb",
        "uid": "979100c0-2588-11e7-ad85-000c295cb5bb",
        "resourceVersion": "197089",
        "generation": 1,
        "creationTimestamp": "2017-04-20T05:17:04Z",
        "labels": {
          "app": "mongodb"
        }
      },
      "spec": {
        "replicas": 3,
        "selector": {
          "matchLabels": {
            "app": "mongodb"
          }
        },
        "template": {
          "metadata": {
            "creationTimestamp": null,
            "labels": {
              "app": "mongodb"
            },
            "annotations": {
              "pod.alpha.kubernetes.io/initialized": "true"
            }
          },
          "spec": {
            "containers": [
              {
                "name": "mongodb",
                "image": "172.16.155.136:5000/dsd-mongodb-3.2",
                "ports": [
                  {
                    "containerPort": 8000,
                    "protocol": "TCP"
                  }
                ],
                "resources": {},
                "volumeMounts": [
                  {
                    "name": "db-persistent-storage",
                    "mountPath": "/var/lib/mongodb/rs-a"
                  }
                ],
                "terminationMessagePath": "/dev/termination-log",
                "terminationMessagePolicy": "File",
                "imagePullPolicy": "Always"
              }
            ],
            "restartPolicy": "Always",
            "terminationGracePeriodSeconds": 30,
            "dnsPolicy": "ClusterFirst",
            "securityContext": {},
            "affinity": {
              "podAntiAffinity": {
                "requiredDuringSchedulingIgnoredDuringExecution": [
                  {
                    "labelSelector": {
                      "matchExpressions": [
                        {
                          "key": "app",
                          "operator": "In",
                          "values": [
                            "mongodb"
                          ]
                        }
                      ]
                    },
                    "topologyKey": "kubernetes.io/hostname"
                  }
                ]
              }
            },
            "schedulerName": "default-scheduler"
          }
        },
        "volumeClaimTemplates": [
          {
            "metadata": {
              "name": "db-persistent-storage",
              "creationTimestamp": null
            },
            "spec": {
              "accessModes": [
                "ReadWriteOnce"
              ],
              "selector": {
                "matchLabels": {
                  "type": "mongodbpv"
                }
              },
              "resources": {
                "requests": {
                  "storage": "2Gi"
                }
              }
            },
            "status": {
              "phase": "Pending"
            }
          }
        ],
        "serviceName": "mongodb"
      },
      "status": {
        "observedGeneration": 1,
        "replicas": 3
      }
    }
  ]
}
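
For comparison, getting items[0].spec.replicas with encoding/json alone would mean declaring nested structs along these lines; a minimal sketch (the type and function names are my own):

import (
    "encoding/json"
    "fmt"
)

// statefulSetList models only the fields needed to reach items[0].spec.replicas;
// everything else in the response is ignored by Unmarshal.
type statefulSetList struct {
    Items []struct {
        Spec struct {
            Replicas int64 `json:"replicas"`
        } `json:"spec"`
    } `json:"items"`
}

func replicasViaUnmarshal(body string) (int64, error) {
    var list statefulSetList
    if err := json.Unmarshal([]byte(body), &list); err != nil {
        return 0, err
    }
    if len(list.Items) == 0 {
        return 0, fmt.Errorf("no statefulsets in the response")
    }
    return list.Items[0].Spec.Replicas, nil
}

Every additional value you want later means another struct field, which is exactly the friction gjson avoids.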

Here we use gjson, a great library for pulling values straight out of a JSON string, combined with exec.Command or an HTTP response.

go get -u github.com/tidwall/gjson

Here is the CurlCommand function:

    // sshcmd1..sshcmd4 are the curl arguments (API URL, auth header, and so on), built elsewhere;
    // the surrounding function needs the "bytes", "fmt", and "os/exec" imports.
    runcommand := exec.Command("curl", sshcmd1, sshcmd2, sshcmd3, sshcmd4)
    fmt.Println(runcommand.Args)

    var out bytes.Buffer
    runcommand.Stdout = &out

    // Run curl, capture its stdout, and check the error instead of dropping it.
    if err := runcommand.Run(); err != nil {
        fmt.Println(err)
    }
    fmt.Println(out.String())
    //value := gjson.Get(out.String(), "items.0.metadata")
    return out.String()

We take the returned string and parse it directly with gjson.

    outstring := CurlCommand(bearheader)                    // raw JSON returned by the apiserver
    value := gjson.Get(outstring, "items.0.spec.replicas")  // walk straight into the nested document
    fmt.Println(value.String())
    return value.Int()

A list element is accessed by its index (0 here), and the value can be converted to the type you need; here we use Int().
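
Other paths into the same document work the same way; a couple of sketches:

    name := gjson.Get(outstring, "items.0.metadata.name") // "mongodb"
    count := gjson.Get(outstring, "items.#")               // number of items in the list
    fmt.Println(name.String(), count.Int())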

Do not forget to import gjson:

import "github.com/tidwall/gjson"

Sticky Session Setting

Reference

https://www.snip2code.com/Snippet/1558669/Kubernetes-configuration-file-to-create-/

Edit nl.yaml

---
################################################################################
## K8S Default Backend for Nginx if no endpoint is available e.g. 404 servers
###############################################################################
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-default-backend
  namespace: kube-system
  labels:
    app: nginx-default-backend
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx-default-backend
  template:
    metadata:
      labels:
        app:  nginx-default-backend
        group: lb
    spec:
      terminationGracePeriodSeconds: 60
      containers:
      - name:  defaultbackend
        image: 172.16.155.136:5000/uwebserverv6
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8000
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
        ports:
        - containerPort: 8000
        resources:
          limits:
            cpu: 10m
            memory: 20Mi
          requests:
            cpu: 10m
            memory: 20Mi

---
################################################################################
## K8S Service configuration default backend. in NGINX Deployment config
## use arg --default-backend-service=$(POD_NAMESPACE)/nginx-default-backend
################################################################################
apiVersion: v1
kind: Service
metadata:
  name: nginx-default-backend
  namespace: kube-system
  labels:
    app: nginx-default-backend
    group: lb
spec:
  type: NodePort
  ports:
  - port: 8000
    targetPort: 8000
  selector:
    app: nginx-default-backend

---
##################################################################################################
## K8S config map for NGINX LB Controller. supply as arg in deployment config
## - --nginx-configmap=$(POD_NAMESPACE)/nginx-ingress-lb-cfg
## See link below for all config options
## https://github.com/kubernetes/contrib/blob/master/ingress/controllers/nginx/configuration.md
###################################################################################################
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-ingress-lb-cfg
  namespace: kube-system
  labels:
    app: nginx-ingress-lb
    group: lb
data:
  enable-sticky-sessions: 'true'   ## use ROUTE cookie to provide session affinity
  enable-vts-status: 'true'   ## Allows the replacement of the default status page nginx-module-vts

---
############################################################################################
## K8S deploy config for NGINX LB gcr.io/google_containers/nginx-ingress-controller
## https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx
#########################################################################################
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-ingress-controller
  namespace: kube-system
  labels:
    app: nginx-ingress-lb
    group: lb
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-ingress-lb
  template:
    metadata:
      labels:
        app: nginx-ingress-lb
        name: nginx-ingress-lb
        group: lb
    spec:
      terminationGracePeriodSeconds: 60
      containers:
      - name: nginx-ingress-lb
        image: gcr.io/google_containers/nginx-ingress-controller:0.8.3
        imagePullPolicy: IfNotPresent
        readinessProbe:
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
        livenessProbe:
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          timeoutSeconds: 1
        # use downward API
        env:
          - name: POD_NAME
            valueFrom:
              fieldRef:
                fieldPath: metadata.name
          - name: POD_NAMESPACE
            valueFrom:
              fieldRef:
                fieldPath: metadata.namespace
        ports:
        - containerPort: 80
          hostPort: 80
        ## if you terminate SSL at the AWS ELB you don't need port 443 here
        - containerPort: 18080 ## we expose 18080 to access nginx stats in url /nginx-status
          hostPort: 18080
        ## https://github.com/kubernetes/contrib/issues/1662 --watch-namespace  to limit on one namespace
        args:
        - /nginx-ingress-controller
        - --default-backend-service=$(POD_NAMESPACE)/nginx-default-backend
        - --nginx-configmap=$(POD_NAMESPACE)/nginx-ingress-lb-cfg
---
#######################################
# K8S NGINX LB Controller Service
#######################################
apiVersion: v1
kind: Service
metadata:
  name: nginx-ingress-lb
  namespace: kube-system
  labels:
    app: nginx-ingress-lb
    group: lb
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
    service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "YOUR_AWS_CERT_ID"
    service.beta.kubernetes.io/aws-load-balancer-ssl-ports: https
    service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
spec:
  type: LoadBalancer
  ports:
  - port: 443
    targetPort: 80 ## terminate ssl
    name: https
    protocol: TCP
  #- port: 80   ### disable unless you have http->https redirect or really want to support http besides https
  #  targetPort: 80
  #  name: http
  #  protocol: TCP
  - port: 18080
    targetPort: 18080
    name: nginxstatus
    protocol: TCP
  selector:
    app: nginx-ingress-lb
---
############################################################
## K8S Ingress to access Nginx status page from LB
############################################################
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
 name: nginx-ingress-lb-stats
 namespace: kube-system
 labels:
  app: nginx-ingress-lb
  group: lb
  stage: kube-system
 annotations:
  kubernetes.io/ingress.class: nginx
spec:
 rules:
 - host: your.host.com
   http:
    paths:
    - path: /nginx_status
      backend:
       serviceName: nginx-ingress-lb
       servicePort: 18080

Create the resources with the above YAML file:

kubectl create -f nl.yaml
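
Check that the controller and the default backend are running; a sketch using the group: lb label from the YAML above:

kubectl get pods -n kube-system -l group=lb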

To get the load balancer address:

root@kuberm:~/kube1.6config/deploy/nginxlb2# kubectl get ing --all-namespaces
NAMESPACE     NAME                     HOSTS           ADDRESS          PORTS     AGE
kube-system   nginx-ingress-lb-stats   your.host.com   172.16.155.160   80        1h

Client test: with the cookie set, the session sticks to one backend.

Create the cookie and store it in the file a.txt:

curl -v  -c a.txt http://172.16.155.160

Use a.txt as the cookie source:

curl -v  --cookie a.txt http://172.16.155.160

.
.
nginx-default-backend-1891278369-dwwft  

You will keep getting the same backend pod, which is the sticky session at work.
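
A quick way to see the difference; a sketch (the backend here echoes its pod name, as the output above suggests):

# without the cookie, requests can land on different backend pods
for i in 1 2 3; do curl -s http://172.16.155.160; done

# with the cookie, every request hits the same pod
for i in 1 2 3; do curl -s --cookie a.txt http://172.16.155.160; done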
