Environment:
All-in-one K8S node running on a VM with two interfaces: ens3 for management, ens7 for the k8s node/pod network
(uses the default Docker bridge network for node/pod traffic, so no extra flannel/calico setup is needed)
vincent@Kubernete-Dev:~/kubernetes$ ip addr show dev ens3
2: ens3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 52:54:00:ac:0c:27 brd ff:ff:ff:ff:ff:ff
    inet 192.168.48.68/24 brd 192.168.48.255 scope global ens3
       valid_lft forever preferred_lft forever
    inet6 fe80::5054:ff:feac:c27/64 scope link
       valid_lft forever preferred_lft forever
vincent@Kubernete-Dev:~/kubernetes$ ip addr show ens7
3: ens7: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 52:54:00:cf:3c:e0 brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.1/24 brd 192.168.1.255 scope global ens7
       valid_lft forever preferred_lft forever
    inet6 fe80::5054:ff:fecf:3ce0/64 scope link
       valid_lft forever preferred_lft forever
BIG-IP VE VM
[root@bigip-kvm:Active:Standalone] config # tmsh list net self
net self external {
    address 192.168.1.2/24
    allow-service all
    traffic-group traffic-group-local-only
    vlan external
}
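With both VMs up, it is worth a quick sanity check that the k8s node can reach the BIG-IP across the ens7 network before going further; a minimal sketch, assuming the admin/admin credentials used later in this post:

# basic reachability of the BIG-IP self IP
ping -c 2 192.168.1.2
# iControl REST answering on 443 (self-signed cert, hence -k)
curl -sk -u admin:admin https://192.168.1.2/mgmt/tm/sys/version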
Follow the link below to set up the all-in-one K8S, replacing all 127.0.0.1 with 192.168.1.1 in hack/local-up-cluster.sh:
https://github.com/kubernetes/community/blob/master/contributors/devel/running-locally.md
**KUBEPROXY_MODE=ipvs is a new kube-proxy mode; its performance is better than iptables, but it requires the ip_vs/ip_vs_rr/ip_vs_sh/ip_vs_wrr kernel modules**
**remove the KUBEPROXY_MODE=ipvs environment variable to just use iptables for an easier setup**
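If you keep IPVS mode, the modules can be loaded and checked up front; a minimal sketch (module names taken from the note above):

# load the kernel modules that kube-proxy's ipvs mode depends on
for mod in ip_vs ip_vs_rr ip_vs_sh ip_vs_wrr; do
    sudo modprobe "$mod"
done
# confirm they are present before exporting KUBEPROXY_MODE=ipvs
lsmod | grep ip_vs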
**hack/local-up-cluster.sh diff**
vincent@Kubernete-Dev:~/kubernetes$ git diff
diff --git a/hack/local-up-cluster.sh b/hack/local-up-cluster.sh
index 98a0a6b..be143af 100755
--- a/hack/local-up-cluster.sh
+++ b/hack/local-up-cluster.sh
@@ -64,7 +64,7 @@ DNS_DOMAIN=${KUBE_DNS_NAME:-"cluster.local"}
KUBECTL=${KUBECTL:-cluster/kubectl.sh}
WAIT_FOR_URL_API_SERVER=${WAIT_FOR_URL_API_SERVER:-20}
ENABLE_DAEMON=${ENABLE_DAEMON:-false}
-HOSTNAME_OVERRIDE=${HOSTNAME_OVERRIDE:-"127.0.0.1"}
+HOSTNAME_OVERRIDE=${HOSTNAME_OVERRIDE:-"192.168.1.1"}
CLOUD_PROVIDER=${CLOUD_PROVIDER:-""}
CLOUD_CONFIG=${CLOUD_CONFIG:-""}
FEATURE_GATES=${FEATURE_GATES:-"AllAlpha=false"}
@@ -218,12 +218,12 @@ API_SECURE_PORT=${API_SECURE_PORT:-6443}
# WARNING: For DNS to work on most setups you should export API_HOST as the docker0 ip address,
API_HOST=${API_HOST:-localhost}
-API_HOST_IP=${API_HOST_IP:-"127.0.0.1"}
+API_HOST_IP=${API_HOST_IP:-"192.168.1.1"}
ADVERTISE_ADDRESS=${ADVERTISE_ADDRESS:-""}
API_BIND_ADDR=${API_BIND_ADDR:-"0.0.0.0"}
EXTERNAL_HOSTNAME=${EXTERNAL_HOSTNAME:-localhost}
-KUBELET_HOST=${KUBELET_HOST:-"127.0.0.1"}
+KUBELET_HOST=${KUBELET_HOST:-"192.168.1.1"}
# By default only allow CORS for requests on localhost
API_CORS_ALLOWED_ORIGINS=${API_CORS_ALLOWED_ORIGINS:-/127.0.0.1(:[0-9]+)?$,/localhost(:[0-9]+)?$}
KUBELET_PORT=${KUBELET_PORT:-10250}
@@ -753,7 +753,7 @@ function start_kubelet {
-i \
--cidfile=$KUBELET_CIDFILE \
gcr.io/google_containers/kubelet \
- /kubelet --v=${LOG_LEVEL} --containerized ${priv_arg}--chaos-chance="${CHAOS_CHANCE}" --pod-manifest-path="${POD_MANIFEST_PATH}" --hostname-override="${HOSTNAME_OVERRIDE}" --cloud-provider="${CLOUD_PROVIDER}" --cloud-config="${CLOUD_CONFIG}" \ --address="127.0.0.1" --kubeconfig "$CERT_DIR"/kubelet.kubeconfig --port="$KUBELET_PORT" --enable-controller-attach-detach="${ENABLE_CONTROLLER_ATTACH_DETACH}" &> $KUBELET_LOG &
+ /kubelet --v=${LOG_LEVEL} --containerized ${priv_arg}--chaos-chance="${CHAOS_CHANCE}" --pod-manifest-path="${POD_MANIFEST_PATH}" --hostname-override="${HOSTNAME_OVERRIDE}" --cloud-provider="${CLOUD_PROVIDER}" --cloud-config="${CLOUD_CONFIG}" \ --address="192.168.1.1" --kubeconfig "$CERT_DIR"/kubelet.kubeconfig --port="$KUBELET_PORT" --enable-controller-attach-detach="${ENABLE_CONTROLLER_ATTACH_DETACH}" &> $KUBELET_LOG &
fi
}
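Since the script reads these settings with the ${VAR:-default} pattern, the HOSTNAME_OVERRIDE, API_HOST_IP, and KUBELET_HOST changes can also be applied per-run instead of patching the file; a sketch (the hard-coded --address in the containerized-kubelet hunk still needs the edit above):

HOSTNAME_OVERRIDE=192.168.1.1 API_HOST_IP=192.168.1.1 KUBELET_HOST=192.168.1.1 hack/local-up-cluster.sh -O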
1: Start k8s
root@Kubernete-Dev:/home/vincent/kubernetes# KUBEPROXY_MODE=ipvs hack/local-up-cluster.sh -O
WARNING : The kubelet is configured to not fail if swap is enabled; production deployments should disable swap.
skipped the build.
WARNING: No swap limit support
Kubelet cgroup driver defaulted to use: cgroupfs
API SERVER insecure port is free, proceeding...
API SERVER secure port is free, proceeding...
Detected host and ready to start services. Doing some housekeeping first...
Using GO_OUT /home/vincent/kubernetes/_output/bin
Starting services now!
Starting etcd
etcd --advertise-client-urls http://127.0.0.1:2379 --data-dir /tmp/tmp.K1tMwh88m3 --listen-client-urls http://127.0.0.1:2379 --debug > "/dev/null" 2>/dev/null
Waiting for etcd to come up.
+++ [0110 10:54:27] On try 2, etcd: : http://127.0.0.1:2379
{"action":"set","node":{"key":"/_test","value":"","modifiedIndex":4,"createdIndex":4}}
Generating a 2048 bit RSA private key
.................................................................................................................+++
.....................+++
writing new private key to '/var/run/kubernetes/server-ca.key'
-----
Generating a 2048 bit RSA private key
...........+++
Local Kubernetes cluster is running. Press Ctrl-C to shut it down.
Logs:
/tmp/kube-apiserver.log
/tmp/kube-controller-manager.log
/tmp/kube-proxy.log
/tmp/kube-scheduler.log
/tmp/kubelet.log
To start using your cluster, you can open up another terminal/tab and run:
export KUBECONFIG=/var/run/kubernetes/admin.kubeconfig
cluster/kubectl.sh
Alternatively, you can write to the default kubeconfig:
export KUBERNETES_PROVIDER=local
cluster/kubectl.sh config set-cluster local --server=https://localhost:6443 --
cluster/kubectl.sh config set-credentials myself --client-key=/var/run/kuberne
cluster/kubectl.sh config set-context local --cluster=local --user=myself
cluster/kubectl.sh config use-context local
cluster/kubectl.sh
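Before creating anything, you can confirm the node registered with the ens7 address; with HOSTNAME_OVERRIDE and KUBELET_HOST changed as above, the node should report 192.168.1.1 as its internal IP:

export KUBECONFIG=/var/run/kubernetes/admin.kubeconfig
cluster/kubectl.sh get nodes -o wide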
2: Create the BIG-IP admin login secret in k8s
vincent@Kubernete-Dev:~/kubernetes$ export KUBECONFIG=/var/run/kubernetes/admin.kubeconfig
vincent@Kubernete-Dev:~/kubernetes$ cluster/kubectl.sh create secret generic bigip-login --namespace kube-system --from-literal=username=admin --from-literal=password=admin
secret "bigip-login" created
3: Deploy the F5 k8s-bigip-ctlr
vincent@Kubernete-Dev:~/kubernetes$ cluster/kubectl.sh apply -f f5-k8s-bigip-ctlr-deployment.yaml
deployment "test-bigip-controller-1" created
serviceaccount "bigip-ctlr-serviceaccount" created
clusterrole "bigip-ctlr-clusterrole" created
clusterrolebinding "bigip-ctlr-clusterrole-binding" created
4: Show the f5-k8s-bigip-ctlr pod
vincent@Kubernete-Dev:~/kubernetes$ cluster/kubectl.sh get po --namespace=kube-system
NAME READY STATUS RESTARTS AGE
kube-dns-6c857864fb-lcxgf 3/3 Running 0 2m
test-bigip-controller-1-67864586dd-jxq8p 1/1 Running 0 40s
5: Show the f5-k8s-bigip-ctlr pod logs
vincent@Kubernete-Dev:~/kubernetes$ cluster/kubectl.sh logs test-bigip-controller-1-67864586dd-jxq8p --namespace=kube-system | tail -10
2018/01/11 22:51:03 [INFO] [2018-01-11 22:51:03,010 f5_cccl.resource.resource INFO] Updating ApiIRule: /test/http_redirect_irule
2018/01/11 22:51:05 [INFO] [2018-01-11 22:51:05,092 f5_cccl.resource.resource INFO] Updating ApiIRule: /test/http_redirect_irule
2018/01/11 22:51:07 [INFO] [2018-01-11 22:51:07,109 f5_cccl.resource.resource INFO] Updating ApiIRule: /test/http_redirect_irule
2018/01/11 22:51:09 [INFO] [2018-01-11 22:51:09,109 f5_cccl.resource.resource INFO] Updating ApiIRule: /test/http_redirect_irule
2018/01/11 22:51:11 [INFO] [2018-01-11 22:51:11,127 f5_cccl.resource.resource INFO] Updating ApiIRule: /test/http_redirect_irule
2018/01/11 22:51:13 [INFO] [2018-01-11 22:51:13,117 f5_cccl.resource.resource INFO] Updating ApiIRule: /test/http_redirect_irule
2018/01/11 22:51:15 [INFO] [2018-01-11 22:51:15,209 f5_cccl.resource.resource INFO] Updating ApiIRule: /test/http_redirect_irule
2018/01/11 22:51:17 [INFO] [2018-01-11 22:51:17,210 f5_cccl.resource.resource INFO] Updating ApiIRule: /test/http_redirect_irule
2018/01/11 22:51:19 [INFO] [2018-01-11 22:51:19,243 f5_cccl.resource.resource INFO] Updating ApiIRule: /test/http_redirect_irule
2018/01/11 22:51:21 [INFO] [2018-01-11 22:51:21,408 f5_cccl.resource.resource INFO] Updating ApiIRule: /test/http_redirect_irule
6: The f5-k8s-bigip-ctlr container runs two processes
/app/bin/k8s-bigip-ctlr, written in Go, pulls config from k8s
/app/python/bigipconfigdriver.py, written in Python, pushes config to BIG-IP (could it be replaced with https://github.com/e-XpertSolutions/f5-rest-client?)
vincent@Kubernete-Dev:~/kubernetes$ cluster/kubectl.sh exec -it test-bigip-controller-1-67864586dd-jxq8p --namespace=kube-system -- /bin/sh
/app $ ps -ef
PID USER TIME COMMAND
1 ctlr 0:01 /app/bin/k8s-bigip-ctlr --bigip-partition test --bigip-url 192.168.1.2 --bigip-username admin --bigip-password admin --verify-interval 2 --namespace
14 ctlr 0:10 python /app/python/bigipconfigdriver.py --config-file /tmp/k8s-bigip-ctlr.config450095162/config.json
/app $ ip addr show
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
40: eth0@if41: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue state UP
    link/ether 02:42:ac:11:00:03 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.3/16 scope global eth0
       valid_lft forever preferred_lft forever
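The Python driver authenticates to the BIG-IP through iControl REST, starting with the same /mgmt/shared/authn/login endpoint that appears in the error log at the end of this post. The call can be reproduced by hand from the k8s node; a sketch, assuming the admin/admin credentials above:

# request an auth token from the BIG-IP iControl REST login endpoint
curl -sk -H 'Content-Type: application/json' \
    -d '{"username": "admin", "password": "admin", "loginProviderName": "tmos"}' \
    https://192.168.1.2/mgmt/shared/authn/login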
f5-k8s-bigip-ctlr-deployment.yaml
## https://github.com/kubernetes/community/blob/master/contributors/devel/running-locally.md
# change all 127.0.0.1 in kubernetes/hack/local-up-cluster.sh to the private IP 192.168.1.1 that the external BIG-IP can reach
# http://clouddocs.f5.com/containers/v2/kubernetes/kctlr-app-install.html
# step 1:
# create the BIG-IP login secret first:
# kubectl create secret generic bigip-login --namespace kube-system --from-literal=username=admin --from-literal=password=admin
# step 2:
# apply this yaml: cluster/kubectl.sh apply -f f5-k8s-bigip-ctlr-deployment.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "1"
  labels:
    app: test-bigip-controller-1
  name: test-bigip-controller-1
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: test-bigip-controller-1
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: test-bigip-controller-1
    spec:
      serviceAccountName: bigip-ctlr-serviceaccount
      containers:
      - args:
        - --bigip-partition
        - test
        - --bigip-url
        - 192.168.1.2
        - --bigip-username
        - admin
        - --bigip-password
        - admin
        - --verify-interval
        - "2"
        - --namespace
        - default
        - --node-poll-interval
        - "1"
        - --pool-member-type
        - nodeport
        - --log-level
        - INFO
        command:
        - /app/bin/k8s-bigip-ctlr
        image: f5networks/k8s-bigip-ctlr:latest
        imagePullPolicy: Always
        name: test-bigip-controller-1
        resources:
          limits:
            cpu: 100m
            memory: 128M
      schedulerName: default-scheduler
      terminationGracePeriodSeconds: 30
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: bigip-ctlr-serviceaccount
  namespace: kube-system
---
# for use in k8s clusters using RBAC
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: bigip-ctlr-clusterrole
rules:
- apiGroups:
  - ""
  - "extensions"
  resources:
  - nodes
  - services
  - endpoints
  - namespaces
  - ingresses
  - secrets
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  - "extensions"
  resources:
  - configmaps
  - events
  - ingresses/status
  verbs:
  - get
  - list
  - watch
  - update
  - create
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: bigip-ctlr-clusterrole-binding
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: bigip-ctlr-clusterrole
subjects:
- kind: ServiceAccount
  name: bigip-ctlr-serviceaccount
  namespace: kube-system
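With --namespace default and --pool-member-type nodeport, the controller watches the default namespace for resources such as ConfigMaps labeled f5type: virtual-server. A minimal sketch of one, adapted from the clouddocs link above (the example-vs name, example-service backend, and 192.168.1.100 bind address are hypothetical, and the schema version should be checked against your controller release):

cat <<'EOF' | cluster/kubectl.sh apply -f -
kind: ConfigMap
apiVersion: v1
metadata:
  name: example-vs
  namespace: default
  labels:
    f5type: virtual-server
data:
  schema: "f5schemadb://bigip-virtual-server_v0.1.7.json"
  data: |
    {
      "virtualServer": {
        "frontend": {
          "partition": "test",
          "balance": "round-robin",
          "mode": "http",
          "virtualAddress": {
            "bindAddr": "192.168.1.100",
            "port": 80
          }
        },
        "backend": {
          "serviceName": "example-service",
          "servicePort": 80
        }
      }
    }
EOF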
ISSUE
vincent@Kubernete-Dev:~/kubernetes$ cluster/kubectl.sh logs test-bigip-controller-1-7878d8b849-jdtr5 --namespace=kube-system
2018/01/04 20:56:17 [ERROR] [2018-01-04 20:56:17,650 __main__ ERROR] HTTPSConnectionPool(host='172.16.2.51', port=443): Max retries exceeded with url: /mgmt/shared/authn/login (Caused by NewConnectionError('<requests.packages.urllib3.connection.VerifiedHTTPSConnection object at 0x7f273380e650>: Failed to establish a new connection: [Errno 110] Operation timed out',))
2018/01/04 20:56:17 [CRITICAL] Config driver exited: 1
See https://github.com/F5Networks/k8s-bigip-ctlr/issues/438, "Investigate and document behavior when controller cannot communicate with BIG-IP".
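A first step is to test reachability from inside the controller pod itself; a sketch using the pod from step 4 (busybox wget being available in the alpine-based image is an assumption; any TCP test works):

cluster/kubectl.sh exec -it test-bigip-controller-1-67864586dd-jxq8p --namespace=kube-system -- \
    wget --no-check-certificate -qO- https://192.168.1.2/mgmt/shared/authn/login
# an HTTP error here proves the BIG-IP is reachable; a timeout like the log
# above points at routing/firewall between the pod network and the BIG-IP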
Is there a solution for this?
I am having an issue where the kube controller cannot communicate with the BIG-IP over TCP on port 10250.