现象
执行 laf/deploy/build/start.sh 脚本时,minio 安装步骤报错:helm install minio 在 post-install 阶段超时失败(timed out waiting for the condition)。
K3S版本
[root@k3s-server-001 ~]# kubectl version
WARNING: This version information is deprecated and will be replaced with the output from kubectl version --short. Use --output=yaml|json to get the full version.
Client Version: version.Info{Major:"1", Minor:"26", GitVersion:"v1.26.4+k3s1", GitCommit:"8d0255af07e95b841952563253d27b0d10bd72f0", GitTreeState:"clean", BuildDate:"2023-04-20T00:33:18Z", GoVersion:"go1.19.8", Compiler:"gc", Platform:"linux/amd64"}
Kustomize Version: v4.5.7
Server Version: version.Info{Major:"1", Minor:"26", GitVersion:"v1.26.4+k3s1", GitCommit:"8d0255af07e95b841952563253d27b0d10bd72f0", GitTreeState:"clean", BuildDate:"2023-04-20T00:33:18Z", GoVersion:"go1.19.8", Compiler:"gc", Platform:"linux/amd64"}
[root@k3s-server-001 ~]#
K3S集群Node状态
[root@k3s-server-001 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k3s-server-002.vartee.com Ready <none> 24h v1.26.4+k3s1
k3s-server-001.vartee.com Ready control-plane,master 24h v1.26.4+k3s1
k3s-server-003.vartee.com Ready <none> 24h v1.26.4+k3s1
[root@k3s-server-001 ~]#
K3S集群Pod状态
[root@k3s-server-001 build]# kubectl get pods -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system local-path-provisioner-76d776f6f9-pkjpq 1/1 Running 0 23h
kube-system coredns-59b4f5bbd5-4jm99 1/1 Running 0 23h
kube-system svclb-traefik-01293634-l67cn 2/2 Running 0 23h
kube-system helm-install-traefik-crd-mxmzj 0/1 Completed 0 23h
kube-system helm-install-traefik-6pzrl 0/1 Completed 1 23h
kube-system traefik-56b8c5fb5c-z6hsz 1/1 Running 0 23h
kube-system metrics-server-7b67f64457-rsn7r 1/1 Running 0 23h
kube-system svclb-traefik-01293634-mkbdd 2/2 Running 0 23h
kube-system svclb-traefik-01293634-7xqg5 2/2 Running 0 23h
kuboard kuboard-etcd-xzpr7 1/1 Running 0 22h
kuboard kuboard-v3-5676ffcf57-v2wqq 1/1 Running 0 22h
kuboard kuboard-agent-2-df7db977c-pmvbg 1/1 Running 2 (22h ago) 22h
kuboard kuboard-agent-777dffc687-gb94l 1/1 Running 2 (22h ago) 22h
安装步骤
[root@k3s-server-001 build]# export DOMAIN=k3s-server-001.vartee.com
[root@k3s-server-001 build]# ./start.sh
DOMAIN: k3s-server-001.vartee.com
namespace/laf-system created
++ DATABASE_URL='mongodb://admin:r390h7kz0u0ezz3r87xgnoya9avg744t@mongodb-0.mongo.laf-system.svc.cluster.local:27017/sys_db?authSource=admin&replicaSet=rs0&w=majority'
++ helm install mongodb -n laf-system --set db.username=admin --set db.password=r390h7kz0u0ezz3r87xgnoya9avg744t --set storage.size=5Gi ./charts/mongodb
WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /root/.kube/config
WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /root/.kube/config
NAME: mongodb
LAST DEPLOYED: Thu Apr 27 22:07:39 2023
NAMESPACE: laf-system
STATUS: deployed
REVISION: 1
NOTES:
Use mongo client in cluster:
export ROOT_USERNAME=$(kubectl get secret --namespace laf-system mongodb-mongodb-init -o jsonpath="{.data.username}" | base64 -d)
export ROOT_PASSWORD=$(kubectl get secret --namespace laf-system mongodb-mongodb-init -o jsonpath="{.data.password}" | base64 -d)
export POD_NAME=$(kubectl get pods --namespace laf-system -l "app.kubernetes.io/name=mongodb,app.kubernetes.io/instance=mongodb" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace laf-system $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
Connection URI:
export CONNECTION_URI="mongodb://$ROOT_USERNAME:$ROOT_PASSWORD@mongodb-0.mongo.laf-system.svc.cluster.local:27017/sys_db?authSource=admin&replicaSet=rs0&w=majority"
Connect in cluster:
kubectl run mongo --rm -it --env="URI=$CONNECTION_URI" --image=mongo:5.0.14 -- sh
++ APISIX_API_URL=http://apisix-admin.laf-system.svc.cluster.local:9180/apisix/admin
++ APISIX_API_KEY=r390h7kz0u0ezz3r87xgnoya9avg744t
++ helm install apisix -n laf-system --set apisix.kind=DaemonSet --set apisix.hostNetwork=true --set admin.credentials.admin=r390h7kz0u0ezz3r87xgnoya9avg744t --set etcd.enabled=true --set 'etcd.host[0]=http://apisix-etcd:2379' --set dashboard.enabled=true --set ingress-controller.enabled=true --set ingress-controller.config.apisix.adminKey=r390h7kz0u0ezz3r87xgnoya9avg744t ./charts/apisix
WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /root/.kube/config
WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /root/.kube/config
NAME: apisix
LAST DEPLOYED: Thu Apr 27 22:07:39 2023
NAMESPACE: laf-system
STATUS: deployed
REVISION: 1
NOTES:
1. Get the application URL by running these commands:
export NODE_PORT=$(kubectl get --namespace laf-system -o jsonpath="{.spec.ports[0].nodePort}" services apisix-gateway)
export NODE_IP=$(kubectl get nodes --namespace laf-system -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
++ MINIO_ROOT_ACCESS_KEY=minio-root-user
++ MINIO_ROOT_SECRET_KEY=r390h7kz0u0ezz3r87xgnoya9avg744t
++ MINIO_DOMAIN=oss.k3s-server-001.vartee.com
++ MINIO_EXTERNAL_ENDPOINT=http://oss.k3s-server-001.vartee.com
++ MINIO_INTERNAL_ENDPOINT=http://minio.laf-system.svc.cluster.local:9000
++ helm install minio -n laf-system --set rootUser=minio-root-user --set rootPassword=r390h7kz0u0ezz3r87xgnoya9avg744t --set persistence.size=3Gi --set domain=oss.k3s-server-001.vartee.com --set consoleHost=minio.k3s-server-001.vartee.com ./charts/minio
WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /root/.kube/config
WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /root/.kube/config
Error: INSTALLATION FAILED: failed post-install: 1 error occurred:
* timed out waiting for the condition
laf-system Pod状态
[root@k3s-server-001 ~]# kubectl get pod -n laf-system
NAME READY STATUS RESTARTS AGE
apisix-s8cjn 0/1 Pending 0 67m
apisix-7mvjq 0/1 Pending 0 67m
apisix-22rlq 0/1 Pending 0 67m
apisix-etcd-0 0/1 Pending 0 67m
minio-0 0/1 Pending 0 67m
apisix-ingress-controller-5d549ff57-r7gxp 0/1 Init:0/1 0 67m
mongodb-0 0/1 Pending 0 67m
apisix-dashboard-557dc59b9c-xcvfk 0/1 CrashLoopBackOff 19 (3m44s ago) 67m
[root@k3s-server-001 ~]#
minio-0 状态
[root@k3s-server-001 ~]# kubectl describe pod minio-0 -n laf-system
Name: minio-0
Namespace: laf-system
Priority: 0
Service Account: minio-sa
Node: <none>
Labels: app=minio
controller-revision-hash=minio-6f9db9688c
release=minio
statefulset.kubernetes.io/pod-name=minio-0
Annotations: checksum/config: b3ce4dadb94137af95e63d485984da8ac0d686621b73e1d334008636a25a8d07
checksum/secrets: 2f302dda98971568d637d76b6202441dc101c1ad57302e588ea0849fa8d087eb
Status: Pending
IP:
IPs: <none>
Controlled By: StatefulSet/minio
Containers:
minio:
Image: quay.io/minio/minio:RELEASE.2023-03-22T06-36-24Z
Ports: 9000/TCP, 9001/TCP
Host Ports: 0/TCP, 0/TCP
Command:
/bin/sh
-ce
/usr/bin/docker-entrypoint.sh minio server http://minio-{0...0}.minio-svc.laf-system.svc.cluster.local/export-{0...3} -S /etc/minio/certs/ --address :9000 --console-address :9001
Requests:
memory: 100Mi
Environment:
MINIO_DOMAIN: oss.k3s-server-001.vartee.com
MINIO_ROOT_USER: <set to the key 'rootUser' in secret 'minio'> Optional: false
MINIO_ROOT_PASSWORD: <set to the key 'rootPassword' in secret 'minio'> Optional: false
MINIO_PROMETHEUS_AUTH_TYPE: public
Mounts:
/export-0 from export-0 (rw)
/export-1 from export-1 (rw)
/export-2 from export-2 (rw)
/export-3 from export-3 (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-7vcps (ro)
Conditions:
Type Status
PodScheduled False
Volumes:
export-0:
Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
ClaimName: export-0-minio-0
ReadOnly: false
export-1:
Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
ClaimName: export-1-minio-0
ReadOnly: false
export-2:
Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
ClaimName: export-2-minio-0
ReadOnly: false
export-3:
Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
ClaimName: export-3-minio-0
ReadOnly: false
minio-user:
Type: Secret (a volume populated by a Secret)
SecretName: minio
Optional: false
kube-api-access-7vcps:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
ConfigMapOptional: <nil>
DownwardAPI: true
QoS Class: Burstable
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning FailedScheduling 21m (x10 over 66m) default-scheduler 0/3 nodes are available: pod has unbound immediate PersistentVolumeClaims. preemption: 0/3 nodes are available: 3 No preemption victims found for incoming pod..
[root@k3s-server-001 ~]#
存储类、存储卷状态(注:下方 Pending 的 PVC 申请的存储类为 local-hostpath,但集群中只存在 local-path 与 database-mongodb 两个 StorageClass,疑为 PVC 无法绑定、Pod 调度失败的根因 —— 待确认)
[root@k3s-server-001 ~]# kubectl get sc,pv,pvc -n laf-system
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
storageclass.storage.k8s.io/local-path (default) rancher.io/local-path Delete WaitForFirstConsumer false 24h
storageclass.storage.k8s.io/database-mongodb openebs.io/local Retain WaitForFirstConsumer false 73m
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
persistentvolumeclaim/mongodb-db-pvc Pending database-mongodb 73m
persistentvolumeclaim/apisix-etcd-pvc Pending local-hostpath 73m
persistentvolumeclaim/export-0-minio-0 Pending local-hostpath 73m
persistentvolumeclaim/export-1-minio-0 Pending local-hostpath 73m
persistentvolumeclaim/export-2-minio-0 Pending local-hostpath 73m
persistentvolumeclaim/export-3-minio-0 Pending local-hostpath 73m
[root@k3s-server-001 ~]#