Skip to content

Instantly share code, notes, and snippets.

@GoranHalvarsson
Created November 22, 2021 10:23
Show Gist options
  • Save GoranHalvarsson/c204a9c949c7336501fa8a09dcdec469 to your computer and use it in GitHub Desktop.
---
# Kustomization for the Solr workload.
# NOTE(review): kustomize requires this to live in its own kustomization.yaml
# file alongside the two resource files listed below — confirm file layout.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

# Rewrite the placeholder image names used in the Solr manifests
# (solr, solr-zk-waiter) to the real registry image and tag.
images:
  - name: solr-zk-waiter
    newName: sandbox.azurecr.io/sandbox-linux-solr-8.4.0
    newTag: test
  - name: solr
    newName: sandbox.azurecr.io/sandbox-linux-solr-8.4.0
    newTag: test

resources:
  - solr-service.yaml
  - solr-statefulset.yaml
---
# Internal load-balanced Service fronting the Solr pods on port 8983.
apiVersion: v1
kind: Service
metadata:
  name: solr-service
  namespace: default
  labels:
    app: solr
  annotations:
    # Provision the Azure load balancer with a private (VNet-internal)
    # frontend IP instead of a public one.
    service.beta.kubernetes.io/azure-load-balancer-internal: "true"
spec:
  type: LoadBalancer
  ports:
    - port: 8983
  selector:
    app: solr
---
# SolrCloud StatefulSet: 3 Solr pods, one per node, each with its own
# premium managed disk mounted at /var/solr.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: solr-statefulset
  namespace: default
  labels:
    app: solr
spec:
  replicas: 3  # set this to the number of Solr pod instances you want
  selector:
    matchLabels:
      app: solr
  serviceName: solr-service
  template:
    metadata:
      namespace: default
      labels:
        app: solr
    spec:
      securityContext:
        # Run Solr as the non-root solr user; fsGroup makes the mounted
        # volume group-writable by that user.
        runAsUser: 1001
        fsGroup: 1001
      nodeSelector:
        kubernetes.io/os: linux
        agentpool: solr
      affinity:
        # this causes K8s to only schedule only one Solr pod per node
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                      - solr
              topologyKey: "kubernetes.io/hostname"
      containers:
        - name: solr
          # Placeholder name; rewritten to the registry image by the
          # kustomize images transform.
          image: solr
          env:
            # ZK_HOST lists all of the hostnames for all of the Zookeeper
            # instances - this should correspond to however many ZK
            # instances you have running.
            - name: ZK_HOST
              value: "zookeeper-statefulset-0.zookeeper-service:2181,zookeeper-statefulset-1.zookeeper-service:2181,zookeeper-statefulset-2.zookeeper-service:2181"
            - name: SOLR_JAVA_MEM
              value: "-Xms4g -Xmx4g"  # set the JVM memory usage and limit
          ports:
            - name: solr
              containerPort: 8983
          volumeMounts:
            - name: solr-pvc
              mountPath: /var/solr
          livenessProbe:
            # runs a built-in script to check for Solr readiness/liveness
            exec:
              command:
                - /bin/bash
                - -c
                - "/opt/docker-solr/scripts/wait-for-solr.sh"
            initialDelaySeconds: 20
            timeoutSeconds: 5
          readinessProbe:
            # runs a built-in script to check for Solr readiness/liveness
            exec:
              command:
                - /bin/bash
                - -c
                - "/opt/docker-solr/scripts/wait-for-solr.sh"
            initialDelaySeconds: 20
            timeoutSeconds: 5
      initContainers:
        # runs a built-in script to wait until all Zookeeper instances are
        # up and running before the Solr container starts
        - name: solr-zk-waiter
          image: solr-zk-waiter  # placeholder; rewritten by kustomize
          command:
            - /bin/bash
            - "-c"
            - "/opt/docker-solr/scripts/wait-for-zookeeper.sh"
          securityContext:
            allowPrivilegeEscalation: false
            # NOTE(review): runs as root unlike the main container —
            # presumably required by the wait script; confirm.
            runAsUser: 0
          env:
            - name: ZK_HOST
              value: "zookeeper-statefulset-0.zookeeper-service:2181,zookeeper-statefulset-1.zookeeper-service:2181,zookeeper-statefulset-2.zookeeper-service:2181"
  volumeClaimTemplates:
    # One 10Gi premium managed disk per Solr pod.
    - metadata:
        name: solr-pvc
      spec:
        accessModes:
          - ReadWriteOnce
        storageClassName: managed-premium
        resources:
          requests:
            storage: 10Gi
---
# this allows for max 3 instances of ZK to be unavailable
# NOTE(review): with replicas: 3, maxUnavailable: 3 means this budget never
# blocks a voluntary disruption — the whole ensemble can be drained at once.
# If quorum protection is intended, maxUnavailable: 1 is the usual setting;
# confirm before changing.
# policy/v1 is GA since Kubernetes 1.21; policy/v1beta1 was removed in 1.25.
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: zookeeper-pdb
  namespace: default
spec:
  selector:
    matchLabels:
      app: zookeeper
  maxUnavailable: 3
---
# Zookeeper ensemble StatefulSet: 3 replicas started in parallel, one per
# node, each with its own premium managed disk mounted at /data.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zookeeper-statefulset
  namespace: default
  labels:
    app: zookeeper
spec:
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  # Start all ZK pods at once rather than one-by-one, so the ensemble can
  # form quorum without waiting on ordinal ordering.
  podManagementPolicy: Parallel
  selector:
    matchLabels:
      app: zookeeper
  serviceName: zookeeper-service
  template:
    metadata:
      labels:
        app: zookeeper
    spec:
      nodeSelector:
        kubernetes.io/os: linux
        agentpool: solr
      affinity:
        # this causes K8s to only schedule only one Zookeeper pod per node
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                      - zookeeper
              topologyKey: "kubernetes.io/hostname"
      containers:
        - name: zookeeper
          image: zookeeper  # placeholder; rewritten by kustomize to zookeeper:3.5.9
          env:
            - name: ZK_REPLICAS
              value: "3"  # informs Zookeeper of the number of intended replicas
            - name: ZK_TICK_TIME
              value: "2000"
            # Allow the four-letter-word admin commands the probes rely on
            # (ruok) plus mntr/conf for monitoring.
            - name: ZOO_4LW_COMMANDS_WHITELIST
              value: "mntr,conf,ruok"
            - name: ZOO_STANDALONE_ENABLED
              value: "false"
            - name: ALLOW_ANONYMOUS_LOGIN
              value: "yes"
            # lists all of the Zookeeper servers that are part of this cluster
            - name: ZOO_SERVERS
              value: "server.1=zookeeper-statefulset-0.zookeeper-service:2888:3888;2181 server.2=zookeeper-statefulset-1.zookeeper-service:2888:3888;2181 server.3=zookeeper-statefulset-2.zookeeper-service:2888:3888;2181"
            # quorumListenOnAllIPs allows ZK to listen on all IP addresses
            # for leader election/follower; electionPortBindRetry disables
            # the max retry count as other ZK instances are spinning up
            - name: ZOO_CFG_EXTRA
              value: "quorumListenOnAllIPs=true electionPortBindRetry=0"
          ports:
            - name: client
              containerPort: 2181
              protocol: TCP
            - name: server
              containerPort: 2888
              protocol: TCP
            - name: election
              containerPort: 3888
              protocol: TCP
          volumeMounts:
            - name: zookeeper-pv
              mountPath: /data
          livenessProbe:
            # pings the local Zookeeper instance; it responds "imok" once ready
            exec:
              command:
                - sh
                - -c
                - 'OK=$(echo ruok | nc 127.0.0.1 2181); if [ "$OK" = "imok" ]; then exit 0; else exit 1; fi;'
            initialDelaySeconds: 20
            timeoutSeconds: 5
          readinessProbe:
            # pings the local Zookeeper instance; it responds "imok" once ready
            exec:
              command:
                - sh
                - -c
                - 'OK=$(echo ruok | nc 127.0.0.1 2181); if [ "$OK" = "imok" ]; then exit 0; else exit 1; fi;'
            initialDelaySeconds: 20
            timeoutSeconds: 5
      initContainers:
        # each ZK instance requires an ID specification - since we can't set
        # the ID using env variables, this init container derives it from the
        # pod ordinal (hostname suffix + 1) and writes it to myid on the
        # shared volume
        - name: zookeeper-id
          image: zookeeper-id  # placeholder; rewritten by kustomize to busybox
          command:
            - sh
            - -c
            - 'echo $((${HOSTNAME##*-}+1)) > /data-new/myid'
          volumeMounts:
            - name: zookeeper-pv
              mountPath: /data-new
  volumeClaimTemplates:
    # One 10Gi premium managed disk per Zookeeper pod.
    - metadata:
        name: zookeeper-pv
      spec:
        accessModes:
          - ReadWriteOnce
        storageClassName: managed-premium
        resources:
          requests:
            storage: 10Gi
---
# Kustomization for the Zookeeper workload.
# NOTE(review): kustomize requires this to live in its own kustomization.yaml
# file alongside the two resource files listed below — confirm file layout.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

# Pin the placeholder image names used in the Zookeeper manifests to
# concrete images and tags.
images:
  - name: zookeeper
    newName: zookeeper
    newTag: 3.5.9
  - name: zookeeper-id
    newName: busybox
    newTag: latest

resources:
  - zookeeper-service.yaml
  - zookeeper-statefulset.yaml
---
# Headless Service for the Zookeeper StatefulSet: gives each pod a stable
# DNS name (zookeeper-statefulset-N.zookeeper-service) used in ZOO_SERVERS
# and ZK_HOST.
apiVersion: v1
kind: Service
metadata:
  name: zookeeper-service
  namespace: default
  labels:
    app: zookeeper
spec:
  ports:
    - port: 2888
      name: server
    - port: 3888
      name: leader-election
    - port: 2181
      name: client
  # Headless: no cluster IP; DNS resolves directly to the pod IPs.
  clusterIP: None
  selector:
    app: zookeeper
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment