Kubernetes Manifest File Samples
pod.yaml
---
# Single nginx Pod with identical CPU/memory requests and limits (Guaranteed QoS).
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo
spec:
  containers:
    - name: nginx
      image: nginx
      ports:
        - containerPort: 80
      resources:
        limits:
          cpu: 50m
          memory: 100Mi
        requests:
          cpu: 50m
          memory: 100Mi
podp.yaml
---
# Deployment: two nginx replicas labelled app=premium.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: premium-pod-demo
  labels:
    app: premium
spec:
  replicas: 2
  selector:
    matchLabels:
      app: premium
  template:
    metadata:
      labels:
        app: premium
    spec:
      containers:
        - name: demo
          image: nginx:1.14.2
          ports:
            - containerPort: 80
          resources:
            limits:
              cpu: 50m
              memory: 100Mi
            requests:
              cpu: 50m
              memory: 100Mi
pods.yaml
---
# Deployment: two nginx replicas labelled app=standard.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: standard-pod-demo
  labels:
    app: standard
spec:
  replicas: 2
  selector:
    matchLabels:
      app: standard
  template:
    metadata:
      labels:
        app: standard
    spec:
      containers:
        - name: demo
          image: nginx:1.14.2
          ports:
            - containerPort: 80
          resources:
            limits:
              cpu: 50m
              memory: 100Mi
            requests:
              cpu: 50m
              memory: 100Mi
pv.yaml
---
# PersistentVolume backed by a hostPath directory on the node
# (suitable for single-node / development clusters only).
kind: PersistentVolume
apiVersion: v1
metadata:
  name: research-vol
  labels:
    type: local
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/data/research-vol"
pvc.yaml
---
# PersistentVolumeClaim pre-bound to the "research-vol" PV.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: research-pvc
spec:
  accessModes:
    - ReadWriteOnce
  # Empty storageClassName disables dynamic provisioning; volumeName pins
  # this claim to the statically created PV "research-vol".
  storageClassName: ""
  volumeName: research-vol
  resources:
    requests:
      storage: 1Gi
pvexample.yaml
---
# Deployment whose pods mount the "research-pvc" claim at /var/log/test.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pvc-demo
  labels:
    app: premium
spec:
  replicas: 2
  selector:
    matchLabels:
      app: premium
  template:
    metadata:
      labels:
        app: premium
    spec:
      containers:
        - name: demo
          image: nginx:1.14.2
          ports:
            - containerPort: 80
          volumeMounts:
            - mountPath: "/var/log/test"
              name: research-vol
          resources:
            limits:
              cpu: 50m
              memory: 100Mi
            requests:
              cpu: 50m
              memory: 100Mi
      volumes:
        - name: research-vol
          persistentVolumeClaim:
            claimName: research-pvc
---
# Pod that mounts ConfigMap "myconfigmap" read-only at /etc/foo.
apiVersion: v1
# NOTE(review): original said "kind: pod" — the kind field is case-sensitive.
kind: Pod
metadata:
  name: mypod-cm-file
spec:
  containers:
    - name: mypod
      image: redis
      volumeMounts:
        - name: foo
          mountPath: "/etc/foo"
          readOnly: true
      # resources must sit under the container, not at the pod spec level.
      resources:
        limits:
          cpu: 50m
          memory: 100Mi
        requests:
          cpu: 50m
          memory: 100Mi
  volumes:
    - name: foo
      configMap:
        name: myconfigmap
replicaset.yaml
---
# ReplicaSet keeping three nginx pods with label tier=frontend alive.
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: replicaset-demo
  labels:
    app: guestbook
    tier: frontend
spec:
  # modify replicas according to your case
  replicas: 3
  selector:
    matchLabels:
      tier: frontend
  template:
    metadata:
      labels:
        tier: frontend
    spec:
      containers:
        - name: nginx-replicas
          image: nginx:1.14.2
secrets.yaml
---
# Opaque Secret holding base64-encoded username/password.
apiVersion: v1
data:
  username: YWRtaW4=
  password: MWYyZDFlMmU2N2Rm
kind: Secret
metadata:
  # annotations were empty in the original sample; {} avoids a bare null value
  annotations: {}
  name: mysecret
type: Opaque
---
# Pod that mounts the whole Secret read-only at /etc/foo.
apiVersion: v1
kind: Pod
metadata:
  name: mypod
spec:
  containers:
    - name: mypod
      image: nginx
      volumeMounts:
        - name: foo
          mountPath: "/etc/foo"
          readOnly: true
      resources:
        limits:
          cpu: 50m
          memory: 100Mi
        requests:
          cpu: 50m
          memory: 100Mi
  volumes:
    - name: foo
      secret:
        secretName: mysecret
        optional: false # default setting; "mysecret" must exist
---
# Pod that projects only the "username" key of the Secret to a custom path.
apiVersion: v1
kind: Pod
metadata:
  name: mypod2
spec:
  containers:
    - name: mypod2
      image: nginx
      volumeMounts:
        - name: foo
          mountPath: "/etc/foo"
          readOnly: true
      resources:
        limits:
          cpu: 50m
          memory: 100Mi
        requests:
          cpu: 50m
          memory: 100Mi
  volumes:
    - name: foo
      secret:
        secretName: mysecret
        items:
          - key: username
            path: my-group/my-username
---
# Pod that exposes Secret keys as environment variables.
apiVersion: v1
kind: Pod
metadata:
  name: secret-env-pod
spec:
  containers:
    - name: mycontainer
      image: nginx
      resources:
        limits:
          cpu: 50m
          memory: 100Mi
        requests:
          cpu: 50m
          memory: 100Mi
      env:
        - name: SECRET_USERNAME
          valueFrom:
            secretKeyRef:
              name: mysecret
              key: username
              optional: false # same as default; "mysecret" must exist
              # and include a key named "username"
        - name: SECRET_PASSWORD
          valueFrom:
            secretKeyRef:
              name: mysecret
              key: password
              optional: false # same as default; "mysecret" must exist
              # and include a key named "password"
  restartPolicy: Never
service.yaml
---
# Backend Deployment targeted by the three Service examples below.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: service-lb-demo
  labels:
    app: MyApp
spec:
  replicas: 2
  selector:
    matchLabels:
      app: MyApp
  template:
    metadata:
      labels:
        app: MyApp
    spec:
      containers:
        - name: demo
          image: nginx:1.14.2
          ports:
            - containerPort: 80
          resources:
            limits:
              cpu: 50m
              memory: 100Mi
            requests:
              cpu: 50m
              memory: 100Mi
---
# ClusterIP Service (default type).
apiVersion: v1
kind: Service
metadata:
  name: my-service-cip
spec:
  selector:
    app: MyApp
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
---
# NodePort Service: reachable on every node at a fixed high port.
apiVersion: v1
kind: Service
metadata:
  name: my-service-np
spec:
  type: NodePort
  selector:
    app: MyApp
  ports:
    # By default and for convenience, the `targetPort` is set to the same value as the `port` field.
    - port: 80
      targetPort: 80
      # Optional field
      # By default and for convenience, the Kubernetes control plane will allocate a port from a range (default: 30000-32767)
      nodePort: 30007
---
# LoadBalancer Service; the status stanza shows what the cloud controller
# populates after provisioning (it is ignored on apply).
apiVersion: v1
kind: Service
metadata:
  name: my-service-lb
spec:
  selector:
    app: MyApp
  ports:
    - protocol: TCP
      port: 80
      targetPort: 9376
  type: LoadBalancer
status:
  loadBalancer:
    ingress:
      - ip: 192.0.2.127
servicep.yaml
---
# ClusterIP Service fronting pods labelled app=premium.
apiVersion: v1
kind: Service
metadata:
  name: premium-service
spec:
  selector:
    app: premium
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
services.yaml
---
# ClusterIP Service fronting pods labelled app=standard.
apiVersion: v1
kind: Service
metadata:
  name: standard-service
spec:
  selector:
    app: standard
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
statefulset.yaml
---
# Headless Service (clusterIP: None) providing stable network identity
# for the StatefulSet pods below.
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
    - port: 80
      name: web
  clusterIP: None
  selector:
    app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: statefulset-demo
spec:
  selector:
    matchLabels:
      app: nginx # has to match .spec.template.metadata.labels
  serviceName: "nginx"
  replicas: 3 # by default is 1
  minReadySeconds: 10 # by default is 0
  template:
    metadata:
      labels:
        app: nginx # has to match .spec.selector.matchLabels
    spec:
      terminationGracePeriodSeconds: 10
      containers:
        - name: nginxd
          image: nginx:1.14.2
          ports:
            - containerPort: 80
              name: web
job.yaml
---
# One-shot Job that echoes a message and retries the pod on failure.
apiVersion: batch/v1
kind: Job
metadata:
  name: say-something
spec:
  suspend: false
  template:
    metadata:
      name: say-something
    spec:
      containers:
        - name: say-something
          image: nginx
          command: ["echo", "Running a job"]
      restartPolicy: OnFailure
---
# CronJob running every minute.
apiVersion: batch/v1
kind: CronJob
metadata:
  name: cronjob-hello
spec:
  schedule: "* * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: hello
              image: busybox:1.28
              imagePullPolicy: IfNotPresent
              command:
                - /bin/sh
                - -c
                - date; echo Hello from the Kubernetes cluster
          restartPolicy: OnFailure
configmap.yaml
---
# ConfigMap with both property-like and file-like keys.
apiVersion: v1
kind: ConfigMap
metadata:
  name: game-demo
data:
  # property-like keys; each key maps to a simple value
  player_initial_lives: "3"
  ui_properties_file_name: "user-interface.properties"
  # file-like keys
  game.properties: |
    enemy.types=aliens,monsters
    player.maximum-lives=5
  user-interface.properties: |
    color.good=purple
    color.bad=yellow
    allow.textmode=true
---
# Pod consuming the ConfigMap both as env vars and as mounted files.
apiVersion: v1
kind: Pod
metadata:
  name: configmap-demo-pod
spec:
  containers:
    - name: demo
      image: nginx
      resources:
        limits:
          cpu: 50m
          memory: 100Mi
        requests:
          cpu: 50m
          memory: 100Mi
      env:
        # Define the environment variable
        - name: PLAYER_INITIAL_LIVES # Notice that the case is different here
          # from the key name in the ConfigMap.
          valueFrom:
            configMapKeyRef:
              name: game-demo # The ConfigMap this value comes from.
              key: player_initial_lives # The key to fetch.
        - name: UI_PROPERTIES_FILE_NAME
          valueFrom:
            configMapKeyRef:
              name: game-demo
              key: ui_properties_file_name
      volumeMounts:
        - name: config
          mountPath: "/config"
          readOnly: true
  volumes:
    # You set volumes at the Pod level, then mount them into containers inside that Pod
    - name: config
      configMap:
        # Provide the name of the ConfigMap you want to mount.
        name: game-demo
        # An array of keys from the ConfigMap to create as files
        items:
          - key: "game.properties"
            path: "game.properties"
          - key: "user-interface.properties"
            path: "user-interface.properties"
daemonset.yaml
---
# DaemonSet running one fluentd pod per node.
# NOTE(review): resource name "deamonset-demo" is misspelled in the original;
# kept as-is so existing references to the object still match.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: deamonset-demo
  namespace: default
  labels:
    k8s-app: fluentd-logging
spec:
  selector:
    matchLabels:
      name: fluentd-elasticsearch
  template:
    metadata:
      labels:
        name: fluentd-elasticsearch
    spec:
      tolerations:
        # this toleration is to have the daemonset runnable on master nodes
        # remove it if your masters can't run pods
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
      containers:
        - name: fluentd-elasticsearch
          image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2
          resources:
            limits:
              memory: 200Mi
            requests:
              cpu: 100m
              memory: 200Mi
Comments
Post a Comment