Dataset columns:
  hexsha: stringlengths (40 .. 40)
  size: int64 (24 .. 1.05M)
  ext: stringclasses (2 values)
  lang: stringclasses (1 value)
  max_stars_repo_path: stringlengths (5 .. 269)
  max_stars_repo_name: stringlengths (7 .. 107)
  max_stars_repo_head_hexsha: stringlengths (40 .. 78)
  max_stars_repo_licenses: list
  max_stars_count: int64 (1 .. 84.9k)
  max_stars_repo_stars_event_min_datetime: stringlengths (24 .. 24)
  max_stars_repo_stars_event_max_datetime: stringlengths (24 .. 24)
  max_issues_repo_path: stringlengths (5 .. 269)
  max_issues_repo_name: stringlengths (7 .. 107)
  max_issues_repo_head_hexsha: stringlengths (40 .. 78)
  max_issues_repo_licenses: list
  max_issues_count: int64 (1 .. 116k)
  max_issues_repo_issues_event_min_datetime: stringlengths (24 .. 24)
  max_issues_repo_issues_event_max_datetime: stringlengths (24 .. 24)
  max_forks_repo_path: stringlengths (5 .. 269)
  max_forks_repo_name: stringlengths (7 .. 107)
  max_forks_repo_head_hexsha: stringlengths (40 .. 78)
  max_forks_repo_licenses: list
  max_forks_count: int64 (1 .. 55.9k)
  max_forks_repo_forks_event_min_datetime: stringlengths (24 .. 24)
  max_forks_repo_forks_event_max_datetime: stringlengths (24 .. 24)
  content: stringlengths (24 .. 1.05M)
  avg_line_length: float64 (1 .. 304k)
  max_line_length: int64 (14 .. 1.03M)
  alphanum_fraction: float64 (0 .. 1)
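The columns above describe one record per source file: identity and size, then repository metadata for the most-starred, most-issued, and most-forked copies, the raw file content, and a few text statistics. As a rough illustration of that record layout, here is a minimal sketch in Python; the field names mirror a subset of the schema above, but the StackRow class, the load_yaml_rows helper, and the newline-delimited JSON input file are assumptions for illustration, not part of this dump.

import json
from dataclasses import dataclass, fields
from typing import Optional

@dataclass
class StackRow:
    # Identity and size of the source file (a subset of the columns listed above)
    hexsha: str
    size: int
    ext: str
    lang: str
    # Metadata for the most-starred repository copy of the file
    max_stars_repo_path: str
    max_stars_repo_name: str
    max_stars_count: Optional[int]
    # File body and simple text statistics
    content: str
    avg_line_length: float
    max_line_length: int
    alphanum_fraction: float

def load_yaml_rows(path: str) -> list:
    # Hypothetical reader: one JSON object per line, keyed by the column names above.
    rows = []
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            record = json.loads(line)
            rows.append(StackRow(**{f.name: record.get(f.name) for f in fields(StackRow)}))
    # Keep only YAML manifests, mirroring the records shown below.
    return [r for r in rows if r.lang == "YAML"]

Each record below lists its values in the same column order as the schema, with null where star, issue, or fork event data is missing.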
734f72670c05bb957fdd87bd12cdbbba94a86653
832
yaml
YAML
deployments/zmq-proxies/discord-bucket.yaml
oxylbot/kubernetes
6b55838bf3256decd409d3e84f3d29eafa406346
[ "MIT" ]
null
null
null
deployments/zmq-proxies/discord-bucket.yaml
oxylbot/kubernetes
6b55838bf3256decd409d3e84f3d29eafa406346
[ "MIT" ]
null
null
null
deployments/zmq-proxies/discord-bucket.yaml
oxylbot/kubernetes
6b55838bf3256decd409d3e84f3d29eafa406346
[ "MIT" ]
null
null
null
apiVersion: apps/v1 kind: Deployment metadata: name: discord-bucket-zmq-proxy namespace: "{{namespace}}" spec: selector: matchLabels: app: discord-bucket-zmq-proxy replicas: 1 template: metadata: labels: app: discord-bucket-zmq-proxy zmq-proxy: "true" spec: containers: - name: discord-bucket-zmq-proxy image: oxyl/zmq-proxy:{{tag}} imagePullPolicy: Always env: - name: INCOMING_ADDRESS value: "tcp://0.0.0.0:8900" - name: OUTGOING_ADDRESS value: "tcp://0.0.0.0:8901" - name: INCOMING_TYPE value: "6" - name: OUTGOING_TYPE value: "5" ports: - containerPort: 8900 name: incoming - containerPort: 8901 name: outgoing
24.470588
39
0.551683
734ff2e109652ef2f4388d44e7df4a0d42ea766a
1,219
yaml
YAML
clusters/app.ci/prow/03_deployment/boskos_cleaner.yaml
jewzaam/release
b17192c0bfbd6418b6bb850fd3a3e67aed31a539
[ "Apache-2.0" ]
null
null
null
clusters/app.ci/prow/03_deployment/boskos_cleaner.yaml
jewzaam/release
b17192c0bfbd6418b6bb850fd3a3e67aed31a539
[ "Apache-2.0" ]
null
null
null
clusters/app.ci/prow/03_deployment/boskos_cleaner.yaml
jewzaam/release
b17192c0bfbd6418b6bb850fd3a3e67aed31a539
[ "Apache-2.0" ]
null
null
null
# Boskos cleaner is needed to allow resources created out of dynamic resource lifecycle to # be deleted. # It is responsible for checking if resources that were created based on # dynamic resource lifecycles have child resources defined in their userdata and if yes, # mark those as to be deleted as well. # Afterwards the resources are put into tombstone state which makes boskos delete them # the next time it syncs its config. apiVersion: apps/v1 kind: Deployment metadata: name: boskos-cleaner labels: app: prow component: boskos-cleaner namespace: ci spec: replicas: 1 selector: matchLabels: component: boskos-cleaner template: metadata: labels: app: prow component: boskos-cleaner spec: serviceAccountName: boskos terminationGracePeriodSeconds: 300 containers: - name: boskos-cleaner image: gcr.io/k8s-prow/boskos/cleaner:v20200406-3d3428b91 args: - --boskos-url=http://boskos - --use-v2-implementation=true - --namespace=$(namespace) - --log-level=debug env: - name: namespace valueFrom: fieldRef: fieldPath: metadata.namespace
29.02381
90
0.672683
734ffce6a698fca3a1a1896b954ba22b71193f32
335
yaml
YAML
integration/testdata/manifest/show/workload1.yaml
fstrudel/sheaf
61f4018923c87ca35ccf069cef6a78ae7ce2526b
[ "Apache-2.0" ]
32
2020-02-12T11:22:38.000Z
2021-06-02T17:28:12.000Z
integration/testdata/manifest/show/workload1.yaml
fstrudel/sheaf
61f4018923c87ca35ccf069cef6a78ae7ce2526b
[ "Apache-2.0" ]
50
2020-02-10T13:23:25.000Z
2022-01-28T13:07:42.000Z
integration/testdata/manifest/show/workload1.yaml
fstrudel/sheaf
61f4018923c87ca35ccf069cef6a78ae7ce2526b
[ "Apache-2.0" ]
5
2020-02-10T12:31:16.000Z
2021-09-23T17:39:42.000Z
apiVersion: apps/v1 kind: Deployment metadata: name: workload1 spec: selector: matchLabels: app: hello replicas: 1 template: metadata: labels: app: hello spec: containers: - name: hello image: bryanl/slim-hello-world:v1 ports: - containerPort: 8080
16.75
43
0.576119
735009d1ea0ceb72e4617dff3ca14605ccd09329
404
yaml
YAML
config-root/namespaces/jx/jx-build-controller/jx-build-controller-rb.yaml
martin-noretoft/expert-dollop
aa5569cb79897964f05f967ca976df177a40fc12
[ "Apache-2.0" ]
1
2021-04-22T23:40:27.000Z
2021-04-22T23:40:27.000Z
config-root/namespaces/jx/jx-build-controller/jx-build-controller-rb.yaml
martin-noretoft/expert-dollop
aa5569cb79897964f05f967ca976df177a40fc12
[ "Apache-2.0" ]
302
2020-10-06T15:15:25.000Z
2021-02-05T17:59:24.000Z
config-root/namespaces/jx/jx-build-controller/jx-build-controller-rb.yaml
martin-noretoft/expert-dollop
aa5569cb79897964f05f967ca976df177a40fc12
[ "Apache-2.0" ]
7
2021-04-04T02:25:18.000Z
2021-12-22T09:26:01.000Z
# Source: jx-build-controller/templates/rolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: jx-build-controller namespace: jx labels: gitops.jenkins-x.io/pipeline: 'namespaces' roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: jx-build-controller subjects: - kind: ServiceAccount name: jenkins-x-controllerbuild namespace: jx
23.764706
56
0.757426
73503eaaca445f64f9b7a80896c007c0da6e2366
894
yaml
YAML
config/manifests/podsecuritypolicy-kubernetes-monitoring.yaml
dlaubi-dt/dynatrace-operator
b0599c543229a90f038841af2d695805a03dd398
[ "Apache-2.0" ]
null
null
null
config/manifests/podsecuritypolicy-kubernetes-monitoring.yaml
dlaubi-dt/dynatrace-operator
b0599c543229a90f038841af2d695805a03dd398
[ "Apache-2.0" ]
null
null
null
config/manifests/podsecuritypolicy-kubernetes-monitoring.yaml
dlaubi-dt/dynatrace-operator
b0599c543229a90f038841af2d695805a03dd398
[ "Apache-2.0" ]
null
null
null
apiVersion: policy/v1beta1 kind: PodSecurityPolicy metadata: name: dynatrace-kubernetes-monitoring annotations: seccomp.security.alpha.kubernetes.io/allowedProfileNames: "docker/default" apparmor.security.beta.kubernetes.io/allowedProfileNames: "runtime/default" seccomp.security.alpha.kubernetes.io/defaultProfileName: "docker/default" apparmor.security.beta.kubernetes.io/defaultProfileName: "runtime/default" spec: privileged: false allowPrivilegeEscalation: false readOnlyRootFilesystem: true requiredDropCapabilities: - ALL volumes: - "configMap" - "emptyDir" - "projected" - "secret" - "downwardAPI" - "persistentVolumeClaim" hostNetwork: false hostIPC: false hostPID: false runAsUser: rule: "MustRunAsNonRoot" seLinux: rule: "RunAsAny" supplementalGroups: rule: "RunAsAny" fsGroup: rule: "RunAsAny"
26.294118
79
0.739374
735066913eaa835e47ee2136cd5341ad53045755
2,948
yaml
YAML
manifests/kustomize/base/pipeline/ml-pipeline-apiserver-deployment.yaml
evan-hataishi/kfp-tekton
6e1f367841c7add4ca13e5472220939846da81b0
[ "Apache-2.0" ]
1
2020-06-24T17:27:08.000Z
2020-06-24T17:27:08.000Z
manifests/kustomize/base/pipeline/ml-pipeline-apiserver-deployment.yaml
fenglixa/kfp-tekton
991d7dcf16fd939787ba7e779842a359d406b8b0
[ "Apache-2.0" ]
1,720
2021-01-25T09:32:00.000Z
2022-03-31T08:09:51.000Z
manifests/kustomize/base/pipeline/ml-pipeline-apiserver-deployment.yaml
fenglixa/kfp-tekton
991d7dcf16fd939787ba7e779842a359d406b8b0
[ "Apache-2.0" ]
null
null
null
apiVersion: apps/v1 kind: Deployment metadata: labels: app: ml-pipeline name: ml-pipeline spec: selector: matchLabels: app: ml-pipeline template: metadata: labels: app: ml-pipeline spec: containers: - env: - name: AUTO_UPDATE_PIPELINE_DEFAULT_VERSION valueFrom: configMapKeyRef: name: pipeline-install-config key: autoUpdatePipelineDefaultVersion - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - name: OBJECTSTORECONFIG_SECURE value: "false" - name: OBJECTSTORECONFIG_BUCKETNAME valueFrom: configMapKeyRef: name: pipeline-install-config key: bucketName - name: DBCONFIG_USER valueFrom: secretKeyRef: name: mysql-secret key: username - name: DBCONFIG_PASSWORD valueFrom: secretKeyRef: name: mysql-secret key: password - name: DBCONFIG_DBNAME valueFrom: configMapKeyRef: name: pipeline-install-config key: pipelineDb - name: DBCONFIG_HOST valueFrom: configMapKeyRef: name: pipeline-install-config key: dbHost - name: DBCONFIG_PORT valueFrom: configMapKeyRef: name: pipeline-install-config key: dbPort - name: OBJECTSTORECONFIG_ACCESSKEY valueFrom: secretKeyRef: name: mlpipeline-minio-artifact key: accesskey - name: OBJECTSTORECONFIG_SECRETACCESSKEY valueFrom: secretKeyRef: name: mlpipeline-minio-artifact key: secretkey - name: PIPELINE_RUNTIME value: tekton image: gcr.io/ml-pipeline/api-server:dummy imagePullPolicy: Always name: ml-pipeline-api-server ports: - name: http containerPort: 8888 - name: grpc containerPort: 8887 readinessProbe: exec: command: - wget - -q # quiet - -S # show server response - -O - "-" # Redirect output to stdout - http://localhost:8888/apis/v1beta1/healthz initialDelaySeconds: 3 periodSeconds: 5 timeoutSeconds: 2 livenessProbe: exec: command: - wget - -q # quiet - -S # show server response - -O - "-" # Redirect output to stdout - http://localhost:8888/apis/v1beta1/healthz initialDelaySeconds: 3 periodSeconds: 5 timeoutSeconds: 2 serviceAccountName: ml-pipeline
28.346154
58
0.522049
7350d00c8f3973369e449468683b25bc94d3311a
211
yaml
YAML
content/zh/examples/policy/priority-class-resourcequota.yaml
rendiputra/website
4b93c0608828e685881be1662e766d696f0b097b
[ "CC-BY-4.0" ]
3,157
2017-10-18T13:28:53.000Z
2022-03-31T06:41:57.000Z
content/zh/examples/policy/priority-class-resourcequota.yaml
rendiputra/website
4b93c0608828e685881be1662e766d696f0b097b
[ "CC-BY-4.0" ]
27,074
2017-10-18T09:53:11.000Z
2022-03-31T23:57:19.000Z
content/zh/examples/policy/priority-class-resourcequota.yaml
rendiputra/website
4b93c0608828e685881be1662e766d696f0b097b
[ "CC-BY-4.0" ]
11,539
2017-10-18T15:54:11.000Z
2022-03-31T12:51:54.000Z
apiVersion: v1 kind: ResourceQuota metadata: name: pods-cluster-services spec: scopeSelector: matchExpressions: - operator : In scopeName: PriorityClass values: ["cluster-services"]
21.1
36
0.691943
73512e5c8c879a849700b70175940eb9ecf6b1fc
341
yml
YAML
kube-samples/deployment.yml
maskiran/sbf-api
3a0bf708606668f92939e3dde9c276112a547c1c
[ "Apache-2.0" ]
null
null
null
kube-samples/deployment.yml
maskiran/sbf-api
3a0bf708606668f92939e3dde9c276112a547c1c
[ "Apache-2.0" ]
null
null
null
kube-samples/deployment.yml
maskiran/sbf-api
3a0bf708606668f92939e3dde9c276112a547c1c
[ "Apache-2.0" ]
null
null
null
apiVersion: apps/v1 kind: Deployment metadata: name: echoserver spec: selector: matchLabels: app: echoserver replicas: 1 template: metadata: labels: app: echoserver spec: containers: - name: echoserver image: k8s.gcr.io/echoserver:1.4 ports: - containerPort: 8080
16.238095
40
0.607038
73513e7d167d211e08cfe38521aa78febc84523f
149
yaml
YAML
infrastructure/chartmuseum/storageclass.yaml
Hope3r/fluxcd-helm-github
aaeab7f2f2c8ec8502b6d9df864d4b4fdf814325
[ "Apache-2.0" ]
null
null
null
infrastructure/chartmuseum/storageclass.yaml
Hope3r/fluxcd-helm-github
aaeab7f2f2c8ec8502b6d9df864d4b4fdf814325
[ "Apache-2.0" ]
null
null
null
infrastructure/chartmuseum/storageclass.yaml
Hope3r/fluxcd-helm-github
aaeab7f2f2c8ec8502b6d9df864d4b4fdf814325
[ "Apache-2.0" ]
null
null
null
apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: chartmuseum provisioner: kubernetes.io/no-provisioner volumeBindingMode: Immediate
24.833333
41
0.838926
7351a506f2867887b6357d0f6b8a18a4a2faee9b
338
yaml
YAML
config/rbac/foo_viewer_role.yaml
pkbhowmick/kubebuilder-sample-controller
3afef9eec5d9706ef846097d250a03255f566360
[ "Apache-2.0" ]
1
2021-08-12T15:30:01.000Z
2021-08-12T15:30:01.000Z
config/rbac/foo_viewer_role.yaml
pkbhowmick/kubebuilder-sample-controller
3afef9eec5d9706ef846097d250a03255f566360
[ "Apache-2.0" ]
null
null
null
config/rbac/foo_viewer_role.yaml
pkbhowmick/kubebuilder-sample-controller
3afef9eec5d9706ef846097d250a03255f566360
[ "Apache-2.0" ]
null
null
null
# permissions for end users to view foos. apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: foo-viewer-role rules: - apiGroups: - samplecontroller.example.com resources: - foos verbs: - get - list - watch - apiGroups: - samplecontroller.example.com resources: - foos/status verbs: - get
16.095238
41
0.704142
73523708b79f08282ab88c84ced011bbae6f3b03
487
yaml
YAML
pv/storageclass-azuredisk-managed.yaml
stcheng/demo
21d20c44c428af1bcb1524d9249758f4e31ca923
[ "MIT" ]
null
null
null
pv/storageclass-azuredisk-managed.yaml
stcheng/demo
21d20c44c428af1bcb1524d9249758f4e31ca923
[ "MIT" ]
null
null
null
pv/storageclass-azuredisk-managed.yaml
stcheng/demo
21d20c44c428af1bcb1524d9249758f4e31ca923
[ "MIT" ]
null
null
null
kind: StorageClass apiVersion: storage.k8s.io/v1 metadata: name: hdd provisioner: kubernetes.io/azure-disk parameters: skuname: Standard_LRS #alias: storageaccounttype, available values: Standard_LRS, Premium_LRS, (and StandardSSD_LRS, UltraSSD_LRS are supported from k8s v1.13.0) kind: managed # value "dedicated", "shared" are deprecated since it's using unmanaged disk cachingMode: ReadOnly # Only Disk CachingType 'None' is supported for disk with size greater than 4095 GB
48.7
164
0.792608
7352383714717bc51db9a1da7a16b3d6dba9b821
936
yaml
YAML
examples/k8s/dyn/dyn-sc.yaml
NetApp/beegfs-csi-driver
316c1cdac57365ab39c56aabad4354c153eb8579
[ "Apache-2.0" ]
26
2021-02-12T21:29:39.000Z
2022-03-29T06:41:56.000Z
examples/k8s/dyn/dyn-sc.yaml
NetApp/beegfs-csi-driver
316c1cdac57365ab39c56aabad4354c153eb8579
[ "Apache-2.0" ]
5
2021-03-18T19:52:22.000Z
2022-03-30T14:57:54.000Z
examples/k8s/dyn/dyn-sc.yaml
NetApp/beegfs-csi-driver
316c1cdac57365ab39c56aabad4354c153eb8579
[ "Apache-2.0" ]
5
2021-02-12T22:48:53.000Z
2022-03-29T06:41:59.000Z
# Copyright 2021 NetApp, Inc. All Rights Reserved. # Licensed under the Apache License, Version 2.0. apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: csi-beegfs-dyn-sc provisioner: beegfs.csi.netapp.com parameters: # Replace "localhost" with the IP address or hostname of the BeeGFS management daemon. sysMgmtdHost: localhost # Replace "name" with a unique k8s cluster name to prevent multiple k8s clusters from dynamically provisioning volumes at the same BeeGFS path. volDirBasePath: k8s/name/dyn # Optionally configure the default stripePattern parameters. All Storage Class values must be strings. Quotes are # required on integers. # stripePattern/storagePoolID: "1" # stripePattern/chunkSize: 512k # stripePattern/numTargets: "4" # permissions/uid: "1000" # permissions/gid: "1000" # permissions/mode: "0644" reclaimPolicy: Delete volumeBindingMode: Immediate allowVolumeExpansion: false
39
145
0.777778
7352961464b072cb7ad91d6fee978681dbbb2bc9
581
yaml
YAML
tests/04-oidc-mocha-job.yaml
bugslifesolutions/helm-nifi
ef77e4648146333541c6bd704d9d1cbd2576b8b3
[ "Apache-2.0" ]
null
null
null
tests/04-oidc-mocha-job.yaml
bugslifesolutions/helm-nifi
ef77e4648146333541c6bd704d9d1cbd2576b8b3
[ "Apache-2.0" ]
null
null
null
tests/04-oidc-mocha-job.yaml
bugslifesolutions/helm-nifi
ef77e4648146333541c6bd704d9d1cbd2576b8b3
[ "Apache-2.0" ]
null
null
null
apiVersion: batch/v1 kind: Job metadata: name: oidc-mocha spec: template: spec: containers: - name: node image: node command: - /bin/bash - -x - -c - | yarn add puppeteer-core yarn add chai yarn add mocha node_modules/mocha/bin/mocha /tests/04-oidc-login-test.js --timeout 30000 volumeMounts: - name: tests mountPath: /tests restartPolicy: Never volumes: - name: tests configMap: name: 04-oidc-login-test
20.75
83
0.521515
7352caec1088879ba813c34db1f822fced566437
1,004
yaml
YAML
K8s/manifests/storageclass_output.yaml
hpides/Request-Generator
40d7d8b02a74ea7fde840ed3d50e0563514bc9fe
[ "Apache-2.0" ]
null
null
null
K8s/manifests/storageclass_output.yaml
hpides/Request-Generator
40d7d8b02a74ea7fde840ed3d50e0563514bc9fe
[ "Apache-2.0" ]
null
null
null
K8s/manifests/storageclass_output.yaml
hpides/Request-Generator
40d7d8b02a74ea7fde840ed3d50e0563514bc9fe
[ "Apache-2.0" ]
null
null
null
# # WALT - A realistic load generator for web applications. # # Copyright 2020 Eric Ackermann <[email protected]>, Hendrik Bomhardt # <[email protected]>, Benito Buchheim # <[email protected]>, Juergen Schlossbauer # <[email protected]> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # kind: StorageClass apiVersion: storage.k8s.io/v1 metadata: name: output-storage provisioner: kubernetes.io/no-provisioner volumeBindingMode: WaitForFirstConsumer
37.185185
81
0.778884
735363a30f0ccbad253ba9a865f6ab35848a74c6
882
yml
YAML
apps/docs/deployment.yml
ValetteValette/gitops-istio
68d646c34a4882f7847c7f29f7c059215910a9aa
[ "Apache-2.0" ]
null
null
null
apps/docs/deployment.yml
ValetteValette/gitops-istio
68d646c34a4882f7847c7f29f7c059215910a9aa
[ "Apache-2.0" ]
null
null
null
apps/docs/deployment.yml
ValetteValette/gitops-istio
68d646c34a4882f7847c7f29f7c059215910a9aa
[ "Apache-2.0" ]
null
null
null
apiVersion: apps/v1 kind: Deployment metadata: name: platform-docs namespace: lcs labels: app: platform-docs spec: minReadySeconds: 5 revisionHistoryLimit: 5 progressDeadlineSeconds: 60 strategy: rollingUpdate: maxUnavailable: 0 type: RollingUpdate selector: matchLabels: app: platform-docs template: metadata: labels: app: platform-docs spec: imagePullSecrets: - name: ecr-docker-login containers: - name: platform-docs image: 615740825886.dkr.ecr.eu-west-3.amazonaws.com/lynceus-platform-docs:0.5.2 imagePullPolicy: IfNotPresent ports: - containerPort: 8000 name: http protocol: TCP resources: limits: cpu: 100m memory: 128Mi requests: cpu: 100m memory: 128Mi
21.512195
87
0.603175
73539a7116f79748f9104af171ed4d4733e465e5
471
yaml
YAML
manifests/prometheus-roleBindingConfig.yaml
markus-codes/kube-prometheus
ade7f0d2f1b5b027b98c3e41db314e986b3b63ca
[ "Apache-2.0" ]
2,062
2020-08-05T15:32:33.000Z
2022-03-31T20:04:30.000Z
manifests/prometheus-roleBindingConfig.yaml
markus-codes/kube-prometheus
ade7f0d2f1b5b027b98c3e41db314e986b3b63ca
[ "Apache-2.0" ]
731
2020-08-05T13:54:27.000Z
2022-03-31T19:29:20.000Z
manifests/prometheus-roleBindingConfig.yaml
markus-codes/kube-prometheus
ade7f0d2f1b5b027b98c3e41db314e986b3b63ca
[ "Apache-2.0" ]
818
2020-08-05T17:42:19.000Z
2022-03-30T10:17:47.000Z
apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: labels: app.kubernetes.io/component: prometheus app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: kube-prometheus app.kubernetes.io/version: 2.30.3 name: prometheus-k8s-config namespace: monitoring roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: prometheus-k8s-config subjects: - kind: ServiceAccount name: prometheus-k8s namespace: monitoring
24.789474
46
0.762208
7353a03e82dea38914a02eeeed8477743cb9af77
3,542
yaml
YAML
config/prow/cluster/hook_deployment.yaml
mcshooter/test-infra
c1043ab1c1ffebd8645b83b8fb496b4cb15cf9a9
[ "Apache-2.0" ]
null
null
null
config/prow/cluster/hook_deployment.yaml
mcshooter/test-infra
c1043ab1c1ffebd8645b83b8fb496b4cb15cf9a9
[ "Apache-2.0" ]
null
null
null
config/prow/cluster/hook_deployment.yaml
mcshooter/test-infra
c1043ab1c1ffebd8645b83b8fb496b4cb15cf9a9
[ "Apache-2.0" ]
null
null
null
# Copyright 2016 The Kubernetes Authors All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: apps/v1 kind: Deployment metadata: namespace: default name: hook labels: app: hook spec: replicas: 4 strategy: type: RollingUpdate rollingUpdate: maxSurge: 1 maxUnavailable: 1 selector: matchLabels: app: hook template: metadata: labels: app: hook spec: serviceAccountName: hook terminationGracePeriodSeconds: 180 containers: - name: hook image: gcr.io/k8s-prow/hook:v20210906-6cb55987fb imagePullPolicy: Always args: - --dry-run=false - --slack-token-file=/etc/slack/token - --github-endpoint=http://ghproxy - --github-endpoint=https://api.github.com - --github-token-path=/etc/github/oauth - --config-path=/etc/config/config.yaml - --job-config-path=/etc/job-config env: # Use KUBECONFIG envvar rather than --kubeconfig flag in order to provide multiple configs to merge. - name: KUBECONFIG value: "/etc/kubeconfig/config" ports: - name: http containerPort: 8888 - name: metrics containerPort: 9090 volumeMounts: - name: slack mountPath: /etc/slack - name: hmac mountPath: /etc/webhook readOnly: true - name: oauth mountPath: /etc/github readOnly: true - name: config mountPath: /etc/config readOnly: true - name: job-config mountPath: /etc/job-config readOnly: true - name: plugins mountPath: /etc/plugins readOnly: true - name: cat-api mountPath: /etc/cat-api readOnly: true - name: unsplash-api mountPath: /etc/unsplash-api readOnly: true - name: kubeconfig mountPath: /etc/kubeconfig readOnly: true livenessProbe: httpGet: path: /healthz port: 8081 initialDelaySeconds: 3 periodSeconds: 3 readinessProbe: httpGet: path: /healthz/ready port: 8081 initialDelaySeconds: 10 periodSeconds: 3 timeoutSeconds: 600 volumes: - name: slack secret: secretName: slack-token - name: hmac secret: secretName: hmac-token - name: oauth secret: secretName: oauth-token - name: config configMap: name: config - name: job-config configMap: name: job-config - name: plugins configMap: name: plugins - name: cat-api configMap: name: cat-api-key - name: unsplash-api secret: secretName: unsplash-api-key - name: kubeconfig secret: defaultMode: 420 secretName: kubeconfig
27.457364
108
0.580745
7353b77dad10bf20f4ace2db27ede49917749831
2,180
yaml
YAML
.jx/git-operator/job.yaml
tianjinxgb/jx3-bootstrap-config
15c749700acdf9203e8734facddd24da0e0fe536
[ "Apache-2.0" ]
2
2020-08-03T12:09:38.000Z
2020-08-03T12:09:41.000Z
.jx/git-operator/job.yaml
jstrachan/jx3-demo1-dev
58747fdd3c03c28ad3c84bf37ddf0438ed431c43
[ "Apache-2.0" ]
null
null
null
.jx/git-operator/job.yaml
jstrachan/jx3-demo1-dev
58747fdd3c03c28ad3c84bf37ddf0438ed431c43
[ "Apache-2.0" ]
null
null
null
apiVersion: batch/v1 kind: Job metadata: labels: app: jx-boot jenkins-x.io/kind: jx-git-operator spec: backoffLimit: 4 completions: 1 parallelism: 1 template: metadata: labels: app: jx-boot jenkins-x.io/kind: jx-git-operator spec: initContainers: - args: - '-c' - 'mkdir -p $HOME; git config --global --add user.name $GIT_AUTHOR_NAME; git config --global --add user.email $GIT_AUTHOR_EMAIL; git config --global credential.helper store; git clone ${GIT_URL} ${GIT_SUB_DIR}; echo cloned url: $(inputs.params.url) to dir: ${GIT_SUB_DIR}; cd ${GIT_SUB_DIR}; git checkout ${GIT_REVISION}; echo checked out revision: ${GIT_REVISION} to dir: ${GIT_SUB_DIR}' command: - /bin/sh env: - name: GIT_URL valueFrom: secretKeyRef: key: url name: jx-boot - name: GIT_REVISION value: master - name: GIT_SUB_DIR value: source - name: GIT_AUTHOR_EMAIL value: [email protected] - name: GIT_AUTHOR_NAME value: jenkins-x-labs-bot - name: GIT_COMMITTER_EMAIL value: [email protected] - name: GIT_COMMITTER_NAME value: jenkins-x-labs-bot - name: XDG_CONFIG_HOME value: /workspace/xdg_config image: gcr.io/jenkinsxio-labs-private/jx-cli:0.0.214 name: git-clone volumeMounts: - mountPath: /workspace name: workspace-volume workingDir: /workspace containers: - args: - apply command: - make image: gcr.io/jenkinsxio-labs-private/jx-cli:0.0.214 imagePullPolicy: Always name: job volumeMounts: - mountPath: /workspace name: workspace-volume workingDir: /workspace/source dnsPolicy: ClusterFirst restartPolicy: Never schedulerName: default-scheduler serviceAccountName: jx-boot-job terminationGracePeriodSeconds: 30 volumes: - name: workspace-volume emptyDir: {}
29.066667
92
0.584862
7353cf09b9e5af97866155a555e4470c4990c580
455
yaml
YAML
manifests/grafana-dashboardSources.yaml
zh202821836/kube-prometheus
6e7c31013864dbb4dc9d88d4812f5b00bb36dccb
[ "Apache-2.0" ]
null
null
null
manifests/grafana-dashboardSources.yaml
zh202821836/kube-prometheus
6e7c31013864dbb4dc9d88d4812f5b00bb36dccb
[ "Apache-2.0" ]
null
null
null
manifests/grafana-dashboardSources.yaml
zh202821836/kube-prometheus
6e7c31013864dbb4dc9d88d4812f5b00bb36dccb
[ "Apache-2.0" ]
null
null
null
apiVersion: v1 data: dashboards.yaml: |- { "apiVersion": 1, "providers": [ { "folder": "Default", "name": "0", "options": { "path": "/grafana-dashboard-definitions/0" }, "orgId": 1, "type": "file" } ] } kind: ConfigMap metadata: name: grafana-dashboards namespace: monitoring2
20.681818
62
0.402198
735409af5043dbddcfdbfacae4a2c1abb674e1e7
336
yaml
YAML
deploy/rbac/leader_election_role_binding.yaml
JustinKuli/cert-policy-controller
600d892ace1ec4ef850941a9ebec68fb4955e75b
[ "Apache-2.0" ]
1
2022-01-18T18:58:11.000Z
2022-01-18T18:58:11.000Z
deploy/rbac/leader_election_role_binding.yaml
JustinKuli/cert-policy-controller
600d892ace1ec4ef850941a9ebec68fb4955e75b
[ "Apache-2.0" ]
46
2021-03-08T14:46:42.000Z
2021-12-15T16:37:46.000Z
deploy/rbac/leader_election_role_binding.yaml
JustinKuli/cert-policy-controller
600d892ace1ec4ef850941a9ebec68fb4955e75b
[ "Apache-2.0" ]
8
2021-04-05T17:56:04.000Z
2021-12-15T16:29:47.000Z
apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: cert-policy-controller-leader-election roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: cert-policy-controller-leader-election subjects: - kind: ServiceAccount name: cert-policy-controller namespace: open-cluster-management-agent-addon
25.846154
48
0.797619
73540f5547ef0e40e3cd2daa1d28c044802e2c90
1,159
yaml
YAML
oidc-proxy-dashboard/deployment.yaml
carahsoft/oidc-kubernetes
f07c103fec2aa7609ee56c1753bde0b906515cec
[ "MIT" ]
null
null
null
oidc-proxy-dashboard/deployment.yaml
carahsoft/oidc-kubernetes
f07c103fec2aa7609ee56c1753bde0b906515cec
[ "MIT" ]
null
null
null
oidc-proxy-dashboard/deployment.yaml
carahsoft/oidc-kubernetes
f07c103fec2aa7609ee56c1753bde0b906515cec
[ "MIT" ]
null
null
null
--- kind: Deployment apiVersion: apps/v1 metadata: labels: app: kubernetes-dashboard-oidc name: kubernetes-dashboard-oidc namespace: kube-system spec: replicas: 2 revisionHistoryLimit: 2 selector: matchLabels: app: kubernetes-dashboard-oidc template: metadata: labels: app: kubernetes-dashboard-oidc spec: containers: - name: openresty-oidc image: myobplatform/openresty-oidc:1.1.1 ports: - containerPort: 9000 protocol: TCP env: - name: OIDC_CLIENT_ID valueFrom: secretKeyRef: name: kubernetes-dashboard-oidc key: client_id - name: OIDC_CLIENT_SECRET valueFrom: secretKeyRef: name: kubernetes-dashboard-oidc key: client_secret volumeMounts: - name: openresty-conf mountPath: /conf readOnly: true args: - "-c" - "/conf/nginx.conf" volumes: - name: openresty-conf configMap: name: kubernetes-dashboard-oidc-config
24.145833
50
0.556514
7354104731d8cbca7c310412590a71d1e57f4d10
342
yaml
YAML
applications/echo-server/kube/sidecar/configmap.yaml
bygui86/kubernetes-tests
3f85c9d2fd0dabea48e7d20b018c2323f1c6f4fa
[ "Apache-2.0" ]
1
2021-08-25T14:58:48.000Z
2021-08-25T14:58:48.000Z
applications/echo-server/kube/shell-probes/configmap.yaml
bygui86/kubernetes-tests
3f85c9d2fd0dabea48e7d20b018c2323f1c6f4fa
[ "Apache-2.0" ]
null
null
null
applications/echo-server/kube/shell-probes/configmap.yaml
bygui86/kubernetes-tests
3f85c9d2fd0dabea48e7d20b018c2323f1c6f4fa
[ "Apache-2.0" ]
1
2019-08-21T14:39:32.000Z
2019-08-21T14:39:32.000Z
apiVersion: v1 kind: ConfigMap metadata: name: echo-server labels: app: echo-server group: sample # data: # ECHOSERVER_KUBE_HOST: "0.0.0.0" # ECHOSERVER_KUBE_PORT: "7090" # ECHOSERVER_KUBE_SHUTDOWN_TIMEOUT: "15" # ECHOSERVER_REST_HOST: "0.0.0.0" # ECHOSERVER_REST_PORT: "7001" # ECHOSERVER_REST_SHUTDOWN_TIMEOUT: "15"
22.8
42
0.716374
735461b1896101b9da0679c4ffc98f5383c03697
862
yaml
YAML
operations/helm/tempo-microservices/templates/configmap-tempo-compactor.yaml
Irio/tempo
177040616edc35d835bb47020c26fe6cac49894b
[ "Apache-2.0" ]
null
null
null
operations/helm/tempo-microservices/templates/configmap-tempo-compactor.yaml
Irio/tempo
177040616edc35d835bb47020c26fe6cac49894b
[ "Apache-2.0" ]
null
null
null
operations/helm/tempo-microservices/templates/configmap-tempo-compactor.yaml
Irio/tempo
177040616edc35d835bb47020c26fe6cac49894b
[ "Apache-2.0" ]
null
null
null
apiVersion: v1 kind: ConfigMap metadata: name: tempo-compactor data: tempo.yaml: | auth_enabled: false compactor: compaction: block_retention: {{ .Values.retention }} ring: kvstore: store: memberlist memberlist: abort_if_cluster_join_fails: false bind_port: 7946 join_members: - gossip-ring.default.svc.cluster.local:7946 server: http_listen_port: 3100 storage: trace: {{- toYaml .Values.backend | nindent 12 }} maintenance_cycle: 10m memcached: consistent_hash: true host: memcached service: memcached-client timeout: 500ms pool: queue_depth: 2000 wal: path: /var/tempo/wal
26.121212
54
0.530162
73547e2ad9e2fb621495c8db920d645e7950df64
221
yaml
YAML
config/overlays/psp/resources/psp_role_restricted.yaml
nikore/istio-operator
cf5bca83c739efd4157e242994c3479c2140fb0a
[ "Apache-2.0" ]
529
2019-02-27T12:43:56.000Z
2022-03-30T00:58:33.000Z
config/overlays/psp/resources/psp_role_restricted.yaml
nikore/istio-operator
cf5bca83c739efd4157e242994c3479c2140fb0a
[ "Apache-2.0" ]
226
2019-02-27T12:42:22.000Z
2022-03-31T07:24:32.000Z
config/overlays/psp/resources/psp_role_restricted.yaml
nikore/istio-operator
cf5bca83c739efd4157e242994c3479c2140fb0a
[ "Apache-2.0" ]
109
2019-02-27T13:20:39.000Z
2022-03-09T01:49:29.000Z
apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: psp-restricted rules: - apiGroups: - policy resourceNames: - istio-operator-psp-restricted resources: - podsecuritypolicies verbs: - use
15.785714
40
0.733032
7354a6e979af2877947cfceb8382799af6a5b9f4
15,440
yaml
YAML
contrib/kube-prometheus/manifests/prometheus-rules.yaml
yurrriq/prometheus-operator
4263ea055a146d3eca8cfb24a4fb4bb3a9ea033f
[ "Apache-2.0" ]
1
2019-07-29T10:37:23.000Z
2019-07-29T10:37:23.000Z
contrib/kube-prometheus/manifests/prometheus-rules.yaml
s-mansouri/prometheus-operator
4263ea055a146d3eca8cfb24a4fb4bb3a9ea033f
[ "Apache-2.0" ]
null
null
null
contrib/kube-prometheus/manifests/prometheus-rules.yaml
s-mansouri/prometheus-operator
4263ea055a146d3eca8cfb24a4fb4bb3a9ea033f
[ "Apache-2.0" ]
null
null
null
apiVersion: v1 data: all.rules.yaml: "\"groups\": \n- \"name\": \"k8s.rules\"\n \"rules\": \n - \"expr\": |\n sum(rate(container_cpu_usage_seconds_total{job=\"kubelet\", image!=\"\"}[5m])) by (namespace)\n \"record\": \"namespace:container_cpu_usage_seconds_total:sum_rate\"\n \ - \"expr\": |\n sum(container_memory_usage_bytes{job=\"kubelet\", image!=\"\"}) by (namespace)\n \"record\": \"namespace:container_memory_usage_bytes:sum\"\n \ - \"expr\": |\n sum by (namespace, label_name) (\n sum(rate(container_cpu_usage_seconds_total{job=\"kubelet\", image!=\"\"}[5m])) by (namespace, pod_name)\n * on (namespace, pod_name) group_left(label_name)\n label_replace(kube_pod_labels{job=\"kube-state-metrics\"}, \"pod_name\", \"$1\", \"pod\", \"(.*)\")\n )\n \"record\": \"namespace_name:container_cpu_usage_seconds_total:sum_rate\"\n \ - \"expr\": |\n sum by (namespace, label_name) (\n sum(container_memory_usage_bytes{job=\"kubelet\",image!=\"\"}) by (pod_name, namespace)\n * on (namespace, pod_name) group_left(label_name)\n \ label_replace(kube_pod_labels{job=\"kube-state-metrics\"}, \"pod_name\", \"$1\", \"pod\", \"(.*)\")\n )\n \"record\": \"namespace_name:container_memory_usage_bytes:sum\"\n \ - \"expr\": |\n sum by (namespace, label_name) (\n sum(kube_pod_container_resource_requests_memory_bytes{job=\"kube-state-metrics\"}) by (namespace, pod)\n * on (namespace, pod) group_left(label_name)\n label_replace(kube_pod_labels{job=\"kube-state-metrics\"}, \"pod_name\", \"$1\", \"pod\", \"(.*)\")\n )\n \"record\": \"namespace_name:kube_pod_container_resource_requests_memory_bytes:sum\"\n \ - \"expr\": |\n sum by (namespace, label_name) (\n sum(kube_pod_container_resource_requests_cpu_cores{job=\"kube-state-metrics\"} and on(pod) kube_pod_status_scheduled{condition=\"true\"}) by (namespace, pod)\n \ * on (namespace, pod) group_left(label_name)\n label_replace(kube_pod_labels{job=\"kube-state-metrics\"}, \"pod_name\", \"$1\", \"pod\", \"(.*)\")\n )\n \"record\": \"namespace_name:kube_pod_container_resource_requests_cpu_cores:sum\"\n- \"name\": \"node.rules\"\n \"rules\": \n - \"expr\": \"sum(min(kube_pod_info) by (node))\"\n \"record\": \":kube_pod_info_node_count:\"\n - \"expr\": |\n \ max(label_replace(kube_pod_info{job=\"kube-state-metrics\"}, \"pod\", \"$1\", \"pod\", \"(.*)\")) by (node, namespace, pod)\n \"record\": \"node_namespace_pod:kube_pod_info:\"\n \ - \"expr\": |\n count by (node) (sum by (node, cpu) (\n node_cpu{job=\"node-exporter\"}\n \ * on (namespace, pod) group_left(node)\n node_namespace_pod:kube_pod_info:\n \ ))\n \"record\": \"node:node_num_cpu:sum\"\n - \"expr\": |\n 1 - avg(rate(node_cpu{job=\"node-exporter\",mode=\"idle\"}[1m]))\n \"record\": \":node_cpu_utilisation:avg1m\"\n - \"expr\": |\n 1 - avg by (node) (\n \ rate(node_cpu{job=\"node-exporter\",mode=\"idle\"}[1m])\n * on (namespace, pod) group_left(node)\n node_namespace_pod:kube_pod_info:)\n \"record\": \"node:node_cpu_utilisation:avg1m\"\n - \"expr\": |\n sum(node_load1{job=\"node-exporter\"})\n \ /\n sum(node:node_num_cpu:sum)\n \"record\": \":node_cpu_saturation_load1:\"\n \ - \"expr\": |\n sum by (node) (\n node_load1{job=\"node-exporter\"}\n \ * on (namespace, pod) group_left(node)\n node_namespace_pod:kube_pod_info:\n \ )\n /\n node:node_num_cpu:sum\n \"record\": \"node:node_cpu_saturation_load1:\"\n \ - \"expr\": |\n 1 -\n sum(node_memory_MemFree{job=\"node-exporter\"} + node_memory_Cached{job=\"node-exporter\"} + node_memory_Buffers{job=\"node-exporter\"})\n \ /\n sum(node_memory_MemTotal{job=\"node-exporter\"})\n 
\"record\": \":node_memory_utilisation:\"\n - \"expr\": |\n sum by (node) (\n (node_memory_MemFree{job=\"node-exporter\"} + node_memory_Cached{job=\"node-exporter\"} + node_memory_Buffers{job=\"node-exporter\"})\n \ * on (namespace, pod) group_left(node)\n node_namespace_pod:kube_pod_info:\n \ )\n \"record\": \"node:node_memory_bytes_available:sum\"\n - \"expr\": |\n sum by (node) (\n node_memory_MemTotal{job=\"node-exporter\"}\n \ * on (namespace, pod) group_left(node)\n node_namespace_pod:kube_pod_info:\n \ )\n \"record\": \"node:node_memory_bytes_total:sum\"\n - \"expr\": |\n \ (node:node_memory_bytes_total:sum - node:node_memory_bytes_available:sum)\n \ /\n scalar(sum(node:node_memory_bytes_total:sum))\n \"record\": \"node:node_memory_utilisation:ratio\"\n - \"expr\": |\n 1e3 * sum(\n (rate(node_vmstat_pgpgin{job=\"node-exporter\"}[1m])\n \ + rate(node_vmstat_pgpgout{job=\"node-exporter\"}[1m]))\n )\n \"record\": \":node_memory_swap_io_bytes:sum_rate\"\n - \"expr\": |\n 1 -\n sum by (node) (\n (node_memory_MemFree{job=\"node-exporter\"} + node_memory_Cached{job=\"node-exporter\"} + node_memory_Buffers{job=\"node-exporter\"})\n * on (namespace, pod) group_left(node)\n \ node_namespace_pod:kube_pod_info:\n )\n /\n sum by (node) (\n node_memory_MemTotal{job=\"node-exporter\"}\n * on (namespace, pod) group_left(node)\n node_namespace_pod:kube_pod_info:\n )\n \"record\": \"node:node_memory_utilisation:\"\n - \"expr\": |\n 1 - (node:node_memory_bytes_available:sum / node:node_memory_bytes_total:sum)\n \"record\": \"node:node_memory_utilisation_2:\"\n \ - \"expr\": |\n 1e3 * sum by (node) (\n (rate(node_vmstat_pgpgin{job=\"node-exporter\"}[1m])\n \ + rate(node_vmstat_pgpgout{job=\"node-exporter\"}[1m]))\n * on (namespace, pod) group_left(node)\n node_namespace_pod:kube_pod_info:\n )\n \"record\": \"node:node_memory_swap_io_bytes:sum_rate\"\n - \"expr\": |\n avg(irate(node_disk_io_time_ms{job=\"node-exporter\",device=~\"(sd|xvd).+\"}[1m]) / 1e3)\n \"record\": \":node_disk_utilisation:avg_irate\"\n - \"expr\": |\n \ avg by (node) (\n irate(node_disk_io_time_ms{job=\"node-exporter\",device=~\"(sd|xvd).+\"}[1m]) / 1e3\n * on (namespace, pod) group_left(node)\n node_namespace_pod:kube_pod_info:\n \ )\n \"record\": \"node:node_disk_utilisation:avg_irate\"\n - \"expr\": |\n avg(irate(node_disk_io_time_weighted{job=\"node-exporter\",device=~\"(sd|xvd).+\"}[1m]) / 1e3)\n \"record\": \":node_disk_saturation:avg_irate\"\n - \"expr\": |\n \ avg by (node) (\n irate(node_disk_io_time_weighted{job=\"node-exporter\",device=~\"(sd|xvd).+\"}[1m]) / 1e3\n * on (namespace, pod) group_left(node)\n node_namespace_pod:kube_pod_info:\n \ )\n \"record\": \"node:node_disk_saturation:avg_irate\"\n - \"expr\": |\n sum(irate(node_network_receive_bytes{job=\"node-exporter\",device=\"eth0\"}[1m])) +\n sum(irate(node_network_transmit_bytes{job=\"node-exporter\",device=\"eth0\"}[1m]))\n \ \"record\": \":node_net_utilisation:sum_irate\"\n - \"expr\": |\n sum by (node) (\n (irate(node_network_receive_bytes{job=\"node-exporter\",device=\"eth0\"}[1m]) +\n irate(node_network_transmit_bytes{job=\"node-exporter\",device=\"eth0\"}[1m]))\n \ * on (namespace, pod) group_left(node)\n node_namespace_pod:kube_pod_info:\n \ )\n \"record\": \"node:node_net_utilisation:sum_irate\"\n - \"expr\": |\n sum(irate(node_network_receive_drop{job=\"node-exporter\",device=\"eth0\"}[1m])) +\n sum(irate(node_network_transmit_drop{job=\"node-exporter\",device=\"eth0\"}[1m]))\n \ \"record\": \":node_net_saturation:sum_irate\"\n - \"expr\": |\n sum by 
(node) (\n (irate(node_network_receive_drop{job=\"node-exporter\",device=\"eth0\"}[1m]) +\n irate(node_network_transmit_drop{job=\"node-exporter\",device=\"eth0\"}[1m]))\n \ * on (namespace, pod) group_left(node)\n node_namespace_pod:kube_pod_info:\n \ )\n \"record\": \"node:node_net_saturation:sum_irate\"\n- \"name\": \"kubernetes-apps\"\n \ \"rules\": \n - \"alert\": \"KubePodCrashLooping\"\n \"annotations\": \n \ \"message\": \"{{ $labels.namespace }}/{{ $labels.pod }} ({{ $labels.container }}) is restarting {{ printf \\\"%.2f\\\" $value }} / second\"\n \"expr\": |\n \ rate(kube_pod_container_status_restarts_total{job=\"kube-state-metrics\"}[15m]) > 0\n \"for\": \"1h\"\n \"labels\": \n \"severity\": \"critical\"\n \ - \"alert\": \"KubePodNotReady\"\n \"annotations\": \n \"message\": \"{{ $labels.namespace }}/{{ $labels.pod }} is not ready.\"\n \"expr\": |\n \ sum by (namespace, pod) (kube_pod_status_phase{job=\"kube-state-metrics\", phase!~\"Running|Succeeded\"}) > 0\n \"for\": \"1h\"\n \"labels\": \n \"severity\": \"critical\"\n - \"alert\": \"KubeDeploymentGenerationMismatch\"\n \"annotations\": \n \"message\": \"Deployment {{ $labels.namespace }}/{{ $labels.deployment }} generation mismatch\"\n \"expr\": |\n kube_deployment_status_observed_generation{job=\"kube-state-metrics\"}\n \ !=\n kube_deployment_metadata_generation{job=\"kube-state-metrics\"}\n \ \"for\": \"15m\"\n \"labels\": \n \"severity\": \"critical\"\n - \"alert\": \"KubeDeploymentReplicasMismatch\"\n \"annotations\": \n \"message\": \"Deployment {{ $labels.namespace }}/{{ $labels.deployment }} replica mismatch\"\n \ \"expr\": |\n kube_deployment_spec_replicas{job=\"kube-state-metrics\"}\n \ !=\n kube_deployment_status_replicas_available{job=\"kube-state-metrics\"}\n \ \"for\": \"15m\"\n \"labels\": \n \"severity\": \"critical\"\n- \"name\": \"kubernetes-resources\"\n \"rules\": \n - \"alert\": \"KubeCPUOvercommit\"\n \ \"annotations\": \n \"message\": \"Overcommited CPU resource requests on Pods, cannot tolerate node failure.\"\n \"expr\": |\n sum(namespace_name:kube_pod_container_resource_requests_cpu_cores:sum)\n \ /\n sum(node:node_num_cpu:sum)\n >\n (count(node:node_num_cpu:sum)-1) / count(node:node_num_cpu:sum)\n \"for\": \"5m\"\n \"labels\": \n \"severity\": \"warning\"\n - \"alert\": \"KubeMemOvercommit\"\n \"annotations\": \n \"message\": \"Overcommited Memory resource requests on Pods, cannot tolerate node failure.\"\n \ \"expr\": |\n sum(namespace_name:kube_pod_container_resource_requests_memory_bytes:sum)\n \ /\n sum(node_memory_MemTotal)\n >\n (count(node:node_num_cpu:sum)-1)\n \ /\n count(node:node_num_cpu:sum)\n \"for\": \"5m\"\n \"labels\": \n \"severity\": \"warning\"\n - \"alert\": \"KubeCPUOvercommit\"\n \"annotations\": \n \"message\": \"Overcommited CPU resource request quota on Namespaces.\"\n \ \"expr\": |\n sum(kube_resourcequota{job=\"kube-state-metrics\", type=\"hard\", resource=\"requests.cpu\"})\n /\n sum(node:node_num_cpu:sum)\n > 1.5\n \"for\": \"5m\"\n \"labels\": \n \"severity\": \"warning\"\n \ - \"alert\": \"KubeMemOvercommit\"\n \"annotations\": \n \"message\": \"Overcommited Memory resource request quota on Namespaces.\"\n \"expr\": |\n \ sum(kube_resourcequota{job=\"kube-state-metrics\", type=\"hard\", resource=\"requests.memory\"})\n \ /\n sum(node_memory_MemTotal{job=\"node-exporter\"})\n > 1.5\n \ \"for\": \"5m\"\n \"labels\": \n \"severity\": \"warning\"\n - \"alert\": \"KubeQuotaExceeded\"\n \"annotations\": \n \"message\": \"{{ printf \\\"%0.0f\\\" $value }}% usage of {{ 
$labels.resource }} in namespace {{ $labels.namespace }}.\"\n \ \"expr\": |\n 100 * kube_resourcequota{job=\"kube-state-metrics\", type=\"used\"}\n \ / ignoring(instance, job, type)\n kube_resourcequota{job=\"kube-state-metrics\", type=\"hard\"}\n > 90\n \"for\": \"15m\"\n \"labels\": \n \"severity\": \"warning\"\n- \"name\": \"kubernetes-storage\"\n \"rules\": \n - \"alert\": \"KubePersistentVolumeUsageCritical\"\n \"annotations\": \n \"message\": \"The persistent volume claimed by {{ $labels.persistentvolumeclaim }} in namespace {{ $labels.namespace }} has {{ printf \\\"%0.0f\\\" $value }}% free.\"\n \"expr\": |\n 100 * kubelet_volume_stats_available_bytes{job=\"kubelet\"}\n /\n \ kubelet_volume_stats_capacity_bytes{job=\"kubelet\"}\n < 3\n \"for\": \"1m\"\n \"labels\": \n \"severity\": \"critical\"\n - \"alert\": \"KubePersistentVolumeFullInFourDays\"\n \ \"annotations\": \n \"message\": \"Based on recent sampling, the persistent volume claimed by {{ $labels.persistentvolumeclaim }} in namespace {{ $labels.namespace }} is expected to fill up within four days.\"\n \"expr\": |\n predict_linear(kubelet_volume_stats_available_bytes{job=\"kubelet\"}[1h], 4 * 24 * 3600) < 0\n \"for\": \"5m\"\n \"labels\": \n \"severity\": \"critical\"\n- \"name\": \"kubernetes-system\"\n \"rules\": \n - \"alert\": \"KubeNodeNotReady\"\n \"annotations\": \n \"message\": \"{{ $labels.node }} has been unready for more than an hour\"\n \"expr\": |\n max(kube_node_status_ready{job=\"kube-state-metrics\", condition=\"false\"} == 1) BY (node)\n \"for\": \"1h\"\n \"labels\": \n \ \"severity\": \"warning\"\n - \"alert\": \"KubeVersionMismatch\"\n \"annotations\": \n \"message\": \"There are {{ $value }} different versions of Kubernetes components running.\"\n \"expr\": |\n count(count(kubernetes_build_info{job!=\"kube-dns\"}) by (gitVersion)) > 1\n \"for\": \"1h\"\n \"labels\": \n \"severity\": \"warning\"\n - \"alert\": \"KubeClientErrors\"\n \"annotations\": \n \"message\": \"Kubernetes API server client '{{ $labels.job }}/{{ $labels.instance }}' is experiencing {{ printf \\\"%0.0f\\\" $value }}% errors.'\"\n \"expr\": |\n sum(rate(rest_client_requests_total{code!~\"2..\"}[5m])) by (instance, job) * 100\n /\n sum(rate(rest_client_requests_total[5m])) by (instance, job)\n > 1\n \"for\": \"15m\"\n \"labels\": \n \"severity\": \"warning\"\n - \"alert\": \"KubeClientErrors\"\n \"annotations\": \n \"message\": \"Kubernetes API server client '{{ $labels.job }}/{{ $labels.instance }}' is experiencing {{ printf \\\"%0.0f\\\" $value }} errors / sec.'\"\n \"expr\": |\n sum(rate(ksm_scrape_error_total{job=\"kube-state-metrics\"}[5m])) by (instance, job) > 0.1\n \"for\": \"15m\"\n \"labels\": \n \"severity\": \"warning\"" kind: ConfigMap metadata: labels: prometheus: k8s role: alert-rules name: prometheus-k8s-rules namespace: monitoring
91.904762
153
0.581477
7354be61b18ec21e257bd54fc6b5e53039fc6608
361
yaml
YAML
infra_setup/crb_sa.yaml
idjohnson/yabbs-in-k8s
4fcc655c068c336750015dca857bf47e914454e8
[ "MIT" ]
null
null
null
infra_setup/crb_sa.yaml
idjohnson/yabbs-in-k8s
4fcc655c068c336750015dca857bf47e914454e8
[ "MIT" ]
null
null
null
infra_setup/crb_sa.yaml
idjohnson/yabbs-in-k8s
4fcc655c068c336750015dca857bf47e914454e8
[ "MIT" ]
null
null
null
apiVersion: rbac.authorization.k8s.io/v1 # This cluster role binding allows anyone in the "manager" group to read secrets in any namespace. kind: ClusterRoleBinding metadata: name: default-cluster-admin subjects: - kind: ServiceAccount name: azdo namespace: default roleRef: kind: ClusterRole name: cluster-admin apiGroup: rbac.authorization.k8s.io
25.785714
98
0.783934
73552fd27a4dad858582667921c908264711f9a6
2,433
yaml
YAML
yamls/metricbeat/deployment_release-name-metricbeat-metrics.yaml
jmnote/kubeyaml
675b07a90d2295f697e89294dd31030943836a0d
[ "Apache-2.0" ]
1
2022-01-28T16:03:48.000Z
2022-01-28T16:03:48.000Z
yamls/metricbeat/deployment_release-name-metricbeat-metrics.yaml
jmnote/kubeyaml
675b07a90d2295f697e89294dd31030943836a0d
[ "Apache-2.0" ]
null
null
null
yamls/metricbeat/deployment_release-name-metricbeat-metrics.yaml
jmnote/kubeyaml
675b07a90d2295f697e89294dd31030943836a0d
[ "Apache-2.0" ]
1
2022-02-15T07:57:28.000Z
2022-02-15T07:57:28.000Z
apiVersion: apps/v1 kind: Deployment metadata: labels: app: RELEASE-NAME-metricbeat-metrics chart: metricbeat-7.16.3 heritage: Helm release: RELEASE-NAME name: RELEASE-NAME-metricbeat-metrics spec: replicas: 1 selector: matchLabels: app: RELEASE-NAME-metricbeat-metrics release: RELEASE-NAME template: metadata: annotations: configChecksum: d7b946c2c62cbf6928fed5219effefad2f1a82ab1c875cc7665a8c8b3c86c7c labels: app: RELEASE-NAME-metricbeat-metrics chart: metricbeat-7.16.3 release: RELEASE-NAME spec: affinity: {} containers: - args: - -e - -E - http.enabled=true env: - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - name: KUBE_STATE_METRICS_HOSTS value: $(RELEASE_NAME_KUBE_STATE_METRICS_SERVICE_HOST):$(RELEASE_NAME_KUBE_STATE_METRICS_SERVICE_PORT_HTTP) envFrom: [] image: docker.elastic.co/beats/metricbeat:7.16.3 imagePullPolicy: IfNotPresent livenessProbe: exec: command: - sh - -c - | #!/usr/bin/env bash -e curl --fail 127.0.0.1:5066 failureThreshold: 3 initialDelaySeconds: 10 periodSeconds: 10 timeoutSeconds: 5 name: metricbeat readinessProbe: exec: command: - sh - -c - | #!/usr/bin/env bash -e metricbeat test output failureThreshold: 3 initialDelaySeconds: 10 periodSeconds: 10 timeoutSeconds: 5 resources: limits: cpu: 1000m memory: 200Mi requests: cpu: 100m memory: 100Mi securityContext: privileged: false runAsUser: 0 volumeMounts: - mountPath: /usr/share/metricbeat/metricbeat.yml name: metricbeat-config readOnly: true subPath: metricbeat.yml nodeSelector: {} serviceAccountName: RELEASE-NAME-metricbeat terminationGracePeriodSeconds: 30 tolerations: [] volumes: - configMap: defaultMode: 384 name: RELEASE-NAME-metricbeat-deployment-config name: metricbeat-config
27.033333
117
0.56679
735534994f75427a4edff42a7bc82cc19065f849
338
yaml
YAML
config/rbac/backupvolume_viewer_role.yaml
paulczar/jenkins-automation-operator
4d9b460ffd5d829bb2929a0cf660808b3efb5384
[ "Apache-2.0" ]
16
2020-05-01T13:21:04.000Z
2021-06-10T07:33:59.000Z
config/rbac/backupvolume_viewer_role.yaml
paulczar/jenkins-automation-operator
4d9b460ffd5d829bb2929a0cf660808b3efb5384
[ "Apache-2.0" ]
198
2019-11-25T12:25:26.000Z
2021-09-23T08:36:20.000Z
config/rbac/backupvolume_viewer_role.yaml
paulczar/jenkins-automation-operator
4d9b460ffd5d829bb2929a0cf660808b3efb5384
[ "Apache-2.0" ]
18
2019-11-19T03:41:39.000Z
2021-09-10T10:17:30.000Z
# permissions for end users to view backupvolumes. apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: backupvolume-viewer-role rules: - apiGroups: - jenkins.io resources: - backupvolumes verbs: - get - list - watch - apiGroups: - jenkins.io resources: - backupvolumes/status verbs: - get
16.095238
50
0.710059
7355874ffaafb68ab96d7d4ec6f782e1415614cf
379
yaml
YAML
overlays/production-monitoring/kube-prometheus/grafana-deployment-patch.yaml
euidong/jitsi-deployment
9a7b8ed0dad80fc15363455699c987226c3b7aae
[ "MIT" ]
null
null
null
overlays/production-monitoring/kube-prometheus/grafana-deployment-patch.yaml
euidong/jitsi-deployment
9a7b8ed0dad80fc15363455699c987226c3b7aae
[ "MIT" ]
null
null
null
overlays/production-monitoring/kube-prometheus/grafana-deployment-patch.yaml
euidong/jitsi-deployment
9a7b8ed0dad80fc15363455699c987226c3b7aae
[ "MIT" ]
null
null
null
apiVersion: apps/v1 kind: Deployment metadata: labels: app: grafana name: grafana namespace: monitoring spec: selector: matchLabels: name: grafana template: metadata: labels: name: grafana spec: containers: - name: grafana env: - name: GF_SERVER_DOMAIN value: "peopleundercloud.com"
17.227273
43
0.588391
7355f03533643a2e4fee3877103246ab3f83af9b
919
yaml
YAML
examples/complete-example/nginx-ingress-rc.yaml
kongdewen/kubernetes-ingress
3b38473ad5444184ccf8e2f78fc65ada209820ee
[ "Apache-2.0" ]
1
2020-10-01T17:50:36.000Z
2020-10-01T17:50:36.000Z
examples/complete-example/nginx-ingress-rc.yaml
tzumby/kubernetes-ingress
ddcab1dc7ba6ea9d2dc32f321266a63396e921ea
[ "Apache-2.0" ]
null
null
null
examples/complete-example/nginx-ingress-rc.yaml
tzumby/kubernetes-ingress
ddcab1dc7ba6ea9d2dc32f321266a63396e921ea
[ "Apache-2.0" ]
1
2018-11-15T08:55:07.000Z
2018-11-15T08:55:07.000Z
apiVersion: v1 kind: ReplicationController metadata: name: nginx-ingress-rc labels: app: nginx-ingress spec: replicas: 1 selector: app: nginx-ingress template: metadata: labels: app: nginx-ingress spec: containers: - image: nginxdemos/nginx-ingress:1.0.0 imagePullPolicy: Always name: nginx-ingress ports: - containerPort: 80 hostPort: 80 - containerPort: 443 hostPort: 443 env: - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace # Uncomment the lines below to enable extensive logging and/or customization of # NGINX configuration with configmaps args: #- -v=3 #- -nginx-configmaps=$(POD_NAMESPACE)/nginx-config - -default-server-tls-secret=$(POD_NAMESPACE)/default-server-secret
25.527778
87
0.603917
7355f0ce6424c5f0347ee84f8f352dc10f4f4ace
315
yaml
YAML
drivers/storage/portworx/testspec/storageClassDbCloudSnapshot.yaml
adityadani/operator
5a54c485dfbd6da5620b9789049c1af46961fb32
[ "Apache-2.0" ]
null
null
null
drivers/storage/portworx/testspec/storageClassDbCloudSnapshot.yaml
adityadani/operator
5a54c485dfbd6da5620b9789049c1af46961fb32
[ "Apache-2.0" ]
null
null
null
drivers/storage/portworx/testspec/storageClassDbCloudSnapshot.yaml
adityadani/operator
5a54c485dfbd6da5620b9789049c1af46961fb32
[ "Apache-2.0" ]
null
null
null
kind: StorageClass apiVersion: storage.k8s.io/v1 metadata: name: px-db-cloud-snapshot provisioner: kubernetes.io/portworx-volume parameters: repl: "3" snapshotschedule.stork.libopenstorage.org/daily-schedule: | schedulePolicyName: default-daily-policy annotations: portworx/snapshot-type: cloud
26.25
61
0.774603
7355fc0b59ba2240638bdd05c5761fb4e22f97a4
95
yaml
YAML
examples/ceph/rbd-sa.yaml
jcsdatera/kubernetes-lab-tutorial
d7c82a5be75132f6a8dd5c8bae3e5ac35c322484
[ "MIT" ]
null
null
null
examples/ceph/rbd-sa.yaml
jcsdatera/kubernetes-lab-tutorial
d7c82a5be75132f6a8dd5c8bae3e5ac35c322484
[ "MIT" ]
null
null
null
examples/ceph/rbd-sa.yaml
jcsdatera/kubernetes-lab-tutorial
d7c82a5be75132f6a8dd5c8bae3e5ac35c322484
[ "MIT" ]
3
2019-10-02T02:53:12.000Z
2021-04-14T08:00:13.000Z
apiVersion: v1 kind: ServiceAccount metadata: namespace: kube-system name: rbd-provisioner
15.833333
24
0.789474
735647868a052c5a391033fedc47fd9a5fd655c2
218
yaml
YAML
cluster/apps/home/hajimari/config-pvc.yaml
networkpanic/home-infra
83c5e7f2a85b95cfcfc1fbe353d7768026fd2d0d
[ "MIT" ]
1
2022-01-18T17:45:33.000Z
2022-01-18T17:45:33.000Z
cluster/apps/home/hajimari/config-pvc.yaml
networkpanic/home-infra
83c5e7f2a85b95cfcfc1fbe353d7768026fd2d0d
[ "MIT" ]
null
null
null
cluster/apps/home/hajimari/config-pvc.yaml
networkpanic/home-infra
83c5e7f2a85b95cfcfc1fbe353d7768026fd2d0d
[ "MIT" ]
null
null
null
--- apiVersion: v1 kind: PersistentVolumeClaim metadata: name: dash-config namespace: default spec: accessModes: - ReadWriteOnce storageClassName: nfs-client resources: requests: storage: 128Mi
15.571429
30
0.720183
73564c71969d602d440dde30486a98fee59b5655
79
yml
YAML
kubertenes/namespace.yml
JoseJuanM/art-marketplace
5004ea9654ad3b2a34ae5ca880f77fea8f110732
[ "MIT" ]
3
2021-12-28T01:38:04.000Z
2022-02-13T12:58:41.000Z
kubertenes/namespace.yml
JoseJuanM/art-marketplace
5004ea9654ad3b2a34ae5ca880f77fea8f110732
[ "MIT" ]
9
2021-12-15T13:16:10.000Z
2022-01-18T16:29:12.000Z
kubertenes/namespace.yml
JoseJuanM/art-marketplace
5004ea9654ad3b2a34ae5ca880f77fea8f110732
[ "MIT" ]
6
2021-12-15T12:48:16.000Z
2022-01-15T12:07:49.000Z
apiVersion: v1 kind: Namespace metadata: name: 0aps labels: name: 0aps
11.285714
15
0.696203
73566684aee83c0333e91baa4ea2bd613b397a57
263
yaml
YAML
server/k8s/redis-service.yaml
mitrakov/varlam
0ded56e993ac557aa28391448509830dec1cca10
[ "MIT" ]
null
null
null
server/k8s/redis-service.yaml
mitrakov/varlam
0ded56e993ac557aa28391448509830dec1cca10
[ "MIT" ]
null
null
null
server/k8s/redis-service.yaml
mitrakov/varlam
0ded56e993ac557aa28391448509830dec1cca10
[ "MIT" ]
null
null
null
apiVersion: v1 kind: Service metadata: name: redis-service namespace: tomsther labels: app: redis-label spec: type: ClusterIP ports: - name: redis-port protocol: TCP port: 6379 targetPort: 6379 selector: app: redis-label
15.470588
22
0.653992
7356701653b26c6e9d5269ca81d718f70f4d0972
485
yml
YAML
manifests/deployment.yml
NominalTrajectory/demo-theo-lansink
8f82e48d03b60a4f89236ed07dcfbb2f39ff1f74
[ "MIT" ]
null
null
null
manifests/deployment.yml
NominalTrajectory/demo-theo-lansink
8f82e48d03b60a4f89236ed07dcfbb2f39ff1f74
[ "MIT" ]
null
null
null
manifests/deployment.yml
NominalTrajectory/demo-theo-lansink
8f82e48d03b60a4f89236ed07dcfbb2f39ff1f74
[ "MIT" ]
null
null
null
apiVersion : apps/v1 kind: Deployment metadata: name: nominaltrajectorydemotheolansink spec: replicas: 1 selector: matchLabels: app: nominaltrajectorydemotheolansink template: metadata: labels: app: nominaltrajectorydemotheolansink spec: containers: - name: nominaltrajectorydemotheolansink image: snapshotapp.azurecr.io/nominaltrajectorydemotheolansink ports: - containerPort: 80
25.526316
73
0.670103
73569d29a41592d67ba58b8f0c1563af28e4e48f
340
yml
YAML
microservices_deployment/cronjobs/job.yml
MDRCS/grokking-kubernetes
25a409dc64a9c407a5a90febb89d02d6d6b4314e
[ "MIT" ]
1
2021-05-21T16:57:54.000Z
2021-05-21T16:57:54.000Z
microservices_deployment/cronjobs/job.yml
MDRCS/grokking-kubernetes
25a409dc64a9c407a5a90febb89d02d6d6b4314e
[ "MIT" ]
1
2020-06-08T07:10:22.000Z
2020-06-08T07:10:22.000Z
microservices_deployment/cronjobs/job.yml
MDRCS/grokking-kubernetes
25a409dc64a9c407a5a90febb89d02d6d6b4314e
[ "MIT" ]
null
null
null
apiVersion: batch/v1 kind: Job metadata: name: job spec: template: spec: containers: - name: long-job image: python command: ["-c", "import time; print('starting'); time.sleep(30); print('done')"] restartPolicy: Never backoffLimit: 2 # number of times where job could fail and stil restart it.
24.285714
90
0.632353
7356aa8ef832ce98bd43e98c82a139c4d4a24d89
2,315
yaml
YAML
ops/logging/elasticsearch_statefulset.yaml
binarytemple/poc_plug_elixir
56b3780921d8365c5a8ec4cfe9bc7b70aea4caee
[ "Apache-2.0" ]
5
2019-07-23T13:52:33.000Z
2021-11-09T12:37:28.000Z
ops/logging/elasticsearch_statefulset.yaml
binarytemple/is_it_up
56b3780921d8365c5a8ec4cfe9bc7b70aea4caee
[ "Apache-2.0" ]
1
2019-10-01T14:45:45.000Z
2019-10-01T14:45:45.000Z
ops/logging/elasticsearch_statefulset.yaml
binarytemple/poc_plug_elixir
56b3780921d8365c5a8ec4cfe9bc7b70aea4caee
[ "Apache-2.0" ]
2
2019-08-11T11:25:24.000Z
2020-03-03T00:13:32.000Z
apiVersion: apps/v1 kind: StatefulSet metadata: name: es-cluster namespace: kube-logging spec: serviceName: elasticsearch replicas: 3 selector: matchLabels: app: elasticsearch template: metadata: labels: app: elasticsearch spec: containers: - name: elasticsearch image: docker.elastic.co/elasticsearch/elasticsearch:7.3.2 resources: limits: cpu: 1000m requests: cpu: 100m ports: - containerPort: 9200 name: rest protocol: TCP - containerPort: 9300 name: inter-node protocol: TCP volumeMounts: - name: data mountPath: /usr/share/elasticsearch/data env: - name: cluster.name value: k8s-logs - name: node.name valueFrom: fieldRef: fieldPath: metadata.name - name: discovery.seed_hosts value: "es-cluster-0.elasticsearch.kube-logging.svc.cluster.local, es-cluster-1.elasticsearch.kube-logging.svc.cluster.local, es-cluster-2.elasticsearch.kube-logging.svc.cluster.local" #value: "es-cluster-0.elasticsearch.kube-logging.svc.cluster.local" - name: cluster.initial_master_nodes value: "es-cluster-0,es-cluster-1,es-cluster-2" #value: "es-cluster-0.elasticsearch.kube-logging.svc.cluster.local" - name: ES_JAVA_OPTS value: "-Xms512m -Xmx512m" initContainers: - name: fix-permissions image: busybox command: ["sh", "-c", "chown -R 1000:1000 /usr/share/elasticsearch/data"] securityContext: privileged: true volumeMounts: - name: data mountPath: /usr/share/elasticsearch/data - name: increase-vm-max-map image: busybox command: ["sysctl", "-w", "vm.max_map_count=262144"] securityContext: privileged: true - name: increase-fd-ulimit image: busybox command: ["sh", "-c", "ulimit -n 65536"] securityContext: privileged: true volumeClaimTemplates: - metadata: name: data spec: accessModes: [ "ReadWriteMany" ] resources: requests: storage: 200m
30.064935
196
0.581857
7357a0566aab039bf3e4d161616982254be48589
1,713
yaml
YAML
kustomize/deployment.yaml
siva-27/podinfo
bbe3f5edc9e076f955853d9af8dbc357ee740020
[ "Apache-2.0" ]
null
null
null
kustomize/deployment.yaml
siva-27/podinfo
bbe3f5edc9e076f955853d9af8dbc357ee740020
[ "Apache-2.0" ]
null
null
null
kustomize/deployment.yaml
siva-27/podinfo
bbe3f5edc9e076f955853d9af8dbc357ee740020
[ "Apache-2.0" ]
null
null
null
apiVersion: apps/v1
kind: Deployment
metadata:
  name: podinfo
spec:
  minReadySeconds: 3
  revisionHistoryLimit: 5
  progressDeadlineSeconds: 60
  strategy:
    rollingUpdate:
      maxUnavailable: 0
    type: RollingUpdate
  selector:
    matchLabels:
      app: podinfo
  template:
    metadata:
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "9797"
      labels:
        app: podinfo
    spec:
      containers:
        - name: podinfod
          image: ghcr.io/stefanprodan/podinfo:5.0.2
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
              containerPort: 9898
              protocol: TCP
            - name: http-metrics
              containerPort: 9797
              protocol: TCP
            - name: grpc
              containerPort: 9999
              protocol: TCP
          command:
            - ./podinfo
            - --port=9898
            - --port-metrics=9797
            - --grpc-port=9999
            - --grpc-service-name=podinfo
            - --level=info
            - --random-delay=false
            - --random-error=false
          env:
            - name: PODINFO_UI_COLOR
              value: "#34577c"
          livenessProbe:
            exec:
              command:
                - podcli
                - check
                - http
                - localhost:9898/healthz
            initialDelaySeconds: 5
            timeoutSeconds: 5
          readinessProbe:
            exec:
              command:
                - podcli
                - check
                - http
                - localhost:9898/readyz
            initialDelaySeconds: 5
            timeoutSeconds: 5
          resources:
            limits:
              cpu: 2000m
              memory: 512Mi
            requests:
              cpu: 100m
              memory: 64Mi
22.84
49
0.514302
7357b54b42539994b433913c1af2c58ff0992b66
215
yaml
YAML
service.yaml
theone4ever/cloudify-kubernetes-plugin-blueprint
cb5b49336110ad9bf18c8282e044d454c1ee6715
[ "Apache-2.0" ]
null
null
null
service.yaml
theone4ever/cloudify-kubernetes-plugin-blueprint
cb5b49336110ad9bf18c8282e044d454c1ee6715
[ "Apache-2.0" ]
null
null
null
service.yaml
theone4ever/cloudify-kubernetes-plugin-blueprint
cb5b49336110ad9bf18c8282e044d454c1ee6715
[ "Apache-2.0" ]
null
null
null
apiVersion: v1
kind: Service
metadata:
  name: nodecellar-service
spec:
  selector:
    app: nodecellar
  ports:
    - protocol: TCP
      port: 8888
      targetPort: 3000
      nodePort: 30000
  type: NodePort
14.333333
26
0.651163
7357d54c4af2932ce3ef8ceb85747c6dbdcc1ece
144
yaml
YAML
built-in-references/Kubernetes/perf/violations/violation9822.yaml
sw47/azure-policy
4a38e4cb4047b5364902653f0defe45f1b21bc61
[ "MIT" ]
null
null
null
built-in-references/Kubernetes/perf/violations/violation9822.yaml
sw47/azure-policy
4a38e4cb4047b5364902653f0defe45f1b21bc61
[ "MIT" ]
null
null
null
built-in-references/Kubernetes/perf/violations/violation9822.yaml
sw47/azure-policy
4a38e4cb4047b5364902653f0defe45f1b21bc61
[ "MIT" ]
null
null
null
apiVersion: v1
kind: Pod
metadata:
  name: image-demo-9822
spec:
  containers:
    - name: nginx
      image: nginx #ritaacr.azurecr.io/nginx:latest
18
49
0.722222
7357f3bf46338a1594120a038f388f6cedb6647e
701
yaml
YAML
yaml-examples/homeassistant/homeassistant-deployment.yaml
joshdavidson/openhomelab
d5c8a8206fe62bbb679998c22c8150b65dd3af4f
[ "MIT" ]
20
2020-10-21T04:20:19.000Z
2020-11-04T14:55:23.000Z
yaml-examples/homeassistant/homeassistant-deployment.yaml
joshdavidson/openhomelab
d5c8a8206fe62bbb679998c22c8150b65dd3af4f
[ "MIT" ]
6
2020-11-10T20:22:34.000Z
2022-01-25T08:59:01.000Z
yaml-examples/homeassistant/homeassistant-deployment.yaml
joshdavidson/openhomelab
d5c8a8206fe62bbb679998c22c8150b65dd3af4f
[ "MIT" ]
4
2020-11-25T14:34:03.000Z
2022-02-15T04:12:22.000Z
kind: Deployment
apiVersion: apps/v1
metadata:
  name: homeassistant
  namespace: default
  labels:
    app: homeassistant
spec:
  replicas: 1
  selector:
    matchLabels:
      app: homeassistant
  template:
    metadata:
      labels:
        app: homeassistant
    spec:
      containers:
        - name: homeassistant
          image: homeassistant/home-assistant:beta
          imagePullPolicy: Always
          ports:
            - containerPort: 8123
          env:
            - name: TZ
              value: "America/New_York"
          volumeMounts:
            - mountPath: /config
              name: config
      volumes:
        - name: config
          persistentVolumeClaim:
            claimName: homeassistant
21.242424
48
0.590585
73582adf1389a16a70a69f0953b49cabf32e7a0b
272
yaml
YAML
config/minimal-keyretrieve/daemons_v1alpha1_clusterrole.yaml
latchset/tang-operator
c8f347d8bfddc7925d1ae9dba676b82af42823f7
[ "Apache-2.0" ]
6
2021-08-24T09:41:26.000Z
2022-02-22T08:19:08.000Z
config/minimal-keyretrieve/daemons_v1alpha1_clusterrole.yaml
latchset/tang-operator
c8f347d8bfddc7925d1ae9dba676b82af42823f7
[ "Apache-2.0" ]
3
2021-08-24T16:08:29.000Z
2022-01-24T17:17:13.000Z
config/minimal-keyretrieve/daemons_v1alpha1_clusterrole.yaml
latchset/tang-operator
c8f347d8bfddc7925d1ae9dba676b82af42823f7
[ "Apache-2.0" ]
1
2022-01-25T11:27:38.000Z
2022-01-25T11:27:38.000Z
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: pod-reader-executor
rules:
  - apiGroups: [""] # "" indicates the core API group
    resources: ["pods", "pods/log", "pods/exec", "pods/status"]
    verbs: ["get", "watch", "list", "create", "update"]
30.222222
61
0.672794
7358657408aa30d5f3208ddab36e7b8d25a91994
382
yaml
YAML
kustomize/ingress.yaml
Haybu/podinfo
814e5ea7c52526028debf61ac000fe26a37d6bfd
[ "Apache-2.0" ]
null
null
null
kustomize/ingress.yaml
Haybu/podinfo
814e5ea7c52526028debf61ac000fe26a37d6bfd
[ "Apache-2.0" ]
null
null
null
kustomize/ingress.yaml
Haybu/podinfo
814e5ea7c52526028debf61ac000fe26a37d6bfd
[ "Apache-2.0" ]
null
null
null
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: prodinfo-ingress
  namespace: default
spec:
  rules:
    - host: localhost
      http:
        paths:
          - path: /podinfo
            pathType: Prefix
            backend:
              service:
                name: podinfo
                port:
                  number: 9898
22.470588
63
0.460733
73588e6c360bfe4ee81155f5b490ca4be7a0a1cc
354
yaml
YAML
kubernetes-controllers/paymentservice-replicaset.yaml
otus-kuber-2020-04/israodin_platform
391929b5bfe865b53772b9ad3540a855cc109afe
[ "MIT" ]
null
null
null
kubernetes-controllers/paymentservice-replicaset.yaml
otus-kuber-2020-04/israodin_platform
391929b5bfe865b53772b9ad3540a855cc109afe
[ "MIT" ]
1
2020-06-27T20:27:10.000Z
2020-06-27T20:27:10.000Z
kubernetes-controllers/paymentservice-replicaset.yaml
otus-kuber-2020-07/israodin_platform
92d19f82d28e9d4d4906620e95e25ac451e1c219
[ "MIT" ]
1
2020-08-01T18:30:51.000Z
2020-08-01T18:30:51.000Z
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: paymentservice
  labels:
    app: paymentservice
spec:
  replicas: 3
  selector:
    matchLabels:
      app: paymentservice
  template:
    metadata:
      labels:
        app: paymentservice
    spec:
      containers:
        - name: paymentservice
          image: israodin/hipster_payment:v0.0.2
18.631579
48
0.652542
7358ed2c4f0a81597d3aa729839e27980f2d1cb3
2,574
yaml
YAML
deploy/driver/daemonset.yaml
ytpay/csi-nfs
7062065b73b448444c6c0ec2ccd676d6026f75ff
[ "Apache-2.0" ]
1
2022-03-04T08:38:05.000Z
2022-03-04T08:38:05.000Z
deploy/driver/daemonset.yaml
ytpay/csi-nfs
7062065b73b448444c6c0ec2ccd676d6026f75ff
[ "Apache-2.0" ]
null
null
null
deploy/driver/daemonset.yaml
ytpay/csi-nfs
7062065b73b448444c6c0ec2ccd676d6026f75ff
[ "Apache-2.0" ]
4
2021-01-11T01:49:31.000Z
2022-03-22T08:36:29.000Z
# This YAML file contains driver-registrar & csi driver nodeplugin API objects
# that are necessary to run CSI nodeplugin for nfs
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: csi-driver-registrar
spec:
  selector:
    matchLabels:
      app: csi-driver-registrar
  template:
    metadata:
      labels:
        app: csi-driver-registrar
    spec:
      # debug only(connect to dlv 2345 port)
      #hostNetwork: true
      containers:
        - name: csi-driver-registrar
          image: quay.io/k8scsi/csi-node-driver-registrar:v1.0.2
          args:
            - "--v=5"
            - "--csi-address=$(CSI_ENDPOINT)"
            - "--kubelet-registration-path=/var/lib/kubelet/plugins/csi-nfs/csi.sock"
          env:
            - name: CSI_ENDPOINT
              value: /csi/csi.sock
          volumeMounts:
            - name: plugin-dir
              mountPath: /csi
            - name: registration-dir
              mountPath: /registration
        - name: csi-nfs
          image: ytpay/csi-nfs:v1.0.4
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          lifecycle:
            preStop:
              exec:
                command: ["bash","-c","rm -rf /registration/csi-nfs /registration/csi-nfs-reg.sock"]
          args :
            - "--nodeid=$(NODE_ID)"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--enable-identity-server"
            - "--enable-node-server"
          env:
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
          volumeMounts:
            - name: plugin-dir
              mountPath: /csi
            - name: registration-dir
              mountPath: /registration
            # nfs ci driver will create a shared mount point in this directory
            - name: pods-mount-dir
              mountPath: /var/lib/kubelet/pods
              mountPropagation: Bidirectional
      volumes:
        - name: plugin-dir
          hostPath:
            path: /var/lib/kubelet/plugins/csi-nfs
            type: DirectoryOrCreate
        - name: registration-dir
          hostPath:
            path: /var/lib/kubelet/plugins_registry
            type: Directory
        # nfs ci driver will create a shared mount point in this directory
        - name: pods-mount-dir
          hostPath:
            path: /var/lib/kubelet/pods
            type: Directory
32.175
100
0.536908
735917def353f42a42d9157fc88e02d16a8d240b
542
yaml
YAML
ios-sensor-data/k8s/deployment.yaml
dalelane/machine-learning-kafka-events
f2e62c554065c97d5ec7e8be929e890d3f1f9ce2
[ "MIT" ]
1
2021-03-29T12:22:06.000Z
2021-03-29T12:22:06.000Z
ios-sensor-data/k8s/deployment.yaml
dalelane/machine-learning-kafka-events
f2e62c554065c97d5ec7e8be929e890d3f1f9ce2
[ "MIT" ]
null
null
null
ios-sensor-data/k8s/deployment.yaml
dalelane/machine-learning-kafka-events
f2e62c554065c97d5ec7e8be929e890d3f1f9ce2
[ "MIT" ]
2
2021-03-30T17:40:36.000Z
2021-06-06T21:40:13.000Z
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ios-sensor-data-bridge
spec:
  selector:
    matchLabels:
      app: ios-sensor-data-bridge
  replicas: 1
  template:
    metadata:
      labels:
        app: ios-sensor-data-bridge
    spec:
      containers:
        - name: bridge
          image: dalelane/ios-sensor-data-bridge:latest
          ports:
            - containerPort: 3000
          env:
            - name: KAFKA_BOOTSTRAP
              value: "dale-kafka-bootstrap:9092"
            - name: RAW_EVENTS_TOPIC
              value: "IPHONE.SENSORS"
21.68
53
0.608856
73597f9373c7e004cd0074fb6b77d7ef0b1ff346
460
yaml
YAML
converter/test/k8scvt/testdata/sd.yaml
jd3quist/yipee
467a13fc1e8ff6ea0627a8bdbf614394d88c8ec9
[ "Apache-2.0" ]
30
2018-11-02T21:07:30.000Z
2021-04-01T16:51:12.000Z
converter/test/k8scvt/testdata/sd.yaml
jd3quist/yipee
467a13fc1e8ff6ea0627a8bdbf614394d88c8ec9
[ "Apache-2.0" ]
73
2018-11-02T20:29:17.000Z
2022-03-05T18:51:11.000Z
converter/test/k8scvt/testdata/sd.yaml
jd3quist/yipee
467a13fc1e8ff6ea0627a8bdbf614394d88c8ec9
[ "Apache-2.0" ]
17
2018-11-02T22:13:25.000Z
2020-06-23T18:36:12.000Z
# Generated 2018-09-21T20:23:31.900Z by Yipee.io
# Application: sd
# Last Modified: 2018-09-21T20:23:31.900Z
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: "2018-06-14T22:47:13Z"
  generation: 6
  resourceVersion: foo
  selfLink: bar
  uid: 921f1b4f-fc8b-4f81-b330-d66a34991ab1
  name: mongo
  labels:
    name: mongo
    app: mongo
spec:
  ports:
    - port: 27017
      name: web
  selector:
    name: foo.bar-mongo
    app: mongo
  type: NodePort
18.4
48
0.695652
73598b05e7114276b0ec8fecfe1be0f07f62f305
868
yaml
YAML
pkg/pipeline/testdata/k8s_normalize.yaml
pedro-r-marques/pipeline
f1ebb9bbfdcf286dc8e1e98626f33ee9be7bc85d
[ "Apache-2.0" ]
null
null
null
pkg/pipeline/testdata/k8s_normalize.yaml
pedro-r-marques/pipeline
f1ebb9bbfdcf286dc8e1e98626f33ee9be7bc85d
[ "Apache-2.0" ]
null
null
null
pkg/pipeline/testdata/k8s_normalize.yaml
pedro-r-marques/pipeline
f1ebb9bbfdcf286dc8e1e98626f33ee9be7bc85d
[ "Apache-2.0" ]
null
null
null
apiVersion: batch/v1
kind: Job
metadata:
  name: mr-sitedata-normalize-1
  namespace: roque
  labels:
    pipeline: mr_sitedata
    id: "1"
    task: normalize
spec:
  completions: 4
  parallelism: 4
  template:
    metadata:
      labels:
        pipeline: mr_sitedata
        id: "1"
        task: normalize
    spec:
      containers:
        - name: normalize
          image: gcr.io/laserlike-1167/roque-normalize
          args:
            - -logtostderr
            - -workdir=gs://laserlike_roque/mr/1
            - -etcd-lock=mr-sitedata-mr_normalize-1
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: KUBERNETES_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
      restartPolicy: Never
23.459459
54
0.542627
73599db52e4bbacaf6999684e731ef3712c464bf
501
yaml
YAML
examples/PyTorch/pytorch-deploy.yaml
Lakshmi-Patneedi/omnia
40a5dd9496af16ab6fd18f2d807a4d8dea11bbf3
[ "Apache-2.0" ]
null
null
null
examples/PyTorch/pytorch-deploy.yaml
Lakshmi-Patneedi/omnia
40a5dd9496af16ab6fd18f2d807a4d8dea11bbf3
[ "Apache-2.0" ]
null
null
null
examples/PyTorch/pytorch-deploy.yaml
Lakshmi-Patneedi/omnia
40a5dd9496af16ab6fd18f2d807a4d8dea11bbf3
[ "Apache-2.0" ]
null
null
null
apiVersion: batch/v1
kind: Job
metadata:
  name: pytorch-cpu-simple
  namespace: default
spec:
  template:
    spec:
      containers:
        - name: cpu-pytorch
          image: docker.io/mapler/pytorch-cpu:latest
          volumeMounts:
            - mountPath: /pyscript
              name: torch-job-volume
          command: ["bash","-c","python /pyscript/pytorch-example.py"]
      restartPolicy: Never
      volumes:
        - name: torch-job-volume
          hostPath:
            path: /home/k8snfs/torch-example
23.857143
68
0.616766
7359a33a581113d3ebc5eef0350a83f12a1bb552
239
yaml
YAML
examples/zookeeper-client.yaml
davidsung/cp-helm-charts
b681f4cba49137c0b9a3b7481c97d06dcafbe2ee
[ "Apache-2.0" ]
null
null
null
examples/zookeeper-client.yaml
davidsung/cp-helm-charts
b681f4cba49137c0b9a3b7481c97d06dcafbe2ee
[ "Apache-2.0" ]
null
null
null
examples/zookeeper-client.yaml
davidsung/cp-helm-charts
b681f4cba49137c0b9a3b7481c97d06dcafbe2ee
[ "Apache-2.0" ]
null
null
null
apiVersion: v1
kind: Pod
metadata:
  name: zookeeper-client
  namespace: default
spec:
  containers:
    - name: zookeeper-client
      image: confluentinc/cp-zookeeper:5.2.0
      command:
        - sh
        - -c
        - "exec tail -f /dev/null"
17.071429
42
0.635983
7359a876af0932508fbb179bb24ceecc9b6dbb3f
756
yaml
YAML
cluster-apps/team-b.yaml
Flodu31/Starwind-AKS-Arc-Gitops
4817cbf1d490fe87cad0ab757e284ebb5fae00d2
[ "MIT" ]
null
null
null
cluster-apps/team-b.yaml
Flodu31/Starwind-AKS-Arc-Gitops
4817cbf1d490fe87cad0ab757e284ebb5fae00d2
[ "MIT" ]
null
null
null
cluster-apps/team-b.yaml
Flodu31/Starwind-AKS-Arc-Gitops
4817cbf1d490fe87cad0ab757e284ebb5fae00d2
[ "MIT" ]
null
null
null
kind: Deployment
apiVersion: apps/v1
metadata:
  name: starwind-b
  labels:
    app: starwind-b
  namespace: starwind-b
spec:
  selector:
    matchLabels:
      app: starwind-b
  template:
    metadata:
      labels:
        app: starwind-b
    spec:
      containers:
        - name: starwind-b
          image: nginx
          ports:
            - containerPort: 80
          resources:
            limits:
              cpu: 100m
              memory: 10Mi
            requests:
              cpu: 100m
              memory: 10Mi
          volumeMounts:
            - mountPath: /usr/share/nginx/html
              name: html
      volumes:
        - name: html
          configMap:
            name: nginx-index-html
21.6
45
0.473545
7359da93acacecbe00e444b0b556e030c2cd526e
727
yaml
YAML
scripts/kubernetes/nfs-server-rc.yaml
fossabot/elasticshift
7aaa230b2fdc5c8ebd74aceef6f964022f486875
[ "Apache-2.0" ]
null
null
null
scripts/kubernetes/nfs-server-rc.yaml
fossabot/elasticshift
7aaa230b2fdc5c8ebd74aceef6f964022f486875
[ "Apache-2.0" ]
null
null
null
scripts/kubernetes/nfs-server-rc.yaml
fossabot/elasticshift
7aaa230b2fdc5c8ebd74aceef6f964022f486875
[ "Apache-2.0" ]
1
2020-11-28T04:51:51.000Z
2020-11-28T04:51:51.000Z
apiVersion: v1
kind: ReplicationController
metadata:
  name: nfs-server
spec:
  replicas: 1
  selector:
    role: nfs-server
  template:
    metadata:
      labels:
        role: nfs-server
    spec:
      containers:
        - name: nfs-server
          image: gcr.io/google-samples/nfs-server:1.1
          ports:
            - name: nfs
              containerPort: 2049
            - name: mountd
              containerPort: 20048
            - name: rpcbind
              containerPort: 111
          securityContext:
            privileged: true
          volumeMounts:
            - mountPath: /Users/ghazni/elasticshift
              name: mypvc
      volumes:
        - name: mypvc
          persistentVolumeClaim:
            claimName: nfs-pv
22.71875
51
0.557084
7359f3b77d5fd2519389356c5f4ae61c60208fc2
3,125
yaml
YAML
deploy/examples/osd-purge.yaml
superbrothers/rook
8e3350a625e806ef2e3c7267a754ca70b1b34d07
[ "Apache-2.0" ]
1
2022-03-25T00:25:56.000Z
2022-03-25T00:25:56.000Z
deploy/examples/osd-purge.yaml
superbrothers/rook
8e3350a625e806ef2e3c7267a754ca70b1b34d07
[ "Apache-2.0" ]
null
null
null
deploy/examples/osd-purge.yaml
superbrothers/rook
8e3350a625e806ef2e3c7267a754ca70b1b34d07
[ "Apache-2.0" ]
1
2022-01-18T01:30:45.000Z
2022-01-18T01:30:45.000Z
#################################################################################################################
# We need many operations to remove OSDs as written in Documentation/ceph-osd-mgmt.md.
# This job can automate some of that operations: mark OSDs as `out`, purge these OSDs,
# and delete the corresponding resources like OSD deployments, OSD prepare jobs, and PVCs.
#
# Please note the following.
#
# - This job only works for `down` OSDs.
# - This job doesn't wait for backfilling to be completed.
#
# If you want to remove `up` OSDs and/or want to wait for backfilling to be completed between each OSD removal,
# please do it by hand.
#################################################################################################################
apiVersion: batch/v1
kind: Job
metadata:
  name: rook-ceph-purge-osd
  namespace: rook-ceph # namespace:cluster
  labels:
    app: rook-ceph-purge-osd
spec:
  template:
    metadata:
      labels:
        app: rook-ceph-purge-osd
    spec:
      serviceAccountName: rook-ceph-purge-osd
      containers:
        - name: osd-removal
          image: rook/ceph:master
          # TODO: Insert the OSD ID in the last parameter that is to be removed
          # The OSD IDs are a comma-separated list. For example: "0" or "0,2".
          # If you want to preserve the OSD PVCs, set `--preserve-pvc true`.
          #
          # A --force-osd-removal option is available if the OSD should be destroyed even though the
          # removal could lead to data loss.
          args:
            - "ceph"
            - "osd"
            - "remove"
            - "--preserve-pvc"
            - "false"
            - "--force-osd-removal"
            - "false"
            - "--osd-ids"
            - "<OSD-IDs>"
          env:
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: ROOK_MON_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  key: data
                  name: rook-ceph-mon-endpoints
            - name: ROOK_CEPH_USERNAME
              valueFrom:
                secretKeyRef:
                  key: ceph-username
                  name: rook-ceph-mon
            - name: ROOK_CEPH_SECRET
              valueFrom:
                secretKeyRef:
                  key: ceph-secret
                  name: rook-ceph-mon
            - name: ROOK_CONFIG_DIR
              value: /var/lib/rook
            - name: ROOK_CEPH_CONFIG_OVERRIDE
              value: /etc/rook/config/override.conf
            - name: ROOK_FSID
              valueFrom:
                secretKeyRef:
                  key: fsid
                  name: rook-ceph-mon
            - name: ROOK_LOG_LEVEL
              value: DEBUG
          volumeMounts:
            - mountPath: /etc/ceph
              name: ceph-conf-emptydir
            - mountPath: /var/lib/rook
              name: rook-config
      volumes:
        - emptyDir: {}
          name: ceph-conf-emptydir
        - emptyDir: {}
          name: rook-config
      restartPolicy: Never
34.722222
113
0.49952
7359fde222459a39893b61cc84a3c718a5bdf5bb
369
yaml
YAML
testdata/plain/backend/hpa.yaml
lalloni/kustomizer
1d3f86c8d836d27fb87fcf9d2a4fe182d5b61ca9
[ "Apache-2.0" ]
null
null
null
testdata/plain/backend/hpa.yaml
lalloni/kustomizer
1d3f86c8d836d27fb87fcf9d2a4fe182d5b61ca9
[ "Apache-2.0" ]
null
null
null
testdata/plain/backend/hpa.yaml
lalloni/kustomizer
1d3f86c8d836d27fb87fcf9d2a4fe182d5b61ca9
[ "Apache-2.0" ]
null
null
null
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: backend
  namespace: kustomizer-demo
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: backend
  minReplicas: 1
  maxReplicas: 2
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 99
18.45
31
0.691057
735a06d2095980519d14b7a713650a446d8eac9f
3,016
yaml
YAML
tests/tests/legacy_kustomizations/knative-eventing-install/test_data/expected/apps_v1_deployment_eventing-controller.yaml
Soonmok/manifests
2639398b7b8812de4a7c9be0e4841913f29942cf
[ "Apache-2.0" ]
null
null
null
tests/tests/legacy_kustomizations/knative-eventing-install/test_data/expected/apps_v1_deployment_eventing-controller.yaml
Soonmok/manifests
2639398b7b8812de4a7c9be0e4841913f29942cf
[ "Apache-2.0" ]
1
2020-06-28T06:30:01.000Z
2020-07-09T01:02:40.000Z
tests/tests/legacy_kustomizations/knative-eventing-install/test_data/expected/apps_v1_deployment_eventing-controller.yaml
Soonmok/manifests
2639398b7b8812de4a7c9be0e4841913f29942cf
[ "Apache-2.0" ]
2
2020-10-04T22:15:23.000Z
2020-10-28T04:24:01.000Z
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubernetes.io/component: knative-eventing-install
    app.kubernetes.io/instance: knative-eventing-install
    app.kubernetes.io/managed-by: kfctl
    app.kubernetes.io/name: knative-eventing-install
    app.kubernetes.io/part-of: kubeflow
    app.kubernetes.io/version: v0.11.0
    eventing.knative.dev/release: v0.11.0
    kustomize.component: knative
    serving.knative.dev/release: v0.11.0
  name: eventing-controller
  namespace: knative-serving
spec:
  replicas: 1
  selector:
    matchLabels:
      app: eventing-controller
      app.kubernetes.io/component: knative-eventing-install
      app.kubernetes.io/instance: knative-eventing-install
      app.kubernetes.io/managed-by: kfctl
      app.kubernetes.io/name: knative-eventing-install
      app.kubernetes.io/part-of: kubeflow
      app.kubernetes.io/version: v0.11.0
      kustomize.component: knative
      serving.knative.dev/release: v0.11.0
  template:
    metadata:
      annotations:
        sidecar.istio.io/inject: "false"
      labels:
        app: eventing-controller
        app.kubernetes.io/component: knative-eventing-install
        app.kubernetes.io/instance: knative-eventing-install
        app.kubernetes.io/managed-by: kfctl
        app.kubernetes.io/name: knative-eventing-install
        app.kubernetes.io/part-of: kubeflow
        app.kubernetes.io/version: v0.11.0
        eventing.knative.dev/release: v0.11.0
        kustomize.component: knative
        serving.knative.dev/release: v0.11.0
    spec:
      containers:
      - env:
        - name: SYSTEM_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: CONFIG_LOGGING_NAME
          value: config-logging
        - name: CONFIG_OBSERVABILITY_NAME
          value: config-observability
        - name: METRICS_DOMAIN
          value: knative.dev/eventing
        - name: BROKER_INGRESS_IMAGE
          value: gcr.io/knative-releases/knative.dev/eventing/cmd/broker/ingress@sha256:0f671b2c3f6ea952cb314b7e7d7ec929702c41c47f59cce1044cf7daa6212d2c
        - name: BROKER_INGRESS_SERVICE_ACCOUNT
          value: eventing-broker-ingress
        - name: BROKER_FILTER_IMAGE
          value: gcr.io/knative-releases/knative.dev/eventing/cmd/broker/filter@sha256:4cde6893d8763c1c8c52625338d698d5bf6857cf2c37e8e187c5d5a84d75514d
        - name: BROKER_FILTER_SERVICE_ACCOUNT
          value: eventing-broker-filter
        image: gcr.io/knative-releases/knative.dev/eventing/cmd/controller@sha256:d071a79973911f45ffd9021ad7e7cc6f4e262b3f1edb77d9bfdcf91b0d657b4e
        name: eventing-controller
        ports:
        - containerPort: 9090
          name: metrics
        terminationMessagePolicy: FallbackToLogsOnError
        volumeMounts:
        - mountPath: /etc/config-logging
          name: config-logging
      serviceAccountName: eventing-controller
      volumes:
      - configMap:
          name: config-logging
        name: config-logging
38.177215
152
0.695292
735a472e9fc7e3cf464bfe62c56d70893365049a
394
yaml
YAML
config/rbac/critconfigtemplate_viewer_role.yaml
seaunderwater/cluster-api-bootstrap-provider-crit
eb5e59a6758a12988ee2a263661679e10cb5073f
[ "Apache-2.0" ]
9
2020-09-25T18:09:59.000Z
2020-12-15T01:26:09.000Z
config/rbac/critconfigtemplate_viewer_role.yaml
seaunderwater/cluster-api-bootstrap-provider-crit
eb5e59a6758a12988ee2a263661679e10cb5073f
[ "Apache-2.0" ]
6
2020-10-07T17:43:59.000Z
2021-03-09T15:48:38.000Z
config/rbac/critconfigtemplate_viewer_role.yaml
seaunderwater/cluster-api-bootstrap-provider-crit
eb5e59a6758a12988ee2a263661679e10cb5073f
[ "Apache-2.0" ]
2
2020-10-26T17:08:42.000Z
2020-11-06T16:53:59.000Z
# permissions for end users to view critconfigtemplates.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: critconfigtemplate-viewer-role
rules:
  - apiGroups:
      - bootstrap.cluster.x-k8s.io
    resources:
      - critconfigtemplates
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - bootstrap.cluster.x-k8s.io
    resources:
      - critconfigtemplates/status
    verbs:
      - get
18.761905
56
0.736041
735a74a3700a2b2eaa513e5349a15e2351b49fee
803
yaml
YAML
deploy/operator.yaml
selcukusta/kubernetes-operator
b9f327ff6eed516cf44cec5ced343e5c8c2d8823
[ "MIT" ]
6
2019-11-28T09:29:15.000Z
2021-01-14T10:47:40.000Z
deploy/operator.yaml
selcukusta/kubernetes-operator
b9f327ff6eed516cf44cec5ced343e5c8c2d8823
[ "MIT" ]
null
null
null
deploy/operator.yaml
selcukusta/kubernetes-operator
b9f327ff6eed516cf44cec5ced343e5c8c2d8823
[ "MIT" ]
null
null
null
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cm-operator
spec:
  replicas: 1
  selector:
    matchLabels:
      name: cm-operator
  template:
    metadata:
      labels:
        name: cm-operator
    spec:
      serviceAccountName: cm-operator
      imagePullSecrets:
        - name: "reg-cred"
      containers:
        - name: cm-operator
          image: REPLACE_IMAGE
          command:
            - cm-operator
          imagePullPolicy: Always
          env:
            - name: WATCH_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: OPERATOR_NAME
              value: "cm-operator"
22.942857
47
0.523039
735a8a7c94df2c620a87840587a159501a10fcb7
284
yaml
YAML
doc/k8s/rabbitmqadmin-service.yaml
QuanjieDeng/mediasoup-signal
84cf1f2d1df129dc35efa3ebc80c2e8179bb001c
[ "Apache-2.0" ]
7
2020-07-02T06:30:55.000Z
2021-07-16T02:32:22.000Z
doc/k8s/rabbitmqadmin-service.yaml
QuanjieDeng/mediasoup-signal
84cf1f2d1df129dc35efa3ebc80c2e8179bb001c
[ "Apache-2.0" ]
3
2020-08-28T09:01:11.000Z
2020-10-27T09:00:39.000Z
doc/k8s/rabbitmqadmin-service.yaml
QuanjieDeng/mediasoup-signal
84cf1f2d1df129dc35efa3ebc80c2e8179bb001c
[ "Apache-2.0" ]
2
2020-08-28T07:39:11.000Z
2021-01-19T00:29:07.000Z
apiVersion: v1
kind: Service
metadata:
  labels:
    app: rabbitmq
  name: mqadmin
spec:
  ports:
    - name: http
      port: 15672
      protocol: TCP
      targetPort: 15672
  selector:
    app: rabbitmq
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}
15.777778
24
0.633803
735ad7133905c07452373bb5f321211e40ad5681
1,109
yaml
YAML
kubernetes/oxygen/deploy/ingress.yaml
roskenet/Playground
3cce70eeb38646b0f2ffbd071c3aaec7b8f5b9cb
[ "MIT" ]
null
null
null
kubernetes/oxygen/deploy/ingress.yaml
roskenet/Playground
3cce70eeb38646b0f2ffbd071c3aaec7b8f5b9cb
[ "MIT" ]
null
null
null
kubernetes/oxygen/deploy/ingress.yaml
roskenet/Playground
3cce70eeb38646b0f2ffbd071c3aaec7b8f5b9cb
[ "MIT" ]
1
2020-10-02T04:57:25.000Z
2020-10-02T04:57:25.000Z
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: oxygen
spec:
  rules:
    - host: "oxygen.192-168-49-2.nip.io"
      http:
        paths:
          - pathType: Prefix
            path: "/"
            backend:
              service:
                name: oxygen
                port:
                  number: 80
#apiVersion: extensions/v1beta1
#kind: Ingress
#metadata:
#  name: oxygen
#  namespace: oxygen
#spec:
#  rules:
#    - host: oxygen.192-168-49-2.nip.io
#      http:
#        paths:
#          - backend:
#              serviceName: oxygen
#              servicePort: 8080
#apiVersion: networking.k8s.io/v1
#kind: Ingress
#metadata:
#  name: minimal-ingress
#  annotations:
#    nginx.ingress.kubernetes.io/rewrite-target: /
#spec:
#  rules:
#    - http:
#        paths:
#          - path: /testpath
#            pathType: Prefix
#            backend:
#              service:
#                name: test
#                port:
#                  number: 80
#    - host: oxygen.minikube
#      http:
#        paths:
#          - backend:
#              serviceName: oxygen
#              servicePort: 80
20.163636
50
0.503156
735ae1c716ead21258500c1b53b8c6c897841732
795
yaml
YAML
namespaces/live.cloud-platform.service.justice.gov.uk/hmpps-pin-phone-monitor-dev/00-namespace.yaml
rossjones/cloud-platform-environments
85a4e19cf012d97d885c6ea91379ca3f7b59a49a
[ "MIT" ]
null
null
null
namespaces/live.cloud-platform.service.justice.gov.uk/hmpps-pin-phone-monitor-dev/00-namespace.yaml
rossjones/cloud-platform-environments
85a4e19cf012d97d885c6ea91379ca3f7b59a49a
[ "MIT" ]
null
null
null
namespaces/live.cloud-platform.service.justice.gov.uk/hmpps-pin-phone-monitor-dev/00-namespace.yaml
rossjones/cloud-platform-environments
85a4e19cf012d97d885c6ea91379ca3f7b59a49a
[ "MIT" ]
null
null
null
apiVersion: v1
kind: Namespace
metadata:
  name: hmpps-pin-phone-monitor-dev
  labels:
    cloud-platform.justice.gov.uk/is-production: "false"
    cloud-platform.justice.gov.uk/environment-name: "dev"
  annotations:
    cloud-platform.justice.gov.uk/business-unit: "HMPPS"
    cloud-platform.justice.gov.uk/application: "hmpps-prisoner-communication-monitoring"
    cloud-platform.justice.gov.uk/owner: "Digital Prison Services: [email protected]"
    cloud-platform.justice.gov.uk/source-code: "https://github.com/ministryofjustice/hmpps-prisoner-communication-monitoring.git"
    cloud-platform.justice.gov.uk/slack-alert-channel: "dps_alerts_non_prod"
    cloud-platform.justice.gov.uk/slack-channel: "dps_soct_dev"
    cloud-platform.justice.gov.uk/team-name: "dps-soct-dev"
49.6875
129
0.769811
735b739cdb7ef33e62c939ac1940f0bfa447b3d9
239
yml
YAML
portal-api-svc.yml
VasiliyLiao/kube-s3-portal
f6779bf35bb5bc02eac2c724fa6ef64c9572a10a
[ "Apache-2.0" ]
5
2017-11-03T07:23:28.000Z
2021-11-19T02:01:24.000Z
portal-api-svc.yml
kairen/docker-s3-portal
f6779bf35bb5bc02eac2c724fa6ef64c9572a10a
[ "Apache-2.0" ]
2
2017-05-20T16:36:28.000Z
2019-12-27T09:49:02.000Z
portal-api-svc.yml
inwinstack/kube-s3-portal
f6779bf35bb5bc02eac2c724fa6ef64c9572a10a
[ "Apache-2.0" ]
8
2017-05-12T02:18:30.000Z
2019-07-03T09:56:34.000Z
---
apiVersion: v1
kind: Service
metadata:
  name: portal-api
  labels:
    app: portal-api
    daemon: api
spec:
  clusterIP: None
  ports:
    - port: 80
      protocol: TCP
      targetPort: 80
  selector:
    app: portal-api
    daemon: api
13.277778
19
0.631799
735c0bff25833a7d2f0503ca3fe1387942652043
443
yaml
YAML
K8s/Backbone/authorizationdb-deployment.yaml
cryoelite/ChoicesRemake-Backend
6c17bf652177e45f7992ffcb1f7d9bd153874fba
[ "MIT" ]
null
null
null
K8s/Backbone/authorizationdb-deployment.yaml
cryoelite/ChoicesRemake-Backend
6c17bf652177e45f7992ffcb1f7d9bd153874fba
[ "MIT" ]
null
null
null
K8s/Backbone/authorizationdb-deployment.yaml
cryoelite/ChoicesRemake-Backend
6c17bf652177e45f7992ffcb1f7d9bd153874fba
[ "MIT" ]
null
null
null
apiVersion: apps/v1
kind: Deployment
metadata:
  name: authorizationdb-deployment
spec:
  selector:
    matchLabels:
      app: authorizationdb-pod
  replicas: 1
  template:
    metadata:
      labels:
        app: authorizationdb-pod
    spec:
      containers:
        - name: authorizationdb-container
          image: localhost:5000/authorizationdb
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 1433
22.15
47
0.647856
735c123a72b8cf1cf7b5fcfef920ecca6ecdf755
896
yaml
YAML
aks/secrets/secret_deployment.yaml
guitarrapc/k8s-lab
e99163ed6262ce0b563c711da80dc2efbac55a48
[ "MIT" ]
null
null
null
aks/secrets/secret_deployment.yaml
guitarrapc/k8s-lab
e99163ed6262ce0b563c711da80dc2efbac55a48
[ "MIT" ]
null
null
null
aks/secrets/secret_deployment.yaml
guitarrapc/k8s-lab
e99163ed6262ce0b563c711da80dc2efbac55a48
[ "MIT" ]
null
null
null
apiVersion: apps/v1
kind: Deployment
metadata:
  name: secret-deployment
spec:
  replicas: 3
  selector:
    matchLabels:
      app: photo-view
  template:
    metadata:
      labels:
        app: photo-view
    spec:
      containers:
        - image: sampleguitarrapcacr.azurecr.io/photo-view:v1.0
          name: photoview-container
          imagePullPolicy: Always
          ports:
            - containerPort: 80
          env:
            - name: SECRET_ID
              valueFrom:
                secretKeyRef:
                  name: api-key
                  key: id
            - name: SECRET_KEY
              valueFrom:
                secretKeyRef:
                  name: api-key
                  key: key
          volumeMounts:
            - name: secrets-volume
              mountPath: /etc/secrets
              readOnly: true
      volumes:
        - name: secrets-volume
          secret:
            secretName: apl-auth
19.478261
61
0.522321
735c6abc8724c16b99120d9ae499d0a0e24d6d59
1,296
yaml
YAML
services/calibre/deployment.yaml
kristinn93/homelab
3bafb8f3965cc8311e7d5a1aee27044b4ba4f132
[ "MIT" ]
null
null
null
services/calibre/deployment.yaml
kristinn93/homelab
3bafb8f3965cc8311e7d5a1aee27044b4ba4f132
[ "MIT" ]
null
null
null
services/calibre/deployment.yaml
kristinn93/homelab
3bafb8f3965cc8311e7d5a1aee27044b4ba4f132
[ "MIT" ]
null
null
null
apiVersion: apps/v1
kind: Deployment
metadata:
  name: calibre
  namespace: default
  labels:
    app: calibre
spec:
  replicas: 1
  selector:
    matchLabels:
      app: calibre
  template:
    metadata:
      labels:
        app: calibre
    spec:
      containers:
        - image: linuxserver/calibre:latest
          imagePullPolicy: Always
          name: calibre
          resources: {}
          env:
            - name: PGID
              value: "65534"
            - name: PUID
              value: "65534"
            - name: TZ
              value: Atlantic/Reykjavik
          ports:
            - name: calibre-8080
              containerPort: 8080
              hostPort: 8080
            - name: calibre-8081
              containerPort: 8081
              hostPort: 8081
          livenessProbe:
            httpGet:
              path: /
              port: calibre
            initialDelaySeconds: 60
            periodSeconds: 20
          volumeMounts:
            - name: host-disk1
              mountPath: /config
              subPath: calibre
            - mountPath: /books
              name: nfs-vol-disk3
              subPath: books
      volumes:
        - name: host-disk1
          hostPath:
            path: /disk1
        - name: nfs-vol-disk3
          nfs:
            server: 192.168.1.2
            path: /disk3
      restartPolicy: Always
status: {}
21.966102
41
0.515432
735c7b28e5623030ea158ab6108955409588b04a
1,444
yaml
YAML
mock-data/large-cluster/k8s/rbac.authorization.k8s.io:RoleBinding/kube-system-fluentd-gke-scaler-binding.yaml
kubevious/logic-processor
f3b502fdd851fc955ddb00b39da589bbf05566e3
[ "Apache-2.0" ]
null
null
null
mock-data/large-cluster/k8s/rbac.authorization.k8s.io:RoleBinding/kube-system-fluentd-gke-scaler-binding.yaml
kubevious/logic-processor
f3b502fdd851fc955ddb00b39da589bbf05566e3
[ "Apache-2.0" ]
null
null
null
mock-data/large-cluster/k8s/rbac.authorization.k8s.io:RoleBinding/kube-system-fluentd-gke-scaler-binding.yaml
kubevious/logic-processor
f3b502fdd851fc955ddb00b39da589bbf05566e3
[ "Apache-2.0" ]
1
2022-01-16T20:19:54.000Z
2022-01-16T20:19:54.000Z
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: fluentd-gke-scaler-binding
  namespace: kube-system
  selfLink: >-
    /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/fluentd-gke-scaler-binding
  uid: 7515db3e-0b6e-4a15-af1a-4e5fea676f7d
  resourceVersion: '147515614'
  creationTimestamp: '2020-06-13T08:00:07Z'
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: 'true'
  annotations:
    components.gke.io/component-name: fluentd-scaler
    components.gke.io/component-version: 1.0.2
    kubectl.kubernetes.io/last-applied-configuration: >
      {"apiVersion":"rbac.authorization.k8s.io/v1","kind":"RoleBinding","metadata":{"annotations":{"components.gke.io/component-name":"fluentd-scaler","components.gke.io/component-version":"1.0.2"},"labels":{"addonmanager.kubernetes.io/mode":"Reconcile","kubernetes.io/cluster-service":"true"},"name":"fluentd-gke-scaler-binding","namespace":"kube-system"},"roleRef":{"apiGroup":"rbac.authorization.k8s.io","kind":"Role","name":"system:fluentd-gke-scaler"},"subjects":[{"kind":"ServiceAccount","name":"fluentd-gke-scaler","namespace":"kube-system"}]}
subjects:
  - kind: ServiceAccount
    name: fluentd-gke-scaler
    namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: 'system:fluentd-gke-scaler'
49.793103
556
0.707756
735ca80b0d7b182a80d030c54d66ccda244a3314
397
yaml
YAML
kubernetes-operators/deploy/deploy-operator.yaml
otus-kuber-2022-03/aosipenko99_platform
fd1dd2b81ed54f5945915c8fae0075918fef83b3
[ "MIT" ]
null
null
null
kubernetes-operators/deploy/deploy-operator.yaml
otus-kuber-2022-03/aosipenko99_platform
fd1dd2b81ed54f5945915c8fae0075918fef83b3
[ "MIT" ]
1
2022-03-27T21:36:08.000Z
2022-03-27T21:36:08.000Z
kubernetes-operators/deploy/deploy-operator.yaml
otus-kuber-2022-03/aosipenko99_platform
fd1dd2b81ed54f5945915c8fae0075918fef83b3
[ "MIT" ]
null
null
null
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql-operator
spec:
  replicas: 1
  selector:
    matchLabels:
      name: mysql-operator
  template:
    metadata:
      labels:
        name: mysql-operator
    spec:
      serviceAccountName: mysql-operator
      containers:
        - name: operator
          image: "aosipenko99/otus-operator:latest"
          imagePullPolicy: "Always"
19.85
51
0.642317
735ceed21999c45bc2bd831cdad0617d5864f3f3
144
yaml
YAML
built-in-references/Kubernetes/perf/violations/violation9770.yaml
sw47/azure-policy
4a38e4cb4047b5364902653f0defe45f1b21bc61
[ "MIT" ]
null
null
null
built-in-references/Kubernetes/perf/violations/violation9770.yaml
sw47/azure-policy
4a38e4cb4047b5364902653f0defe45f1b21bc61
[ "MIT" ]
null
null
null
built-in-references/Kubernetes/perf/violations/violation9770.yaml
sw47/azure-policy
4a38e4cb4047b5364902653f0defe45f1b21bc61
[ "MIT" ]
null
null
null
apiVersion: v1
kind: Pod
metadata:
  name: image-demo-9770
spec:
  containers:
    - name: nginx
      image: nginx #ritaacr.azurecr.io/nginx:latest
18
49
0.722222
735d0d29cbe3e494a3adc40ebd13d4629bd0cd5e
169
yml
YAML
kubernetes-security/task02/03-clusterrole.yml
otus-kuber-2021-03/timoschenkoaa_platform
bfb865fb6de0bff97f617f8e218e8c3348febc3b
[ "MIT" ]
null
null
null
kubernetes-security/task02/03-clusterrole.yml
otus-kuber-2021-03/timoschenkoaa_platform
bfb865fb6de0bff97f617f8e218e8c3348febc3b
[ "MIT" ]
10
2021-04-08T07:08:01.000Z
2021-09-29T06:26:27.000Z
kubernetes-security/task02/03-clusterrole.yml
otus-kuber-2021-03/timoschenkoaa_platform
bfb865fb6de0bff97f617f8e218e8c3348febc3b
[ "MIT" ]
null
null
null
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: pods-viewer
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "watch", "list"]
21.125
40
0.680473
735d479cdb98222fd9457578219050f859b5c2f6
220
yaml
YAML
cluster/apps/blogs/strapi/pvc-config.yaml
h3mmy/bloopySphere
8b745fe93e8ac6a41960a1909fe43c122caf38e3
[ "MIT" ]
1
2022-02-11T15:19:02.000Z
2022-02-11T15:19:02.000Z
cluster/apps/blogs/strapi/pvc-config.yaml
h3mmy/bloopySphere
8b745fe93e8ac6a41960a1909fe43c122caf38e3
[ "MIT" ]
190
2021-12-06T18:38:36.000Z
2022-03-31T22:23:23.000Z
cluster/apps/blogs/strapi/pvc-config.yaml
h3mmy/bloopySphere
8b745fe93e8ac6a41960a1909fe43c122caf38e3
[ "MIT" ]
1
2022-03-26T13:53:01.000Z
2022-03-26T13:53:01.000Z
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: strapi-config-v1
  namespace: blogs
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 15Gi
  storageClassName: ceph-block
15.714286
30
0.718182
735d65c39f02575d9a597b6aeb9064bf5cbefc69
910
yaml
YAML
k8s/deployments/sample-frontend-production.yaml
cpini/sample-app
730ee357617662cf24b92363408aed3e1ecd18b7
[ "Apache-2.0" ]
null
null
null
k8s/deployments/sample-frontend-production.yaml
cpini/sample-app
730ee357617662cf24b92363408aed3e1ecd18b7
[ "Apache-2.0" ]
null
null
null
k8s/deployments/sample-frontend-production.yaml
cpini/sample-app
730ee357617662cf24b92363408aed3e1ecd18b7
[ "Apache-2.0" ]
null
null
null
kind: Deployment
apiVersion: apps/v1
metadata:
  name: sample-frontend-production
spec:
  replicas: 4
  selector:
    matchLabels:
      app: sample
      role: frontend
      env: production
  template:
    metadata:
      name: frontend
      labels:
        app: sample
        role: frontend
        env: production
    spec:
      containers:
        - name: frontend
          image: gcr.io/magento-295411/sample-app
          resources:
            limits:
              memory: "500Mi"
              cpu: "100m"
          imagePullPolicy: Always
          readinessProbe:
            httpGet:
              path: /health
              port: 8080
          env:
            - name: COMPONENT
              value: frontend
            - name: BACKEND_URL
              value: http://sample-backend-production:8080/metadata
            - name: VERSION
              value: production
          ports:
            - name: frontend
              containerPort: 8080
22.195122
63
0.548352
735da79484421d37f50e7bb989c43f0c12bbe413
104
yaml
YAML
ResourceQuota.yaml
40660367/YAML
cf4df4d631e8434d82ee047c537847b5ef71d865
[ "MIT" ]
null
null
null
ResourceQuota.yaml
40660367/YAML
cf4df4d631e8434d82ee047c537847b5ef71d865
[ "MIT" ]
null
null
null
ResourceQuota.yaml
40660367/YAML
cf4df4d631e8434d82ee047c537847b5ef71d865
[ "MIT" ]
null
null
null
apiVersion: v1
kind: ResourceQuota
metadata:
  name: pod-demo
spec:
  hard:
    pods: "2" #为名字空间配置Pod配额
11.555556
19
0.701923
735e044946988bff8eada5f5525a39204e6144ff
3,244
yaml
YAML
config/config.yaml
houshengbo/net-istio
9c91b9967f673d3d2c956eb1c3e1835139863ae0
[ "Apache-2.0" ]
null
null
null
config/config.yaml
houshengbo/net-istio
9c91b9967f673d3d2c956eb1c3e1835139863ae0
[ "Apache-2.0" ]
null
null
null
config/config.yaml
houshengbo/net-istio
9c91b9967f673d3d2c956eb1c3e1835139863ae0
[ "Apache-2.0" ]
null
null
null
# Copyright 2018 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: ConfigMap
metadata:
  name: config-istio
  namespace: knative-serving
  labels:
    serving.knative.dev/release: devel
    networking.knative.dev/ingress-provider: istio
data:
  # TODO(nghia): Extract the .svc.cluster.local suffix into its own config.
  _example: |
    ################################
    #                              #
    #    EXAMPLE CONFIGURATION     #
    #                              #
    ################################

    # This block is not actually functional configuration,
    # but serves to illustrate the available configuration
    # options and document them in a way that is accessible
    # to users that `kubectl edit` this config map.
    #
    # These sample configuration options may be copied out of
    # this example block and unindented to be in the data block
    # to actually change the configuration.

    # Default Knative Gateway after v0.3. It points to the Istio
    # standard istio-ingressgateway, instead of a custom one that we
    # used pre-0.3. The configuration format should be `gateway.
    # {{gateway_namespace}}.{{gateway_name}}: "{{ingress_name}}.
    # {{ingress_namespace}}.svc.cluster.local"`. The {{gateway_namespace}}
    # is optional; when it is omitted, the system will search for
    # the gateway in the serving system namespace `knative-serving`
    gateway.knative-serving.knative-ingress-gateway: "istio-ingressgateway.istio-system.svc.cluster.local"

    # A cluster local gateway to allow pods outside of the mesh to access
    # Services and Routes not exposing through an ingress. If the users
    # do have a service mesh setup, this isn't required and can be removed.
    #
    # An example use case is when users want to use Istio without any
    # sidecar injection (like Knative's istio-ci-no-mesh.yaml). Since every pod
    # is outside of the service mesh in that case, a cluster-local service
    # will need to be exposed to a cluster-local gateway to be accessible.
    # The configuration format should be `local-gateway.{{local_gateway_namespace}}.
    # {{local_gateway_name}}: "{{cluster_local_gateway_name}}.
    # {{cluster_local_gateway_namespace}}.svc.cluster.local"`. The
    # {{local_gateway_namespace}} is optional; when it is omitted, the system
    # will search for the local gateway in the serving system namespace
    # `knative-serving`
    local-gateway.knative-serving.knative-local-gateway: "knative-local-gateway.istio-system.svc.cluster.local"

    # To use only Istio service mesh and no knative-local-gateway, replace
    # all local-gateway.* entries by the following entry.
    local-gateway.mesh: "mesh"
46.342857
111
0.700678
735e78f1d5217878570c7a437b9abe36f783ac35
194
yaml
YAML
nfs-server/nfs-server-pvc.yaml
adlrocha/fabric-on-kubernetes
17574b75b32b992edad3c2ae442d50290af51af8
[ "Apache-2.0" ]
23
2018-04-25T07:42:06.000Z
2022-03-04T03:18:15.000Z
nfs-server/nfs-server-pvc.yaml
fushui-Fang/fabric-on-kubernetes
17574b75b32b992edad3c2ae442d50290af51af8
[ "Apache-2.0" ]
3
2018-09-11T06:14:51.000Z
2021-04-28T13:55:43.000Z
nfs-server/nfs-server-pvc.yaml
fushui-Fang/fabric-on-kubernetes
17574b75b32b992edad3c2ae442d50290af51af8
[ "Apache-2.0" ]
13
2018-06-15T00:54:28.000Z
2022-01-26T03:35:45.000Z
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nfs-server-pvc-fast
spec:
  storageClassName: fast
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
16.166667
27
0.726804
735e87e8551f5e4dffb279c30ae22c1e9140b87c
166
yaml
YAML
openebs/lvm-local/sc_lvm_localpv.yaml
kmova/bootstrap
f162e100fe9398e1e344dd224fdd75071674c24b
[ "Apache-2.0" ]
15
2017-03-17T09:38:09.000Z
2021-07-09T14:59:31.000Z
openebs/lvm-local/sc_lvm_localpv.yaml
kmova/bootstrap
f162e100fe9398e1e344dd224fdd75071674c24b
[ "Apache-2.0" ]
null
null
null
openebs/lvm-local/sc_lvm_localpv.yaml
kmova/bootstrap
f162e100fe9398e1e344dd224fdd75071674c24b
[ "Apache-2.0" ]
16
2017-03-20T00:07:14.000Z
2021-07-08T17:12:42.000Z
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: openebs-lvmpv
parameters:
  storage: "lvm"
  vgpattern: "lvmvg"
provisioner: local.csi.openebs.io
16.6
33
0.759036
735ed1885ac9740d5a70c5a5a3083c8bdf40f153
144
yaml
YAML
built-in-references/Kubernetes/perf/violations/violation6605.yaml
sw47/azure-policy
4a38e4cb4047b5364902653f0defe45f1b21bc61
[ "MIT" ]
null
null
null
built-in-references/Kubernetes/perf/violations/violation6605.yaml
sw47/azure-policy
4a38e4cb4047b5364902653f0defe45f1b21bc61
[ "MIT" ]
null
null
null
built-in-references/Kubernetes/perf/violations/violation6605.yaml
sw47/azure-policy
4a38e4cb4047b5364902653f0defe45f1b21bc61
[ "MIT" ]
null
null
null
apiVersion: v1
kind: Pod
metadata:
  name: image-demo-6605
spec:
  containers:
    - name: nginx
      image: nginx #ritaacr.azurecr.io/nginx:latest
18
49
0.722222
735f153239f7a24b51b4064a20e79e9635a50b59
225
yaml
YAML
cluster/apps/home/zwave2mqtt/config-pvc.yaml
Ornias1993/home-cluster
026be804d29badb3dbc5594b133d7c3d3a302700
[ "Unlicense" ]
null
null
null
cluster/apps/home/zwave2mqtt/config-pvc.yaml
Ornias1993/home-cluster
026be804d29badb3dbc5594b133d7c3d3a302700
[ "Unlicense" ]
null
null
null
cluster/apps/home/zwave2mqtt/config-pvc.yaml
Ornias1993/home-cluster
026be804d29badb3dbc5594b133d7c3d3a302700
[ "Unlicense" ]
null
null
null
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zwave2mqtt-config-v1
  namespace: home
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: rook-ceph-block
16.071429
35
0.728889
736013cb8f89ed32d7b6d32f9919be6ac3c539b7
177
yml
YAML
service.yml
adambirse/text-ingester
51bc89c1e448225dc2bea647733eef78ec96c5d5
[ "MIT" ]
null
null
null
service.yml
adambirse/text-ingester
51bc89c1e448225dc2bea647733eef78ec96c5d5
[ "MIT" ]
null
null
null
service.yml
adambirse/text-ingester
51bc89c1e448225dc2bea647733eef78ec96c5d5
[ "MIT" ]
null
null
null
apiVersion: v1
kind: Service
metadata:
  name: ingester
spec:
  selector:
    app: ingester
  ports:
    - protocol: "TCP"
      port: 8080
      targetPort: 8080
  type: LoadBalancer
14.75
20
0.677966
736022450efde01ad34af8eb0a9618256fbed1ad
667
yaml
YAML
config/prow-staging/cluster/test-pods_namespace.yaml
rahulii/test-infra
34ab18218e7e212a75f8699a0b90c648a81b8c93
[ "Apache-2.0" ]
3
2021-08-28T17:47:51.000Z
2021-08-29T06:55:09.000Z
config/prow-staging/cluster/test-pods_namespace.yaml
rahulii/test-infra
34ab18218e7e212a75f8699a0b90c648a81b8c93
[ "Apache-2.0" ]
7
2022-03-29T19:59:39.000Z
2022-03-31T19:14:52.000Z
config/prow-staging/cluster/test-pods_namespace.yaml
rahulii/test-infra
34ab18218e7e212a75f8699a0b90c648a81b8c93
[ "Apache-2.0" ]
3
2017-03-02T04:24:35.000Z
2017-09-09T00:40:02.000Z
# Copyright 2020 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: test-pods
35.105263
74
0.766117
736059db0c7c07102b520ad834f3211990706175
1,074
yaml
YAML
uninstaller.yaml
jlewi/pipelines
76a4d1a27fb58b1660cf838f256615ed1f643fa4
[ "Apache-2.0" ]
null
null
null
uninstaller.yaml
jlewi/pipelines
76a4d1a27fb58b1660cf838f256615ed1f643fa4
[ "Apache-2.0" ]
null
null
null
uninstaller.yaml
jlewi/pipelines
76a4d1a27fb58b1660cf838f256615ed1f643fa4
[ "Apache-2.0" ]
null
null
null
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: batch/v1
kind: Job
metadata:
  generateName: uninstall-ml-pipeline-
spec:
  backoffLimit: 1
  template:
    metadata:
      name: uninstall-ml-pipeline
    spec:
      containers:
        - name: uninstaller
          image: gcr.io/ml-pipeline/bootstrapper:0.1.0
          imagePullPolicy: 'Always'
          # Additional parameter available:
          args: [
            # "--namespace", "foo",
            # "--report_usage", "false",
            "--uninstall",
          ]
      restartPolicy: Never
29.833333
74
0.679702
73605b25b01f1e4b868e6280e32dcbb442142d93
222
yaml
YAML
deploy/role_binding.yaml
accanto-systems/lm-operator
fcd65afd2b6f16ce5675dd1bbfdae78abc29443c
[ "Apache-2.0" ]
null
null
null
deploy/role_binding.yaml
accanto-systems/lm-operator
fcd65afd2b6f16ce5675dd1bbfdae78abc29443c
[ "Apache-2.0" ]
1
2019-12-23T09:44:37.000Z
2019-12-23T09:44:37.000Z
deploy/role_binding.yaml
accanto-systems/lm-operator
fcd65afd2b6f16ce5675dd1bbfdae78abc29443c
[ "Apache-2.0" ]
null
null
null
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: lm-operator
subjects:
  - kind: ServiceAccount
    name: lm-operator
roleRef:
  kind: Role
  name: lm-operator
  apiGroup: rbac.authorization.k8s.io
18.5
40
0.765766
7360a369e734532bbc2cd59075fffff7e75e7ead
144
yaml
YAML
built-in-references/Kubernetes/perf/violations/violation5068.yaml
sw47/azure-policy
4a38e4cb4047b5364902653f0defe45f1b21bc61
[ "MIT" ]
null
null
null
built-in-references/Kubernetes/perf/violations/violation5068.yaml
sw47/azure-policy
4a38e4cb4047b5364902653f0defe45f1b21bc61
[ "MIT" ]
null
null
null
built-in-references/Kubernetes/perf/violations/violation5068.yaml
sw47/azure-policy
4a38e4cb4047b5364902653f0defe45f1b21bc61
[ "MIT" ]
null
null
null
apiVersion: v1
kind: Pod
metadata:
  name: image-demo-5068
spec:
  containers:
    - name: nginx
      image: nginx #ritaacr.azurecr.io/nginx:latest
18
49
0.722222
7361407a915f55c083784af1c3d23ad1e0e43491
1,205
yml
YAML
.kubernetes/.init/migrate/job.yml
Etimo/etimo-achievements
82918b43fe625af28939b0f0debaabd289b680b0
[ "MIT" ]
2
2022-01-06T18:02:53.000Z
2022-01-07T07:25:07.000Z
.kubernetes/.init/migrate/job.yml
Etimo/etimo-achievements
82918b43fe625af28939b0f0debaabd289b680b0
[ "MIT" ]
37
2021-10-14T10:34:03.000Z
2022-03-23T11:48:34.000Z
.kubernetes/.init/migrate/job.yml
Etimo/etimo-achievements
82918b43fe625af28939b0f0debaabd289b680b0
[ "MIT" ]
null
null
null
apiVersion: batch/v1
kind: Job
metadata:
  name: migrate-job-${DATE}-${TAG}
  labels:
    app.kubernetes.io/name: achievements-migrate-${DATE}-${TAG}
    app.kubernetes.io/instance: achievements-migrate-${DATE}-${TAG}
    app.kubernetes.io/version: '${TAG}'
    app.kubernetes.io/component: init
    app.kubernetes.io/part-of: achievements
  annotations:
    kubernetes.io/change-cause: 'release ${RELEASE}'
spec:
  ttlSecondsAfterFinished: 300
  template:
    metadata:
      labels:
        hash: '${TAG}'
        app.kubernetes.io/part-of: achievements
    spec:
      containers:
        - name: achievements-migrate
          image: niclaslindstedt/etimo-achievements-migrate:${COMMIT_SHA}
          resources:
            requests:
              cpu: '50m'
              memory: '64Mi'
            limits:
              cpu: '100m'
              memory: '256Mi'
          envFrom:
            - secretRef:
                name: provisioned-secrets
            - configMapRef:
                name: provisioned-config
          env:
            - name: DEBUG
              value: '${DEBUG}'
            - name: NODE_ENV
              value: '${NODE_ENV}'
      restartPolicy: Never
  backoffLimit: 1
28.023256
73
0.560996
73614e9099eea5127342c8c0351cce190d7b0ab5
421
yaml
YAML
integration/init/other-list-types/expected/base/roleBindingList.yaml
marccampbell/ship
c5e9b6dd093ad52f02d2ab630df8a49b4d83a58d
[ "Apache-2.0" ]
665
2018-08-29T18:07:48.000Z
2022-03-30T01:14:33.000Z
integration/init/other-list-types/expected/base/roleBindingList.yaml
marccampbell/ship
c5e9b6dd093ad52f02d2ab630df8a49b4d83a58d
[ "Apache-2.0" ]
249
2018-08-29T18:51:43.000Z
2022-03-08T22:42:34.000Z
other-list-types/roleBindingList.yaml
AlexRogalskiy/test-charts
d7bd2c86e68f8f127052d11a95cca350abc944e5
[ "MIT" ]
82
2018-08-29T18:10:20.000Z
2022-03-30T02:55:39.000Z
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBindingList
items:
  - apiVersion: rbac.authorization.k8s.io/v1
    kind: RoleBinding
    metadata:
      name: rolebinding-prometheus
      labels:
        app: default-prometheus
      namespace: default
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: Role
      name: role-prometheus
    subjects:
      - kind: ServiceAccount
        name: role-prometheus
        namespace: default
22.157895
42
0.724466
73616f82836493d5dd0e68707b80c188952940cf
1,880
yml
YAML
experiments/generic/docker-service-kill/test/test.yml
uditgaurav/litmus-go
f2db46f74ae8cd3f99a62e6d69ee4b7f28ba8563
[ "Apache-2.0" ]
45
2020-04-17T06:35:05.000Z
2022-03-23T22:32:48.000Z
experiments/generic/docker-service-kill/test/test.yml
uditgaurav/litmus-go
f2db46f74ae8cd3f99a62e6d69ee4b7f28ba8563
[ "Apache-2.0" ]
243
2020-05-04T21:01:57.000Z
2022-03-24T15:21:30.000Z
experiments/generic/docker-service-kill/test/test.yml
uditgaurav/litmus-go
f2db46f74ae8cd3f99a62e6d69ee4b7f28ba8563
[ "Apache-2.0" ]
75
2020-04-22T09:33:17.000Z
2022-03-30T16:13:49.000Z
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: litmus-experiment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: litmus-experiment
  template:
    metadata:
      labels:
        app: litmus-experiment
    spec:
      serviceAccountName: docker-service-kill-sa
      containers:
        - name: gotest
          image: busybox
          command:
            - sleep
            - "3600"
          env:
            # provide application namespace
            - name: APP_NAMESPACE
              value: ""
            # provide application labels
            - name: APP_LABEL
              value: ""
            # provide application kind
            - name: APP_KIND
              value: ""
            - name: TOTAL_CHAOS_DURATION
              value: ""
            # provide auxiliary application details - namespace and labels of the applications
            # sample input is - "ns1:app=percona,ns2:name=nginx"
            - name: AUXILIARY_APPINFO
              value: ""
            ## Period to wait before injection of chaos in sec
            - name: RAMP_TIME
              value: ""
            ## env var that describes the library used to execute the chaos
            ## default: litmus. Supported values: litmus, powerfulseal, chaoskube
            - name: LIB
              value: ""
            # provide the chaos namespace
            - name: CHAOS_NAMESPACE
              value: ""
            - name: NODE_LABEL
              value: ""
            - name: TARGET_NODE
              value: ""
            - name: TARGET_CONTAINER
              value: ""
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: CHAOS_SERVICE_ACCOUNT
              valueFrom:
                fieldRef:
                  fieldPath: spec.serviceAccountName
25.066667
94
0.501596
73617bdecd19211fbf9daebc8a0460cdc62d88fe
2,260
yaml
YAML
config/prow/cluster/plank_deployment.yaml
vicsufer/test-infra
6b02761704c1c1070fefb454e998da19d71a9d2b
[ "Apache-2.0" ]
null
null
null
config/prow/cluster/plank_deployment.yaml
vicsufer/test-infra
6b02761704c1c1070fefb454e998da19d71a9d2b
[ "Apache-2.0" ]
null
null
null
config/prow/cluster/plank_deployment.yaml
vicsufer/test-infra
6b02761704c1c1070fefb454e998da19d71a9d2b
[ "Apache-2.0" ]
null
null
null
# Copyright 2017 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: default
  name: plank
  labels:
    app: plank
spec:
  # Mutually exclusive with prow-controller-manager. Only one of them may have more than zero replicas.
  # If this is re-enabled, the service monitor must be changed back to point to plank:
  # https://github.com/kubernetes/test-infra/blob/a35145f8e7e00d72a69dc1e8be57b8cf09184a52/config/prow/cluster/monitoring/prow_servicemonitors.yaml#L133
  replicas: 0 # Do not scale up.
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: plank
  template:
    metadata:
      labels:
        app: plank
    spec:
      serviceAccountName: plank
      containers:
      - name: plank
        image: gcr.io/k8s-prow/plank:v20201028-8dcb569da5
        args:
        - --config-path=/etc/config/config.yaml
        - --dry-run=false
        - --job-config-path=/etc/job-config
        - --kubeconfig=/etc/kubeconfig/config
        - --skip-report=true
        volumeMounts:
        - mountPath: /etc/kubeconfig
          name: kubeconfig
          readOnly: true
        - name: oauth
          mountPath: /etc/github
          readOnly: true
        - name: config
          mountPath: /etc/config
          readOnly: true
        - name: job-config
          mountPath: /etc/job-config
          readOnly: true
      volumes:
      - name: kubeconfig
        secret:
          defaultMode: 420
          secretName: kubeconfig
      - name: oauth
        secret:
          secretName: oauth-token
      - name: config
        configMap:
          name: config
      - name: job-config
        configMap:
          name: job-config
30.540541
152
0.649558
73617c05e33abc07b6bf05715fdecfa3b1d8d6be
450
yml
YAML
kubernetes/udagram_feed_deployment.yml
marcusholmgren/udagram
fa05f9f348c0cff1c791cd00f7f01358c38950fb
[ "MIT" ]
null
null
null
kubernetes/udagram_feed_deployment.yml
marcusholmgren/udagram
fa05f9f348c0cff1c791cd00f7f01358c38950fb
[ "MIT" ]
null
null
null
kubernetes/udagram_feed_deployment.yml
marcusholmgren/udagram
fa05f9f348c0cff1c791cd00f7f01358c38950fb
[ "MIT" ]
null
null
null
apiVersion: apps/v1
kind: Deployment
metadata:
  name: udagram-feed-api
spec:
  replicas: 1
  selector:
    matchLabels:
      app: udagram-feed-api
  template:
    metadata:
      labels:
        app: udagram-feed-api
    spec:
      containers:
        - name: udagram-feed-api
          image: marcusholmgren/udagram-feed-api
          envFrom:
            - configMapRef:
                name: udagram-config
          ports:
            - containerPort: 8080
19.565217
46
0.593333
736180b454d2f937af4d2c844f3d71e5f29f2f38
194
yaml
YAML
manifests/example-vsphere-pvc.yaml
zuzzas/vsphere-csi-driver
9e08b3a6f2d283465f73e064bc26af97df4a4124
[ "Apache-2.0" ]
null
null
null
manifests/example-vsphere-pvc.yaml
zuzzas/vsphere-csi-driver
9e08b3a6f2d283465f73e064bc26af97df4a4124
[ "Apache-2.0" ]
null
null
null
manifests/example-vsphere-pvc.yaml
zuzzas/vsphere-csi-driver
9e08b3a6f2d283465f73e064bc26af97df4a4124
[ "Apache-2.0" ]
null
null
null
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: vsphere-csi-pvc
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
  storageClassName: vsphere-fcd
16.166667
31
0.737113
736181fc7c21417ca02a169b03cec8a6ad53cdf7
2,070
yaml
YAML
docs/admin/high-availability/kube-controller-manager.yaml
dnwake/kubernetes_trust
96a313825a7ef88ac37133bd3f7e7523aceae385
[ "Apache-2.0" ]
270
2016-02-19T07:03:53.000Z
2022-03-26T15:20:57.000Z
docs/admin/high-availability/kube-controller-manager.yaml
dnwake/kubernetes_trust
96a313825a7ef88ac37133bd3f7e7523aceae385
[ "Apache-2.0" ]
28
2016-02-18T16:00:27.000Z
2016-06-01T02:16:29.000Z
docs/admin/high-availability/kube-controller-manager.yaml
dnwake/kubernetes_trust
96a313825a7ef88ac37133bd3f7e7523aceae385
[ "Apache-2.0" ]
114
2016-02-19T07:49:55.000Z
2022-03-13T10:22:42.000Z
apiVersion: v1
kind: Pod
metadata:
  name: kube-controller-manager
spec:
  containers:
  - command:
    - /bin/sh
    - -c
    - /usr/local/bin/kube-controller-manager --master=127.0.0.1:8080 --cluster-name=e2e-test-bburns --cluster-cidr=10.245.0.0/16 --allocate-node-cidrs=true --cloud-provider=gce --service-account-private-key-file=/srv/kubernetes/server.key --v=2 1>>/var/log/kube-controller-manager.log 2>&1
    image: gcr.io/google_containers/kube-controller-manager:fda24638d51a48baa13c35337fcd4793
    livenessProbe:
      httpGet:
        path: /healthz
        port: 10252
      initialDelaySeconds: 15
      timeoutSeconds: 1
    name: kube-controller-manager
    volumeMounts:
    - mountPath: /srv/kubernetes
      name: srvkube
      readOnly: true
    - mountPath: /var/log/kube-controller-manager.log
      name: logfile
    - mountPath: /etc/ssl
      name: etcssl
      readOnly: true
    - mountPath: /usr/share/ssl
      name: usrsharessl
      readOnly: true
    - mountPath: /var/ssl
      name: varssl
      readOnly: true
    - mountPath: /usr/ssl
      name: usrssl
      readOnly: true
    - mountPath: /usr/lib/ssl
      name: usrlibssl
      readOnly: true
    - mountPath: /usr/local/openssl
      name: usrlocalopenssl
      readOnly: true
    - mountPath: /etc/openssl
      name: etcopenssl
      readOnly: true
    - mountPath: /etc/pki/tls
      name: etcpkitls
      readOnly: true
  hostNetwork: true
  volumes:
  - hostPath:
      path: /srv/kubernetes
    name: srvkube
  - hostPath:
      path: /var/log/kube-controller-manager.log
    name: logfile
  - hostPath:
      path: /etc/ssl
    name: etcssl
  - hostPath:
      path: /usr/share/ssl
    name: usrsharessl
  - hostPath:
      path: /var/ssl
    name: varssl
  - hostPath:
      path: /usr/ssl
    name: usrssl
  - hostPath:
      path: /usr/lib/ssl
    name: usrlibssl
  - hostPath:
      path: /usr/local/openssl
    name: usrlocalopenssl
  - hostPath:
      path: /etc/openssl
    name: etcopenssl
  - hostPath:
      path: /etc/pki/tls
    name: etcpkitls
24.939759
145
0.631884
7361909b6e8ac87f278f4f4b639506114f8ddb9d
435
yaml
YAML
helm/kdl-server/templates/drone/drone-service.yaml
konstellation-io/kdl-server
b3dac5f51f7cc32f4e4b9939e73f44f1fff32602
[ "MIT" ]
4
2022-03-01T14:46:34.000Z
2022-03-18T09:19:14.000Z
helm/kdl-server/templates/drone/drone-service.yaml
konstellation-io/kdl-server
b3dac5f51f7cc32f4e4b9939e73f44f1fff32602
[ "MIT" ]
531
2021-02-01T09:10:14.000Z
2022-03-31T11:37:16.000Z
helm/kdl-server/templates/drone/drone-service.yaml
konstellation-io/kdl-server
b3dac5f51f7cc32f4e4b9939e73f44f1fff32602
[ "MIT" ]
null
null
null
apiVersion: v1
kind: Service
metadata:
  name: drone
  labels:
    app: drone
spec:
  clusterIP: None
  ports:
  # drone-secrets container
  - name: drone-secrets
    port: 3000
    targetPort: 3000
    protocol: TCP
  # drone-runner container
  - name: http
    port: 80
    targetPort: 80
    protocol: TCP
  - name: runner
    port: 9000
    targetPort: 9000
    protocol: TCP
  selector:
    app: drone
16.730769
29
0.602299
736261c544c1eb9325d13144e34ffdd3c41382d1
1,030
yaml
YAML
jsonnet/vendor/github.com/observatorium/deployments/environments/dev/manifests/dex-secret.yaml
ArthurSens/operator
08f589b69dbd6a0072f7af8f5225c93c905c1fb2
[ "Apache-2.0" ]
114
2019-10-15T08:53:16.000Z
2022-03-31T17:14:52.000Z
jsonnet/vendor/github.com/observatorium/deployments/environments/dev/manifests/dex-secret.yaml
ArthurSens/operator
08f589b69dbd6a0072f7af8f5225c93c905c1fb2
[ "Apache-2.0" ]
144
2019-06-21T08:20:22.000Z
2020-06-08T14:36:33.000Z
jsonnet/vendor/github.com/observatorium/deployments/environments/dev/manifests/dex-secret.yaml
ArthurSens/operator
08f589b69dbd6a0072f7af8f5225c93c905c1fb2
[ "Apache-2.0" ]
52
2019-10-18T09:17:32.000Z
2022-03-21T09:54:18.000Z
apiVersion: v1
kind: Secret
metadata:
  labels:
    app.kubernetes.io/component: identity-provider
    app.kubernetes.io/instance: e2e-test
    app.kubernetes.io/name: dex
    app.kubernetes.io/version: v2.24.0
  name: dex
  namespace: dex
stringData:
  config.yaml: |-
    "enablePasswordDB": true
    "issuer": "https://dex.dex.svc.cluster.local:5556/dex"
    "logger":
      "level": "debug"
    "oauth2":
      "passwordConnector": "local"
    "staticClients":
    - "id": "test"
      "issuerCAPath": "/var/run/tls/test/service-ca.crt"
      "name": "test"
      "secret": "ZXhhbXBsZS1hcHAtc2VjcmV0"
    "staticPasswords":
    - "email": "[email protected]"
      "hash": "$2a$10$2b2cU8CPhOTaGrs1HRQuAueS7JTT5ZHsHSzYiFPm1leZck7Mc8T4W"
      "userID": "08a8684b-db88-4b73-90a9-3cd1661f5466"
      "username": "admin"
    "storage":
      "config":
        "file": "/storage/dex.db"
      "type": "sqlite3"
    "web":
      "https": "0.0.0.0:5556"
      "tlsCert": "/etc/dex/tls/tls.crt"
      "tlsKey": "/etc/dex/tls/tls.key"
27.837838
76
0.618447
73627a84860e08a4d4dae203d91f4e534bea956e
479
yaml
YAML
operator/deploy/crds/pvviewer.com_pvwebviewers_crd.yaml
mdnahian/pv-viewer-operator
28a12ff2bd4d5c1fe60b875e7b866c8798b118ff
[ "MIT" ]
null
null
null
operator/deploy/crds/pvviewer.com_pvwebviewers_crd.yaml
mdnahian/pv-viewer-operator
28a12ff2bd4d5c1fe60b875e7b866c8798b118ff
[ "MIT" ]
null
null
null
operator/deploy/crds/pvviewer.com_pvwebviewers_crd.yaml
mdnahian/pv-viewer-operator
28a12ff2bd4d5c1fe60b875e7b866c8798b118ff
[ "MIT" ]
null
null
null
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: pvwebviewers.pvviewer.com
spec:
  group: pvviewer.com
  names:
    kind: PvWebViewer
    listKind: PvWebViewerList
    plural: pvwebviewers
    singular: pvwebviewer
  scope: Namespaced
  versions:
  - name: v1alpha1
    schema:
      openAPIV3Schema:
        type: object
        x-kubernetes-preserve-unknown-fields: true
    served: true
    storage: true
    subresources:
      status: {}
20.826087
50
0.697286
7362a34ab87285b71a8dada5625c794c9dda5632
238
yaml
YAML
bootcamp/Scenario-401/pv.yaml
DennyZhang/kubernetes-challenges
8b191bb24454e7c3f5310f1f390dba1a981ecbdb
[ "Apache-2.0" ]
130
2017-12-01T05:30:41.000Z
2018-06-17T14:43:23.000Z
bootcamp/Scenario-401/pv.yaml
DennyZhang/kubernetes-challenges
8b191bb24454e7c3f5310f1f390dba1a981ecbdb
[ "Apache-2.0" ]
1
2018-01-08T11:14:25.000Z
2018-01-08T12:42:18.000Z
bootcamp/Scenario-401/pv.yaml
DennyZhang/kubernetes-challenges
8b191bb24454e7c3f5310f1f390dba1a981ecbdb
[ "Apache-2.0" ]
71
2018-07-24T17:31:53.000Z
2022-02-11T13:42:32.000Z
kind: PersistentVolume
apiVersion: v1
metadata:
  name: jenkins-home
  labels:
    type: local
spec:
  capacity:
    storage: 20Gi
  accessModes:
  - ReadWriteOnce
  storageClassName: standard
  hostPath:
    path: "/data/jenkins-home"
15.866667
30
0.705882
7362a9b2bcdc1cf2f7cc938c93765d61603d4239
551
yaml
YAML
deploy/operator.yaml
jmckind/jira-operator
b609c04d6f8b922df2c88ab461dc2e85a495c17f
[ "Apache-2.0" ]
3
2019-05-19T10:40:53.000Z
2020-01-15T13:40:13.000Z
deploy/operator.yaml
jmckind/jira-operator
b609c04d6f8b922df2c88ab461dc2e85a495c17f
[ "Apache-2.0" ]
null
null
null
deploy/operator.yaml
jmckind/jira-operator
b609c04d6f8b922df2c88ab461dc2e85a495c17f
[ "Apache-2.0" ]
3
2019-06-26T12:44:30.000Z
2019-10-07T12:25:50.000Z
apiVersion: apps/v1
kind: Deployment
metadata:
  name: jira-operator
spec:
  replicas: 1
  selector:
    matchLabels:
      name: jira-operator
  template:
    metadata:
      labels:
        name: jira-operator
    spec:
      containers:
      - name: jira-operator
        image: quay.io/coreos/jira-operator:0.0.2
        command:
        - jira-operator
        imagePullPolicy: Always
        env:
        - name: WATCH_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
21.192308
51
0.557169
7362e5b40d44a8b09a2da0e9e96fc4031c09e176
5,568
yaml
YAML
k8s/us-central1/gen/tf-r2.4.0-ncf-func-k80-x1.yaml
michaelbanfield/ml-testing-accelerators
c88001b4b2a205d5fa193cfbe5af6728dd602ee1
[ "Apache-2.0" ]
null
null
null
k8s/us-central1/gen/tf-r2.4.0-ncf-func-k80-x1.yaml
michaelbanfield/ml-testing-accelerators
c88001b4b2a205d5fa193cfbe5af6728dd602ee1
[ "Apache-2.0" ]
null
null
null
k8s/us-central1/gen/tf-r2.4.0-ncf-func-k80-x1.yaml
michaelbanfield/ml-testing-accelerators
c88001b4b2a205d5fa193cfbe5af6728dd602ee1
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"apiVersion": "batch/v1beta1"
"kind": "CronJob"
"metadata":
  "name": "tf-r2.4.0-ncf-func-k80-x1"
  "namespace": "automated"
"spec":
  "concurrencyPolicy": "Forbid"
  "jobTemplate":
    "spec":
      "activeDeadlineSeconds": 3600
      "backoffLimit": 1
      "template":
        "spec":
          "containers":
          - "args":
            - "python3"
            - "official/recommendation/ncf_keras_main.py"
            - "--train_dataset_path=$(NCF_DIR)/tpu_data_dir/training_cycle_*/*"
            - "--eval_dataset_path=$(NCF_DIR)/tpu_data_dir/eval_data/*"
            - "--input_meta_data_path=$(NCF_DIR)/tpu_data_dir/metadata"
            - "--data_dir=$(NCF_DIR)/movielens_data"
            - "--batch_size=99000"
            - "--learning_rate=3e-5"
            - "--dataset=ml-20m"
            - "--eval_batch_size=160000"
            - "--learning_rate=0.00382059"
            - "--beta1=0.783529"
            - "--beta2=0.909003"
            - "--epsilon=1.45439e-07"
            - "--num_factors=64"
            - "--hr_threshold=0.635"
            - "--layers=256,256,128,64"
            - "--use_synthetic_data=false"
            - "--model_dir=$(MODEL_DIR)"
            - "--train_epochs=1"
            - "--ml_perf=true"
            - "--keras_use_ctl=true"
            "env":
            - "name": "POD_NAME"
              "valueFrom":
                "fieldRef":
                  "fieldPath": "metadata.name"
            - "name": "POD_UID"
              "valueFrom":
                "fieldRef":
                  "fieldPath": "metadata.uid"
            - "name": "POD_NAMESPACE"
              "valueFrom":
                "fieldRef":
                  "fieldPath": "metadata.namespace"
            - "name": "JOB_NAME"
              "valueFrom":
                "fieldRef":
                  "fieldPath": "metadata.labels['job-name']"
            - "name": "MODEL_DIR"
              "value": "$(OUTPUT_BUCKET)/tf-r2.4.0/ncf/func/k80-x1/$(JOB_NAME)"
            "envFrom":
            - "configMapRef":
                "name": "gcs-buckets"
            "image": "gcr.io/xl-ml-test/tensorflow:r2.4.0"
            "imagePullPolicy": "Always"
            "name": "train"
            "resources":
              "limits":
                "nvidia.com/gpu": 1
              "requests":
                "cpu": 2
                "memory": "20G"
            "volumeMounts":
            - "mountPath": "/dev/shm"
              "name": "dshm"
              "readOnly": false
          "initContainers":
          - "env":
            - "name": "POD_NAME"
              "valueFrom":
                "fieldRef":
                  "fieldPath": "metadata.name"
            - "name": "POD_UID"
              "valueFrom":
                "fieldRef":
                  "fieldPath": "metadata.uid"
            - "name": "POD_NAMESPACE"
              "valueFrom":
                "fieldRef":
                  "fieldPath": "metadata.namespace"
            - "name": "JOB_NAME"
              "valueFrom":
                "fieldRef":
                  "fieldPath": "metadata.labels['job-name']"
            - "name": "MODEL_DIR"
              "value": "$(OUTPUT_BUCKET)/tf-r2.4.0/ncf/func/k80-x1/$(JOB_NAME)"
            - "name": "METRIC_CONFIG"
              "value": |
                {
                  "metric_collection_config": {
                    "default_aggregation_strategies": [
                      "final"
                    ],
                    "metric_to_aggregation_strategies": {
                      "examples_per_second": [
                        "average"
                      ]
                    },
                    "use_run_name_prefix": true,
                    "write_to_bigquery": true
                  },
                  "regression_test_config": {
                    "metric_success_conditions": {
                      "examples_per_second_average": {
                        "comparison": "greater_or_equal",
                        "success_threshold": {
                          "stddevs_from_mean": 2
                        }
                      },
                      "total_wall_time": {
                        "comparison": "less",
                        "success_threshold": {
                          "stddevs_from_mean": 5
                        },
                        "wait_for_n_points_of_history": 10
                      }
                    }
                  },
                  "test_name": "tf-r2.4.0-ncf-func-k80-x1"
                }
            "envFrom":
            - "configMapRef":
                "name": "gcs-buckets"
            "image": "gcr.io/xl-ml-test/publisher:stable"
            "imagePullPolicy": "Always"
            "name": "publisher"
          "nodeSelector":
            "cloud.google.com/gke-accelerator": "nvidia-tesla-k80"
          "priorityClassName": "gpu-k80"
          "restartPolicy": "Never"
          "volumes":
          - "emptyDir":
              "medium": "Memory"
            "name": "dshm"
  "schedule": "0 6 * * *"
  "successfulJobsHistoryLimit": 1
35.692308
79
0.460489
736337855c40b24d300dc7448de20bb6495c03a2
611
yml
YAML
apps/kubefiles/myboot-deployment-configuration.yml
sebastienblanc/kubernetes-tutorial
72412039a191205dec4de099fc5f0f3de21aebf7
[ "Apache-2.0" ]
null
null
null
apps/kubefiles/myboot-deployment-configuration.yml
sebastienblanc/kubernetes-tutorial
72412039a191205dec4de099fc5f0f3de21aebf7
[ "Apache-2.0" ]
null
null
null
apps/kubefiles/myboot-deployment-configuration.yml
sebastienblanc/kubernetes-tutorial
72412039a191205dec4de099fc5f0f3de21aebf7
[ "Apache-2.0" ]
null
null
null
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: myboot
  name: myboot
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myboot
  template:
    metadata:
      labels:
        app: myboot
    spec:
      containers:
      - name: myboot
        image: quay.io/burrsutter/myboot:v1
        ports:
        - containerPort: 8080
        envFrom:
        - configMapRef:
            name: my-config
        resources:
          requests:
            memory: "300Mi"
            cpu: "250m" # 1/4 core
          limits:
            memory: "400Mi"
            cpu: "1000m" # 1 core
18.515152
45
0.510638
73633dc449d64cbbe257e7808fad27faabdef428
529
yml
YAML
todo-api-cm.yml
tavorserenase/DO288-apps
ea68121c1302e442badb98e545a4f9b15ba9128c
[ "Apache-2.0" ]
null
null
null
todo-api-cm.yml
tavorserenase/DO288-apps
ea68121c1302e442badb98e545a4f9b15ba9128c
[ "Apache-2.0" ]
null
null
null
todo-api-cm.yml
tavorserenase/DO288-apps
ea68121c1302e442badb98e545a4f9b15ba9128c
[ "Apache-2.0" ]
null
null
null
apiVersion: v1
data:
  DATABASE_NAME: todo
  DATABASE_PASSWORD: redhat123
  DATABASE_SVC_HOSTNAME: tododb
  DATABASE_USER: todoapp
kind: ConfigMap
metadata:
  annotations:
    fabric8.io/git-commit: 20d773370efdfce3221f56348ba2c544558a58a3
    fabric8.io/git-branch: todo-migrate
    fabric8.io/git-url: https://github.com/tavorserenase/DO288-apps
  labels:
    app: todo-api
    provider: fabric8
    version: 1.0.0-SNAPSHOT
    group: com.redhat.training.example
  name: todoapi
  namespace: archibaldtuttlebuttle-todo-migrate
26.45
67
0.767486
736344467d2ebc6c0b36afb7b36d11b2d686cd98
1,379
yaml
YAML
apps/prow/cluster/sinker_deployment.yaml
mauriciopoppe/k8s.io
46af6132b7a8860c23d38b1d7289074839f9683d
[ "Apache-2.0" ]
null
null
null
apps/prow/cluster/sinker_deployment.yaml
mauriciopoppe/k8s.io
46af6132b7a8860c23d38b1d7289074839f9683d
[ "Apache-2.0" ]
null
null
null
apps/prow/cluster/sinker_deployment.yaml
mauriciopoppe/k8s.io
46af6132b7a8860c23d38b1d7289074839f9683d
[ "Apache-2.0" ]
null
null
null
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: prow
  name: sinker
  labels:
    app: sinker
spec:
  replicas: 1
  selector:
    matchLabels:
      app: sinker
  template:
    metadata:
      labels:
        app: sinker
    spec:
      serviceAccountName: sinker
      containers:
      - name: sinker
        args:
        - --config-path=/etc/config/config.yaml
        - --dry-run=false
        - --job-config-path=/etc/job-config
        - --kubeconfig=/etc/kubeconfig/kubeconfig
        image: gcr.io/k8s-prow/sinker:v20220202-56bc36b558
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - "ALL"
          privileged: false
          readOnlyRootFilesystem: true
        ports:
        - name: metrics
          containerPort: 9090
        volumeMounts:
        - mountPath: /etc/kubeconfig
          name: kubeconfig
          readOnly: true
        - name: config
          mountPath: /etc/config
          readOnly: true
        - name: job-config
          mountPath: /etc/job-config
          readOnly: true
      volumes:
      - name: kubeconfig
        secret:
          defaultMode: 420
          secretName: k8s-infra-build-clusters-kubeconfig
      - name: config
        configMap:
          name: config
      - name: job-config
        configMap:
          name: job-config
23.775862
58
0.55765
73634c4ad96dbc78cf16ac9308c533174f927416
12,538
yaml
YAML
pkg/apis/submarinerconfig/v1alpha1/0000_00_submarineraddon.open-cluster-management.io_submarinerconfigs.crd.yaml
mkolesnik/submariner-addon
e9fb85939918d4b1e52d007e192827a05c061c87
[ "Apache-2.0" ]
null
null
null
pkg/apis/submarinerconfig/v1alpha1/0000_00_submarineraddon.open-cluster-management.io_submarinerconfigs.crd.yaml
mkolesnik/submariner-addon
e9fb85939918d4b1e52d007e192827a05c061c87
[ "Apache-2.0" ]
5
2021-11-19T13:39:00.000Z
2022-03-21T18:47:32.000Z
pkg/apis/submarinerconfig/v1alpha1/0000_00_submarineraddon.open-cluster-management.io_submarinerconfigs.crd.yaml
mkolesnik/submariner-addon
e9fb85939918d4b1e52d007e192827a05c061c87
[ "Apache-2.0" ]
null
null
null
--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.5.0 name: submarinerconfigs.submarineraddon.open-cluster-management.io spec: group: submarineraddon.open-cluster-management.io names: kind: SubmarinerConfig listKind: SubmarinerConfigList plural: submarinerconfigs singular: submarinerconfig scope: Namespaced versions: - name: v1alpha1 schema: openAPIV3Schema: description: SubmarinerConfig represents the configuration for Submariner, the submariner-addon will use it to configure the Submariner. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: Spec defines the configuration of the Submariner properties: IPSecIKEPort: default: 500 description: IPSecIKEPort represents IPsec IKE port (default 500). type: integer IPSecNATTPort: default: 4500 description: IPSecNATTPort represents IPsec NAT-T port (default 4500). type: integer NATTDiscoveryPort: default: 4900 description: NATTDiscoveryPort specifies the port used for NAT-T Discovery (default UDP/4900). type: integer NATTEnable: default: true description: NATTEnable represents IPsec NAT-T enabled (default true). type: boolean cableDriver: default: libreswan description: CableDriver represents the submariner cable driver implementation. Available options are libreswan (default) strongswan, wireguard, and vxlan. type: string credentialsSecret: description: CredentialsSecret is a reference to the secret with a certain cloud platform credentials, the supported platform includes AWS, GCP, Azure, ROKS and OSD. The submariner-addon will use these credentials to prepare Submariner cluster environment. If the submariner cluster environment requires submariner-addon preparation, this field should be specified. properties: name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object gatewayConfig: description: GatewayConfig represents the gateways configuration of the Submariner. properties: aws: description: AWS represents the configuration for Amazon Web Services. If the platform of managed cluster is not Amazon Web Services, this field will be ignored. properties: instanceType: default: m5n.large description: InstanceType represents the Amazon Web Services EC2 instance type of the gateway node that will be created on the managed cluster. The default value is `m5n.large`. type: string type: object gateways: default: 1 description: Gateways represents the count of worker nodes that will be used to deploy the Submariner gateway component on the managed cluster. 
If the platform of managed cluster is Amazon Web Services, the submariner-addon will create the specified number of worker nodes and label them with `submariner.io/gateway` on the managed cluster, for other platforms, the submariner-addon will select the specified number of worker nodes and label them with `submariner.io/gateway` on the managed cluster. The default value is 1, if the value is greater than 1, the Submariner gateway HA will be enabled automatically. type: integer type: object globalCIDR: description: GlobalCIDR specifies the global CIDR used by the cluster. type: string imagePullSpecs: description: ImagePullSpecs represents the desired images of submariner components installed on the managed cluster. If not specified, the default submariner images that was defined by submariner operator will be used. properties: lighthouseAgentImagePullSpec: description: LighthouseAgentImagePullSpec represents the desired image of the lighthouse agent. type: string lighthouseCoreDNSImagePullSpec: description: LighthouseCoreDNSImagePullSpec represents the desired image of lighthouse coredns. type: string submarinerGlobalnetImagePullSpec: description: SubmarinerGlobalnetImagePullSpec represents the desired image of the submariner globalnet. type: string submarinerImagePullSpec: description: SubmarinerImagePullSpec represents the desired image of submariner. type: string submarinerRouteAgentImagePullSpec: description: SubmarinerRouteAgentImagePullSpec represents the desired image of the submariner route agent. type: string type: object loadBalancerEnable: default: false description: LoadBalancerEnable enables or disables load balancer mode. When enabled, a LoadBalancer is created in the submariner-operator namespace (default false). type: boolean subscriptionConfig: description: SubscriptionConfig represents a Submariner subscription. SubscriptionConfig can be used to customize the Submariner subscription. properties: channel: description: Channel represents the channel of a submariner subscription. type: string source: default: redhat-operators description: Source represents the catalog source of a submariner subscription. The default value is redhat-operators type: string sourceNamespace: default: openshift-marketplace description: SourceNamespace represents the catalog source namespace of a submariner subscription. The default value is openshift-marketplace type: string startingCSV: description: StartingCSV represents the startingCSV of a submariner subscription. type: string type: object type: object status: description: Status represents the current status of submariner configuration properties: conditions: description: Conditions contain the different condition statuses for this configuration. items: description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" properties: lastTransitionTime: description: lastTransitionTime is the last time the condition transitioned from one status to another. 
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: description: message is a human readable message indicating details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: description: status of the condition, one of True, False, Unknown. enum: - "True" - "False" - Unknown type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string required: - lastTransitionTime - message - reason - status - type type: object type: array managedClusterInfo: description: ManagedClusterInfo represents the information of a managed cluster. properties: clusterName: description: ClusterName represents the name of the managed cluster. type: string infraId: description: InfraId represents the infrastructure id of the managed cluster. type: string platform: description: Platform represents the cloud provider of the managed cluster. type: string region: description: Region represents the cloud region of the managed cluster. type: string vendor: description: Vendor represents the kubernetes vendor of the managed cluster. type: string type: object type: object type: object served: true storage: true subresources: status: {} status: acceptedNames: kind: "" plural: "" conditions: [] storedVersions: []
62.69
678
0.614133
7363ad93552ac4df91efe89be81820ff4ecb908a
391
yaml
YAML
3tier-app/test_pod/deploy-1.yaml
bistrulli/3tier-bench-app
71c6bbcd1e0c13f7247702867df4158f6a14744d
[ "MIT" ]
null
null
null
3tier-app/test_pod/deploy-1.yaml
bistrulli/3tier-bench-app
71c6bbcd1e0c13f7247702867df4158f6a14744d
[ "MIT" ]
null
null
null
3tier-app/test_pod/deploy-1.yaml
bistrulli/3tier-bench-app
71c6bbcd1e0c13f7247702867df4158f6a14744d
[ "MIT" ]
null
null
null
apiVersion: "apps/v1"
kind: "Deployment"
metadata:
  name: "test-pod1"
  #namespace: "my-space"
  labels:
    app: "test-pod1"
spec:
  replicas: 1
  selector:
    matchLabels:
      app: "test-pod1"
  template:
    metadata:
      labels:
        app: "test-pod1"
    spec:
      containers:
      - name: "my-exp"
        image: "bistrulli/my_exp:test_pod"
        imagePullPolicy: "Always"
18.619048
42
0.590793