Dataset schema (column, type, observed range):

  hexsha                                      string   length 40
  size                                        int64    24 - 1.05M
  ext                                         string   2 classes
  lang                                        string   1 class
  max_stars_repo_path                         string   length 5 - 269
  max_stars_repo_name                         string   length 7 - 107
  max_stars_repo_head_hexsha                  string   length 40 - 78
  max_stars_repo_licenses                     list
  max_stars_count                             int64    1 - 84.9k
  max_stars_repo_stars_event_min_datetime     string   length 24
  max_stars_repo_stars_event_max_datetime     string   length 24
  max_issues_repo_path                        string   length 5 - 269
  max_issues_repo_name                        string   length 7 - 107
  max_issues_repo_head_hexsha                 string   length 40 - 78
  max_issues_repo_licenses                    list
  max_issues_count                            int64    1 - 116k
  max_issues_repo_issues_event_min_datetime   string   length 24
  max_issues_repo_issues_event_max_datetime   string   length 24
  max_forks_repo_path                         string   length 5 - 269
  max_forks_repo_name                         string   length 7 - 107
  max_forks_repo_head_hexsha                  string   length 40 - 78
  max_forks_repo_licenses                     list
  max_forks_count                             int64    1 - 55.9k
  max_forks_repo_forks_event_min_datetime     string   length 24
  max_forks_repo_forks_event_max_datetime     string   length 24
  content                                     string   length 24 - 1.05M
  avg_line_length                             float64  1 - 304k
  max_line_length                             int64    14 - 1.03M
  alphanum_fraction                           float64  0 - 1
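Each record below follows this schema. As a reading aid, here is a minimal sketch of loading one such shard with pandas and filtering on a few of the columns above; it is an assumption, not part of the dataset itself, and the file name yaml_shard.parquet is hypothetical (the real distribution format may differ).

    import pandas as pd

    # Hypothetical local shard with the columns listed above.
    df = pd.read_parquet("yaml_shard.parquet")

    # YAML files under 1 KiB whose repository has received at least one star.
    small_starred = df[(df["size"] < 1024) & (df["max_stars_count"] >= 1)]
    print(small_starred[["max_stars_repo_name", "max_stars_repo_path", "size"]].head())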
7363b013b21ad29b481e449113ccf31538505634
2,802
yaml
YAML
theodolite-benchmarks/definitions/uc4-flink/resources/taskmanager-deployment.yaml
bvonheid/theodolite
4f99b44e8cae7531ffe1a5300bc782f34733b2a3
[ "Apache-2.0" ]
null
null
null
theodolite-benchmarks/definitions/uc4-flink/resources/taskmanager-deployment.yaml
bvonheid/theodolite
4f99b44e8cae7531ffe1a5300bc782f34733b2a3
[ "Apache-2.0" ]
null
null
null
theodolite-benchmarks/definitions/uc4-flink/resources/taskmanager-deployment.yaml
bvonheid/theodolite
4f99b44e8cae7531ffe1a5300bc782f34733b2a3
[ "Apache-2.0" ]
null
null
null
apiVersion: apps/v1
kind: Deployment
metadata:
  name: flink-taskmanager
spec:
  replicas: 1
  selector:
    matchLabels:
      app: flink
      component: taskmanager
  template:
    metadata:
      labels:
        app: flink
        component: taskmanager
    spec:
      containers:
      - name: taskmanager
        image: ghcr.io/cau-se/theodolite-uc4-flink:latest
        env:
        - name: KAFKA_BOOTSTRAP_SERVERS
          value: "theodolite-cp-kafka:9092"
        - name: SCHEMA_REGISTRY_URL
          value: "http://theodolite-cp-schema-registry:8081"
        - name: COMMIT_INTERVAL_MS
          value: "100"
        - name: CHECKPOINTING
          value: "false"
        - name: PARALLELISM
          value: "1"
        - name: "FLINK_STATE_BACKEND"
          value: "rocksdb"
        - name: JOB_MANAGER_RPC_ADDRESS
          value: "flink-jobmanager"
        - name: TASK_MANAGER_NUMBER_OF_TASK_SLOTS
          value: "1" #TODO
        - name: FLINK_PROPERTIES
          value: |+
            blob.server.port: 6124
            jobmanager.rpc.port: 6123
            taskmanager.rpc.port: 6122
            queryable-state.proxy.ports: 6125
            jobmanager.memory.process.size: 4Gb
            taskmanager.memory.process.size: 4Gb
            #parallelism.default: 1 #TODO
        resources:
          limits:
            memory: 4Gi
            cpu: 1000m
        args: ["taskmanager"]
        ports:
        - containerPort: 6122
          name: rpc
        - containerPort: 6125
          name: query-state
        - containerPort: 9249
          name: metrics
        livenessProbe:
          tcpSocket:
            port: 6122
          initialDelaySeconds: 30
          periodSeconds: 60
        volumeMounts:
        - name: flink-config-volume-rw
          mountPath: /opt/flink/conf/
      securityContext:
        runAsUser: 9999 # refers to user _flink_ from official flink image, change if necessary
      initContainers:
      - name: init-taskmanager
        image: busybox:1.28
        command: ['cp', '-a', '/flink-config/.', '/flink-config-rw/']
        volumeMounts:
        - name: flink-config-volume
          mountPath: /flink-config/
        - name: flink-config-volume-rw
          mountPath: /flink-config-rw/
      volumes:
      - name: flink-config-volume
        configMap:
          name: flink-config
          items:
          - key: flink-conf.yaml
            path: flink-conf.yaml
          - key: log4j-console.properties
            path: log4j-console.properties
      - name: flink-config-volume-rw
        emptyDir: {}
31.840909
100
0.519272
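The three trailing numbers of every record (avg_line_length, max_line_length, alphanum_fraction) appear to be derived from the content field. Below is a small sketch of the assumed definitions; they reproduce values of the right magnitude for the record above (2,802 bytes over roughly 88 lines gives an average near 31.8), but the dataset's exact preprocessing is not documented here.

    def line_stats(content: str):
        # Assumed definitions, not the dataset's published code.
        lines = content.split("\n")
        avg_line_length = sum(len(line) for line in lines) / len(lines)
        max_line_length = max(len(line) for line in lines)
        alphanum_fraction = sum(ch.isalnum() for ch in content) / len(content)
        return avg_line_length, max_line_length, alphanum_fraction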
7363cf8e473863b72bbfcd200352e230f5920e74
204
yaml
YAML
assets/PersistentVolumeClaim/my-alamedainfluxdbPVC.yaml
chenyingtz/federatorai-operator
3eb3084da305163faacb9ef9615ac2dc5ecd5ac5
[ "Apache-2.0" ]
null
null
null
assets/PersistentVolumeClaim/my-alamedainfluxdbPVC.yaml
chenyingtz/federatorai-operator
3eb3084da305163faacb9ef9615ac2dc5ecd5ac5
[ "Apache-2.0" ]
null
null
null
assets/PersistentVolumeClaim/my-alamedainfluxdbPVC.yaml
chenyingtz/federatorai-operator
3eb3084da305163faacb9ef9615ac2dc5ecd5ac5
[ "Apache-2.0" ]
null
null
null
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-alameda.influxdb-data.pvc
  namespace: federatorai
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: '10Gi'
18.545455
36
0.730392
7363d852fffb5979ae6b04515c66dbeb604d61e6
1,283
yaml
YAML
pods/minio-volume.yaml
elawrence33/ccti-rke2-aws
53a1bba10b14ba51281966d228619b9c37fff215
[ "MIT" ]
null
null
null
pods/minio-volume.yaml
elawrence33/ccti-rke2-aws
53a1bba10b14ba51281966d228619b9c37fff215
[ "MIT" ]
null
null
null
pods/minio-volume.yaml
elawrence33/ccti-rke2-aws
53a1bba10b14ba51281966d228619b9c37fff215
[ "MIT" ]
null
null
null
# apiVersion: v1
# kind: PersistentVolume
# metadata:
#   name: gitlab-minio
#   labels:
#     app: minio
#     chart: minio-0.4.3
#     component: app
#     heritage: Helm
#     # pod-template-hash: fbd6dbb4d
#     release: gitlab
#     security.istio.io/tlsMode: istio
#     service.istio.io/canonical-name: minio
#     service.istio.io/canonical-revision: latest
# spec:
#   storageClassName: local-path # Original value: gitlab
#   local: # Original value: local and path did not exist
#     path: /minio
#   claimRef:
#     name: gitlab-minio
#     namespace: gitlab
#   capacity:
#     storage: 10Gi
#   volumeMode: Filesystem
#   accessModes:
#     - ReadWriteOnce
#   persistentVolumeReclaimPolicy: Delete # May be depricated
#   nodeAffinity:
#     required:
#       nodeSelectorTerms:
#       - matchExpressions:
#         - key: kubernetes.io/hostname
#           operator: In
#           values:
#           - ip-172-31-19-123.us-west-1.compute.internal
#   # hostPath:
#   #   path: "/mnt/data"
# ---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gitlab-minio
  namespace: gitlab
spec:
  volumeName: gitlab-minio
  storageClassName: local-path
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi
25.156863
82
0.630553
736413a797ef0e7b5579ce9403b5a2ae973f796b
1,827
yaml
YAML
k8s/3-2-generateArtifactsJob.yaml
hrad-h/c1
c2286084cdcfec9f546ee552a1bc68cab524db2b
[ "Apache-2.0" ]
null
null
null
k8s/3-2-generateArtifactsJob.yaml
hrad-h/c1
c2286084cdcfec9f546ee552a1bc68cab524db2b
[ "Apache-2.0" ]
null
null
null
k8s/3-2-generateArtifactsJob.yaml
hrad-h/c1
c2286084cdcfec9f546ee552a1bc68cab524db2b
[ "Apache-2.0" ]
null
null
null
---
apiVersion: batch/v1
kind: Job
metadata:
  namespace: cadanac-v0-1
  name: utils
spec:
  backoffLimit: 1
  template:
    metadata:
      name: utils
    spec:
      restartPolicy: "Never"
      volumes:
      - name: cadanacvolume
        persistentVolumeClaim:
          claimName: cadanac-pvc
      - name: dockersocket
        hostPath:
          path: /var/run/docker.sock
      containers:
      - name: cryptogen
        image: hyperledger/fabric-tools:1.4.4
        imagePullPolicy: Always
        command: ["sh", "-c", "echo 'Cryptogen Starts'; ls -l /shared/artifacts; while [ ! -d /shared/artifacts ]; do echo Waiting for Hyperledger artifacts folder; sleep 1; done; cryptogen generate --config /shared/artifacts/crypto-config.yaml && cp -r crypto-config /shared/ && for file in $(find /shared/ -iname *_sk); do echo $file; dir=$(dirname $file); echo ${dir}; mv ${dir}/*_sk ${dir}/key.pem; done && find /shared -type d | xargs chmod a+rx && find /shared -type f | xargs chmod a+r && touch /shared/status_cryptogen_complete;"]
        volumeMounts:
        - mountPath: /shared
          name: cadanacvolume
      - name: configtxgen
        image: hyperledger/fabric-tools:1.4.4
        imagePullPolicy: Always
        command: ["sh", "-c", "echo 'Configtxgen Starts'; ls -l /shared; sleep 1 && while [ ! -f /shared/status_cryptogen_complete ]; do echo Waiting for cryptogen; sleep 1; done; cp /shared/artifacts/configtx.yaml /shared/; cd /shared/; export FABRIC_CFG_PATH=$PWD; configtxgen -profile CadanacOrdererGenesis -outputBlock genesis.block && find /shared -type d | xargs chmod a+rx && find /shared -type f | xargs chmod a+r && touch /shared/status_configtxgen_complete && rm /shared/status_cryptogen_complete;"]
        volumeMounts:
        - mountPath: /shared
          name: cadanacvolume
49.378378
538
0.65572
73645f3b416d199ad7c2e556ad2f69ce8308e69b
3,092
yaml
YAML
config-root/namespaces/jx/jx-pipelines-visualizer/jx-pipelines-visualizer-deploy.yaml
Jamin-Consulting/jx3-azure-akv-cluster
f02fc8177b3d6719a784934968f1ebf3203936c9
[ "Apache-2.0" ]
null
null
null
config-root/namespaces/jx/jx-pipelines-visualizer/jx-pipelines-visualizer-deploy.yaml
Jamin-Consulting/jx3-azure-akv-cluster
f02fc8177b3d6719a784934968f1ebf3203936c9
[ "Apache-2.0" ]
2
2021-06-06T15:53:50.000Z
2021-06-06T15:58:24.000Z
config-root/namespaces/jx/jx-pipelines-visualizer/jx-pipelines-visualizer-deploy.yaml
Jamin-Consulting/jx3-azure-akv-cluster
f02fc8177b3d6719a784934968f1ebf3203936c9
[ "Apache-2.0" ]
null
null
null
# Source: jx-pipelines-visualizer/templates/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: name: jx-pipelines-visualizer labels: app.kubernetes.io/name: jx-pipelines-visualizer app.kubernetes.io/instance: "jx-pipelines-visualizer" helm.sh/chart: jx-pipelines-visualizer-1.6.6 app.kubernetes.io/version: "1.6.6" app.kubernetes.io/managed-by: "Helm" gitops.jenkins-x.io/pipeline: 'namespaces' annotations: meta.helm.sh/release-name: 'jx-pipelines-visualizer' wave.pusher.com/update-on-config-change: 'true' namespace: jx spec: replicas: 1 revisionHistoryLimit: 2 selector: matchLabels: app.kubernetes.io/name: jx-pipelines-visualizer app.kubernetes.io/instance: "jx-pipelines-visualizer" template: metadata: labels: app.kubernetes.io/name: jx-pipelines-visualizer app.kubernetes.io/instance: "jx-pipelines-visualizer" helm.sh/chart: jx-pipelines-visualizer-1.6.6 app.kubernetes.io/version: "1.6.6" app.kubernetes.io/managed-by: "Helm" spec: containers: - name: jx-pipelines-visualizer image: "gcr.io/jenkinsxio/jx-pipelines-visualizer:1.6.6" args: - -namespace - jx - -resync-interval - 60s - -archived-logs-url-template - azblob://logs/jenkins-x/logs/{{.Owner}}/{{.Repository}}/{{if hasPrefix .Branch "pr"}}{{.Branch | upper}}{{else}}{{.Branch}}{{end}}/{{.Build}}.log - -archived-pipelines-url-template - azblob://logs/jenkins-x/logs/{{.Owner}}/{{.Repository}}/{{if hasPrefix .Branch "pr"}}{{.Branch | upper}}{{else}}{{.Branch}}{{end}}/{{.Build}}.yaml - -archived-pipelineruns-url-template - azblob://logs/jenkins-x/pipelineruns/{{.Namespace}}/{{.Name}}.yaml - -pipeline-trace-url-template - http://grafana-jx-observability.51.140.220.231.nip.io/explore?left=%5B%22now%22,%22now%22,%22Tempo%22,%7B%22query%22:%22{{.TraceID}}%22%7D%5D - -log-level - INFO env: - name: XDG_CONFIG_HOME value: /home/jenkins - name: GIT_SECRET_MOUNT_PATH value: /secrets/git - name: AZURE_STORAGE_ACCOUNT value: "jxadaptedcrane" ports: - name: http containerPort: 8080 livenessProbe: tcpSocket: port: http readinessProbe: httpGet: path: /healthz port: http volumeMounts: - mountPath: /secrets/git name: secrets-git resources: limits: cpu: "1" memory: 512M requests: cpu: "0.2" memory: 128M securityContext: fsGroup: 1000 serviceAccountName: jx-pipelines-visualizer enableServiceLinks: false volumes: - name: secrets-git secret: defaultMode: 420 secretName: tekton-git
35.54023
160
0.577296
736492e85a62cdd0dc27d8818041a2ff5f813655
163
yaml
YAML
GKE/tourists-claim.yaml
Yazan99sh/tourist
220c5537f24df71996518c5b625a8ec57e0de915
[ "MIT" ]
null
null
null
GKE/tourists-claim.yaml
Yazan99sh/tourist
220c5537f24df71996518c5b625a8ec57e0de915
[ "MIT" ]
1
2020-07-19T13:04:49.000Z
2020-07-19T13:04:49.000Z
GKE/tourists-claim.yaml
Yazan99sh/tourist
220c5537f24df71996518c5b625a8ec57e0de915
[ "MIT" ]
2
2021-03-14T19:09:03.000Z
2021-04-29T18:55:47.000Z
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: tourists-claim
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 30Gi
16.3
27
0.723926
7364b176f0039e74cf936c42234ff6499733b503
530
yaml
YAML
frontend/deployment.yaml
cbron/multiappdemo
a351f49055211ce713f768f489a20ab263f965d8
[ "Unlicense" ]
4
2020-05-18T16:37:41.000Z
2020-10-20T16:30:40.000Z
frontend/deployment.yaml
cbron/multiappdemo
a351f49055211ce713f768f489a20ab263f965d8
[ "Unlicense" ]
null
null
null
frontend/deployment.yaml
cbron/multiappdemo
a351f49055211ce713f768f489a20ab263f965d8
[ "Unlicense" ]
null
null
null
apiVersion: apps/v1
kind: Deployment
metadata:
  name: frontend
spec:
  replicas: 1
  selector:
    matchLabels:
      app: frontend
      version: v1
  template:
    metadata:
      labels:
        app: frontend
        version: v1
    spec:
      containers:
      - name: app
        image: cbron/multiappdemo-frontend:latest
        imagePullPolicy: Always
        env:
        - name: BACKEND
          value: "http://backend-svc:8081" # could also use existing env vars from k8s
        ports:
        - containerPort: 8080
20.384615
86
0.59434
73652a318b63d0495cf160d1b08b59fcb33a637c
277
yml
YAML
gitlab/gitlab-svc.yml
acedemand/kubernetes-gitlab
2f495eff8f9774416e03d00fd2fe0dbbf009d719
[ "MIT" ]
null
null
null
gitlab/gitlab-svc.yml
acedemand/kubernetes-gitlab
2f495eff8f9774416e03d00fd2fe0dbbf009d719
[ "MIT" ]
null
null
null
gitlab/gitlab-svc.yml
acedemand/kubernetes-gitlab
2f495eff8f9774416e03d00fd2fe0dbbf009d719
[ "MIT" ]
null
null
null
apiVersion: v1
kind: Service
metadata:
  name: gitlab
  namespace: jenkins
  labels:
    name: gitlab
spec:
  type: LoadBalancer
  selector:
    name: gitlab
  ports:
  - name: http
    port: 80
    targetPort: http
  - name: ssh
    port: 1022
    targetPort: ssh
14.578947
22
0.620939
73655adbf50277586c49954f71ba74d0e8668caf
1,421
yaml
YAML
kubernetes-deployments/movies2companies-deployment.yaml
kinoreel/kino-gather
defc0d6b311651f985467b5bfcfdbf77d73c10ae
[ "MIT" ]
null
null
null
kubernetes-deployments/movies2companies-deployment.yaml
kinoreel/kino-gather
defc0d6b311651f985467b5bfcfdbf77d73c10ae
[ "MIT" ]
3
2017-06-03T16:50:56.000Z
2017-10-01T09:24:37.000Z
kubernetes-deployments/movies2companies-deployment.yaml
kinoreel/kino-gather
defc0d6b311651f985467b5bfcfdbf77d73c10ae
[ "MIT" ]
null
null
null
apiVersion: apps/v1 # for versions before 1.6.0 use extensions/v1beta1
kind: Deployment
metadata:
  name: insert-movies2companies
spec:
  replicas: 1
  selector:
    matchLabels:
      app: insert-movies2companies
  template:
    metadata:
      labels:
        app: insert-movies2companies
    spec:
      containers:
      - name: insert-movies2companies
        image: kinoreel/gather:latest
        imagePullPolicy: Always
        env:
        - name: PROCESS
          value: "insert_movies2companies"
        - name: KAFKA_BROKER
          valueFrom:
            secretKeyRef:
              name: kino-secrets
              key: kafka_broker
        - name: DB_SERVER
          valueFrom:
            secretKeyRef:
              name: kino-secrets
              key: gather_db_server
        - name: DB_PORT
          valueFrom:
            secretKeyRef:
              name: kino-secrets
              key: gather_db_port
        - name: DB_DATABASE
          valueFrom:
            secretKeyRef:
              name: kino-secrets
              key: gather_db_database
        - name: DB_USER
          valueFrom:
            secretKeyRef:
              name: kino-secrets
              key: gather_db_user
        - name: DB_PASSWORD
          valueFrom:
            secretKeyRef:
              name: kino-secrets
              key: gather_db_password
27.862745
70
0.522871
7365e71a5a5c47a5c186c55d7ca3753a2b169283
354
yaml
YAML
deploy/clusterrolebinding.yaml
zhujian7/clusterclaims-controller
4009b16ddb7c3d1bca9fb156caf87ed4a665d3e0
[ "Apache-2.0" ]
1
2022-03-30T16:21:48.000Z
2022-03-30T16:21:48.000Z
deploy/clusterrolebinding.yaml
zhujian7/clusterclaims-controller
4009b16ddb7c3d1bca9fb156caf87ed4a665d3e0
[ "Apache-2.0" ]
22
2022-01-07T08:03:17.000Z
2022-03-31T08:04:33.000Z
deploy/clusterrolebinding.yaml
zhujian7/clusterclaims-controller
4009b16ddb7c3d1bca9fb156caf87ed4a665d3e0
[ "Apache-2.0" ]
7
2022-01-07T14:06:05.000Z
2022-03-21T12:15:26.000Z
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: clusterclaims-controller
subjects:
- kind: ServiceAccount
  name: clusterclaims-controller
  namespace: open-cluster-management ## CHANGE: ACM namespace
roleRef:
  kind: ClusterRole
  name: clusterclaims-controller
  apiGroup: rbac.authorization.k8s.io
25.285714
72
0.765537
7365eea39173c222ef396fbcc9dbeef093617e19
173
yml
YAML
devops/eks-helpers/storageclass.yml
424D57/tm-aws-containers-security-demo
832653a8a2ded3e92bc3deb83ee7b4950e6ec35e
[ "MIT" ]
2
2019-03-12T18:50:33.000Z
2019-06-10T14:10:09.000Z
devops/eks-helpers/storageclass.yml
424D57/tm-aws-containers-security-demo
832653a8a2ded3e92bc3deb83ee7b4950e6ec35e
[ "MIT" ]
null
null
null
devops/eks-helpers/storageclass.yml
424D57/tm-aws-containers-security-demo
832653a8a2ded3e92bc3deb83ee7b4950e6ec35e
[ "MIT" ]
null
null
null
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: gp2
provisioner: kubernetes.io/aws-ebs
parameters:
  type: gp2
reclaimPolicy: Retain
mountOptions:
  - debug
15.727273
34
0.791908
73661d0bee2352e0be826e3f62b3232cb597bfd0
1,361
yaml
YAML
openmcp-metric-collector/member/deploy/operator/operator-cluster1.yaml
openmcp/openmcp
cd4966295db8b46e36a677b30fccbd7ad5f1e18d
[ "Unlicense" ]
null
null
null
openmcp-metric-collector/member/deploy/operator/operator-cluster1.yaml
openmcp/openmcp
cd4966295db8b46e36a677b30fccbd7ad5f1e18d
[ "Unlicense" ]
null
null
null
openmcp-metric-collector/member/deploy/operator/operator-cluster1.yaml
openmcp/openmcp
cd4966295db8b46e36a677b30fccbd7ad5f1e18d
[ "Unlicense" ]
null
null
null
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cluster-metric-collector
  namespace: openmcp
spec:
  replicas: 1
  selector:
    matchLabels:
      name: cluster-metric-collector
  template:
    metadata:
      labels:
        name: cluster-metric-collector
    spec:
      serviceAccountName: cluster-metric-collector
      containers:
      - name: cluster-metric-collector
        # Replace this with the built image name
        image: openmcp/cluster-metric-collector:v0.0.1
        command:
        - cluster-metric-collector
        imagePullPolicy: Always
        env:
        - name: WATCH_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: GRPC_SERVER
          value: "119.65.195.180"
        - name: GRPC_PORT
          value: "32051"
        - name: CLUSTER_NAME
          value: "cluster1"
        - name: OPERATOR_NAME
          value: "openmcp-metric-collector"
      tolerations:
      - key: node.kubernetes.io/not-ready
        effect: NoExecute
        tolerationSeconds: 0
      - key: node.kubernetes.io/unreachable
        effect: NoExecute
        tolerationSeconds: 0
27.77551
56
0.559882
73661f272d2ed78fcc5afecf10a8d47a037c52d2
537
yml
YAML
deployment.yml
landness/RPC-K8S-githubAction_cicd_test
8178274f1b952650d52125deeda2cfc3e6d3632c
[ "MIT" ]
null
null
null
deployment.yml
landness/RPC-K8S-githubAction_cicd_test
8178274f1b952650d52125deeda2cfc3e6d3632c
[ "MIT" ]
null
null
null
deployment.yml
landness/RPC-K8S-githubAction_cicd_test
8178274f1b952650d52125deeda2cfc3e6d3632c
[ "MIT" ]
null
null
null
apiVersion: apps/v1 # API version
kind: Deployment # resource kind
metadata:
  name: rush-rpc # deployment name
  labels:
    app: rush-rpc
spec:
  replicas: 1 # number of pods
  selector:
    matchLabels:
      app: rush-rpc
  template:
    metadata:
      labels:
        app: rush-rpc # template name
    spec: # defines the container template
      containers: # -command and -args are optional; image defaults are used if omitted
      - name: rush-rpc
        image: hkccr.ccs.tencentyun.com/rush-rpc
        ports:
        - containerPort: 80
      imagePullSecrets:
      - name: regcred
23.347826
50
0.554935
73662e7aa84fadde7bc94e31de31078e26ce6547
214
yml
YAML
scripts/infrastructure/K8S/deployments/RNG-K8S-service.yml
ifatssane/teo-school
b4eae6b21733675396ec68f40cff7773bff078e9
[ "Apache-2.0" ]
null
null
null
scripts/infrastructure/K8S/deployments/RNG-K8S-service.yml
ifatssane/teo-school
b4eae6b21733675396ec68f40cff7773bff078e9
[ "Apache-2.0" ]
null
null
null
scripts/infrastructure/K8S/deployments/RNG-K8S-service.yml
ifatssane/teo-school
b4eae6b21733675396ec68f40cff7773bff078e9
[ "Apache-2.0" ]
null
null
null
apiVersion: v1
kind: Service
metadata:
  labels:
    app: rng
  name: rng-svc
  namespace: dockercoins
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: rng-app
  type: ClusterIP
14.266667
24
0.658879
73669f485914ecf56eb59d03e890f1006f1e6cef
218
yaml
YAML
stages/99_stream_processing/_charts/confluent/kafka-client.yaml
dgr113/my-otus-tests
c573d85be422eb76986fa217ffc0fe3415b6df2b
[ "MIT" ]
null
null
null
stages/99_stream_processing/_charts/confluent/kafka-client.yaml
dgr113/my-otus-tests
c573d85be422eb76986fa217ffc0fe3415b6df2b
[ "MIT" ]
null
null
null
stages/99_stream_processing/_charts/confluent/kafka-client.yaml
dgr113/my-otus-tests
c573d85be422eb76986fa217ffc0fe3415b6df2b
[ "MIT" ]
null
null
null
apiVersion: v1
kind: Pod
metadata:
  name: kafka-client
spec:
  containers:
  - name: kafka-client
    image: confluentinc/cp-kafka:5.0.1
    command:
    - sh
    - -c
    - "exec tail -f /dev/null"
16.769231
40
0.568807
7367270184110f0a109dc5ece4914003ad584305
488
yaml
YAML
manifests/07_configmap.yaml
germanodasilva/custom-marketplace
593c911a3d3e62fb0c401bce8c1614261eae9b53
[ "Apache-2.0" ]
100
2018-10-18T18:03:53.000Z
2022-03-30T08:22:36.000Z
manifests/07_configmap.yaml
germanodasilva/custom-marketplace
593c911a3d3e62fb0c401bce8c1614261eae9b53
[ "Apache-2.0" ]
438
2018-09-21T21:50:32.000Z
2022-03-30T13:43:41.000Z
manifests/07_configmap.yaml
germanodasilva/custom-marketplace
593c911a3d3e62fb0c401bce8c1614261eae9b53
[ "Apache-2.0" ]
90
2018-09-14T21:33:39.000Z
2022-02-23T20:23:40.000Z
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    # This label ensures that the OpenShift Certificate Authority bundle
    # is added to the ConfigMap.
    config.openshift.io/inject-trusted-cabundle: "true"
  name: marketplace-trusted-ca
  namespace: openshift-marketplace
  annotations:
    include.release.openshift.io/ibm-cloud-managed: "true"
    include.release.openshift.io/self-managed-high-availability: "true"
    include.release.openshift.io/single-node-developer: "true"
34.857143
72
0.762295
736763dc1652e98dcf1765464a7bad9678847b0f
2,821
yaml
YAML
manifests/app/dreamkast/overlays/production/main/cronjob.yaml
mhoshi-vm/dreamkast-infra
8867739281abc12dd369f43c63ec258325243c0b
[ "MIT" ]
null
null
null
manifests/app/dreamkast/overlays/production/main/cronjob.yaml
mhoshi-vm/dreamkast-infra
8867739281abc12dd369f43c63ec258325243c0b
[ "MIT" ]
null
null
null
manifests/app/dreamkast/overlays/production/main/cronjob.yaml
mhoshi-vm/dreamkast-infra
8867739281abc12dd369f43c63ec258325243c0b
[ "MIT" ]
null
null
null
apiVersion: batch/v1
kind: CronJob
metadata:
  name: post-to-cicd2021-registration-status
spec:
  # Execute every 10:00 JST but this configure is written by UTC
  schedule: "0 1 * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: dreamkast
            image: 607167088920.dkr.ecr.ap-northeast-1.amazonaws.com/dreamkast-ecs:main
            command:
            - /bin/sh
            - -c
            - bundle exec rake util:post_number_of_registrants_to_slack
            env:
            - name: SLACK_WEBHOOK_URL
              valueFrom:
                secretKeyRef:
                  name: dreamkast-secret
                  key: SLACK_WEBHOOK_URL
            - name: CONFERENCE_ABBR
              value: "cndt2021"
            - name: RAILS_ENV
              value: "production"
            - name: MYSQL_HOST
              value: "dreamkast-prd-rds.c6eparu1hmbv.ap-northeast-1.rds.amazonaws.com"
            - name: MYSQL_DATABASE
              value: "dreamkast"
            - name: REDIS_URL
              value: "redis://drr7wdanp39n8i1.bp6loy.ng.0001.apne1.cache.amazonaws.com:6379"
            - name: SENTRY_DSN
              value: "https://[email protected]/5350644"
            - name: S3_BUCKET
              value: "dreamkast-prd-bucket"
            - name: S3_REGION
              value: ap-northeast-1
            - name: DREAMKAST_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: RAILS_MASTER_KEY
              valueFrom:
                secretKeyRef:
                  name: rails-app-secret
                  key: rails-app-secret
            - name: MYSQL_USER
              valueFrom:
                secretKeyRef:
                  name: db-secret
                  key: username
            - name: MYSQL_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: db-secret
                  key: password
            - name: AUTH0_CLIENT_ID
              valueFrom:
                secretKeyRef:
                  name: dreamkast-secret
                  key: AUTH0_CLIENT_ID
            - name: AUTH0_CLIENT_SECRET
              valueFrom:
                secretKeyRef:
                  name: dreamkast-secret
                  key: AUTH0_CLIENT_SECRET
            - name: AUTH0_DOMAIN
              valueFrom:
                secretKeyRef:
                  name: dreamkast-secret
                  key: AUTH0_DOMAIN
          restartPolicy: OnFailure
37.118421
100
0.462956
73677841bb4a347165bf62de3d0be8877a01f49f
4,515
yaml
YAML
deploy/kubernetes/driver/kubernetes/manifests/controller-server.yaml
IBM/ibm-vpc-file-csi-driver
55285139670d7bbd59f80cb1fffd90b8d8318a69
[ "Apache-2.0" ]
null
null
null
deploy/kubernetes/driver/kubernetes/manifests/controller-server.yaml
IBM/ibm-vpc-file-csi-driver
55285139670d7bbd59f80cb1fffd90b8d8318a69
[ "Apache-2.0" ]
4
2021-06-18T05:23:12.000Z
2022-03-03T10:57:15.000Z
deploy/kubernetes/driver/kubernetes/manifests/controller-server.yaml
IBM/ibm-vpc-file-csi-driver
55285139670d7bbd59f80cb1fffd90b8d8318a69
[ "Apache-2.0" ]
null
null
null
kind: StatefulSet apiVersion: apps/v1 metadata: name: ibm-vpc-file-csi-controller spec: serviceName: "ibm-vpc-file-service" replicas: 1 selector: matchLabels: app: ibm-vpc-file-csi-driver template: metadata: annotations: prometheus.io/scrape: "true" prometheus.io/port: "9080" prometheus.io/path: "/metrics" labels: app: ibm-vpc-file-csi-driver spec: serviceAccountName: ibm-vpc-file-controller-sa securityContext: runAsNonRoot: true runAsUser: 2121 containers: - name: csi-provisioner image: MUSTPATCHWITHKUSTOMIZE imagePullPolicy: Always securityContext: privileged: false allowPrivilegeEscalation: false args: - "--v=5" - "--csi-address=$(ADDRESS)" - "--timeout=600s" - "--feature-gates=Topology=true" env: - name: ADDRESS value: /csi/csi-vpc-file.sock resources: limits: cpu: 100m memory: 100Mi requests: cpu: 10m memory: 20Mi volumeMounts: - name: socket-dir mountPath: /csi - name: iks-vpc-file-driver image: MUSTPATCHWITHKUSTOMIZE imagePullPolicy: Always securityContext: privileged: false allowPrivilegeEscalation: false args: - "--v=5" - "--endpoint=$(CSI_ENDPOINT)" - "--lock_enabled=false" - "--sidecarEndpoint=$(SIDECAREP)" envFrom: - configMapRef: name: ibm-vpc-file-csi-configmap - configMapRef: name: ibm-cloud-provider-data env: - name: SIDECAREP value: "/sidecardir/providerfile.sock" - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - name: VPC_ID value: $(vpc_id) resources: limits: cpu: 500m memory: 500Mi requests: cpu: 50m memory: 100Mi ports: - name: healthz containerPort: 9808 protocol: TCP livenessProbe: httpGet: path: /healthz port: healthz initialDelaySeconds: 10 timeoutSeconds: 3 periodSeconds: 10 failureThreshold: 5 volumeMounts: - name: socket-dir mountPath: /csi - name: customer-auth readOnly: true mountPath: /etc/storage_ibmc - name: cluster-info readOnly: true mountPath: /etc/storage_ibmc/cluster_info - name: socket-dir mountPath: /sidecardir - name: liveness-probe image: MUSTPATCHWITHKUSTOMIZE securityContext: privileged: false allowPrivilegeEscalation: false args: - --csi-address=/csi/csi-vpc-file.sock resources: limits: cpu: 50m memory: 50Mi requests: cpu: 5m memory: 10Mi volumeMounts: - name: socket-dir mountPath: /csi - name: storage-secret-sidecar image: MUSTPATCHWITHKUSTOMIZE imagePullPolicy: Always args: - "--endpoint=$(ENDPOINT)" env: - name: ENDPOINT value: "unix:/sidecardir/providerfile.sock" - name: SECRET_CONFIG_PATH value: /etc/storage_ibmc volumeMounts: - mountPath: /sidecardir name: socket-dir - mountPath: /etc/storage_ibmc name: customer-auth readOnly: true - mountPath: /etc/storage_ibmc/cluster_info name: cluster-info readOnly: true volumes: - name: socket-dir emptyDir: {} - name: customer-auth secret: secretName: storage-secret-store - name: cluster-info configMap: name: cluster-info volumeClaimTemplates: []
29.318182
57
0.494574
736814eae24323d33e70ffe8460c8fc574dd4a8c
144
yaml
YAML
built-in-references/Kubernetes/perf/violations/violation2252.yaml
sw47/azure-policy
4a38e4cb4047b5364902653f0defe45f1b21bc61
[ "MIT" ]
null
null
null
built-in-references/Kubernetes/perf/violations/violation2252.yaml
sw47/azure-policy
4a38e4cb4047b5364902653f0defe45f1b21bc61
[ "MIT" ]
null
null
null
built-in-references/Kubernetes/perf/violations/violation2252.yaml
sw47/azure-policy
4a38e4cb4047b5364902653f0defe45f1b21bc61
[ "MIT" ]
null
null
null
apiVersion: v1
kind: Pod
metadata:
  name: image-demo-2252
spec:
  containers:
  - name: nginx
    image: nginx #ritaacr.azurecr.io/nginx:latest
18
49
0.722222
736832116f76e11413469d3d60a29d0c8448750f
144
yaml
YAML
built-in-references/Kubernetes/perf/violations/violation6444.yaml
sw47/azure-policy
4a38e4cb4047b5364902653f0defe45f1b21bc61
[ "MIT" ]
null
null
null
built-in-references/Kubernetes/perf/violations/violation6444.yaml
sw47/azure-policy
4a38e4cb4047b5364902653f0defe45f1b21bc61
[ "MIT" ]
null
null
null
built-in-references/Kubernetes/perf/violations/violation6444.yaml
sw47/azure-policy
4a38e4cb4047b5364902653f0defe45f1b21bc61
[ "MIT" ]
null
null
null
apiVersion: v1
kind: Pod
metadata:
  name: image-demo-6444
spec:
  containers:
  - name: nginx
    image: nginx #ritaacr.azurecr.io/nginx:latest
18
49
0.722222
7368470e2228f7cc933020098f037d4e8840167d
275
yaml
YAML
hosts/k8s000/root/kubernetes/v0/kube-system/serviceaccounts/service-account-controller.yaml
vskurikhin/home-devops
9a4ab76b6deee942e4979c37f9847dc1116347e1
[ "Unlicense" ]
null
null
null
hosts/k8s000/root/kubernetes/v0/kube-system/serviceaccounts/service-account-controller.yaml
vskurikhin/home-devops
9a4ab76b6deee942e4979c37f9847dc1116347e1
[ "Unlicense" ]
null
null
null
hosts/k8s000/root/kubernetes/v0/kube-system/serviceaccounts/service-account-controller.yaml
vskurikhin/home-devops
9a4ab76b6deee942e4979c37f9847dc1116347e1
[ "Unlicense" ]
null
null
null
apiVersion: v1
kind: ServiceAccount
metadata:
  creationTimestamp: "2022-01-29T19:55:36Z"
  name: service-account-controller
  namespace: kube-system
  resourceVersion: "287"
  uid: e2714522-91ac-440b-bf66-c24f8d483dea
secrets:
- name: service-account-controller-token-vnct2
25
46
0.785455
73687b2b2cc501081a724614aa0bb4436cd5ce9a
1,195
yaml
YAML
src/okd/storage/monitoring/cluster-configuration.yaml
mexok/okd-the-hard-way
952a16a2e2863f297bb34b5e2e7ae7e6b0e10072
[ "MIT" ]
null
null
null
src/okd/storage/monitoring/cluster-configuration.yaml
mexok/okd-the-hard-way
952a16a2e2863f297bb34b5e2e7ae7e6b0e10072
[ "MIT" ]
null
null
null
src/okd/storage/monitoring/cluster-configuration.yaml
mexok/okd-the-hard-way
952a16a2e2863f297bb34b5e2e7ae7e6b0e10072
[ "MIT" ]
null
null
null
apiVersion: v1
kind: ConfigMap
data:
  config.yaml: |
    prometheusOperator:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
    prometheusK8s:
      retention: 24h
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      volumeClaimTemplate:
        spec:
          storageClassName: block
          resources:
            requests:
              storage: 15Gi
    alertmanagerMain:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      volumeClaimTemplate:
        spec:
          storageClassName: block
          resources:
            requests:
              storage: 5Gi
    kubeStateMetrics:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
    openshiftStateMetrics:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
    grafana:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
    telemeterClient:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
    thanosQuerier:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
    k8sPrometheusAdapter:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
metadata:
  name: cluster-monitoring-config
  namespace: openshift-monitoring
24.895833
41
0.610042
73694df9ea3c3b0466d24f0f5e45ed12c762e41f
321
yaml
YAML
_Infrastructure/secret.yaml
howlingsails/kind-dnd
3bc373a7c570182581692891fdebd1e4ac4abce9
[ "CC0-1.0" ]
null
null
null
_Infrastructure/secret.yaml
howlingsails/kind-dnd
3bc373a7c570182581692891fdebd1e4ac4abce9
[ "CC0-1.0" ]
null
null
null
_Infrastructure/secret.yaml
howlingsails/kind-dnd
3bc373a7c570182581692891fdebd1e4ac4abce9
[ "CC0-1.0" ]
1
2021-12-07T00:27:15.000Z
2021-12-07T00:27:15.000Z
apiVersion: v1
kind: Secret
metadata:
  name: mysecret
  namespace: default
type: Opaque
data:
  password: imSoSecretYouWouldNeverGuessItAnytimeSoon
# Example:
# password: {{ .Values.password | b64enc }}
# Perhaps you should use
# https://learn.hashicorp.com/tutorials/vault/kubernetes-sidecar?in=vault/kubernetes
26.75
86
0.76324
73698f56bdb98020ac697c6815f1a9c6947c5a11
573
yaml
YAML
config-root/namespaces/jx/jxboot-helmfile-resources/nexus-ingress.yaml
bussrrajeshnayak/bussr-jx3-gke-gsm
81375390e985b4ce5dedd57bd2b3e5b9039aeb48
[ "Apache-2.0" ]
null
null
null
config-root/namespaces/jx/jxboot-helmfile-resources/nexus-ingress.yaml
bussrrajeshnayak/bussr-jx3-gke-gsm
81375390e985b4ce5dedd57bd2b3e5b9039aeb48
[ "Apache-2.0" ]
null
null
null
config-root/namespaces/jx/jxboot-helmfile-resources/nexus-ingress.yaml
bussrrajeshnayak/bussr-jx3-gke-gsm
81375390e985b4ce5dedd57bd2b3e5b9039aeb48
[ "Apache-2.0" ]
null
null
null
# Source: jxboot-helmfile-resources/templates/700-nexus-ing.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    kubernetes.io/ingress.class: nginx
    meta.helm.sh/release-name: 'jxboot-helmfile-resources'
  name: nexus
  namespace: jx
  labels:
    gitops.jenkins-x.io/pipeline: 'namespaces'
spec:
  rules:
  - http:
      paths:
      - pathType: ImplementationSpecific
        backend:
          service:
            name: nexus
            port:
              number: 80
    host: nexus-jx.34.69.81.209.nip.io
24.913043
64
0.612565
7369d2ac28e932e463a02dfca794b5d04ab5f8d5
8,509
yaml
YAML
k8s/us-central1/gen/tf-nightly-xlnet-squad-func-v3-8.yaml
will-cromar/ml-testing-accelerators
7d4837085931c97ee54e0df8f0a6ae5a47360dfc
[ "Apache-2.0" ]
null
null
null
k8s/us-central1/gen/tf-nightly-xlnet-squad-func-v3-8.yaml
will-cromar/ml-testing-accelerators
7d4837085931c97ee54e0df8f0a6ae5a47360dfc
[ "Apache-2.0" ]
null
null
null
k8s/us-central1/gen/tf-nightly-xlnet-squad-func-v3-8.yaml
will-cromar/ml-testing-accelerators
7d4837085931c97ee54e0df8f0a6ae5a47360dfc
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. "apiVersion": "batch/v1beta1" "kind": "CronJob" "metadata": "labels": "accelerator": "v3-8" "benchmarkId": "tf-nightly-xlnet-squad-func-v3-8" "frameworkVersion": "tf-nightly" "mode": "func" "model": "xlnet-squad" "name": "tf-nightly-xlnet-squad-func-v3-8" "namespace": "automated" "spec": "concurrencyPolicy": "Forbid" "jobTemplate": "metadata": "annotations": "ml-testing-accelerators/gcs-subdir": "tf-nightly/xlnet-squad/func/v3-8" "ml-testing-accelerators/metric-config": | { "sources": [ { "literals": { "assertions": { "duration": { "inclusive_bounds": false, "std_devs_from_mean": { "comparison": "LESS", "std_devs": 5 }, "wait_for_n_data_points": 10 } } } }, { "tensorboard": { "aggregate_assertions": [ { "assertion": { "inclusive_bounds": true, "std_devs_from_mean": { "comparison": "GREATER", "std_devs": 4 }, "wait_for_n_data_points": 0 }, "strategy": "AVERAGE", "tag": "examples_per_second" } ], "exclude_tags": [ ], "include_tags": [ { "strategies": [ "FINAL" ], "tag_pattern": "*" } ], "merge_runs": false } } ] } "labels": "accelerator": "v3-8" "benchmarkId": "tf-nightly-xlnet-squad-func-v3-8" "frameworkVersion": "tf-nightly" "mode": "func" "model": "xlnet-squad" "spec": "activeDeadlineSeconds": 3600 "backoffLimit": 1 "template": "metadata": "annotations": "reserved.cloud-tpus.google.com": "false" "tf-version.cloud-tpus.google.com": "nightly" "spec": "containers": - "env": - "name": "POD_NAME" "valueFrom": "fieldRef": "fieldPath": "metadata.name" - "name": "POD_NAMESPACE" "valueFrom": "fieldRef": "fieldPath": "metadata.namespace" "image": "gcr.io/xl-ml-test/health-monitor:stable" "imagePullPolicy": "Always" "name": "monitor" - "args": - "python3" - "official/nlp/xlnet/run_squad.py" - "--strategy_type=tpu" - "--tpu=$(KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS)" - "--init_checkpoint=$(XLNET_CHECKPOINT_DIR)/xlnet_model.ckpt" - "--model_dir=$(MODEL_DIR)" - "--train_tfrecord_path=$(XLNET_SQUAD_DIR)" - "--test_tfrecord_path=$(XLNET_SQUAD_DIR)/12048.eval.tf_record" - "--test_feature_path=$(XLNET_SQUAD_DIR)/spiece.model.slen-512.qlen-64.eval.features.pkl" - "--predict_file=$(XLNET_SQUAD_DIR)/dev-v2.0.json" - "--predict_dir=$(MODEL_DIR)" - "--seq_len=512" - "--reuse_len=256" - "--mem_len=0" - "--n_layer=24" - "--d_model=1024" - "--d_embed=1024" - "--n_head=16" - "--d_head=64" - "--d_inner=4096" - "--untie_r=true" - "--ff_activation=gelu" - "--learning_rate=.00003" - "--warmup_steps=1000" - "--iterations=1000" - "--bi_data=false" - "--query_len=64" - "--adam_epsilon=.000001" - "--lr_layer_decay_rate=0.75" - "--train_batch_size=48" - "--train_steps=1000" "env": - "name": "POD_NAME" "valueFrom": "fieldRef": "fieldPath": "metadata.name" - "name": "POD_UID" "valueFrom": "fieldRef": "fieldPath": "metadata.uid" - "name": "POD_NAMESPACE" "valueFrom": "fieldRef": "fieldPath": "metadata.namespace" - "name": "JOB_NAME" "valueFrom": "fieldRef": "fieldPath": "metadata.labels['job-name']" - "name": "MODEL_DIR" "value": 
"$(OUTPUT_BUCKET)/tf-nightly/xlnet-squad/func/v3-8/$(JOB_NAME)" "envFrom": - "configMapRef": "name": "gcs-buckets" "image": "gcr.io/xl-ml-test/tensorflow:nightly" "imagePullPolicy": "Always" "name": "train" "resources": "limits": "cloud-tpus.google.com/preemptible-v3": 8 "requests": "cpu": 2 "memory": "20G" "volumeMounts": - "mountPath": "/dev/shm" "name": "dshm" "readOnly": false "initContainers": - "env": - "name": "POD_NAME" "valueFrom": "fieldRef": "fieldPath": "metadata.name" - "name": "POD_UID" "valueFrom": "fieldRef": "fieldPath": "metadata.uid" - "name": "POD_NAMESPACE" "valueFrom": "fieldRef": "fieldPath": "metadata.namespace" - "name": "JOB_NAME" "valueFrom": "fieldRef": "fieldPath": "metadata.labels['job-name']" - "name": "MODEL_DIR" "value": "$(OUTPUT_BUCKET)/tf-nightly/xlnet-squad/func/v3-8/$(JOB_NAME)" - "name": "METRIC_CONFIG" "value": | { "metric_collection_config": { "default_aggregation_strategies": [ "final" ], "metric_to_aggregation_strategies": { "examples_per_second": [ "average" ] }, "use_run_name_prefix": true, "write_to_bigquery": true }, "regression_test_config": { "metric_success_conditions": { "examples_per_second_average": { "comparison": "greater_or_equal", "success_threshold": { "stddevs_from_mean": 4 } }, "total_wall_time": { "comparison": "less", "success_threshold": { "stddevs_from_mean": 5 }, "wait_for_n_points_of_history": 10 } } }, "test_name": "tf-nightly-xlnet-squad-func-v3-8" } "envFrom": - "configMapRef": "name": "gcs-buckets" "image": "gcr.io/xl-ml-test/publisher:stable" "imagePullPolicy": "Always" "name": "publisher" "nodeSelector": "tpu-available": "true" "restartPolicy": "Never" "volumes": - "emptyDir": "medium": "Memory" "name": "dshm" "schedule": "0 8 * * *" "successfulJobsHistoryLimit": 1 "suspend": false
34.730612
102
0.430838
736a18e7e7257d557fa27cb3a56a07e7d7b0ae8b
691
yaml
YAML
rook/base/ceph-poc/configmap.yaml
ohauer/neco-apps
fe5e18ea06672d0e9f7a854d2314d5be1f55300d
[ "Apache-2.0" ]
6
2019-04-01T22:54:46.000Z
2019-06-24T03:58:49.000Z
rook/base/ceph-poc/configmap.yaml
ohauer/neco-apps
fe5e18ea06672d0e9f7a854d2314d5be1f55300d
[ "Apache-2.0" ]
7
2019-02-08T01:51:30.000Z
2019-06-03T03:04:52.000Z
rook/base/ceph-poc/configmap.yaml
cybozu-go/neco-ops
7aeefaeb8d7405069676b823fb72c9654a66f965
[ "MIT" ]
null
null
null
apiVersion: v1
kind: ConfigMap
metadata:
  name: rook-config-override
  namespace: ceph-poc
  annotations:
    argocd.argoproj.io/sync-wave: "-1"
data:
  config: |
    [mgr]
    ; Suppress the folowing warning.
    ;
    ; ```
    ; health: HEALTH_WARN
    ; 1 pools have many more objects per pg than average
    ; ```
    mon_pg_warn_max_object_skew = 0
    [client]
    rgw_dynamic_resharding = false
    rgw enable ops log = true
    debug rgw = 20/20
    [global]
    mon_osd_down_out_subtree_limit = "root"
    ; If HEALTH_WARN appears, it should be investigated, but there is
    ; no requirement for slow ops. So this threshold was extended.
    osd_op_complaint_time = 300.0
25.592593
69
0.668596
736a63e45ba75a626a5012a9a2fa255cb5fd105f
6,209
yaml
YAML
addons/kotsadm/1.47.2/statefulset/kotsadm-statefulset.yaml
Sharpie/kURL
b930200dec4d26de25b8de12213a80572d12094c
[ "Apache-2.0" ]
405
2020-01-27T05:25:11.000Z
2022-03-30T18:08:37.000Z
addons/kotsadm/1.47.2/statefulset/kotsadm-statefulset.yaml
Sharpie/kURL
b930200dec4d26de25b8de12213a80572d12094c
[ "Apache-2.0" ]
1,073
2020-01-29T20:00:33.000Z
2022-03-31T04:26:06.000Z
addons/kotsadm/1.47.2/statefulset/kotsadm-statefulset.yaml
Sharpie/kURL
b930200dec4d26de25b8de12213a80572d12094c
[ "Apache-2.0" ]
42
2020-02-14T20:56:37.000Z
2022-03-30T07:11:58.000Z
--- apiVersion: apps/v1 kind: StatefulSet metadata: name: kotsadm namespace: default # explicity specified to be detected by json6902 patches labels: kots.io/kotsadm: "true" kots.io/backup: velero spec: serviceName: kotsadm replicas: 1 selector: matchLabels: app: kotsadm volumeClaimTemplates: - metadata: name: kotsadmdata labels: kots.io/kotsadm: "true" kots.io/backup: velero spec: accessModes: - ReadWriteOnce resources: requests: storage: 4Gi template: metadata: labels: app: kotsadm kots.io/kotsadm: "true" kots.io/backup: velero annotations: backup.velero.io/backup-volumes: backup pre.hook.backup.velero.io/command: '["/backup.sh"]' pre.hook.backup.velero.io/timeout: 10m spec: securityContext: runAsUser: 1001 fsGroup: 1001 serviceAccountName: kotsadm restartPolicy: Always volumes: - name: kotsadmdata persistentVolumeClaim: claimName: kotsadmdata - name: kubelet-client-cert secret: secretName: kubelet-client-cert - name: kurl-proxy-kotsadm-tls-cert secret: secretName: kotsadm-tls - name: kotsadm-web-scripts configMap: defaultMode: 511 # hex 777 name: kotsadm-web-scripts - name: backup emptyDir: {} - name: migrations emptyDir: medium: Memory containers: - image: kotsadm/kotsadm:v1.47.2 name: kotsadm command: ["bash"] args: - "/scripts/start-kotsadm-web.sh" ports: - name: http containerPort: 3000 readinessProbe: failureThreshold: 3 initialDelaySeconds: 10 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 httpGet: path: /healthz port: 3000 scheme: HTTP env: - name: AUTO_CREATE_CLUSTER value: "1" - name: AUTO_CREATE_CLUSTER_NAME value: "this-cluster" - name: AUTO_CREATE_CLUSTER_TOKEN valueFrom: secretKeyRef: name: kotsadm-cluster-token key: kotsadm-cluster-token - name: SHARED_PASSWORD_BCRYPT valueFrom: secretKeyRef: name: kotsadm-password key: passwordBcrypt - name: SESSION_KEY valueFrom: secretKeyRef: name: kotsadm-session key: key - name: POSTGRES_URI valueFrom: secretKeyRef: name: kotsadm-postgres key: uri - name: POSTGRES_PASSWORD valueFrom: secretKeyRef: name: kotsadm-postgres key: password - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - name: API_ENCRYPTION_KEY valueFrom: secretKeyRef: name: kotsadm-encryption key: encryptionKey - name: API_ADVERTISE_ENDPOINT value: "http://localhost:8800" - name: API_ENDPOINT value: http://kotsadm.default.svc.cluster.local:3000 - name: KURL_PROXY_TLS_CERT_PATH value: /etc/kurl-proxy/ca/tls.crt - name: DEX_PGPASSWORD valueFrom: secretKeyRef: key: PGPASSWORD name: kotsadm-dex-postgres volumeMounts: - name: kotsadmdata mountPath: /kotsadmdata - name: kotsadm-web-scripts mountPath: /scripts/start-kotsadm-web.sh subPath: start-kotsadm-web.sh - mountPath: /backup name: backup - name: kubelet-client-cert readOnly: true mountPath: /etc/kubernetes/pki/kubelet - name: kurl-proxy-kotsadm-tls-cert readOnly: true mountPath: /etc/kurl-proxy/ca resources: limits: cpu: 1 requests: cpu: 100m memory: 100Mi initContainers: - name: schemahero-plan image: kotsadm/kotsadm-migrations:v1.47.2 args: ["plan"] volumeMounts: - name: migrations mountPath: /migrations env: - name: SCHEMAHERO_DRIVER value: postgres - name: SCHEMAHERO_SPEC_FILE value: /tables - name: SCHEMAHERO_URI valueFrom: secretKeyRef: name: kotsadm-postgres key: uri - name: SCHEMAHERO_OUT value: /migrations/plan.yaml resources: limits: cpu: 100m memory: 100Mi requests: cpu: 50m memory: 50Mi - name: schemahero-apply image: kotsadm/kotsadm-migrations:v1.47.2 args: ["apply"] volumeMounts: - name: migrations mountPath: /migrations env: - name: SCHEMAHERO_DRIVER value: postgres - name: 
SCHEMAHERO_DDL value: /migrations/plan.yaml - name: SCHEMAHERO_URI valueFrom: secretKeyRef: name: kotsadm-postgres key: uri resources: limits: cpu: 100m memory: 100Mi requests: cpu: 50m memory: 50Mi - name: restore-data command: - "/restore.sh" env: - name: POSTGRES_PASSWORD valueFrom: secretKeyRef: key: password name: kotsadm-postgres image: kotsadm/kotsadm:v1.47.2 imagePullPolicy: IfNotPresent volumeMounts: - name: kotsadmdata mountPath: /kotsadmdata - name: backup mountPath: /backup resources: limits: cpu: 1 requests: cpu: 100m memory: 100Mi
27.473451
77
0.521179
736a6ea8dee15135695fdd8066b424fd4255ef90
2,732
yaml
YAML
kubernetes/overlays/development/static/workspace/environment-variables.secret.example.yaml
tenlastic/open-platform
7a02e99aabb496cc126f31e4e5707576bf60ac50
[ "MIT" ]
8
2020-05-09T11:15:59.000Z
2022-02-16T05:57:03.000Z
kubernetes/overlays/development/static/workspace/environment-variables.secret.example.yaml
tenlastic/open-platform
7a02e99aabb496cc126f31e4e5707576bf60ac50
[ "MIT" ]
11
2019-08-15T01:11:31.000Z
2020-02-02T03:56:32.000Z
kubernetes/overlays/development/static/workspace/environment-variables.secret.example.yaml
tenlastic/open-platform
7a02e99aabb496cc126f31e4e5707576bf60ac50
[ "MIT" ]
3
2019-11-26T18:15:20.000Z
2022-03-04T11:59:47.000Z
apiVersion: v1 kind: Secret metadata: name: workspace-environment-variables namespace: static stringData: DOCKER_REGISTRY_URL: http://localhost:5000 JWT_PRIVATE_KEY: -----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEAkUT/+1h+5zP255s9eQkjjiA1drGhA0T35FPxAdc/6EwwoU66\nnDh9Z14nb61V7+Wj8bPc859cujIAyU7fVNMLIUVb2qJvYo3nQvBYMKlAnpPsUP7X\ncxAJWT/DNlmgnfqYa7HKKY0pjON1n+3WGADtqrMbof4UOQrJVjxYAmJaAsASPjig\n60IzygIJmLtYp7VcvzjyUqeujBgU6C6wWqcv15j/gL1rCHQ9V81Aaru+37r6QM/1\nLWHTBdspFkOFgKtDWwI9Ceq/YivrdI0h2tuOAfhuV4hBB32k6w7I/pKgsP0rU20e\na3kA9JPnbEnEAYnX6ZLNIJh/fvgnPlpB/F9ojwIDAQABAoIBAH1adSmGq7sGdEBX\nDSysXin+USQjEzWnBeHZLVbRTyw80pnfPdggRtYzwQP1JvrjxhyDgF4ZXgKts1AC\n52Ri2G/VUfXPxR3eJXj1vKQA6Ont6zXZMkDvzaFeDC2SGa841t3XhQNl9DzG6nEe\n4mfEJVHvTCD1Bq5SSHpJiAhGpjVH/rdJDJDYq4SPmqvuGC41KTFXeYW1JIwf3Gn1\nsla77TCAdaZ/pDOC1vI1Bj+Avm1Zp8NF19Ex1UJnl4RoosLbCpAXuSfkCgf5qrvF\nX0rtQmW4JIRVtwW1mh4a6nyvH7HbIkrayCp3lE2HAaudpx7dHdomT4kYekLIwRG2\nspnsGgECgYEA/psvMDdiu5LCvfB1NNvLiCK9PmtuRgdu5rQD/mn7JUi6765Kr8SB\nT7a1c0lWypj+h5M+JIexvqtULCvk4Ru5YH4/omk5MrDCcGB9DvZmJr5rxJyCklAN\n6TTUuFSfTPyiSi/gqQm9PRt48JIWLs68j7iX5fdeokbSYT/vXg+F2uECgYEAkhCW\nMDt47Ty82sLHjTvKuZogUhnJIhyPgILQpX+eTFb2g9JaDqcuTw89R/xQuD3KcJdq\nzE5Le5T0mbS8QqK8lxZnOrKGFWJpXCBpVB0BRpW+qkDlnhQf4ZNLFU0W3qeqyTXQ\nYheH++KK1Zh5rn3tfdQI8lFaAePKWcYa6MZuoW8CgYEAv2v7UXwCvlX8FoQICwLJ\nhoPf1mM9GGeUSN3FNu2FhiWlbniS7I2IT19mEtRr7YyTqWjZyus+hsUXVtJ1lY2b\nlWG2BCyqdWMR/R1+SNaWPMDkE4E8ZjbMDq7K0//DeUVj2vUeKtkIrz3NcsnS5cRH\nXqMBiUOvocAvgjBojkaLc+ECgYA207MMd8Z0ho/9LY/qG7+E2622I6ez/jR/cJsR\nBE5PKXVvFc5oIajoVDgUhUlnXwpWKQAVoG4E/WyzJXxlJuDTdb1Z8xkRfdc+lYvz\n3s9Tx3HCRonqRHOTIM7WOBGVkioxTNnjeCG7EtY6Rvml3e7/hE+3ZYH7o52ZBRuk\ntJdiUQKBgQD4wou8NPkUimtc0xlBA7uSEDpZKNZvWWfYr5iRYYG2rlw2Ptzh/NG6\nnWdj6OZEWEe7fykWzzSWnGio/LrVrl7U4PSRgbhQ5mSNqW2dFAgnsRXsWplBz2q+\nT5lKxxqmZ+zTdq7DuH8+FrqMzlCxvGpx6Mt4Qbzu0RJQe5ijPbuskw==\n-----END RSA PRIVATE KEY----- JWT_PUBLIC_KEY: -----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAkUT/+1h+5zP255s9eQkj\njiA1drGhA0T35FPxAdc/6EwwoU66nDh9Z14nb61V7+Wj8bPc859cujIAyU7fVNML\nIUVb2qJvYo3nQvBYMKlAnpPsUP7XcxAJWT/DNlmgnfqYa7HKKY0pjON1n+3WGADt\nqrMbof4UOQrJVjxYAmJaAsASPjig60IzygIJmLtYp7VcvzjyUqeujBgU6C6wWqcv\n15j/gL1rCHQ9V81Aaru+37r6QM/1LWHTBdspFkOFgKtDWwI9Ceq/YivrdI0h2tuO\nAfhuV4hBB32k6w7I/pKgsP0rU20ea3kA9JPnbEnEAYnX6ZLNIJh/fvgnPlpB/F9o\njwIDAQAB\n-----END PUBLIC KEY----- MINIO_BUCKET: api MINIO_CONNECTION_STRING: http://minioadmin:minioadmin@localhost:9000 MONGO_CONNECTION_STRING: mongodb://root:password@localhost:27017/admin?replicaSet=rs0 NATS_CONNECTION_STRING: localhost:4222 PASSWORD_RESET_URL: http://www.localhost/authentication/reset-password RABBITMQ_CONNECTION_STRING: amqp://username:password@localhost:5672 type: Opaque
160.705882
1,723
0.906662
736a7d00613466bf6209626cd9a17fd0b8b4c482
287
yaml
YAML
yaml/springcloud-test/busybox.yaml
AlvinWanCN/k8s
19853b0f81de36dcbf63da7ea2c72e8afbf12b9c
[ "Apache-2.0" ]
5
2017-07-13T08:56:40.000Z
2017-12-04T02:07:51.000Z
yaml/springcloud-test/busybox.yaml
AlvinWanCN/k8s
19853b0f81de36dcbf63da7ea2c72e8afbf12b9c
[ "Apache-2.0" ]
null
null
null
yaml/springcloud-test/busybox.yaml
AlvinWanCN/k8s
19853b0f81de36dcbf63da7ea2c72e8afbf12b9c
[ "Apache-2.0" ]
null
null
null
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: springcloud-test
spec:
  containers:
  - image: radial/busyboxplus:curl
    command:
    - sleep
    - "3600"
    imagePullPolicy: IfNotPresent
    name: busybox
  restartPolicy: Always
19.133333
38
0.609756
736a822705aba1f11984b3ab59aa0786ed544638
362
yaml
YAML
config/rbac/storageconsumer_viewer_role.yaml
kdvalin/ocs-operator
1ec83b625af8203dab0a9fd8cb1a3d760e94d792
[ "Apache-2.0" ]
58
2019-06-14T15:24:13.000Z
2021-08-11T14:55:05.000Z
config/rbac/storageconsumer_viewer_role.yaml
kdvalin/ocs-operator
1ec83b625af8203dab0a9fd8cb1a3d760e94d792
[ "Apache-2.0" ]
1,321
2019-06-17T12:50:19.000Z
2021-09-03T06:47:45.000Z
config/rbac/storageconsumer_viewer_role.yaml
kdvalin/ocs-operator
1ec83b625af8203dab0a9fd8cb1a3d760e94d792
[ "Apache-2.0" ]
145
2019-06-09T16:27:33.000Z
2021-08-31T12:58:54.000Z
# permissions for end users to view storageconsumers.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: storageconsumer-viewer-role
rules:
- apiGroups:
  - ocs.openshift.io
  resources:
  - storageconsumers
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ocs.openshift.io
  resources:
  - storageconsumers/status
  verbs:
  - get
17.238095
53
0.723757
736ae2fc7cf61b9284a88ac67305cc52dbaae0da
2,395
yaml
YAML
config/300-clustertriggerbinding.yaml
sugardon/triggers
b7678ada8d4a2eb7b80a7d6b0d63cabe4f57e822
[ "Apache-2.0" ]
null
null
null
config/300-clustertriggerbinding.yaml
sugardon/triggers
b7678ada8d4a2eb7b80a7d6b0d63cabe4f57e822
[ "Apache-2.0" ]
null
null
null
config/300-clustertriggerbinding.yaml
sugardon/triggers
b7678ada8d4a2eb7b80a7d6b0d63cabe4f57e822
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 The Tekton Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: clustertriggerbindings.triggers.tekton.dev
  labels:
    app.kubernetes.io/instance: default
    app.kubernetes.io/part-of: tekton-triggers
    triggers.tekton.dev/release: "devel"
    version: "devel"
spec:
  group: triggers.tekton.dev
  scope: Cluster
  names:
    kind: ClusterTriggerBinding
    plural: clustertriggerbindings
    singular: clustertriggerbinding
    shortNames:
    - ctb
    categories:
    - tekton
    - tekton-triggers
  versions:
  - name: v1beta1
    served: false
    storage: false
    schema:
      openAPIV3Schema:
        type: object
        # One can use x-kubernetes-preserve-unknown-fields: true
        # at the root of the schema (and inside any properties, additionalProperties)
        # to get the traditional CRD behaviour that nothing is pruned, despite
        # setting spec.preserveUnknownProperties: false.
        #
        # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/
        # See issue: https://github.com/knative/serving/issues/912
        x-kubernetes-preserve-unknown-fields: true
    subresources:
      status: {}
  - name: v1alpha1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        # One can use x-kubernetes-preserve-unknown-fields: true
        # at the root of the schema (and inside any properties, additionalProperties)
        # to get the traditional CRD behaviour that nothing is pruned, despite
        # setting spec.preserveUnknownProperties: false.
        #
        # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/
        # See issue: https://github.com/knative/serving/issues/912
        x-kubernetes-preserve-unknown-fields: true
    subresources:
      status: {}
34.710145
85
0.702296
736b4d22aac1e9ace1ce890edf8c82848712db97
61
yaml
YAML
kustomize/micro/namespace.yaml
canthefason/installer
6beda63f837719cc84151efed85427ee1e6c41e9
[ "Apache-2.0" ]
null
null
null
kustomize/micro/namespace.yaml
canthefason/installer
6beda63f837719cc84151efed85427ee1e6c41e9
[ "Apache-2.0" ]
null
null
null
kustomize/micro/namespace.yaml
canthefason/installer
6beda63f837719cc84151efed85427ee1e6c41e9
[ "Apache-2.0" ]
1
2021-01-13T16:35:06.000Z
2021-01-13T16:35:06.000Z
apiVersion: v1
kind: Namespace
metadata:
  name: istio-micro
12.2
19
0.770492
736b532792f07929538cf70be8d07ad4e2474248
5,188
yaml
YAML
manifests/0000_70_cluster-network-operator_03_deployment-ibm-cloud-managed.yaml
Kyrtapz/cluster-network-operator
bc1d9137a31d7b20f65a99e5e8cddd328b3c12a3
[ "Apache-2.0" ]
2
2018-09-13T12:58:45.000Z
2018-09-26T05:00:44.000Z
manifests/0000_70_cluster-network-operator_03_deployment-ibm-cloud-managed.yaml
Kyrtapz/cluster-network-operator
bc1d9137a31d7b20f65a99e5e8cddd328b3c12a3
[ "Apache-2.0" ]
5
2018-09-03T16:14:14.000Z
2018-09-27T15:09:29.000Z
manifests/0000_70_cluster-network-operator_03_deployment-ibm-cloud-managed.yaml
Kyrtapz/cluster-network-operator
bc1d9137a31d7b20f65a99e5e8cddd328b3c12a3
[ "Apache-2.0" ]
2
2018-09-03T16:10:38.000Z
2018-09-26T01:40:22.000Z
# *** AUTOMATICALLY GENERATED FILE - DO NOT EDIT *** apiVersion: apps/v1 kind: Deployment metadata: annotations: include.release.openshift.io/ibm-cloud-managed: "true" labels: name: network-operator name: network-operator namespace: openshift-network-operator spec: replicas: 1 selector: matchLabels: name: network-operator strategy: rollingUpdate: maxSurge: 0 maxUnavailable: 1 type: RollingUpdate template: metadata: annotations: target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' labels: name: network-operator spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchLabels: name: network-operator topologyKey: kubernetes.io/hostname weight: 100 containers: - command: - /bin/bash - -c - | #!/bin/bash set -o allexport if [[ -f /etc/kubernetes/apiserver-url.env ]]; then source /etc/kubernetes/apiserver-url.env else URL_ONLY_KUBECONFIG=/etc/kubernetes/kubeconfig fi exec /usr/bin/cluster-network-operator start --listen=0.0.0.0:9104 env: - name: RELEASE_VERSION value: 0.0.1-snapshot - name: SDN_IMAGE value: quay.io/openshift/origin-sdn:latest - name: KUBE_PROXY_IMAGE value: quay.io/openshift/origin-kube-proxy:latest - name: KUBE_RBAC_PROXY_IMAGE value: quay.io/openshift/origin-kube-rbac-proxy:latest - name: MULTUS_IMAGE value: quay.io/openshift/origin-multus-cni:latest - name: MULTUS_ADMISSION_CONTROLLER_IMAGE value: quay.io/openshift/origin-multus-admission-controller:latest - name: CNI_PLUGINS_IMAGE value: quay.io/openshift/origin-container-networking-plugins:latest - name: BOND_CNI_PLUGIN_IMAGE value: quay.io/openshift/origin-network-interface-bond-cni:latest - name: WHEREABOUTS_CNI_IMAGE value: quay.io/openshift/origin-multus-whereabouts-ipam-cni:latest - name: ROUTE_OVERRRIDE_CNI_IMAGE value: quay.io/openshift/origin-multus-route-override-cni:latest - name: MULTUS_NETWORKPOLICY_IMAGE value: quay.io/openshift/origin-multus-networkpolicy:latest - name: OVN_IMAGE value: quay.io/openshift/origin-ovn-kubernetes:latest - name: OVN_NB_RAFT_ELECTION_TIMER value: "10" - name: OVN_SB_RAFT_ELECTION_TIMER value: "16" - name: OVN_NORTHD_PROBE_INTERVAL value: "5000" - name: OVN_CONTROLLER_INACTIVITY_PROBE value: "180000" - name: OVN_NB_INACTIVITY_PROBE value: "60000" - name: EGRESS_ROUTER_CNI_IMAGE value: quay.io/openshift/origin-egress-router-cni:latest - name: KURYR_DAEMON_IMAGE value: quay.io/openshift/origin-kuryr-cni:latest - name: KURYR_CONTROLLER_IMAGE value: quay.io/openshift/origin-kuryr-controller:latest - name: NETWORK_METRICS_DAEMON_IMAGE value: quay.io/openshift/origin-network-metrics-daemon:latest - name: NETWORK_CHECK_SOURCE_IMAGE value: quay.io/openshift/origin-cluster-network-operator:latest - name: NETWORK_CHECK_TARGET_IMAGE value: quay.io/openshift/origin-cluster-network-operator:latest - name: NETWORK_OPERATOR_IMAGE value: quay.io/openshift/origin-cluster-network-operator:latest - name: CLOUD_NETWORK_CONFIG_CONTROLLER_IMAGE value: quay.io/openshift/origin-cloud-network-config-controller:latest - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name image: quay.io/openshift/origin-cluster-network-operator:latest name: network-operator ports: - containerPort: 9104 hostPort: 9104 name: cno protocol: TCP resources: requests: cpu: 10m memory: 50Mi terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /etc/kubernetes name: host-etc-kube readOnly: true - mountPath: /var/run/secrets/serving-cert name: metrics-tls hostNetwork: true priorityClassName: system-cluster-critical 
restartPolicy: Always securityContext: runAsNonRoot: true runAsUser: 65534 tolerations: - effect: NoSchedule key: node-role.kubernetes.io/master operator: Exists - effect: NoSchedule key: node.kubernetes.io/not-ready operator: Exists - effect: NoSchedule key: node.kubernetes.io/network-unavailable operator: Exists volumes: - hostPath: path: /etc/kubernetes type: Directory name: host-etc-kube - name: metrics-tls secret: optional: true secretName: metrics-tls
35.054054
90
0.630686
736b68aa2e9bc8e40eb5d8c4a8bb752669eeb8ae
2,553
yaml
YAML
base/simply-nginx/configmap.yaml
ikeikeikeike/lit-k8s
42c984f65959572c3f7c21b8d874f9d47324469b
[ "MIT" ]
1
2018-08-20T13:53:38.000Z
2018-08-20T13:53:38.000Z
base/simply-nginx/configmap.yaml
ikeikeikeike/lit-k8s
42c984f65959572c3f7c21b8d874f9d47324469b
[ "MIT" ]
null
null
null
base/simply-nginx/configmap.yaml
ikeikeikeike/lit-k8s
42c984f65959572c3f7c21b8d874f9d47324469b
[ "MIT" ]
null
null
null
--- apiVersion: v1 kind: ConfigMap metadata: name: simply-nginx-conf data: nginx.conf: |- user nginx; worker_processes auto; worker_rlimit_nofile 30720; error_log /var/log/nginx/error.log warn; pid /var/run/nginx.pid; events { worker_connections 10240; multi_accept on; } http { include /etc/nginx/mime.types; default_type application/octet-stream; log_format ltsv 'time:$time_local\t' 'server:$server_name\t' 'remote_addr:$remote_addr\t' 'x_forwarded_for:$http_x_forwarded_for\t' 'time_local:$msec\t' 'request_uri:$request_uri\t' 'request:$request\t' 'body_bytes_sent:$body_bytes_sent\t' 'status:$status\t' 'referer:$http_referer\t' 'user_agent:$http_user_agent'; log_format main '$remote_addr - $remote_user [$time_local] "$request" ' '$status $body_bytes_sent "$http_referer" ' '"$http_user_agent" "$http_x_forwarded_for"'; access_log /var/log/nginx/access.log main buffer=16k flush=2m; types_hash_max_size 2048; client_max_body_size 500M; sendfile on; tcp_nopush on; tcp_nodelay on; keepalive_timeout 65; keepalive_requests 100; server_names_hash_bucket_size 64; server_tokens off; # gzip on; gzip on; gzip_static on; proxy_read_timeout 300; proxy_send_timeout 300; proxy_connect_timeout 300; proxy_intercept_errors on; proxy_set_header Connection 'keep-alive'; proxy_set_header Host $http_host; proxy_set_header X-Scheme $scheme; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; # upstream base_backend { # server eiicon:8000; # } # server { # listen 80; # location / { # proxy_pass http://eiicon_backend; # proxy_redirect off; # } # } # include /etc/nginx/conf.d/*.conf; # Disable to conf.d/default.conf include /etc/nginx/sites-enabled/*; }
28.054945
79
0.523306
736bbad736b2fa496e06b1ee99622534f936504c
501
yaml
YAML
docker/k8s/enclave-manager-deployment.yaml
manojsalunke85/avalon
99fc49ac215ac3dfcfb0547f8abebc0b131dfad1
[ "Apache-2.0" ]
null
null
null
docker/k8s/enclave-manager-deployment.yaml
manojsalunke85/avalon
99fc49ac215ac3dfcfb0547f8abebc0b131dfad1
[ "Apache-2.0" ]
null
null
null
docker/k8s/enclave-manager-deployment.yaml
manojsalunke85/avalon
99fc49ac215ac3dfcfb0547f8abebc0b131dfad1
[ "Apache-2.0" ]
null
null
null
apiVersion: apps/v1
kind: Deployment
metadata:
  name: enclave-manager-pool
spec:
  replicas: 1
  selector:
    matchLabels:
      app: tee
  template:
    metadata:
      name: tee
      labels:
        app: tee
    spec:
      containers:
      - image: tcf-dev:latest
        name: tee
        imagePullPolicy: Never
        envFrom:
        - prefix: TCF_
          configMapRef:
            name: tcf-config
        command: ["/bin/bash"]
        args: ["$(TCF_HOME)/scripts/enclave_manager.sh"]
19.269231
56
0.56487
736c1bc640857c75aa5fd7ac998553c15e5daf0b
379
yaml
YAML
setup_ks_apps/components/argocd/clusterrole-argocd-application-controller-wgkuf.yaml
mnmainguy/ml-cd-kubeflow
9757867769d58914ea473024fcb50a6a5046b6e1
[ "MIT" ]
3
2019-07-09T09:46:58.000Z
2020-12-10T12:46:12.000Z
setup_ks_apps/components/argocd/clusterrole-argocd-application-controller-wgkuf.yaml
mnmainguy/ml-cd-kubeflow
9757867769d58914ea473024fcb50a6a5046b6e1
[ "MIT" ]
13
2022-02-28T07:37:59.000Z
2022-03-30T02:05:03.000Z
experimental/addons/argocd/resources/core-install/argocd-application-controller-cr.yaml
lowkeyrd/catalog
c45cab7278cefbe0d01f1a149632668d388d2dac
[ "Apache-2.0" ]
2
2019-11-14T23:07:10.000Z
2019-11-30T19:12:34.000Z
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/component: application-controller
    app.kubernetes.io/name: argocd-application-controller
    app.kubernetes.io/part-of: argocd
  name: argocd-application-controller
rules:
- apiGroups:
  - '*'
  resources:
  - '*'
  verbs:
  - '*'
- nonResourceURLs:
  - '*'
  verbs:
  - '*'
18.95
57
0.686016
736ccd1818898bcac8af08a7d19c332a8df98b6a
1,221
yaml
YAML
manifest/deploy/deployment-dead-mans-switch.yaml
leosunmo/dead-mans-switch
348c3b00a73ae2987a3f8c21f29eed7b1b468ab6
[ "Apache-2.0" ]
20
2020-12-17T14:28:49.000Z
2022-02-15T13:09:38.000Z
manifest/deploy/deployment-dead-mans-switch.yaml
leosunmo/dead-mans-switch
348c3b00a73ae2987a3f8c21f29eed7b1b468ab6
[ "Apache-2.0" ]
3
2020-07-23T11:14:58.000Z
2021-06-09T08:14:22.000Z
manifest/deploy/deployment-dead-mans-switch.yaml
leosunmo/dead-mans-switch
348c3b00a73ae2987a3f8c21f29eed7b1b468ab6
[ "Apache-2.0" ]
9
2020-07-14T05:22:27.000Z
2022-03-29T05:54:01.000Z
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dead-mans-switch
  namespace: monitoring
spec:
  replicas: 1
  selector:
    matchLabels:
      app: dead-mans-switch
  template:
    metadata:
      labels:
        app: dead-mans-switch
    spec:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values:
                  - alertmanager
              topologyKey: failure-domain.beta.kubernetes.io/zone
            weight: 100
      containers:
      - args:
        - -config=/etc/deadmansswitch/config.yaml
        image: gcr.io/pingcap-public/deadmansswitch:latest
        imagePullPolicy: Always
        name: dead-mans-switch
        readinessProbe:
          failureThreshold: 10
          httpGet:
            path: /health
            port: 8080
          initialDelaySeconds: 5
          periodSeconds: 10
        volumeMounts:
        - mountPath: /etc/deadmansswitch
          name: config
      volumes:
      - configMap:
          name: dead-mans-switch-config
        name: config
25.4375
65
0.562654
736cf13a309e9a943b2e4a83d311023795cbd8aa
305
yaml
YAML
ocp-resources/amq-nodeport-service.yaml
garethahealy/ibmq-to-amq-bridge
ab40ae859eb18de8c7faf25a0c0ad1d71d6d6660
[ "Apache-2.0" ]
2
2018-04-04T01:50:14.000Z
2018-05-24T15:41:05.000Z
ocp-resources/amq-nodeport-service.yaml
garethahealy/ibmq-to-amq-bridge
ab40ae859eb18de8c7faf25a0c0ad1d71d6d6660
[ "Apache-2.0" ]
null
null
null
ocp-resources/amq-nodeport-service.yaml
garethahealy/ibmq-to-amq-bridge
ab40ae859eb18de8c7faf25a0c0ad1d71d6d6660
[ "Apache-2.0" ]
1
2021-10-16T08:10:09.000Z
2021-10-16T08:10:09.000Z
apiVersion: v1
kind: Service
metadata:
  labels:
    application: broker
    template: amq62-basic
    xpaas: 1.3.1
  name: broker-amq-tcp-nodeport
spec:
  ports:
  - port: 61616
    protocol: TCP
    targetPort: 61616
  selector:
    deploymentConfig: broker-amq
  sessionAffinity: None
  type: NodePort
16.944444
32
0.695082
736d51f9d038b704be2684834c9df57b8374e536
2,309
yaml
YAML
config-root/namespaces/jx/jx-pipelines-visualizer/jx-pipelines-visualizer-deploy.yaml
frandelgado/jx3-minikube
e5028f1c945cd1b825becacfb0e9020764087f27
[ "Apache-2.0" ]
null
null
null
config-root/namespaces/jx/jx-pipelines-visualizer/jx-pipelines-visualizer-deploy.yaml
frandelgado/jx3-minikube
e5028f1c945cd1b825becacfb0e9020764087f27
[ "Apache-2.0" ]
null
null
null
config-root/namespaces/jx/jx-pipelines-visualizer/jx-pipelines-visualizer-deploy.yaml
frandelgado/jx3-minikube
e5028f1c945cd1b825becacfb0e9020764087f27
[ "Apache-2.0" ]
null
null
null
# Source: jx-pipelines-visualizer/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: jx-pipelines-visualizer
  labels:
    app.kubernetes.io/name: jx-pipelines-visualizer
    app.kubernetes.io/instance: "jx-pipelines-visualizer"
    helm.sh/chart: jx-pipelines-visualizer-1.7.5
    app.kubernetes.io/version: "1.7.5"
    app.kubernetes.io/managed-by: "Helm"
    gitops.jenkins-x.io/pipeline: 'namespaces'
  annotations:
    meta.helm.sh/release-name: 'jx-pipelines-visualizer'
    wave.pusher.com/update-on-config-change: 'true'
  namespace: jx
spec:
  replicas: 1
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      app.kubernetes.io/name: jx-pipelines-visualizer
      app.kubernetes.io/instance: "jx-pipelines-visualizer"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: jx-pipelines-visualizer
        app.kubernetes.io/instance: "jx-pipelines-visualizer"
        helm.sh/chart: jx-pipelines-visualizer-1.7.5
        app.kubernetes.io/version: "1.7.5"
        app.kubernetes.io/managed-by: "Helm"
    spec:
      containers:
      - name: jx-pipelines-visualizer
        image: "ghcr.io/jenkins-x/jx-pipelines-visualizer:1.7.5"
        args:
        - -namespace
        - jx
        - -resync-interval
        - 60s
        - -pipeline-trace-url-template
        - http://grafana-jx-observability.192.168.99.100.nip.io/explore?left=%5B%22now%22,%22now%22,%22Tempo%22,%7B%22query%22:%22{{.TraceID}}%22%7D%5D
        - -log-level
        - INFO
        ports:
        - name: http
          containerPort: 8080
        livenessProbe:
          tcpSocket:
            port: http
        readinessProbe:
          httpGet:
            path: /healthz
            port: http
        volumeMounts:
        - mountPath: /secrets/git
          name: secrets-git
        resources:
          limits:
            cpu: "1"
            memory: 512M
          requests:
            cpu: "0.2"
            memory: 128M
      securityContext:
        fsGroup: 1000
      serviceAccountName: jx-pipelines-visualizer
      enableServiceLinks: false
      volumes:
      - name: secrets-git
        secret:
          defaultMode: 420
          secretName: tekton-git
31.202703
155
0.587267
736d99405408308667e70b29288bd2415f763ebf
262
yaml
YAML
Pod/pod.yaml
mabroor007/k8s
598341462b8624c69b71f6233ccb465f352c4e56
[ "MIT" ]
null
null
null
Pod/pod.yaml
mabroor007/k8s
598341462b8624c69b71f6233ccb465f352c4e56
[ "MIT" ]
null
null
null
Pod/pod.yaml
mabroor007/k8s
598341462b8624c69b71f6233ccb465f352c4e56
[ "MIT" ]
null
null
null
apiVersion: v1
kind: Pod
metadata:
  name: hello-world
  labels:
    name: hello-world
spec:
  restartPolicy: "Never"
  containers:
  - name: hello-world
    image: hello-world
    resources:
      limits:
        memory: "128Mi"
        cpu: "500m"
16.375
25
0.591603
736ddc5d2351efaadfb0c3899aba74e91d0050e1
1,103
yaml
YAML
k8s/app/app-deployment.yaml
antoniosergiojr/java_kubernetes_digital_innovation_one
3b0ffa042410d5a9de4469f6213b386a8f105c08
[ "Apache-2.0" ]
null
null
null
k8s/app/app-deployment.yaml
antoniosergiojr/java_kubernetes_digital_innovation_one
3b0ffa042410d5a9de4469f6213b386a8f105c08
[ "Apache-2.0" ]
null
null
null
k8s/app/app-deployment.yaml
antoniosergiojr/java_kubernetes_digital_innovation_one
3b0ffa042410d5a9de4469f6213b386a8f105c08
[ "Apache-2.0" ]
null
null
null
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp
  namespace: dev-to
  labels:
    app: myapp
spec:
  # number of pods
  replicas: 1
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
    spec:
      containers:
        - name: myapp
          image: java-k8s:latest
          imagePullPolicy: Never
          ports:
            - containerPort: 8080
              name: http
          envFrom:
            #- secretRef:
            #name: secret
            - configMapRef:
                name: myapp
          livenessProbe:
            httpGet: # checks that the application is healthy
              path: /app/actuator/health/liveness
              port: 8080
            initialDelaySeconds: 30
          readinessProbe:
            httpGet: # checks readiness; if it does not respond OK, this pod is destroyed and another takes its place
              path: /app/actuator/health/readiness
              port: 8080
            initialDelaySeconds: 30
25.651163
110
0.49864
736e33d3b30ef52cd3b33f78d1497e048d3ef7dd
60
yaml
YAML
kube/openfaas-ns.yaml
sguzman/custom-helm-chart-configs
83541fa0c08e99f6f75df3ef1aed8defd850bd83
[ "Unlicense" ]
1
2021-08-30T13:52:59.000Z
2021-08-30T13:52:59.000Z
kube/openfaas-ns.yaml
sguzman/kubenetes-data-pipeline
83541fa0c08e99f6f75df3ef1aed8defd850bd83
[ "Unlicense" ]
null
null
null
kube/openfaas-ns.yaml
sguzman/kubenetes-data-pipeline
83541fa0c08e99f6f75df3ef1aed8defd850bd83
[ "Unlicense" ]
null
null
null
apiVersion: v1
kind: Namespace
metadata:
  name: openfaas-fn
15
19
0.783333
736e6619a06a9cc653070d376d8f0365744cdac9
69
yaml
YAML
deploy/service_account.yaml
EugeneLugovtsov/argocd-operator
131c03c15d2c0b00d3e7b45a7c947ddb5c2abeb6
[ "Apache-2.0" ]
1
2021-03-25T10:45:27.000Z
2021-03-25T10:45:27.000Z
deploy/service_account.yaml
EugeneLugovtsov/argocd-operator
131c03c15d2c0b00d3e7b45a7c947ddb5c2abeb6
[ "Apache-2.0" ]
null
null
null
deploy/service_account.yaml
EugeneLugovtsov/argocd-operator
131c03c15d2c0b00d3e7b45a7c947ddb5c2abeb6
[ "Apache-2.0" ]
null
null
null
apiVersion: v1
kind: ServiceAccount
metadata:
  name: argocd-operator
17.25
23
0.811594
736e67feeaff065e5c2abe789f346cfeb2a2e5c3
203
yaml
YAML
staging/volumes/portworx/portworx-volume-pvcsc.yaml
DeanJain/examples
ef5deea76e023e32dbb71eba5e4f5c4840273e86
[ "Apache-2.0" ]
4,959
2017-05-19T21:38:25.000Z
2022-03-31T12:35:01.000Z
staging/volumes/portworx/portworx-volume-pvcsc.yaml
DeanJain/examples
ef5deea76e023e32dbb71eba5e4f5c4840273e86
[ "Apache-2.0" ]
430
2017-05-01T14:42:56.000Z
2022-03-28T13:32:14.000Z
staging/volumes/portworx/portworx-volume-pvcsc.yaml
DeanJain/examples
ef5deea76e023e32dbb71eba5e4f5c4840273e86
[ "Apache-2.0" ]
3,767
2017-05-01T12:05:45.000Z
2022-03-31T17:17:24.000Z
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pvcsc001
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi
  storageClassName: portworx-io-priority-high
16.916667
45
0.738916
736ea3af01ddf45ce280535edf086b015a7f9f34
7,888
yaml
YAML
vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml
rhobs/cluster-monitoring-operator
fb0f3a792c3cc517ca2017f3f3ca4f4da9f2082e
[ "Apache-2.0" ]
57
2018-08-26T06:34:10.000Z
2022-02-25T17:11:16.000Z
vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml
rhobs/cluster-monitoring-operator
fb0f3a792c3cc517ca2017f3f3ca4f4da9f2082e
[ "Apache-2.0" ]
762
2018-09-10T18:44:57.000Z
2022-03-31T15:08:54.000Z
vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml
rhobs/cluster-monitoring-operator
fb0f3a792c3cc517ca2017f3f3ca4f4da9f2082e
[ "Apache-2.0" ]
140
2018-08-16T17:45:41.000Z
2022-03-24T15:55:37.000Z
apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" name: authentications.config.openshift.io spec: group: config.openshift.io names: kind: Authentication listKind: AuthenticationList plural: authentications singular: authentication scope: Cluster versions: - name: v1 schema: openAPIV3Schema: description: Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`. type: object required: - spec properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: spec holds user settable values for configuration type: object properties: oauthMetadata: description: 'oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. This discovery document can be viewed from its served location: oc get --raw ''/.well-known/oauth-authorization-server'' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key "oauthMetadata" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config.' type: object required: - name properties: name: description: name is the metadata.name of the referenced config map type: string serviceAccountIssuer: description: 'serviceAccountIssuer is the identifier of the bound service account token issuer. The default is https://kubernetes.default.svc WARNING: Updating this field will result in the invalidation of all bound tokens with the previous issuer value. Unless the holder of a bound token has explicit support for a change in issuer, they will not request a new bound token until pod restart or until their existing token exceeds 80% of its duration.' type: string type: description: type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth. type: string webhookTokenAuthenticator: description: webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service. 
type: object required: - kubeConfig properties: kubeConfig: description: "kubeConfig references a secret that contains kube config file data which describes how to access the remote webhook service. The namespace for the referenced secret is openshift-config. \n For further details, see: \n https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication \n The key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored." type: object required: - name properties: name: description: name is the metadata.name of the referenced secret type: string webhookTokenAuthenticators: description: webhookTokenAuthenticators is DEPRECATED, setting it has no effect. type: array items: description: deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field. type: object properties: kubeConfig: description: 'kubeConfig contains kube config file data which describes how to access the remote webhook service. For further details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication The key "kubeConfig" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored. The namespace for this secret is determined by the point of use.' type: object required: - name properties: name: description: name is the metadata.name of the referenced secret type: string status: description: status holds observed values from the cluster. They may not be overridden. type: object properties: integratedOAuthMetadata: description: 'integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. This discovery document can be viewed from its served location: oc get --raw ''/.well-known/oauth-authorization-server'' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key "oauthMetadata" is used to locate the data. If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config-managed.' type: object required: - name properties: name: description: name is the metadata.name of the referenced config map type: string served: true storage: true subresources: status: {}
77.333333
880
0.663032
736ee8cf47906321095da3921cdbbde61f7687fa
222
yaml
YAML
test/plex/1.6.12/templates/probe_config.yaml
nlopezs74/charts
b6e228daa213e2af0f932eebddd7a7658d61417a
[ "BSD-3-Clause" ]
47
2020-11-01T07:34:53.000Z
2022-02-28T02:02:54.000Z
test/plex/1.6.12/templates/probe_config.yaml
nlopezs74/charts
b6e228daa213e2af0f932eebddd7a7658d61417a
[ "BSD-3-Clause" ]
83
2020-11-05T06:42:24.000Z
2022-03-30T10:32:32.000Z
test/plex/1.6.12/templates/probe_config.yaml
nlopezs74/charts
b6e228daa213e2af0f932eebddd7a7658d61417a
[ "BSD-3-Clause" ]
90
2020-12-20T15:56:12.000Z
2022-03-23T19:41:14.000Z
apiVersion: v1 kind: ConfigMap metadata: name: "plex-probe-check" data: entrypoint.sh: |- #!/bin/sh curl -ksf http://$POD_IP:32400/identity -o /dev/null || curl -ksf https://$POD_IP:32400/identity -o /dev/null
24.666667
113
0.671171
736f1f0542943693b30aa2866048542407c22550
211
yaml
YAML
pkg/sync/v71/data/Secret/openshift-ansible-service-broker/asb-client.yaml
troy0820/openshift-azure
2760ef235865f6bc17d359bc74f8d5059ebb0386
[ "Apache-2.0" ]
55
2018-08-03T04:38:28.000Z
2021-07-26T16:42:09.000Z
pkg/sync/v71/data/Secret/openshift-ansible-service-broker/asb-client.yaml
troy0820/openshift-azure
2760ef235865f6bc17d359bc74f8d5059ebb0386
[ "Apache-2.0" ]
2,257
2018-07-26T21:29:25.000Z
2022-03-27T06:07:22.000Z
pkg/sync/v71/data/Secret/openshift-ansible-service-broker/asb-client.yaml
troy0820/openshift-azure
2760ef235865f6bc17d359bc74f8d5059ebb0386
[ "Apache-2.0" ]
62
2018-07-24T12:16:53.000Z
2021-12-09T18:02:58.000Z
apiVersion: v1
kind: Secret
metadata:
  annotations:
    kubernetes.io/service-account.name: asb-client
  name: asb-client
  namespace: openshift-ansible-service-broker
type: kubernetes.io/service-account-token
23.444444
50
0.78673
736f9ef76fc57d6ee75b402b31dbcdf876ec728c
621
yaml
YAML
namespaces/live-1.cloud-platform.service.justice.gov.uk/laa-legal-adviser-api-production/00-namespace.yaml
digitalronin/cloud-platform-environments
b617fa089ee2a7dafc53602808326ae7d56fd6d9
[ "MIT" ]
28
2018-07-13T15:02:24.000Z
2022-03-17T09:23:54.000Z
namespaces/live-1.cloud-platform.service.justice.gov.uk/laa-legal-adviser-api-production/00-namespace.yaml
happygeneralist/recall-decisions-alpha
09124f3994a878e9969e7b4326088b2ae7bfd170
[ "MIT" ]
963
2018-05-30T15:46:06.000Z
2022-03-30T10:06:39.000Z
namespaces/live-1.cloud-platform.service.justice.gov.uk/laa-legal-adviser-api-production/00-namespace.yaml
happygeneralist/recall-decisions-alpha
09124f3994a878e9969e7b4326088b2ae7bfd170
[ "MIT" ]
29
2018-12-04T09:37:36.000Z
2022-02-25T10:35:34.000Z
apiVersion: v1
kind: Namespace
metadata:
  name: laa-legal-adviser-api-production
  labels:
    cloud-platform.justice.gov.uk/is-production: "true"
    cloud-platform.justice.gov.uk/environment-name: "production"
  annotations:
    cloud-platform.justice.gov.uk/business-unit: "LAA"
    cloud-platform.justice.gov.uk/application: "LAA Legal Adviser API"
    cloud-platform.justice.gov.uk/owner: "LAA Get Access: [email protected]"
    cloud-platform.justice.gov.uk/source-code: "https://github.com/ministryofjustice/laa-legal-adviser-api"
    cloud-platform.justice.gov.uk/slack-channel: "cla-alerts"
44.357143
107
0.756844
736fcb7cc9fa78408851435b823ac13704d7155a
271
yaml
YAML
rendered/traces-only/serviceAccount.yaml
mateuszrzeszutek/splunk-otel-collector-chart
e17426410fe97945c501bf702d7654ec563c2ac8
[ "Apache-2.0" ]
null
null
null
rendered/traces-only/serviceAccount.yaml
mateuszrzeszutek/splunk-otel-collector-chart
e17426410fe97945c501bf702d7654ec563c2ac8
[ "Apache-2.0" ]
null
null
null
rendered/traces-only/serviceAccount.yaml
mateuszrzeszutek/splunk-otel-collector-chart
e17426410fe97945c501bf702d7654ec563c2ac8
[ "Apache-2.0" ]
null
null
null
---
# Source: splunk-otel-collector/templates/serviceAccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: default-splunk-otel-collector
  labels:
    app: splunk-otel-collector
    chart: splunk-otel-collector-0.25.0
    release: default
    heritage: Helm
22.583333
61
0.745387
736ffed258af4ba15707f83b594c227c6cef4080
3,441
yaml
YAML
cluster/manifests/flannel/daemonset.yaml
lfroment0/kubernetes-on-aws
948c5fd3cceb2225d5e0a6d7f8daa966c94fef6a
[ "MIT" ]
1
2019-12-19T00:15:25.000Z
2019-12-19T00:15:25.000Z
cluster/manifests/flannel/daemonset.yaml
lfroment0/kubernetes-on-aws
948c5fd3cceb2225d5e0a6d7f8daa966c94fef6a
[ "MIT" ]
null
null
null
cluster/manifests/flannel/daemonset.yaml
lfroment0/kubernetes-on-aws
948c5fd3cceb2225d5e0a6d7f8daa966c94fef6a
[ "MIT" ]
null
null
null
apiVersion: apps/v1 kind: DaemonSet metadata: name: kube-flannel namespace: kube-system labels: application: flannel version: v0.11.0-10 spec: updateStrategy: type: OnDelete selector: matchLabels: application: flannel template: metadata: labels: application: flannel version: v0.11.0-10 spec: priorityClassName: system-node-critical serviceAccountName: flannel containers: - name: delayed-install-cni image: registry.opensource.zalan.do/teapot/flannel-awaiter:master-3 command: - /await-and-copy stdin: true volumeMounts: - name: cni mountPath: /etc/cni/net.d - name: flannel-cfg mountPath: /etc/kube-flannel/ env: - name: CNI_CONFIG_SOURCE value: /etc/kube-flannel/cni-conf.json - name: CNI_CONFIG_TARGET value: /etc/cni/net.d/10-flannel.conflist resources: requests: cpu: 25m memory: 50Mi limits: cpu: 25m memory: 50Mi - name: apiserver-proxy image: registry.opensource.zalan.do/teapot/etcd-proxy:master-3 command: - /bin/sh args: - -c - "exec /etcd-proxy --listen-address 127.0.0.1:333 $KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT" resources: requests: cpu: 25m memory: 25Mi limits: cpu: 25m memory: 25Mi - name: kube-flannel image: registry.opensource.zalan.do/teapot/flannel:v0.11.0-10 command: - /opt/bin/flanneld args: - --ip-masq - --kube-subnet-mgr - --v=2 env: - name: KUBERNETES_SERVICE_HOST value: "127.0.0.1" - name: KUBERNETES_SERVICE_PORT value: "333" - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace resources: limits: cpu: 25m memory: 100Mi requests: cpu: 25m memory: 100Mi securityContext: privileged: true volumeMounts: - name: flannel-cfg mountPath: /etc/kube-flannel/ - name: run mountPath: /run - args: - -c - /tc-flannel.sh command: - /bin/bash image: registry.opensource.zalan.do/teapot/flannel-tc:v0.0.2 name: flannel-tc resources: requests: cpu: 1m memory: 25Mi limits: cpu: 1m memory: 25Mi securityContext: privileged: true stdin: true volumeMounts: - mountPath: /run name: run - mountPath: /lib/tc name: lib-tc hostNetwork: true tolerations: - operator: Exists effect: NoSchedule - operator: Exists effect: NoExecute volumes: - name: flannel-cfg configMap: name: kube-flannel-cfg - name: cni hostPath: path: /etc/kubernetes/cni/net.d - name: run hostPath: path: /run - hostPath: path: /lib/tc type: "" name: lib-tc
25.116788
109
0.517873
73704b5b6667d5f59902f140c36fae76d4678a40
1,232
yaml
YAML
kube/job_merging.yaml
glushkov/WFMS
94ad90822f66f4055e683524209c229acda7d72e
[ "MIT" ]
null
null
null
kube/job_merging.yaml
glushkov/WFMS
94ad90822f66f4055e683524209c229acda7d72e
[ "MIT" ]
null
null
null
kube/job_merging.yaml
glushkov/WFMS
94ad90822f66f4055e683524209c229acda7d72e
[ "MIT" ]
null
null
null
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: job-merging
spec:
  schedule: "25 2 * * *"
  suspend: false
  concurrencyPolicy: Forbid
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: wfms-jobs-merging
            image: atlasanalyticsservice/wfms:latest
            command: ["Jobs/Enrich/merging/run.sh"]
            env:
            - name: JOB_ORACLE_USER
              valueFrom:
                secretKeyRef:
                  name: oracle-secret
                  key: job_oracle_user
            - name: JOB_ORACLE_PASS
              valueFrom:
                secretKeyRef:
                  name: oracle-secret
                  key: job_oracle_pass
            - name: JOB_ORACLE_CONNECTION_STRING
              valueFrom:
                secretKeyRef:
                  name: oracle-secret
                  key: job_oracle_connection_string
            - name: JOB_ORACLE_ADG_CONNECTION_STRING
              valueFrom:
                secretKeyRef:
                  name: oracle-secret
                  key: job_oracle_adg_connection_string
          restartPolicy: Never
31.589744
59
0.489448
73708190b380615e2b2417082fc9c4b1a8530c56
95
yaml
YAML
deploy/osd-ids-tester/105-ids-tester.rbac.ServiceAccount.yaml
tonytheleg/managed-cluster-config
36626abc9a7313bcc0740851027a8dce02ea1483
[ "Apache-2.0" ]
null
null
null
deploy/osd-ids-tester/105-ids-tester.rbac.ServiceAccount.yaml
tonytheleg/managed-cluster-config
36626abc9a7313bcc0740851027a8dce02ea1483
[ "Apache-2.0" ]
null
null
null
deploy/osd-ids-tester/105-ids-tester.rbac.ServiceAccount.yaml
tonytheleg/managed-cluster-config
36626abc9a7313bcc0740851027a8dce02ea1483
[ "Apache-2.0" ]
null
null
null
apiVersion: v1
kind: ServiceAccount
metadata:
  name: ids-test
  namespace: openshift-suricata
15.833333
31
0.789474
73708d5ba8f6445e13386409c186a011e8ca0d63
1,586
yaml
YAML
keepku/messaging-sys/email/k8s-pod-slowcat.yaml
dltdojo/xidunku
42c8b4676fcd4dc4964e57b511d05b3065c4b0f5
[ "Apache-2.0" ]
null
null
null
keepku/messaging-sys/email/k8s-pod-slowcat.yaml
dltdojo/xidunku
42c8b4676fcd4dc4964e57b511d05b3065c4b0f5
[ "Apache-2.0" ]
1
2021-05-10T21:31:59.000Z
2021-05-10T21:31:59.000Z
keepku/messaging-sys/email/k8s-pod-slowcat.yaml
dltdojo/xidunku
42c8b4676fcd4dc4964e57b511d05b3065c4b0f5
[ "Apache-2.0" ]
null
null
null
apiVersion: v1
kind: Pod
metadata:
  name: email-server
  labels:
    app: email-server
spec:
  terminationGracePeriodSeconds: 0
  containers:
  - name: email-server
    image: localhost:32000/msgsys-email
    ports:
    - name: smtp
      containerPort: 25
    command:
    - dumb-init
    - --
    args:
    - /bin/bash
    - -c
    - |
      exim -bdf -v -q30m &
      sleep 3
      # [Simple Mail Transfer Protocol - Wikipedia](https://en.wikipedia.org/wiki/Simple_Mail_Transfer_Protocol)
      # [Simple Mail Transfer Protocol ietf.org/rfc/rfc2821.txt](https://www.ietf.org/rfc/rfc2821.txt)
      # [microHOWTO: Send an email using netcat](http://www.microhowto.info/howto/send_an_email_using_netcat.html)
      # [Send email with netcat - Stack Overflow](https://stackoverflow.com/questions/44250054/send-email-with-netcat)
      echo "======> SEND MAIL "
      function slowcat(){ while read; do sleep .5; echo "$REPLY"; done; }
      slowcat <<EOF | nc localhost 25
      HELO localhost
      MAIL FROM:bar@localhost
      RCPT TO:foo@localhost
      DATA
      From: bar@localhost
      To: foo@localhost
      Subject: Test
      Date: Thu, 31 Feb 2015 12:00:00 +0000

      Testing. Have a good day!
      .
      QUIT
      EOF
      sleep 2
      echo "======> READ MAIL "
      su - foo -c "echo p | mail"
      sleep 3000
      echo end
    resources:
      limits:
        memory: "512Mi"
        cpu: "500m"
29.37037
122
0.549811
7370feaabf324145f46417e1aa2ee24250f75a34
814
yml
YAML
K8_YAML_files/MysqlDB-Deployment_without_secret.yml
srinivasbv22/java_login_tomcat_mysql
e35d4768df0774781bc275521c9e64ab2931d3ed
[ "MIT" ]
null
null
null
K8_YAML_files/MysqlDB-Deployment_without_secret.yml
srinivasbv22/java_login_tomcat_mysql
e35d4768df0774781bc275521c9e64ab2931d3ed
[ "MIT" ]
null
null
null
K8_YAML_files/MysqlDB-Deployment_without_secret.yml
srinivasbv22/java_login_tomcat_mysql
e35d4768df0774781bc275521c9e64ab2931d3ed
[ "MIT" ]
9
2018-12-13T17:02:31.000Z
2022-03-12T11:00:52.000Z
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysqldb-deployment
  labels:
    app: login-application
spec:
  replicas: 1
  selector:
    matchLabels:
      name: mysqldb-pod
      app: login-application
  template:
    metadata:
      name: mysqldb
      labels:
        name: mysqldb-pod
        app: login-application
    spec:
      containers:
      - name: mysqldb
        image: muralindia/java_test_mysql:1.0
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: root
        ports:
        - containerPort: 3306
        volumeMounts:
        - name: persistent-volume
          mountPath: /var/lib/mysql
      nodeSelector:
        type: backend
      volumes:
      - name: persistent-volume
        hostPath:
          path: /var/mysql_data_backup
          type: DirectoryOrCreate
21.421053
45
0.594595
737107ddf91dd6687cc9c639d2e9347e66edf624
182
yaml
YAML
kubernetes-in-action-master/Chapter06/mongodb-pvc.yaml
duc-n/devops
146f3f7175f9a523f744d4d52cad75257303b2c9
[ "Apache-2.0" ]
null
null
null
kubernetes-in-action-master/Chapter06/mongodb-pvc.yaml
duc-n/devops
146f3f7175f9a523f744d4d52cad75257303b2c9
[ "Apache-2.0" ]
null
null
null
kubernetes-in-action-master/Chapter06/mongodb-pvc.yaml
duc-n/devops
146f3f7175f9a523f744d4d52cad75257303b2c9
[ "Apache-2.0" ]
null
null
null
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mongodb-pvc
spec:
  resources:
    requests:
      storage: 1Gi
  accessModes:
    - ReadWriteOnce
  storageClassName: ""
15.166667
27
0.714286
737149eb2e1c8e996e06f2fe164891dff6aa331c
801
yaml
YAML
k8s/ingress.yaml
bloveless/spacemonger-api
4546becdaf8520bd4cced9f9e4611b6bd195d6b5
[ "MIT" ]
null
null
null
k8s/ingress.yaml
bloveless/spacemonger-api
4546becdaf8520bd4cced9f9e4611b6bd195d6b5
[ "MIT" ]
null
null
null
k8s/ingress.yaml
bloveless/spacemonger-api
4546becdaf8520bd4cced9f9e4611b6bd195d6b5
[ "MIT" ]
null
null
null
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: spacemonger-api-ingress
  namespace: spacemonger
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-prod
    ingress.kubernetes.io/force-ssl-redirect: "true"
spec:
  tls:
    - hosts:
        - spacemonger.brennonloveless.com
      secretName: spacemonger-crt
  rules:
    - host: spacemonger.brennonloveless.com
      http:
        paths:
          - path: /api
            pathType: Prefix
            backend:
              service:
                name: spacemonger-api-service
                port:
                  number: 8080
          - path: /
            pathType: Prefix
            backend:
              service:
                name: spacemonger-ui-service
                port:
                  number: 80
25.03125
52
0.54432
7371a3ca03aab267b2eccba00ff3b366f143eafe
239
yaml
YAML
namespaces/live-1.cloud-platform.service.justice.gov.uk/money-to-prisoners-prod/02-limitrange.yaml
umaar/cloud-platform-environments
c0fab9757b5fe340998457e182424ac1027b7c5f
[ "MIT" ]
null
null
null
namespaces/live-1.cloud-platform.service.justice.gov.uk/money-to-prisoners-prod/02-limitrange.yaml
umaar/cloud-platform-environments
c0fab9757b5fe340998457e182424ac1027b7c5f
[ "MIT" ]
null
null
null
namespaces/live-1.cloud-platform.service.justice.gov.uk/money-to-prisoners-prod/02-limitrange.yaml
umaar/cloud-platform-environments
c0fab9757b5fe340998457e182424ac1027b7c5f
[ "MIT" ]
null
null
null
apiVersion: v1
kind: LimitRange
metadata:
  name: limitrange
  namespace: money-to-prisoners-prod
spec:
  limits:
  - default:
      cpu: 250m
      memory: 500Mi
    defaultRequest:
      cpu: 125m
      memory: 250Mi
    type: Container
15.933333
36
0.656904
7371f5e211581b074bc5c5bd451fb1d145eb8bd6
1,210
yaml
YAML
namespace-inheritance/compiled/namespace_incubator-1.yaml
tonyzhc/anthos-config-management-samples
5da4d66177aa7bd09a4990884c47a876bba22353
[ "Apache-2.0" ]
30
2021-04-21T14:45:23.000Z
2022-03-28T21:32:00.000Z
namespace-inheritance/compiled/namespace_incubator-1.yaml
tonyzhc/anthos-config-management-samples
5da4d66177aa7bd09a4990884c47a876bba22353
[ "Apache-2.0" ]
23
2021-06-23T19:02:57.000Z
2022-02-24T14:17:34.000Z
namespace-inheritance/compiled/namespace_incubator-1.yaml
tonyzhc/anthos-config-management-samples
5da4d66177aa7bd09a4990884c47a876bba22353
[ "Apache-2.0" ]
102
2021-04-27T22:12:39.000Z
2022-03-31T04:29:28.000Z
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# [START anthos_config_management_namespace_inheritance_compiled_namespace_incubator_1]
---
apiVersion: v1
kind: Namespace
metadata:
  annotations:
    configmanagement.gke.io/cluster-name: defaultcluster
    configmanagement.gke.io/source-path: config/namespaces/rnd/incubator-1/namespace.yaml
    configsync.gke.io/declared-fields: '{}'
    hnc.x-k8s.io/managed-by: configmanagement.gke.io
  labels:
    configsync.gke.io/declared-version: v1
    incubator-1.tree.hnc.x-k8s.io/depth: "0"
    rnd.tree.hnc.x-k8s.io/depth: "1"
  name: incubator-1
# [END anthos_config_management_namespace_inheritance_compiled_namespace_incubator_1]
39.032258
89
0.771901
737240cadd512d8720432d27c594c82f160cce99
480
yml
YAML
roles/kubernetes-master/templates/kube-scheduler.yml
venkateswarlu-1/nw1
57d82047d112ce50bf849e22068571b8877e32eb
[ "Apache-2.0" ]
75
2015-07-16T14:37:36.000Z
2022-03-30T21:40:14.000Z
roles/kubernetes-master/templates/kube-scheduler.yml
venkateswarlu-1/nw1
57d82047d112ce50bf849e22068571b8877e32eb
[ "Apache-2.0" ]
86
2015-07-21T09:54:36.000Z
2016-06-29T10:31:35.000Z
roles/kubernetes-master/templates/kube-scheduler.yml
venkateswarlu-1/nw1
57d82047d112ce50bf849e22068571b8877e32eb
[ "Apache-2.0" ]
38
2015-07-29T12:57:31.000Z
2022-03-30T21:40:16.000Z
apiVersion: v1
kind: Pod
metadata:
  name: kube-scheduler
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-scheduler
    image: gcr.io/google_containers/hyperkube:{{ kube_version }}
    command:
    - /hyperkube
    - scheduler
    - --master=http://127.0.0.1:8080
    - --v={{kube_log_level}}
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10251
      initialDelaySeconds: 15
      timeoutSeconds: 1
20.869565
64
0.63125
7372f11b74886e0da2b911c814da9b76f971a234
371
yaml
YAML
tests/stacks/ibm/test_data/expected/rbac.authorization.k8s.io_v1_clusterrole_jupyter-web-app-kubeflow-notebook-ui-admin.yaml
pathologywatch/manifests
622a1974ef841406b45bd3ae51ba507cbaf4bf89
[ "Apache-2.0" ]
376
2019-02-27T01:32:34.000Z
2022-03-29T14:02:39.000Z
tests/stacks/ibm/test_data/expected/rbac.authorization.k8s.io_v1_clusterrole_jupyter-web-app-kubeflow-notebook-ui-admin.yaml
pathologywatch/manifests
622a1974ef841406b45bd3ae51ba507cbaf4bf89
[ "Apache-2.0" ]
2,171
2019-02-27T17:37:34.000Z
2022-03-31T18:16:26.000Z
tests/stacks/ibm/test_data/expected/rbac.authorization.k8s.io_v1_clusterrole_jupyter-web-app-kubeflow-notebook-ui-admin.yaml
pathologywatch/manifests
622a1974ef841406b45bd3ae51ba507cbaf4bf89
[ "Apache-2.0" ]
643
2019-02-27T01:49:11.000Z
2022-03-31T06:31:15.000Z
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app: jupyter-web-app
    app.kubernetes.io/component: jupyter-web-app
    app.kubernetes.io/name: jupyter-web-app
    kustomize.component: jupyter-web-app
    rbac.authorization.kubeflow.org/aggregate-to-kubeflow-admin: "true"
  name: jupyter-web-app-kubeflow-notebook-ui-admin
rules: []
30.916667
71
0.754717
73735f7e956a615416dfd686b3d72d26d88d425b
315
yaml
YAML
docs/examples/nfs/pvc-exportrules.yaml
dnix101/pso-csi
83b83f01b2f59c21ba0033156f7b597db7b699b9
[ "Apache-2.0" ]
27
2020-06-19T18:37:19.000Z
2022-02-12T17:14:06.000Z
docs/examples/nfs/pvc-exportrules.yaml
dnix101/pso-csi
83b83f01b2f59c21ba0033156f7b597db7b699b9
[ "Apache-2.0" ]
105
2020-06-10T01:30:10.000Z
2022-03-31T21:48:41.000Z
docs/examples/nfs/pvc-exportrules.yaml
dnix101/pso-csi
83b83f01b2f59c21ba0033156f7b597db7b699b9
[ "Apache-2.0" ]
37
2020-06-11T20:52:04.000Z
2022-03-16T16:41:29.000Z
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  # Referenced in pod.yaml for the volume spec
  name: pvc-exportrules
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
  # Matches the name defined in deployment/storageclass.yaml
  storageClassName: pure-file-exportrules
22.5
60
0.755556
737362d2671b243a098cf9e5f5326dea4671ef38
1,295
yaml
YAML
roles/kubernetes/device_plugins/charts/gpu-plugin/templates/gpu_plugin.yaml
oEscal/converged-edge-experience-kits
1878cfcca364f6c0fd62a1c9825187057fd090f6
[ "Apache-2.0" ]
27
2020-03-20T20:32:44.000Z
2021-03-15T07:15:08.000Z
roles/kubernetes/device_plugins/charts/gpu-plugin/templates/gpu_plugin.yaml
oEscal/converged-edge-experience-kits
1878cfcca364f6c0fd62a1c9825187057fd090f6
[ "Apache-2.0" ]
81
2020-01-22T14:57:42.000Z
2021-03-23T18:06:43.000Z
roles/kubernetes/device_plugins/charts/gpu-plugin/templates/gpu_plugin.yaml
oEscal/converged-edge-experience-kits
1878cfcca364f6c0fd62a1c9825187057fd090f6
[ "Apache-2.0" ]
27
2019-12-27T06:07:47.000Z
2021-02-26T09:32:46.000Z
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2020 Intel Corporation
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: "{{ .Release.Name }}"
  labels:
    app: "{{ .Release.Name }}"
spec:
  selector:
    matchLabels:
      app: "{{ .Release.Name }}"
  template:
    metadata:
      labels:
        app: "{{ .Release.Name }}"
    spec:
      containers:
      - name: "{{ .Release.Name }}"
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
        imagePullPolicy: "{{ .Values.pullPolicy }}"
        securityContext:
          readOnlyRootFilesystem: true
        volumeMounts:
        - name: devfs
          mountPath: /dev/dri
          readOnly: true
        - name: sysfs
          mountPath: /sys/class/drm
          readOnly: true
        - name: kubeletsockets
          mountPath: /var/lib/kubelet/device-plugins
      volumes:
      - name: devfs
        hostPath:
          path: /dev/dri
      - name: sysfs
        hostPath:
          path: /sys/class/drm
      - name: kubeletsockets
        hostPath:
          path: /var/lib/kubelet/device-plugins
      nodeSelector:
        "{{ .Values.nodeSelector }}": "yes"
24.903846
71
0.537452
73737df92ce007ab5b20297499f69e7e96a9de0c
265
yaml
YAML
kubernetes-templating/kustomize/overrides/prod/deployment.yaml
antik9/antik9_platform
1647a42cabab1ab54c2e19dcaee9f8f8efaf3057
[ "MIT" ]
null
null
null
kubernetes-templating/kustomize/overrides/prod/deployment.yaml
antik9/antik9_platform
1647a42cabab1ab54c2e19dcaee9f8f8efaf3057
[ "MIT" ]
null
null
null
kubernetes-templating/kustomize/overrides/prod/deployment.yaml
antik9/antik9_platform
1647a42cabab1ab54c2e19dcaee9f8f8efaf3057
[ "MIT" ]
null
null
null
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: recommendationservice
  namespace: hipster-shop-prod
spec:
  selector:
    matchLabels:
      app: recommendationservice-prod
  template:
    metadata:
      labels:
        app: recommendationservice-prod
17.666667
39
0.709434
7373b5f2c4d1a4c7d02daafb279a0879ae0d1e24
210
yml
YAML
manifests/service.yml
rbmathis/pipelines-javascript-docker
9185dacb192f96b82aa824861c728806c7cb9330
[ "CC-BY-4.0", "MIT" ]
null
null
null
manifests/service.yml
rbmathis/pipelines-javascript-docker
9185dacb192f96b82aa824861c728806c7cb9330
[ "CC-BY-4.0", "MIT" ]
null
null
null
manifests/service.yml
rbmathis/pipelines-javascript-docker
9185dacb192f96b82aa824861c728806c7cb9330
[ "CC-BY-4.0", "MIT" ]
null
null
null
apiVersion: v1
kind: Service
metadata:
  name: rbmathispipelinesjavascriptdocker
spec:
  type: LoadBalancer
  ports:
  - port: 8080
  selector:
    app: rbmathispipelinesjavascriptdocker
21
46
0.690476
7373c61bafb827c13cc39f09edffd8b8817e8b4b
62
yaml
YAML
overlays/azure-dev/namespace.yaml
ezYakaEagle442/azure-service-operator
2ef4852291606a7e286a9494888d17e9e6a1824e
[ "MIT" ]
null
null
null
overlays/azure-dev/namespace.yaml
ezYakaEagle442/azure-service-operator
2ef4852291606a7e286a9494888d17e9e6a1824e
[ "MIT" ]
null
null
null
overlays/azure-dev/namespace.yaml
ezYakaEagle442/azure-service-operator
2ef4852291606a7e286a9494888d17e9e6a1824e
[ "MIT" ]
null
null
null
apiVersion: v1
kind: Namespace
metadata:
  name: petclinic-dev
15.5
21
0.790323
7373d9ff90c7c01735c388002cacfb4dda21cf87
110
yaml
YAML
charts/polaris/charts/aws-service-operator/templates/serviceaccount.yaml
davidwebstar34/polaris-cluster
187d91e8ffd529d265cc8eb3de23fb0d12190a5f
[ "MIT" ]
null
null
null
charts/polaris/charts/aws-service-operator/templates/serviceaccount.yaml
davidwebstar34/polaris-cluster
187d91e8ffd529d265cc8eb3de23fb0d12190a5f
[ "MIT" ]
null
null
null
charts/polaris/charts/aws-service-operator/templates/serviceaccount.yaml
davidwebstar34/polaris-cluster
187d91e8ffd529d265cc8eb3de23fb0d12190a5f
[ "MIT" ]
null
null
null
kind: ServiceAccount
apiVersion: v1
metadata:
  name: aws-service-operator
  #namespace: aws-service-operator
18.333333
34
0.790909
73741634350c96ab768510002258e0eab7336a4d
3,201
yaml
YAML
deploy/kubernetes/base/controller.yaml
tidbcloud/aws-ebs-csi-driver
905ec060237276501818e78acdcb5de0a795f42c
[ "Apache-2.0" ]
null
null
null
deploy/kubernetes/base/controller.yaml
tidbcloud/aws-ebs-csi-driver
905ec060237276501818e78acdcb5de0a795f42c
[ "Apache-2.0" ]
null
null
null
deploy/kubernetes/base/controller.yaml
tidbcloud/aws-ebs-csi-driver
905ec060237276501818e78acdcb5de0a795f42c
[ "Apache-2.0" ]
null
null
null
--- # Source: aws-ebs-csi-driver/templates/controller.yaml # Controller Service kind: Deployment apiVersion: apps/v1 metadata: name: ebs-csi-controller namespace: kube-system labels: app.kubernetes.io/name: aws-ebs-csi-driver spec: replicas: 2 selector: matchLabels: app: ebs-csi-controller app.kubernetes.io/name: aws-ebs-csi-driver template: metadata: labels: app: ebs-csi-controller app.kubernetes.io/name: aws-ebs-csi-driver spec: nodeSelector: kubernetes.io/os: linux serviceAccountName: ebs-csi-controller-sa priorityClassName: system-cluster-critical tolerations: - operator: Exists containers: - name: ebs-plugin image: k8s.gcr.io/provider-aws/aws-ebs-csi-driver:v0.9.1 imagePullPolicy: IfNotPresent args: # - {all,controller,node} # specify the driver mode - --endpoint=$(CSI_ENDPOINT) - --logtostderr - --v=5 env: - name: CSI_ENDPOINT value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock - name: AWS_ACCESS_KEY_ID valueFrom: secretKeyRef: name: aws-secret key: key_id optional: true - name: AWS_SECRET_ACCESS_KEY valueFrom: secretKeyRef: name: aws-secret key: access_key optional: true volumeMounts: - name: socket-dir mountPath: /var/lib/csi/sockets/pluginproxy/ ports: - name: healthz containerPort: 9808 protocol: TCP livenessProbe: httpGet: path: /healthz port: healthz initialDelaySeconds: 10 timeoutSeconds: 3 periodSeconds: 10 failureThreshold: 5 - name: csi-provisioner image: k8s.gcr.io/sig-storage/csi-provisioner:v2.0.2 args: - --csi-address=$(ADDRESS) - --v=5 - --feature-gates=Topology=true - --leader-election=true - --default-fstype=ext4 env: - name: ADDRESS value: /var/lib/csi/sockets/pluginproxy/csi.sock volumeMounts: - name: socket-dir mountPath: /var/lib/csi/sockets/pluginproxy/ - name: csi-attacher image: k8s.gcr.io/sig-storage/csi-attacher:v3.0.0 args: - --csi-address=$(ADDRESS) - --v=5 - --leader-election=true env: - name: ADDRESS value: /var/lib/csi/sockets/pluginproxy/csi.sock volumeMounts: - name: socket-dir mountPath: /var/lib/csi/sockets/pluginproxy/ - name: liveness-probe image: k8s.gcr.io/sig-storage/livenessprobe:v2.1.0 args: - --csi-address=/csi/csi.sock volumeMounts: - name: socket-dir mountPath: /csi volumes: - name: socket-dir emptyDir: {}
30.778846
69
0.526398
737426b9bea91188f68d78f0e3a019e801a2454a
191
yml
YAML
Kubernetes/regapp-service.yml
harshhaareddy/My_DevOps_Project
2ea142531ba8d2c44f91b5e8be7fac4a0a3f60f0
[ "CC0-1.0" ]
null
null
null
Kubernetes/regapp-service.yml
harshhaareddy/My_DevOps_Project
2ea142531ba8d2c44f91b5e8be7fac4a0a3f60f0
[ "CC0-1.0" ]
null
null
null
Kubernetes/regapp-service.yml
harshhaareddy/My_DevOps_Project
2ea142531ba8d2c44f91b5e8be7fac4a0a3f60f0
[ "CC0-1.0" ]
1
2022-01-29T02:59:44.000Z
2022-01-29T02:59:44.000Z
apiVersion: v1
kind: Service
metadata:
  name: my-service
  labels:
    app: regapp
spec:
  selector:
    app: regapp
  ports:
  - port: 8080
    targetPort: 8080
  type: LoadBalancer
12.733333
22
0.643979
73748e758e1b14286a4e51c79d5465293d8cdb55
640
yml
YAML
k8s/deployment.yml
raguilera82/nestjs-workshop
67cfbe36272ba7a8d9e0dc9e9a8857192f6dc62d
[ "MIT" ]
2
2021-03-03T22:15:49.000Z
2021-08-24T04:52:35.000Z
k8s/deployment.yml
raguilera82/nestjs-workshop
67cfbe36272ba7a8d9e0dc9e9a8857192f6dc62d
[ "MIT" ]
2
2021-03-10T17:38:00.000Z
2022-01-22T12:02:17.000Z
k8s/deployment.yml
raguilera82/nestjs-workshop
67cfbe36272ba7a8d9e0dc9e9a8857192f6dc62d
[ "MIT" ]
6
2020-05-15T15:51:54.000Z
2021-08-24T04:52:38.000Z
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nest-workshop
  namespace: apps
  labels:
    app: nest-workshop
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nest-workshop
  template:
    metadata:
      labels:
        app: nest-workshop
    spec:
      imagePullSecrets:
        - name: nexus-docker
      containers:
        - name: nest-workshop
          image: nest-workshop:0.0.1
          imagePullPolicy: Never
          ports:
            - containerPort: 3000
          resources:
            requests:
              memory: "64Mi"
              cpu: "250m"
            limits:
              memory: "128Mi"
              cpu: "500m"
20
34
0.551563
7374ac1c0f858539af04ca06deddcec9c26adc1b
468
yaml
YAML
config/rbac/ethernetclusterconfig_viewer_role.yaml
smart-edge-open/intel-ethernet-operator
456084b64ecfdc9b36f693f55ad4102590ad40d1
[ "Apache-2.0" ]
7
2022-01-18T08:35:04.000Z
2022-03-25T02:41:07.000Z
config/rbac/ethernetclusterconfig_viewer_role.yaml
smart-edge-open/intel-ethernet-operator
456084b64ecfdc9b36f693f55ad4102590ad40d1
[ "Apache-2.0" ]
null
null
null
config/rbac/ethernetclusterconfig_viewer_role.yaml
smart-edge-open/intel-ethernet-operator
456084b64ecfdc9b36f693f55ad4102590ad40d1
[ "Apache-2.0" ]
1
2022-02-08T10:38:18.000Z
2022-02-08T10:38:18.000Z
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation

# permissions for end users to view ethernetclusterconfigs.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: ethernetclusterconfig-viewer-role
rules:
- apiGroups:
  - ethernet.intel.com
  resources:
  - ethernetclusterconfigs
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ethernet.intel.com
  resources:
  - ethernetclusterconfigs/status
  verbs:
  - get
19.5
59
0.745726
7374acc7c3b8fed468797e33fa7ee07d14c231f7
1,664
yaml
YAML
workloads/podinfo-dep.yaml
jgarcia-starz/flux-get-started
8fc3ca1607236ebae3f2fd4e44d7c9f1e1d4da79
[ "Apache-2.0" ]
null
null
null
workloads/podinfo-dep.yaml
jgarcia-starz/flux-get-started
8fc3ca1607236ebae3f2fd4e44d7c9f1e1d4da79
[ "Apache-2.0" ]
null
null
null
workloads/podinfo-dep.yaml
jgarcia-starz/flux-get-started
8fc3ca1607236ebae3f2fd4e44d7c9f1e1d4da79
[ "Apache-2.0" ]
null
null
null
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: podinfo
  namespace: demo
  labels:
    app: podinfo
  annotations:
    fluxcd.io/automated: "true"
    fluxcd.io/tag.init: regex:^3.10.*
    fluxcd.io/tag.podinfod: semver:~3.1
spec:
  strategy:
    rollingUpdate:
      maxUnavailable: 0
    type: RollingUpdate
  selector:
    matchLabels:
      app: podinfo
  template:
    metadata:
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "9797"
      labels:
        app: podinfo
    spec:
      initContainers:
      - name: init
        image: alpine:3.10
        command:
        - sleep
        - "1"
      containers:
      - name: podinfod
        image: stefanprodan/podinfo:3.1.5
        imagePullPolicy: IfNotPresent
        ports:
        - name: http
          containerPort: 9898
          protocol: TCP
        - name: http-metrics
          containerPort: 9797
          protocol: TCP
        - name: grpc
          containerPort: 9999
          protocol: TCP
        command:
        - ./podinfo
        - --port=9898
        - --port-metrics=9797
        - --grpc-port=9999
        - --grpc-service-name=podinfo
        - --level=info
        - --random-delay=false
        - --random-error=false
        env:
        - name: PODINFO_UI_COLOR
          value: "#FFFFFF"
        livenessProbe:
          httpGet:
            path: /healthz
            port: 9898
        readinessProbe:
          httpGet:
            path: /readyz
            port: 9898
        resources:
          limits:
            cpu: 1000m
            memory: 128Mi
          requests:
            cpu: 10m
            memory: 64Mi
21.894737
41
0.518029
7374d1b5025ed695335996c5e3f0a13d52c3d4dd
4,149
yaml
YAML
clusters/app.ci/prow/03_deployment/statusreconciler.yaml
thegreyd/release
d3248b1d8379a7de5bc1048ec3dd99dfbc9a5b98
[ "Apache-2.0" ]
null
null
null
clusters/app.ci/prow/03_deployment/statusreconciler.yaml
thegreyd/release
d3248b1d8379a7de5bc1048ec3dd99dfbc9a5b98
[ "Apache-2.0" ]
null
null
null
clusters/app.ci/prow/03_deployment/statusreconciler.yaml
thegreyd/release
d3248b1d8379a7de5bc1048ec3dd99dfbc9a5b98
[ "Apache-2.0" ]
null
null
null
apiVersion: apps/v1 kind: Deployment metadata: namespace: ci name: statusreconciler labels: app: prow component: statusreconciler spec: replicas: 1 selector: matchLabels: app: prow component: statusreconciler template: metadata: labels: app: prow component: statusreconciler spec: serviceAccountName: statusreconciler terminationGracePeriodSeconds: 180 containers: - name: statusreconciler image: gcr.io/k8s-prow/status-reconciler:v20210604-95cc9f4b68 imagePullPolicy: IfNotPresent args: - --dry-run=false - --continue-on-error=true - --plugin-config=/etc/plugins/plugins.yaml - --supplemental-plugin-config-dir=/etc/plugins - --config-path=/etc/config/config.yaml - --supplemental-prow-config-dir=/etc/config - --github-token-path=/etc/github/oauth - --github-endpoint=http://ghproxy - --github-endpoint=https://api.github.com - --github-graphql-endpoint=http://ghproxy/graphql - --job-config-path=/etc/job-config - --projected-token-file=/var/sa-token/token ports: - name: http containerPort: 8888 volumeMounts: - name: service-account-token mountPath: /var/sa-token - name: oauth mountPath: /etc/github readOnly: true - name: config mountPath: /etc/config readOnly: true - name: job-config-misc mountPath: /etc/job-config/misc readOnly: true - name: job-config-master mountPath: /etc/job-config/master readOnly: true - name: job-config-3x mountPath: /etc/job-config/3.x readOnly: true - name: job-config-41 mountPath: /etc/job-config/4.1 readOnly: true - name: job-config-42 mountPath: /etc/job-config/4.2 readOnly: true - name: job-config-43 mountPath: /etc/job-config/4.3 readOnly: true - name: job-config-44 mountPath: /etc/job-config/4.4 readOnly: true - name: job-config-45 mountPath: /etc/job-config/4.5 readOnly: true - name: job-config-46 mountPath: /etc/job-config/4.6 readOnly: true - name: job-config-47 mountPath: /etc/job-config/4.7 readOnly: true - name: job-config-48 mountPath: /etc/job-config/4.8 readOnly: true - name: job-config-49 mountPath: /etc/job-config/4.9 readOnly: true - name: plugins mountPath: /etc/plugins readOnly: true resources: requests: memory: "200Mi" cpu: "20m" volumes: - name: service-account-token projected: sources: - serviceAccountToken: path: token - name: oauth secret: secretName: github-credentials-openshift-ci-robot - name: config configMap: name: config - name: job-config-misc configMap: name: job-config-misc - name: job-config-master configMap: name: job-config-master - name: job-config-3x configMap: name: job-config-3.x - name: job-config-41 configMap: name: job-config-4.1 - name: job-config-42 configMap: name: job-config-4.2 - name: job-config-43 configMap: name: job-config-4.3 - name: job-config-44 configMap: name: job-config-4.4 - name: job-config-45 configMap: name: job-config-4.5 - name: job-config-46 configMap: name: job-config-4.6 - name: job-config-47 configMap: name: job-config-4.7 - name: job-config-48 configMap: name: job-config-4.8 - name: job-config-49 configMap: name: job-config-4.9 - name: plugins configMap: name: plugins
28.417808
69
0.548566
7374d80e7cdddfeeea72e2dbbf2264eed4178660
418
yaml
YAML
manifests/prometheus-clusterRole.yaml
rofafor/kube-prometheus
2c25661fad2333fd0a6e8c504f16dca3ed60b218
[ "Apache-2.0" ]
null
null
null
manifests/prometheus-clusterRole.yaml
rofafor/kube-prometheus
2c25661fad2333fd0a6e8c504f16dca3ed60b218
[ "Apache-2.0" ]
1
2021-12-08T20:19:50.000Z
2021-12-08T20:19:50.000Z
manifests/prometheus-clusterRole.yaml
rofafor/kube-prometheus
2c25661fad2333fd0a6e8c504f16dca3ed60b218
[ "Apache-2.0" ]
null
null
null
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/component: prometheus
    app.kubernetes.io/name: prometheus
    app.kubernetes.io/part-of: kube-prometheus
    app.kubernetes.io/version: 2.31.1
  name: prometheus-k8s
  namespace: monitoring
rules:
- apiGroups:
  - ""
  resources:
  - nodes/metrics
  verbs:
  - get
- nonResourceURLs:
  - /metrics
  verbs:
  - get
19
46
0.708134
7374e410295e57edcd117524a9db8039eab9cf3e
360
yaml
YAML
k8s/rabbitmq-deployment.yaml
nikoladragas/public-transport
be25ab7bea76f8b0873689e6fbcdda63c210c96e
[ "MIT" ]
1
2020-10-27T22:42:56.000Z
2020-10-27T22:42:56.000Z
k8s/rabbitmq-deployment.yaml
nikoladragas/public-transport
be25ab7bea76f8b0873689e6fbcdda63c210c96e
[ "MIT" ]
null
null
null
k8s/rabbitmq-deployment.yaml
nikoladragas/public-transport
be25ab7bea76f8b0873689e6fbcdda63c210c96e
[ "MIT" ]
1
2020-11-23T18:40:44.000Z
2020-11-23T18:40:44.000Z
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rabbitmq-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      component: rabbitmq
  template:
    metadata:
      labels:
        component: rabbitmq
    spec:
      containers:
        - name: rabbitmq
          image: rabbitmq:3-management
          ports:
            - containerPort: 5672
18.947368
38
0.611111
7374f81bf6dc096f6119be0189364bd2fe9b5f30
2,576
yaml
YAML
apps/prow/cluster/hook_deployment.yaml
soltysh/k8s.io
4dc55f5a4e8a5d97860df5500d5598bbe2912878
[ "Apache-2.0" ]
null
null
null
apps/prow/cluster/hook_deployment.yaml
soltysh/k8s.io
4dc55f5a4e8a5d97860df5500d5598bbe2912878
[ "Apache-2.0" ]
null
null
null
apps/prow/cluster/hook_deployment.yaml
soltysh/k8s.io
4dc55f5a4e8a5d97860df5500d5598bbe2912878
[ "Apache-2.0" ]
null
null
null
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: prow
  name: hook
  labels:
    app: hook
spec:
  replicas: 4
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
  selector:
    matchLabels:
      app: hook
  template:
    metadata:
      labels:
        app: hook
    spec:
      serviceAccountName: hook
      terminationGracePeriodSeconds: 180
      containers:
      - name: hook
        image: gcr.io/k8s-prow/hook:v20220613-e739eafe50
        imagePullPolicy: Always
        args:
        - --config-path=/etc/config/config.yaml
        - --dry-run=false
        - --github-endpoint=http://ghproxy.prow.svc.cluster.local
        - --github-endpoint=https://api.github.com
        - --github-token-path=/etc/github/token
        - --job-config-path=/etc/job-config
        - --kubeconfig=/etc/kubeconfig/kubeconfig
        ports:
        - name: http
          containerPort: 8888
        - name: metrics
          containerPort: 9090
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - "ALL"
          privileged: false
        volumeMounts:
        - name: hmac
          mountPath: /etc/webhook
          readOnly: true
        - name: github-token
          mountPath: /etc/github
          readOnly: true
        - name: config
          mountPath: /etc/config
          readOnly: true
        - name: job-config
          mountPath: /etc/job-config
          readOnly: true
        - name: plugins
          mountPath: /etc/plugins
          readOnly: true
        - name: kubeconfig
          mountPath: /etc/kubeconfig
          readOnly: true
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8081
          initialDelaySeconds: 3
          periodSeconds: 3
        readinessProbe:
          httpGet:
            path: /healthz/ready
            port: 8081
          initialDelaySeconds: 10
          periodSeconds: 3
          timeoutSeconds: 600
      volumes:
      - name: hmac
        secret:
          secretName: k8s-infra-prow-hmac-token
      - name: github-token
        secret:
          defaultMode: 420
          secretName: k8s-infra-ci-robot-github-token
      - name: kubeconfig
        secret:
          defaultMode: 420
          secretName: k8s-infra-build-clusters-kubeconfig
      - name: config
        configMap:
          name: config
      - name: job-config
        configMap:
          name: job-config
      - name: plugins
        configMap:
          name: plugins
25.50495
65
0.549689
73751a22e7b3bd261db8069e0664e58cff1e0d64
205
yaml
YAML
frontend/000-prod/000-config.yaml
toresbe/kubernetes_draft
3132e88851cf270df511d9b5575ebad95ee27cc4
[ "MIT" ]
null
null
null
frontend/000-prod/000-config.yaml
toresbe/kubernetes_draft
3132e88851cf270df511d9b5575ebad95ee27cc4
[ "MIT" ]
null
null
null
frontend/000-prod/000-config.yaml
toresbe/kubernetes_draft
3132e88851cf270df511d9b5575ebad95ee27cc4
[ "MIT" ]
null
null
null
apiVersion: v1
kind: ConfigMap
metadata:
  name: frontend-prod-config
  namespace: default
data:
  API_BASE_URL: "https://forrige.frikanalen.no/api/"
  GRAPHQL_URL: "https://forrige.frikanalen.no/graphql"
22.777778
54
0.760976
73755cd4c4e31b2fa65184e9d5b8f6120044a071
468
yaml
YAML
examples/kubernetes/raw-block-pod.yaml
antmoveh/carina
6b2df56d8fa16cb616a6c8f449b754690486df2e
[ "Apache-2.0" ]
267
2021-08-18T10:54:31.000Z
2022-03-31T17:00:41.000Z
examples/kubernetes/raw-block-pod.yaml
antmoveh/carina
6b2df56d8fa16cb616a6c8f449b754690486df2e
[ "Apache-2.0" ]
32
2021-08-20T09:37:52.000Z
2022-03-04T05:45:01.000Z
examples/kubernetes/raw-block-pod.yaml
antmoveh/carina
6b2df56d8fa16cb616a6c8f449b754690486df2e
[ "Apache-2.0" ]
32
2021-08-19T07:08:49.000Z
2022-03-17T14:48:19.000Z
---
apiVersion: v1
kind: Pod
metadata:
  name: carina-block-pod
  namespace: carina
spec:
  containers:
    - name: centos
      securityContext:
        capabilities:
          add: ["SYS_RAWIO"]
      image: centos:latest
      imagePullPolicy: "IfNotPresent"
      command: ["/bin/sleep", "infinity"]
      volumeDevices:
        - name: data
          devicePath: /dev/xvda
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: raw-block-pvc
20.347826
41
0.606838
737568c1074220e2d3f20d4f5f11e553565e1232
336
yml
YAML
proje/mysolution/jr-prod-clr.yml
sezginerdem/k8sfundamentals
8f09877bfd604b857edff2cbab3b8f99fc1326d5
[ "MIT" ]
2
2021-07-14T10:14:51.000Z
2022-03-25T13:27:36.000Z
proje/mysolution/jr-prod-clr.yml
sezginerdem/k8sfundamentals
8f09877bfd604b857edff2cbab3b8f99fc1326d5
[ "MIT" ]
null
null
null
proje/mysolution/jr-prod-clr.yml
sezginerdem/k8sfundamentals
8f09877bfd604b857edff2cbab3b8f99fc1326d5
[ "MIT" ]
null
null
null
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  namespace: production
  name: junior-prod-role
rules:
- apiGroups: [""] # "" indicates the core API group
  resources: [""] # "services", "endpoints", "pods", "pods/log" etc.
  verbs: ["get", "list"] # "get", "list", "watch", "create", "update", "patch", "delete"
30.545455
88
0.651786
73759c0d259402a896d3d8a0f57bcf1671a6645f
143
yaml
YAML
built-in-references/Kubernetes/perf/violations/violation273.yaml
sw47/azure-policy
4a38e4cb4047b5364902653f0defe45f1b21bc61
[ "MIT" ]
null
null
null
built-in-references/Kubernetes/perf/violations/violation273.yaml
sw47/azure-policy
4a38e4cb4047b5364902653f0defe45f1b21bc61
[ "MIT" ]
null
null
null
built-in-references/Kubernetes/perf/violations/violation273.yaml
sw47/azure-policy
4a38e4cb4047b5364902653f0defe45f1b21bc61
[ "MIT" ]
null
null
null
apiVersion: v1
kind: Pod
metadata:
  name: image-demo-273
spec:
  containers:
  - name: nginx
    image: nginx #ritaacr.azurecr.io/nginx:latest
17.875
49
0.72028
73768e90cedd605ccbef5fe29bff81123f73c492
4,292
yaml
YAML
infrastructure/kube/keep-test/ethereum/keep-ecdsa-1-statefulset.yaml
aiinkiestism/keep-ecdsa
7b382159d9b35a9afa1f22dea5db091d863d6a56
[ "MIT" ]
54
2020-03-26T22:24:16.000Z
2022-03-30T20:16:58.000Z
infrastructure/kube/keep-test/ethereum/keep-ecdsa-1-statefulset.yaml
aiinkiestism/keep-ecdsa
7b382159d9b35a9afa1f22dea5db091d863d6a56
[ "MIT" ]
321
2020-03-24T14:29:26.000Z
2022-03-04T09:00:38.000Z
infrastructure/kube/keep-test/ethereum/keep-ecdsa-1-statefulset.yaml
aiinkiestism/keep-ecdsa
7b382159d9b35a9afa1f22dea5db091d863d6a56
[ "MIT" ]
29
2020-05-14T17:30:04.000Z
2022-01-16T18:03:40.000Z
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: keep-ecdsa-1
  namespace: default
  labels:
    keel.sh/policy: all
    app: keep
    type: ecdsa
    id: "1"
    network: ropsten
    chain: ethereum
spec:
  replicas: 1
  selector:
    matchLabels:
      app: keep
      type: ecdsa
      id: "1"
      network: ropsten
      chain: ethereum
  serviceName: keep-ecdsa-1
  volumeClaimTemplates:
    - metadata:
        name: keep-ecdsa-data
      spec:
        accessModes: [ReadWriteOnce]
        resources:
          requests:
            storage: 512Mi
    - metadata:
        name: keep-ecdsa-config
      spec:
        accessModes: [ReadWriteOnce]
        resources:
          requests:
            storage: 5Mi
  template:
    metadata:
      labels:
        app: keep
        type: ecdsa
        id: "1"
        network: ropsten
        chain: ethereum
    spec:
      containers:
        - name: keep-ecdsa
          image: gcr.io/keep-test-f3e0/keep-ecdsa-ethereum
          imagePullPolicy: Always
          ports:
            - containerPort: 3919
          env:
            - name: KEEP_ETHEREUM_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: eth-account-passphrases
                  key: account-1
            - name: LOG_LEVEL
              value: "keep*=debug tss-lib=warn"
            - name: IPFS_LOGGING_FMT
              value: nocolor
          volumeMounts:
            - name: keep-ecdsa-config
              mountPath: /mnt/keep-ecdsa/config
            - name: keep-ecdsa-data
              mountPath: /mnt/keep-ecdsa/data
            - name: eth-account-keyfile
              mountPath: /mnt/keep-ecdsa/keyfile
          command:
            [
              "keep-ecdsa",
              "-config",
              "/mnt/keep-ecdsa/config/keep-ecdsa-config.toml",
              "start",
            ]
      initContainers:
        - name: initcontainer-provision-keep-ecdsa
          image: gcr.io/keep-test-f3e0/initcontainer-provision-keep-ecdsa-ethereum
          imagePullPolicy: Always
          env:
            - name: ETH_RPC_URL
              valueFrom:
                secretKeyRef:
                  name: eth-network-ropsten
                  key: keep-ecdsa-rpc-url
            - name: ETH_WS_URL
              valueFrom:
                secretKeyRef:
                  name: eth-network-ropsten
                  key: keep-ecdsa-ws-url
            - name: ETH_NETWORK_ID
              valueFrom:
                configMapKeyRef:
                  name: eth-network-ropsten
                  key: network-id
            - name: CONTRACT_OWNER_ETH_ACCOUNT_ADDRESS
              valueFrom:
                configMapKeyRef:
                  name: eth-network-ropsten
                  key: contract-owner-eth-account-address
            - name: CONTRACT_OWNER_ETH_ACCOUNT_PRIVATE_KEY
              valueFrom:
                secretKeyRef:
                  name: eth-network-ropsten
                  key: contract-owner-eth-account-private-key
            - name: KEEP_TECDSA_ETH_KEYFILE_PATH
              value: /mnt/keep-ecdsa/keyfile/account-1-keyfile
            - name: KEEP_TECDSA_PEERS
              value: /dns4/ecdsa-0.test.keep.network/tcp/3919/ipfs/16Uiu2HAmCcfVpHwfBKNFbQuhvGuFXHVLQ65gB4sJm7HyrcZuLttH
            - name: KEEP_TECDSA_ANNOUNCED_ADDRESSES
              value: /dns4/ecdsa-1.test.keep.network/tcp/3919
            - name: KEEP_TECDSA_PORT
              value: "3919"
            - name: KEEP_DATA_DIR
              value: /mnt/keep-ecdsa/data
            - name: METRICS_PORT
              value: "9601"
          volumeMounts:
            - name: keep-ecdsa-config
              mountPath: /mnt/keep-ecdsa/config
            - name: eth-account-keyfile
              mountPath: /mnt/keep-ecdsa/keyfile
          command: ["node", "/tmp/provision-keep-ecdsa.js"]
      volumes:
        - name: keep-ecdsa-config
          persistentVolumeClaim:
            claimName: keep-ecdsa-config
        - name: keep-ecdsa-data
          persistentVolumeClaim:
            claimName: keep-ecdsa-data
        - name: eth-account-keyfile
          configMap:
            name: eth-account-info
            items:
              - key: account-1-keyfile
                path: account-1-keyfile
30.877698
120
0.528658
73769e262b66715d3fa0fe0f71177725b23188cd
77
yml
YAML
openshift-serviceaccount.yml
vbehar/openshift-github-hooks
0762dd7dae20f054987f127a3e069ba18e28eba0
[ "Apache-2.0" ]
3
2017-02-25T20:03:17.000Z
2020-01-30T11:48:19.000Z
openshift-serviceaccount.yml
vbehar/openshift-github-hooks
0762dd7dae20f054987f127a3e069ba18e28eba0
[ "Apache-2.0" ]
null
null
null
openshift-serviceaccount.yml
vbehar/openshift-github-hooks
0762dd7dae20f054987f127a3e069ba18e28eba0
[ "Apache-2.0" ]
4
2016-03-24T08:08:03.000Z
2019-03-03T18:17:44.000Z
kind: ServiceAccount
apiVersion: v1
metadata:
  name: github-hooks-controller
19.25
31
0.818182
7376ab75c18acc8252ee1f7a04463c9dd67948ac
4,613
yml
YAML
deploy/kubernetes/deployment.yml
proact-de/vcloud-csi-driver
29cf24fa84dc9fd6134cccb8a6d0bc6833112b6e
[ "Apache-2.0" ]
5
2021-02-17T14:39:48.000Z
2022-03-25T02:06:45.000Z
deploy/kubernetes/deployment.yml
proact-de/vcloud-csi-driver
29cf24fa84dc9fd6134cccb8a6d0bc6833112b6e
[ "Apache-2.0" ]
13
2021-02-15T14:54:57.000Z
2022-03-19T22:44:09.000Z
deploy/kubernetes/deployment.yml
proact-de/vcloud-csi-driver
29cf24fa84dc9fd6134cccb8a6d0bc6833112b6e
[ "Apache-2.0" ]
1
2022-02-25T14:25:42.000Z
2022-02-25T14:25:42.000Z
kind: Deployment
apiVersion: apps/v1
metadata:
  name: vcloud-csi-driver
  labels:
    app.kubernetes.io/name: vcloud-csi-driver
    app.kubernetes.io/component: ctrl
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: vcloud-csi-driver
      app.kubernetes.io/component: ctrl
  template:
    metadata:
      labels:
        app.kubernetes.io/name: vcloud-csi-driver
        app.kubernetes.io/component: ctrl
    spec:
      serviceAccountName: vcloud-csi-ctrl
      priorityClassName: system-cluster-critical
      tolerations:
        - operator: Exists
          effect: NoSchedule
        - operator: Exists
          effect: NoExecute
      containers:
        - name: csi-driver
          image: vcloud-driver
          imagePullPolicy: Always
          securityContext:
            privileged: true
            allowPrivilegeEscalation: true
            capabilities:
              add:
                - SYS_ADMIN
          env:
            - name: VCLOUD_CSI_NODENAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
            - name: VCLOUD_CSI_HREF
              valueFrom:
                secretKeyRef:
                  name: vcloud-csi-driver
                  key: href
            - name: VCLOUD_CSI_INSECURE
              valueFrom:
                secretKeyRef:
                  name: vcloud-csi-driver
                  key: insecure
            - name: VCLOUD_CSI_USERNAME
              valueFrom:
                secretKeyRef:
                  name: vcloud-csi-driver
                  key: username
            - name: VCLOUD_CSI_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: vcloud-csi-driver
                  key: password
            - name: VCLOUD_CSI_ORG
              valueFrom:
                secretKeyRef:
                  name: vcloud-csi-driver
                  key: org
            - name: VCLOUD_CSI_VDC
              valueFrom:
                secretKeyRef:
                  name: vcloud-csi-driver
                  key: vdc
            - name: VCLOUD_CSI_ENDOINT
              value: "unix:///csi/csi.sock"
          ports:
            - name: metrics
              containerPort: 8080
          livenessProbe:
            httpGet:
              path: /healthz
              port: metrics
            initialDelaySeconds: 10
            failureThreshold: 5
            timeoutSeconds: 3
            periodSeconds: 2
          readinessProbe:
            httpGet:
              path: /readyz
              port: metrics
            initialDelaySeconds: 10
            failureThreshold: 5
            timeoutSeconds: 3
            periodSeconds: 2
          volumeMounts:
            - name: socket
              mountPath: /csi
        - name: csi-attacher
          image: csi-attacher
          imagePullPolicy: Always
          args:
            - --v=5
            - --csi-address=/csi/csi.sock
            - --leader-election
          securityContext:
            privileged: true
            allowPrivilegeEscalation: true
            capabilities:
              add:
                - SYS_ADMIN
          volumeMounts:
            - name: socket
              mountPath: /csi
        - name: csi-provisioner
          image: csi-provisioner
          imagePullPolicy: Always
          args:
            - --v=5
            - --csi-address=/csi/csi.sock
            - --leader-election
            - --default-fstype=ext4
            - --feature-gates=Topology=true
            - --strict-topology
          securityContext:
            privileged: true
            allowPrivilegeEscalation: true
            capabilities:
              add:
                - SYS_ADMIN
          volumeMounts:
            - name: socket
              mountPath: /csi
        - name: csi-resizer
          image: csi-resizer
          imagePullPolicy: Always
          args:
            - --v=5
            - --csi-address=/csi/csi.sock
            - --leader-election
          securityContext:
            privileged: true
            allowPrivilegeEscalation: true
            capabilities:
              add:
                - SYS_ADMIN
          volumeMounts:
            - name: socket
              mountPath: /csi
        - name: liveness-probe
          image: liveness-probe
          imagePullPolicy: Always
          args:
            - --csi-address=/csi/csi.sock
          volumeMounts:
            - name: socket
              mountPath: /csi
      volumes:
        - name: socket
          emptyDir: {}
26.976608
49
0.482333
7376df22e0e0feaf54e2850bcc17fd174374f1b3
2,298
yaml
YAML
manifests/stateful_set.yaml
JoooostB/kube-trivy-exporter
14919852e4ef7abce26c04056c606759b658e00a
[ "MIT" ]
40
2020-01-17T22:39:07.000Z
2022-02-14T15:30:33.000Z
manifests/stateful_set.yaml
JoooostB/kube-trivy-exporter
14919852e4ef7abce26c04056c606759b658e00a
[ "MIT" ]
9
2019-10-22T22:16:37.000Z
2021-09-15T00:43:21.000Z
manifests/stateful_set.yaml
JoooostB/kube-trivy-exporter
14919852e4ef7abce26c04056c606759b658e00a
[ "MIT" ]
11
2020-05-09T16:44:04.000Z
2022-03-08T09:09:03.000Z
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: kube-trivy-exporter
spec:
  serviceName: kube-trivy-exporter
  replicas: 1
  updateStrategy:
    type: RollingUpdate
  selector:
    matchLabels:
      app: kube-trivy-exporter
  template:
    metadata:
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/schema: "http"
        prometheus.io/port: "9090"
        prometheus.io/path: "/metrics"
      labels:
        app: kube-trivy-exporter
    spec:
      serviceAccountName: kube-trivy-exporter
      securityContext:
        sysctls:
          # https://github.com/kubernetes/kubernetes/pull/54896
          #- name: net.core.somaxconn
          #  value: "65535"
          - name: net.ipv4.ip_local_port_range
            value: "10000 65535"
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: app
                      operator: In
                      values:
                        - kube-trivy-exporter
                topologyKey: kubernetes.io/hostname
              weight: 100
      containers:
        - name: kube-trivy-exporter
          image: ghcr.io/kaidotdev/kube-trivy-exporter:v1.1.1
          imagePullPolicy: Always
          args:
            - server
            - --api-address=0.0.0.0:8000
            - --monitor-address=0.0.0.0:9090
            - --enable-tracing
            - --trivy-concurrency=30
            - --collector-loop-interval=3600
          env:
            - name: GOGC
              value: "100"
          ports:
            - containerPort: 8000
            - containerPort: 9090
          readinessProbe:
            httpGet:
              path: /health
              port: 8000
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 1
            successThreshold: 3
            failureThreshold: 1
            timeoutSeconds: 1
          volumeMounts:
            - name: cache
              mountPath: /root/.cache/trivy
  volumeClaimTemplates:
    - metadata:
        name: cache
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 1Gi
28.02439
63
0.52611
7376fa6268f82d79d3242e47d1ca4a06fdd454bf
1,175
yaml
YAML
manifests/base/application-controller/argocd-application-controller-deployment.yaml
pchico83/argo-cd
3aa0748c70975941bc76232137a3ae9032a1f87d
[ "Apache-2.0" ]
11
2020-05-13T09:29:14.000Z
2022-03-31T08:08:36.000Z
manifests/base/application-controller/argocd-application-controller-deployment.yaml
pchico83/argo-cd
3aa0748c70975941bc76232137a3ae9032a1f87d
[ "Apache-2.0" ]
6
2021-04-13T08:59:25.000Z
2021-04-20T19:41:37.000Z
manifests/base/application-controller/argocd-application-controller-deployment.yaml
pchico83/argo-cd
3aa0748c70975941bc76232137a3ae9032a1f87d
[ "Apache-2.0" ]
8
2020-05-13T09:29:47.000Z
2021-11-06T11:39:11.000Z
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubernetes.io/name: argocd-application-controller
    app.kubernetes.io/part-of: argocd
    app.kubernetes.io/component: application-controller
  name: argocd-application-controller
spec:
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app.kubernetes.io/name: argocd-application-controller
  template:
    metadata:
      labels:
        app.kubernetes.io/name: argocd-application-controller
    spec:
      containers:
      - command:
        - argocd-application-controller
        - --status-processors
        - "20"
        - --operation-processors
        - "10"
        image: argoproj/argocd:latest
        imagePullPolicy: Always
        name: argocd-application-controller
        ports:
        - containerPort: 8082
        readinessProbe:
          httpGet:
            path: /healthz
            port: 8082
          initialDelaySeconds: 5
          periodSeconds: 10
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8082
          initialDelaySeconds: 5
          periodSeconds: 10
      serviceAccountName: argocd-application-controller
26.111111
61
0.621277
73771c5b004290698f4b09234bdafdd9bba7d996
256
yaml
YAML
namespaces/live-1.cloud-platform.service.justice.gov.uk/prison-to-nhs-update-prod/02-limitrange.yaml
rossjones/cloud-platform-environments
85a4e19cf012d97d885c6ea91379ca3f7b59a49a
[ "MIT" ]
28
2018-07-13T15:02:24.000Z
2022-03-17T09:23:54.000Z
namespaces/live-1.cloud-platform.service.justice.gov.uk/prison-to-nhs-update-prod/02-limitrange.yaml
rossjones/cloud-platform-environments
85a4e19cf012d97d885c6ea91379ca3f7b59a49a
[ "MIT" ]
963
2018-05-30T15:46:06.000Z
2022-03-30T10:06:39.000Z
namespaces/live-1.cloud-platform.service.justice.gov.uk/prison-to-nhs-update-prod/02-limitrange.yaml
rossjones/cloud-platform-environments
85a4e19cf012d97d885c6ea91379ca3f7b59a49a
[ "MIT" ]
29
2018-12-04T09:37:36.000Z
2022-02-25T10:35:34.000Z
apiVersion: v1
kind: LimitRange
metadata:
  name: limitrange
  namespace: prison-to-nhs-update-prod
spec:
  limits:
  - default:
      cpu: 2000m
      memory: 1024Mi
    defaultRequest:
      cpu: 10m
      memory: 512Mi
    type: Container
17.066667
38
0.621094
73771db1a31d62f38c83d3dfb342b57b0c7c2dd7
443
yaml
YAML
baseline/test-disallow-host-path.yaml
JimBugwadia/pod-security-tests
4c34b7cf329b6cc6b6eed860c3d79d2ce5708972
[ "Apache-2.0" ]
null
null
null
baseline/test-disallow-host-path.yaml
JimBugwadia/pod-security-tests
4c34b7cf329b6cc6b6eed860c3d79d2ce5708972
[ "Apache-2.0" ]
null
null
null
baseline/test-disallow-host-path.yaml
JimBugwadia/pod-security-tests
4c34b7cf329b6cc6b6eed860c3d79d2ce5708972
[ "Apache-2.0" ]
null
null
null
---
apiVersion: v1
kind: Pod
metadata:
  name: host-path-volumes
  labels:
    pod-security-standards/policy: "Baseline"
    pod-security-standards/control: "HostPath_Volumes"
spec:
  securityContext:
    runAsNonRoot: true
    runAsUser: 1000
  containers:
    - name: host-path
      image: kubernetes/pause
      volumeMounts:
        - name: hostdir
          mountPath: /tmp
  volumes:
    - name: hostdir
      hostPath:
        path: /root
19.26087
56
0.650113
73774a96b587166ec7a6a772967fcd46cd32f9b5
1,147
yml
YAML
e2e-tests/self-healing-advanced/conf/pumba.yml
bratao/percona-xtradb-cluster-operator
e2514483d704a0c993a797daf062541050e4d3a1
[ "Apache-2.0" ]
325
2019-03-11T08:05:50.000Z
2022-03-29T03:35:21.000Z
e2e-tests/self-healing-advanced/conf/pumba.yml
bratao/percona-xtradb-cluster-operator
e2514483d704a0c993a797daf062541050e4d3a1
[ "Apache-2.0" ]
645
2019-03-17T10:02:38.000Z
2022-03-31T17:33:51.000Z
e2e-tests/self-healing-advanced/conf/pumba.yml
bratao/percona-xtradb-cluster-operator
e2514483d704a0c993a797daf062541050e4d3a1
[ "Apache-2.0" ]
148
2019-03-17T07:16:58.000Z
2022-03-23T15:12:37.000Z
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pumba
spec:
  replicas: 1
  selector:
    matchLabels:
      name: pumba
  template:
    metadata:
      labels:
        name: pumba
        com.gaiaadm.pumba: "true" # prevent pumba from killing itself
    spec:
      affinity:
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchLabels:
                  statefulset.kubernetes.io/pod-name: self-healing-advanced-proxysql-0
              topologyKey: kubernetes.io/hostname
      containers:
        - name: pumba
          image: gaiaadm/pumba:latest
          imagePullPolicy: IfNotPresent
          command: ["/pumba", "--dry-run", "--interval", "2h", "pause", "--duration", "1h", "some-name-rs0-0" ]
          resources:
            requests:
              cpu: 10m
              memory: 5M
            limits:
              cpu: 100m
              memory: 20M
          volumeMounts:
            - name: dockersocket
              mountPath: /var/run/docker.sock
      volumes:
        - hostPath:
            path: /var/run/docker.sock
          name: dockersocket
27.309524
111
0.549259
73776aa1490305c34b5a2de9723ff208f6dccfe5
235
yaml
YAML
namespaces/live-1.cloud-platform.service.justice.gov.uk/alexhaslehurst-dev/02-limitrange.yaml
rossjones/cloud-platform-environments
85a4e19cf012d97d885c6ea91379ca3f7b59a49a
[ "MIT" ]
28
2018-07-13T15:02:24.000Z
2022-03-17T09:23:54.000Z
namespaces/live-1.cloud-platform.service.justice.gov.uk/alexhaslehurst-dev/02-limitrange.yaml
happygeneralist/recall-decisions-alpha
09124f3994a878e9969e7b4326088b2ae7bfd170
[ "MIT" ]
963
2018-05-30T15:46:06.000Z
2022-03-30T10:06:39.000Z
namespaces/live-1.cloud-platform.service.justice.gov.uk/alexhaslehurst-dev/02-limitrange.yaml
happygeneralist/recall-decisions-alpha
09124f3994a878e9969e7b4326088b2ae7bfd170
[ "MIT" ]
29
2018-12-04T09:37:36.000Z
2022-02-25T10:35:34.000Z
apiVersion: v1
kind: LimitRange
metadata:
  name: limitrange
  namespace: alexhaslehurst-dev
spec:
  limits:
  - default:
      cpu: 1000m
      memory: 1000Mi
    defaultRequest:
      cpu: 10m
      memory: 100Mi
    type: Container
15.666667
31
0.659574
7377b8adeba2f1f9079bddec0c78cca84a2187cc
1,689
yaml
YAML
mbox-operator/config/crd/bases/apps.fedoraproject.org_mbkojihubs.yaml
Zlopez/mbbox
bc7cbdb74fd9ec0071617bbcf15f32f0e44935ac
[ "MIT" ]
12
2020-02-19T01:49:02.000Z
2021-06-22T12:47:33.000Z
mbox-operator/config/crd/bases/apps.fedoraproject.org_mbkojihubs.yaml
Zlopez/mbbox
bc7cbdb74fd9ec0071617bbcf15f32f0e44935ac
[ "MIT" ]
146
2020-02-25T13:08:48.000Z
2021-04-13T07:02:05.000Z
mbox-operator/config/crd/bases/apps.fedoraproject.org_mbkojihubs.yaml
Zlopez/mbbox
bc7cbdb74fd9ec0071617bbcf15f32f0e44935ac
[ "MIT" ]
7
2020-02-19T08:52:05.000Z
2021-01-29T07:32:31.000Z
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: mbkojihubs.apps.fedoraproject.org
spec:
  group: apps.fedoraproject.org
  names:
    kind: MBKojiHub
    listKind: MBKojiHubList
    plural: mbkojihubs
    singular: mbkojihub
  scope: Namespaced
  versions:
  - name: v1alpha1
    schema:
      openAPIV3Schema:
        description: MBKojiHub is the Schema for the mbkojihubs API
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
              of an object. Servers should convert recognized schemas to the latest
              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this
              object represents. Servers may infer this from the endpoint the client
              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          metadata:
            type: object
          spec:
            description: Spec defines the desired state of MBKojiHub
            type: object
            x-kubernetes-preserve-unknown-fields: true
          status:
            description: Status defines the observed state of MBKojiHub
            type: object
            x-kubernetes-preserve-unknown-fields: true
        type: object
    served: true
    storage: true
    subresources:
      status: {}
37.533333
175
0.66489
7377cb9082670a159e5bf9fef797f78bcb561a7b
6,483
yaml
YAML
socialNetwork/helm-chart/charts/post-storage-memcached/templates/configmap.yaml
SuvigyaJain1/DeathStarBench
1888b29a45521cd32c99baadafdacffe2a42508d
[ "Apache-2.0" ]
null
null
null
socialNetwork/helm-chart/charts/post-storage-memcached/templates/configmap.yaml
SuvigyaJain1/DeathStarBench
1888b29a45521cd32c99baadafdacffe2a42508d
[ "Apache-2.0" ]
null
null
null
socialNetwork/helm-chart/charts/post-storage-memcached/templates/configmap.yaml
SuvigyaJain1/DeathStarBench
1888b29a45521cd32c99baadafdacffe2a42508d
[ "Apache-2.0" ]
null
null
null
---
# Source: social-network/charts/post-storage-memcached/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: post-storage-memcached
  labels:
    socialnetwork/service: post-storage-memcached
data:
  jaeger-config.yml: |
    disabled: true
    reporter:
      logSpans: false
      localAgentHostPort: "jaeger:6831"
      queueSize: 1000000
      bufferFlushInterval: 10
    sampler:
      type: "probabilistic"
      param: 0.01
  service-config.json: |
    {
      "secret": "secret",
      "social-graph-service": {
        "addr": "social-graph-service",
        "port": 9090,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "social-graph-mongodb": {
        "addr": "social-graph-mongodb",
        "port": 27017,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "social-graph-redis": {
        "addr": "social-graph-redis",
        "port": 6379,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "write-home-timeline-service": {
        "addr": "write-home-timeline-service",
        "port": 9090,
        "workers": 32,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "write-home-timeline-rabbitmq": {
        "addr": "write-home-timeline-rabbitmq",
        "port": 5672,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "home-timeline-redis": {
        "addr": "home-timeline-redis",
        "port": 6379,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "compose-post-service": {
        "addr": "compose-post-service",
        "port": 9090,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "compose-post-redis": {
        "addr": "compose-post-redis",
        "port": 6379,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "user-timeline-service": {
        "addr": "user-timeline-service",
        "port": 9090,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "user-timeline-mongodb": {
        "addr": "user-timeline-mongodb",
        "port": 27017,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "user-timeline-redis": {
        "addr": "user-timeline-redis",
        "port": 6379,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "post-storage-service": {
        "addr": "post-storage-service",
        "port": 9090,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "post-storage-mongodb": {
        "addr": "post-storage-mongodb",
        "port": 27017,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "post-storage-memcached": {
        "addr": "post-storage-memcached",
        "port": 11211,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "unique-id-service": {
        "addr": "unique-id-service",
        "port": 9090,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000,
        "netif": "eth0"
      },
      "media-service": {
        "addr": "media-service",
        "port": 9090,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "media-mongodb": {
        "addr": "media-mongodb",
        "port": 27017,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "media-memcached": {
        "addr": "media-memcached",
        "port": 11211,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "media-frontend": {
        "addr": "media-frontend",
        "port": 8081,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "text-service": {
        "addr": "text-service",
        "port": 9090,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "user-mention-service": {
        "addr": "user-mention-service",
        "port": 9090,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "url-shorten-service": {
        "addr": "url-shorten-service",
        "port": 9090,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "url-shorten-memcached": {
        "addr": "url-shorten-memcached",
        "port": 11211,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "url-shorten-mongodb": {
        "addr": "url-shorten-mongodb",
        "port": 27017,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "user-service": {
        "addr": "user-service",
        "port": 9090,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000,
        "netif": "eth0"
      },
      "user-memcached": {
        "addr": "user-memcached",
        "port": 11211,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "user-mongodb": {
        "addr": "user-mongodb",
        "port": 27017,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "home-timeline-service": {
        "addr": "home-timeline-service",
        "port": 9090,
        "connections": 512,
        "timeout_ms": 10000,
        "keepalive_ms": 10000
      },
      "ssl": {
        "enabled": false,
        "caPath": "/keys/CA.pem",
        "ciphers": "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH",
        "serverKeyPath": "/keys/server.key",
        "serverCertPath": "/keys/server.crt"
      }
    }
28.186957
79
0.47077
7377e346867ef9689ce770739af3f1e14fe24b3f
449
yaml
YAML
scripts/openshift/galaxy-ui-deployment.yaml
mansong1/galaxy
a113b8154dd354ee4da7052a4f09034b85ad90da
[ "Apache-2.0" ]
null
null
null
scripts/openshift/galaxy-ui-deployment.yaml
mansong1/galaxy
a113b8154dd354ee4da7052a4f09034b85ad90da
[ "Apache-2.0" ]
null
null
null
scripts/openshift/galaxy-ui-deployment.yaml
mansong1/galaxy
a113b8154dd354ee4da7052a4f09034b85ad90da
[ "Apache-2.0" ]
null
null
null
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: galaxy-ui
  name: galaxy-ui
spec:
  replicas: 1
  selector:
    matchLabels:
      app: galaxy-ui
  template:
    metadata:
      labels:
        app: galaxy-ui
    spec:
      containers:
      - image: mansong/galaxy-static:latest
        imagePullPolicy: "Always"
        name: galaxy-ui
        ports:
        - containerPort: 80
        resources: {}
      restartPolicy: Always
18.708333
43
0.596882
73780b7c0231a43b60ed40eda4dc0efb1d08895c
1,409
yaml
YAML
manifests/deployment.yaml
simonlam008/azch-captureorder
df859e5bc99997084fb36d25cedfdf1761980a29
[ "MIT" ]
null
null
null
manifests/deployment.yaml
simonlam008/azch-captureorder
df859e5bc99997084fb36d25cedfdf1761980a29
[ "MIT" ]
null
null
null
manifests/deployment.yaml
simonlam008/azch-captureorder
df859e5bc99997084fb36d25cedfdf1761980a29
[ "MIT" ]
null
null
null
apiVersion: apps/v1
kind: Deployment
metadata:
  name: captureorder
spec:
  selector:
    matchLabels:
      app: captureorder
  replicas: 2
  template:
    metadata:
      labels:
        app: captureorder
    spec:
      containers:
      - name: captureorder
        image: acrcluster2.azurecr.io/captureorder # replace with your own repository
        imagePullPolicy: Always
        readinessProbe:
          httpGet:
            port: 8080
            path: /healthz
        livenessProbe:
          httpGet:
            port: 8080
            path: /healthz
        resources:
          requests:
            memory: "64Mi"
            cpu: "100m"
          limits:
            memory: "128Mi"
            cpu: "500m"
        env:
        - name: TEAMNAME
          value: 'placeholder' # will be patched at deployment time by value of 'teamName' in the variable group
        - name: MONGOHOST
          valueFrom:
            secretKeyRef:
              name: mongodb
              key: mongoHost
        - name: MONGOUSER
          valueFrom:
            secretKeyRef:
              name: mongodb
              key: mongoUser
        - name: MONGOPASSWORD
          valueFrom:
            secretKeyRef:
              name: mongodb
              key: mongoPassword
        ports:
        - containerPort: 80
26.092593
114
0.48616
7378221a449ce3e901963866aee9d946fa67f522
144
yaml
YAML
built-in-references/Kubernetes/perf/violations/violation4382.yaml
sw47/azure-policy
4a38e4cb4047b5364902653f0defe45f1b21bc61
[ "MIT" ]
null
null
null
built-in-references/Kubernetes/perf/violations/violation4382.yaml
sw47/azure-policy
4a38e4cb4047b5364902653f0defe45f1b21bc61
[ "MIT" ]
null
null
null
built-in-references/Kubernetes/perf/violations/violation4382.yaml
sw47/azure-policy
4a38e4cb4047b5364902653f0defe45f1b21bc61
[ "MIT" ]
null
null
null
apiVersion: v1
kind: Pod
metadata:
  name: image-demo-4382
spec:
  containers:
  - name: nginx
    image: nginx #ritaacr.azurecr.io/nginx:latest
18
49
0.722222
7378d7993d3e5791dcbef5c73dd4398c3ec041a3
8,051
yaml
YAML
besu-kubernetes/besu-operator/deploy/crds/basiccrds/hyperledger.org_besunodes_crd.yaml
freight-trust/infraops
268c447733189cadada34e266148eb31689eacfb
[ "MIT" ]
null
null
null
besu-kubernetes/besu-operator/deploy/crds/basiccrds/hyperledger.org_besunodes_crd.yaml
freight-trust/infraops
268c447733189cadada34e266148eb31689eacfb
[ "MIT" ]
null
null
null
besu-kubernetes/besu-operator/deploy/crds/basiccrds/hyperledger.org_besunodes_crd.yaml
freight-trust/infraops
268c447733189cadada34e266148eb31689eacfb
[ "MIT" ]
1
2022-03-16T11:32:00.000Z
2022-03-16T11:32:00.000Z
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: besunodes.hyperledger.org
spec:
  group: hyperledger.org
  names:
    kind: BesuNode
    listKind: BesuNodeList
    plural: besunodes
    singular: besunode
  scope: Namespaced
  versions:
  - name: v1alpha1
    schema:
      openAPIV3Schema:
        description: BesuNode is the Schema for the besunodes API
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
              of an object. Servers should convert recognized schemas to the latest
              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this
              object represents. Servers may infer this from the endpoint the client
              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          metadata:
            type: object
          spec:
            description: BesuNodeSpec defines the desired state of BesuNode
            properties:
              bootnodes:
                type: integer
              graphql:
                default:
                  authenticationEnabled: false
                  enabled: false
                  host: 0.0.0.0
                  port: 8547
                description: GraphQl
                properties:
                  api:
                    type: string
                  authenticationEnabled:
                    type: boolean
                  corsOrigins:
                    type: string
                  discovery:
                    type: boolean
                  enabled:
                    description: Port is enabled or not
                    type: boolean
                  host:
                    description: Host
                    type: string
                  port:
                    description: Port
                    type: integer
                type: object
              httpwhitelist:
                description: Defaults to ["*"]
                type: string
              image:
                default:
                  pullPolicy: IfNotPresent
                  repository: hyperledger/besu
                  tag: 1.4.6
                description: Besu Image Configuration
                properties:
                  pullPolicy:
                    default: IfNotPresent
                    description: Image pull policy
                    type: string
                  repository:
                    description: Image repository
                    type: string
                  tag:
                    description: Image tag
                    type: string
                type: object
              metrics:
                default:
                  enabled: true
                  host: 0.0.0.0
                  port: 9545
                description: PortConfig defines port configurations of different types
                  of ports
                properties:
                  api:
                    type: string
                  authenticationEnabled:
                    type: boolean
                  corsOrigins:
                    type: string
                  discovery:
                    type: boolean
                  enabled:
                    description: Port is enabled or not
                    type: boolean
                  host:
                    description: Host
                    type: string
                  port:
                    description: Port
                    type: integer
                type: object
              p2p:
                default:
                  authenticationEnabled: false
                  discovery: true
                  enabled: true
                  host: 0.0.0.0
                  port: 30303
                description: P2P Port configuration
                properties:
                  api:
                    type: string
                  authenticationEnabled:
                    type: boolean
                  corsOrigins:
                    type: string
                  discovery:
                    type: boolean
                  enabled:
                    description: Port is enabled or not
                    type: boolean
                  host:
                    description: Host
                    type: string
                  port:
                    description: Port
                    type: integer
                type: object
              pvcSizeLimit:
                description: Size of the Volume
                type: string
              pvcStorageClass:
                description: Storage class of the Volume
                type: string
              replicas:
                default: 2
                description: Number of replica pods corresponding to this node
                format: int32
                type: integer
              resources:
                default:
                  cpuLimit: 500m
                  cpuRequest: 100m
                  memLimit: 2048Mi
                  memRequest: 1024Mi
                description: Requests and limits
                properties:
                  cpuLimit:
                    description: CPU Limit
                    type: string
                  cpuRequest:
                    description: CPU Request
                    type: string
                  memLimit:
                    description: Memory Limit
                    type: string
                  memRequest:
                    description: Memory Request
                    type: string
                type: object
              rpc:
                default:
                  authenticationEnabled: false
                  enabled: true
                  host: 0.0.0.0
                  port: 8545
                description: RPC Port Configuration
                properties:
                  api:
                    type: string
                  authenticationEnabled:
                    type: boolean
                  corsOrigins:
                    type: string
                  discovery:
                    type: boolean
                  enabled:
                    description: Port is enabled or not
                    type: boolean
                  host:
                    description: Host
                    type: string
                  port:
                    description: Port
                    type: integer
                type: object
              type:
                default: Member
                description: 'Type of node, takes one of the values : Bootnode, Validator,
                  Member'
                type: string
              ws:
                default:
                  authenticationEnabled: false
                  enabled: false
                  host: 0.0.0.0
                  port: 8546
                description: WS
                properties:
                  api:
                    type: string
                  authenticationEnabled:
                    type: boolean
                  corsOrigins:
                    type: string
                  discovery:
                    type: boolean
                  enabled:
                    description: Port is enabled or not
                    type: boolean
                  host:
                    description: Host
                    type: string
                  port:
                    description: Port
                    type: integer
                type: object
            type: object
          status:
            description: BesuNodeStatus defines the observed state of BesuNode
            type: object
        type: object
    served: true
    storage: true
    subresources:
      status: {}
34.405983
175
0.432865
73790e5b37481207d289dfe0b82e9b250a108a36
411
yaml
YAML
config/rbac/profileinstallation_editor_role.yaml
Callisto13/profiles
3d382e954e6274b2ad2d79d8631d41a4563a0f36
[ "Apache-2.0" ]
13
2021-02-28T20:53:00.000Z
2022-01-18T11:54:30.000Z
config/rbac/profileinstallation_editor_role.yaml
Callisto13/profiles
3d382e954e6274b2ad2d79d8631d41a4563a0f36
[ "Apache-2.0" ]
215
2021-03-05T16:18:31.000Z
2021-12-13T04:43:05.000Z
config/rbac/profileinstallation_editor_role.yaml
aclevername/profiles
258a2cf2c51b0fffd6483106c6cc0e01125a0c30
[ "Apache-2.0" ]
8
2021-03-08T12:03:31.000Z
2021-08-22T16:37:53.000Z
# permissions for end users to edit profileinstallations.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: profileinstallation-editor-role
rules:
- apiGroups:
  - weave.works
  resources:
  - profileinstallations
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - weave.works
  resources:
  - profileinstallations/status
  verbs:
  - get
16.44
57
0.712895
737936903dc52daa29eb63ab5c3ea825de529fd4
290
yml
YAML
infrastructure/kubernetes/UserManagement/role.yml
amikshas/gs-spring-boot-docker
cc9944cdd88590855649332e8299740f70f63c20
[ "Apache-2.0" ]
null
null
null
infrastructure/kubernetes/UserManagement/role.yml
amikshas/gs-spring-boot-docker
cc9944cdd88590855649332e8299740f70f63c20
[ "Apache-2.0" ]
null
null
null
infrastructure/kubernetes/UserManagement/role.yml
amikshas/gs-spring-boot-docker
cc9944cdd88590855649332e8299740f70f63c20
[ "Apache-2.0" ]
null
null
null
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: <namespace_name>
  name: user1-role
rules:
- apiGroups: ["", "extensions", "apps"]
  resources: ["pods"]
  verbs: ["get", "list", "get", "watch", "create", "update", "delete"]
29
75
0.568966
737a0c931da01d92ed0787767b2124ee96be6013
823
yaml
YAML
experimental/kubernetes/simple/pods/data-redis-master.yaml
enterstudio/spinnaker
66d0952f09efa2228aa45e6fe669bb3504415467
[ "Apache-2.0" ]
4
2015-11-30T21:52:01.000Z
2018-07-30T22:10:01.000Z
experimental/kubernetes/simple/pods/data-redis-master.yaml
enterstudio/spinnaker
66d0952f09efa2228aa45e6fe669bb3504415467
[ "Apache-2.0" ]
null
null
null
experimental/kubernetes/simple/pods/data-redis-master.yaml
enterstudio/spinnaker
66d0952f09efa2228aa45e6fe669bb3504415467
[ "Apache-2.0" ]
6
2015-11-22T04:00:53.000Z
2019-03-21T08:09:34.000Z
apiVersion: v1
kind: Pod
metadata:
  labels:
    replication-controller: data-redis-server-v000
    replication-controller-prime: data-redis-sentinel-v000
    load-balancer-data-redis-sentinel: "true"
  name: data-redis-master-v000-init
  namespace: spinnaker
spec:
  containers:
  - name: master
    image: gcr.io/kubernetes-spinnaker/redis-cluster:v2
    env:
    - name: MASTER
      value: "true"
    ports:
    - containerPort: 6379
    resources:
      limits:
        cpu: "0.1"
    volumeMounts:
    - mountPath: /redis-master-data
      name: data
  - name: sentinel
    image: gcr.io/kubernetes-spinnaker/redis-cluster:v2
    env:
    - name: SENTINEL
      value: "true"
    ports:
    - containerPort: 26379
  volumes:
  - name: data
    emptyDir: {}
23.514286
58
0.605103
737a99c6b2c65007c2ab4f79d7cfd117b2a09451
412
yaml
YAML
installer/kubevirt/pengclone/deb-pod.yaml
iorchard/pengrixio
65d01799296fce043e87ba58106f8fa8c1d8aa98
[ "MIT" ]
null
null
null
installer/kubevirt/pengclone/deb-pod.yaml
iorchard/pengrixio
65d01799296fce043e87ba58106f8fa8c1d8aa98
[ "MIT" ]
null
null
null
installer/kubevirt/pengclone/deb-pod.yaml
iorchard/pengrixio
65d01799296fce043e87ba58106f8fa8c1d8aa98
[ "MIT" ]
null
null
null
---
apiVersion: v1
kind: Pod
metadata:
  name: debian
  namespace: default
spec:
  containers:
    - name: pengclone
      image: debian:buster-slim
      imagePullPolicy: IfNotPresent
      command:
        - sleep
        - "36000"
      volumeMounts:
        - mountPath: "/mnt"
          name: jijisa-win10
  volumes:
    - name: jijisa-win10
      persistentVolumeClaim:
        claimName: jijisa-win10
...
17.913043
35
0.599515
737aa4259b1d7a71f56219cefc77e062e03f6344
1,233
yml
YAML
templates/postgres-statefulset.template.yml
conradoqg/cvm-fund-explorer-stack
abd08a5305dd1716788aa999548b5ce5a4fa8aaa
[ "MIT" ]
1
2019-09-10T02:57:27.000Z
2019-09-10T02:57:27.000Z
templates/postgres-statefulset.template.yml
conradoqg/cvm-fund-explorer-stack
abd08a5305dd1716788aa999548b5ce5a4fa8aaa
[ "MIT" ]
null
null
null
templates/postgres-statefulset.template.yml
conradoqg/cvm-fund-explorer-stack
abd08a5305dd1716788aa999548b5ce5a4fa8aaa
[ "MIT" ]
2
2021-11-26T11:51:09.000Z
2022-01-08T22:32:58.000Z
# kubetpl:syntax:$
# kubetpl:set:POSTGRES_DB=cvmData
# kubetpl:set:POSTGRES_USER=postgres
# kubetpl:set:POSTGRES_PASSWORD=temporary
apiVersion: apps/v1
kind: StatefulSet
metadata:
  namespace: meajudafi
  name: postgres-statefulset
spec:
  selector:
    matchLabels:
      app: postgres
  serviceName: postgres-service
  replicas: 1
  template:
    metadata:
      labels:
        app: postgres
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: postgres-pod
        image: postgres:11-alpine
        ports:
        - containerPort: 5432
          name: prot
        volumeMounts:
        - name: postgres-data
          mountPath: /var/lib/postgresql/data
        - name: dshm
          mountPath: /dev/shm
        env:
        - name: POSTGRES_DB
          value: $POSTGRES_DB
        - name: POSTGRES_USER
          value: $POSTGRES_USER
        - name: POSTGRES_PASSWORD
          value: $POSTGRES_PASSWORD
      volumes:
      - name: dshm
        emptyDir:
          medium: Memory
  volumeClaimTemplates:
  - metadata:
      name: postgres-data
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1000Gi
23.711538
45
0.592052
737aa69df5d06f8cc294716125e58c10b020dc3c
587
yaml
YAML
manifests/0011_service_special-resource-controller-manager-metrics-service.yaml
andymcc/special-resource-operator
4bf1926c1b49df2fae4d9d38f5f6dc95493ec079
[ "Apache-2.0" ]
null
null
null
manifests/0011_service_special-resource-controller-manager-metrics-service.yaml
andymcc/special-resource-operator
4bf1926c1b49df2fae4d9d38f5f6dc95493ec079
[ "Apache-2.0" ]
null
null
null
manifests/0011_service_special-resource-controller-manager-metrics-service.yaml
andymcc/special-resource-operator
4bf1926c1b49df2fae4d9d38f5f6dc95493ec079
[ "Apache-2.0" ]
null
null
null
apiVersion: v1
kind: Service
metadata:
  annotations:
    include.release.openshift.io/ibm-cloud-managed: "true"
    include.release.openshift.io/self-managed-high-availability: "true"
    include.release.openshift.io/single-node-developer: "true"
    service.beta.openshift.io/serving-cert-secret-name: special-resource-operator-tls
  labels:
    control-plane: controller-manager
  name: special-resource-controller-manager-metrics-service
  namespace: openshift-sro
spec:
  ports:
  - name: https
    port: 8443
    targetPort: https
  selector:
    control-plane: controller-manager
29.35
85
0.754685