diff --git a/go.mod b/go.mod
index 3020bed8d..29680ccb5 100644
--- a/go.mod
+++ b/go.mod
@@ -7,36 +7,36 @@ require (
github.com/disiqueira/gotree v1.0.0
github.com/fatih/color v1.16.0
github.com/ghodss/yaml v1.0.0
- github.com/gofrs/flock v0.8.1
+ github.com/gofrs/flock v0.12.1
github.com/google/uuid v1.6.0
github.com/jonboulle/clockwork v0.4.0
- github.com/onsi/ginkgo/v2 v2.22.2
- github.com/onsi/gomega v1.36.2
- github.com/openshift/library-go v0.0.0-20241107160307-0064ad7bd060
+ github.com/onsi/ginkgo/v2 v2.23.4
+ github.com/onsi/gomega v1.36.3
+ github.com/openshift/library-go v0.0.0-20250228164547-bad2d1bf3a37
github.com/pkg/errors v0.9.1
github.com/spf13/cobra v1.8.1
github.com/spf13/pflag v1.0.5
google.golang.org/grpc v1.67.0
gopkg.in/yaml.v2 v2.4.0
- helm.sh/helm/v3 v3.16.3
- k8s.io/api v0.32.2
- k8s.io/apiextensions-apiserver v0.32.1
- k8s.io/apimachinery v0.32.2
- k8s.io/cli-runtime v0.31.1
- k8s.io/client-go v0.32.2
- k8s.io/component-base v0.32.1
+ helm.sh/helm/v3 v3.17.3
+ k8s.io/api v0.32.3
+ k8s.io/apiextensions-apiserver v0.32.2
+ k8s.io/apimachinery v0.32.3
+ k8s.io/cli-runtime v0.32.2
+ k8s.io/client-go v0.32.3
+ k8s.io/component-base v0.32.3
k8s.io/klog/v2 v2.130.1
- k8s.io/kubectl v0.31.1
+ k8s.io/kubectl v0.32.2
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
- open-cluster-management.io/api v0.16.1
+ open-cluster-management.io/api v0.16.2-0.20250422072120-cadf714c3055
open-cluster-management.io/cluster-proxy v0.7.0
open-cluster-management.io/managed-serviceaccount v0.8.0
- open-cluster-management.io/ocm v0.16.0
- open-cluster-management.io/sdk-go v0.16.0
+ open-cluster-management.io/ocm v0.16.1-0.20250422150056-f4b6dcb15929
+ open-cluster-management.io/sdk-go v0.16.1-0.20250411154302-3a424961ead4
sigs.k8s.io/apiserver-network-proxy v0.29.0
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0
sigs.k8s.io/controller-runtime v0.20.2
- sigs.k8s.io/kustomize/kyaml v0.17.1
+ sigs.k8s.io/kustomize/kyaml v0.18.1
)
require (
@@ -54,7 +54,7 @@ require (
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/chai2010/gettext-go v1.0.2 // indirect
- github.com/containerd/containerd v1.7.23 // indirect
+ github.com/containerd/containerd v1.7.24 // indirect
github.com/containerd/errdefs v0.3.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/containerd/log v0.1.0 // indirect
@@ -69,9 +69,9 @@ require (
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
- github.com/evanphx/json-patch v5.9.0+incompatible // indirect
+ github.com/evanphx/json-patch v5.9.11+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
- github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
+ github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/fatih/camelcase v1.0.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
@@ -88,9 +88,9 @@ require (
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
- github.com/google/go-cmp v0.6.0 // indirect
+ github.com/google/go-cmp v0.7.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
- github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
+ github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/gorilla/websocket v1.5.1 // indirect
@@ -103,7 +103,7 @@ require (
github.com/jmoiron/sqlx v1.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.17.11 // indirect
+ github.com/klauspost/compress v1.18.0 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
github.com/lib/pq v1.10.9 // indirect
@@ -125,13 +125,13 @@ require (
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
- github.com/openshift/api v0.0.0-20241001152557-e415140e5d5f // indirect
+ github.com/openshift/api v0.0.0-20250124212313-a770960d61e0 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
- github.com/prometheus/client_golang v1.20.5 // indirect
+ github.com/prometheus/client_golang v1.22.0 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
- github.com/prometheus/common v0.55.0 // indirect
+ github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
- github.com/rubenv/sql-migrate v1.7.0 // indirect
+ github.com/rubenv/sql-migrate v1.7.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
@@ -145,27 +145,28 @@ require (
go.opentelemetry.io/otel v1.28.0 // indirect
go.opentelemetry.io/otel/metric v1.28.0 // indirect
go.opentelemetry.io/otel/trace v1.28.0 // indirect
- go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
- golang.org/x/crypto v0.32.0 // indirect
- golang.org/x/net v0.34.0 // indirect
- golang.org/x/oauth2 v0.23.0 // indirect
- golang.org/x/sync v0.10.0 // indirect
- golang.org/x/sys v0.29.0 // indirect
- golang.org/x/term v0.28.0 // indirect
- golang.org/x/text v0.21.0 // indirect
+ go.uber.org/automaxprocs v1.6.0 // indirect
+ golang.org/x/crypto v0.36.0 // indirect
+ golang.org/x/net v0.37.0 // indirect
+ golang.org/x/oauth2 v0.28.0 // indirect
+ golang.org/x/sync v0.12.0 // indirect
+ golang.org/x/sys v0.32.0 // indirect
+ golang.org/x/term v0.30.0 // indirect
+ golang.org/x/text v0.23.0 // indirect
golang.org/x/time v0.7.0 // indirect
- golang.org/x/tools v0.28.0 // indirect
+ golang.org/x/tools v0.31.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
- google.golang.org/protobuf v1.36.1 // indirect
+ google.golang.org/protobuf v1.36.5 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- k8s.io/apiserver v0.32.1 // indirect
+ k8s.io/apiserver v0.32.3 // indirect
+ k8s.io/kube-aggregator v0.32.1 // indirect
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
oras.land/oras-go v1.2.5 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 // indirect
- sigs.k8s.io/kustomize/api v0.17.2 // indirect
+ sigs.k8s.io/kustomize/api v0.18.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)
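
Note on the manifest above: the k8s.io modules move together to the v0.32.2/v0.32.3 line, Helm goes to v3.17.3, and the three open-cluster-management.io modules are pinned to pre-release pseudo-versions of unreleased commits. In the indirect set, go.starlark.net drops out with the kustomize bump while go.uber.org/automaxprocs (plus github.com/prashantv/gostub in go.sum) comes in, apparently via the ginkgo v2.23 bump; k8s.io/kube-aggregator is a new indirect dependency, likely via library-go. The usual re-sync after a bump like this (assuming the repo vendors dependencies, as the vendor/ hunks below indicate):

```sh
go mod tidy      # reconcile the require blocks and indirect markers
go mod vendor    # regenerate vendor/ to match go.mod
go mod verify    # re-check module checksums against go.sum
```
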
diff --git a/go.sum b/go.sum
index 3fa8a5756..59faaa34d 100644
--- a/go.sum
+++ b/go.sum
@@ -1,4 +1,3 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
@@ -7,7 +6,6 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
@@ -50,20 +48,15 @@ github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembj
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o=
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk=
github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0=
github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE=
-github.com/containerd/containerd v1.7.23 h1:H2CClyUkmpKAGlhQp95g2WXHfLYc7whAuvZGBNYOOwQ=
-github.com/containerd/containerd v1.7.23/go.mod h1:7QUzfURqZWCZV7RLNEn1XjUCQLEf0bkaK4GjUaZehxw=
+github.com/containerd/containerd v1.7.24 h1:zxszGrGjrra1yYJW/6rhm9cJ1ZQ8rkKBR48brqsa7nA=
+github.com/containerd/containerd v1.7.24/go.mod h1:7QUzfURqZWCZV7RLNEn1XjUCQLEf0bkaK4GjUaZehxw=
github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM=
github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
@@ -111,14 +104,12 @@ github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
-github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8=
+github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
-github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM=
-github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
+github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4=
+github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc=
github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
@@ -164,24 +155,16 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
-github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
-github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
+github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k=
@@ -190,20 +173,15 @@ github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
-github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
+github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
+github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -242,8 +220,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
-github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@@ -309,18 +287,18 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
-github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
-github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
-github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
+github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus=
+github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8=
+github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU=
+github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
-github.com/openshift/api v0.0.0-20241001152557-e415140e5d5f h1:ya1OmyZm3LIIxI3U9VE9Nyx3ehCHgBwxyFUPflYPWls=
-github.com/openshift/api v0.0.0-20241001152557-e415140e5d5f/go.mod h1:Shkl4HanLwDiiBzakv+con/aMGnVE2MAGvoKp5oyYUo=
-github.com/openshift/library-go v0.0.0-20241107160307-0064ad7bd060 h1:jiDC7d8d+jmjv2WfiMY0+Uf55q11MGyYkGGqXnfqWTU=
-github.com/openshift/library-go v0.0.0-20241107160307-0064ad7bd060/go.mod h1:9B1MYPoLtP9tqjWxcbUNVpwxy68zOH/3EIP6c31dAM0=
+github.com/openshift/api v0.0.0-20250124212313-a770960d61e0 h1:dCvNfygMrPLVNQ06bpHXrxKfrXHiprO4+etHrRUqI8g=
+github.com/openshift/api v0.0.0-20250124212313-a770960d61e0/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw=
+github.com/openshift/library-go v0.0.0-20250228164547-bad2d1bf3a37 h1:/YhxswjRkADbww682dqckHddV/AVfwQHFn/XTf2fjsk=
+github.com/openshift/library-go v0.0.0-20250228164547-bad2d1bf3a37/go.mod h1:GHwvopE5KXXCz4ULHp871sTPLLW+FB+hu/RIzlNwxx8=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI=
@@ -333,20 +311,21 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY=
github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg=
+github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
+github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
-github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
-github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
+github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
@@ -354,8 +333,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
-github.com/rubenv/sql-migrate v1.7.0 h1:HtQq1xyTN2ISmQDggnh0c9U3JlP8apWh8YO2jzlXpTI=
-github.com/rubenv/sql-migrate v1.7.0/go.mod h1:S4wtDEG1CKn+0ShpTtzWhFpHHI5PvCUtiGI+C+Z2THE=
+github.com/rubenv/sql-migrate v1.7.1 h1:f/o0WgfO/GqNuVg+6801K/KW3WdDSupzSjDYODmiUq4=
+github.com/rubenv/sql-migrate v1.7.1/go.mod h1:Ob2Psprc0/3ggbM6wCzyYVFFuc6FyZrb2AS+ezLDFb4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
@@ -374,10 +353,12 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
@@ -413,8 +394,8 @@ go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6b
go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
-go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY=
-go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds=
+go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
+go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -425,100 +406,67 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
-golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
+golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
-golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
+golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
-golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
-golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
+golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
+golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
-golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
+golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
-golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
-golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
+golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
+golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
+golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
-golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
+golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
-golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
+golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU=
+golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw=
google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
-google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
+google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
@@ -535,42 +483,42 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=
-helm.sh/helm/v3 v3.16.3 h1:kb8bSxMeRJ+knsK/ovvlaVPfdis0X3/ZhYCSFRP+YmY=
-helm.sh/helm/v3 v3.16.3/go.mod h1:zeVWGDR4JJgiRbT3AnNsjYaX8OTJlIE9zC+Q7F7iUSU=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-k8s.io/api v0.32.2 h1:bZrMLEkgizC24G9eViHGOPbW+aRo9duEISRIJKfdJuw=
-k8s.io/api v0.32.2/go.mod h1:hKlhk4x1sJyYnHENsrdCWw31FEmCijNGPJO5WzHiJ6Y=
-k8s.io/apiextensions-apiserver v0.32.1 h1:hjkALhRUeCariC8DiVmb5jj0VjIc1N0DREP32+6UXZw=
-k8s.io/apiextensions-apiserver v0.32.1/go.mod h1:sxWIGuGiYov7Io1fAS2X06NjMIk5CbRHc2StSmbaQto=
-k8s.io/apimachinery v0.32.2 h1:yoQBR9ZGkA6Rgmhbp/yuT9/g+4lxtsGYwW6dR6BDPLQ=
-k8s.io/apimachinery v0.32.2/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
-k8s.io/apiserver v0.32.1 h1:oo0OozRos66WFq87Zc5tclUX2r0mymoVHRq8JmR7Aak=
-k8s.io/apiserver v0.32.1/go.mod h1:UcB9tWjBY7aryeI5zAgzVJB/6k7E97bkr1RgqDz0jPw=
-k8s.io/cli-runtime v0.31.1 h1:/ZmKhmZ6hNqDM+yf9s3Y4KEYakNXUn5sod2LWGGwCuk=
-k8s.io/cli-runtime v0.31.1/go.mod h1:pKv1cDIaq7ehWGuXQ+A//1OIF+7DI+xudXtExMCbe9U=
-k8s.io/client-go v0.32.2 h1:4dYCD4Nz+9RApM2b/3BtVvBHw54QjMFUl1OLcJG5yOA=
-k8s.io/client-go v0.32.2/go.mod h1:fpZ4oJXclZ3r2nDOv+Ux3XcJutfrwjKTCHz2H3sww94=
-k8s.io/component-base v0.32.1 h1:/5IfJ0dHIKBWysGV0yKTFfacZ5yNV1sulPh3ilJjRZk=
-k8s.io/component-base v0.32.1/go.mod h1:j1iMMHi/sqAHeG5z+O9BFNCF698a1u0186zkjMZQ28w=
+helm.sh/helm/v3 v3.17.3 h1:3n5rW3D0ArjFl0p4/oWO8IbY/HKaNNwJtOQFdH2AZHg=
+helm.sh/helm/v3 v3.17.3/go.mod h1:+uJKMH/UiMzZQOALR3XUf3BLIoczI2RKKD6bMhPh4G8=
+k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls=
+k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k=
+k8s.io/apiextensions-apiserver v0.32.2 h1:2YMk285jWMk2188V2AERy5yDwBYrjgWYggscghPCvV4=
+k8s.io/apiextensions-apiserver v0.32.2/go.mod h1:GPwf8sph7YlJT3H6aKUWtd0E+oyShk/YHWQHf/OOgCA=
+k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U=
+k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
+k8s.io/apiserver v0.32.3 h1:kOw2KBuHOA+wetX1MkmrxgBr648ksz653j26ESuWNY8=
+k8s.io/apiserver v0.32.3/go.mod h1:q1x9B8E/WzShF49wh3ADOh6muSfpmFL0I2t+TG0Zdgc=
+k8s.io/cli-runtime v0.32.2 h1:aKQR4foh9qeyckKRkNXUccP9moxzffyndZAvr+IXMks=
+k8s.io/cli-runtime v0.32.2/go.mod h1:a/JpeMztz3xDa7GCyyShcwe55p8pbcCVQxvqZnIwXN8=
+k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU=
+k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY=
+k8s.io/component-base v0.32.3 h1:98WJvvMs3QZ2LYHBzvltFSeJjEx7t5+8s71P7M74u8k=
+k8s.io/component-base v0.32.3/go.mod h1:LWi9cR+yPAv7cu2X9rZanTiFKB2kHA+JjmhkKjCZRpI=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/kube-aggregator v0.32.1 h1:cztPyIHbo6tgrhYHDqmdmvxUufJKuxgAC/vog7yeWek=
+k8s.io/kube-aggregator v0.32.1/go.mod h1:sXjL5T8FO/rlBzTbBhahw9V5Nnr1UtzZHKTj9WxQCOU=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
-k8s.io/kubectl v0.31.1 h1:ih4JQJHxsEggFqDJEHSOdJ69ZxZftgeZvYo7M/cpp24=
-k8s.io/kubectl v0.31.1/go.mod h1:aNuQoR43W6MLAtXQ/Bu4GDmoHlbhHKuyD49lmTC8eJM=
+k8s.io/kubectl v0.32.2 h1:TAkag6+XfSBgkqK9I7ZvwtF0WVtUAvK8ZqTt+5zi1Us=
+k8s.io/kubectl v0.32.2/go.mod h1:+h/NQFSPxiDZYX/WZaWw9fwYezGLISP0ud8nQKg+3g8=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-open-cluster-management.io/api v0.16.1 h1:mS+4UGxHLPQd7CRM0gdFQdVaz139Lo2bkLfqSE0CDNU=
-open-cluster-management.io/api v0.16.1/go.mod h1:9erZEWEn4bEqh0nIX2wA7f/s3KCuFycQdBrPrRzi0QM=
+open-cluster-management.io/api v0.16.2-0.20250422072120-cadf714c3055 h1:D2vT3nnMS/To3ptz8LLg1zfJm/pL8XIaO1g7Qbyc/5o=
+open-cluster-management.io/api v0.16.2-0.20250422072120-cadf714c3055/go.mod h1:/OeqXycNBZQoe3WG6ghuWsMgsKGuMZrK8ZpsU6gWL0Y=
open-cluster-management.io/cluster-proxy v0.7.0 h1:qOok0BIBL6j4mLRArzJdz0gK5nyynGKTwgBvnUHmfkE=
open-cluster-management.io/cluster-proxy v0.7.0/go.mod h1:6cgnExpuprO7Le7aqf7bI3H7Nvu3YnXBJCIbJ7wsC0s=
open-cluster-management.io/managed-serviceaccount v0.8.0 h1:8+Z142IUqVT/enxXkyb0nzLUL7JaR7dBM2fDtlCA4pM=
open-cluster-management.io/managed-serviceaccount v0.8.0/go.mod h1:eTixwpLA6XkPQARDjze3k0KRjwn6N22eFOEFx8CpB0I=
-open-cluster-management.io/ocm v0.16.0 h1:oCxhy/Z7Xbzj35p4860dWL2rr7aBk87vSokEeNVFPdk=
-open-cluster-management.io/ocm v0.16.0/go.mod h1:M2/9PVwAYtP2Rv9ahsUEcGGXUctPPVlqhzrBKRW4Wzs=
-open-cluster-management.io/sdk-go v0.16.0 h1:Ui1jerkeLaNaJPu47xjOJ3lh+rJQgeJHD25ViQMzAMs=
-open-cluster-management.io/sdk-go v0.16.0/go.mod h1:TyOjZC5YxyM5BRNgwTmLuTbHXX6xXqzYBXllrfoVp9w=
+open-cluster-management.io/ocm v0.16.1-0.20250422150056-f4b6dcb15929 h1:6JHU8bRCPYzRNqtbLRAZwSyhjvbVXzjJ8bsPEvDgS7M=
+open-cluster-management.io/ocm v0.16.1-0.20250422150056-f4b6dcb15929/go.mod h1:U5gAR1JeGholW/M/G+HwFjrOQyz6KcQxqVhnKkKg9Yw=
+open-cluster-management.io/sdk-go v0.16.1-0.20250411154302-3a424961ead4 h1:PT6kDaKjDi0EaQyNzIWzYAYeK4QpBHbm+/7VPrpJEkY=
+open-cluster-management.io/sdk-go v0.16.1-0.20250411154302-3a424961ead4/go.mod h1:FtOYjn5dL8e9S1gzNb8cBNsFzHJ1F3cpmCo+qrltido=
oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo=
oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo=
sigs.k8s.io/apiserver-network-proxy v0.29.0 h1:4N+QiVBucLicDSP8V1Izf5h4t8DsKFZ346hA8kQlXDw=
@@ -583,10 +531,10 @@ sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 h1:PFWFSkpArPNJxFX4ZKWAk9NSeRoZaXschn+ULa4xVek=
sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96/go.mod h1:EOBQyBowOUsd7U4CJnMHNE0ri+zCXyouGdLwC/jZU+I=
-sigs.k8s.io/kustomize/api v0.17.2 h1:E7/Fjk7V5fboiuijoZHgs4aHuexi5Y2loXlVOAVAG5g=
-sigs.k8s.io/kustomize/api v0.17.2/go.mod h1:UWTz9Ct+MvoeQsHcJ5e+vziRRkwimm3HytpZgIYqye0=
-sigs.k8s.io/kustomize/kyaml v0.17.1 h1:TnxYQxFXzbmNG6gOINgGWQt09GghzgTP6mIurOgrLCQ=
-sigs.k8s.io/kustomize/kyaml v0.17.1/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U=
+sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo=
+sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U=
+sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E=
+sigs.k8s.io/kustomize/kyaml v0.18.1/go.mod h1:C3L2BFVU1jgcddNBE1TxuVLgS46TjObMwW5FT9FcjYo=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
diff --git a/pkg/cmd/join/exec.go b/pkg/cmd/join/exec.go
index 9f1818e09..e046077ee 100644
--- a/pkg/cmd/join/exec.go
+++ b/pkg/cmd/join/exec.go
@@ -145,7 +145,7 @@ func (o *Options) complete(cmd *cobra.Command, args []string) (err error) {
if err != nil {
return err
}
- o.klusterletChartConfig.Klusterlet.ResourceRequirement = *resourceRequirement
+ o.klusterletChartConfig.Klusterlet.ResourceRequirement = resourceRequirement
o.klusterletChartConfig.Klusterlet.RegistrationConfiguration = operatorv1.RegistrationConfiguration{
FeatureGates: genericclioptionsclusteradm.ConvertToFeatureGateAPI(
diff --git a/pkg/cmd/upgrade/klusterlet/exec.go b/pkg/cmd/upgrade/klusterlet/exec.go
index f90528c7d..cd019df3d 100644
--- a/pkg/cmd/upgrade/klusterlet/exec.go
+++ b/pkg/cmd/upgrade/klusterlet/exec.go
@@ -59,7 +59,7 @@ func (o *Options) complete(_ *cobra.Command, _ []string) (err error) {
ClusterName: k.Spec.ClusterName,
Namespace: k.Spec.Namespace,
Mode: k.Spec.DeployOption.Mode,
- ResourceRequirement: *k.Spec.ResourceRequirement,
+ ResourceRequirement: k.Spec.ResourceRequirement,
}
}
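
Note: this hunk and the join/exec.go hunk above track the same API change in the bumped open-cluster-management.io/api — `ResourceRequirement` on the Klusterlet spec (and the mirrored chart-config field) is now a pointer, so both dereferences are dropped. A sketch of the nil-safe read this implies for other call sites; the helper name and the "Default" QoS fallback are assumptions for illustration, not code from this PR:

```go
package klusterletutil

import (
	operatorv1 "open-cluster-management.io/api/operator/v1"
)

// resourceRequirementOrDefault is a hypothetical helper: with the spec field
// now *operatorv1.ResourceRequirement, readers must nil-check before
// dereferencing instead of copying the value directly.
func resourceRequirementOrDefault(k *operatorv1.Klusterlet) operatorv1.ResourceRequirement {
	if k.Spec.ResourceRequirement == nil {
		// Assumed fallback: treat a nil requirement as the "Default" QoS class.
		return operatorv1.ResourceRequirement{Type: operatorv1.ResourceQosClassDefault}
	}
	return *k.Spec.ResourceRequirement
}
```
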
diff --git a/test/e2e/e2e-test.mk b/test/e2e/e2e-test.mk
index dc373d744..b374ff559 100644
--- a/test/e2e/e2e-test.mk
+++ b/test/e2e/e2e-test.mk
@@ -20,9 +20,9 @@ start-cluster:
.PHONY: start-cluster
test-e2e: clean-e2e ensure-kubebuilder-tools ensure-ginkgo start-cluster deps install
- $(GINKGO) -v ./test/e2e/clusteradm --timeout 3600s
+ $(GINKGO) -v --timeout 3600s ./test/e2e/clusteradm
.PHONY: test-e2e
test-only:
- $(GINKGO) -v ./test/e2e/clusteradm --timeout 3600s
+ $(GINKGO) -v --timeout 3600s ./test/e2e/clusteradm
.PHONY: test-only
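
Note: the reordering matters because ginkgo's CLI takes the form `ginkgo [flags] <packages>`; flags trailing the package list are not reliably parsed (and newer releases appear stricter about rejecting them), so `--timeout` only dependably takes effect in the new position:

```sh
# flags first, then packages — the documented, reliable form
ginkgo -v --timeout 3600s ./test/e2e/clusteradm
```
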
diff --git a/vendor/github.com/containerd/containerd/content/local/store.go b/vendor/github.com/containerd/containerd/content/local/store.go
index e1baee4c2..efe886014 100644
--- a/vendor/github.com/containerd/containerd/content/local/store.go
+++ b/vendor/github.com/containerd/containerd/content/local/store.go
@@ -67,6 +67,8 @@ type LabelStore interface {
type store struct {
root string
ls LabelStore
+
+ ensureIngestRootOnce func() error
}
// NewStore returns a local content store
@@ -80,14 +82,13 @@ func NewStore(root string) (content.Store, error) {
// require labels and should use `NewStore`. `NewLabeledStore` is primarily
// useful for tests or standalone implementations.
func NewLabeledStore(root string, ls LabelStore) (content.Store, error) {
- if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil {
- return nil, err
- }
-
- return &store{
+ s := &store{
root: root,
ls: ls,
- }, nil
+ }
+
+ s.ensureIngestRootOnce = sync.OnceValue(s.ensureIngestRoot)
+ return s, nil
}
func (s *store) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
@@ -294,6 +295,9 @@ func (s *store) Status(ctx context.Context, ref string) (content.Status, error)
func (s *store) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) {
fp, err := os.Open(filepath.Join(s.root, "ingest"))
if err != nil {
+ if os.IsNotExist(err) {
+ return nil, nil
+ }
return nil, err
}
@@ -344,6 +348,9 @@ func (s *store) ListStatuses(ctx context.Context, fs ...string) ([]content.Statu
func (s *store) WalkStatusRefs(ctx context.Context, fn func(string) error) error {
fp, err := os.Open(filepath.Join(s.root, "ingest"))
if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
return err
}
@@ -545,6 +552,11 @@ func (s *store) writer(ctx context.Context, ref string, total int64, expected di
)
foundValidIngest := false
+
+ if err := s.ensureIngestRootOnce(); err != nil {
+ return nil, err
+ }
+
// ensure that the ingest path has been created.
if err := os.Mkdir(path, 0755); err != nil {
if !os.IsExist(err) {
@@ -655,6 +667,10 @@ func (s *store) ingestPaths(ref string) (string, string, string) {
return fp, rp, dp
}
+func (s *store) ensureIngestRoot() error {
+ return os.MkdirAll(filepath.Join(s.root, "ingest"), 0777)
+}
+
func readFileString(path string) (string, error) {
p, err := os.ReadFile(path)
return string(p), err
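
Note: the store.go change makes ingest-directory creation lazy — NewLabeledStore no longer touches the filesystem, the two status walkers tolerate a missing ingest dir, and writer() creates it on first use via sync.OnceValue (Go 1.21+). A stripped-down sketch of the pattern; note that OnceValue caches whatever the first call returns, a failed MkdirAll included, which is acceptable here since that failure is effectively fatal for the store:

```go
package lazystore

import (
	"os"
	"path/filepath"
	"sync"
)

// store defers ingest-root creation until the first write. sync.OnceValue
// runs ensureRoot at most once across goroutines and caches its result
// (including a non-nil error) for every later call.
type store struct {
	root           string
	ensureRootOnce func() error
}

func newStore(root string) *store {
	s := &store{root: root}
	s.ensureRootOnce = sync.OnceValue(s.ensureRoot)
	return s
}

func (s *store) ensureRoot() error {
	return os.MkdirAll(filepath.Join(s.root, "ingest"), 0o777)
}

// write pays the creation cost on first use; read-only callers never do I/O.
func (s *store) write(name string, data []byte) error {
	if err := s.ensureRootOnce(); err != nil {
		return err
	}
	return os.WriteFile(filepath.Join(s.root, "ingest", name), data, 0o644)
}
```
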
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go
index 4ca2b921e..8ce4cccc0 100644
--- a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go
+++ b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go
@@ -25,8 +25,10 @@ import (
"net"
"net/http"
"net/url"
+ "os"
"path"
"strings"
+ "sync"
"github.com/containerd/log"
"github.com/opencontainers/go-digest"
@@ -717,13 +719,18 @@ func NewHTTPFallback(transport http.RoundTripper) http.RoundTripper {
type httpFallback struct {
super http.RoundTripper
host string
+ mu sync.Mutex
}
func (f *httpFallback) RoundTrip(r *http.Request) (*http.Response, error) {
+ f.mu.Lock()
+ fallback := f.host == r.URL.Host
+ f.mu.Unlock()
+
// only fall back if the same host had previously fallen back
- if f.host != r.URL.Host {
+ if !fallback {
resp, err := f.super.RoundTrip(r)
- if !isTLSError(err) {
+ if !isTLSError(err) && !isPortError(err, r.URL.Host) {
return resp, err
}
}
@@ -734,8 +741,12 @@ func (f *httpFallback) RoundTrip(r *http.Request) (*http.Response, error) {
plainHTTPRequest := *r
plainHTTPRequest.URL = &plainHTTPUrl
- if f.host != r.URL.Host {
- f.host = r.URL.Host
+ if !fallback {
+ f.mu.Lock()
+ if f.host != r.URL.Host {
+ f.host = r.URL.Host
+ }
+ f.mu.Unlock()
// update body on the second attempt
if r.Body != nil && r.GetBody != nil {
@@ -765,6 +776,18 @@ func isTLSError(err error) bool {
return false
}
+func isPortError(err error, host string) bool {
+ if isConnError(err) || os.IsTimeout(err) {
+ if _, port, _ := net.SplitHostPort(host); port != "" {
+ // Port is specified, will not retry on different port with scheme change
+ return false
+ }
+ return true
+ }
+
+ return false
+}
+
// HTTPFallback is an http.RoundTripper which allows fallback from https to http
// for registry endpoints with configurations for both http and TLS, such as
// defaulted localhost endpoints.
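
Note: two independent fixes in the resolver hunk. First, a mutex now guards httpFallback.host, which RoundTrip previously read and wrote without synchronization across concurrent requests. Second, isPortError widens the HTTPS-to-HTTP fallback to connection-refused and timeout errors, but only when the URL carries no explicit port — only then does switching schemes also move the request from the default 443 to 80. A runnable sketch of that port check, relying on net.SplitHostPort leaving port empty for a port-less host:

```go
package main

import (
	"fmt"
	"net"
)

// canRetryOnDefaultPort mirrors the guard inside isPortError: fall back only
// when no explicit port was given, so plain HTTP would retry on :80 rather
// than hammering the same refused port.
func canRetryOnDefaultPort(host string) bool {
	_, port, _ := net.SplitHostPort(host) // error (no port present) leaves port == ""
	return port == ""
}

func main() {
	fmt.Println(canRetryOnDefaultPort("registry.local"))      // true  — default 443 -> 80
	fmt.Println(canRetryOnDefaultPort("registry.local:5000")) // false — explicit port, no retry
}
```
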
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/resolver_unix.go b/vendor/github.com/containerd/containerd/remotes/docker/resolver_unix.go
new file mode 100644
index 000000000..4ef0e0062
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/docker/resolver_unix.go
@@ -0,0 +1,28 @@
+//go:build !windows
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package docker
+
+import (
+ "errors"
+ "syscall"
+)
+
+func isConnError(err error) bool {
+ return errors.Is(err, syscall.ECONNREFUSED)
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/resolver_windows.go b/vendor/github.com/containerd/containerd/remotes/docker/resolver_windows.go
new file mode 100644
index 000000000..9c98df04b
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/docker/resolver_windows.go
@@ -0,0 +1,30 @@
+//go:build windows
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package docker
+
+import (
+ "errors"
+ "syscall"
+
+ "golang.org/x/sys/windows"
+)
+
+func isConnError(err error) bool {
+ return errors.Is(err, syscall.ECONNREFUSED) || errors.Is(err, windows.WSAECONNREFUSED)
+}
diff --git a/vendor/github.com/containerd/containerd/version/version.go b/vendor/github.com/containerd/containerd/version/version.go
index c61791188..b83e75964 100644
--- a/vendor/github.com/containerd/containerd/version/version.go
+++ b/vendor/github.com/containerd/containerd/version/version.go
@@ -23,7 +23,7 @@ var (
Package = "github.com/containerd/containerd"
// Version holds the complete version number. Filled in at linking time.
- Version = "1.7.23+unknown"
+ Version = "1.7.24+unknown"
// Revision is filled with the VCS (e.g. git) revision being used to build
// the program at linking time.
diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md
index 97e319b21..86fefd5bf 100644
--- a/vendor/github.com/evanphx/json-patch/README.md
+++ b/vendor/github.com/evanphx/json-patch/README.md
@@ -14,9 +14,7 @@ well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ie
go get -u github.com/evanphx/json-patch/v5
```
-**Stable Versions**:
-* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5`
-* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4`
+If you need version 4, use `go get -u gopkg.in/evanphx/json-patch.v4`
(previous versions below `v3` are unavailable)
diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go
index cd0274e1e..95136681b 100644
--- a/vendor/github.com/evanphx/json-patch/patch.go
+++ b/vendor/github.com/evanphx/json-patch/patch.go
@@ -3,11 +3,10 @@ package jsonpatch
import (
"bytes"
"encoding/json"
+ "errors"
"fmt"
"strconv"
"strings"
-
- "github.com/pkg/errors"
)
const (
@@ -277,7 +276,7 @@ func (o Operation) Path() (string, error) {
return op, nil
}
- return "unknown", errors.Wrapf(ErrMissing, "operation missing path field")
+ return "unknown", fmt.Errorf("operation missing path field: %w", ErrMissing)
}
// From reads the "from" field of the Operation.
@@ -294,7 +293,7 @@ func (o Operation) From() (string, error) {
return op, nil
}
- return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field")
+ return "unknown", fmt.Errorf("operation, missing from field: %w", ErrMissing)
}
func (o Operation) value() *lazyNode {
@@ -319,7 +318,7 @@ func (o Operation) ValueInterface() (interface{}, error) {
return v, nil
}
- return nil, errors.Wrapf(ErrMissing, "operation, missing value field")
+ return nil, fmt.Errorf("operation, missing value field: %w", ErrMissing)
}
func isArray(buf []byte) bool {
@@ -398,7 +397,7 @@ func (d *partialDoc) get(key string) (*lazyNode, error) {
func (d *partialDoc) remove(key string) error {
_, ok := (*d)[key]
if !ok {
- return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key)
+ return fmt.Errorf("Unable to remove nonexistent key: %s: %w", key, ErrMissing)
}
delete(*d, key)
@@ -415,10 +414,10 @@ func (d *partialArray) set(key string, val *lazyNode) error {
if idx < 0 {
if !SupportNegativeIndices {
- return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
}
if idx < -len(*d) {
- return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
}
idx += len(*d)
}
@@ -435,7 +434,7 @@ func (d *partialArray) add(key string, val *lazyNode) error {
idx, err := strconv.Atoi(key)
if err != nil {
- return errors.Wrapf(err, "value was not a proper array index: '%s'", key)
+ return fmt.Errorf("value was not a proper array index: '%s': %w", key, err)
}
sz := len(*d) + 1
@@ -445,15 +444,15 @@ func (d *partialArray) add(key string, val *lazyNode) error {
cur := *d
if idx >= len(ary) {
- return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
}
if idx < 0 {
if !SupportNegativeIndices {
- return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
}
if idx < -len(ary) {
- return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
}
idx += len(ary)
}
@@ -475,16 +474,16 @@ func (d *partialArray) get(key string) (*lazyNode, error) {
if idx < 0 {
if !SupportNegativeIndices {
- return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
}
if idx < -len(*d) {
- return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
}
idx += len(*d)
}
if idx >= len(*d) {
- return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
}
return (*d)[idx], nil
@@ -499,15 +498,15 @@ func (d *partialArray) remove(key string) error {
cur := *d
if idx >= len(cur) {
- return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
}
if idx < 0 {
if !SupportNegativeIndices {
- return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
}
if idx < -len(cur) {
- return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
}
idx += len(cur)
}
@@ -525,18 +524,18 @@ func (d *partialArray) remove(key string) error {
func (p Patch) add(doc *container, op Operation) error {
path, err := op.Path()
if err != nil {
- return errors.Wrapf(ErrMissing, "add operation failed to decode path")
+ return fmt.Errorf("add operation failed to decode path: %w", ErrMissing)
}
con, key := findObject(doc, path)
if con == nil {
- return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path)
+ return fmt.Errorf("add operation does not apply: doc is missing path: \"%s\": %w", path, ErrMissing)
}
err = con.add(key, op.value())
if err != nil {
- return errors.Wrapf(err, "error in add for path: '%s'", path)
+ return fmt.Errorf("error in add for path: '%s': %w", path, err)
}
return nil
@@ -545,18 +544,18 @@ func (p Patch) add(doc *container, op Operation) error {
func (p Patch) remove(doc *container, op Operation) error {
path, err := op.Path()
if err != nil {
- return errors.Wrapf(ErrMissing, "remove operation failed to decode path")
+ return fmt.Errorf("remove operation failed to decode path: %w", ErrMissing)
}
con, key := findObject(doc, path)
if con == nil {
- return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path)
+ return fmt.Errorf("remove operation does not apply: doc is missing path: \"%s\": %w", path, ErrMissing)
}
err = con.remove(key)
if err != nil {
- return errors.Wrapf(err, "error in remove for path: '%s'", path)
+ return fmt.Errorf("error in remove for path: '%s': %w", path, err)
}
return nil
@@ -565,7 +564,7 @@ func (p Patch) remove(doc *container, op Operation) error {
func (p Patch) replace(doc *container, op Operation) error {
path, err := op.Path()
if err != nil {
- return errors.Wrapf(err, "replace operation failed to decode path")
+ return fmt.Errorf("replace operation failed to decode path: %w", err)
}
if path == "" {
@@ -574,7 +573,7 @@ func (p Patch) replace(doc *container, op Operation) error {
if val.which == eRaw {
if !val.tryDoc() {
if !val.tryAry() {
- return errors.Wrapf(err, "replace operation value must be object or array")
+ return fmt.Errorf("replace operation value must be object or array: %w", err)
}
}
}
@@ -585,7 +584,7 @@ func (p Patch) replace(doc *container, op Operation) error {
case eDoc:
*doc = &val.doc
case eRaw:
- return errors.Wrapf(err, "replace operation hit impossible case")
+ return fmt.Errorf("replace operation hit impossible case: %w", err)
}
return nil
@@ -594,17 +593,17 @@ func (p Patch) replace(doc *container, op Operation) error {
con, key := findObject(doc, path)
if con == nil {
- return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path)
+ return fmt.Errorf("replace operation does not apply: doc is missing path: %s: %w", path, ErrMissing)
}
_, ok := con.get(key)
if ok != nil {
- return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path)
+ return fmt.Errorf("replace operation does not apply: doc is missing key: %s: %w", path, ErrMissing)
}
err = con.set(key, op.value())
if err != nil {
- return errors.Wrapf(err, "error in remove for path: '%s'", path)
+ return fmt.Errorf("error in remove for path: '%s': %w", path, err)
}
return nil
@@ -613,39 +612,39 @@ func (p Patch) replace(doc *container, op Operation) error {
func (p Patch) move(doc *container, op Operation) error {
from, err := op.From()
if err != nil {
- return errors.Wrapf(err, "move operation failed to decode from")
+ return fmt.Errorf("move operation failed to decode from: %w", err)
}
con, key := findObject(doc, from)
if con == nil {
- return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from)
+ return fmt.Errorf("move operation does not apply: doc is missing from path: %s: %w", from, ErrMissing)
}
val, err := con.get(key)
if err != nil {
- return errors.Wrapf(err, "error in move for path: '%s'", key)
+ return fmt.Errorf("error in move for path: '%s': %w", key, err)
}
err = con.remove(key)
if err != nil {
- return errors.Wrapf(err, "error in move for path: '%s'", key)
+ return fmt.Errorf("error in move for path: '%s': %w", key, err)
}
path, err := op.Path()
if err != nil {
- return errors.Wrapf(err, "move operation failed to decode path")
+ return fmt.Errorf("move operation failed to decode path: %w", err)
}
con, key = findObject(doc, path)
if con == nil {
- return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path)
+ return fmt.Errorf("move operation does not apply: doc is missing destination path: %s: %w", path, ErrMissing)
}
err = con.add(key, val)
if err != nil {
- return errors.Wrapf(err, "error in move for path: '%s'", path)
+ return fmt.Errorf("error in move for path: '%s': %w", path, err)
}
return nil
@@ -654,7 +653,7 @@ func (p Patch) move(doc *container, op Operation) error {
func (p Patch) test(doc *container, op Operation) error {
path, err := op.Path()
if err != nil {
- return errors.Wrapf(err, "test operation failed to decode path")
+ return fmt.Errorf("test operation failed to decode path: %w", err)
}
if path == "" {
@@ -673,67 +672,67 @@ func (p Patch) test(doc *container, op Operation) error {
return nil
}
- return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+ return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed)
}
con, key := findObject(doc, path)
if con == nil {
- return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path)
+ return fmt.Errorf("test operation does not apply: is missing path: %s: %w", path, ErrMissing)
}
val, err := con.get(key)
if err != nil {
- return errors.Wrapf(err, "error in test for path: '%s'", path)
+ return fmt.Errorf("error in test for path: '%s': %w", path, err)
}
if val == nil {
if op.value() == nil || op.value().raw == nil {
return nil
}
- return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+ return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed)
} else if op.value() == nil {
- return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+ return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed)
}
if val.equal(op.value()) {
return nil
}
- return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+ return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed)
}
func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error {
from, err := op.From()
if err != nil {
- return errors.Wrapf(err, "copy operation failed to decode from")
+ return fmt.Errorf("copy operation failed to decode from: %w", err)
}
con, key := findObject(doc, from)
if con == nil {
- return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from)
+ return fmt.Errorf("copy operation does not apply: doc is missing from path: %s: %w", from, ErrMissing)
}
val, err := con.get(key)
if err != nil {
- return errors.Wrapf(err, "error in copy for from: '%s'", from)
+ return fmt.Errorf("error in copy for from: '%s': %w", from, err)
}
path, err := op.Path()
if err != nil {
- return errors.Wrapf(ErrMissing, "copy operation failed to decode path")
+ return fmt.Errorf("copy operation failed to decode path: %w", ErrMissing)
}
con, key = findObject(doc, path)
if con == nil {
- return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path)
+ return fmt.Errorf("copy operation does not apply: doc is missing destination path: %s: %w", path, ErrMissing)
}
valCopy, sz, err := deepCopy(val)
if err != nil {
- return errors.Wrapf(err, "error while performing deep copy")
+ return fmt.Errorf("error while performing deep copy: %w", err)
}
(*accumulatedCopySize) += int64(sz)
@@ -743,7 +742,7 @@ func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) er
err = con.add(key, valCopy)
if err != nil {
- return errors.Wrapf(err, "error while adding value during copy")
+ return fmt.Errorf("error while adding value during copy: %w", err)
}
return nil
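
The mechanical migration above, from `errors.Wrapf` to `fmt.Errorf` with the `%w` verb, drops the `github.com/pkg/errors` dependency while keeping the package's sentinel errors (`ErrMissing`, `ErrInvalidIndex`, `ErrTestFailed`) matchable through the standard library. A minimal sketch of the equivalence, with a stand-in sentinel:

```go
package main

import (
	"errors"
	"fmt"
)

// errMissing stands in for json-patch's ErrMissing sentinel.
var errMissing = errors.New("missing")

func main() {
	// fmt.Errorf with %w wraps the sentinel just as errors.Wrapf did,
	// so callers can still detect it with the standard library.
	err := fmt.Errorf("operation missing path field: %w", errMissing)

	fmt.Println(errors.Is(err, errMissing)) // true
	fmt.Println(err)                        // operation missing path field: missing
}
```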
diff --git a/vendor/github.com/exponent-io/jsonpath/.travis.yml b/vendor/github.com/exponent-io/jsonpath/.travis.yml
index f4f458a41..53bb8b3f9 100644
--- a/vendor/github.com/exponent-io/jsonpath/.travis.yml
+++ b/vendor/github.com/exponent-io/jsonpath/.travis.yml
@@ -1,5 +1,7 @@
language: go
-
+arch:
+ - amd64
+ - ppc64le
go:
- - 1.5
+ - 1.15
- tip
diff --git a/vendor/github.com/exponent-io/jsonpath/decoder.go b/vendor/github.com/exponent-io/jsonpath/decoder.go
index 31de46c73..5e3a06548 100644
--- a/vendor/github.com/exponent-io/jsonpath/decoder.go
+++ b/vendor/github.com/exponent-io/jsonpath/decoder.go
@@ -39,16 +39,15 @@ func NewDecoder(r io.Reader) *Decoder {
// Decoder is intended to be used with a stream of tokens. As a result it navigates forward only.
func (d *Decoder) SeekTo(path ...interface{}) (bool, error) {
- if len(path) == 0 {
- return len(d.path) == 0, nil
- }
- last := len(path) - 1
- if i, ok := path[last].(int); ok {
- path[last] = i - 1
+ if len(path) > 0 {
+ last := len(path) - 1
+ if i, ok := path[last].(int); ok {
+ path[last] = i - 1
+ }
}
for {
- if d.path.Equal(path) {
+ if len(path) == len(d.path) && d.path.Equal(path) {
return true, nil
}
_, err := d.Token()
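
`SeekTo` advances the token stream until the decoder's current path matches the one given; the change above makes an empty argument list match only at the document root and adds a length check before the element-wise comparison, so a shorter prefix path no longer matches spuriously. A small sketch of the call pattern, assuming the package's documented `NewDecoder`/`SeekTo` API:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/exponent-io/jsonpath"
)

func main() {
	doc := `{"colors": ["red", "green", "blue"]}`

	d := jsonpath.NewDecoder(strings.NewReader(doc))

	// Seek to colors[1]; SeekTo navigates forward only.
	if ok, err := d.SeekTo("colors", 1); err != nil || !ok {
		fmt.Println("path not found:", err)
		return
	}

	var v string
	if err := d.Decode(&v); err != nil {
		fmt.Println("decode:", err)
		return
	}
	fmt.Println(v) // green
}
```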
diff --git a/vendor/github.com/gofrs/flock/.golangci.yml b/vendor/github.com/gofrs/flock/.golangci.yml
new file mode 100644
index 000000000..3ad88a38f
--- /dev/null
+++ b/vendor/github.com/gofrs/flock/.golangci.yml
@@ -0,0 +1,114 @@
+run:
+ timeout: 10m
+
+linters:
+ enable:
+ - asasalint
+ - bidichk
+ - dogsled
+ - dupword
+ - durationcheck
+ - err113
+ - errname
+ - errorlint
+ - fatcontext
+ - forbidigo
+ - gocheckcompilerdirectives
+ - gochecknoinits
+ - gocritic
+ - godot
+ - godox
+ - gofumpt
+ - goheader
+ - goimports
+ - gomoddirectives
+ - goprintffuncname
+ - gosec
+ - inamedparam
+ - interfacebloat
+ - ireturn
+ - mirror
+ - misspell
+ - nolintlint
+ - revive
+ - stylecheck
+ - tenv
+ - testifylint
+ - thelper
+ - unconvert
+ - unparam
+ - usestdlibvars
+ - whitespace
+
+linters-settings:
+ misspell:
+ locale: US
+ godox:
+ keywords:
+ - FIXME
+ goheader:
+ template: |-
+ Copyright 2015 Tim Heckman. All rights reserved.
+ Copyright 2018-{{ YEAR }} The Gofrs. All rights reserved.
+ Use of this source code is governed by the BSD 3-Clause
+ license that can be found in the LICENSE file.
+ gofumpt:
+ extra-rules: true
+ gocritic:
+ enabled-tags:
+ - diagnostic
+ - style
+ - performance
+ disabled-checks:
+        - paramTypeCombine # already handled by gofumpt.extra-rules
+        - whyNoLint # already handled by nolintlint
+ - unnamedResult
+ - hugeParam
+ - sloppyReassign
+ - rangeValCopy
+ - octalLiteral
+ - ptrToRefParam
+ - appendAssign
+ - ruleguard
+ - httpNoBody
+ - exposedSyncMutex
+
+ revive:
+ rules:
+ - name: struct-tag
+ - name: blank-imports
+ - name: context-as-argument
+ - name: context-keys-type
+ - name: dot-imports
+ - name: error-return
+ - name: error-strings
+ - name: error-naming
+ - name: exported
+ - name: if-return
+ - name: increment-decrement
+ - name: var-naming
+ - name: var-declaration
+ - name: package-comments
+ - name: range
+ - name: receiver-naming
+ - name: time-naming
+ - name: unexported-return
+ - name: indent-error-flow
+ - name: errorf
+ - name: empty-block
+ - name: superfluous-else
+ - name: unused-parameter
+ - name: unreachable-code
+ - name: redefines-builtin-id
+
+issues:
+ exclude-use-default: true
+ max-issues-per-linter: 0
+ max-same-issues: 0
+
+output:
+ show-stats: true
+ sort-results: true
+ sort-order:
+ - linter
+ - file
diff --git a/vendor/github.com/gofrs/flock/.travis.yml b/vendor/github.com/gofrs/flock/.travis.yml
deleted file mode 100644
index b16d040fa..000000000
--- a/vendor/github.com/gofrs/flock/.travis.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-language: go
-go:
- - 1.14.x
- - 1.15.x
-script: go test -v -check.vv -race ./...
-sudo: false
-notifications:
- email:
- on_success: never
- on_failure: always
diff --git a/vendor/github.com/gofrs/flock/LICENSE b/vendor/github.com/gofrs/flock/LICENSE
index 8b8ff36fe..7de525bf0 100644
--- a/vendor/github.com/gofrs/flock/LICENSE
+++ b/vendor/github.com/gofrs/flock/LICENSE
@@ -1,3 +1,4 @@
+Copyright (c) 2018-2024, The Gofrs
Copyright (c) 2015-2020, Tim Heckman
All rights reserved.
diff --git a/vendor/github.com/gofrs/flock/Makefile b/vendor/github.com/gofrs/flock/Makefile
new file mode 100644
index 000000000..65c139d68
--- /dev/null
+++ b/vendor/github.com/gofrs/flock/Makefile
@@ -0,0 +1,15 @@
+.PHONY: lint test test_race build_cross_os
+
+default: lint test build_cross_os
+
+test:
+ go test -v -cover ./...
+
+test_race:
+ CGO_ENABLED=1 go test -v -race ./...
+
+lint:
+ golangci-lint run
+
+build_cross_os:
+ ./build.sh
diff --git a/vendor/github.com/gofrs/flock/README.md b/vendor/github.com/gofrs/flock/README.md
index 71ce63692..f7ca0dd9c 100644
--- a/vendor/github.com/gofrs/flock/README.md
+++ b/vendor/github.com/gofrs/flock/README.md
@@ -1,26 +1,22 @@
# flock
-[](https://travis-ci.org/gofrs/flock)
-[](https://godoc.org/github.com/gofrs/flock)
-[](https://github.com/gofrs/flock/blob/master/LICENSE)
-[](https://goreportcard.com/report/github.com/gofrs/flock)
-`flock` implements a thread-safe sync.Locker interface for file locking. It also
-includes a non-blocking TryLock() function to allow locking without blocking execution.
+[Go Reference](https://pkg.go.dev/github.com/gofrs/flock)
+[License](https://github.com/gofrs/flock/blob/main/LICENSE)
+[Go Report Card](https://goreportcard.com/report/github.com/gofrs/flock)
-## License
-`flock` is released under the BSD 3-Clause License. See the `LICENSE` file for more details.
+`flock` implements a thread-safe file lock.
-## Go Compatibility
-This package makes use of the `context` package that was introduced in Go 1.7. As such, this
-package has an implicit dependency on Go 1.7+.
+It also includes a non-blocking `TryLock()` function to allow locking without blocking execution.
## Installation
-```
+
+```bash
go get -u github.com/gofrs/flock
```
## Usage
-```Go
+
+```go
import "github.com/gofrs/flock"
fileLock := flock.New("/var/lock/go-lock.lock")
@@ -38,4 +34,12 @@ if locked {
```
For more detailed usage information take a look at the package API docs on
-[GoDoc](https://godoc.org/github.com/gofrs/flock).
+[GoDoc](https://pkg.go.dev/github.com/gofrs/flock).
+
+## License
+
+`flock` is released under the BSD 3-Clause License. See the [`LICENSE`](./LICENSE) file for more details.
+
+## Project History
+
+This project was originally `github.com/theckman/go-flock`; it was transferred to Gofrs by the original author, [Tim Heckman](https://github.com/theckman).
diff --git a/vendor/github.com/gofrs/flock/SECURITY.md b/vendor/github.com/gofrs/flock/SECURITY.md
new file mode 100644
index 000000000..01419bd59
--- /dev/null
+++ b/vendor/github.com/gofrs/flock/SECURITY.md
@@ -0,0 +1,21 @@
+# Security Policy
+
+## Supported Versions
+
+We support the latest version of this library.
+We do not guarantee support of previous versions.
+
+If a defect is reported, it will generally be fixed on the latest version (provided it exists) irrespective of whether it was introduced in a prior version.
+
+## Reporting a Vulnerability
+
+To report a potential security vulnerability, please create a [security advisory](https://github.com/gofrs/flock/security/advisories/new).
+
+For us to respond to your report most effectively, please include any of the following:
+
+- Steps to reproduce or a proof-of-concept
+- Any relevant information, including the versions used
+
+## Security Scorecard
+
+This project submits security [results](https://scorecard.dev/viewer/?uri=github.com/gofrs/flock) to the [OpenSSF Scorecard](https://securityscorecards.dev/).
diff --git a/vendor/github.com/gofrs/flock/appveyor.yml b/vendor/github.com/gofrs/flock/appveyor.yml
deleted file mode 100644
index 909b4bf7c..000000000
--- a/vendor/github.com/gofrs/flock/appveyor.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-version: '{build}'
-
-build: false
-deploy: false
-
-clone_folder: 'c:\gopath\src\github.com\gofrs\flock'
-
-environment:
- GOPATH: 'c:\gopath'
- GOVERSION: '1.15'
-
-init:
- - git config --global core.autocrlf input
-
-install:
- - rmdir c:\go /s /q
- - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.msi
- - msiexec /i go%GOVERSION%.windows-amd64.msi /q
- - set Path=c:\go\bin;c:\gopath\bin;%Path%
- - go version
- - go env
-
-test_script:
- - go get -t ./...
- - go test -race -v ./...
diff --git a/vendor/github.com/gofrs/flock/build.sh b/vendor/github.com/gofrs/flock/build.sh
new file mode 100644
index 000000000..60f7809f0
--- /dev/null
+++ b/vendor/github.com/gofrs/flock/build.sh
@@ -0,0 +1,18 @@
+#!/bin/bash -e
+
+# Not supported by flock:
+# - plan9/*
+# - js/wasm
+# - wasip1/wasm
+
+for row in $(go tool dist list -json | jq -r '.[] | @base64'); do
+ _jq() {
+ echo ${row} | base64 --decode | jq -r ${1}
+ }
+
+ GOOS=$(_jq '.GOOS')
+ GOARCH=$(_jq '.GOARCH')
+
+ echo "$GOOS/$GOARCH"
+ GOOS=$GOOS GOARCH=$GOARCH go build
+done
diff --git a/vendor/github.com/gofrs/flock/flock.go b/vendor/github.com/gofrs/flock/flock.go
index 95c784ca5..ff942b228 100644
--- a/vendor/github.com/gofrs/flock/flock.go
+++ b/vendor/github.com/gofrs/flock/flock.go
@@ -1,4 +1,5 @@
// Copyright 2015 Tim Heckman. All rights reserved.
+// Copyright 2018-2024 The Gofrs. All rights reserved.
// Use of this source code is governed by the BSD 3-Clause
// license that can be found in the LICENSE file.
@@ -18,12 +19,29 @@ package flock
import (
"context"
+ "io/fs"
"os"
"runtime"
"sync"
"time"
)
+type Option func(f *Flock)
+
+// SetFlag sets the flag used to create/open the file.
+func SetFlag(flag int) Option {
+ return func(f *Flock) {
+ f.flag = flag
+ }
+}
+
+// SetPermissions sets the OS permissions to set on the file.
+func SetPermissions(perm fs.FileMode) Option {
+ return func(f *Flock) {
+ f.perm = perm
+ }
+}
+
// Flock is the struct type to handle file locking. All fields are unexported,
// with access to some of the fields provided by getter methods (Path() and Locked()).
type Flock struct {
@@ -32,12 +50,37 @@ type Flock struct {
fh *os.File
l bool
r bool
+
+ // flag is the flag used to create/open the file.
+ flag int
+ // perm is the OS permissions to set on the file.
+ perm fs.FileMode
}
// New returns a new instance of *Flock. The only parameter
// it takes is the path to the desired lockfile.
-func New(path string) *Flock {
- return &Flock{path: path}
+func New(path string, opts ...Option) *Flock {
+ // create it if it doesn't exist, and open the file read-only.
+ flags := os.O_CREATE
+ switch runtime.GOOS {
+ case "aix", "solaris", "illumos":
+		// AIX cannot perform a write-lock (i.e. exclusive) on a read-only file.
+ flags |= os.O_RDWR
+ default:
+ flags |= os.O_RDONLY
+ }
+
+ f := &Flock{
+ path: path,
+ flag: flags,
+ perm: fs.FileMode(0o600),
+ }
+
+ for _, opt := range opts {
+ opt(f)
+ }
+
+ return f
}
// NewFlock returns a new instance of *Flock. The only parameter
@@ -67,6 +110,7 @@ func (f *Flock) Path() string {
func (f *Flock) Locked() bool {
f.m.RLock()
defer f.m.RUnlock()
+
return f.l
}
@@ -76,6 +120,7 @@ func (f *Flock) Locked() bool {
func (f *Flock) RLocked() bool {
f.m.RLock()
defer f.m.RUnlock()
+
return f.r
}
@@ -83,16 +128,18 @@ func (f *Flock) String() string {
return f.path
}
-// TryLockContext repeatedly tries to take an exclusive lock until one of the
-// conditions is met: TryLock succeeds, TryLock fails with error, or Context
-// Done channel is closed.
+// TryLockContext repeatedly tries to take an exclusive lock until one of the conditions is met:
+// - TryLock succeeds
+// - TryLock fails with error
+// - Context Done channel is closed.
func (f *Flock) TryLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) {
return tryCtx(ctx, f.TryLock, retryDelay)
}
-// TryRLockContext repeatedly tries to take a shared lock until one of the
-// conditions is met: TryRLock succeeds, TryRLock fails with error, or Context
-// Done channel is closed.
+// TryRLockContext repeatedly tries to take a shared lock until one of the conditions is met:
+// - TryRLock succeeds
+// - TryRLock fails with error
+// - Context Done channel is closed.
func (f *Flock) TryRLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) {
return tryCtx(ctx, f.TryRLock, retryDelay)
}
@@ -101,10 +148,12 @@ func tryCtx(ctx context.Context, fn func() (bool, error), retryDelay time.Durati
if ctx.Err() != nil {
return false, ctx.Err()
}
+
for {
if ok, err := fn(); ok || err != nil {
return ok, err
}
+
select {
case <-ctx.Done():
return false, ctx.Err()
@@ -114,31 +163,44 @@ func tryCtx(ctx context.Context, fn func() (bool, error), retryDelay time.Durati
}
}
-func (f *Flock) setFh() error {
+func (f *Flock) setFh(flag int) error {
// open a new os.File instance
- // create it if it doesn't exist, and open the file read-only.
- flags := os.O_CREATE
- if runtime.GOOS == "aix" {
- // AIX cannot preform write-lock (ie exclusive) on a
- // read-only file.
- flags |= os.O_RDWR
- } else {
- flags |= os.O_RDONLY
- }
- fh, err := os.OpenFile(f.path, flags, os.FileMode(0600))
+ fh, err := os.OpenFile(f.path, flag, f.perm)
if err != nil {
return err
}
- // set the filehandle on the struct
+ // set the file handle on the struct
f.fh = fh
+
return nil
}
-// ensure the file handle is closed if no lock is held
+// resetFh resets file handle:
+// - tries to close the file (ignore errors)
+// - sets fh to nil.
+func (f *Flock) resetFh() {
+ if f.fh == nil {
+ return
+ }
+
+ _ = f.fh.Close()
+
+ f.fh = nil
+}
+
+// ensure the file handle is closed if no lock is held.
func (f *Flock) ensureFhState() {
- if !f.l && !f.r && f.fh != nil {
- f.fh.Close()
- f.fh = nil
+ if f.l || f.r || f.fh == nil {
+ return
}
+
+ f.resetFh()
+}
+
+func (f *Flock) reset() {
+ f.l = false
+ f.r = false
+
+ f.resetFh()
}
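
The functional options added above (`SetFlag`, `SetPermissions`) let callers override the open flags and file mode that `New` previously hard-coded. A minimal usage sketch, assuming only the two options introduced in this version:

```go
package main

import (
	"fmt"
	"os"

	"github.com/gofrs/flock"
)

func main() {
	// Open the lock file read-write with 0644 permissions instead of the
	// defaults (read-only on most platforms, mode 0600).
	f := flock.New("/tmp/example.lock",
		flock.SetFlag(os.O_CREATE|os.O_RDWR),
		flock.SetPermissions(0o644),
	)

	locked, err := f.TryLock()
	if err != nil {
		fmt.Println("try lock:", err)
		return
	}
	if locked {
		defer f.Unlock()
		fmt.Println("lock acquired")
	}
}
```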
diff --git a/vendor/github.com/gofrs/flock/flock_aix.go b/vendor/github.com/gofrs/flock/flock_aix.go
deleted file mode 100644
index 7277c1b6b..000000000
--- a/vendor/github.com/gofrs/flock/flock_aix.go
+++ /dev/null
@@ -1,281 +0,0 @@
-// Copyright 2019 Tim Heckman. All rights reserved. Use of this source code is
-// governed by the BSD 3-Clause license that can be found in the LICENSE file.
-
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code implements the filelock API using POSIX 'fcntl' locks, which attach
-// to an (inode, process) pair rather than a file descriptor. To avoid unlocking
-// files prematurely when the same file is opened through different descriptors,
-// we allow only one read-lock at a time.
-//
-// This code is adapted from the Go package:
-// cmd/go/internal/lockedfile/internal/filelock
-
-//+build aix
-
-package flock
-
-import (
- "errors"
- "io"
- "os"
- "sync"
- "syscall"
-
- "golang.org/x/sys/unix"
-)
-
-type lockType int16
-
-const (
- readLock lockType = unix.F_RDLCK
- writeLock lockType = unix.F_WRLCK
-)
-
-type cmdType int
-
-const (
- tryLock cmdType = unix.F_SETLK
- waitLock cmdType = unix.F_SETLKW
-)
-
-type inode = uint64
-
-type inodeLock struct {
- owner *Flock
- queue []<-chan *Flock
-}
-
-var (
- mu sync.Mutex
- inodes = map[*Flock]inode{}
- locks = map[inode]inodeLock{}
-)
-
-// Lock is a blocking call to try and take an exclusive file lock. It will wait
-// until it is able to obtain the exclusive file lock. It's recommended that
-// TryLock() be used over this function. This function may block the ability to
-// query the current Locked() or RLocked() status due to a RW-mutex lock.
-//
-// If we are already exclusive-locked, this function short-circuits and returns
-// immediately assuming it can take the mutex lock.
-//
-// If the *Flock has a shared lock (RLock), this may transparently replace the
-// shared lock with an exclusive lock on some UNIX-like operating systems. Be
-// careful when using exclusive locks in conjunction with shared locks
-// (RLock()), because calling Unlock() may accidentally release the exclusive
-// lock that was once a shared lock.
-func (f *Flock) Lock() error {
- return f.lock(&f.l, writeLock)
-}
-
-// RLock is a blocking call to try and take a shared file lock. It will wait
-// until it is able to obtain the shared file lock. It's recommended that
-// TryRLock() be used over this function. This function may block the ability to
-// query the current Locked() or RLocked() status due to a RW-mutex lock.
-//
-// If we are already shared-locked, this function short-circuits and returns
-// immediately assuming it can take the mutex lock.
-func (f *Flock) RLock() error {
- return f.lock(&f.r, readLock)
-}
-
-func (f *Flock) lock(locked *bool, flag lockType) error {
- f.m.Lock()
- defer f.m.Unlock()
-
- if *locked {
- return nil
- }
-
- if f.fh == nil {
- if err := f.setFh(); err != nil {
- return err
- }
- defer f.ensureFhState()
- }
-
- if _, err := f.doLock(waitLock, flag, true); err != nil {
- return err
- }
-
- *locked = true
- return nil
-}
-
-func (f *Flock) doLock(cmd cmdType, lt lockType, blocking bool) (bool, error) {
- // POSIX locks apply per inode and process, and the lock for an inode is
- // released when *any* descriptor for that inode is closed. So we need to
- // synchronize access to each inode internally, and must serialize lock and
- // unlock calls that refer to the same inode through different descriptors.
- fi, err := f.fh.Stat()
- if err != nil {
- return false, err
- }
- ino := inode(fi.Sys().(*syscall.Stat_t).Ino)
-
- mu.Lock()
- if i, dup := inodes[f]; dup && i != ino {
- mu.Unlock()
- return false, &os.PathError{
- Path: f.Path(),
- Err: errors.New("inode for file changed since last Lock or RLock"),
- }
- }
-
- inodes[f] = ino
-
- var wait chan *Flock
- l := locks[ino]
- if l.owner == f {
- // This file already owns the lock, but the call may change its lock type.
- } else if l.owner == nil {
- // No owner: it's ours now.
- l.owner = f
- } else if !blocking {
- // Already owned: cannot take the lock.
- mu.Unlock()
- return false, nil
- } else {
- // Already owned: add a channel to wait on.
- wait = make(chan *Flock)
- l.queue = append(l.queue, wait)
- }
- locks[ino] = l
- mu.Unlock()
-
- if wait != nil {
- wait <- f
- }
-
- err = setlkw(f.fh.Fd(), cmd, lt)
-
- if err != nil {
- f.doUnlock()
- if cmd == tryLock && err == unix.EACCES {
- return false, nil
- }
- return false, err
- }
-
- return true, nil
-}
-
-func (f *Flock) Unlock() error {
- f.m.Lock()
- defer f.m.Unlock()
-
- // if we aren't locked or if the lockfile instance is nil
- // just return a nil error because we are unlocked
- if (!f.l && !f.r) || f.fh == nil {
- return nil
- }
-
- if err := f.doUnlock(); err != nil {
- return err
- }
-
- f.fh.Close()
-
- f.l = false
- f.r = false
- f.fh = nil
-
- return nil
-}
-
-func (f *Flock) doUnlock() (err error) {
- var owner *Flock
- mu.Lock()
- ino, ok := inodes[f]
- if ok {
- owner = locks[ino].owner
- }
- mu.Unlock()
-
- if owner == f {
- err = setlkw(f.fh.Fd(), waitLock, unix.F_UNLCK)
- }
-
- mu.Lock()
- l := locks[ino]
- if len(l.queue) == 0 {
- // No waiters: remove the map entry.
- delete(locks, ino)
- } else {
- // The first waiter is sending us their file now.
- // Receive it and update the queue.
- l.owner = <-l.queue[0]
- l.queue = l.queue[1:]
- locks[ino] = l
- }
- delete(inodes, f)
- mu.Unlock()
-
- return err
-}
-
-// TryLock is the preferred function for taking an exclusive file lock. This
-// function takes an RW-mutex lock before it tries to lock the file, so there is
-// the possibility that this function may block for a short time if another
-// goroutine is trying to take any action.
-//
-// The actual file lock is non-blocking. If we are unable to get the exclusive
-// file lock, the function will return false instead of waiting for the lock. If
-// we get the lock, we also set the *Flock instance as being exclusive-locked.
-func (f *Flock) TryLock() (bool, error) {
- return f.try(&f.l, writeLock)
-}
-
-// TryRLock is the preferred function for taking a shared file lock. This
-// function takes an RW-mutex lock before it tries to lock the file, so there is
-// the possibility that this function may block for a short time if another
-// goroutine is trying to take any action.
-//
-// The actual file lock is non-blocking. If we are unable to get the shared file
-// lock, the function will return false instead of waiting for the lock. If we
-// get the lock, we also set the *Flock instance as being share-locked.
-func (f *Flock) TryRLock() (bool, error) {
- return f.try(&f.r, readLock)
-}
-
-func (f *Flock) try(locked *bool, flag lockType) (bool, error) {
- f.m.Lock()
- defer f.m.Unlock()
-
- if *locked {
- return true, nil
- }
-
- if f.fh == nil {
- if err := f.setFh(); err != nil {
- return false, err
- }
- defer f.ensureFhState()
- }
-
- haslock, err := f.doLock(tryLock, flag, false)
- if err != nil {
- return false, err
- }
-
- *locked = haslock
- return haslock, nil
-}
-
-// setlkw calls FcntlFlock with cmd for the entire file indicated by fd.
-func setlkw(fd uintptr, cmd cmdType, lt lockType) error {
- for {
- err := unix.FcntlFlock(fd, int(cmd), &unix.Flock_t{
- Type: int16(lt),
- Whence: io.SeekStart,
- Start: 0,
- Len: 0, // All bytes.
- })
- if err != unix.EINTR {
- return err
- }
- }
-}
diff --git a/vendor/github.com/gofrs/flock/flock_others.go b/vendor/github.com/gofrs/flock/flock_others.go
new file mode 100644
index 000000000..18b14f1bd
--- /dev/null
+++ b/vendor/github.com/gofrs/flock/flock_others.go
@@ -0,0 +1,40 @@
+//go:build (!unix && !windows) || plan9
+
+package flock
+
+import (
+ "errors"
+ "io/fs"
+)
+
+func (f *Flock) Lock() error {
+ return &fs.PathError{
+ Op: "Lock",
+ Path: f.Path(),
+ Err: errors.ErrUnsupported,
+ }
+}
+
+func (f *Flock) RLock() error {
+ return &fs.PathError{
+ Op: "RLock",
+ Path: f.Path(),
+ Err: errors.ErrUnsupported,
+ }
+}
+
+func (f *Flock) Unlock() error {
+ return &fs.PathError{
+ Op: "Unlock",
+ Path: f.Path(),
+ Err: errors.ErrUnsupported,
+ }
+}
+
+func (f *Flock) TryLock() (bool, error) {
+ return false, f.Lock()
+}
+
+func (f *Flock) TryRLock() (bool, error) {
+ return false, f.RLock()
+}
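
On targets without `flock(2)` or `fcntl` support, every operation now returns an `fs.PathError` wrapping `errors.ErrUnsupported` (Go 1.21+), which callers can detect portably rather than matching platform build tags. A short sketch of the check:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/gofrs/flock"
)

func main() {
	f := flock.New("/tmp/example.lock")

	if err := f.Lock(); err != nil {
		// On plan9 and other unsupported targets, the fs.PathError
		// unwraps to errors.ErrUnsupported, so the caller can degrade
		// gracefully instead of failing hard.
		if errors.Is(err, errors.ErrUnsupported) {
			fmt.Println("file locking not supported on this platform")
			return
		}
		fmt.Println("lock failed:", err)
		return
	}
	defer f.Unlock()
}
```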
diff --git a/vendor/github.com/gofrs/flock/flock_unix.go b/vendor/github.com/gofrs/flock/flock_unix.go
index c315a3e29..cf8919c7a 100644
--- a/vendor/github.com/gofrs/flock/flock_unix.go
+++ b/vendor/github.com/gofrs/flock/flock_unix.go
@@ -1,42 +1,44 @@
// Copyright 2015 Tim Heckman. All rights reserved.
+// Copyright 2018-2024 The Gofrs. All rights reserved.
// Use of this source code is governed by the BSD 3-Clause
// license that can be found in the LICENSE file.
-// +build !aix,!windows
+//go:build darwin || dragonfly || freebsd || illumos || linux || netbsd || openbsd
package flock
import (
+ "errors"
"os"
- "syscall"
+
+ "golang.org/x/sys/unix"
)
-// Lock is a blocking call to try and take an exclusive file lock. It will wait
-// until it is able to obtain the exclusive file lock. It's recommended that
-// TryLock() be used over this function. This function may block the ability to
-// query the current Locked() or RLocked() status due to a RW-mutex lock.
+// Lock is a blocking call to try and take an exclusive file lock.
+// It will wait until it is able to obtain the exclusive file lock.
+// It's recommended that TryLock() be used over this function.
+// This function may block the ability to query the current Locked() or RLocked() status due to a RW-mutex lock.
//
-// If we are already exclusive-locked, this function short-circuits and returns
-// immediately assuming it can take the mutex lock.
+// If we are already exclusive-locked,
+// this function short-circuits and returns immediately assuming it can take the mutex lock.
//
-// If the *Flock has a shared lock (RLock), this may transparently replace the
-// shared lock with an exclusive lock on some UNIX-like operating systems. Be
-// careful when using exclusive locks in conjunction with shared locks
-// (RLock()), because calling Unlock() may accidentally release the exclusive
-// lock that was once a shared lock.
+// If the *Flock has a shared lock (RLock),
+// this may transparently replace the shared lock with an exclusive lock on some UNIX-like operating systems.
+// Be careful when using exclusive locks in conjunction with shared locks (RLock()),
+// because calling Unlock() may accidentally release the exclusive lock that was once a shared lock.
func (f *Flock) Lock() error {
- return f.lock(&f.l, syscall.LOCK_EX)
+ return f.lock(&f.l, unix.LOCK_EX)
}
-// RLock is a blocking call to try and take a shared file lock. It will wait
-// until it is able to obtain the shared file lock. It's recommended that
-// TryRLock() be used over this function. This function may block the ability to
-// query the current Locked() or RLocked() status due to a RW-mutex lock.
+// RLock is a blocking call to try and take a shared file lock.
+// It will wait until it is able to obtain the shared file lock.
+// It's recommended that TryRLock() be used over this function.
+// This function may block the ability to query the current Locked() or RLocked() status due to a RW-mutex lock.
//
-// If we are already shared-locked, this function short-circuits and returns
-// immediately assuming it can take the mutex lock.
+// If we are already shared-locked,
+// this function short-circuits and returns immediately assuming it can take the mutex lock.
func (f *Flock) RLock() error {
- return f.lock(&f.r, syscall.LOCK_SH)
+ return f.lock(&f.r, unix.LOCK_SH)
}
func (f *Flock) lock(locked *bool, flag int) error {
@@ -48,13 +50,15 @@ func (f *Flock) lock(locked *bool, flag int) error {
}
if f.fh == nil {
- if err := f.setFh(); err != nil {
+ if err := f.setFh(f.flag); err != nil {
return err
}
+
defer f.ensureFhState()
}
- if err := syscall.Flock(int(f.fh.Fd()), flag); err != nil {
+ err := unix.Flock(int(f.fh.Fd()), flag)
+ if err != nil {
shouldRetry, reopenErr := f.reopenFDOnError(err)
if reopenErr != nil {
return reopenErr
@@ -64,71 +68,74 @@ func (f *Flock) lock(locked *bool, flag int) error {
return err
}
- if err = syscall.Flock(int(f.fh.Fd()), flag); err != nil {
+ err = unix.Flock(int(f.fh.Fd()), flag)
+ if err != nil {
return err
}
}
*locked = true
+
return nil
}
-// Unlock is a function to unlock the file. This file takes a RW-mutex lock, so
-// while it is running the Locked() and RLocked() functions will be blocked.
+// Unlock is a function to unlock the file.
+// This file takes a RW-mutex lock,
+// so while it is running the Locked() and RLocked() functions will be blocked.
//
-// This function short-circuits if we are unlocked already. If not, it calls
-// syscall.LOCK_UN on the file and closes the file descriptor. It does not
-// remove the file from disk. It's up to your application to do.
+// This function short-circuits if we are unlocked already.
+// If not, it calls unix.LOCK_UN on the file and closes the file descriptor.
+// It does not remove the file from disk. It's up to your application to do so.
//
-// Please note, if your shared lock became an exclusive lock this may
-// unintentionally drop the exclusive lock if called by the consumer that
-// believes they have a shared lock. Please see Lock() for more details.
+// Please note,
+// if your shared lock became an exclusive lock,
+// this may unintentionally drop the exclusive lock if called by the consumer that believes they have a shared lock.
+// Please see Lock() for more details.
func (f *Flock) Unlock() error {
f.m.Lock()
defer f.m.Unlock()
- // if we aren't locked or if the lockfile instance is nil
- // just return a nil error because we are unlocked
+ // If we aren't locked or if the lockfile instance is nil
+ // just return a nil error because we are unlocked.
if (!f.l && !f.r) || f.fh == nil {
return nil
}
- // mark the file as unlocked
- if err := syscall.Flock(int(f.fh.Fd()), syscall.LOCK_UN); err != nil {
+ // Mark the file as unlocked.
+ err := unix.Flock(int(f.fh.Fd()), unix.LOCK_UN)
+ if err != nil {
return err
}
- f.fh.Close()
-
- f.l = false
- f.r = false
- f.fh = nil
+ f.reset()
return nil
}
-// TryLock is the preferred function for taking an exclusive file lock. This
-// function takes an RW-mutex lock before it tries to lock the file, so there is
-// the possibility that this function may block for a short time if another
-// goroutine is trying to take any action.
+// TryLock is the preferred function for taking an exclusive file lock.
+// This function takes an RW-mutex lock before it tries to lock the file,
+// so there is the possibility that this function may block for a short time
+// if another goroutine is trying to take any action.
//
-// The actual file lock is non-blocking. If we are unable to get the exclusive
-// file lock, the function will return false instead of waiting for the lock. If
-// we get the lock, we also set the *Flock instance as being exclusive-locked.
+// The actual file lock is non-blocking.
+// If we are unable to get the exclusive file lock,
+// the function will return false instead of waiting for the lock.
+// If we get the lock, we also set the *Flock instance as being exclusive-locked.
func (f *Flock) TryLock() (bool, error) {
- return f.try(&f.l, syscall.LOCK_EX)
+ return f.try(&f.l, unix.LOCK_EX)
}
-// TryRLock is the preferred function for taking a shared file lock. This
-// function takes an RW-mutex lock before it tries to lock the file, so there is
-// the possibility that this function may block for a short time if another
-// goroutine is trying to take any action.
+// TryRLock is the preferred function for taking a shared file lock.
+// This function takes an RW-mutex lock before it tries to lock the file,
+// so there is the possibility that this function may block for a short time
+// if another goroutine is trying to take any action.
//
-// The actual file lock is non-blocking. If we are unable to get the shared file
-// lock, the function will return false instead of waiting for the lock. If we
-// get the lock, we also set the *Flock instance as being share-locked.
+// The actual file lock is non-blocking.
+// If we are unable to get the shared file lock,
+// the function will return false instead of waiting for the lock.
+// If we get the lock, we also set the *Flock instance as being share-locked.
func (f *Flock) TryRLock() (bool, error) {
- return f.try(&f.r, syscall.LOCK_SH)
+ return f.try(&f.r, unix.LOCK_SH)
}
func (f *Flock) try(locked *bool, flag int) (bool, error) {
@@ -140,25 +147,28 @@ func (f *Flock) try(locked *bool, flag int) (bool, error) {
}
if f.fh == nil {
- if err := f.setFh(); err != nil {
+ if err := f.setFh(f.flag); err != nil {
return false, err
}
+
defer f.ensureFhState()
}
var retried bool
retry:
- err := syscall.Flock(int(f.fh.Fd()), flag|syscall.LOCK_NB)
+ err := unix.Flock(int(f.fh.Fd()), flag|unix.LOCK_NB)
- switch err {
- case syscall.EWOULDBLOCK:
+ switch {
+ case errors.Is(err, unix.EWOULDBLOCK):
return false, nil
- case nil:
+ case err == nil:
*locked = true
return true, nil
}
+
if !retried {
- if shouldRetry, reopenErr := f.reopenFDOnError(err); reopenErr != nil {
+ shouldRetry, reopenErr := f.reopenFDOnError(err)
+ if reopenErr != nil {
return false, reopenErr
} else if shouldRetry {
retried = true
@@ -169,29 +179,32 @@ retry:
return false, err
}
-// reopenFDOnError determines whether we should reopen the file handle
-// in readwrite mode and try again. This comes from util-linux/sys-utils/flock.c:
-// Since Linux 3.4 (commit 55725513)
-// Probably NFSv4 where flock() is emulated by fcntl().
+// reopenFDOnError determines whether we should reopen the file handle in readwrite mode and try again.
+// This comes from `util-linux/sys-utils/flock.c`:
+// > Since Linux 3.4 (commit 55725513)
+// > Probably NFSv4 where flock() is emulated by fcntl().
+// > https://github.com/util-linux/util-linux/blob/198e920aa24743ef6ace4e07cf6237de527f9261/sys-utils/flock.c#L374-L390
func (f *Flock) reopenFDOnError(err error) (bool, error) {
- if err != syscall.EIO && err != syscall.EBADF {
+ if !errors.Is(err, unix.EIO) && !errors.Is(err, unix.EBADF) {
return false, nil
}
- if st, err := f.fh.Stat(); err == nil {
- // if the file is able to be read and written
- if st.Mode()&0600 == 0600 {
- f.fh.Close()
- f.fh = nil
-
- // reopen in read-write mode and set the filehandle
- fh, err := os.OpenFile(f.path, os.O_CREATE|os.O_RDWR, os.FileMode(0600))
- if err != nil {
- return false, err
- }
- f.fh = fh
- return true, nil
- }
+
+ st, err := f.fh.Stat()
+ if err != nil {
+ return false, nil
+ }
+
+ if st.Mode()&f.perm != f.perm {
+ return false, nil
+ }
+
+ f.resetFh()
+
+ // reopen in read-write mode and set the file handle
+ err = f.setFh(f.flag | os.O_RDWR)
+ if err != nil {
+ return false, err
}
- return false, nil
+ return true, nil
}
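
As the `try` implementation above shows, a contended non-blocking `unix.Flock` fails with `EWOULDBLOCK`, which `TryLock`/`TryRLock` swallow and report as `(false, nil)` rather than an error. A sketch of the observable behavior; note that two `*Flock` instances conflict even within one process, because `flock(2)` locks attach to the open file description rather than the process:

```go
package main

import (
	"fmt"

	"github.com/gofrs/flock"
)

func main() {
	path := "/tmp/example.lock"

	a := flock.New(path)
	b := flock.New(path)

	lockedA, _ := a.TryLock()
	fmt.Println(lockedA) // true: a's open file description holds the lock

	// b opens the file separately, so flock(2) sees a competing
	// description; EWOULDBLOCK is reported as (false, nil), not an error.
	lockedB, err := b.TryLock()
	fmt.Println(lockedB, err) // false <nil>

	a.Unlock()
}
```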
diff --git a/vendor/github.com/gofrs/flock/flock_unix_fcntl.go b/vendor/github.com/gofrs/flock/flock_unix_fcntl.go
new file mode 100644
index 000000000..ea007b47d
--- /dev/null
+++ b/vendor/github.com/gofrs/flock/flock_unix_fcntl.go
@@ -0,0 +1,393 @@
+// Copyright 2015 Tim Heckman. All rights reserved.
+// Copyright 2018-2024 The Gofrs. All rights reserved.
+// Use of this source code is governed by the BSD 3-Clause
+// license that can be found in the LICENSE file.
+
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code implements the filelock API using POSIX 'fcntl' locks,
+// which attach to an (inode, process) pair rather than a file descriptor.
+// To avoid unlocking files prematurely when the same file is opened through different descriptors,
+// we allow only one read-lock at a time.
+//
+// This code is adapted from the Go package (go.22):
+// https://github.com/golang/go/blob/release-branch.go1.22/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go
+
+//go:build aix || (solaris && !illumos)
+
+package flock
+
+import (
+ "errors"
+ "io"
+ "io/fs"
+ "math/rand"
+ "sync"
+ "syscall"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// https://github.com/golang/go/blob/09aeb6e33ab426eff4676a3baf694d5a3019e9fc/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go#L28
+type lockType int16
+
+// String returns the name of the function corresponding to lt
+// (Lock, RLock, or Unlock).
+// https://github.com/golang/go/blob/09aeb6e33ab426eff4676a3baf694d5a3019e9fc/src/cmd/go/internal/lockedfile/internal/filelock/filelock.go#L67
+func (lt lockType) String() string {
+ switch lt {
+ case readLock:
+ return "RLock"
+ case writeLock:
+ return "Lock"
+ default:
+ return "Unlock"
+ }
+}
+
+// https://github.com/golang/go/blob/09aeb6e33ab426eff4676a3baf694d5a3019e9fc/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go#L30-L33
+const (
+ readLock lockType = unix.F_RDLCK
+ writeLock lockType = unix.F_WRLCK
+)
+
+// https://github.com/golang/go/blob/09aeb6e33ab426eff4676a3baf694d5a3019e9fc/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go#L35
+type inode = uint64
+
+// https://github.com/golang/go/blob/09aeb6e33ab426eff4676a3baf694d5a3019e9fc/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go#L37-L40
+type inodeLock struct {
+ owner *Flock
+ queue []<-chan *Flock
+}
+
+type cmdType int
+
+const (
+ tryLock cmdType = unix.F_SETLK
+ waitLock cmdType = unix.F_SETLKW
+)
+
+var (
+ mu sync.Mutex
+ inodes = map[*Flock]inode{}
+ locks = map[inode]inodeLock{}
+)
+
+// Lock is a blocking call to try and take an exclusive file lock.
+// It will wait until it is able to obtain the exclusive file lock.
+// It's recommended that TryLock() be used over this function.
+// This function may block the ability to query the current Locked() or RLocked() status due to a RW-mutex lock.
+//
+// If we are already exclusive-locked, this function short-circuits and
+// returns immediately assuming it can take the mutex lock.
+//
+// If the *Flock has a shared lock (RLock),
+// this may transparently replace the shared lock with an exclusive lock on some UNIX-like operating systems.
+// Be careful when using exclusive locks in conjunction with shared locks (RLock()),
+// because calling Unlock() may accidentally release the exclusive lock that was once a shared lock.
+func (f *Flock) Lock() error {
+ return f.lock(&f.l, writeLock)
+}
+
+// RLock is a blocking call to try and take a shared file lock.
+// It will wait until it is able to obtain the shared file lock.
+// It's recommended that TryRLock() be used over this function.
+// This function may block the ability to query the current Locked() or RLocked() status due to a RW-mutex lock.
+//
+// If we are already shared-locked, this function short-circuits and
+// returns immediately assuming it can take the mutex lock.
+func (f *Flock) RLock() error {
+ return f.lock(&f.r, readLock)
+}
+
+func (f *Flock) lock(locked *bool, flag lockType) error {
+ f.m.Lock()
+ defer f.m.Unlock()
+
+ if *locked {
+ return nil
+ }
+
+ if f.fh == nil {
+ if err := f.setFh(f.flag); err != nil {
+ return err
+ }
+
+ defer f.ensureFhState()
+ }
+
+ _, err := f.doLock(waitLock, flag, true)
+ if err != nil {
+ return err
+ }
+
+ *locked = true
+
+ return nil
+}
+
+// https://github.com/golang/go/blob/09aeb6e33ab426eff4676a3baf694d5a3019e9fc/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go#L48
+func (f *Flock) doLock(cmd cmdType, lt lockType, blocking bool) (bool, error) {
+ // POSIX locks apply per inode and process,
+ // and the lock for an inode is released when *any* descriptor for that inode is closed.
+ // So we need to synchronize access to each inode internally,
+ // and must serialize lock and unlock calls that refer to the same inode through different descriptors.
+ fi, err := f.fh.Stat()
+ if err != nil {
+ return false, err
+ }
+
+ // Note(ldez): don't replace `syscall.Stat_t` by `unix.Stat_t` because `FileInfo.Sys()` returns `syscall.Stat_t`
+ ino := fi.Sys().(*syscall.Stat_t).Ino
+
+ mu.Lock()
+
+ if i, dup := inodes[f]; dup && i != ino {
+ mu.Unlock()
+ return false, &fs.PathError{
+ Op: lt.String(),
+ Path: f.Path(),
+ Err: errors.New("inode for file changed since last Lock or RLock"),
+ }
+ }
+
+ inodes[f] = ino
+
+ var wait chan *Flock
+
+ l := locks[ino]
+
+ switch {
+ case l.owner == f:
+ // This file already owns the lock, but the call may change its lock type.
+ case l.owner == nil:
+ // No owner: it's ours now.
+ l.owner = f
+
+ case !blocking:
+ // Already owned: cannot take the lock.
+ mu.Unlock()
+ return false, nil
+
+ default:
+ // Already owned: add a channel to wait on.
+ wait = make(chan *Flock)
+ l.queue = append(l.queue, wait)
+ }
+
+ locks[ino] = l
+
+ mu.Unlock()
+
+ if wait != nil {
+ wait <- f
+ }
+
+ // Spurious EDEADLK errors arise on platforms that compute deadlock graphs at
+ // the process, rather than thread, level. Consider processes P and Q, with
+ // threads P.1, P.2, and Q.3. The following trace is NOT a deadlock, but will be
+ // reported as a deadlock on systems that consider only process granularity:
+ //
+ // P.1 locks file A.
+ // Q.3 locks file B.
+ // Q.3 blocks on file A.
+ // P.2 blocks on file B. (This is erroneously reported as a deadlock.)
+ // P.1 unlocks file A.
+ // Q.3 unblocks and locks file A.
+ // Q.3 unlocks files A and B.
+ // P.2 unblocks and locks file B.
+ // P.2 unlocks file B.
+ //
+ // These spurious errors were observed in practice on AIX and Solaris in
+ // cmd/go: see https://golang.org/issue/32817.
+ //
+ // We work around this bug by treating EDEADLK as always spurious. If there
+ // really is a lock-ordering bug between the interacting processes, it will
+ // become a livelock instead, but that's not appreciably worse than if we had
+ // a proper flock implementation (which generally does not even attempt to
+ // diagnose deadlocks).
+ //
+ // In the above example, that changes the trace to:
+ //
+ // P.1 locks file A.
+ // Q.3 locks file B.
+ // Q.3 blocks on file A.
+ // P.2 spuriously fails to lock file B and goes to sleep.
+ // P.1 unlocks file A.
+ // Q.3 unblocks and locks file A.
+ // Q.3 unlocks files A and B.
+ // P.2 wakes up and locks file B.
+ // P.2 unlocks file B.
+ //
+ // We know that the retry loop will not introduce a *spurious* livelock
+ // because, according to the POSIX specification, EDEADLK is only to be
+ // returned when “the lock is blocked by a lock from another process”.
+ // If that process is blocked on some lock that we are holding, then the
+ // resulting livelock is due to a real deadlock (and would manifest as such
+ // when using, for example, the flock implementation of this package).
+ // If the other process is *not* blocked on some other lock that we are
+ // holding, then it will eventually release the requested lock.
+
+ nextSleep := 1 * time.Millisecond
+ const maxSleep = 500 * time.Millisecond
+ for {
+ err = setlkw(f.fh.Fd(), cmd, lt)
+ if !errors.Is(err, unix.EDEADLK) {
+ break
+ }
+
+ time.Sleep(nextSleep)
+
+ nextSleep += nextSleep
+ if nextSleep > maxSleep {
+ nextSleep = maxSleep
+ }
+ // Apply 10% jitter to avoid synchronizing collisions when we finally unblock.
+ nextSleep += time.Duration((0.1*rand.Float64() - 0.05) * float64(nextSleep))
+ }
+
+ if err != nil {
+ f.doUnlock()
+
+ if cmd == tryLock && errors.Is(err, unix.EACCES) {
+ return false, nil
+ }
+
+ return false, &fs.PathError{
+ Op: lt.String(),
+ Path: f.Path(),
+ Err: err,
+ }
+ }
+
+ return true, nil
+}
+
+func (f *Flock) Unlock() error {
+ f.m.Lock()
+ defer f.m.Unlock()
+
+ // If we aren't locked or if the lockfile instance is nil
+ // just return a nil error because we are unlocked.
+ if (!f.l && !f.r) || f.fh == nil {
+ return nil
+ }
+
+ if err := f.doUnlock(); err != nil {
+ return err
+ }
+
+ f.reset()
+
+ return nil
+}
+
+// https://github.com/golang/go/blob/09aeb6e33ab426eff4676a3baf694d5a3019e9fc/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go#L163
+func (f *Flock) doUnlock() (err error) {
+ var owner *Flock
+
+ mu.Lock()
+
+ ino, ok := inodes[f]
+ if ok {
+ owner = locks[ino].owner
+ }
+
+ mu.Unlock()
+
+ if owner == f {
+ err = setlkw(f.fh.Fd(), waitLock, unix.F_UNLCK)
+ }
+
+ mu.Lock()
+
+ l := locks[ino]
+
+ if len(l.queue) == 0 {
+ // No waiters: remove the map entry.
+ delete(locks, ino)
+ } else {
+ // The first waiter is sending us their file now.
+ // Receive it and update the queue.
+ l.owner = <-l.queue[0]
+ l.queue = l.queue[1:]
+ locks[ino] = l
+ }
+
+ delete(inodes, f)
+
+ mu.Unlock()
+
+ return err
+}
+
+// TryLock is the preferred function for taking an exclusive file lock.
+// This function takes an RW-mutex lock before it tries to lock the file,
+// so there is the possibility that this function may block for a short time
+// if another goroutine is trying to take any action.
+//
+// The actual file lock is non-blocking.
+// If we are unable to get the exclusive file lock,
+// the function will return false instead of waiting for the lock.
+// If we get the lock, we also set the *Flock instance as being exclusive-locked.
+func (f *Flock) TryLock() (bool, error) {
+ return f.try(&f.l, writeLock)
+}
+
+// TryRLock is the preferred function for taking a shared file lock.
+// This function takes an RW-mutex lock before it tries to lock the file,
+// so there is the possibility that this function may block for a short time
+// if another goroutine is trying to take any action.
+//
+// The actual file lock is non-blocking.
+// If we are unable to get the shared file lock,
+// the function will return false instead of waiting for the lock.
+// If we get the lock, we also set the *Flock instance as being share-locked.
+func (f *Flock) TryRLock() (bool, error) {
+ return f.try(&f.r, readLock)
+}
+
+func (f *Flock) try(locked *bool, flag lockType) (bool, error) {
+ f.m.Lock()
+ defer f.m.Unlock()
+
+ if *locked {
+ return true, nil
+ }
+
+ if f.fh == nil {
+ if err := f.setFh(f.flag); err != nil {
+ return false, err
+ }
+
+ defer f.ensureFhState()
+ }
+
+ hasLock, err := f.doLock(tryLock, flag, false)
+ if err != nil {
+ return false, err
+ }
+
+ *locked = hasLock
+
+ return hasLock, nil
+}
+
+// setlkw calls FcntlFlock with cmd for the entire file indicated by fd.
+// https://github.com/golang/go/blob/09aeb6e33ab426eff4676a3baf694d5a3019e9fc/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go#L198
+func setlkw(fd uintptr, cmd cmdType, lt lockType) error {
+ for {
+ err := unix.FcntlFlock(fd, int(cmd), &unix.Flock_t{
+ Type: int16(lt),
+ Whence: io.SeekStart,
+ Start: 0,
+ Len: 0, // All bytes.
+ })
+ if !errors.Is(err, unix.EINTR) {
+ return err
+ }
+ }
+}
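
The EDEADLK workaround in `doLock` above retries with exponential backoff capped at 500ms, plus jitter spanning 10% of the delay (±5%). A standalone sketch of that same schedule, extracted for clarity — the helper name is illustrative, not part of the package:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nextDelay reproduces the backoff schedule used in doLock: double the
// delay, cap it at 500ms, then add jitter spanning 10% of the delay.
func nextDelay(cur time.Duration) time.Duration {
	const maxSleep = 500 * time.Millisecond

	next := cur * 2
	if next > maxSleep {
		next = maxSleep
	}
	// 0.1*rand.Float64() is in [0, 0.1); subtracting 0.05 centers the
	// jitter at zero, i.e. roughly ±5% of the current delay.
	next += time.Duration((0.1*rand.Float64() - 0.05) * float64(next))
	return next
}

func main() {
	d := 1 * time.Millisecond
	for i := 0; i < 10; i++ {
		fmt.Println(d)
		d = nextDelay(d)
	}
}
```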
diff --git a/vendor/github.com/gofrs/flock/flock_winapi.go b/vendor/github.com/gofrs/flock/flock_winapi.go
deleted file mode 100644
index fe405a255..000000000
--- a/vendor/github.com/gofrs/flock/flock_winapi.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2015 Tim Heckman. All rights reserved.
-// Use of this source code is governed by the BSD 3-Clause
-// license that can be found in the LICENSE file.
-
-// +build windows
-
-package flock
-
-import (
- "syscall"
- "unsafe"
-)
-
-var (
- kernel32, _ = syscall.LoadLibrary("kernel32.dll")
- procLockFileEx, _ = syscall.GetProcAddress(kernel32, "LockFileEx")
- procUnlockFileEx, _ = syscall.GetProcAddress(kernel32, "UnlockFileEx")
-)
-
-const (
- winLockfileFailImmediately = 0x00000001
- winLockfileExclusiveLock = 0x00000002
- winLockfileSharedLock = 0x00000000
-)
-
-// Use of 0x00000000 for the shared lock is a guess based on some the MS Windows
-// `LockFileEX` docs, which document the `LOCKFILE_EXCLUSIVE_LOCK` flag as:
-//
-// > The function requests an exclusive lock. Otherwise, it requests a shared
-// > lock.
-//
-// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
-
-func lockFileEx(handle syscall.Handle, flags uint32, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) {
- r1, _, errNo := syscall.Syscall6(
- uintptr(procLockFileEx),
- 6,
- uintptr(handle),
- uintptr(flags),
- uintptr(reserved),
- uintptr(numberOfBytesToLockLow),
- uintptr(numberOfBytesToLockHigh),
- uintptr(unsafe.Pointer(offset)))
-
- if r1 != 1 {
- if errNo == 0 {
- return false, syscall.EINVAL
- }
-
- return false, errNo
- }
-
- return true, 0
-}
-
-func unlockFileEx(handle syscall.Handle, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) {
- r1, _, errNo := syscall.Syscall6(
- uintptr(procUnlockFileEx),
- 5,
- uintptr(handle),
- uintptr(reserved),
- uintptr(numberOfBytesToLockLow),
- uintptr(numberOfBytesToLockHigh),
- uintptr(unsafe.Pointer(offset)),
- 0)
-
- if r1 != 1 {
- if errNo == 0 {
- return false, syscall.EINVAL
- }
-
- return false, errNo
- }
-
- return true, 0
-}
diff --git a/vendor/github.com/gofrs/flock/flock_windows.go b/vendor/github.com/gofrs/flock/flock_windows.go
index ddb534cce..dfd31e15f 100644
--- a/vendor/github.com/gofrs/flock/flock_windows.go
+++ b/vendor/github.com/gofrs/flock/flock_windows.go
@@ -1,35 +1,48 @@
// Copyright 2015 Tim Heckman. All rights reserved.
+// Copyright 2018-2024 The Gofrs. All rights reserved.
// Use of this source code is governed by the BSD 3-Clause
// license that can be found in the LICENSE file.
+//go:build windows
+
package flock
import (
- "syscall"
+ "errors"
+
+ "golang.org/x/sys/windows"
)
-// ErrorLockViolation is the error code returned from the Windows syscall when a
-// lock would block and you ask to fail immediately.
-const ErrorLockViolation syscall.Errno = 0x21 // 33
+// Use of 0x00000000 for the shared lock is a guess based on some of the MS Windows `LockFileEx` docs,
+// which document the `LOCKFILE_EXCLUSIVE_LOCK` flag as:
+//
+// > The function requests an exclusive lock. Otherwise, it requests a shared lock.
+//
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
+const winLockfileSharedLock = 0x00000000
+
+// ErrorLockViolation is the error code returned from the Windows syscall when a lock would block,
+// and you ask to fail immediately.
+const ErrorLockViolation windows.Errno = 0x21 // 33
-// Lock is a blocking call to try and take an exclusive file lock. It will wait
-// until it is able to obtain the exclusive file lock. It's recommended that
-// TryLock() be used over this function. This function may block the ability to
-// query the current Locked() or RLocked() status due to a RW-mutex lock.
+// Lock is a blocking call to try and take an exclusive file lock.
+// It will wait until it is able to obtain the exclusive file lock.
+// It's recommended that TryLock() be used over this function.
+// This function may block the ability to query the current Locked() or RLocked() status due to a RW-mutex lock.
//
-// If we are already locked, this function short-circuits and returns
-// immediately assuming it can take the mutex lock.
+// If we are already locked, this function short-circuits and
+// returns immediately assuming it can take the mutex lock.
func (f *Flock) Lock() error {
- return f.lock(&f.l, winLockfileExclusiveLock)
+ return f.lock(&f.l, windows.LOCKFILE_EXCLUSIVE_LOCK)
}
-// RLock is a blocking call to try and take a shared file lock. It will wait
-// until it is able to obtain the shared file lock. It's recommended that
-// TryRLock() be used over this function. This function may block the ability to
-// query the current Locked() or RLocked() status due to a RW-mutex lock.
+// RLock is a blocking call to try and take a shared file lock.
+// It will wait until it is able to obtain the shared file lock.
+// It's recommended that TryRLock() be used over this function.
+// This function may block the ability to query the current Locked() or RLocked() status due to a RW-mutex lock.
//
-// If we are already locked, this function short-circuits and returns
-// immediately assuming it can take the mutex lock.
+// If we are already locked, this function short-circuits and
+// returns immediately assuming it can take the mutex lock.
func (f *Flock) RLock() error {
return f.lock(&f.r, winLockfileSharedLock)
}
@@ -43,26 +56,31 @@ func (f *Flock) lock(locked *bool, flag uint32) error {
}
if f.fh == nil {
- if err := f.setFh(); err != nil {
+ if err := f.setFh(f.flag); err != nil {
return err
}
+
defer f.ensureFhState()
}
- if _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}); errNo > 0 {
- return errNo
+ err := windows.LockFileEx(windows.Handle(f.fh.Fd()), flag, 0, 1, 0, &windows.Overlapped{})
+ if err != nil && !errors.Is(err, windows.Errno(0)) {
+ return err
}
*locked = true
+
return nil
}
-// Unlock is a function to unlock the file. This file takes a RW-mutex lock, so
-// while it is running the Locked() and RLocked() functions will be blocked.
+// Unlock is a function to unlock the file.
+// This file takes a RW-mutex lock,
+// so while it is running the Locked() and RLocked() functions will be blocked.
//
-// This function short-circuits if we are unlocked already. If not, it calls
-// UnlockFileEx() on the file and closes the file descriptor. It does not remove
-// the file from disk. It's up to your application to do.
+// This function short-circuits if we are unlocked already.
+// If not, it calls UnlockFileEx() on the file and closes the file descriptor.
+// It does not remove the file from disk.
+// It's up to your application to do so.
func (f *Flock) Unlock() error {
f.m.Lock()
defer f.m.Unlock()
@@ -74,39 +92,37 @@ func (f *Flock) Unlock() error {
}
// mark the file as unlocked
- if _, errNo := unlockFileEx(syscall.Handle(f.fh.Fd()), 0, 1, 0, &syscall.Overlapped{}); errNo > 0 {
- return errNo
+ err := windows.UnlockFileEx(windows.Handle(f.fh.Fd()), 0, 1, 0, &windows.Overlapped{})
+ if err != nil && !errors.Is(err, windows.Errno(0)) {
+ return err
}
- f.fh.Close()
-
- f.l = false
- f.r = false
- f.fh = nil
+ f.reset()
return nil
}
-// TryLock is the preferred function for taking an exclusive file lock. This
-// function does take a RW-mutex lock before it tries to lock the file, so there
-// is the possibility that this function may block for a short time if another
-// goroutine is trying to take any action.
+// TryLock is the preferred function for taking an exclusive file lock.
+// This function does take a RW-mutex lock before it tries to lock the file,
+// so there is the possibility that this function may block for a short time
+// if another goroutine is trying to take any action.
//
-// The actual file lock is non-blocking. If we are unable to get the exclusive
-// file lock, the function will return false instead of waiting for the lock. If
-// we get the lock, we also set the *Flock instance as being exclusive-locked.
+// The actual file lock is non-blocking.
+// If we are unable to get the exclusive file lock,
+// the function will return false instead of waiting for the lock.
+// If we get the lock, we also set the *Flock instance as being exclusive-locked.
func (f *Flock) TryLock() (bool, error) {
- return f.try(&f.l, winLockfileExclusiveLock)
+ return f.try(&f.l, windows.LOCKFILE_EXCLUSIVE_LOCK)
}
-// TryRLock is the preferred function for taking a shared file lock. This
-// function does take a RW-mutex lock before it tries to lock the file, so there
-// is the possibility that this function may block for a short time if another
-// goroutine is trying to take any action.
+// TryRLock is the preferred function for taking a shared file lock.
+// This function does take a RW-mutex lock before it tries to lock the file,
+// so there is the possibility that this function may block for a short time
+// if another goroutine is trying to take any action.
//
-// The actual file lock is non-blocking. If we are unable to get the shared file
-// lock, the function will return false instead of waiting for the lock. If we
-// get the lock, we also set the *Flock instance as being shared-locked.
+// The actual file lock is non-blocking.
+// If we are unable to get the shared file lock,
+// the function will return false instead of waiting for the lock.
+// If we get the lock, we also set the *Flock instance as being shared-locked.
func (f *Flock) TryRLock() (bool, error) {
return f.try(&f.r, winLockfileSharedLock)
}
@@ -120,20 +136,20 @@ func (f *Flock) try(locked *bool, flag uint32) (bool, error) {
}
if f.fh == nil {
- if err := f.setFh(); err != nil {
+ if err := f.setFh(f.flag); err != nil {
return false, err
}
+
defer f.ensureFhState()
}
- _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag|winLockfileFailImmediately, 0, 1, 0, &syscall.Overlapped{})
-
- if errNo > 0 {
- if errNo == ErrorLockViolation || errNo == syscall.ERROR_IO_PENDING {
+ err := windows.LockFileEx(windows.Handle(f.fh.Fd()), flag|windows.LOCKFILE_FAIL_IMMEDIATELY, 0, 1, 0, &windows.Overlapped{})
+ if err != nil && !errors.Is(err, windows.Errno(0)) {
+ if errors.Is(err, ErrorLockViolation) || errors.Is(err, windows.ERROR_IO_PENDING) {
return false, nil
}
- return false, errNo
+ return false, err
}
*locked = true
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
index d127d4362..def01a6be 100644
--- a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
+++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
@@ -19,6 +19,7 @@ const (
tbFunc // func(T) bool
ttbFunc // func(T, T) bool
+ ttiFunc // func(T, T) int
trbFunc // func(T, R) bool
tibFunc // func(T, I) bool
trFunc // func(T) R
@@ -28,11 +29,13 @@ const (
Transformer = trFunc // func(T) R
ValueFilter = ttbFunc // func(T, T) bool
Less = ttbFunc // func(T, T) bool
+ Compare = ttiFunc // func(T, T) int
ValuePredicate = tbFunc // func(T) bool
KeyValuePredicate = trbFunc // func(T, R) bool
)
var boolType = reflect.TypeOf(true)
+var intType = reflect.TypeOf(0)
// IsType reports whether the reflect.Type is of the specified function type.
func IsType(t reflect.Type, ft funcType) bool {
@@ -49,6 +52,10 @@ func IsType(t reflect.Type, ft funcType) bool {
if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
return true
}
+ case ttiFunc: // func(T, T) int
+ if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == intType {
+ return true
+ }
case trbFunc: // func(T, R) bool
if ni == 2 && no == 1 && t.Out(0) == boolType {
return true
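A hedged sketch of what the new `ttiFunc`/`Compare` function type enables at the `cmpopts` level in go-cmp v0.7.0: three-way comparators such as `strings.Compare` can now be passed where only less-functions were accepted before.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	x := []string{"b", "a", "c"}
	y := []string{"c", "b", "a"}

	// SortSlices accepts a func(T, T) int comparator, matching the ttiFunc shape above.
	opt := cmpopts.SortSlices(strings.Compare)
	fmt.Println(cmp.Equal(x, y, opt)) // true: equal up to ordering
}
```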
diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go
index 754496f3b..ba3fce81f 100644
--- a/vendor/github.com/google/go-cmp/cmp/options.go
+++ b/vendor/github.com/google/go-cmp/cmp/options.go
@@ -232,7 +232,15 @@ func (validator) apply(s *state, vx, vy reflect.Value) {
if t := s.curPath.Index(-2).Type(); t.Name() != "" {
// Named type with unexported fields.
name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
- if _, ok := reflect.New(t).Interface().(error); ok {
+ isProtoMessage := func(t reflect.Type) bool {
+ m, ok := reflect.PointerTo(t).MethodByName("ProtoReflect")
+ return ok && m.Type.NumIn() == 1 && m.Type.NumOut() == 1 &&
+ m.Type.Out(0).PkgPath() == "google.golang.org/protobuf/reflect/protoreflect" &&
+ m.Type.Out(0).Name() == "Message"
+ }
+ if isProtoMessage(t) {
+ help = `consider using "google.golang.org/protobuf/testing/protocmp".Transform to compare proto.Message types`
+ } else if _, ok := reflect.New(t).Interface().(error); ok {
help = "consider using cmpopts.EquateErrors to compare error values"
} else if t.Comparable() {
help = "consider using cmpopts.EquateComparable to compare comparable Go types"
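The help strings above fire when cmp meets unexported fields; a minimal sketch of the `cmpopts.EquateComparable` route, using `netip.Addr` as an example of a comparable type with unexported fields:

```go
package main

import (
	"fmt"
	"net/netip"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	a := netip.MustParseAddr("10.0.0.1")
	b := netip.MustParseAddr("10.0.0.1")

	// Without an option, cmp.Equal panics on netip.Addr's unexported fields
	// and prints one of the validator help strings shown above.
	fmt.Println(cmp.Equal(a, b, cmpopts.EquateComparable(netip.Addr{})))
}
```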
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index de264c85a..244ee19c4 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -14,8 +14,34 @@ This package provides various compression algorithms.
[](https://github.com/klauspost/compress/actions/workflows/go.yml)
[](https://sourcegraph.com/github.com/klauspost/compress?badge)
+# package usage
+
+Use `go get github.com/klauspost/compress@latest` to add it to your project.
+
+This package will support the current Go version and 2 versions back.
+
+* Use the `nounsafe` tag to disable all use of the "unsafe" package.
+* Use the `noasm` tag to disable all assembly across packages.
+
+Use the links above for more information on each.
+
# changelog
+* Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0)
+ * Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036
+ * fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028
+ * flate: Simplify L4-6 loading https://github.com/klauspost/compress/pull/1043
+ * flate: Simplify matchlen (remove asm) https://github.com/klauspost/compress/pull/1045
+ * s2: Improve small block compression speed w/o asm https://github.com/klauspost/compress/pull/1048
+ * flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049
+ * flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050
+
+* Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11)
+ * zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017
+ * s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014
+ * gzhttp: No content-type on no body response code by @juliens in https://github.com/klauspost/compress/pull/1011
+ * gzhttp: Do not set the content-type when response has no body by @kevinpollet in https://github.com/klauspost/compress/pull/1013
+
* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10)
* gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978
* gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002
@@ -65,9 +91,9 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876
* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1)
- * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871
+ * s2: Fix S2 "best" dictionary wrong encoding https://github.com/klauspost/compress/pull/871
* flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869
- * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867
+ * s2: Fix EstimateBlockSize on 6&7 length input https://github.com/klauspost/compress/pull/867
* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0)
* Add experimental dictionary builder https://github.com/klauspost/compress/pull/853
@@ -124,7 +150,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
See changes to v1.15.x
* Jan 21st, 2023 (v1.15.15)
- * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
+ * deflate: Improve level 7-9 https://github.com/klauspost/compress/pull/739
* zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
* zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745
* gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740
@@ -167,7 +193,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645
* zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644
- * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643
+ * zstd: Allow single segments up to "max decoded size" https://github.com/klauspost/compress/pull/643
* July 13, 2022 (v1.15.8)
@@ -209,7 +235,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599
* zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593
* huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586
- * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590
+ * flate: Inplace hashing for level 7-9 https://github.com/klauspost/compress/pull/590
* May 11, 2022 (v1.15.4)
@@ -236,12 +262,12 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
* Mar 3, 2022 (v1.15.0)
- * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
- * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
+ * zstd: Refactor decoder [#498](https://github.com/klauspost/compress/pull/498)
+ * zstd: Add stream encoding without goroutines [#505](https://github.com/klauspost/compress/pull/505)
* huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507)
- * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
- * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
- * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
+ * flate: Inline literal emission [#509](https://github.com/klauspost/compress/pull/509)
+ * gzhttp: Add zstd to transport [#400](https://github.com/klauspost/compress/pull/400)
+ * gzhttp: Make content-type optional [#510](https://github.com/klauspost/compress/pull/510)
Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
@@ -258,7 +284,7 @@ While the release has been extensively tested, it is recommended to testing when
* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
* zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501
- * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
+ * huff0: Use static decompression buffer up to 30% faster [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
* Feb 17, 2022 (v1.14.3)
* flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
@@ -565,12 +591,14 @@ While the release has been extensively tested, it is recommended to testing when
The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
-| old import | new import | Documentation
-|--------------------|-----------------------------------------|--------------------|
-| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc)
-| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc)
-| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc)
-| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc)
+Typical speed is about 2x of the standard library packages.
+
+| old import | new import | Documentation |
+|------------------|---------------------------------------|-------------------------------------------------------------------------|
+| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) |
+| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) |
+| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) |
+| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) |
* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
@@ -625,84 +653,6 @@ This will only use up to 4KB in memory when the writer is idle.
Compression is almost always worse than the fastest compression level
and each write will allocate (a little) memory.
-# Performance Update 2018
-
-It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
-
-The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates i could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
-
-The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.
-
-The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only uses one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) uses all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
-
-
-## Overall differences.
-
-There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
-
-The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted by library to give a smoother transition between the compression levels than the standard library.
-
-This package attempts to provide a more smooth transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between gives something reasonable in between. The standard library has big differences in levels 1-4, but levels 5-9 having no significant gains - often spending a lot more time than can be justified by the achieved compression.
-
-There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
-
-## Web Content
-
-This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS.
-
-Since level 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big.
-
-Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
-
-## Object files
-
-This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
-
-The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but is sacrificing quite a bit of compression.
-
-The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively.
-
-## Highly Compressible File
-
-This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc.
-
-It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5 and levels 7 and 8 offering great speed for the achieved compression.
-
-So if you know you content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
-
-## Medium-High Compressible
-
-This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams.
-
-We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
-
-## Medium Compressible
-
-I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
-
-The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
-
-
-## Un-compressible Content
-
-This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
-
-
-## Huffman only compression
-
-This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reduce the number of bits to represent each character.
-
-This means that often used characters, like 'e' and ' ' (space) in text use the fewest bits to represent, and rare characters like '¤' takes more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
-
-Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
-
-The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
-
-The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder to slow down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup.
-
-For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
-
-This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
# Other packages
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
index e36d9742f..bfc7a523d 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitreader.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -6,10 +6,11 @@
package huff0
import (
- "encoding/binary"
"errors"
"fmt"
"io"
+
+ "github.com/klauspost/compress/internal/le"
)
// bitReader reads a bitstream in reverse.
@@ -46,7 +47,7 @@ func (b *bitReaderBytes) init(in []byte) error {
return nil
}
-// peekBitsFast requires that at least one bit is requested every time.
+// peekByteFast requires that at least one byte is requested every time.
// There are no checks if the buffer is filled.
func (b *bitReaderBytes) peekByteFast() uint8 {
got := uint8(b.value >> 56)
@@ -66,8 +67,7 @@ func (b *bitReaderBytes) fillFast() {
}
// 2 bounds checks.
- v := b.in[b.off-4 : b.off]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ low := le.Load32(b.in, b.off-4)
b.value |= uint64(low) << (b.bitsRead - 32)
b.bitsRead -= 32
b.off -= 4
@@ -76,7 +76,7 @@ func (b *bitReaderBytes) fillFast() {
// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read.
func (b *bitReaderBytes) fillFastStart() {
// Do single re-slice to avoid bounds checks.
- b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+ b.value = le.Load64(b.in, b.off-8)
b.bitsRead = 0
b.off -= 8
}
@@ -86,9 +86,8 @@ func (b *bitReaderBytes) fill() {
if b.bitsRead < 32 {
return
}
- if b.off > 4 {
- v := b.in[b.off-4 : b.off]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ if b.off >= 4 {
+ low := le.Load32(b.in, b.off-4)
b.value |= uint64(low) << (b.bitsRead - 32)
b.bitsRead -= 32
b.off -= 4
@@ -175,9 +174,7 @@ func (b *bitReaderShifted) fillFast() {
return
}
- // 2 bounds checks.
- v := b.in[b.off-4 : b.off]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ low := le.Load32(b.in, b.off-4)
b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
b.bitsRead -= 32
b.off -= 4
@@ -185,8 +182,7 @@ func (b *bitReaderShifted) fillFast() {
// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read.
func (b *bitReaderShifted) fillFastStart() {
- // Do single re-slice to avoid bounds checks.
- b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+ b.value = le.Load64(b.in, b.off-8)
b.bitsRead = 0
b.off -= 8
}
@@ -197,8 +193,7 @@ func (b *bitReaderShifted) fill() {
return
}
if b.off > 4 {
- v := b.in[b.off-4 : b.off]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ low := le.Load32(b.in, b.off-4)
b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
b.bitsRead -= 32
b.off -= 4
diff --git a/vendor/github.com/klauspost/compress/internal/le/le.go b/vendor/github.com/klauspost/compress/internal/le/le.go
new file mode 100644
index 000000000..e54909e16
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/le/le.go
@@ -0,0 +1,5 @@
+package le
+
+type Indexer interface {
+ int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64
+}
diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go
new file mode 100644
index 000000000..0cfb5c0e2
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go
@@ -0,0 +1,42 @@
+//go:build !(amd64 || arm64 || ppc64le || riscv64) || nounsafe || purego || appengine
+
+package le
+
+import (
+ "encoding/binary"
+)
+
+// Load8 will load from b at index i.
+func Load8[I Indexer](b []byte, i I) byte {
+ return b[i]
+}
+
+// Load16 will load from b at index i.
+func Load16[I Indexer](b []byte, i I) uint16 {
+ return binary.LittleEndian.Uint16(b[i:])
+}
+
+// Load32 will load from b at index i.
+func Load32[I Indexer](b []byte, i I) uint32 {
+ return binary.LittleEndian.Uint32(b[i:])
+}
+
+// Load64 will load from b at index i.
+func Load64[I Indexer](b []byte, i I) uint64 {
+ return binary.LittleEndian.Uint64(b[i:])
+}
+
+// Store16 will store v at b.
+func Store16(b []byte, v uint16) {
+ binary.LittleEndian.PutUint16(b, v)
+}
+
+// Store32 will store v at b.
+func Store32(b []byte, v uint32) {
+ binary.LittleEndian.PutUint32(b, v)
+}
+
+// Store64 will store v at b.
+func Store64(b []byte, v uint64) {
+ binary.LittleEndian.PutUint64(b, v)
+}
diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go
new file mode 100644
index 000000000..ada45cd90
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go
@@ -0,0 +1,55 @@
+// We enable 64 bit LE platforms:
+
+//go:build (amd64 || arm64 || ppc64le || riscv64) && !nounsafe && !purego && !appengine
+
+package le
+
+import (
+ "unsafe"
+)
+
+// Load8 will load from b at index i.
+func Load8[I Indexer](b []byte, i I) byte {
+	//return b[i]
+	//return *(*byte)(unsafe.Pointer(&b[i]))
+ return *(*byte)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Load16 will load from b at index i.
+func Load16[I Indexer](b []byte, i I) uint16 {
+ //return binary.LittleEndian.Uint16(b[i:])
+ //return *(*uint16)(unsafe.Pointer(&b[i]))
+ return *(*uint16)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Load32 will load from b at index i.
+func Load32[I Indexer](b []byte, i I) uint32 {
+ //return binary.LittleEndian.Uint32(b[i:])
+ //return *(*uint32)(unsafe.Pointer(&b[i]))
+ return *(*uint32)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Load64 will load from b at index i.
+func Load64[I Indexer](b []byte, i I) uint64 {
+ //return binary.LittleEndian.Uint64(b[i:])
+ //return *(*uint64)(unsafe.Pointer(&b[i]))
+ return *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Store16 will store v at b.
+func Store16(b []byte, v uint16) {
+ //binary.LittleEndian.PutUint16(b, v)
+ *(*uint16)(unsafe.Pointer(unsafe.SliceData(b))) = v
+}
+
+// Store32 will store v at b.
+func Store32(b []byte, v uint32) {
+ //binary.LittleEndian.PutUint32(b, v)
+ *(*uint32)(unsafe.Pointer(unsafe.SliceData(b))) = v
+}
+
+// Store64 will store v at b.
+func Store64(b []byte, v uint64) {
+ //binary.LittleEndian.PutUint64(b, v)
+ *(*uint64)(unsafe.Pointer(unsafe.SliceData(b))) = v
+}
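Since `internal/le` cannot be imported from outside the module, this is illustrative only: both build variants must agree with the stdlib little-endian reference behavior, e.g.:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	b := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}
	// What le.Load32(b, 4) returns on every platform and build tag:
	fmt.Printf("%#x\n", binary.LittleEndian.Uint32(b[4:])) // 0x8070605
}
```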
diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod
index 5a4412f90..81bda5e29 100644
--- a/vendor/github.com/klauspost/compress/s2sx.mod
+++ b/vendor/github.com/klauspost/compress/s2sx.mod
@@ -1,4 +1,3 @@
module github.com/klauspost/compress
-go 1.19
-
+go 1.22
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index 92e2347bb..c11d7fa28 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -6,7 +6,7 @@ A high performance compression algorithm is implemented. For now focused on spee
This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content.
-This package is pure Go and without use of "unsafe".
+This package is pure Go. Use `noasm` and `nounsafe` to disable relevant features.
The `zstd` package is provided as open source software using a Go standard license.
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go
index 25ca98394..d41e3e170 100644
--- a/vendor/github.com/klauspost/compress/zstd/bitreader.go
+++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go
@@ -5,11 +5,12 @@
package zstd
import (
- "encoding/binary"
"errors"
"fmt"
"io"
"math/bits"
+
+ "github.com/klauspost/compress/internal/le"
)
// bitReader reads a bitstream in reverse.
@@ -18,6 +19,7 @@ import (
type bitReader struct {
in []byte
value uint64 // Maybe use [16]byte, but shifting is awkward.
+ cursor int // offset where next read should end
bitsRead uint8
}
@@ -32,6 +34,7 @@ func (b *bitReader) init(in []byte) error {
if v == 0 {
return errors.New("corrupt stream, did not find end of stream")
}
+ b.cursor = len(in)
b.bitsRead = 64
b.value = 0
if len(in) >= 8 {
@@ -67,18 +70,15 @@ func (b *bitReader) fillFast() {
if b.bitsRead < 32 {
return
}
- v := b.in[len(b.in)-4:]
- b.in = b.in[:len(b.in)-4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- b.value = (b.value << 32) | uint64(low)
+ b.cursor -= 4
+ b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor))
b.bitsRead -= 32
}
// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
func (b *bitReader) fillFastStart() {
- v := b.in[len(b.in)-8:]
- b.in = b.in[:len(b.in)-8]
- b.value = binary.LittleEndian.Uint64(v)
+ b.cursor -= 8
+ b.value = le.Load64(b.in, b.cursor)
b.bitsRead = 0
}
@@ -87,25 +87,23 @@ func (b *bitReader) fill() {
if b.bitsRead < 32 {
return
}
- if len(b.in) >= 4 {
- v := b.in[len(b.in)-4:]
- b.in = b.in[:len(b.in)-4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- b.value = (b.value << 32) | uint64(low)
+ if b.cursor >= 4 {
+ b.cursor -= 4
+ b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor))
b.bitsRead -= 32
return
}
- b.bitsRead -= uint8(8 * len(b.in))
- for len(b.in) > 0 {
- b.value = (b.value << 8) | uint64(b.in[len(b.in)-1])
- b.in = b.in[:len(b.in)-1]
+ b.bitsRead -= uint8(8 * b.cursor)
+ for b.cursor > 0 {
+ b.cursor -= 1
+ b.value = (b.value << 8) | uint64(b.in[b.cursor])
}
}
// finished returns true if all bits have been read from the bit stream.
func (b *bitReader) finished() bool {
- return len(b.in) == 0 && b.bitsRead >= 64
+ return b.cursor == 0 && b.bitsRead >= 64
}
// overread returns true if more bits have been requested than is on the stream.
@@ -115,13 +113,14 @@ func (b *bitReader) overread() bool {
// remain returns the number of bits remaining.
func (b *bitReader) remain() uint {
- return 8*uint(len(b.in)) + 64 - uint(b.bitsRead)
+ return 8*uint(b.cursor) + 64 - uint(b.bitsRead)
}
// close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReader) close() error {
// Release reference.
b.in = nil
+ b.cursor = 0
if !b.finished() {
return fmt.Errorf("%d extra bits on block, should be 0", b.remain())
}
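A minimal sketch of the cursor pattern this hunk introduces: walking an index down instead of re-slicing `b.in`, so the buffer reference stays intact until `close()` releases it.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	in := []byte{1, 2, 3, 4, 5, 6, 7, 8}
	// Refill 4 bytes at a time from the end, as the reworked bitReader does.
	for cursor := len(in); cursor >= 4; {
		cursor -= 4
		fmt.Printf("%#010x\n", binary.LittleEndian.Uint32(in[cursor:]))
	}
}
```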
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 9c28840c3..0dd742fd2 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -5,14 +5,10 @@
package zstd
import (
- "bytes"
- "encoding/binary"
"errors"
"fmt"
"hash/crc32"
"io"
- "os"
- "path/filepath"
"sync"
"github.com/klauspost/compress/huff0"
@@ -648,21 +644,6 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
println("initializing sequences:", err)
return err
}
- // Extract blocks...
- if false && hist.dict == nil {
- fatalErr := func(err error) {
- if err != nil {
- panic(err)
- }
- }
- fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize)
- var buf bytes.Buffer
- fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse))
- fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
- fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
- buf.Write(in)
- os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
- }
return nil
}
diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go
index 32a7f401d..fd35ea148 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -9,6 +9,7 @@ import (
"fmt"
"math"
"math/bits"
+ "slices"
"github.com/klauspost/compress/huff0"
)
@@ -457,16 +458,7 @@ func fuzzFseEncoder(data []byte) int {
// All 0
return 0
}
- maxCount := func(a []uint32) int {
- var max uint32
- for _, v := range a {
- if v > max {
- max = v
- }
- }
- return int(max)
- }
- cnt := maxCount(hist[:maxSym])
+ cnt := int(slices.Max(hist[:maxSym]))
if cnt == len(data) {
// RLE
return 0
@@ -884,15 +876,6 @@ func (b *blockEnc) genCodes() {
}
}
}
- maxCount := func(a []uint32) int {
- var max uint32
- for _, v := range a {
- if v > max {
- max = v
- }
- }
- return int(max)
- }
if debugAsserts && mlMax > maxMatchLengthSymbol {
panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
}
@@ -903,7 +886,7 @@ func (b *blockEnc) genCodes() {
panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
}
- b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
- b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
- b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
+ b.coders.mlEnc.HistogramFinished(mlMax, int(slices.Max(mlH[:mlMax+1])))
+ b.coders.ofEnc.HistogramFinished(ofMax, int(slices.Max(ofH[:ofMax+1])))
+ b.coders.llEnc.HistogramFinished(llMax, int(slices.Max(llH[:llMax+1])))
}
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index bbca17234..ea2a19376 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -123,7 +123,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
}
// Read bytes from the decompressed stream into p.
-// Returns the number of bytes written and any error that occurred.
+// Returns the number of bytes read and any error that occurred.
// When the stream is done, io.EOF will be returned.
func (d *Decoder) Read(p []byte) (int, error) {
var n int
@@ -323,6 +323,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
frame.bBuf = nil
if frame.history.decoders.br != nil {
frame.history.decoders.br.in = nil
+ frame.history.decoders.br.cursor = 0
}
d.decoders <- block
}()
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go
index 5ca46038a..7d250c67f 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_base.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go
@@ -116,7 +116,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
panic(err)
}
if t < 0 {
- err := fmt.Sprintf("s (%d) < 0", s)
+ err := fmt.Sprintf("t (%d) < 0", t)
panic(err)
}
if s-t > e.maxMatchOff {
diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go
index 57b9c31c0..bea1779e9 100644
--- a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go
+++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go
@@ -7,20 +7,25 @@
package zstd
import (
- "encoding/binary"
"math/bits"
+
+ "github.com/klauspost/compress/internal/le"
)
// matchLen returns the maximum common prefix length of a and b.
// a must be the shortest of the two.
func matchLen(a, b []byte) (n int) {
- for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
- diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
+ left := len(a)
+ for left >= 8 {
+ diff := le.Load64(a, n) ^ le.Load64(b, n)
if diff != 0 {
return n + bits.TrailingZeros64(diff)>>3
}
n += 8
+ left -= 8
}
+ a = a[n:]
+ b = b[n:]
for i := range a {
if a[i] != b[i] {
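For reference, the behavior `matchLen` implements (the 8-byte loads above are an optimization over this plain byte loop; the example values are illustrative):

```go
package main

import "fmt"

// refMatchLen is the plain-Go reference: length of the longest common prefix.
func refMatchLen(a, b []byte) int {
	n := 0
	for n < len(a) && n < len(b) && a[n] == b[n] {
		n++
	}
	return n
}

func main() {
	fmt.Println(refMatchLen([]byte("zstd!"), []byte("zstdX"))) // 4
}
```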
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
index d7fe6d82d..9a7de82f9 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -245,7 +245,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
return io.ErrUnexpectedEOF
}
var ll, mo, ml int
- if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
+ if br.cursor > 4+((maxOffsetBits+16+16)>>3) {
// inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
index f5591fa1e..a708ca6d3 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
@@ -7,9 +7,9 @@
TEXT ·sequenceDecs_decode_amd64(SB), $8-32
MOVQ br+8(FP), CX
MOVQ 24(CX), DX
- MOVBQZX 32(CX), BX
+ MOVBQZX 40(CX), BX
MOVQ (CX), AX
- MOVQ 8(CX), SI
+ MOVQ 32(CX), SI
ADDQ SI, AX
MOVQ AX, (SP)
MOVQ ctx+16(FP), AX
@@ -299,8 +299,8 @@ sequenceDecs_decode_amd64_match_len_ofs_ok:
MOVQ R13, 160(AX)
MOVQ br+8(FP), AX
MOVQ DX, 24(AX)
- MOVB BL, 32(AX)
- MOVQ SI, 8(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 32(AX)
// Return success
MOVQ $0x00000000, ret+24(FP)
@@ -335,9 +335,9 @@ error_overread:
TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
MOVQ br+8(FP), CX
MOVQ 24(CX), DX
- MOVBQZX 32(CX), BX
+ MOVBQZX 40(CX), BX
MOVQ (CX), AX
- MOVQ 8(CX), SI
+ MOVQ 32(CX), SI
ADDQ SI, AX
MOVQ AX, (SP)
MOVQ ctx+16(FP), AX
@@ -598,8 +598,8 @@ sequenceDecs_decode_56_amd64_match_len_ofs_ok:
MOVQ R13, 160(AX)
MOVQ br+8(FP), AX
MOVQ DX, 24(AX)
- MOVB BL, 32(AX)
- MOVQ SI, 8(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 32(AX)
// Return success
MOVQ $0x00000000, ret+24(FP)
@@ -634,9 +634,9 @@ error_overread:
TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
MOVQ br+8(FP), BX
MOVQ 24(BX), AX
- MOVBQZX 32(BX), DX
+ MOVBQZX 40(BX), DX
MOVQ (BX), CX
- MOVQ 8(BX), BX
+ MOVQ 32(BX), BX
ADDQ BX, CX
MOVQ CX, (SP)
MOVQ ctx+16(FP), CX
@@ -884,8 +884,8 @@ sequenceDecs_decode_bmi2_match_len_ofs_ok:
MOVQ R12, 160(CX)
MOVQ br+8(FP), CX
MOVQ AX, 24(CX)
- MOVB DL, 32(CX)
- MOVQ BX, 8(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 32(CX)
// Return success
MOVQ $0x00000000, ret+24(FP)
@@ -920,9 +920,9 @@ error_overread:
TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
MOVQ br+8(FP), BX
MOVQ 24(BX), AX
- MOVBQZX 32(BX), DX
+ MOVBQZX 40(BX), DX
MOVQ (BX), CX
- MOVQ 8(BX), BX
+ MOVQ 32(BX), BX
ADDQ BX, CX
MOVQ CX, (SP)
MOVQ ctx+16(FP), CX
@@ -1141,8 +1141,8 @@ sequenceDecs_decode_56_bmi2_match_len_ofs_ok:
MOVQ R12, 160(CX)
MOVQ br+8(FP), CX
MOVQ AX, 24(CX)
- MOVB DL, 32(CX)
- MOVQ BX, 8(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 32(CX)
// Return success
MOVQ $0x00000000, ret+24(FP)
@@ -1787,9 +1787,9 @@ empty_seqs:
TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
MOVQ br+8(FP), CX
MOVQ 24(CX), DX
- MOVBQZX 32(CX), BX
+ MOVBQZX 40(CX), BX
MOVQ (CX), AX
- MOVQ 8(CX), SI
+ MOVQ 32(CX), SI
ADDQ SI, AX
MOVQ AX, (SP)
MOVQ ctx+16(FP), AX
@@ -2281,8 +2281,8 @@ handle_loop:
loop_finished:
MOVQ br+8(FP), AX
MOVQ DX, 24(AX)
- MOVB BL, 32(AX)
- MOVQ SI, 8(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 32(AX)
// Update the context
MOVQ ctx+16(FP), AX
@@ -2349,9 +2349,9 @@ error_not_enough_space:
TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
MOVQ br+8(FP), BX
MOVQ 24(BX), AX
- MOVBQZX 32(BX), DX
+ MOVBQZX 40(BX), DX
MOVQ (BX), CX
- MOVQ 8(BX), BX
+ MOVQ 32(BX), BX
ADDQ BX, CX
MOVQ CX, (SP)
MOVQ ctx+16(FP), CX
@@ -2801,8 +2801,8 @@ handle_loop:
loop_finished:
MOVQ br+8(FP), CX
MOVQ AX, 24(CX)
- MOVB DL, 32(CX)
- MOVQ BX, 8(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 32(CX)
// Update the context
MOVQ ctx+16(FP), AX
@@ -2869,9 +2869,9 @@ error_not_enough_space:
TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
MOVQ br+8(FP), CX
MOVQ 24(CX), DX
- MOVBQZX 32(CX), BX
+ MOVBQZX 40(CX), BX
MOVQ (CX), AX
- MOVQ 8(CX), SI
+ MOVQ 32(CX), SI
ADDQ SI, AX
MOVQ AX, (SP)
MOVQ ctx+16(FP), AX
@@ -3465,8 +3465,8 @@ handle_loop:
loop_finished:
MOVQ br+8(FP), AX
MOVQ DX, 24(AX)
- MOVB BL, 32(AX)
- MOVQ SI, 8(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 32(AX)
// Update the context
MOVQ ctx+16(FP), AX
@@ -3533,9 +3533,9 @@ error_not_enough_space:
TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
MOVQ br+8(FP), BX
MOVQ 24(BX), AX
- MOVBQZX 32(BX), DX
+ MOVBQZX 40(BX), DX
MOVQ (BX), CX
- MOVQ 8(BX), BX
+ MOVQ 32(BX), BX
ADDQ BX, CX
MOVQ CX, (SP)
MOVQ ctx+16(FP), CX
@@ -4087,8 +4087,8 @@ handle_loop:
loop_finished:
MOVQ br+8(FP), CX
MOVQ AX, 24(CX)
- MOVB DL, 32(CX)
- MOVQ BX, 8(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 32(CX)
// Update the context
MOVQ ctx+16(FP), AX
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
index 2fb35b788..7cec2197c 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
@@ -29,7 +29,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
}
for i := range seqs {
var ll, mo, ml int
- if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
+ if br.cursor > 4+((maxOffsetBits+16+16)>>3) {
// inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go
index 8014174a7..65045eabd 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go
@@ -69,7 +69,6 @@ var llBitsTable = [maxLLCode + 1]byte{
func llCode(litLength uint32) uint8 {
const llDeltaCode = 19
if litLength <= 63 {
- // Compiler insists on bounds check (Go 1.12)
return llCodeTable[litLength&63]
}
return uint8(highBit(litLength)) + llDeltaCode
@@ -102,7 +101,6 @@ var mlBitsTable = [maxMLCode + 1]byte{
func mlCode(mlBase uint32) uint8 {
const mlDeltaCode = 36
if mlBase <= 127 {
- // Compiler insists on bounds check (Go 1.12)
return mlCodeTable[mlBase&127]
}
return uint8(highBit(mlBase)) + mlDeltaCode
diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go
index ec13594e8..a17381b8f 100644
--- a/vendor/github.com/klauspost/compress/zstd/snappy.go
+++ b/vendor/github.com/klauspost/compress/zstd/snappy.go
@@ -197,7 +197,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
n, r.err = w.Write(r.block.output)
if r.err != nil {
- return written, err
+ return written, r.err
}
written += int64(n)
continue
@@ -239,7 +239,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
}
n, r.err = w.Write(r.block.output)
if r.err != nil {
- return written, err
+ return written, r.err
}
written += int64(n)
continue
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
index 066bef2a4..6252b46ae 100644
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -5,10 +5,11 @@ package zstd
import (
"bytes"
- "encoding/binary"
"errors"
"log"
"math"
+
+ "github.com/klauspost/compress/internal/le"
)
// enable debug printing
@@ -110,11 +111,11 @@ func printf(format string, a ...interface{}) {
}
func load3232(b []byte, i int32) uint32 {
- return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:])
+ return le.Load32(b, i)
}
func load6432(b []byte, i int32) uint64 {
- return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:])
+ return le.Load64(b, i)
}
type byter interface {
diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
index f9d52e584..69b15d184 100644
--- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
+++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
@@ -1,3 +1,74 @@
+## 2.23.4
+
+Prior to this release, Ginkgo would compute the incorrect number of available CPUs when running with `-p` in a Linux container. Thanks to @emirot for the fix!
+
+### Features
+- Add automaxprocs for using CPUQuota [2b9c428]
+
+### Fixes
+- clarify gotchas about -vet flag [1f59d07]
+
+### Maintenance
+- bump dependencies [2d134d5]
+
+## 2.23.3
+
+### Fixes
+
+- allow `-` as a standalone argument [cfcc1a5]
+- Bug Fix: Add GinkgoTBWrapper.Chdir() and GinkgoTBWrapper.Context() [feaf292]
+- ignore exit code for symbol test on linux [88e2282]
+
+## 2.23.2
+
+🎉🎉🎉
+
+At long last, some long-standing performance gaps between `ginkgo` and `go test` have been resolved!
+
+Ginkgo operates by running `go test -c` to generate test binaries, and then running those binaries. It turns out that the compilation step of `go test -c` is slower than `go test`'s compilation step because `go test` strips out debug symbols (`ldflags=-w`) whereas `go test -c` does not.
+
+Ginkgo now passes the appropriate `ldflags` to `go test -c` when running specs to strip out symbols. This is only done when it is safe to do so; symbols are preserved when profiling is enabled and when `ginkgo build` is called explicitly.
+
+This, coupled with the [instructions for disabling XProtect on MacOS](https://onsi.github.io/ginkgo/#if-you-are-running-on-macos), yields a much better performance experience with Ginkgo.
+
+## 2.23.1
+
+## 🚨 For users on MacOS 🚨
+
+A long-standing Ginkgo performance issue on MacOS seems to be due to the macOS antimalware XProtect. You can follow the instructions [here](https://onsi.github.io/ginkgo/#if-you-are-running-on-macos) to disable it in your terminal. Doing so sped up Ginkgo's own test suite from 1m8s to 47s.
+
+### Fixes
+
+Ginkgo's CLI is now a bit clearer if you pass flags in incorrectly:
+
+- make it clearer that you need to pass a filename to the various profile flags, not an absolute directory [a0e52ff]
+- emit an error and exit if the ginkgo invocation includes flags after positional arguments [b799d8d]
+
+This might cause existing CI builds to fail. If so, it's likely that your CI build was misconfigured and should be corrected. Open an issue if you need help.
+
+## 2.23.0
+
+Ginkgo 2.23.0 adds a handful of methods to `GinkgoT()` to make it compatible with the `testing.TB` interface in Go 1.24. `GinkgoT().Context()`, in particular, is a useful shorthand for generating a new context that will clean itself up in a `DeferCleanup()`. This has subtle behavior differences from the golang implementation but should make sense in a Ginkgo... um... context.
+
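A hedged sketch of the shorthand, assuming the conventional dot-imports of `ginkgo/v2` and `gomega` plus `net/http`; the URL is a placeholder:

```go
var _ = It("uses a per-spec context", func() {
	// Cancelled automatically via DeferCleanup when the spec ends.
	ctx := GinkgoT().Context()

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "https://example.com", nil)
	Expect(err).NotTo(HaveOccurred())
	_ = req
})
```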
+### Features
+- bump to go 1.24.0 - support new testing.TB methods and add a test to cover testing.TB regressions [37a511b]
+
+### Fixes
+- fix edge case where build -o is pointing at an explicit file, not a directory [7556a86]
+- Fix binary paths when precompiling multiple suites. [4df06c6]
+
+### Maintenance
+- Fix: Correct Markdown list rendering in MIGRATING_TO_V2.md [cbcf39a]
+- docs: fix test workflow badge (#1512) [9b261ff]
+- Bump golang.org/x/net in /integration/_fixtures/version_mismatch_fixture (#1516) [00f19c8]
+- Bump golang.org/x/tools from 0.28.0 to 0.30.0 (#1515) [e98a4df]
+- Bump activesupport from 6.0.6.1 to 6.1.7.5 in /docs (#1504) [60cc4e2]
+- Bump github-pages from 231 to 232 in /docs (#1447) [fea6f2d]
+- Bump rexml from 3.2.8 to 3.3.9 in /docs (#1497) [31d7813]
+- Bump webrick from 1.8.1 to 1.9.1 in /docs (#1501) [fc3bbd6]
+- Code linting (#1500) [aee0d56]
+- change interface{} to any (#1502) [809a710]
+
## 2.22.2
### Maintenance
@@ -630,7 +701,7 @@ Ginkgo also uses this progress reporting infrastructure under the hood when hand
### Features
- `BeforeSuite`, `AfterSuite`, `SynchronizedBeforeSuite`, `SynchronizedAfterSuite`, and `ReportAfterSuite` now support (the relevant subset of) decorators. These can be passed in _after_ the callback functions that are usually passed into these nodes.
- As a result the **signature of these methods has changed** and now includes a trailing `args ...interface{}`. For most users simply using the DSL, this change is transparent. However if you were assigning one of these functions to a custom variable (or passing it around) then your code may need to change to reflect the new signature.
+ As a result the **signature of these methods has changed** and now includes a trailing `args ...any`. For most users simply using the DSL, this change is transparent. However if you were assigning one of these functions to a custom variable (or passing it around) then your code may need to change to reflect the new signature.
### Maintenance
- Modernize the invocation of Ginkgo in github actions [0ffde58]
diff --git a/vendor/github.com/onsi/ginkgo/v2/README.md b/vendor/github.com/onsi/ginkgo/v2/README.md
index cb23ffdf6..e3d0c13cc 100644
--- a/vendor/github.com/onsi/ginkgo/v2/README.md
+++ b/vendor/github.com/onsi/ginkgo/v2/README.md
@@ -1,6 +1,6 @@

-[](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster) | [Ginkgo Docs](https://onsi.github.io/ginkgo/)
+[](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster) | [Ginkgo Docs](https://onsi.github.io/ginkgo/)
---
diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
index a3e8237e9..d027bdff9 100644
--- a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
+++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
@@ -83,9 +83,9 @@ func exitIfErrors(errors []error) {
type GinkgoWriterInterface interface {
io.Writer
- Print(a ...interface{})
- Printf(format string, a ...interface{})
- Println(a ...interface{})
+ Print(a ...any)
+ Printf(format string, a ...any)
+ Println(a ...any)
TeeTo(writer io.Writer)
ClearTeeWriters()
@@ -243,7 +243,7 @@ for more on how specs are parallelized in Ginkgo.
You can also pass suite-level Label() decorators to RunSpecs. The passed-in labels will apply to all specs in the suite.
*/
-func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
+func RunSpecs(t GinkgoTestingT, description string, args ...any) bool {
if suiteDidRun {
exitIfErr(types.GinkgoErrors.RerunningSuite())
}
@@ -316,7 +316,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
return passed
}
-func extractSuiteConfiguration(args []interface{}) Labels {
+func extractSuiteConfiguration(args []any) Labels {
suiteLabels := Labels{}
configErrors := []error{}
for _, arg := range args {
@@ -491,14 +491,14 @@ to Describe the behavior of an object or function and, within that Describe, out
You can learn more at https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes
In addition, container nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference
*/
-func Describe(text string, args ...interface{}) bool {
+func Describe(text string, args ...any) bool {
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
}
/*
FDescribe focuses specs within the Describe block.
*/
-func FDescribe(text string, args ...interface{}) bool {
+func FDescribe(text string, args ...any) bool {
args = append(args, internal.Focus)
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
}
@@ -506,7 +506,7 @@ func FDescribe(text string, args ...interface{}) bool {
/*
PDescribe marks specs within the Describe block as pending.
*/
-func PDescribe(text string, args ...interface{}) bool {
+func PDescribe(text string, args ...any) bool {
args = append(args, internal.Pending)
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
}
@@ -522,18 +522,18 @@ var XDescribe = PDescribe
var Context, FContext, PContext, XContext = Describe, FDescribe, PDescribe, XDescribe
/* When is an alias for Describe - it generates the exact same kind of Container node */
-func When(text string, args ...interface{}) bool {
+func When(text string, args ...any) bool {
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
}
/* When is an alias for Describe - it generates the exact same kind of Container node */
-func FWhen(text string, args ...interface{}) bool {
+func FWhen(text string, args ...any) bool {
args = append(args, internal.Focus)
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
}
/* When is an alias for Describe - it generates the exact same kind of Container node */
-func PWhen(text string, args ...interface{}) bool {
+func PWhen(text string, args ...any) bool {
args = append(args, internal.Pending)
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
}
@@ -550,14 +550,14 @@ You can pass It nodes bare functions (func() {}) or functions that receive a Spe
You can learn more at https://onsi.github.io/ginkgo/#spec-subjects-it
In addition, subject nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference
*/
-func It(text string, args ...interface{}) bool {
+func It(text string, args ...any) bool {
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
}
/*
FIt allows you to focus an individual It.
*/
-func FIt(text string, args ...interface{}) bool {
+func FIt(text string, args ...any) bool {
args = append(args, internal.Focus)
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
}
@@ -565,7 +565,7 @@ func FIt(text string, args ...interface{}) bool {
/*
PIt allows you to mark an individual It as pending.
*/
-func PIt(text string, args ...interface{}) bool {
+func PIt(text string, args ...any) bool {
args = append(args, internal.Pending)
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
}
@@ -611,8 +611,8 @@ BeforeSuite can take a func() body, or an interruptible func(SpecContext)/func(c
You cannot nest any other Ginkgo nodes within a BeforeSuite node's closure.
You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite
*/
-func BeforeSuite(body interface{}, args ...interface{}) bool {
- combinedArgs := []interface{}{body}
+func BeforeSuite(body any, args ...any) bool {
+ combinedArgs := []any{body}
combinedArgs = append(combinedArgs, args...)
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeSuite, "", combinedArgs...))
}
@@ -630,8 +630,8 @@ AfterSuite can take a func() body, or an interruptible func(SpecContext)/func(co
You cannot nest any other Ginkgo nodes within an AfterSuite node's closure.
You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite
*/
-func AfterSuite(body interface{}, args ...interface{}) bool {
- combinedArgs := []interface{}{body}
+func AfterSuite(body any, args ...any) bool {
+ combinedArgs := []any{body}
combinedArgs = append(combinedArgs, args...)
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterSuite, "", combinedArgs...))
}
@@ -667,8 +667,8 @@ If either function receives a context.Context/SpecContext it is considered inter
You cannot nest any other Ginkgo nodes within an SynchronizedBeforeSuite node's closure.
You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite
*/
-func SynchronizedBeforeSuite(process1Body interface{}, allProcessBody interface{}, args ...interface{}) bool {
- combinedArgs := []interface{}{process1Body, allProcessBody}
+func SynchronizedBeforeSuite(process1Body any, allProcessBody any, args ...any) bool {
+ combinedArgs := []any{process1Body, allProcessBody}
combinedArgs = append(combinedArgs, args...)
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedBeforeSuite, "", combinedArgs...))
@@ -687,8 +687,8 @@ Note that you can also use DeferCleanup() in SynchronizedBeforeSuite to accompli
You cannot nest any other Ginkgo nodes within an SynchronizedAfterSuite node's closure.
You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite
*/
-func SynchronizedAfterSuite(allProcessBody interface{}, process1Body interface{}, args ...interface{}) bool {
- combinedArgs := []interface{}{allProcessBody, process1Body}
+func SynchronizedAfterSuite(allProcessBody any, process1Body any, args ...any) bool {
+ combinedArgs := []any{allProcessBody, process1Body}
combinedArgs = append(combinedArgs, args...)
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedAfterSuite, "", combinedArgs...))
@@ -703,7 +703,7 @@ BeforeEach can take a func() body, or an interruptible func(SpecContext)/func(co
You cannot nest any other Ginkgo nodes within a BeforeEach node's closure.
You can learn more here: https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach
*/
-func BeforeEach(args ...interface{}) bool {
+func BeforeEach(args ...any) bool {
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeEach, "", args...))
}
@@ -716,7 +716,7 @@ JustBeforeEach can take a func() body, or an interruptible func(SpecContext)/fun
You cannot nest any other Ginkgo nodes within a JustBeforeEach node's closure.
You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach
*/
-func JustBeforeEach(args ...interface{}) bool {
+func JustBeforeEach(args ...any) bool {
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustBeforeEach, "", args...))
}
@@ -731,7 +731,7 @@ AfterEach can take a func() body, or an interruptible func(SpecContext)/func(con
You cannot nest any other Ginkgo nodes within an AfterEach node's closure.
You can learn more here: https://onsi.github.io/ginkgo/#spec-cleanup-aftereach-and-defercleanup
*/
-func AfterEach(args ...interface{}) bool {
+func AfterEach(args ...any) bool {
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterEach, "", args...))
}
@@ -743,7 +743,7 @@ JustAfterEach can take a func() body, or an interruptible func(SpecContext)/func
You cannot nest any other Ginkgo nodes within a JustAfterEach node's closure.
You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-diagnostics-collection-and-teardown-justaftereach
*/
-func JustAfterEach(args ...interface{}) bool {
+func JustAfterEach(args ...any) bool {
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustAfterEach, "", args...))
}
@@ -758,7 +758,7 @@ You cannot nest any other Ginkgo nodes within a BeforeAll node's closure.
You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers
And you can learn more about BeforeAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall
*/
-func BeforeAll(args ...interface{}) bool {
+func BeforeAll(args ...any) bool {
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeAll, "", args...))
}
@@ -775,7 +775,7 @@ You cannot nest any other Ginkgo nodes within an AfterAll node's closure.
You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers
And you can learn more about AfterAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall
*/
-func AfterAll(args ...interface{}) bool {
+func AfterAll(args ...any) bool {
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterAll, "", args...))
}
@@ -818,7 +818,7 @@ When DeferCleanup is called in BeforeSuite, SynchronizedBeforeSuite, AfterSuite,
Note that DeferCleanup does not represent a node but rather dynamically generates the appropriate type of cleanup node based on the context in which it is called. As such you must call DeferCleanup within a Setup or Subject node, and not within a Container node.
You can learn more about DeferCleanup here: https://onsi.github.io/ginkgo/#cleaning-up-our-cleanup-code-defercleanup
*/
-func DeferCleanup(args ...interface{}) {
+func DeferCleanup(args ...any) {
fail := func(message string, cl types.CodeLocation) {
global.Failer.Fail(message, cl)
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go b/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go
index f912bbec6..fd45b8bea 100644
--- a/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go
+++ b/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go
@@ -118,9 +118,9 @@ Use Gomega's gmeasure package instead.
You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code
*/
type Benchmarker interface {
- Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration)
- RecordValue(name string, value float64, info ...interface{})
- RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{})
+ Time(name string, body func(), info ...any) (elapsedTime time.Duration)
+ RecordValue(name string, value float64, info ...any)
+ RecordValueWithPrecision(name string, value float64, units string, precision int, info ...any)
}
/*
@@ -129,7 +129,7 @@ Deprecated: Measure() has been removed from Ginkgo 2.0
Use Gomega's gmeasure package instead.
You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code
*/
-func Measure(_ ...interface{}) bool {
+func Measure(_ ...any) bool {
deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), types.NewCodeLocation(1))
return true
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go
index 4d5749114..f61356db1 100644
--- a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go
+++ b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go
@@ -24,15 +24,15 @@ const (
var SingletonFormatter = New(ColorModeTerminal)
-func F(format string, args ...interface{}) string {
+func F(format string, args ...any) string {
return SingletonFormatter.F(format, args...)
}
-func Fi(indentation uint, format string, args ...interface{}) string {
+func Fi(indentation uint, format string, args ...any) string {
return SingletonFormatter.Fi(indentation, format, args...)
}
-func Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string {
+func Fiw(indentation uint, maxWidth uint, format string, args ...any) string {
return SingletonFormatter.Fiw(indentation, maxWidth, format, args...)
}
@@ -115,15 +115,15 @@ func New(colorMode ColorMode) Formatter {
return f
}
-func (f Formatter) F(format string, args ...interface{}) string {
+func (f Formatter) F(format string, args ...any) string {
return f.Fi(0, format, args...)
}
-func (f Formatter) Fi(indentation uint, format string, args ...interface{}) string {
+func (f Formatter) Fi(indentation uint, format string, args ...any) string {
return f.Fiw(indentation, 0, format, args...)
}
-func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string {
+func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...any) string {
out := f.style(format)
if len(args) > 0 {
out = fmt.Sprintf(out, args...)
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go
index fd1726084..2b36b2feb 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go
@@ -44,7 +44,7 @@ func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.Go
internal.VerifyCLIAndFrameworkVersion(suites)
opc := internal.NewOrderedParallelCompiler(cliConfig.ComputedNumCompilers())
- opc.StartCompiling(suites, goFlagsConfig)
+ opc.StartCompiling(suites, goFlagsConfig, true)
for {
suiteIdx, suite := opc.Next()
@@ -55,18 +55,22 @@ func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.Go
if suite.State.Is(internal.TestSuiteStateFailedToCompile) {
fmt.Println(suite.CompilationError.Error())
} else {
- if len(goFlagsConfig.O) == 0 {
- goFlagsConfig.O = path.Join(suite.Path, suite.PackageName+".test")
- } else {
+ var testBinPath string
+ if len(goFlagsConfig.O) != 0 {
stat, err := os.Stat(goFlagsConfig.O)
if err != nil {
panic(err)
}
if stat.IsDir() {
- goFlagsConfig.O += "/" + suite.PackageName + ".test"
+ testBinPath = goFlagsConfig.O + "/" + suite.PackageName + ".test"
+ } else {
+ testBinPath = goFlagsConfig.O
}
}
- fmt.Printf("Compiled %s\n", goFlagsConfig.O)
+ if len(testBinPath) == 0 {
+ testBinPath = path.Join(suite.Path, suite.PackageName+".test")
+ }
+ fmt.Printf("Compiled %s\n", testBinPath)
}
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go
index 2efd28608..f0e7331f7 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go
@@ -12,7 +12,7 @@ func Abort(details AbortDetails) {
panic(details)
}
-func AbortGracefullyWith(format string, args ...interface{}) {
+func AbortGracefullyWith(format string, args ...any) {
Abort(AbortDetails{
ExitCode: 0,
Error: fmt.Errorf(format, args...),
@@ -20,7 +20,7 @@ func AbortGracefullyWith(format string, args ...interface{}) {
})
}
-func AbortWith(format string, args ...interface{}) {
+func AbortWith(format string, args ...any) {
Abort(AbortDetails{
ExitCode: 1,
Error: fmt.Errorf(format, args...),
@@ -28,7 +28,7 @@ func AbortWith(format string, args ...interface{}) {
})
}
-func AbortWithUsage(format string, args ...interface{}) {
+func AbortWithUsage(format string, args ...any) {
Abort(AbortDetails{
ExitCode: 1,
Error: fmt.Errorf(format, args...),
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go
index 12e0e5659..79b83a3af 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go
@@ -24,7 +24,11 @@ func (c Command) Run(args []string, additionalArgs []string) {
if err != nil {
AbortWithUsage(err.Error())
}
-
+ for _, arg := range args {
+ if len(arg) > 1 && strings.HasPrefix(arg, "-") {
+ AbortWith(types.GinkgoErrors.FlagAfterPositionalParameter().Error())
+ }
+ }
c.Command(args, additionalArgs)
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go
index 88dd8d6b0..c3f6d3a11 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go
@@ -68,7 +68,6 @@ func (p Program) RunAndExit(osArgs []string) {
fmt.Fprintln(p.ErrWriter, deprecationTracker.DeprecationsReport())
}
p.Exiter(exitCode)
- return
}()
args, additionalArgs := []string{}, []string{}
@@ -157,7 +156,6 @@ func (p Program) handleHelpRequestsAndExit(writer io.Writer, args []string) {
p.EmitUsage(writer)
Abort(AbortDetails{ExitCode: 1})
}
- return
}
func (p Program) EmitUsage(writer io.Writer) {
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go
index 48827cc5e..7bbe6be0f 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go
@@ -11,7 +11,7 @@ import (
"github.com/onsi/ginkgo/v2/types"
)
-func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite {
+func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig, preserveSymbols bool) TestSuite {
if suite.PathToCompiledTest != "" {
return suite
}
@@ -46,7 +46,7 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite
suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error())
return suite
}
- args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, "./", pathToInvocationPath)
+ args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, "./", pathToInvocationPath, preserveSymbols)
if err != nil {
suite.State = TestSuiteStateFailedToCompile
suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error())
@@ -120,7 +120,7 @@ func NewOrderedParallelCompiler(numCompilers int) *OrderedParallelCompiler {
}
}
-func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig) {
+func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig, preserveSymbols bool) {
opc.stopped = false
opc.idx = 0
opc.numSuites = len(suites)
@@ -135,7 +135,7 @@ func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsCon
stopped := opc.stopped
opc.mutex.Unlock()
if !stopped {
- suite = CompileSuite(suite, goFlagsConfig)
+ suite = CompileSuite(suite, goFlagsConfig, preserveSymbols)
}
c <- suite
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go
index 3c5079ff4..87cfa1119 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go
@@ -89,7 +89,7 @@ func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int)
}
i := 0
- if sortFunc(i) != true {
+ if !sortFunc(i) {
i = sort.Search(len(p.Blocks)-startIndex, sortFunc)
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go
index e9abb27d8..bd6b8fbff 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go
@@ -3,7 +3,7 @@ package main
import (
"fmt"
"os"
-
+ _ "go.uber.org/automaxprocs"
"github.com/onsi/ginkgo/v2/ginkgo/build"
"github.com/onsi/ginkgo/v2/ginkgo/command"
"github.com/onsi/ginkgo/v2/ginkgo/generators"
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go
index aaed4d570..03875b979 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go
@@ -107,7 +107,7 @@ OUTER_LOOP:
}
opc := internal.NewOrderedParallelCompiler(r.cliConfig.ComputedNumCompilers())
- opc.StartCompiling(suites, r.goFlagsConfig)
+ opc.StartCompiling(suites, r.goFlagsConfig, false)
SUITE_LOOP:
for {
@@ -142,7 +142,7 @@ OUTER_LOOP:
}
if !endTime.IsZero() {
- r.suiteConfig.Timeout = endTime.Sub(time.Now())
+ r.suiteConfig.Timeout = time.Until(endTime)
if r.suiteConfig.Timeout <= 0 {
suites[suiteIdx].State = internal.TestSuiteStateFailedDueToTimeout
opc.StopAndDrain()
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go
index bde4193ce..fe1ca3051 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go
@@ -153,7 +153,7 @@ func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) {
}
func (w *SpecWatcher) compileAndRun(suite internal.TestSuite, additionalArgs []string) internal.TestSuite {
- suite = internal.CompileSuite(suite, w.goFlagsConfig)
+ suite = internal.CompileSuite(suite, w.goFlagsConfig, false)
if suite.State.Is(internal.TestSuiteStateFailedToCompile) {
fmt.Println(suite.CompilationError.Error())
return suite
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
index 02c6739e5..993279de2 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
@@ -1,6 +1,7 @@
package ginkgo
import (
+ "context"
"testing"
"github.com/onsi/ginkgo/v2/internal/testingtproxy"
@@ -48,6 +49,8 @@ The portion of the interface returned by GinkgoT() that maps onto methods in the
*/
type GinkgoTInterface interface {
Cleanup(func())
+ Chdir(dir string)
+ Context() context.Context
Setenv(kev, value string)
Error(args ...any)
Errorf(format string, args ...any)
@@ -127,6 +130,12 @@ type GinkgoTBWrapper struct {
func (g *GinkgoTBWrapper) Cleanup(f func()) {
g.GinkgoT.Cleanup(f)
}
+func (g *GinkgoTBWrapper) Chdir(dir string) {
+ g.GinkgoT.Chdir(dir)
+}
+func (g *GinkgoTBWrapper) Context() context.Context {
+ return g.GinkgoT.Context()
+}
func (g *GinkgoTBWrapper) Error(args ...any) {
g.GinkgoT.Error(args...)
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/failer.go b/vendor/github.com/onsi/ginkgo/v2/internal/failer.go
index e9bd9565f..8c5de9c16 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/failer.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/failer.go
@@ -32,7 +32,7 @@ func (f *Failer) GetFailure() types.Failure {
return f.failure
}
-func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) {
+func (f *Failer) Panic(location types.CodeLocation, forwardedPanic any) {
f.lock.Lock()
defer f.lock.Unlock()
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go
index 8ed86111f..79bfa87db 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go
@@ -40,7 +40,7 @@ func (ic InterruptCause) String() string {
}
type InterruptStatus struct {
- Channel chan interface{}
+ Channel chan any
Level InterruptLevel
Cause InterruptCause
}
@@ -62,14 +62,14 @@ type InterruptHandlerInterface interface {
}
type InterruptHandler struct {
- c chan interface{}
+ c chan any
lock *sync.Mutex
level InterruptLevel
cause InterruptCause
client parallel_support.Client
- stop chan interface{}
+ stop chan any
signals []os.Signal
- requestAbortCheck chan interface{}
+ requestAbortCheck chan any
}
func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler {
@@ -77,10 +77,10 @@ func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *
signals = []os.Signal{os.Interrupt, syscall.SIGTERM}
}
handler := &InterruptHandler{
- c: make(chan interface{}),
+ c: make(chan any),
lock: &sync.Mutex{},
- stop: make(chan interface{}),
- requestAbortCheck: make(chan interface{}),
+ stop: make(chan any),
+ requestAbortCheck: make(chan any),
client: client,
signals: signals,
}
@@ -98,9 +98,9 @@ func (handler *InterruptHandler) registerForInterrupts() {
signal.Notify(signalChannel, handler.signals...)
// cross-process abort handling
- var abortChannel chan interface{}
+ var abortChannel chan any
if handler.client != nil {
- abortChannel = make(chan interface{})
+ abortChannel = make(chan any)
go func() {
pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL)
for {
@@ -125,7 +125,7 @@ func (handler *InterruptHandler) registerForInterrupts() {
}()
}
- go func(abortChannel chan interface{}) {
+ go func(abortChannel chan any) {
var interruptCause InterruptCause
for {
select {
@@ -151,7 +151,7 @@ func (handler *InterruptHandler) registerForInterrupts() {
}
if handler.level != oldLevel {
close(handler.c)
- handler.c = make(chan interface{})
+ handler.c = make(chan any)
}
handler.lock.Unlock()
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go
index 0686f7410..8096950b6 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/node.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go
@@ -84,7 +84,7 @@ const SuppressProgressReporting = suppressProgressReporting(true)
type FlakeAttempts uint
type MustPassRepeatedly uint
type Offset uint
-type Done chan<- interface{} // Deprecated Done Channel for asynchronous testing
+type Done chan<- any // Deprecated Done Channel for asynchronous testing
type Labels []string
type PollProgressInterval time.Duration
type PollProgressAfter time.Duration
@@ -110,9 +110,9 @@ func UnionOfLabels(labels ...Labels) Labels {
return out
}
-func PartitionDecorations(args ...interface{}) ([]interface{}, []interface{}) {
- decorations := []interface{}{}
- remainingArgs := []interface{}{}
+func PartitionDecorations(args ...any) ([]any, []any) {
+ decorations := []any{}
+ remainingArgs := []any{}
for _, arg := range args {
if isDecoration(arg) {
decorations = append(decorations, arg)
@@ -123,7 +123,7 @@ func PartitionDecorations(args ...interface{}) ([]interface{}, []interface{}) {
return decorations, remainingArgs
}
-func isDecoration(arg interface{}) bool {
+func isDecoration(arg any) bool {
switch t := reflect.TypeOf(arg); {
case t == nil:
return false
@@ -168,7 +168,7 @@ func isDecoration(arg interface{}) bool {
}
}
-func isSliceOfDecorations(slice interface{}) bool {
+func isSliceOfDecorations(slice any) bool {
vSlice := reflect.ValueOf(slice)
if vSlice.Len() == 0 {
return false
@@ -184,7 +184,7 @@ func isSliceOfDecorations(slice interface{}) bool {
var contextType = reflect.TypeOf(new(context.Context)).Elem()
var specContextType = reflect.TypeOf(new(SpecContext)).Elem()
-func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...interface{}) (Node, []error) {
+func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...any) (Node, []error) {
baseOffset := 2
node := Node{
ID: UniqueNodeID(),
@@ -207,7 +207,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
args = unrollInterfaceSlice(args)
- remainingArgs := []interface{}{}
+ remainingArgs := []any{}
// First get the CodeLocation up-to-date
for _, arg := range args {
switch v := arg.(type) {
@@ -223,7 +223,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
labelsSeen := map[string]bool{}
trackedFunctionError := false
args = remainingArgs
- remainingArgs = []interface{}{}
+ remainingArgs = []any{}
// now process the rest of the args
for _, arg := range args {
switch t := reflect.TypeOf(arg); {
@@ -451,7 +451,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
var doneType = reflect.TypeOf(make(Done))
-func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types.CodeLocation, arg interface{}) (func(SpecContext), bool) {
+func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types.CodeLocation, arg any) (func(SpecContext), bool) {
t := reflect.TypeOf(arg)
if t.NumOut() > 0 || t.NumIn() > 1 {
return nil, false
@@ -477,7 +477,7 @@ func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types.
var byteType = reflect.TypeOf([]byte{})
-func extractSynchronizedBeforeSuiteProc1Body(arg interface{}) (func(SpecContext) []byte, bool) {
+func extractSynchronizedBeforeSuiteProc1Body(arg any) (func(SpecContext) []byte, bool) {
t := reflect.TypeOf(arg)
v := reflect.ValueOf(arg)
@@ -505,7 +505,7 @@ func extractSynchronizedBeforeSuiteProc1Body(arg interface{}) (func(SpecContext)
}, hasContext
}
-func extractSynchronizedBeforeSuiteAllProcsBody(arg interface{}) (func(SpecContext, []byte), bool) {
+func extractSynchronizedBeforeSuiteAllProcsBody(arg any) (func(SpecContext, []byte), bool) {
t := reflect.TypeOf(arg)
v := reflect.ValueOf(arg)
hasContext, hasByte := false, false
@@ -536,11 +536,11 @@ func extractSynchronizedBeforeSuiteAllProcsBody(arg interface{}) (func(SpecConte
var errInterface = reflect.TypeOf((*error)(nil)).Elem()
-func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(string, types.CodeLocation), args ...interface{}) (Node, []error) {
+func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(string, types.CodeLocation), args ...any) (Node, []error) {
decorations, remainingArgs := PartitionDecorations(args...)
baseOffset := 2
cl := types.NewCodeLocation(baseOffset)
- finalArgs := []interface{}{}
+ finalArgs := []any{}
for _, arg := range decorations {
switch t := reflect.TypeOf(arg); {
case t == reflect.TypeOf(Offset(0)):
@@ -920,12 +920,12 @@ func (n Nodes) GetMaxMustPassRepeatedly() int {
return maxMustPassRepeatedly
}
-func unrollInterfaceSlice(args interface{}) []interface{} {
+func unrollInterfaceSlice(args any) []any {
v := reflect.ValueOf(args)
if v.Kind() != reflect.Slice {
- return []interface{}{args}
+ return []any{args}
}
- out := []interface{}{}
+ out := []any{}
for i := 0; i < v.Len(); i++ {
el := reflect.ValueOf(v.Index(i).Interface())
if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) {
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go
index 4a1c09461..5598f15cb 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go
@@ -69,7 +69,7 @@ type pipePair struct {
writer *os.File
}
-func startPipeFactory(pipeChannel chan pipePair, shutdown chan interface{}) {
+func startPipeFactory(pipeChannel chan pipePair, shutdown chan any) {
for {
//make the next pipe...
pair := pipePair{}
@@ -101,8 +101,8 @@ type genericOutputInterceptor struct {
stderrClone *os.File
pipe pipePair
- shutdown chan interface{}
- emergencyBailout chan interface{}
+ shutdown chan any
+ emergencyBailout chan any
pipeChannel chan pipePair
interceptedContent chan string
@@ -139,7 +139,7 @@ func (interceptor *genericOutputInterceptor) ResumeIntercepting() {
interceptor.intercepting = true
if interceptor.stdoutClone == nil {
interceptor.stdoutClone, interceptor.stderrClone = interceptor.implementation.CreateStdoutStderrClones()
- interceptor.shutdown = make(chan interface{})
+ interceptor.shutdown = make(chan any)
go startPipeFactory(interceptor.pipeChannel, interceptor.shutdown)
}
@@ -147,13 +147,13 @@ func (interceptor *genericOutputInterceptor) ResumeIntercepting() {
// we get the pipe from our pipe factory. it runs in the background so we can request the next pipe while the spec being intercepted is running
interceptor.pipe = <-interceptor.pipeChannel
- interceptor.emergencyBailout = make(chan interface{})
+ interceptor.emergencyBailout = make(chan any)
//Spin up a goroutine to copy data from the pipe into a buffer, this is how we capture any output the user is emitting
go func() {
buffer := &bytes.Buffer{}
destination := io.MultiWriter(buffer, interceptor.forwardTo)
- copyFinished := make(chan interface{})
+ copyFinished := make(chan any)
reader := interceptor.pipe.reader
go func() {
io.Copy(destination, reader)
@@ -224,7 +224,7 @@ func NewOSGlobalReassigningOutputInterceptor() OutputInterceptor {
return &genericOutputInterceptor{
interceptedContent: make(chan string),
pipeChannel: make(chan pipePair),
- shutdown: make(chan interface{}),
+ shutdown: make(chan any),
implementation: &osGlobalReassigningOutputInterceptorImpl{},
}
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
index 8a237f446..e0f1431d5 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
@@ -13,7 +13,7 @@ func NewOutputInterceptor() OutputInterceptor {
return &genericOutputInterceptor{
interceptedContent: make(chan string),
pipeChannel: make(chan pipePair),
- shutdown: make(chan interface{}),
+ shutdown: make(chan any),
implementation: &dupSyscallOutputInterceptorImpl{},
}
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go
index b3cd64292..4234d802c 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go
@@ -30,7 +30,7 @@ type Server interface {
Close()
Address() string
RegisterAlive(node int, alive func() bool)
- GetSuiteDone() chan interface{}
+ GetSuiteDone() chan any
GetOutputDestination() io.Writer
SetOutputDestination(io.Writer)
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go
index 6547c7a66..4aa10ae4f 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go
@@ -34,7 +34,7 @@ func (client *httpClient) Close() error {
return nil
}
-func (client *httpClient) post(path string, data interface{}) error {
+func (client *httpClient) post(path string, data any) error {
var body io.Reader
if data != nil {
encoded, err := json.Marshal(data)
@@ -54,7 +54,7 @@ func (client *httpClient) post(path string, data interface{}) error {
return nil
}
-func (client *httpClient) poll(path string, data interface{}) error {
+func (client *httpClient) poll(path string, data any) error {
for {
resp, err := http.Get(client.serverHost + path)
if err != nil {
@@ -153,10 +153,7 @@ func (client *httpClient) PostAbort() error {
func (client *httpClient) ShouldAbort() bool {
err := client.poll("/abort", nil)
- if err == ErrorGone {
- return true
- }
- return false
+ return err == ErrorGone
}
func (client *httpClient) Write(p []byte) (int, error) {
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
index d2c71ab1b..8a1b7a5bb 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
@@ -75,7 +75,7 @@ func (server *httpServer) Address() string {
return "http://" + server.listener.Addr().String()
}
-func (server *httpServer) GetSuiteDone() chan interface{} {
+func (server *httpServer) GetSuiteDone() chan any {
return server.handler.done
}
@@ -96,7 +96,7 @@ func (server *httpServer) RegisterAlive(node int, alive func() bool) {
//
// The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
-func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool {
+func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object any) bool {
defer request.Body.Close()
if json.NewDecoder(request.Body).Decode(object) != nil {
writer.WriteHeader(http.StatusBadRequest)
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go
index 59e8e6fd0..bb4675a02 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go
@@ -35,7 +35,7 @@ func (client *rpcClient) Close() error {
return client.client.Close()
}
-func (client *rpcClient) poll(method string, data interface{}) error {
+func (client *rpcClient) poll(method string, data any) error {
for {
err := client.client.Call(method, voidSender, data)
if err == nil {
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go
index 2620fd562..1574f99ac 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go
@@ -25,7 +25,7 @@ type RPCServer struct {
handler *ServerHandler
}
-//Create a new server, automatically selecting a port
+// Create a new server, automatically selecting a port
func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, error) {
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
@@ -37,7 +37,7 @@ func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, e
}, nil
}
-//Start the server. You don't need to `go s.Start()`, just `s.Start()`
+// Start the server. You don't need to `go s.Start()`, just `s.Start()`
func (server *RPCServer) Start() {
rpcServer := rpc.NewServer()
rpcServer.RegisterName("Server", server.handler) //register the handler's methods as the server
@@ -48,17 +48,17 @@ func (server *RPCServer) Start() {
go httpServer.Serve(server.listener)
}
-//Stop the server
+// Stop the server
func (server *RPCServer) Close() {
server.listener.Close()
}
-//The address the server can be reached it. Pass this into the `ForwardingReporter`.
+// The address the server can be reached at. Pass this into the `ForwardingReporter`.
func (server *RPCServer) Address() string {
return server.listener.Addr().String()
}
-func (server *RPCServer) GetSuiteDone() chan interface{} {
+func (server *RPCServer) GetSuiteDone() chan any {
return server.handler.done
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
index a6d98793e..ab9e11372 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
@@ -18,7 +18,7 @@ var voidSender Void
// It handles all the business logic to avoid duplication between the two servers
type ServerHandler struct {
- done chan interface{}
+ done chan any
outputDestination io.Writer
reporter reporters.Reporter
alives []func() bool
@@ -46,7 +46,7 @@ func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHan
parallelTotal: parallelTotal,
outputDestination: os.Stdout,
- done: make(chan interface{}),
+ done: make(chan any),
}
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go
index cc351a39b..9c18dc8e5 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go
@@ -8,7 +8,7 @@ import (
type ReportEntry = types.ReportEntry
-func NewReportEntry(name string, cl types.CodeLocation, args ...interface{}) (ReportEntry, error) {
+func NewReportEntry(name string, cl types.CodeLocation, args ...any) (ReportEntry, error) {
out := ReportEntry{
Visibility: types.ReportEntryVisibilityAlways,
Name: name,
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
index 73e265565..b4ecc7cb8 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
@@ -1,6 +1,7 @@
package testingtproxy
import (
+ "context"
"fmt"
"io"
"os"
@@ -19,9 +20,9 @@ type addReportEntryFunc func(names string, args ...any)
type ginkgoWriterInterface interface {
io.Writer
- Print(a ...interface{})
- Printf(format string, a ...interface{})
- Println(a ...interface{})
+ Print(a ...any)
+ Printf(format string, a ...any)
+ Println(a ...any)
}
type ginkgoRecoverFunc func()
type attachProgressReporterFunc func(func() string) func()
@@ -80,11 +81,31 @@ func (t *ginkgoTestingTProxy) Setenv(key, value string) {
}
}
-func (t *ginkgoTestingTProxy) Error(args ...interface{}) {
+func (t *ginkgoTestingTProxy) Chdir(dir string) {
+ currentDir, err := os.Getwd()
+ if err != nil {
+ t.fail(fmt.Sprintf("Failed to get current directory: %v", err), 1)
+ }
+
+ t.cleanup(os.Chdir, currentDir, internal.Offset(1))
+
+ err = os.Chdir(dir)
+ if err != nil {
+ t.fail(fmt.Sprintf("Failed to change directory: %v", err), 1)
+ }
+}
+
+func (t *ginkgoTestingTProxy) Context() context.Context {
+ ctx, cancel := context.WithCancel(context.Background())
+ t.cleanup(cancel, internal.Offset(1))
+ return ctx
+}
+
+func (t *ginkgoTestingTProxy) Error(args ...any) {
t.fail(fmt.Sprintln(args...), t.offset)
}
-func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) {
+func (t *ginkgoTestingTProxy) Errorf(format string, args ...any) {
t.fail(fmt.Sprintf(format, args...), t.offset)
}
@@ -100,11 +121,11 @@ func (t *ginkgoTestingTProxy) Failed() bool {
return t.report().Failed()
}
-func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) {
+func (t *ginkgoTestingTProxy) Fatal(args ...any) {
t.fail(fmt.Sprintln(args...), t.offset)
}
-func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
+func (t *ginkgoTestingTProxy) Fatalf(format string, args ...any) {
t.fail(fmt.Sprintf(format, args...), t.offset)
}
@@ -112,11 +133,11 @@ func (t *ginkgoTestingTProxy) Helper() {
types.MarkAsHelper(1)
}
-func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
+func (t *ginkgoTestingTProxy) Log(args ...any) {
fmt.Fprintln(t.writer, args...)
}
-func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) {
+func (t *ginkgoTestingTProxy) Logf(format string, args ...any) {
t.Log(fmt.Sprintf(format, args...))
}
@@ -128,7 +149,7 @@ func (t *ginkgoTestingTProxy) Parallel() {
// No-op
}
-func (t *ginkgoTestingTProxy) Skip(args ...interface{}) {
+func (t *ginkgoTestingTProxy) Skip(args ...any) {
t.skip(fmt.Sprintln(args...), t.offset)
}
@@ -136,7 +157,7 @@ func (t *ginkgoTestingTProxy) SkipNow() {
t.skip("skip", t.offset)
}
-func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) {
+func (t *ginkgoTestingTProxy) Skipf(format string, args ...any) {
t.skip(fmt.Sprintf(format, args...), t.offset)
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go
index aab42d5fb..1c4e0534e 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go
@@ -121,15 +121,15 @@ func (w *Writer) ClearTeeWriters() {
w.teeWriters = []io.Writer{}
}
-func (w *Writer) Print(a ...interface{}) {
+func (w *Writer) Print(a ...any) {
fmt.Fprint(w, a...)
}
-func (w *Writer) Printf(format string, a ...interface{}) {
+func (w *Writer) Printf(format string, a ...any) {
fmt.Fprintf(w, format, a...)
}
-func (w *Writer) Println(a ...interface{}) {
+func (w *Writer) Println(a ...any) {
fmt.Fprintln(w, a...)
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
index 480730486..74ad0768b 100644
--- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
@@ -685,11 +685,11 @@ func (r *DefaultReporter) _emit(s string, block bool, isDelimiter bool) {
}
/* Rendering text */
-func (r *DefaultReporter) f(format string, args ...interface{}) string {
+func (r *DefaultReporter) f(format string, args ...any) string {
return r.formatter.F(format, args...)
}
-func (r *DefaultReporter) fi(indentation uint, format string, args ...interface{}) string {
+func (r *DefaultReporter) fi(indentation uint, format string, args ...any) string {
return r.formatter.Fi(indentation, format, args...)
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
index aa1a35176..5bf2e62e9 100644
--- a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
+++ b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
@@ -60,7 +60,7 @@ AddReportEntry() must be called within a Subject or Setup node - not in a Contai
You can learn more about Report Entries here: https://onsi.github.io/ginkgo/#attaching-data-to-reports
*/
-func AddReportEntry(name string, args ...interface{}) {
+func AddReportEntry(name string, args ...any) {
cl := types.NewCodeLocation(1)
reportEntry, err := internal.NewReportEntry(name, cl, args...)
if err != nil {
@@ -89,7 +89,7 @@ You can learn more about ReportBeforeEach here: https://onsi.github.io/ginkgo/#g
You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
*/
func ReportBeforeEach(body any, args ...any) bool {
- combinedArgs := []interface{}{body}
+ combinedArgs := []any{body}
combinedArgs = append(combinedArgs, args...)
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeEach, "", combinedArgs...))
@@ -113,7 +113,7 @@ You can learn more about ReportAfterEach here: https://onsi.github.io/ginkgo/#ge
You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
*/
func ReportAfterEach(body any, args ...any) bool {
- combinedArgs := []interface{}{body}
+ combinedArgs := []any{body}
combinedArgs = append(combinedArgs, args...)
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...))
@@ -143,7 +143,7 @@ You can learn more about Ginkgo's reporting infrastructure, including generating
You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
*/
func ReportBeforeSuite(body any, args ...any) bool {
- combinedArgs := []interface{}{body}
+ combinedArgs := []any{body}
combinedArgs = append(combinedArgs, args...)
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...))
}
@@ -174,8 +174,8 @@ You can learn more about Ginkgo's reporting infrastructure, including generating
You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
*/
-func ReportAfterSuite(text string, body any, args ...interface{}) bool {
- combinedArgs := []interface{}{body}
+func ReportAfterSuite(text string, body any, args ...any) bool {
+ combinedArgs := []any{body}
combinedArgs = append(combinedArgs, args...)
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...))
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go
index 9074a57ac..b9e0ca9ef 100644
--- a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go
+++ b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go
@@ -23,7 +23,7 @@ You can learn more about generating EntryDescriptions here: https://onsi.github.
*/
type EntryDescription string
-func (ed EntryDescription) render(args ...interface{}) string {
+func (ed EntryDescription) render(args ...any) string {
return fmt.Sprintf(string(ed), args...)
}
@@ -44,7 +44,7 @@ For example:
You can learn more about DescribeTable here: https://onsi.github.io/ginkgo/#table-specs
And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns
*/
-func DescribeTable(description string, args ...interface{}) bool {
+func DescribeTable(description string, args ...any) bool {
GinkgoHelper()
generateTable(description, false, args...)
return true
@@ -53,7 +53,7 @@ func DescribeTable(description string, args ...interface{}) bool {
/*
You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`.
*/
-func FDescribeTable(description string, args ...interface{}) bool {
+func FDescribeTable(description string, args ...any) bool {
GinkgoHelper()
args = append(args, internal.Focus)
generateTable(description, false, args...)
@@ -63,7 +63,7 @@ func FDescribeTable(description string, args ...interface{}) bool {
/*
You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`.
*/
-func PDescribeTable(description string, args ...interface{}) bool {
+func PDescribeTable(description string, args ...any) bool {
GinkgoHelper()
args = append(args, internal.Pending)
generateTable(description, false, args...)
@@ -109,7 +109,7 @@ Note that you **must** place define an It inside the body function.
You can learn more about DescribeTableSubtree here: https://onsi.github.io/ginkgo/#table-specs
And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns
*/
-func DescribeTableSubtree(description string, args ...interface{}) bool {
+func DescribeTableSubtree(description string, args ...any) bool {
GinkgoHelper()
generateTable(description, true, args...)
return true
@@ -118,7 +118,7 @@ func DescribeTableSubtree(description string, args ...interface{}) bool {
/*
You can focus a table with `FDescribeTableSubtree`. This is equivalent to `FDescribe`.
*/
-func FDescribeTableSubtree(description string, args ...interface{}) bool {
+func FDescribeTableSubtree(description string, args ...any) bool {
GinkgoHelper()
args = append(args, internal.Focus)
generateTable(description, true, args...)
@@ -128,7 +128,7 @@ func FDescribeTableSubtree(description string, args ...interface{}) bool {
/*
You can mark a table as pending with `PDescribeTableSubtree`. This is equivalent to `PDescribe`.
*/
-func PDescribeTableSubtree(description string, args ...interface{}) bool {
+func PDescribeTableSubtree(description string, args ...any) bool {
GinkgoHelper()
args = append(args, internal.Pending)
generateTable(description, true, args...)
@@ -144,9 +144,9 @@ var XDescribeTableSubtree = PDescribeTableSubtree
TableEntry represents an entry in a table test. You generally use the `Entry` constructor.
*/
type TableEntry struct {
- description interface{}
- decorations []interface{}
- parameters []interface{}
+ description any
+ decorations []any
+ parameters []any
codeLocation types.CodeLocation
}
@@ -162,7 +162,7 @@ If you want to generate interruptible specs simply write a Table function that a
You can learn more about Entry here: https://onsi.github.io/ginkgo/#table-specs
*/
-func Entry(description interface{}, args ...interface{}) TableEntry {
+func Entry(description any, args ...any) TableEntry {
GinkgoHelper()
decorations, parameters := internal.PartitionDecorations(args...)
return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)}
@@ -171,7 +171,7 @@ func Entry(description interface{}, args ...interface{}) TableEntry {
/*
You can focus a particular entry with FEntry. This is equivalent to FIt.
*/
-func FEntry(description interface{}, args ...interface{}) TableEntry {
+func FEntry(description any, args ...any) TableEntry {
GinkgoHelper()
decorations, parameters := internal.PartitionDecorations(args...)
decorations = append(decorations, internal.Focus)
@@ -181,7 +181,7 @@ func FEntry(description interface{}, args ...interface{}) TableEntry {
/*
You can mark a particular entry as pending with PEntry. This is equivalent to PIt.
*/
-func PEntry(description interface{}, args ...interface{}) TableEntry {
+func PEntry(description any, args ...any) TableEntry {
GinkgoHelper()
decorations, parameters := internal.PartitionDecorations(args...)
decorations = append(decorations, internal.Pending)
@@ -196,17 +196,17 @@ var XEntry = PEntry
var contextType = reflect.TypeOf(new(context.Context)).Elem()
var specContextType = reflect.TypeOf(new(SpecContext)).Elem()
-func generateTable(description string, isSubtree bool, args ...interface{}) {
+func generateTable(description string, isSubtree bool, args ...any) {
GinkgoHelper()
cl := types.NewCodeLocation(0)
- containerNodeArgs := []interface{}{cl}
+ containerNodeArgs := []any{cl}
entries := []TableEntry{}
- var internalBody interface{}
+ var internalBody any
var internalBodyType reflect.Type
- var tableLevelEntryDescription interface{}
- tableLevelEntryDescription = func(args ...interface{}) string {
+ var tableLevelEntryDescription any
+ tableLevelEntryDescription = func(args ...any) string {
out := []string{}
for _, arg := range args {
out = append(out, fmt.Sprint(arg))
@@ -265,7 +265,7 @@ func generateTable(description string, isSubtree bool, args ...interface{}) {
err = types.GinkgoErrors.InvalidEntryDescription(entry.codeLocation)
}
- internalNodeArgs := []interface{}{entry.codeLocation}
+ internalNodeArgs := []any{entry.codeLocation}
internalNodeArgs = append(internalNodeArgs, entry.decorations...)
hasContext := false
@@ -290,7 +290,7 @@ func generateTable(description string, isSubtree bool, args ...interface{}) {
if err != nil {
panic(err)
}
- invokeFunction(internalBody, append([]interface{}{c}, entry.parameters...))
+ invokeFunction(internalBody, append([]any{c}, entry.parameters...))
})
if isSubtree {
exitIfErr(types.GinkgoErrors.ContextsCannotBeUsedInSubtreeTables(cl))
@@ -316,7 +316,7 @@ func generateTable(description string, isSubtree bool, args ...interface{}) {
pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, description, containerNodeArgs...))
}
-func invokeFunction(function interface{}, parameters []interface{}) []reflect.Value {
+func invokeFunction(function any, parameters []any) []reflect.Value {
inValues := make([]reflect.Value, len(parameters))
funcType := reflect.TypeOf(function)
@@ -339,7 +339,7 @@ func invokeFunction(function interface{}, parameters []interface{}) []reflect.Va
return reflect.ValueOf(function).Call(inValues)
}
-func validateParameters(function interface{}, parameters []interface{}, kind string, cl types.CodeLocation, hasContext bool) error {
+func validateParameters(function any, parameters []any, kind string, cl types.CodeLocation, hasContext bool) error {
funcType := reflect.TypeOf(function)
limit := funcType.NumIn()
offset := 0
@@ -377,7 +377,7 @@ func validateParameters(function interface{}, parameters []interface{}, kind str
return nil
}
-func computeValue(parameter interface{}, t reflect.Type) reflect.Value {
+func computeValue(parameter any, t reflect.Type) reflect.Value {
if parameter == nil {
return reflect.Zero(t)
} else {
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go
index 8c0dfab8c..2e827efe3 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/config.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go
@@ -159,7 +159,7 @@ func (g CLIConfig) ComputedProcs() int {
n := 1
if g.Parallel {
- n = runtime.NumCPU()
+ n = runtime.GOMAXPROCS(-1)
if n > 4 {
n = n - 1
}
@@ -172,7 +172,7 @@ func (g CLIConfig) ComputedNumCompilers() int {
return g.NumCompilers
}
- return runtime.NumCPU()
+ return runtime.GOMAXPROCS(-1)
}
// Configuration for the Ginkgo CLI capturing available go flags
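The switch from runtime.NumCPU() to runtime.GOMAXPROCS(-1) means the computed parallelism respects an explicitly lowered limit (set via the GOMAXPROCS environment variable or by tooling such as Uber's automaxprocs in CPU-limited containers) instead of always using the machine's full logical CPU count. Calling GOMAXPROCS with a non-positive argument only reads the current setting. A small sketch of the difference:

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	// NumCPU always reports the machine's logical CPU count.
	fmt.Println("NumCPU:    ", runtime.NumCPU())
	// GOMAXPROCS(-1) reads the current limit without changing it, so it
	// reflects GOMAXPROCS=2 or any programmatic override.
	fmt.Println("GOMAXPROCS:", runtime.GOMAXPROCS(-1))
}
```

Run with `GOMAXPROCS=2 go run main.go` to see the two values diverge.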
@@ -231,6 +231,10 @@ func (g GoFlagsConfig) BinaryMustBePreserved() bool {
return g.BlockProfile != "" || g.CPUProfile != "" || g.MemProfile != "" || g.MutexProfile != ""
}
+func (g GoFlagsConfig) NeedsSymbols() bool {
+ return g.BinaryMustBePreserved()
+}
+
// Configuration that was deprecated in 2.0
type deprecatedConfig struct {
DebugParallel bool
@@ -257,8 +261,12 @@ var FlagSections = GinkgoFlagSections{
{Key: "filter", Style: "{{cyan}}", Heading: "Filtering Tests"},
{Key: "failure", Style: "{{red}}", Heading: "Failure Handling"},
{Key: "output", Style: "{{magenta}}", Heading: "Controlling Output Formatting"},
- {Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis"},
- {Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance Analysis"},
+ {Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis",
+ Description: "When generating a cover files, please pass a filename {{bold}}not{{/}} a path. To specify a different directory use {{magenta}}--output-dir{{/}}.",
+ },
+ {Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance Analysis",
+ Description: "When generating profile files, please pass filenames {{bold}}not{{/}} a path. Ginkgo will generate a profile file with the given name in the package's directory. To specify a different directory use {{magenta}}--output-dir{{/}}.",
+ },
{Key: "debug", Style: "{{blue}}", Heading: "Debugging Tests",
Description: "In addition to these flags, Ginkgo supports a few debugging environment variables. To change the parallel server protocol set {{blue}}GINKGO_PARALLEL_PROTOCOL{{/}} to {{bold}}HTTP{{/}}. To avoid pruning callstacks set {{blue}}GINKGO_PRUNE_STACK{{/}} to {{bold}}FALSE{{/}}."},
{Key: "watch", Style: "{{light-yellow}}", Heading: "Controlling Ginkgo Watch"},
@@ -365,7 +373,7 @@ var ReporterConfigFlags = GinkgoFlags{
func BuildTestSuiteFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig) (GinkgoFlagSet, error) {
flags := SuiteConfigFlags.CopyAppend(ParallelConfigFlags...).CopyAppend(ReporterConfigFlags...)
flags = flags.WithPrefix("ginkgo")
- bindings := map[string]interface{}{
+ bindings := map[string]any{
"S": suiteConfig,
"R": reporterConfig,
"D": &deprecatedConfig{},
@@ -515,7 +523,7 @@ var GoBuildFlags = GinkgoFlags{
{KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis",
Usage: "enable data race detection. Supported on linux/amd64, linux/ppc64le, linux/arm64, linux/s390x, freebsd/amd64, netbsd/amd64, darwin/amd64, darwin/arm64, and windows/amd64."},
{KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis",
- Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`},
+ Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty (by explicitly passing --vet=""), "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`},
{KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis",
Usage: "Enable coverage analysis. Note that because coverage works by annotating the source code before compilation, compilation and test failures with coverage enabled may report line numbers that don't correspond to the original sources."},
{KeyPath: "Go.CoverMode", Name: "covermode", UsageArgument: "set,count,atomic", SectionKey: "code-and-coverage-analysis",
@@ -572,7 +580,7 @@ var GoBuildFlags = GinkgoFlags{
// GoRunFlags provides flags for the Ginkgo CLI run and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI
var GoRunFlags = GinkgoFlags{
{KeyPath: "Go.CoverProfile", Name: "coverprofile", UsageArgument: "file", SectionKey: "code-and-coverage-analysis",
- Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover.`},
+ Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover. Must be passed a filename, not a path. Use output-dir to control the location of the output.`},
{KeyPath: "Go.BlockProfile", Name: "blockprofile", UsageArgument: "file", SectionKey: "performance-analysis",
Usage: `Write a goroutine blocking profile to the specified file when all tests are complete. Preserves test binary.`},
{KeyPath: "Go.BlockProfileRate", Name: "blockprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis",
@@ -600,6 +608,22 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo
errors = append(errors, GinkgoErrors.BothRepeatAndUntilItFails())
}
+ if strings.ContainsRune(goFlagsConfig.CoverProfile, os.PathSeparator) {
+ errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--coverprofile", goFlagsConfig.CoverProfile))
+ }
+ if strings.ContainsRune(goFlagsConfig.CPUProfile, os.PathSeparator) {
+ errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--cpuprofile", goFlagsConfig.CPUProfile))
+ }
+ if strings.ContainsRune(goFlagsConfig.MemProfile, os.PathSeparator) {
+ errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--memprofile", goFlagsConfig.MemProfile))
+ }
+ if strings.ContainsRune(goFlagsConfig.BlockProfile, os.PathSeparator) {
+ errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--blockprofile", goFlagsConfig.BlockProfile))
+ }
+ if strings.ContainsRune(goFlagsConfig.MutexProfile, os.PathSeparator) {
+ errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--mutexprofile", goFlagsConfig.MutexProfile))
+ }
+
//initialize the output directory
if cliConfig.OutputDir != "" {
err := os.MkdirAll(cliConfig.OutputDir, 0777)
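The added vetting rejects profile flags whose value contains a path separator. A standalone sketch of the same check (requireFilename is a hypothetical helper, not part of Ginkgo's API):

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// requireFilename mirrors the vetting above: a profile flag must name a bare
// file; any path separator means the user passed a path instead.
func requireFilename(flag, value string) error {
	if strings.ContainsRune(value, os.PathSeparator) {
		return fmt.Errorf("%s expects a filename, not a path: %q (use --output-dir to pick the directory)", flag, value)
	}
	return nil
}

func main() {
	fmt.Println(requireFilename("--coverprofile", "cover.out"))     // <nil>
	fmt.Println(requireFilename("--coverprofile", "out/cover.out")) // error
}
```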
@@ -620,7 +644,7 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo
}
// GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test
-func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild string, pathToInvocationPath string) ([]string, error) {
+func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild string, pathToInvocationPath string, preserveSymbols bool) ([]string, error) {
// if the user has set the CoverProfile run-time flag, make sure to also set the build-time cover flag so that
// the built test binary can generate a coverprofile
if goFlagsConfig.CoverProfile != "" {
@@ -643,10 +667,14 @@ func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild strin
goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",")
}
+ if !goFlagsConfig.NeedsSymbols() && goFlagsConfig.LDFlags == "" && !preserveSymbols {
+ goFlagsConfig.LDFlags = "-w -s"
+ }
+
args := []string{"test", "-c", packageToBuild}
goArgs, err := GenerateFlagArgs(
GoBuildFlags,
- map[string]interface{}{
+ map[string]any{
"Go": &goFlagsConfig,
},
)
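When nothing requires symbols (no profiles, no user-supplied ldflags, and the caller did not ask to preserve them), the compile step now injects `-ldflags "-w -s"`, which drops DWARF debug info and the symbol table to shrink the test binary. A sketch of the decision, with abridged stand-in names for the config fields above:

```go
package main

import "fmt"

// stripDecision condenses the branch above: strip only when no profile needs
// symbols, the user set no ldflags of their own, and symbols need not survive.
func stripDecision(needsSymbols bool, userLDFlags string, preserveSymbols bool) string {
	if !needsSymbols && userLDFlags == "" && !preserveSymbols {
		return "-w -s" // -w: omit DWARF debug info; -s: omit the symbol table
	}
	return userLDFlags
}

func main() {
	// Roughly what the generated command line ends up containing:
	fmt.Printf("go test -c -ldflags %q ./pkg\n", stripDecision(false, "", false))
}
```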
@@ -665,7 +693,7 @@ func GenerateGinkgoTestRunArgs(suiteConfig SuiteConfig, reporterConfig ReporterC
flags = flags.CopyAppend(ParallelConfigFlags.WithPrefix("ginkgo")...)
flags = flags.CopyAppend(ReporterConfigFlags.WithPrefix("ginkgo")...)
flags = flags.CopyAppend(GoRunFlags.WithPrefix("test")...)
- bindings := map[string]interface{}{
+ bindings := map[string]any{
"S": &suiteConfig,
"R": &reporterConfig,
"Go": &goFlagsConfig,
@@ -677,7 +705,7 @@ func GenerateGinkgoTestRunArgs(suiteConfig SuiteConfig, reporterConfig ReporterC
// GenerateGoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled non-Ginkgo test binary
func GenerateGoTestRunArgs(goFlagsConfig GoFlagsConfig) ([]string, error) {
flags := GoRunFlags.WithPrefix("test")
- bindings := map[string]interface{}{
+ bindings := map[string]any{
"Go": &goFlagsConfig,
}
@@ -699,7 +727,7 @@ func BuildRunCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterCo
flags = flags.CopyAppend(GoBuildFlags...)
flags = flags.CopyAppend(GoRunFlags...)
- bindings := map[string]interface{}{
+ bindings := map[string]any{
"S": suiteConfig,
"R": reporterConfig,
"C": cliConfig,
@@ -720,7 +748,7 @@ func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *Reporter
flags = flags.CopyAppend(GoBuildFlags...)
flags = flags.CopyAppend(GoRunFlags...)
- bindings := map[string]interface{}{
+ bindings := map[string]any{
"S": suiteConfig,
"R": reporterConfig,
"C": cliConfig,
@@ -736,7 +764,7 @@ func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig
flags := GinkgoCLISharedFlags
flags = flags.CopyAppend(GoBuildFlags...)
- bindings := map[string]interface{}{
+ bindings := map[string]any{
"C": cliConfig,
"Go": goFlagsConfig,
"D": &deprecatedConfig{},
@@ -760,7 +788,7 @@ func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig
func BuildLabelsCommandFlagSet(cliConfig *CLIConfig) (GinkgoFlagSet, error) {
flags := GinkgoCLISharedFlags.SubsetWithNames("r", "skip-package")
- bindings := map[string]interface{}{
+ bindings := map[string]any{
"C": cliConfig,
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go
index 17922304b..518989a84 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go
@@ -113,7 +113,7 @@ type DeprecatedSpecFailure struct {
type DeprecatedSpecMeasurement struct {
Name string
- Info interface{}
+ Info any
Order int
Results []float64
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go
index 6bb72d00c..c2796b549 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/errors.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go
@@ -88,7 +88,7 @@ body of a {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}.`, n
}
}
-func (g ginkgoErrors) CaughtPanicDuringABuildPhase(caughtPanic interface{}, cl CodeLocation) error {
+func (g ginkgoErrors) CaughtPanicDuringABuildPhase(caughtPanic any, cl CodeLocation) error {
return GinkgoError{
Heading: "Assertion or Panic detected during tree construction",
Message: formatter.F(
@@ -189,7 +189,7 @@ func (g ginkgoErrors) InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(cl
}
}
-func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator interface{}) error {
+func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator any) error {
return GinkgoError{
Heading: "Unknown Decorator",
Message: formatter.F(`[%s] node was passed an unknown decorator: '%#v'`, nodeType, decorator),
@@ -345,7 +345,7 @@ func (g ginkgoErrors) PushingCleanupInCleanupNode(cl CodeLocation) error {
}
/* ReportEntry errors */
-func (g ginkgoErrors) TooManyReportEntryValues(cl CodeLocation, arg interface{}) error {
+func (g ginkgoErrors) TooManyReportEntryValues(cl CodeLocation, arg any) error {
return GinkgoError{
Heading: "Too Many ReportEntry Values",
Message: formatter.F(`{{bold}}AddGinkgoReport{{/}} can only be given one value. Got unexpected value: %#v`, arg),
@@ -539,7 +539,7 @@ func (g ginkgoErrors) SynchronizedBeforeSuiteDisappearedOnProc1() error {
/* Configuration errors */
-func (g ginkgoErrors) UnknownTypePassedToRunSpecs(value interface{}) error {
+func (g ginkgoErrors) UnknownTypePassedToRunSpecs(value any) error {
return GinkgoError{
Heading: "Unknown Type passed to RunSpecs",
Message: fmt.Sprintf("RunSpecs() accepts labels, and configuration of type types.SuiteConfig and/or types.ReporterConfig.\n You passed in: %v", value),
@@ -629,6 +629,20 @@ func (g ginkgoErrors) BothRepeatAndUntilItFails() error {
}
}
+func (g ginkgoErrors) ExpectFilenameNotPath(flag string, path string) error {
+ return GinkgoError{
+ Heading: fmt.Sprintf("%s expects a filename but was given a path: %s", flag, path),
+ Message: fmt.Sprintf("%s takes a filename, not a path. Use --output-dir to specify a directory to collect all test outputs.", flag),
+ }
+}
+
+func (g ginkgoErrors) FlagAfterPositionalParameter() error {
+ return GinkgoError{
+ Heading: "Malformed arguments - detected a flag after the package liste",
+ Message: "Make sure all flags appear {{bold}}after{{/}} the Ginkgo subcommand and {{bold}}before{{/}} your list of packages (or './...').\n{{gray}}e.g. 'ginkgo run -p my_package' is valid but `ginkgo -p run my_package` is not.\n{{gray}}e.g. 'ginkgo -p -vet=\"\" ./...' is valid but 'ginkgo -p ./... -vet=\"\"' is not{{/}}",
+ }
+}
+
/* Stack-Trace parsing errors */
func (g ginkgoErrors) FailedToParseStackTrace(message string) error {
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/vendor/github.com/onsi/ginkgo/v2/types/flags.go
index de69f3022..8409653f9 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/flags.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/flags.go
@@ -92,7 +92,7 @@ func (gfs GinkgoFlagSections) Lookup(key string) (GinkgoFlagSection, bool) {
type GinkgoFlagSet struct {
flags GinkgoFlags
- bindings interface{}
+ bindings any
sections GinkgoFlagSections
extraGoFlagsSection GinkgoFlagSection
@@ -101,7 +101,7 @@ type GinkgoFlagSet struct {
}
// Call NewGinkgoFlagSet to create a GinkgoFlagSet that creates and binds to its own *flag.FlagSet
-func NewGinkgoFlagSet(flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections) (GinkgoFlagSet, error) {
+func NewGinkgoFlagSet(flags GinkgoFlags, bindings any, sections GinkgoFlagSections) (GinkgoFlagSet, error) {
return bindFlagSet(GinkgoFlagSet{
flags: flags,
bindings: bindings,
@@ -110,7 +110,7 @@ func NewGinkgoFlagSet(flags GinkgoFlags, bindings interface{}, sections GinkgoFl
}
// Call NewAttachedGinkgoFlagSet to create a GinkgoFlagSet that extends an existing *flag.FlagSet
-func NewAttachedGinkgoFlagSet(flagSet *flag.FlagSet, flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections, extraGoFlagsSection GinkgoFlagSection) (GinkgoFlagSet, error) {
+func NewAttachedGinkgoFlagSet(flagSet *flag.FlagSet, flags GinkgoFlags, bindings any, sections GinkgoFlagSections, extraGoFlagsSection GinkgoFlagSection) (GinkgoFlagSet, error) {
return bindFlagSet(GinkgoFlagSet{
flags: flags,
bindings: bindings,
@@ -335,7 +335,7 @@ func (f GinkgoFlagSet) substituteUsage() {
fmt.Fprintln(f.flagSet.Output(), f.Usage())
}
-func valueAtKeyPath(root interface{}, keyPath string) (reflect.Value, bool) {
+func valueAtKeyPath(root any, keyPath string) (reflect.Value, bool) {
if len(keyPath) == 0 {
return reflect.Value{}, false
}
@@ -433,7 +433,7 @@ func (ssv stringSliceVar) Set(s string) error {
}
// given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured.
-func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) {
+func GenerateFlagArgs(flags GinkgoFlags, bindings any) ([]string, error) {
result := []string{}
for _, flag := range flags {
name := flag.ExportAs
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
index 7fdc8aa23..40a909b6d 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
@@ -343,7 +343,7 @@ func tokenize(input string) func() (*treeNode, error) {
consumeUntil := func(cutset string) (string, int) {
j := i
for ; j < len(runes); j++ {
- if strings.IndexRune(cutset, runes[j]) >= 0 {
+ if strings.ContainsRune(cutset, runes[j]) {
break
}
}
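strings.ContainsRune(s, r) is exactly equivalent to strings.IndexRune(s, r) >= 0; the change is purely for readability. A quick demonstration:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	cutset := ",&|!()/"
	for _, r := range "a,b" {
		// Both forms agree for every rune.
		fmt.Printf("%q %v %v\n", r,
			strings.IndexRune(cutset, r) >= 0,
			strings.ContainsRune(cutset, r))
	}
}
```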
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go
index 7b1524b52..63f7a9f6d 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go
@@ -9,18 +9,18 @@ import (
// ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports
// and across the network connection when running in parallel
type ReportEntryValue struct {
- raw interface{} //unexported to prevent gob from freaking out about unregistered structs
+ raw any //unexported to prevent gob from freaking out about unregistered structs
AsJSON string
Representation string
}
-func WrapEntryValue(value interface{}) ReportEntryValue {
+func WrapEntryValue(value any) ReportEntryValue {
return ReportEntryValue{
raw: value,
}
}
-func (rev ReportEntryValue) GetRawValue() interface{} {
+func (rev ReportEntryValue) GetRawValue() any {
return rev.raw
}
@@ -118,7 +118,7 @@ func (entry ReportEntry) StringRepresentation() string {
// If used from a rehydrated JSON file _or_ in a ReportAfterSuite when running in parallel this will be
// a JSON-decoded interface{}. If you want to reconstitute your original object you can decode the entry.Value.AsJSON
// field yourself.
-func (entry ReportEntry) GetRawValue() interface{} {
+func (entry ReportEntry) GetRawValue() any {
return entry.Value.GetRawValue()
}
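A sketch of where these wrappers surface in practice (assuming the usual suite bootstrap exists elsewhere; the entry name and struct are hypothetical): values attached with AddReportEntry travel through ReportEntryValue, so in-process reporters get the original struct back from GetRawValue, while entries rehydrated from JSON or shipped between parallel procs surface the decoded form instead.

```go
package demo_test

import (
	. "github.com/onsi/ginkgo/v2"
)

type timing struct{ Millis int }

var _ = It("records a timing", func() {
	// In-process, GetRawValue() on this entry returns timing{Millis: 42};
	// after a JSON round-trip it is a generic decoded value instead.
	AddReportEntry("fetch-duration", timing{Millis: 42})
})
```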
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go
index 879e1d86c..158ac2fd8 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/version.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go
@@ -1,3 +1,3 @@
package types
-const VERSION = "2.22.2"
+const VERSION = "2.23.4"
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md
index a20d997cd..ba3bfe7a9 100644
--- a/vendor/github.com/onsi/gomega/CHANGELOG.md
+++ b/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -1,3 +1,19 @@
+## 1.36.3
+
+### Maintenance
+
+- bump all the things [adb8b49]
+- chore: replace `interface{}` with `any` [7613216]
+- Bump google.golang.org/protobuf from 1.36.1 to 1.36.5 (#822) [9fe5259]
+- remove spurious "toolchain" from go.mod (#819) [a0e85b9]
+- Bump golang.org/x/net from 0.33.0 to 0.35.0 (#823) [604a8b1]
+- Bump activesupport from 6.0.6.1 to 6.1.7.5 in /docs (#772) [36fbc84]
+- Bump github-pages from 231 to 232 in /docs (#778) [ced70d7]
+- Bump rexml from 3.2.6 to 3.3.9 in /docs (#788) [c8b4a07]
+- Bump github.com/onsi/ginkgo/v2 from 2.22.1 to 2.22.2 (#812) [06431b9]
+- Bump webrick from 1.8.1 to 1.9.1 in /docs (#800) [b55a92d]
+- Fix typos (#813) [a1d518b]
+
## 1.36.2
### Maintenance
@@ -322,7 +338,7 @@ Require Go 1.22+
### Features
-Introducting [gcustom](https://onsi.github.io/gomega/#gcustom-a-convenient-mechanism-for-buildling-custom-matchers) - a convenient mechanism for building custom matchers.
+Introducing [gcustom](https://onsi.github.io/gomega/#gcustom-a-convenient-mechanism-for-buildling-custom-matchers) - a convenient mechanism for building custom matchers.
This is an RC release for `gcustom`. The external API may be tweaked in response to feedback however it is expected to remain mostly stable.
@@ -461,7 +477,7 @@ These improvements are all documented in [Gomega's docs](https://onsi.github.io/
- Fix max number of samples in experiments on non-64-bit systems. (#528) [1c84497]
- Remove dependency on ginkgo v1.16.4 (#530) [4dea8d5]
- Fix for Go 1.18 (#532) [56d2a29]
-- Document precendence of timeouts (#533) [b607941]
+- Document precedence of timeouts (#533) [b607941]
## 1.18.1
@@ -478,7 +494,7 @@ These improvements are all documented in [Gomega's docs](https://onsi.github.io/
## Fixes
- Gomega now uses ioutil for Go 1.15 and lower (#492) - official support is only for the most recent two major versions of Go but this will unblock users who need to stay on older unsupported versions of Go. [c29c1c0]
-## Maintenace
+## Maintenance
- Remove Travis workflow (#491) [72e6040]
- Upgrade to Ginkgo 2.0.0 GA [f383637]
- chore: fix description of HaveField matcher (#487) [2b4b2c0]
@@ -726,7 +742,7 @@ Improvements:
- Added `BeSent` which attempts to send a value down a channel and fails if the attempt blocks. Can be paired with `Eventually` to safely send a value down a channel with a timeout.
- `Ω`, `Expect`, `Eventually`, and `Consistently` now immediately `panic` if there is no registered fail handler. This is always a mistake that can hide failing tests.
-- `Receive()` no longer errors when passed a closed channel, it's perfectly fine to attempt to read from a closed channel so Ω(c).Should(Receive()) always fails and Ω(c).ShoudlNot(Receive()) always passes with a closed channel.
+- `Receive()` no longer errors when passed a closed channel, it's perfectly fine to attempt to read from a closed channel so Ω(c).Should(Receive()) always fails and Ω(c).ShouldNot(Receive()) always passes with a closed channel.
- Added `HavePrefix` and `HaveSuffix` matchers.
- `ghttp` can now handle concurrent requests.
- Added `Succeed` which allows one to write `Ω(MyFunction()).Should(Succeed())`.
@@ -736,7 +752,7 @@ Improvements:
- `ghttp` servers can take an `io.Writer`. `ghttp` will write a line to the writer when each request arrives.
- Added `WithTransform` matcher to allow munging input data before feeding into the relevant matcher
- Added boolean `And`, `Or`, and `Not` matchers to allow creating composite matchers
-- Added `gbytes.TimeoutCloser`, `gbytes.TimeoutReader`, and `gbytes.TimeoutWriter` - these are convenience wrappers that timeout if the underlying Closer/Reader/Writer does not return within the alloted time.
+- Added `gbytes.TimeoutCloser`, `gbytes.TimeoutReader`, and `gbytes.TimeoutWriter` - these are convenience wrappers that timeout if the underlying Closer/Reader/Writer does not return within the allotted time.
- Added `gbytes.BufferReader` - this constructs a `gbytes.Buffer` that asynchronously reads the passed-in `io.Reader` into its buffer.
Bug Fixes:
@@ -781,7 +797,7 @@ New Matchers:
Updated Matchers:
-- `Receive` matcher can take a matcher as an argument and passes only if the channel under test receives an objet that satisfies the passed-in matcher.
+- `Receive` matcher can take a matcher as an argument and passes only if the channel under test receives an object that satisfies the passed-in matcher.
- Matchers that implement `MatchMayChangeInTheFuture(actual interface{}) bool` can inform `Eventually` and/or `Consistently` when a match has no chance of changing status in the future. For example, `Receive` returns `false` when a channel is closed.
Misc:
diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go
index 6c1680638..96f04b210 100644
--- a/vendor/github.com/onsi/gomega/format/format.go
+++ b/vendor/github.com/onsi/gomega/format/format.go
@@ -57,7 +57,7 @@ var Indent = " "
var longFormThreshold = 20
-// GomegaStringer allows for custom formating of objects for gomega.
+// GomegaStringer allows for custom formatting of objects for gomega.
type GomegaStringer interface {
// GomegaString will be used to custom format an object.
// It does not follow the UseStringerRepresentation value and will always be called regardless.
@@ -73,7 +73,7 @@ If the CustomFormatter does not want to handle the object it should return ("",
Strings returned by CustomFormatters are not truncated
*/
-type CustomFormatter func(value interface{}) (string, bool)
+type CustomFormatter func(value any) (string, bool)
type CustomFormatterKey uint
var customFormatterKey CustomFormatterKey = 1
@@ -125,7 +125,7 @@ If expected is omitted, then the message looks like:
*/
-func Message(actual interface{}, message string, expected ...interface{}) string {
+func Message(actual any, message string, expected ...any) string {
if len(expected) == 0 {
return fmt.Sprintf("Expected\n%s\n%s", Object(actual, 1), message)
}
@@ -255,7 +255,7 @@ recursing into the object.
Set PrintContextObjects to true to print the content of objects implementing context.Context
*/
-func Object(object interface{}, indentation uint) string {
+func Object(object any, indentation uint) string {
indent := strings.Repeat(Indent, int(indentation))
value := reflect.ValueOf(object)
commonRepresentation := ""
@@ -392,7 +392,7 @@ func formatValue(value reflect.Value, indentation uint) string {
}
}
-func formatString(object interface{}, indentation uint) string {
+func formatString(object any, indentation uint) string {
if indentation == 1 {
s := fmt.Sprintf("%s", object)
components := strings.Split(s, "\n")
diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go
index 9a028f3f3..270e4b767 100644
--- a/vendor/github.com/onsi/gomega/gomega_dsl.go
+++ b/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -22,7 +22,7 @@ import (
"github.com/onsi/gomega/types"
)
-const GOMEGA_VERSION = "1.36.2"
+const GOMEGA_VERSION = "1.36.3"
const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler.
If you're using Ginkgo then you probably forgot to put your assertion in an It().
@@ -191,7 +191,7 @@ func ensureDefaultGomegaIsConfigured() {
// Will succeed only if `MyAmazingThing()` returns `(3, nil)`
//
// Ω and Expect are identical
-func Ω(actual interface{}, extra ...interface{}) Assertion {
+func Ω(actual any, extra ...any) Assertion {
ensureDefaultGomegaIsConfigured()
return Default.Ω(actual, extra...)
}
@@ -217,7 +217,7 @@ func Ω(actual interface{}, extra ...interface{}) Assertion {
// Will succeed only if `MyAmazingThing()` returns `(3, nil)`
//
// Expect and Ω are identical
-func Expect(actual interface{}, extra ...interface{}) Assertion {
+func Expect(actual any, extra ...any) Assertion {
ensureDefaultGomegaIsConfigured()
return Default.Expect(actual, extra...)
}
@@ -233,7 +233,7 @@ func Expect(actual interface{}, extra ...interface{}) Assertion {
// This is most useful in helper functions that make assertions. If you want Gomega's
// error message to refer to the calling line in the test (as opposed to the line in the helper function)
// set the first argument of `ExpectWithOffset` appropriately.
-func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion {
+func ExpectWithOffset(offset int, actual any, extra ...any) Assertion {
ensureDefaultGomegaIsConfigured()
return Default.ExpectWithOffset(offset, actual, extra...)
}
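A sketch of the offset mechanics (expectHealthy is a hypothetical helper): with offset 1, a failure is reported at the helper's call site rather than inside the helper.

```go
package demo_test

import (
	. "github.com/onsi/gomega"
)

// expectHealthy asserts on behalf of its caller; offset 1 skips this frame
// so failure messages point at the test line that invoked it.
func expectHealthy(status int) {
	ExpectWithOffset(1, status).To(Equal(200))
}
```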
@@ -319,19 +319,19 @@ you can also use Eventually().WithContext(ctx) to pass in the context. Passed-in
Eventually(client.FetchCount).WithContext(ctx).WithArguments("/users").Should(BeNumerically(">=", 17))
}, SpecTimeout(time.Second))
-Either way the context pasesd to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit.
+Either way the context passed to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit.
By default, when a context is passed to Eventually *without* an explicit timeout, Gomega will rely solely on the context's cancellation to determine when to stop polling. If you want to specify a timeout in addition to the context you can do so using the .WithTimeout() method. For example:
Eventually(client.FetchCount).WithContext(ctx).WithTimeout(10*time.Second).Should(BeNumerically(">=", 17))
-now either the context cacnellation or the timeout will cause Eventually to stop polling.
+now either the context cancellation or the timeout will cause Eventually to stop polling.
If, instead, you would like to opt out of this behavior and have Gomega's default timeouts govern Eventuallys that take a context you can call:
EnforceDefaultTimeoutsWhenUsingContexts()
-in the DSL (or on a Gomega instance). Now all calls to Eventually that take a context will fail if eitehr the context is cancelled or the default timeout elapses.
+in the DSL (or on a Gomega instance). Now all calls to Eventually that take a context will fail if either the context is cancelled or the default timeout elapses.
**Category 3: Making assertions _in_ the function passed into Eventually**
@@ -390,7 +390,7 @@ is equivalent to
Eventually(...).WithTimeout(10*time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...)
*/
-func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
+func Eventually(actualOrCtx any, args ...any) AsyncAssertion {
ensureDefaultGomegaIsConfigured()
return Default.Eventually(actualOrCtx, args...)
}
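A sketch of the context/timeout interplay described above (fakeClient is a hypothetical stand-in for the docs' client):

```go
package demo_test

import (
	"context"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// fakeClient is a hypothetical stand-in whose count rises on each poll.
type fakeClient struct{ n int }

func (c *fakeClient) FetchCount(ctx context.Context, path string) int {
	c.n++
	return c.n
}

var _ = It("fetches enough users", func(ctx SpecContext) {
	client := &fakeClient{}
	// Context only: polling stops when Ginkgo cancels ctx.
	Eventually(client.FetchCount).WithContext(ctx).WithArguments("/users").
		Should(BeNumerically(">=", 3))
	// Context plus explicit timeout: whichever fires first ends the polling.
	Eventually(client.FetchCount).WithContext(ctx).WithTimeout(time.Second).
		WithArguments("/users").Should(BeNumerically(">=", 5))
}, SpecTimeout(10*time.Second))
```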
@@ -404,7 +404,7 @@ func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
// `EventuallyWithOffset` specifying a timeout interval (and an optional polling interval) are
// the same as `Eventually(...).WithOffset(...).WithTimeout` or
// `Eventually(...).WithOffset(...).WithTimeout(...).WithPolling`.
-func EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
+func EventuallyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion {
ensureDefaultGomegaIsConfigured()
return Default.EventuallyWithOffset(offset, actualOrCtx, args...)
}
@@ -424,7 +424,7 @@ Consistently is useful in cases where you want to assert that something *does no
This will block for 200 milliseconds and repeatedly check the channel and ensure nothing has been received.
*/
-func Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
+func Consistently(actualOrCtx any, args ...any) AsyncAssertion {
ensureDefaultGomegaIsConfigured()
return Default.Consistently(actualOrCtx, args...)
}
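A sketch of the channel example from the comment above: Consistently polls for the whole window and fails if the channel ever yields a value.

```go
package demo_test

import (
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var _ = It("receives nothing for the whole window", func() {
	ch := make(chan string)
	Consistently(ch).WithTimeout(200 * time.Millisecond).ShouldNot(Receive())
})
```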
@@ -435,13 +435,13 @@ func Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
//
// `ConsistentlyWithOffset` is the same as `Consistently(...).WithOffset` and
// optional `WithTimeout` and `WithPolling`.
-func ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
+func ConsistentlyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion {
ensureDefaultGomegaIsConfigured()
return Default.ConsistentlyWithOffset(offset, actualOrCtx, args...)
}
/*
-StopTrying can be used to signal to Eventually and Consistentlythat they should abort and stop trying. This always results in a failure of the assertion - and the failure message is the content of the StopTrying signal.
+StopTrying can be used to signal to Eventually and Consistently that they should abort and stop trying. This always results in a failure of the assertion - and the failure message is the content of the StopTrying signal.
You can send the StopTrying signal by either returning StopTrying("message") as an error from your passed-in function _or_ by calling StopTrying("message").Now() to trigger a panic and end execution.
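A sketch of both signaling forms (fetch and its errors are hypothetical); note that reaching StopTrying always fails the assertion with the given message:

```go
package demo_test

import (
	"errors"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var (
	errGone = errors.New("gone")
	calls   int
)

// fetch is a hypothetical poll target: transient errors, then success.
func fetch() (int, error) {
	calls++
	if calls < 3 {
		return 0, errors.New("not ready yet")
	}
	return 3, nil
}

var _ = It("polls until ready but bails out on fatal errors", func() {
	Eventually(func() (int, error) {
		v, err := fetch()
		if errors.Is(err, errGone) {
			// Error form: ends polling now and fails with this message.
			return 0, StopTrying("resource is gone; no point retrying")
		}
		return v, err
	}).Should(Equal(3))
	// Panic form, for use deep inside helpers: StopTrying("fatal").Now()
})
```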
diff --git a/vendor/github.com/onsi/gomega/internal/assertion.go b/vendor/github.com/onsi/gomega/internal/assertion.go
index 08356a610..cc846e7ce 100644
--- a/vendor/github.com/onsi/gomega/internal/assertion.go
+++ b/vendor/github.com/onsi/gomega/internal/assertion.go
@@ -9,19 +9,19 @@ import (
)
type Assertion struct {
- actuals []interface{} // actual value plus all extra values
- actualIndex int // value to pass to the matcher
- vet vetinari // the vet to call before calling Gomega matcher
+ actuals []any // actual value plus all extra values
+ actualIndex int // value to pass to the matcher
+ vet vetinari // the vet to call before calling Gomega matcher
offset int
g *Gomega
}
// ...obligatory discworld reference, as "vetineer" doesn't sound ... quite right.
-type vetinari func(assertion *Assertion, optionalDescription ...interface{}) bool
+type vetinari func(assertion *Assertion, optionalDescription ...any) bool
-func NewAssertion(actualInput interface{}, g *Gomega, offset int, extra ...interface{}) *Assertion {
+func NewAssertion(actualInput any, g *Gomega, offset int, extra ...any) *Assertion {
return &Assertion{
- actuals: append([]interface{}{actualInput}, extra...),
+ actuals: append([]any{actualInput}, extra...),
actualIndex: 0,
vet: (*Assertion).vetActuals,
offset: offset,
@@ -44,37 +44,37 @@ func (assertion *Assertion) Error() types.Assertion {
}
}
-func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...any) bool {
assertion.g.THelper()
vetOptionalDescription("Assertion", optionalDescription...)
return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
}
-func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...any) bool {
assertion.g.THelper()
vetOptionalDescription("Assertion", optionalDescription...)
return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
}
-func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...any) bool {
assertion.g.THelper()
vetOptionalDescription("Assertion", optionalDescription...)
return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
}
-func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...any) bool {
assertion.g.THelper()
vetOptionalDescription("Assertion", optionalDescription...)
return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
}
-func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...any) bool {
assertion.g.THelper()
vetOptionalDescription("Assertion", optionalDescription...)
return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
}
-func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string {
+func (assertion *Assertion) buildDescription(optionalDescription ...any) string {
switch len(optionalDescription) {
case 0:
return ""
@@ -86,7 +86,7 @@ func (assertion *Assertion) buildDescription(optionalDescription ...interface{})
return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
}
-func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
+func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...any) bool {
actualInput := assertion.actuals[assertion.actualIndex]
matches, err := matcher.Match(actualInput)
assertion.g.THelper()
@@ -113,7 +113,7 @@ func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool
// vetActuals vets the actual values, with the (optional) exception of a
// specific value, such as the first value in case of non-error assertions, or the
// last value in case of Error()-based assertions.
-func (assertion *Assertion) vetActuals(optionalDescription ...interface{}) bool {
+func (assertion *Assertion) vetActuals(optionalDescription ...any) bool {
success, message := vetActuals(assertion.actuals, assertion.actualIndex)
if success {
return true
@@ -129,7 +129,7 @@ func (assertion *Assertion) vetActuals(optionalDescription ...interface{}) bool
// the final error value is non-zero. Otherwise, it doesn't vet the actual
// values, as these are allowed to take on any values unless there is a non-zero
// error value.
-func (assertion *Assertion) vetError(optionalDescription ...interface{}) bool {
+func (assertion *Assertion) vetError(optionalDescription ...any) bool {
if err := assertion.actuals[assertion.actualIndex]; err != nil {
// Go error result idiom: all other actual values must be zero values.
return assertion.vetActuals(optionalDescription...)
@@ -139,7 +139,7 @@ func (assertion *Assertion) vetError(optionalDescription ...interface{}) bool {
// vetActuals vets a slice of actual values, optionally skipping a particular
// value slice element, such as the first or last value slice element.
-func vetActuals(actuals []interface{}, skipIndex int) (bool, string) {
+func vetActuals(actuals []any, skipIndex int) (bool, string) {
for i, actual := range actuals {
if i == skipIndex {
continue
diff --git a/vendor/github.com/onsi/gomega/internal/async_assertion.go b/vendor/github.com/onsi/gomega/internal/async_assertion.go
index 8b4cd1f5b..9932640ef 100644
--- a/vendor/github.com/onsi/gomega/internal/async_assertion.go
+++ b/vendor/github.com/onsi/gomega/internal/async_assertion.go
@@ -69,8 +69,8 @@ type AsyncAssertion struct {
asyncType AsyncAssertionType
actualIsFunc bool
- actual interface{}
- argsToForward []interface{}
+ actual any
+ argsToForward []any
timeoutInterval time.Duration
pollingInterval time.Duration
@@ -80,7 +80,7 @@ type AsyncAssertion struct {
g *Gomega
}
-func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput interface{}, g *Gomega, timeoutInterval time.Duration, pollingInterval time.Duration, mustPassRepeatedly int, ctx context.Context, offset int) *AsyncAssertion {
+func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput any, g *Gomega, timeoutInterval time.Duration, pollingInterval time.Duration, mustPassRepeatedly int, ctx context.Context, offset int) *AsyncAssertion {
out := &AsyncAssertion{
asyncType: asyncType,
timeoutInterval: timeoutInterval,
@@ -129,7 +129,7 @@ func (assertion *AsyncAssertion) WithContext(ctx context.Context) types.AsyncAss
return assertion
}
-func (assertion *AsyncAssertion) WithArguments(argsToForward ...interface{}) types.AsyncAssertion {
+func (assertion *AsyncAssertion) WithArguments(argsToForward ...any) types.AsyncAssertion {
assertion.argsToForward = argsToForward
return assertion
}
@@ -139,19 +139,19 @@ func (assertion *AsyncAssertion) MustPassRepeatedly(count int) types.AsyncAssert
return assertion
}
-func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...any) bool {
assertion.g.THelper()
vetOptionalDescription("Asynchronous assertion", optionalDescription...)
return assertion.match(matcher, true, optionalDescription...)
}
-func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...any) bool {
assertion.g.THelper()
vetOptionalDescription("Asynchronous assertion", optionalDescription...)
return assertion.match(matcher, false, optionalDescription...)
}
-func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interface{}) string {
+func (assertion *AsyncAssertion) buildDescription(optionalDescription ...any) string {
switch len(optionalDescription) {
case 0:
return ""
@@ -163,7 +163,7 @@ func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interfa
return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
}
-func (assertion *AsyncAssertion) processReturnValues(values []reflect.Value) (interface{}, error) {
+func (assertion *AsyncAssertion) processReturnValues(values []reflect.Value) (any, error) {
if len(values) == 0 {
return nil, &asyncPolledActualError{
message: fmt.Sprintf("The function passed to %s did not return any values", assertion.asyncType),
@@ -224,7 +224,7 @@ func (assertion *AsyncAssertion) argumentMismatchError(t reflect.Type, numProvid
if numProvided == 1 {
have = "has"
}
- return fmt.Errorf(`The function passed to %s has signature %s takes %d arguments but %d %s been provided. Please use %s().WithArguments() to pass the corect set of arguments.
+ return fmt.Errorf(`The function passed to %s has signature %s and takes %d arguments, but %d %s been provided. Please use %s().WithArguments() to pass the correct set of arguments.
You can learn more at https://onsi.github.io/gomega/#eventually
`, assertion.asyncType, t, t.NumIn(), numProvided, have, assertion.asyncType)
@@ -237,9 +237,9 @@ You can learn more at https://onsi.github.io/gomega/#eventually
`, assertion.asyncType, reason)
}
-func (assertion *AsyncAssertion) buildActualPoller() (func() (interface{}, error), error) {
+func (assertion *AsyncAssertion) buildActualPoller() (func() (any, error), error) {
if !assertion.actualIsFunc {
- return func() (interface{}, error) { return assertion.actual, nil }, nil
+ return func() (any, error) { return assertion.actual, nil }, nil
}
actualValue := reflect.ValueOf(assertion.actual)
actualType := reflect.TypeOf(assertion.actual)
@@ -301,7 +301,7 @@ func (assertion *AsyncAssertion) buildActualPoller() (func() (interface{}, error
return nil, assertion.invalidMustPassRepeatedlyError("parameter can't be < 1")
}
- return func() (actual interface{}, err error) {
+ return func() (actual any, err error) {
var values []reflect.Value
assertionFailure = nil
defer func() {
@@ -354,14 +354,14 @@ func (assertion *AsyncAssertion) afterPolling() <-chan time.Time {
}
}
-func (assertion *AsyncAssertion) matcherSaysStopTrying(matcher types.GomegaMatcher, value interface{}) bool {
+func (assertion *AsyncAssertion) matcherSaysStopTrying(matcher types.GomegaMatcher, value any) bool {
if assertion.actualIsFunc || types.MatchMayChangeInTheFuture(matcher, value) {
return false
}
return true
}
-func (assertion *AsyncAssertion) pollMatcher(matcher types.GomegaMatcher, value interface{}) (matches bool, err error) {
+func (assertion *AsyncAssertion) pollMatcher(matcher types.GomegaMatcher, value any) (matches bool, err error) {
defer func() {
if e := recover(); e != nil {
if _, isAsyncError := AsPollingSignalError(e); isAsyncError {
@@ -377,13 +377,13 @@ func (assertion *AsyncAssertion) pollMatcher(matcher types.GomegaMatcher, value
return
}
-func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
+func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...any) bool {
timer := time.Now()
timeout := assertion.afterTimeout()
lock := sync.Mutex{}
var matches, hasLastValidActual bool
- var actual, lastValidActual interface{}
+ var actual, lastValidActual any
var actualErr, matcherErr error
var oracleMatcherSaysStop bool
diff --git a/vendor/github.com/onsi/gomega/internal/duration_bundle.go b/vendor/github.com/onsi/gomega/internal/duration_bundle.go
index 2e026c336..1019deb88 100644
--- a/vendor/github.com/onsi/gomega/internal/duration_bundle.go
+++ b/vendor/github.com/onsi/gomega/internal/duration_bundle.go
@@ -49,7 +49,7 @@ func durationFromEnv(key string, defaultDuration time.Duration) time.Duration {
return duration
}
-func toDuration(input interface{}) (time.Duration, error) {
+func toDuration(input any) (time.Duration, error) {
duration, ok := input.(time.Duration)
if ok {
return duration, nil
diff --git a/vendor/github.com/onsi/gomega/internal/gomega.go b/vendor/github.com/onsi/gomega/internal/gomega.go
index c6e2fcc0e..66dfe7d04 100644
--- a/vendor/github.com/onsi/gomega/internal/gomega.go
+++ b/vendor/github.com/onsi/gomega/internal/gomega.go
@@ -40,45 +40,45 @@ func (g *Gomega) ConfigureWithT(t types.GomegaTestingT) *Gomega {
return g
}
-func (g *Gomega) Ω(actual interface{}, extra ...interface{}) types.Assertion {
+func (g *Gomega) Ω(actual any, extra ...any) types.Assertion {
return g.ExpectWithOffset(0, actual, extra...)
}
-func (g *Gomega) Expect(actual interface{}, extra ...interface{}) types.Assertion {
+func (g *Gomega) Expect(actual any, extra ...any) types.Assertion {
return g.ExpectWithOffset(0, actual, extra...)
}
-func (g *Gomega) ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) types.Assertion {
+func (g *Gomega) ExpectWithOffset(offset int, actual any, extra ...any) types.Assertion {
return NewAssertion(actual, g, offset, extra...)
}
-func (g *Gomega) Eventually(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+func (g *Gomega) Eventually(actualOrCtx any, args ...any) types.AsyncAssertion {
return g.makeAsyncAssertion(AsyncAssertionTypeEventually, 0, actualOrCtx, args...)
}
-func (g *Gomega) EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+func (g *Gomega) EventuallyWithOffset(offset int, actualOrCtx any, args ...any) types.AsyncAssertion {
return g.makeAsyncAssertion(AsyncAssertionTypeEventually, offset, actualOrCtx, args...)
}
-func (g *Gomega) Consistently(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+func (g *Gomega) Consistently(actualOrCtx any, args ...any) types.AsyncAssertion {
return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, 0, actualOrCtx, args...)
}
-func (g *Gomega) ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+func (g *Gomega) ConsistentlyWithOffset(offset int, actualOrCtx any, args ...any) types.AsyncAssertion {
return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, offset, actualOrCtx, args...)
}
-func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offset int, actualOrCtx any, args ...any) types.AsyncAssertion {
baseOffset := 3
timeoutInterval := -time.Duration(1)
pollingInterval := -time.Duration(1)
- intervals := []interface{}{}
+ intervals := []any{}
var ctx context.Context
actual := actualOrCtx
startingIndex := 0
if _, isCtx := actualOrCtx.(context.Context); isCtx && len(args) > 0 {
- // the first argument is a context, we should accept it as the context _only if_ it is **not** the only argumnent **and** the second argument is not a parseable duration
+ // the first argument is a context, we should accept it as the context _only if_ it is **not** the only argument **and** the second argument is not a parseable duration
// this is due to an unfortunate ambiguity in early versions of Gomega in which multi-type durations are allowed after the actual
if _, err := toDuration(args[0]); err != nil {
ctx = actualOrCtx.(context.Context)
diff --git a/vendor/github.com/onsi/gomega/internal/polling_signal_error.go b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go
index 3a4f7ddd9..450c40333 100644
--- a/vendor/github.com/onsi/gomega/internal/polling_signal_error.go
+++ b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go
@@ -100,7 +100,7 @@ func (s *PollingSignalErrorImpl) TryAgainDuration() time.Duration {
return s.duration
}
-func AsPollingSignalError(actual interface{}) (*PollingSignalErrorImpl, bool) {
+func AsPollingSignalError(actual any) (*PollingSignalErrorImpl, bool) {
if actual == nil {
return nil, false
}
diff --git a/vendor/github.com/onsi/gomega/internal/vetoptdesc.go b/vendor/github.com/onsi/gomega/internal/vetoptdesc.go
index f29587641..b748de41f 100644
--- a/vendor/github.com/onsi/gomega/internal/vetoptdesc.go
+++ b/vendor/github.com/onsi/gomega/internal/vetoptdesc.go
@@ -10,7 +10,7 @@ import (
// Gomega matcher at the beginning it panics. This allows for rendering Gomega
// matchers as part of an optional Description, as long as they're not in the
// first slot.
-func vetOptionalDescription(assertion string, optionalDescription ...interface{}) {
+func vetOptionalDescription(assertion string, optionalDescription ...any) {
if len(optionalDescription) == 0 {
return
}
diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go
index 7ef27dc9c..10b6693fd 100644
--- a/vendor/github.com/onsi/gomega/matchers.go
+++ b/vendor/github.com/onsi/gomega/matchers.go
@@ -12,7 +12,7 @@ import (
// Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about
// types when performing comparisons.
// It is an error for both actual and expected to be nil. Use BeNil() instead.
-func Equal(expected interface{}) types.GomegaMatcher {
+func Equal(expected any) types.GomegaMatcher {
return &matchers.EqualMatcher{
Expected: expected,
}
@@ -22,7 +22,7 @@ func Equal(expected interface{}) types.GomegaMatcher {
// This is done by converting actual to have the type of expected before
// attempting equality with reflect.DeepEqual.
// It is an error for actual and expected to be nil. Use BeNil() instead.
-func BeEquivalentTo(expected interface{}) types.GomegaMatcher {
+func BeEquivalentTo(expected any) types.GomegaMatcher {
return &matchers.BeEquivalentToMatcher{
Expected: expected,
}
@@ -31,7 +31,7 @@ func BeEquivalentTo(expected interface{}) types.GomegaMatcher {
// BeComparableTo uses gocmp.Equal from github.com/google/go-cmp (instead of reflect.DeepEqual) to perform a deep comparison.
// You can pass cmp.Option as options.
// It is an error for actual and expected to be nil. Use BeNil() instead.
-func BeComparableTo(expected interface{}, opts ...cmp.Option) types.GomegaMatcher {
+func BeComparableTo(expected any, opts ...cmp.Option) types.GomegaMatcher {
return &matchers.BeComparableToMatcher{
Expected: expected,
Options: opts,
@@ -41,7 +41,7 @@ func BeComparableTo(expected interface{}, opts ...cmp.Option) types.GomegaMatche
// BeIdenticalTo uses the == operator to compare actual with expected.
// BeIdenticalTo is strict about types when performing comparisons.
// It is an error for both actual and expected to be nil. Use BeNil() instead.
-func BeIdenticalTo(expected interface{}) types.GomegaMatcher {
+func BeIdenticalTo(expected any) types.GomegaMatcher {
return &matchers.BeIdenticalToMatcher{
Expected: expected,
}
@@ -139,7 +139,7 @@ func Succeed() types.GomegaMatcher {
// Error interface
//
// The optional second argument is a description of the error function, if used. This is required when passing a function but is ignored in all other cases.
-func MatchError(expected interface{}, functionErrorDescription ...any) types.GomegaMatcher {
+func MatchError(expected any, functionErrorDescription ...any) types.GomegaMatcher {
return &matchers.MatchErrorMatcher{
Expected: expected,
FuncErrDescription: functionErrorDescription,
@@ -202,11 +202,11 @@ func BeClosed() types.GomegaMatcher {
// Expect(myThing.IsValid()).Should(BeTrue())
//
// Finally, if you want to match the received object as well as get the actual received value into a variable, so you can reason further about the value received,
-// you can pass a pointer to a variable of the approriate type first, and second a matcher:
+// you can pass a pointer to a variable of the appropriate type first, and second a matcher:
//
// var myThing thing
// Eventually(thingChan).Should(Receive(&myThing, ContainSubstring("bar")))
-func Receive(args ...interface{}) types.GomegaMatcher {
+func Receive(args ...any) types.GomegaMatcher {
return &matchers.ReceiveMatcher{
Args: args,
}
@@ -224,7 +224,7 @@ func Receive(args ...interface{}) types.GomegaMatcher {
//
// Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with).
// Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends.
-func BeSent(arg interface{}) types.GomegaMatcher {
+func BeSent(arg any) types.GomegaMatcher {
return &matchers.BeSentMatcher{
Arg: arg,
}
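A sketch of the Eventually pairing mentioned above: the send is retried on every poll, so a temporarily full or unready channel only fails the spec if it still blocks at the deadline.

```go
package demo_test

import (
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var _ = It("sends without risking a permanent block", func() {
	ch := make(chan string, 1)
	Eventually(ch).Should(BeSent("hello")) // the value really is sent on success
	Expect(<-ch).To(Equal("hello"))
})
```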
@@ -233,7 +233,7 @@ func BeSent(arg interface{}) types.GomegaMatcher {
// MatchRegexp succeeds if actual is a string or stringer that matches the
// passed-in regexp. Optional arguments can be provided to construct a regexp
// via fmt.Sprintf().
-func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher {
+func MatchRegexp(regexp string, args ...any) types.GomegaMatcher {
return &matchers.MatchRegexpMatcher{
Regexp: regexp,
Args: args,
@@ -243,7 +243,7 @@ func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher {
// ContainSubstring succeeds if actual is a string or stringer that contains the
// passed-in substring. Optional arguments can be provided to construct the substring
// via fmt.Sprintf().
-func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher {
+func ContainSubstring(substr string, args ...any) types.GomegaMatcher {
return &matchers.ContainSubstringMatcher{
Substr: substr,
Args: args,
@@ -253,7 +253,7 @@ func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher {
// HavePrefix succeeds if actual is a string or stringer that contains the
// passed-in string as a prefix. Optional arguments can be provided to construct
// via fmt.Sprintf().
-func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher {
+func HavePrefix(prefix string, args ...any) types.GomegaMatcher {
return &matchers.HavePrefixMatcher{
Prefix: prefix,
Args: args,
@@ -263,7 +263,7 @@ func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher {
// HaveSuffix succeeds if actual is a string or stringer that contains the
// passed-in string as a suffix. Optional arguments can be provided to construct
// via fmt.Sprintf().
-func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher {
+func HaveSuffix(suffix string, args ...any) types.GomegaMatcher {
return &matchers.HaveSuffixMatcher{
Suffix: suffix,
Args: args,
@@ -273,7 +273,7 @@ func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher {
// MatchJSON succeeds if actual is a string or stringer of JSON that matches
// the expected JSON. The JSONs are decoded and the resulting objects are compared via
// reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
-func MatchJSON(json interface{}) types.GomegaMatcher {
+func MatchJSON(json any) types.GomegaMatcher {
return &matchers.MatchJSONMatcher{
JSONToMatch: json,
}
@@ -282,7 +282,7 @@ func MatchJSON(json interface{}) types.GomegaMatcher {
// MatchXML succeeds if actual is a string or stringer of XML that matches
// the expected XML. The XMLs are decoded and the resulting objects are compared via
// reflect.DeepEqual so things like whitespace shouldn't matter.
-func MatchXML(xml interface{}) types.GomegaMatcher {
+func MatchXML(xml any) types.GomegaMatcher {
return &matchers.MatchXMLMatcher{
XMLToMatch: xml,
}
@@ -291,7 +291,7 @@ func MatchXML(xml interface{}) types.GomegaMatcher {
// MatchYAML succeeds if actual is a string or stringer of YAML that matches
// the expected YAML. The YAMLs are decoded and the resulting objects are compared via
// reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
-func MatchYAML(yaml interface{}) types.GomegaMatcher {
+func MatchYAML(yaml any) types.GomegaMatcher {
return &matchers.MatchYAMLMatcher{
YAMLToMatch: yaml,
}
@@ -338,7 +338,7 @@ func BeZero() types.GomegaMatcher {
//
// var findings []string
// Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubString("Bar", &findings)))
-func ContainElement(element interface{}, result ...interface{}) types.GomegaMatcher {
+func ContainElement(element any, result ...any) types.GomegaMatcher {
return &matchers.ContainElementMatcher{
Element: element,
Result: result,
@@ -358,7 +358,7 @@ func ContainElement(element interface{}, result ...interface{}) types.GomegaMatc
// Expect(2).Should(BeElementOf(1, 2))
//
// Actual must be typed.
-func BeElementOf(elements ...interface{}) types.GomegaMatcher {
+func BeElementOf(elements ...any) types.GomegaMatcher {
return &matchers.BeElementOfMatcher{
Elements: elements,
}
@@ -368,7 +368,7 @@ func BeElementOf(elements ...interface{}) types.GomegaMatcher {
// BeKeyOf() always uses Equal() to perform the match between actual and the map keys.
//
// Expect("foo").Should(BeKeyOf(map[string]bool{"foo": true, "bar": false}))
-func BeKeyOf(element interface{}) types.GomegaMatcher {
+func BeKeyOf(element any) types.GomegaMatcher {
return &matchers.BeKeyOfMatcher{
Map: element,
}
@@ -388,14 +388,14 @@ func BeKeyOf(element interface{}) types.GomegaMatcher {
//
// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"}))
//
-// Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule.
-func ConsistOf(elements ...interface{}) types.GomegaMatcher {
+// Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []any are different types - hence the need for this special rule.
+func ConsistOf(elements ...any) types.GomegaMatcher {
return &matchers.ConsistOfMatcher{
Elements: elements,
}
}
-// HaveExactElements succeeds if actual contains elements that precisely match the elemets passed into the matcher. The ordering of the elements does matter.
+// HaveExactElements succeeds if actual contains elements that precisely match the elements passed into the matcher. The ordering of the elements does matter.
// By default HaveExactElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
//
// Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", "FooBar"))
@@ -403,7 +403,7 @@ func ConsistOf(elements ...interface{}) types.GomegaMatcher {
// Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements(ContainSubstring("Foo"), ContainSubstring("Foo")))
//
// Actual must be an array or slice.
-func HaveExactElements(elements ...interface{}) types.GomegaMatcher {
+func HaveExactElements(elements ...any) types.GomegaMatcher {
return &matchers.HaveExactElementsMatcher{
Elements: elements,
}
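The contrast between these two collection matchers, under the same illustrative setup as the earlier sketch:

// ConsistOf ignores order, and a lone slice argument is flattened per the
// special type-system rule noted in its doc comment.
g.Expect([]string{"Foo", "FooBar"}).To(ConsistOf([]string{"FooBar", "Foo"}))
// HaveExactElements additionally enforces order, so the reversed
// expectation would fail here.
g.Expect([]string{"Foo", "FooBar"}).To(HaveExactElements("Foo", "FooBar"))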
@@ -417,7 +417,7 @@ func HaveExactElements(elements ...interface{}) types.GomegaMatcher {
//
// Actual must be an array, slice or map.
// For maps, ContainElements searches through the map's values.
-func ContainElements(elements ...interface{}) types.GomegaMatcher {
+func ContainElements(elements ...any) types.GomegaMatcher {
return &matchers.ContainElementsMatcher{
Elements: elements,
}
@@ -432,7 +432,7 @@ func ContainElements(elements ...interface{}) types.GomegaMatcher {
//
// Actual must be an array, slice or map.
// For maps, HaveEach searches through the map's values.
-func HaveEach(element interface{}) types.GomegaMatcher {
+func HaveEach(element any) types.GomegaMatcher {
return &matchers.HaveEachMatcher{
Element: element,
}
@@ -443,7 +443,7 @@ func HaveEach(element interface{}) types.GomegaMatcher {
// matcher can be passed in instead:
//
// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`)))
-func HaveKey(key interface{}) types.GomegaMatcher {
+func HaveKey(key any) types.GomegaMatcher {
return &matchers.HaveKeyMatcher{
Key: key,
}
@@ -455,7 +455,7 @@ func HaveKey(key interface{}) types.GomegaMatcher {
//
// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar"))
// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar"))
-func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher {
+func HaveKeyWithValue(key any, value any) types.GomegaMatcher {
return &matchers.HaveKeyWithValueMatcher{
Key: key,
Value: value,
@@ -483,7 +483,7 @@ func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher {
// Expect(book).To(HaveField("Title", ContainSubstring("Les"))
// Expect(book).To(HaveField("Author.FirstName", Equal("Victor"))
// Expect(book).To(HaveField("Author.DOB.Year()", BeNumerically("<", 1900))
-func HaveField(field string, expected interface{}) types.GomegaMatcher {
+func HaveField(field string, expected any) types.GomegaMatcher {
return &matchers.HaveFieldMatcher{
Field: field,
Expected: expected,
@@ -535,7 +535,7 @@ func HaveValue(matcher types.GomegaMatcher) types.GomegaMatcher {
// Expect(1.0).Should(BeNumerically(">=", 1.0))
// Expect(1.0).Should(BeNumerically("<", 3))
// Expect(1.0).Should(BeNumerically("<=", 1.0))
-func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatcher {
+func BeNumerically(comparator string, compareTo ...any) types.GomegaMatcher {
return &matchers.BeNumericallyMatcher{
Comparator: comparator,
CompareTo: compareTo,
@@ -562,7 +562,7 @@ func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Dura
// Expect(5).Should(BeAssignableToTypeOf(-1)) // different values same type
// Expect("foo").Should(BeAssignableToTypeOf("bar")) // different values same type
// Expect(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{}))
-func BeAssignableToTypeOf(expected interface{}) types.GomegaMatcher {
+func BeAssignableToTypeOf(expected any) types.GomegaMatcher {
return &matchers.AssignableToTypeOfMatcher{
Expected: expected,
}
@@ -581,7 +581,7 @@ func Panic() types.GomegaMatcher {
// matcher can be passed in instead:
//
// Expect(fn).Should(PanicWith(MatchRegexp(`.+Foo$`)))
-func PanicWith(expected interface{}) types.GomegaMatcher {
+func PanicWith(expected any) types.GomegaMatcher {
return &matchers.PanicMatcher{Expected: expected}
}
@@ -610,7 +610,7 @@ func BeADirectory() types.GomegaMatcher {
// Expect(resp).Should(HaveHTTPStatus(http.StatusOK)) // asserts that resp.StatusCode == 200
// Expect(resp).Should(HaveHTTPStatus("404 Not Found")) // asserts that resp.Status == "404 Not Found"
// Expect(resp).Should(HaveHTTPStatus(http.StatusOK, http.StatusNoContent)) // asserts that resp.StatusCode == 200 || resp.StatusCode == 204
-func HaveHTTPStatus(expected ...interface{}) types.GomegaMatcher {
+func HaveHTTPStatus(expected ...any) types.GomegaMatcher {
return &matchers.HaveHTTPStatusMatcher{Expected: expected}
}
@@ -618,7 +618,7 @@ func HaveHTTPStatus(expected ...interface{}) types.GomegaMatcher {
// Actual must be either a *http.Response or *httptest.ResponseRecorder.
// Expected must be a string header name, followed by a header value which
// can be a string, or another matcher.
-func HaveHTTPHeaderWithValue(header string, value interface{}) types.GomegaMatcher {
+func HaveHTTPHeaderWithValue(header string, value any) types.GomegaMatcher {
return &matchers.HaveHTTPHeaderWithValueMatcher{
Header: header,
Value: value,
@@ -628,7 +628,7 @@ func HaveHTTPHeaderWithValue(header string, value interface{}) types.GomegaMatch
// HaveHTTPBody matches if the body matches.
// Actual must be either a *http.Response or *httptest.ResponseRecorder.
// Expected must be either a string, []byte, or other matcher
-func HaveHTTPBody(expected interface{}) types.GomegaMatcher {
+func HaveHTTPBody(expected any) types.GomegaMatcher {
return &matchers.HaveHTTPBodyMatcher{Expected: expected}
}
@@ -687,15 +687,15 @@ func Not(matcher types.GomegaMatcher) types.GomegaMatcher {
// Expect(1).To(WithTransform(failingplus1, Equal(2)))
//
// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
-func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.GomegaMatcher {
+func WithTransform(transform any, matcher types.GomegaMatcher) types.GomegaMatcher {
return matchers.NewWithTransformMatcher(transform, matcher)
}
// Satisfy matches the actual value against the `predicate` function.
-// The given predicate must be a function of one paramter that returns bool.
+// The given predicate must be a function of one parameter that returns bool.
//
// var isEven = func(i int) bool { return i%2 == 0 }
// Expect(2).To(Satisfy(isEven))
-func Satisfy(predicate interface{}) types.GomegaMatcher {
+func Satisfy(predicate any) types.GomegaMatcher {
return matchers.NewSatisfyMatcher(predicate)
}
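WithTransform and Satisfy close out the DSL file; a hedged composition sketch under the same setup as above (double and isEven are illustrative helpers, not from this diff):

// The transform runs first, then the wrapped matcher sees its output.
double := func(i int) int { return i * 2 }
isEven := func(i int) bool { return i%2 == 0 }
g.Expect(3).To(WithTransform(double, Satisfy(isEven))) // 3 -> 6, and 6 is even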
diff --git a/vendor/github.com/onsi/gomega/matchers/and.go b/vendor/github.com/onsi/gomega/matchers/and.go
index 6bd826adc..db48e90b3 100644
--- a/vendor/github.com/onsi/gomega/matchers/and.go
+++ b/vendor/github.com/onsi/gomega/matchers/and.go
@@ -14,7 +14,7 @@ type AndMatcher struct {
firstFailedMatcher types.GomegaMatcher
}
-func (m *AndMatcher) Match(actual interface{}) (success bool, err error) {
+func (m *AndMatcher) Match(actual any) (success bool, err error) {
m.firstFailedMatcher = nil
for _, matcher := range m.Matchers {
success, err := matcher.Match(actual)
@@ -26,16 +26,16 @@ func (m *AndMatcher) Match(actual interface{}) (success bool, err error) {
return true, nil
}
-func (m *AndMatcher) FailureMessage(actual interface{}) (message string) {
+func (m *AndMatcher) FailureMessage(actual any) (message string) {
return m.firstFailedMatcher.FailureMessage(actual)
}
-func (m *AndMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (m *AndMatcher) NegatedFailureMessage(actual any) (message string) {
// not the most beautiful list of matchers, but not bad either...
return format.Message(actual, fmt.Sprintf("To not satisfy all of these matchers: %s", m.Matchers))
}
-func (m *AndMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+func (m *AndMatcher) MatchMayChangeInTheFuture(actual any) bool {
/*
Example with 3 matchers: A, B, C
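Every matcher file touched below follows the same pattern as AndMatcher here: the interface{} -> any change is applied uniformly to the methods of gomega's matcher interface, whose shape in this vendored version is:

// From vendor/github.com/onsi/gomega/types in this version.
type GomegaMatcher interface {
    Match(actual any) (success bool, err error)
    FailureMessage(actual any) (message string)
    NegatedFailureMessage(actual any) (message string)
}

// Matchers such as AndMatcher also implement the optional oracle hook that
// Eventually/Consistently consult to decide whether polling can stop early:
//	MatchMayChangeInTheFuture(actual any) bool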
diff --git a/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go
index be4839520..a100e5c07 100644
--- a/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go
@@ -10,10 +10,10 @@ import (
)
type AssignableToTypeOfMatcher struct {
- Expected interface{}
+ Expected any
}
-func (matcher *AssignableToTypeOfMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *AssignableToTypeOfMatcher) Match(actual any) (success bool, err error) {
if actual == nil && matcher.Expected == nil {
return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
} else if matcher.Expected == nil {
@@ -28,10 +28,10 @@ func (matcher *AssignableToTypeOfMatcher) Match(actual interface{}) (success boo
return actualType.AssignableTo(expectedType), nil
}
-func (matcher *AssignableToTypeOfMatcher) FailureMessage(actual interface{}) string {
+func (matcher *AssignableToTypeOfMatcher) FailureMessage(actual any) string {
return format.Message(actual, fmt.Sprintf("to be assignable to the type: %T", matcher.Expected))
}
-func (matcher *AssignableToTypeOfMatcher) NegatedFailureMessage(actual interface{}) string {
+func (matcher *AssignableToTypeOfMatcher) NegatedFailureMessage(actual any) string {
return format.Message(actual, fmt.Sprintf("not to be assignable to the type: %T", matcher.Expected))
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_directory.go b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go
index 93d4497c7..1d8236048 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_a_directory.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go
@@ -24,11 +24,11 @@ func (t notADirectoryError) Error() string {
}
type BeADirectoryMatcher struct {
- expected interface{}
+ expected any
err error
}
-func (matcher *BeADirectoryMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeADirectoryMatcher) Match(actual any) (success bool, err error) {
actualFilename, ok := actual.(string)
if !ok {
return false, fmt.Errorf("BeADirectoryMatcher matcher expects a file path")
@@ -47,10 +47,10 @@ func (matcher *BeADirectoryMatcher) Match(actual interface{}) (success bool, err
return true, nil
}
-func (matcher *BeADirectoryMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeADirectoryMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, fmt.Sprintf("to be a directory: %s", matcher.err))
}
-func (matcher *BeADirectoryMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeADirectoryMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not be a directory")
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go
index 8fefc4deb..3e53d6285 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go
@@ -24,11 +24,11 @@ func (t notARegularFileError) Error() string {
}
type BeARegularFileMatcher struct {
- expected interface{}
+ expected any
err error
}
-func (matcher *BeARegularFileMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeARegularFileMatcher) Match(actual any) (success bool, err error) {
actualFilename, ok := actual.(string)
if !ok {
return false, fmt.Errorf("BeARegularFileMatcher matcher expects a file path")
@@ -47,10 +47,10 @@ func (matcher *BeARegularFileMatcher) Match(actual interface{}) (success bool, e
return true, nil
}
-func (matcher *BeARegularFileMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeARegularFileMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, fmt.Sprintf("to be a regular file: %s", matcher.err))
}
-func (matcher *BeARegularFileMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeARegularFileMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not be a regular file")
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go
index e2bdd2811..04f156db3 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go
@@ -10,10 +10,10 @@ import (
)
type BeAnExistingFileMatcher struct {
- expected interface{}
+ expected any
}
-func (matcher *BeAnExistingFileMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeAnExistingFileMatcher) Match(actual any) (success bool, err error) {
actualFilename, ok := actual.(string)
if !ok {
return false, fmt.Errorf("BeAnExistingFileMatcher matcher expects a file path")
@@ -31,10 +31,10 @@ func (matcher *BeAnExistingFileMatcher) Match(actual interface{}) (success bool,
return true, nil
}
-func (matcher *BeAnExistingFileMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeAnExistingFileMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to exist")
}
-func (matcher *BeAnExistingFileMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeAnExistingFileMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to exist")
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go
index f13c24490..4319dde45 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go
@@ -12,7 +12,7 @@ import (
type BeClosedMatcher struct {
}
-func (matcher *BeClosedMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeClosedMatcher) Match(actual any) (success bool, err error) {
if !isChan(actual) {
return false, fmt.Errorf("BeClosed matcher expects a channel. Got:\n%s", format.Object(actual, 1))
}
@@ -39,10 +39,10 @@ func (matcher *BeClosedMatcher) Match(actual interface{}) (success bool, err err
return closed, nil
}
-func (matcher *BeClosedMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeClosedMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to be closed")
}
-func (matcher *BeClosedMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeClosedMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "to be open")
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go
index 4e3897858..532fc3744 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go
@@ -9,11 +9,11 @@ import (
)
type BeComparableToMatcher struct {
- Expected interface{}
+ Expected any
Options cmp.Options
}
-func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, matchErr error) {
+func (matcher *BeComparableToMatcher) Match(actual any) (success bool, matchErr error) {
if actual == nil && matcher.Expected == nil {
return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
}
@@ -40,10 +40,10 @@ func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, m
return cmp.Equal(actual, matcher.Expected, matcher.Options...), nil
}
-func (matcher *BeComparableToMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeComparableToMatcher) FailureMessage(actual any) (message string) {
return fmt.Sprint("Expected object to be comparable, diff: ", cmp.Diff(actual, matcher.Expected, matcher.Options...))
}
-func (matcher *BeComparableToMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeComparableToMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to be comparable to", matcher.Expected)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go
index 9ee75a5d5..406fe5484 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go
@@ -10,10 +10,10 @@ import (
)
type BeElementOfMatcher struct {
- Elements []interface{}
+ Elements []any
}
-func (matcher *BeElementOfMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeElementOfMatcher) Match(actual any) (success bool, err error) {
if reflect.TypeOf(actual) == nil {
return false, fmt.Errorf("BeElement matcher expects actual to be typed")
}
@@ -34,10 +34,10 @@ func (matcher *BeElementOfMatcher) Match(actual interface{}) (success bool, err
return false, lastError
}
-func (matcher *BeElementOfMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeElementOfMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to be an element of", presentable(matcher.Elements))
}
-func (matcher *BeElementOfMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeElementOfMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to be an element of", presentable(matcher.Elements))
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go
index bd7f0b96e..e9e0644f3 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go
@@ -13,7 +13,7 @@ import (
type BeEmptyMatcher struct {
}
-func (matcher *BeEmptyMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeEmptyMatcher) Match(actual any) (success bool, err error) {
// short-circuit the iterator case, as we only need to see the first
// element, if any.
if miter.IsIter(actual) {
@@ -34,10 +34,10 @@ func (matcher *BeEmptyMatcher) Match(actual interface{}) (success bool, err erro
return length == 0, nil
}
-func (matcher *BeEmptyMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeEmptyMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to be empty")
}
-func (matcher *BeEmptyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeEmptyMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to be empty")
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go
index 263627f40..37b3080ba 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go
@@ -10,10 +10,10 @@ import (
)
type BeEquivalentToMatcher struct {
- Expected interface{}
+ Expected any
}
-func (matcher *BeEquivalentToMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeEquivalentToMatcher) Match(actual any) (success bool, err error) {
if actual == nil && matcher.Expected == nil {
return false, fmt.Errorf("Both actual and expected must not be nil.")
}
@@ -27,10 +27,10 @@ func (matcher *BeEquivalentToMatcher) Match(actual interface{}) (success bool, e
return reflect.DeepEqual(convertedActual, matcher.Expected), nil
}
-func (matcher *BeEquivalentToMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeEquivalentToMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to be equivalent to", matcher.Expected)
}
-func (matcher *BeEquivalentToMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeEquivalentToMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to be equivalent to", matcher.Expected)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go
index 8ee2b1c51..55e869515 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go
@@ -12,7 +12,7 @@ type BeFalseMatcher struct {
Reason string
}
-func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeFalseMatcher) Match(actual any) (success bool, err error) {
if !isBool(actual) {
return false, fmt.Errorf("Expected a boolean. Got:\n%s", format.Object(actual, 1))
}
@@ -20,7 +20,7 @@ func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err erro
return actual == false, nil
}
-func (matcher *BeFalseMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeFalseMatcher) FailureMessage(actual any) (message string) {
if matcher.Reason == "" {
return format.Message(actual, "to be false")
} else {
@@ -28,7 +28,7 @@ func (matcher *BeFalseMatcher) FailureMessage(actual interface{}) (message strin
}
}
-func (matcher *BeFalseMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeFalseMatcher) NegatedFailureMessage(actual any) (message string) {
if matcher.Reason == "" {
return format.Message(actual, "not to be false")
} else {
diff --git a/vendor/github.com/onsi/gomega/matchers/be_identical_to.go b/vendor/github.com/onsi/gomega/matchers/be_identical_to.go
index 631ce11e3..579aa41b3 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_identical_to.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_identical_to.go
@@ -10,10 +10,10 @@ import (
)
type BeIdenticalToMatcher struct {
- Expected interface{}
+ Expected any
}
-func (matcher *BeIdenticalToMatcher) Match(actual interface{}) (success bool, matchErr error) {
+func (matcher *BeIdenticalToMatcher) Match(actual any) (success bool, matchErr error) {
if actual == nil && matcher.Expected == nil {
return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
}
@@ -30,10 +30,10 @@ func (matcher *BeIdenticalToMatcher) Match(actual interface{}) (success bool, ma
return actual == matcher.Expected, nil
}
-func (matcher *BeIdenticalToMatcher) FailureMessage(actual interface{}) string {
+func (matcher *BeIdenticalToMatcher) FailureMessage(actual any) string {
return format.Message(actual, "to be identical to", matcher.Expected)
}
-func (matcher *BeIdenticalToMatcher) NegatedFailureMessage(actual interface{}) string {
+func (matcher *BeIdenticalToMatcher) NegatedFailureMessage(actual any) string {
return format.Message(actual, "not to be identical to", matcher.Expected)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go
index 449a291ef..3fff3df78 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go
@@ -8,10 +8,10 @@ import (
)
type BeKeyOfMatcher struct {
- Map interface{}
+ Map any
}
-func (matcher *BeKeyOfMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeKeyOfMatcher) Match(actual any) (success bool, err error) {
if !isMap(matcher.Map) {
return false, fmt.Errorf("BeKeyOf matcher needs expected to be a map type")
}
@@ -36,10 +36,10 @@ func (matcher *BeKeyOfMatcher) Match(actual interface{}) (success bool, err erro
return false, lastError
}
-func (matcher *BeKeyOfMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeKeyOfMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to be a key of", presentable(valuesOf(matcher.Map)))
}
-func (matcher *BeKeyOfMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeKeyOfMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to be a key of", presentable(valuesOf(matcher.Map)))
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go
index 551d99d74..cab37f4f9 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go
@@ -7,14 +7,14 @@ import "github.com/onsi/gomega/format"
type BeNilMatcher struct {
}
-func (matcher *BeNilMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeNilMatcher) Match(actual any) (success bool, err error) {
return isNil(actual), nil
}
-func (matcher *BeNilMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeNilMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to be nil")
}
-func (matcher *BeNilMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeNilMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to be nil")
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go
index 100735de3..7e6ce154e 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go
@@ -11,18 +11,18 @@ import (
type BeNumericallyMatcher struct {
Comparator string
- CompareTo []interface{}
+ CompareTo []any
}
-func (matcher *BeNumericallyMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeNumericallyMatcher) FailureMessage(actual any) (message string) {
return matcher.FormatFailureMessage(actual, false)
}
-func (matcher *BeNumericallyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeNumericallyMatcher) NegatedFailureMessage(actual any) (message string) {
return matcher.FormatFailureMessage(actual, true)
}
-func (matcher *BeNumericallyMatcher) FormatFailureMessage(actual interface{}, negated bool) (message string) {
+func (matcher *BeNumericallyMatcher) FormatFailureMessage(actual any, negated bool) (message string) {
if len(matcher.CompareTo) == 1 {
message = fmt.Sprintf("to be %s", matcher.Comparator)
} else {
@@ -34,7 +34,7 @@ func (matcher *BeNumericallyMatcher) FormatFailureMessage(actual interface{}, ne
return format.Message(actual, message, matcher.CompareTo[0])
}
-func (matcher *BeNumericallyMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeNumericallyMatcher) Match(actual any) (success bool, err error) {
if len(matcher.CompareTo) == 0 || len(matcher.CompareTo) > 2 {
return false, fmt.Errorf("BeNumerically requires 1 or 2 CompareTo arguments. Got:\n%s", format.Object(matcher.CompareTo, 1))
}
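The one-or-two-argument rule enforced above corresponds to usage like the following, under the same setup as the earlier sketch:

g.Expect(5).To(BeNumerically(">=", 5))
// The second CompareTo argument is a threshold, used only by "~".
g.Expect(1.001).To(BeNumerically("~", 1.0, 0.01))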
diff --git a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go
index cf582a3fc..14ffbf6c4 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go
@@ -10,11 +10,11 @@ import (
)
type BeSentMatcher struct {
- Arg interface{}
+ Arg any
channelClosed bool
}
-func (matcher *BeSentMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeSentMatcher) Match(actual any) (success bool, err error) {
if !isChan(actual) {
return false, fmt.Errorf("BeSent expects a channel. Got:\n%s", format.Object(actual, 1))
}
@@ -56,15 +56,15 @@ func (matcher *BeSentMatcher) Match(actual interface{}) (success bool, err error
return didSend, nil
}
-func (matcher *BeSentMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeSentMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to send:", matcher.Arg)
}
-func (matcher *BeSentMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeSentMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to send:", matcher.Arg)
}
-func (matcher *BeSentMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+func (matcher *BeSentMatcher) MatchMayChangeInTheFuture(actual any) bool {
if !isChan(actual) {
return false
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go
index dec4db024..edb647c6f 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go
@@ -15,17 +15,17 @@ type BeTemporallyMatcher struct {
Threshold []time.Duration
}
-func (matcher *BeTemporallyMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeTemporallyMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo)
}
-func (matcher *BeTemporallyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeTemporallyMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo)
}
-func (matcher *BeTemporallyMatcher) Match(actual interface{}) (bool, error) {
+func (matcher *BeTemporallyMatcher) Match(actual any) (bool, error) {
// predicate to test for time.Time type
- isTime := func(t interface{}) bool {
+ isTime := func(t any) bool {
_, ok := t.(time.Time)
return ok
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go
index 3576aac88..a010bec5a 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go
@@ -12,7 +12,7 @@ type BeTrueMatcher struct {
Reason string
}
-func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeTrueMatcher) Match(actual any) (success bool, err error) {
if !isBool(actual) {
return false, fmt.Errorf("Expected a boolean. Got:\n%s", format.Object(actual, 1))
}
@@ -20,7 +20,7 @@ func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error
return actual.(bool), nil
}
-func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeTrueMatcher) FailureMessage(actual any) (message string) {
if matcher.Reason == "" {
return format.Message(actual, "to be true")
} else {
@@ -28,7 +28,7 @@ func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string
}
}
-func (matcher *BeTrueMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeTrueMatcher) NegatedFailureMessage(actual any) (message string) {
if matcher.Reason == "" {
return format.Message(actual, "not to be true")
} else {
diff --git a/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go
index 26196f168..f5f5d7f7d 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go
@@ -9,7 +9,7 @@ import (
type BeZeroMatcher struct {
}
-func (matcher *BeZeroMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeZeroMatcher) Match(actual any) (success bool, err error) {
if actual == nil {
return true, nil
}
@@ -19,10 +19,10 @@ func (matcher *BeZeroMatcher) Match(actual interface{}) (success bool, err error
}
-func (matcher *BeZeroMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeZeroMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to be zero-valued")
}
-func (matcher *BeZeroMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeZeroMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to be zero-valued")
}
diff --git a/vendor/github.com/onsi/gomega/matchers/consist_of.go b/vendor/github.com/onsi/gomega/matchers/consist_of.go
index a11188182..05c751b66 100644
--- a/vendor/github.com/onsi/gomega/matchers/consist_of.go
+++ b/vendor/github.com/onsi/gomega/matchers/consist_of.go
@@ -12,12 +12,12 @@ import (
)
type ConsistOfMatcher struct {
- Elements []interface{}
- missingElements []interface{}
- extraElements []interface{}
+ Elements []any
+ missingElements []any
+ extraElements []any
}
-func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *ConsistOfMatcher) Match(actual any) (success bool, err error) {
if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) {
return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map/iter.Seq/iter.Seq2. Got:\n%s", format.Object(actual, 1))
}
@@ -35,19 +35,19 @@ func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err er
return true, nil
}
- var missingMatchers []interface{}
+ var missingMatchers []any
matcher.extraElements, missingMatchers = bipartiteGraph.FreeLeftRight(edges)
matcher.missingElements = equalMatchersToElements(missingMatchers)
return false, nil
}
-func neighbours(value, matcher interface{}) (bool, error) {
+func neighbours(value, matcher any) (bool, error) {
match, err := matcher.(omegaMatcher).Match(value)
return match && err == nil, nil
}
-func equalMatchersToElements(matchers []interface{}) (elements []interface{}) {
+func equalMatchersToElements(matchers []any) (elements []any) {
for _, matcher := range matchers {
if equalMatcher, ok := matcher.(*EqualMatcher); ok {
elements = append(elements, equalMatcher.Expected)
@@ -60,7 +60,7 @@ func equalMatchersToElements(matchers []interface{}) (elements []interface{}) {
return
}
-func flatten(elems []interface{}) []interface{} {
+func flatten(elems []any) []any {
if len(elems) != 1 ||
!(isArrayOrSlice(elems[0]) ||
(miter.IsIter(elems[0]) && !miter.IsSeq2(elems[0]))) {
@@ -77,14 +77,14 @@ func flatten(elems []interface{}) []interface{} {
}
value := reflect.ValueOf(elems[0])
- flattened := make([]interface{}, value.Len())
+ flattened := make([]any, value.Len())
for i := 0; i < value.Len(); i++ {
flattened[i] = value.Index(i).Interface()
}
return flattened
}
-func matchers(expectedElems []interface{}) (matchers []interface{}) {
+func matchers(expectedElems []any) (matchers []any) {
for _, e := range flatten(expectedElems) {
if e == nil {
matchers = append(matchers, &BeNilMatcher{})
@@ -97,11 +97,11 @@ func matchers(expectedElems []interface{}) (matchers []interface{}) {
return
}
-func presentable(elems []interface{}) interface{} {
+func presentable(elems []any) any {
elems = flatten(elems)
if len(elems) == 0 {
- return []interface{}{}
+ return []any{}
}
sv := reflect.ValueOf(elems)
@@ -125,9 +125,9 @@ func presentable(elems []interface{}) interface{} {
return ss.Interface()
}
-func valuesOf(actual interface{}) []interface{} {
+func valuesOf(actual any) []any {
value := reflect.ValueOf(actual)
- values := []interface{}{}
+ values := []any{}
if miter.IsIter(actual) {
if miter.IsSeq2(actual) {
miter.IterateKV(actual, func(k, v reflect.Value) bool {
@@ -154,7 +154,7 @@ func valuesOf(actual interface{}) []interface{} {
return values
}
-func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *ConsistOfMatcher) FailureMessage(actual any) (message string) {
message = format.Message(actual, "to consist of", presentable(matcher.Elements))
message = appendMissingElements(message, matcher.missingElements)
if len(matcher.extraElements) > 0 {
@@ -164,7 +164,7 @@ func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message str
return
}
-func appendMissingElements(message string, missingElements []interface{}) string {
+func appendMissingElements(message string, missingElements []any) string {
if len(missingElements) == 0 {
return message
}
@@ -172,6 +172,6 @@ func appendMissingElements(message string, missingElements []interface{}) string
format.Object(presentable(missingElements), 1))
}
-func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to consist of", presentable(matcher.Elements))
}
diff --git a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
index 830239c7b..8337a5261 100644
--- a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
@@ -12,11 +12,11 @@ import (
)
type ContainElementMatcher struct {
- Element interface{}
- Result []interface{}
+ Element any
+ Result []any
}
-func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *ContainElementMatcher) Match(actual any) (success bool, err error) {
if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) {
return false, fmt.Errorf("ContainElement matcher expects an array/slice/map/iterator. Got:\n%s", format.Object(actual, 1))
}
@@ -132,14 +132,14 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e
var lastError error
if !miter.IsIter(actual) {
- var valueAt func(int) interface{}
+ var valueAt func(int) any
var foundAt func(int)
// We're dealing with an array/slice/map, so in all cases we can iterate
// over the elements in actual using indices (that can be considered
// keys in case of maps).
if isMap(actual) {
keys := value.MapKeys()
- valueAt = func(i int) interface{} {
+ valueAt = func(i int) any {
return value.MapIndex(keys[i]).Interface()
}
if result.Kind() != reflect.Invalid {
@@ -150,7 +150,7 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e
}
}
} else {
- valueAt = func(i int) interface{} {
+ valueAt = func(i int) any {
return value.Index(i).Interface()
}
if result.Kind() != reflect.Invalid {
@@ -251,7 +251,7 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e
}
// pick up any findings the test is interested in as it specified a non-nil
- // result reference. However, the expection always is that there are at
+ // result reference. However, the expectation always is that there are at
// least one or multiple findings. So, if a result is expected, but we had
// no findings, then this is an error.
findings := getFindings()
@@ -284,10 +284,10 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e
return true, nil
}
-func (matcher *ContainElementMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *ContainElementMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to contain element matching", matcher.Element)
}
-func (matcher *ContainElementMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *ContainElementMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to contain element matching", matcher.Element)
}
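The result reference handled throughout Match() above lets a test capture which elements the inner matcher accepted, e.g. (same illustrative setup as before):

var findings []string
g.Expect([]string{"Foo", "FooBar"}).To(
    ContainElement(ContainSubstring("Bar"), &findings))
// findings now holds []string{"FooBar"}; per the comment above, a non-nil
// result reference with zero findings is reported as an error.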
diff --git a/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go
index d9fcb8b80..ce3041892 100644
--- a/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go
@@ -9,11 +9,11 @@ import (
)
type ContainElementsMatcher struct {
- Elements []interface{}
- missingElements []interface{}
+ Elements []any
+ missingElements []any
}
-func (matcher *ContainElementsMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *ContainElementsMatcher) Match(actual any) (success bool, err error) {
if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) {
return false, fmt.Errorf("ContainElements matcher expects an array/slice/map/iter.Seq/iter.Seq2. Got:\n%s", format.Object(actual, 1))
}
@@ -35,11 +35,11 @@ func (matcher *ContainElementsMatcher) Match(actual interface{}) (success bool,
return false, nil
}
-func (matcher *ContainElementsMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *ContainElementsMatcher) FailureMessage(actual any) (message string) {
message = format.Message(actual, "to contain elements", presentable(matcher.Elements))
return appendMissingElements(message, matcher.missingElements)
}
-func (matcher *ContainElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *ContainElementsMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to contain elements", presentable(matcher.Elements))
}
diff --git a/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go
index e725f8c27..d9980ee26 100644
--- a/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go
@@ -11,10 +11,10 @@ import (
type ContainSubstringMatcher struct {
Substr string
- Args []interface{}
+ Args []any
}
-func (matcher *ContainSubstringMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *ContainSubstringMatcher) Match(actual any) (success bool, err error) {
actualString, ok := toString(actual)
if !ok {
return false, fmt.Errorf("ContainSubstring matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1))
@@ -31,10 +31,10 @@ func (matcher *ContainSubstringMatcher) stringToMatch() string {
return stringToMatch
}
-func (matcher *ContainSubstringMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *ContainSubstringMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to contain substring", matcher.stringToMatch())
}
-func (matcher *ContainSubstringMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *ContainSubstringMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to contain substring", matcher.stringToMatch())
}
diff --git a/vendor/github.com/onsi/gomega/matchers/equal_matcher.go b/vendor/github.com/onsi/gomega/matchers/equal_matcher.go
index befb7bdfd..4ad166157 100644
--- a/vendor/github.com/onsi/gomega/matchers/equal_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/equal_matcher.go
@@ -9,10 +9,10 @@ import (
)
type EqualMatcher struct {
- Expected interface{}
+ Expected any
}
-func (matcher *EqualMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *EqualMatcher) Match(actual any) (success bool, err error) {
if actual == nil && matcher.Expected == nil {
return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
}
@@ -27,7 +27,7 @@ func (matcher *EqualMatcher) Match(actual interface{}) (success bool, err error)
return reflect.DeepEqual(actual, matcher.Expected), nil
}
-func (matcher *EqualMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *EqualMatcher) FailureMessage(actual any) (message string) {
actualString, actualOK := actual.(string)
expectedString, expectedOK := matcher.Expected.(string)
if actualOK && expectedOK {
@@ -37,6 +37,6 @@ func (matcher *EqualMatcher) FailureMessage(actual interface{}) (message string)
return format.Message(actual, "to equal", matcher.Expected)
}
-func (matcher *EqualMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *EqualMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to equal", matcher.Expected)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go
index 9856752f1..a4fcfc425 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go
@@ -12,7 +12,7 @@ type HaveCapMatcher struct {
Count int
}
-func (matcher *HaveCapMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveCapMatcher) Match(actual any) (success bool, err error) {
length, ok := capOf(actual)
if !ok {
return false, fmt.Errorf("HaveCap matcher expects a array/channel/slice. Got:\n%s", format.Object(actual, 1))
@@ -21,10 +21,10 @@ func (matcher *HaveCapMatcher) Match(actual interface{}) (success bool, err erro
return length == matcher.Count, nil
}
-func (matcher *HaveCapMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveCapMatcher) FailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\nto have capacity %d", format.Object(actual, 1), matcher.Count)
}
-func (matcher *HaveCapMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveCapMatcher) NegatedFailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\nnot to have capacity %d", format.Object(actual, 1), matcher.Count)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
index 4111f2b86..4c45063bd 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
@@ -9,10 +9,10 @@ import (
)
type HaveEachMatcher struct {
- Element interface{}
+ Element any
}
-func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveEachMatcher) Match(actual any) (success bool, err error) {
if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) {
return false, fmt.Errorf("HaveEach matcher expects an array/slice/map/iter.Seq/iter.Seq2. Got:\n%s",
format.Object(actual, 1))
@@ -61,14 +61,14 @@ func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err err
format.Object(actual, 1))
}
- var valueAt func(int) interface{}
+ var valueAt func(int) any
if isMap(actual) {
keys := value.MapKeys()
- valueAt = func(i int) interface{} {
+ valueAt = func(i int) any {
return value.MapIndex(keys[i]).Interface()
}
} else {
- valueAt = func(i int) interface{} {
+ valueAt = func(i int) any {
return value.Index(i).Interface()
}
}
@@ -89,11 +89,11 @@ func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err err
}
// FailureMessage returns a suitable failure message.
-func (matcher *HaveEachMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveEachMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to contain element matching", matcher.Element)
}
// NegatedFailureMessage returns a suitable negated failure message.
-func (matcher *HaveEachMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveEachMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to contain element matching", matcher.Element)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
index 23799f1c6..8b2d297c5 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
@@ -14,13 +14,13 @@ type mismatchFailure struct {
}
type HaveExactElementsMatcher struct {
- Elements []interface{}
+ Elements []any
mismatchFailures []mismatchFailure
missingIndex int
extraIndex int
}
-func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveExactElementsMatcher) Match(actual any) (success bool, err error) {
matcher.resetState()
if isMap(actual) || miter.IsSeq2(actual) {
@@ -108,7 +108,7 @@ func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool
return success, nil
}
-func (matcher *HaveExactElementsMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveExactElementsMatcher) FailureMessage(actual any) (message string) {
message = format.Message(actual, "to have exact elements with", presentable(matcher.Elements))
if matcher.missingIndex > 0 {
message = fmt.Sprintf("%s\nthe missing elements start from index %d", message, matcher.missingIndex)
@@ -125,7 +125,7 @@ func (matcher *HaveExactElementsMatcher) FailureMessage(actual interface{}) (mes
return
}
-func (matcher *HaveExactElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveExactElementsMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to contain elements", presentable(matcher.Elements))
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go
index b57018745..a5a028e9a 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go
@@ -11,7 +11,7 @@ type HaveExistingFieldMatcher struct {
Field string
}
-func (matcher *HaveExistingFieldMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveExistingFieldMatcher) Match(actual any) (success bool, err error) {
// we don't care about the field's actual value, just about any error in
// trying to find the field (or method).
_, err = extractField(actual, matcher.Field, "HaveExistingField")
@@ -27,10 +27,10 @@ func (matcher *HaveExistingFieldMatcher) Match(actual interface{}) (success bool
return false, err
}
-func (matcher *HaveExistingFieldMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveExistingFieldMatcher) FailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\nto have field '%s'", format.Object(actual, 1), matcher.Field)
}
-func (matcher *HaveExistingFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveExistingFieldMatcher) NegatedFailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\nnot to have field '%s'", format.Object(actual, 1), matcher.Field)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_field.go b/vendor/github.com/onsi/gomega/matchers/have_field.go
index 293457e85..d9fbeaf75 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_field.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_field.go
@@ -17,7 +17,7 @@ func (e missingFieldError) Error() string {
return string(e)
}
-func extractField(actual interface{}, field string, matchername string) (any, error) {
+func extractField(actual any, field string, matchername string) (any, error) {
fields := strings.SplitN(field, ".", 2)
actualValue := reflect.ValueOf(actual)
@@ -68,7 +68,7 @@ func extractField(actual interface{}, field string, matchername string) (any, er
type HaveFieldMatcher struct {
Field string
- Expected interface{}
+ Expected any
}
func (matcher *HaveFieldMatcher) expectedMatcher() omegaMatcher {
@@ -80,7 +80,7 @@ func (matcher *HaveFieldMatcher) expectedMatcher() omegaMatcher {
return expectedMatcher
}
-func (matcher *HaveFieldMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveFieldMatcher) Match(actual any) (success bool, err error) {
extractedField, err := extractField(actual, matcher.Field, "HaveField")
if err != nil {
return false, err
@@ -89,7 +89,7 @@ func (matcher *HaveFieldMatcher) Match(actual interface{}) (success bool, err er
return matcher.expectedMatcher().Match(extractedField)
}
-func (matcher *HaveFieldMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveFieldMatcher) FailureMessage(actual any) (message string) {
extractedField, err := extractField(actual, matcher.Field, "HaveField")
if err != nil {
// this really shouldn't happen
@@ -101,7 +101,7 @@ func (matcher *HaveFieldMatcher) FailureMessage(actual interface{}) (message str
return message
}
-func (matcher *HaveFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveFieldMatcher) NegatedFailureMessage(actual any) (message string) {
extractedField, err := extractField(actual, matcher.Field, "HaveField")
if err != nil {
// this really shouldn't happen
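extractField's dotted traversal gives HaveField its path syntax; a small sketch under the same setup as before (the Book type is illustrative):

type Book struct{ Title string }
book := Book{Title: "Les Miserables"}
// Nested fields use dots, and a trailing "()" would invoke a niladic method.
g.Expect(book).To(HaveField("Title", ContainSubstring("Les")))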
diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
index d14d9e5fc..2d561b9a2 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
@@ -11,12 +11,12 @@ import (
)
type HaveHTTPBodyMatcher struct {
- Expected interface{}
- cachedResponse interface{}
+ Expected any
+ cachedResponse any
cachedBody []byte
}
-func (matcher *HaveHTTPBodyMatcher) Match(actual interface{}) (bool, error) {
+func (matcher *HaveHTTPBodyMatcher) Match(actual any) (bool, error) {
body, err := matcher.body(actual)
if err != nil {
return false, err
@@ -34,7 +34,7 @@ func (matcher *HaveHTTPBodyMatcher) Match(actual interface{}) (bool, error) {
}
}
-func (matcher *HaveHTTPBodyMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveHTTPBodyMatcher) FailureMessage(actual any) (message string) {
body, err := matcher.body(actual)
if err != nil {
return fmt.Sprintf("failed to read body: %s", err)
@@ -52,7 +52,7 @@ func (matcher *HaveHTTPBodyMatcher) FailureMessage(actual interface{}) (message
}
}
-func (matcher *HaveHTTPBodyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveHTTPBodyMatcher) NegatedFailureMessage(actual any) (message string) {
body, err := matcher.body(actual)
if err != nil {
return fmt.Sprintf("failed to read body: %s", err)
@@ -73,7 +73,7 @@ func (matcher *HaveHTTPBodyMatcher) NegatedFailureMessage(actual interface{}) (m
// body returns the body. It is cached because once we read it in Match()
// the Reader is closed and it is not readable again in FailureMessage()
// or NegatedFailureMessage()
-func (matcher *HaveHTTPBodyMatcher) body(actual interface{}) ([]byte, error) {
+func (matcher *HaveHTTPBodyMatcher) body(actual any) ([]byte, error) {
if matcher.cachedResponse == actual && matcher.cachedBody != nil {
return matcher.cachedBody, nil
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go
index c256f452e..756722659 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go
@@ -11,10 +11,10 @@ import (
type HaveHTTPHeaderWithValueMatcher struct {
Header string
- Value interface{}
+ Value any
}
-func (matcher *HaveHTTPHeaderWithValueMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveHTTPHeaderWithValueMatcher) Match(actual any) (success bool, err error) {
headerValue, err := matcher.extractHeader(actual)
if err != nil {
return false, err
@@ -28,7 +28,7 @@ func (matcher *HaveHTTPHeaderWithValueMatcher) Match(actual interface{}) (succes
return headerMatcher.Match(headerValue)
}
-func (matcher *HaveHTTPHeaderWithValueMatcher) FailureMessage(actual interface{}) string {
+func (matcher *HaveHTTPHeaderWithValueMatcher) FailureMessage(actual any) string {
headerValue, err := matcher.extractHeader(actual)
if err != nil {
panic(err) // protected by Match()
@@ -43,7 +43,7 @@ func (matcher *HaveHTTPHeaderWithValueMatcher) FailureMessage(actual interface{}
return fmt.Sprintf("HTTP header %q:\n%s", matcher.Header, diff)
}
-func (matcher *HaveHTTPHeaderWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveHTTPHeaderWithValueMatcher) NegatedFailureMessage(actual any) (message string) {
headerValue, err := matcher.extractHeader(actual)
if err != nil {
panic(err) // protected by Match()
@@ -69,7 +69,7 @@ func (matcher *HaveHTTPHeaderWithValueMatcher) getSubMatcher() (types.GomegaMatc
}
}
-func (matcher *HaveHTTPHeaderWithValueMatcher) extractHeader(actual interface{}) (string, error) {
+func (matcher *HaveHTTPHeaderWithValueMatcher) extractHeader(actual any) (string, error) {
switch r := actual.(type) {
case *http.Response:
return r.Header.Get(matcher.Header), nil
diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go
index 0f66e46ec..8b25b3a9f 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go
@@ -12,10 +12,10 @@ import (
)
type HaveHTTPStatusMatcher struct {
- Expected []interface{}
+ Expected []any
}
-func (matcher *HaveHTTPStatusMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveHTTPStatusMatcher) Match(actual any) (success bool, err error) {
var resp *http.Response
switch a := actual.(type) {
case *http.Response:
@@ -48,11 +48,11 @@ func (matcher *HaveHTTPStatusMatcher) Match(actual interface{}) (success bool, e
return false, nil
}
-func (matcher *HaveHTTPStatusMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveHTTPStatusMatcher) FailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\n%s\n%s", formatHttpResponse(actual), "to have HTTP status", matcher.expectedString())
}
-func (matcher *HaveHTTPStatusMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveHTTPStatusMatcher) NegatedFailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\n%s\n%s", formatHttpResponse(actual), "not to have HTTP status", matcher.expectedString())
}
@@ -64,7 +64,7 @@ func (matcher *HaveHTTPStatusMatcher) expectedString() string {
return strings.Join(lines, "\n")
}
-func formatHttpResponse(input interface{}) string {
+func formatHttpResponse(input any) string {
var resp *http.Response
switch r := input.(type) {
case *http.Response:
diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go
index b62ee93cb..9e16dcf5d 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go
@@ -11,10 +11,10 @@ import (
)
type HaveKeyMatcher struct {
- Key interface{}
+ Key any
}
-func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveKeyMatcher) Match(actual any) (success bool, err error) {
if !isMap(actual) && !miter.IsSeq2(actual) {
return false, fmt.Errorf("HaveKey matcher expects a map/iter.Seq2. Got:%s", format.Object(actual, 1))
}
@@ -52,7 +52,7 @@ func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err erro
return false, nil
}
-func (matcher *HaveKeyMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveKeyMatcher) FailureMessage(actual any) (message string) {
switch matcher.Key.(type) {
case omegaMatcher:
return format.Message(actual, "to have key matching", matcher.Key)
@@ -61,7 +61,7 @@ func (matcher *HaveKeyMatcher) FailureMessage(actual interface{}) (message strin
}
}
-func (matcher *HaveKeyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveKeyMatcher) NegatedFailureMessage(actual any) (message string) {
switch matcher.Key.(type) {
case omegaMatcher:
return format.Message(actual, "not to have key matching", matcher.Key)
diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go
index 3d608f63e..1c53f1e56 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go
@@ -11,11 +11,11 @@ import (
)
type HaveKeyWithValueMatcher struct {
- Key interface{}
- Value interface{}
+ Key any
+ Value any
}
-func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveKeyWithValueMatcher) Match(actual any) (success bool, err error) {
if !isMap(actual) && !miter.IsSeq2(actual) {
return false, fmt.Errorf("HaveKeyWithValue matcher expects a map/iter.Seq2. Got:%s", format.Object(actual, 1))
}
@@ -70,7 +70,7 @@ func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool,
return false, nil
}
-func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual any) (message string) {
str := "to have {key: value}"
if _, ok := matcher.Key.(omegaMatcher); ok {
str += " matching"
@@ -78,12 +78,12 @@ func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual interface{}) (mess
str += " matching"
}
- expect := make(map[interface{}]interface{}, 1)
+ expect := make(map[any]any, 1)
expect[matcher.Key] = matcher.Value
return format.Message(actual, str, expect)
}
-func (matcher *HaveKeyWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveKeyWithValueMatcher) NegatedFailureMessage(actual any) (message string) {
kStr := "not to have key"
if _, ok := matcher.Key.(omegaMatcher); ok {
kStr = "not to have key matching"
diff --git a/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go
index ca25713fe..c334d4c0a 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go
@@ -10,7 +10,7 @@ type HaveLenMatcher struct {
Count int
}
-func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveLenMatcher) Match(actual any) (success bool, err error) {
length, ok := lengthOf(actual)
if !ok {
return false, fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice/iterator. Got:\n%s", format.Object(actual, 1))
@@ -19,10 +19,10 @@ func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err erro
return length == matcher.Count, nil
}
-func (matcher *HaveLenMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveLenMatcher) FailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\nto have length %d", format.Object(actual, 1), matcher.Count)
}
-func (matcher *HaveLenMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveLenMatcher) NegatedFailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\nnot to have length %d", format.Object(actual, 1), matcher.Count)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
index 22a1b6730..a240f1a1c 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
@@ -11,7 +11,7 @@ import (
type HaveOccurredMatcher struct {
}
-func (matcher *HaveOccurredMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveOccurredMatcher) Match(actual any) (success bool, err error) {
// is purely nil?
if actual == nil {
return false, nil
@@ -26,10 +26,10 @@ func (matcher *HaveOccurredMatcher) Match(actual interface{}) (success bool, err
return !isNil(actual), nil
}
-func (matcher *HaveOccurredMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveOccurredMatcher) FailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected an error to have occurred. Got:\n%s", format.Object(actual, 1))
}
-func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual any) (message string) {
return fmt.Sprintf("Unexpected error:\n%s\n%s", format.Object(actual, 1), "occurred")
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go
index 1d8e80270..7987d41f7 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go
@@ -8,10 +8,10 @@ import (
type HavePrefixMatcher struct {
Prefix string
- Args []interface{}
+ Args []any
}
-func (matcher *HavePrefixMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HavePrefixMatcher) Match(actual any) (success bool, err error) {
actualString, ok := toString(actual)
if !ok {
return false, fmt.Errorf("HavePrefix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1))
@@ -27,10 +27,10 @@ func (matcher *HavePrefixMatcher) prefix() string {
return matcher.Prefix
}
-func (matcher *HavePrefixMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HavePrefixMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to have prefix", matcher.prefix())
}
-func (matcher *HavePrefixMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HavePrefixMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to have prefix", matcher.prefix())
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go
index 40a3526eb..2aa4ceacb 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go
@@ -8,10 +8,10 @@ import (
type HaveSuffixMatcher struct {
Suffix string
- Args []interface{}
+ Args []any
}
-func (matcher *HaveSuffixMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveSuffixMatcher) Match(actual any) (success bool, err error) {
actualString, ok := toString(actual)
if !ok {
return false, fmt.Errorf("HaveSuffix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1))
@@ -27,10 +27,10 @@ func (matcher *HaveSuffixMatcher) suffix() string {
return matcher.Suffix
}
-func (matcher *HaveSuffixMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveSuffixMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to have suffix", matcher.suffix())
}
-func (matcher *HaveSuffixMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveSuffixMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to have suffix", matcher.suffix())
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_value.go b/vendor/github.com/onsi/gomega/matchers/have_value.go
index f67252835..4c39e0db0 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_value.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_value.go
@@ -12,10 +12,10 @@ const maxIndirections = 31
type HaveValueMatcher struct {
Matcher types.GomegaMatcher // the matcher to apply to the "resolved" actual value.
- resolvedActual interface{} // the ("resolved") value.
+ resolvedActual any // the ("resolved") value.
}
-func (m *HaveValueMatcher) Match(actual interface{}) (bool, error) {
+func (m *HaveValueMatcher) Match(actual any) (bool, error) {
val := reflect.ValueOf(actual)
for allowedIndirs := maxIndirections; allowedIndirs > 0; allowedIndirs-- {
// return an error if value isn't valid. Please note that we cannot
@@ -45,10 +45,10 @@ func (m *HaveValueMatcher) Match(actual interface{}) (bool, error) {
return false, errors.New(format.Message(actual, "too many indirections"))
}
-func (m *HaveValueMatcher) FailureMessage(_ interface{}) (message string) {
+func (m *HaveValueMatcher) FailureMessage(_ any) (message string) {
return m.Matcher.FailureMessage(m.resolvedActual)
}
-func (m *HaveValueMatcher) NegatedFailureMessage(_ interface{}) (message string) {
+func (m *HaveValueMatcher) NegatedFailureMessage(_ any) (message string) {
return m.Matcher.NegatedFailureMessage(m.resolvedActual)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
index c539dd389..f9d313772 100644
--- a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
@@ -71,14 +71,14 @@ func (matcher *MatchErrorMatcher) Match(actual any) (success bool, err error) {
format.Object(expected, 1))
}
-func (matcher *MatchErrorMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *MatchErrorMatcher) FailureMessage(actual any) (message string) {
if matcher.isFunc {
return format.Message(actual, fmt.Sprintf("to match error function %s", matcher.FuncErrDescription[0]))
}
return format.Message(actual, "to match error", matcher.Expected)
}
-func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual any) (message string) {
if matcher.isFunc {
return format.Message(actual, fmt.Sprintf("not to match error function %s", matcher.FuncErrDescription[0]))
}
diff --git a/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go
index f962f139f..331f289ab 100644
--- a/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go
@@ -9,18 +9,18 @@ import (
)
type MatchJSONMatcher struct {
- JSONToMatch interface{}
- firstFailurePath []interface{}
+ JSONToMatch any
+ firstFailurePath []any
}
-func (matcher *MatchJSONMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *MatchJSONMatcher) Match(actual any) (success bool, err error) {
actualString, expectedString, err := matcher.prettyPrint(actual)
if err != nil {
return false, err
}
- var aval interface{}
- var eval interface{}
+ var aval any
+ var eval any
// this is guarded by prettyPrint
json.Unmarshal([]byte(actualString), &aval)
@@ -30,17 +30,17 @@ func (matcher *MatchJSONMatcher) Match(actual interface{}) (success bool, err er
return equal, nil
}
-func (matcher *MatchJSONMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *MatchJSONMatcher) FailureMessage(actual any) (message string) {
actualString, expectedString, _ := matcher.prettyPrint(actual)
return formattedMessage(format.Message(actualString, "to match JSON of", expectedString), matcher.firstFailurePath)
}
-func (matcher *MatchJSONMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *MatchJSONMatcher) NegatedFailureMessage(actual any) (message string) {
actualString, expectedString, _ := matcher.prettyPrint(actual)
return formattedMessage(format.Message(actualString, "not to match JSON of", expectedString), matcher.firstFailurePath)
}
-func (matcher *MatchJSONMatcher) prettyPrint(actual interface{}) (actualFormatted, expectedFormatted string, err error) {
+func (matcher *MatchJSONMatcher) prettyPrint(actual any) (actualFormatted, expectedFormatted string, err error) {
actualString, ok := toString(actual)
if !ok {
return "", "", fmt.Errorf("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1))
diff --git a/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go
index adac5db6b..779be683e 100644
--- a/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go
@@ -9,10 +9,10 @@ import (
type MatchRegexpMatcher struct {
Regexp string
- Args []interface{}
+ Args []any
}
-func (matcher *MatchRegexpMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *MatchRegexpMatcher) Match(actual any) (success bool, err error) {
actualString, ok := toString(actual)
if !ok {
return false, fmt.Errorf("RegExp matcher requires a string or stringer.\nGot:%s", format.Object(actual, 1))
@@ -26,11 +26,11 @@ func (matcher *MatchRegexpMatcher) Match(actual interface{}) (success bool, err
return match, nil
}
-func (matcher *MatchRegexpMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *MatchRegexpMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to match regular expression", matcher.regexp())
}
-func (matcher *MatchRegexpMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *MatchRegexpMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to match regular expression", matcher.regexp())
}
diff --git a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go
index 5c815f5af..f7dcaf6fd 100644
--- a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go
@@ -15,10 +15,10 @@ import (
)
type MatchXMLMatcher struct {
- XMLToMatch interface{}
+ XMLToMatch any
}
-func (matcher *MatchXMLMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *MatchXMLMatcher) Match(actual any) (success bool, err error) {
actualString, expectedString, err := matcher.formattedPrint(actual)
if err != nil {
return false, err
@@ -37,17 +37,17 @@ func (matcher *MatchXMLMatcher) Match(actual interface{}) (success bool, err err
return reflect.DeepEqual(aval, eval), nil
}
-func (matcher *MatchXMLMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *MatchXMLMatcher) FailureMessage(actual any) (message string) {
actualString, expectedString, _ := matcher.formattedPrint(actual)
return fmt.Sprintf("Expected\n%s\nto match XML of\n%s", actualString, expectedString)
}
-func (matcher *MatchXMLMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *MatchXMLMatcher) NegatedFailureMessage(actual any) (message string) {
actualString, expectedString, _ := matcher.formattedPrint(actual)
return fmt.Sprintf("Expected\n%s\nnot to match XML of\n%s", actualString, expectedString)
}
-func (matcher *MatchXMLMatcher) formattedPrint(actual interface{}) (actualString, expectedString string, err error) {
+func (matcher *MatchXMLMatcher) formattedPrint(actual any) (actualString, expectedString string, err error) {
var ok bool
actualString, ok = toString(actual)
if !ok {
diff --git a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go
index 2cb6b47db..95057c26c 100644
--- a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go
@@ -9,18 +9,18 @@ import (
)
type MatchYAMLMatcher struct {
- YAMLToMatch interface{}
- firstFailurePath []interface{}
+ YAMLToMatch any
+ firstFailurePath []any
}
-func (matcher *MatchYAMLMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *MatchYAMLMatcher) Match(actual any) (success bool, err error) {
actualString, expectedString, err := matcher.toStrings(actual)
if err != nil {
return false, err
}
- var aval interface{}
- var eval interface{}
+ var aval any
+ var eval any
if err := yaml.Unmarshal([]byte(actualString), &aval); err != nil {
return false, fmt.Errorf("Actual '%s' should be valid YAML, but it is not.\nUnderlying error:%s", actualString, err)
@@ -34,23 +34,23 @@ func (matcher *MatchYAMLMatcher) Match(actual interface{}) (success bool, err er
return equal, nil
}
-func (matcher *MatchYAMLMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *MatchYAMLMatcher) FailureMessage(actual any) (message string) {
actualString, expectedString, _ := matcher.toNormalisedStrings(actual)
return formattedMessage(format.Message(actualString, "to match YAML of", expectedString), matcher.firstFailurePath)
}
-func (matcher *MatchYAMLMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *MatchYAMLMatcher) NegatedFailureMessage(actual any) (message string) {
actualString, expectedString, _ := matcher.toNormalisedStrings(actual)
return formattedMessage(format.Message(actualString, "not to match YAML of", expectedString), matcher.firstFailurePath)
}
-func (matcher *MatchYAMLMatcher) toNormalisedStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) {
+func (matcher *MatchYAMLMatcher) toNormalisedStrings(actual any) (actualFormatted, expectedFormatted string, err error) {
actualString, expectedString, err := matcher.toStrings(actual)
return normalise(actualString), normalise(expectedString), err
}
func normalise(input string) string {
- var val interface{}
+ var val any
err := yaml.Unmarshal([]byte(input), &val)
if err != nil {
panic(err) // unreachable since Match already calls Unmarshal
@@ -62,7 +62,7 @@ func normalise(input string) string {
return strings.TrimSpace(string(output))
}
-func (matcher *MatchYAMLMatcher) toStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) {
+func (matcher *MatchYAMLMatcher) toStrings(actual any) (actualFormatted, expectedFormatted string, err error) {
actualString, ok := toString(actual)
if !ok {
return "", "", fmt.Errorf("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1))
diff --git a/vendor/github.com/onsi/gomega/matchers/not.go b/vendor/github.com/onsi/gomega/matchers/not.go
index 78b71910d..c598b7899 100644
--- a/vendor/github.com/onsi/gomega/matchers/not.go
+++ b/vendor/github.com/onsi/gomega/matchers/not.go
@@ -8,7 +8,7 @@ type NotMatcher struct {
Matcher types.GomegaMatcher
}
-func (m *NotMatcher) Match(actual interface{}) (bool, error) {
+func (m *NotMatcher) Match(actual any) (bool, error) {
success, err := m.Matcher.Match(actual)
if err != nil {
return false, err
@@ -16,14 +16,14 @@ func (m *NotMatcher) Match(actual interface{}) (bool, error) {
return !success, nil
}
-func (m *NotMatcher) FailureMessage(actual interface{}) (message string) {
+func (m *NotMatcher) FailureMessage(actual any) (message string) {
return m.Matcher.NegatedFailureMessage(actual) // works beautifully
}
-func (m *NotMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (m *NotMatcher) NegatedFailureMessage(actual any) (message string) {
return m.Matcher.FailureMessage(actual) // works beautifully
}
-func (m *NotMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+func (m *NotMatcher) MatchMayChangeInTheFuture(actual any) bool {
return types.MatchMayChangeInTheFuture(m.Matcher, actual) // just return m.Matcher's value
}
diff --git a/vendor/github.com/onsi/gomega/matchers/or.go b/vendor/github.com/onsi/gomega/matchers/or.go
index 841ae26ab..6578404b0 100644
--- a/vendor/github.com/onsi/gomega/matchers/or.go
+++ b/vendor/github.com/onsi/gomega/matchers/or.go
@@ -14,7 +14,7 @@ type OrMatcher struct {
firstSuccessfulMatcher types.GomegaMatcher
}
-func (m *OrMatcher) Match(actual interface{}) (success bool, err error) {
+func (m *OrMatcher) Match(actual any) (success bool, err error) {
m.firstSuccessfulMatcher = nil
for _, matcher := range m.Matchers {
success, err := matcher.Match(actual)
@@ -29,16 +29,16 @@ func (m *OrMatcher) Match(actual interface{}) (success bool, err error) {
return false, nil
}
-func (m *OrMatcher) FailureMessage(actual interface{}) (message string) {
+func (m *OrMatcher) FailureMessage(actual any) (message string) {
// not the most beautiful list of matchers, but not bad either...
return format.Message(actual, fmt.Sprintf("To satisfy at least one of these matchers: %s", m.Matchers))
}
-func (m *OrMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (m *OrMatcher) NegatedFailureMessage(actual any) (message string) {
return m.firstSuccessfulMatcher.NegatedFailureMessage(actual)
}
-func (m *OrMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+func (m *OrMatcher) MatchMayChangeInTheFuture(actual any) bool {
/*
Example with 3 matchers: A, B, C
diff --git a/vendor/github.com/onsi/gomega/matchers/panic_matcher.go b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go
index adc8cee63..8be5a7ccf 100644
--- a/vendor/github.com/onsi/gomega/matchers/panic_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go
@@ -8,11 +8,11 @@ import (
)
type PanicMatcher struct {
- Expected interface{}
- object interface{}
+ Expected any
+ object any
}
-func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *PanicMatcher) Match(actual any) (success bool, err error) {
if actual == nil {
return false, fmt.Errorf("PanicMatcher expects a non-nil actual.")
}
@@ -52,7 +52,7 @@ func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error)
return
}
-func (matcher *PanicMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *PanicMatcher) FailureMessage(actual any) (message string) {
if matcher.Expected == nil {
// We wanted any panic to occur, but none did.
return format.Message(actual, "to panic")
@@ -91,7 +91,7 @@ func (matcher *PanicMatcher) FailureMessage(actual interface{}) (message string)
}
}
-func (matcher *PanicMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *PanicMatcher) NegatedFailureMessage(actual any) (message string) {
if matcher.Expected == nil {
// We didn't want any panic to occur, but one did.
return format.Message(actual, fmt.Sprintf("not to panic, but panicked with\n%s", format.Object(matcher.object, 1)))
diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go
index 948164eaf..1d9f61d63 100644
--- a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go
@@ -11,12 +11,12 @@ import (
)
type ReceiveMatcher struct {
- Args []interface{}
+ Args []any
receivedValue reflect.Value
channelClosed bool
}
-func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *ReceiveMatcher) Match(actual any) (success bool, err error) {
if !isChan(actual) {
return false, fmt.Errorf("ReceiveMatcher expects a channel. Got:\n%s", format.Object(actual, 1))
}
@@ -30,7 +30,7 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro
var subMatcher omegaMatcher
var hasSubMatcher bool
- var resultReference interface{}
+ var resultReference any
// Valid arg formats are as follows, always with optional POINTER before
// optional MATCHER:
@@ -115,8 +115,8 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro
return false, nil
}
-func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) {
- var matcherArg interface{}
+func (matcher *ReceiveMatcher) FailureMessage(actual any) (message string) {
+ var matcherArg any
if len(matcher.Args) > 0 {
matcherArg = matcher.Args[len(matcher.Args)-1]
}
@@ -136,8 +136,8 @@ func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message strin
return format.Message(actual, "to receive something."+closedAddendum)
}
-func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- var matcherArg interface{}
+func (matcher *ReceiveMatcher) NegatedFailureMessage(actual any) (message string) {
+ var matcherArg any
if len(matcher.Args) > 0 {
matcherArg = matcher.Args[len(matcher.Args)-1]
}
@@ -157,7 +157,7 @@ func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (messag
return format.Message(actual, "not to receive anything."+closedAddendum)
}
-func (matcher *ReceiveMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+func (matcher *ReceiveMatcher) MatchMayChangeInTheFuture(actual any) bool {
if !isChan(actual) {
return false
}
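For reference, the POINTER/MATCHER arg formats described in the hunk above correspond to these public usages. A minimal sketch (hypothetical test, standard gomega API):

```go
package example_test

import (
	"testing"

	"github.com/onsi/gomega"
)

func TestReceiveForms(t *testing.T) {
	g := gomega.NewWithT(t)
	ch := make(chan string, 2)
	ch <- "foo"
	ch <- "bar"

	var got string
	g.Expect(ch).To(gomega.Receive(&got))                // POINTER form: receives "foo" into got
	g.Expect(ch).To(gomega.Receive(gomega.Equal("bar"))) // MATCHER form: asserts on the received value
	g.Expect(got).To(gomega.Equal("foo"))
}
```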
diff --git a/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go b/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go
index ec68fe8b6..2adc4825a 100644
--- a/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go
@@ -8,13 +8,13 @@ import (
)
type SatisfyMatcher struct {
- Predicate interface{}
+ Predicate any
// cached type
predicateArgType reflect.Type
}
-func NewSatisfyMatcher(predicate interface{}) *SatisfyMatcher {
+func NewSatisfyMatcher(predicate any) *SatisfyMatcher {
if predicate == nil {
panic("predicate cannot be nil")
}
@@ -35,7 +35,7 @@ func NewSatisfyMatcher(predicate interface{}) *SatisfyMatcher {
}
}
-func (m *SatisfyMatcher) Match(actual interface{}) (success bool, err error) {
+func (m *SatisfyMatcher) Match(actual any) (success bool, err error) {
// prepare a parameter to pass to the predicate
var param reflect.Value
if actual != nil && reflect.TypeOf(actual).AssignableTo(m.predicateArgType) {
@@ -57,10 +57,10 @@ func (m *SatisfyMatcher) Match(actual interface{}) (success bool, err error) {
return result[0].Bool(), nil
}
-func (m *SatisfyMatcher) FailureMessage(actual interface{}) (message string) {
+func (m *SatisfyMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to satisfy predicate", m.Predicate)
}
-func (m *SatisfyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (m *SatisfyMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "to not satisfy predicate", m.Predicate)
}
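For reference, a minimal usage sketch of the predicate matcher above via gomega's public Satisfy helper (hypothetical test, not part of this diff):

```go
package example_test

import (
	"testing"

	"github.com/onsi/gomega"
)

func TestSatisfyEven(t *testing.T) {
	g := gomega.NewWithT(t)
	// Satisfy wraps NewSatisfyMatcher: the predicate must take exactly
	// one argument and return a single bool.
	g.Expect(8).To(gomega.Satisfy(func(n int) bool { return n%2 == 0 }))
}
```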
diff --git a/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go b/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go
index 1369c1e87..30dd58f4a 100644
--- a/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go
+++ b/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go
@@ -8,7 +8,7 @@ import (
"strings"
)
-func formattedMessage(comparisonMessage string, failurePath []interface{}) string {
+func formattedMessage(comparisonMessage string, failurePath []any) string {
var diffMessage string
if len(failurePath) == 0 {
diffMessage = ""
@@ -18,7 +18,7 @@ func formattedMessage(comparisonMessage string, failurePath []interface{}) strin
return fmt.Sprintf("%s%s", comparisonMessage, diffMessage)
}
-func formattedFailurePath(failurePath []interface{}) string {
+func formattedFailurePath(failurePath []any) string {
formattedPaths := []string{}
for i := len(failurePath) - 1; i >= 0; i-- {
switch p := failurePath[i].(type) {
@@ -34,33 +34,33 @@ func formattedFailurePath(failurePath []interface{}) string {
return strings.Join(formattedPaths, "")
}
-func deepEqual(a interface{}, b interface{}) (bool, []interface{}) {
- var errorPath []interface{}
+func deepEqual(a any, b any) (bool, []any) {
+ var errorPath []any
if reflect.TypeOf(a) != reflect.TypeOf(b) {
return false, errorPath
}
switch a.(type) {
- case []interface{}:
- if len(a.([]interface{})) != len(b.([]interface{})) {
+ case []any:
+ if len(a.([]any)) != len(b.([]any)) {
return false, errorPath
}
- for i, v := range a.([]interface{}) {
- elementEqual, keyPath := deepEqual(v, b.([]interface{})[i])
+ for i, v := range a.([]any) {
+ elementEqual, keyPath := deepEqual(v, b.([]any)[i])
if !elementEqual {
return false, append(keyPath, i)
}
}
return true, errorPath
- case map[interface{}]interface{}:
- if len(a.(map[interface{}]interface{})) != len(b.(map[interface{}]interface{})) {
+ case map[any]any:
+ if len(a.(map[any]any)) != len(b.(map[any]any)) {
return false, errorPath
}
- for k, v1 := range a.(map[interface{}]interface{}) {
- v2, ok := b.(map[interface{}]interface{})[k]
+ for k, v1 := range a.(map[any]any) {
+ v2, ok := b.(map[any]any)[k]
if !ok {
return false, errorPath
}
@@ -71,13 +71,13 @@ func deepEqual(a interface{}, b interface{}) (bool, []interface{}) {
}
return true, errorPath
- case map[string]interface{}:
- if len(a.(map[string]interface{})) != len(b.(map[string]interface{})) {
+ case map[string]any:
+ if len(a.(map[string]any)) != len(b.(map[string]any)) {
return false, errorPath
}
- for k, v1 := range a.(map[string]interface{}) {
- v2, ok := b.(map[string]interface{})[k]
+ for k, v1 := range a.(map[string]any) {
+ v2, ok := b.(map[string]any)[k]
if !ok {
return false, errorPath
}
diff --git a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
index 327350f7b..f0b2c4aa6 100644
--- a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
@@ -14,7 +14,7 @@ type formattedGomegaError interface {
type SucceedMatcher struct {
}
-func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *SucceedMatcher) Match(actual any) (success bool, err error) {
// is purely nil?
if actual == nil {
return true, nil
@@ -29,7 +29,7 @@ func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err erro
return isNil(actual), nil
}
-func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *SucceedMatcher) FailureMessage(actual any) (message string) {
var fgErr formattedGomegaError
if errors.As(actual.(error), &fgErr) {
return fgErr.FormattedGomegaError()
@@ -37,6 +37,6 @@ func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message strin
return fmt.Sprintf("Expected success, but got an error:\n%s", format.Object(actual, 1))
}
-func (matcher *SucceedMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *SucceedMatcher) NegatedFailureMessage(actual any) (message string) {
return "Expected failure, but got no error."
}
diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go
index 830e30827..0d78779d4 100644
--- a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go
+++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go
@@ -11,7 +11,7 @@ type BipartiteGraph struct {
Edges EdgeSet
}
-func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(interface{}, interface{}) (bool, error)) (*BipartiteGraph, error) {
+func NewBipartiteGraph(leftValues, rightValues []any, neighbours func(any, any) (bool, error)) (*BipartiteGraph, error) {
left := NodeOrderedSet{}
for i, v := range leftValues {
left = append(left, Node{ID: i, Value: v})
@@ -41,7 +41,7 @@ func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(in
// FreeLeftRight returns left node values and right node values
// of the BipartiteGraph's nodes which are not part of the given edges.
-func (bg *BipartiteGraph) FreeLeftRight(edges EdgeSet) (leftValues, rightValues []interface{}) {
+func (bg *BipartiteGraph) FreeLeftRight(edges EdgeSet) (leftValues, rightValues []any) {
for _, node := range bg.Left {
if edges.Free(node) {
leftValues = append(leftValues, node.Value)
diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go
index cd597a2f2..66d3578d5 100644
--- a/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go
+++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go
@@ -2,7 +2,7 @@ package node
type Node struct {
ID int
- Value interface{}
+ Value any
}
type NodeOrderedSet []Node
diff --git a/vendor/github.com/onsi/gomega/matchers/type_support.go b/vendor/github.com/onsi/gomega/matchers/type_support.go
index b9440ac7a..d020dedc3 100644
--- a/vendor/github.com/onsi/gomega/matchers/type_support.go
+++ b/vendor/github.com/onsi/gomega/matchers/type_support.go
@@ -20,16 +20,16 @@ import (
)
type omegaMatcher interface {
- Match(actual interface{}) (success bool, err error)
- FailureMessage(actual interface{}) (message string)
- NegatedFailureMessage(actual interface{}) (message string)
+ Match(actual any) (success bool, err error)
+ FailureMessage(actual any) (message string)
+ NegatedFailureMessage(actual any) (message string)
}
-func isBool(a interface{}) bool {
+func isBool(a any) bool {
return reflect.TypeOf(a).Kind() == reflect.Bool
}
-func isNumber(a interface{}) bool {
+func isNumber(a any) bool {
if a == nil {
return false
}
@@ -37,22 +37,22 @@ func isNumber(a interface{}) bool {
return reflect.Int <= kind && kind <= reflect.Float64
}
-func isInteger(a interface{}) bool {
+func isInteger(a any) bool {
kind := reflect.TypeOf(a).Kind()
return reflect.Int <= kind && kind <= reflect.Int64
}
-func isUnsignedInteger(a interface{}) bool {
+func isUnsignedInteger(a any) bool {
kind := reflect.TypeOf(a).Kind()
return reflect.Uint <= kind && kind <= reflect.Uint64
}
-func isFloat(a interface{}) bool {
+func isFloat(a any) bool {
kind := reflect.TypeOf(a).Kind()
return reflect.Float32 <= kind && kind <= reflect.Float64
}
-func toInteger(a interface{}) int64 {
+func toInteger(a any) int64 {
if isInteger(a) {
return reflect.ValueOf(a).Int()
} else if isUnsignedInteger(a) {
@@ -63,7 +63,7 @@ func toInteger(a interface{}) int64 {
panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a))
}
-func toUnsignedInteger(a interface{}) uint64 {
+func toUnsignedInteger(a any) uint64 {
if isInteger(a) {
return uint64(reflect.ValueOf(a).Int())
} else if isUnsignedInteger(a) {
@@ -74,7 +74,7 @@ func toUnsignedInteger(a interface{}) uint64 {
panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a))
}
-func toFloat(a interface{}) float64 {
+func toFloat(a any) float64 {
if isInteger(a) {
return float64(reflect.ValueOf(a).Int())
} else if isUnsignedInteger(a) {
@@ -85,26 +85,26 @@ func toFloat(a interface{}) float64 {
panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a))
}
-func isError(a interface{}) bool {
+func isError(a any) bool {
_, ok := a.(error)
return ok
}
-func isChan(a interface{}) bool {
+func isChan(a any) bool {
if isNil(a) {
return false
}
return reflect.TypeOf(a).Kind() == reflect.Chan
}
-func isMap(a interface{}) bool {
+func isMap(a any) bool {
if a == nil {
return false
}
return reflect.TypeOf(a).Kind() == reflect.Map
}
-func isArrayOrSlice(a interface{}) bool {
+func isArrayOrSlice(a any) bool {
if a == nil {
return false
}
@@ -116,14 +116,14 @@ func isArrayOrSlice(a interface{}) bool {
}
}
-func isString(a interface{}) bool {
+func isString(a any) bool {
if a == nil {
return false
}
return reflect.TypeOf(a).Kind() == reflect.String
}
-func toString(a interface{}) (string, bool) {
+func toString(a any) (string, bool) {
aString, isString := a.(string)
if isString {
return aString, true
@@ -147,7 +147,7 @@ func toString(a interface{}) (string, bool) {
return "", false
}
-func lengthOf(a interface{}) (int, bool) {
+func lengthOf(a any) (int, bool) {
if a == nil {
return 0, false
}
@@ -169,7 +169,7 @@ func lengthOf(a interface{}) (int, bool) {
return 0, false
}
}
-func capOf(a interface{}) (int, bool) {
+func capOf(a any) (int, bool) {
if a == nil {
return 0, false
}
@@ -181,7 +181,7 @@ func capOf(a interface{}) (int, bool) {
}
}
-func isNil(a interface{}) bool {
+func isNil(a any) bool {
if a == nil {
return true
}
diff --git a/vendor/github.com/onsi/gomega/matchers/with_transform.go b/vendor/github.com/onsi/gomega/matchers/with_transform.go
index 6f743b1b3..6231c3b47 100644
--- a/vendor/github.com/onsi/gomega/matchers/with_transform.go
+++ b/vendor/github.com/onsi/gomega/matchers/with_transform.go
@@ -9,20 +9,20 @@ import (
type WithTransformMatcher struct {
// input
- Transform interface{} // must be a function of one parameter that returns one value and an optional error
+ Transform any // must be a function of one parameter that returns one value and an optional error
Matcher types.GomegaMatcher
// cached value
transformArgType reflect.Type
// state
- transformedValue interface{}
+ transformedValue any
}
// reflect.Type for error
var errorT = reflect.TypeOf((*error)(nil)).Elem()
-func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) *WithTransformMatcher {
+func NewWithTransformMatcher(transform any, matcher types.GomegaMatcher) *WithTransformMatcher {
if transform == nil {
panic("transform function cannot be nil")
}
@@ -43,7 +43,7 @@ func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher)
}
}
-func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) {
+func (m *WithTransformMatcher) Match(actual any) (bool, error) {
// prepare a parameter to pass to the Transform function
var param reflect.Value
if actual != nil && reflect.TypeOf(actual).AssignableTo(m.transformArgType) {
@@ -72,15 +72,15 @@ func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) {
return m.Matcher.Match(m.transformedValue)
}
-func (m *WithTransformMatcher) FailureMessage(_ interface{}) (message string) {
+func (m *WithTransformMatcher) FailureMessage(_ any) (message string) {
return m.Matcher.FailureMessage(m.transformedValue)
}
-func (m *WithTransformMatcher) NegatedFailureMessage(_ interface{}) (message string) {
+func (m *WithTransformMatcher) NegatedFailureMessage(_ any) (message string) {
return m.Matcher.NegatedFailureMessage(m.transformedValue)
}
-func (m *WithTransformMatcher) MatchMayChangeInTheFuture(_ interface{}) bool {
+func (m *WithTransformMatcher) MatchMayChangeInTheFuture(_ any) bool {
// TODO: Maybe this should always just return true? (Only an issue for non-deterministic transformers.)
//
// Querying the next matcher is fine if the transformer always will return the same value.
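The transform contract noted above (one parameter in, one value plus an optional error out) makes functions like strconv.Atoi usable directly. A minimal sketch (hypothetical test, standard gomega API):

```go
package example_test

import (
	"strconv"
	"testing"

	"github.com/onsi/gomega"
)

func TestWithTransform(t *testing.T) {
	g := gomega.NewWithT(t)
	// Atoi returns (int, error); WithTransform fails the assertion if the
	// transform returns a non-nil error, otherwise matches on the int.
	g.Expect("5").To(gomega.WithTransform(strconv.Atoi, gomega.Equal(5)))
}
```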
diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go
index 30f2beed3..da39b3611 100644
--- a/vendor/github.com/onsi/gomega/types/types.go
+++ b/vendor/github.com/onsi/gomega/types/types.go
@@ -10,20 +10,20 @@ type GomegaFailHandler func(message string, callerSkip ...int)
// A simple *testing.T interface wrapper
type GomegaTestingT interface {
Helper()
- Fatalf(format string, args ...interface{})
+ Fatalf(format string, args ...any)
}
-// Gomega represents an object that can perform synchronous and assynchronous assertions with Gomega matchers
+// Gomega represents an object that can perform synchronous and asynchronous assertions with Gomega matchers
type Gomega interface {
- Ω(actual interface{}, extra ...interface{}) Assertion
- Expect(actual interface{}, extra ...interface{}) Assertion
- ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion
+ Ω(actual any, extra ...any) Assertion
+ Expect(actual any, extra ...any) Assertion
+ ExpectWithOffset(offset int, actual any, extra ...any) Assertion
- Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion
- EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion
+ Eventually(actualOrCtx any, args ...any) AsyncAssertion
+ EventuallyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion
- Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion
- ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion
+ Consistently(actualOrCtx any, args ...any) AsyncAssertion
+ ConsistentlyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion
SetDefaultEventuallyTimeout(time.Duration)
SetDefaultEventuallyPollingInterval(time.Duration)
@@ -37,9 +37,9 @@ type Gomega interface {
//
// For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding-your-own-matchers
type GomegaMatcher interface {
- Match(actual interface{}) (success bool, err error)
- FailureMessage(actual interface{}) (message string)
- NegatedFailureMessage(actual interface{}) (message string)
+ Match(actual any) (success bool, err error)
+ FailureMessage(actual any) (message string)
+ NegatedFailureMessage(actual any) (message string)
}
/*
@@ -52,10 +52,10 @@ For example, a process' exit code can never change. So, gexec's Exit matcher re
for `MatchMayChangeInTheFuture` until the process exits, at which point it returns `false` forevermore.
*/
type OracleMatcher interface {
- MatchMayChangeInTheFuture(actual interface{}) bool
+ MatchMayChangeInTheFuture(actual any) bool
}
-func MatchMayChangeInTheFuture(matcher GomegaMatcher, value interface{}) bool {
+func MatchMayChangeInTheFuture(matcher GomegaMatcher, value any) bool {
oracleMatcher, ok := matcher.(OracleMatcher)
if !ok {
return true
@@ -67,8 +67,8 @@ func MatchMayChangeInTheFuture(matcher GomegaMatcher, value interface{}) bool {
// AsyncAssertions are returned by Eventually and Consistently and enable matchers to be polled repeatedly to ensure
// they are eventually satisfied
type AsyncAssertion interface {
- Should(matcher GomegaMatcher, optionalDescription ...interface{}) bool
- ShouldNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+ Should(matcher GomegaMatcher, optionalDescription ...any) bool
+ ShouldNot(matcher GomegaMatcher, optionalDescription ...any) bool
WithOffset(offset int) AsyncAssertion
WithTimeout(interval time.Duration) AsyncAssertion
@@ -76,18 +76,18 @@ type AsyncAssertion interface {
Within(timeout time.Duration) AsyncAssertion
ProbeEvery(interval time.Duration) AsyncAssertion
WithContext(ctx context.Context) AsyncAssertion
- WithArguments(argsToForward ...interface{}) AsyncAssertion
+ WithArguments(argsToForward ...any) AsyncAssertion
MustPassRepeatedly(count int) AsyncAssertion
}
// Assertions are returned by Ω and Expect and enable assertions against Gomega matchers
type Assertion interface {
- Should(matcher GomegaMatcher, optionalDescription ...interface{}) bool
- ShouldNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+ Should(matcher GomegaMatcher, optionalDescription ...any) bool
+ ShouldNot(matcher GomegaMatcher, optionalDescription ...any) bool
- To(matcher GomegaMatcher, optionalDescription ...interface{}) bool
- ToNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool
- NotTo(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+ To(matcher GomegaMatcher, optionalDescription ...any) bool
+ ToNot(matcher GomegaMatcher, optionalDescription ...any) bool
+ NotTo(matcher GomegaMatcher, optionalDescription ...any) bool
WithOffset(offset int) Assertion
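The GomegaMatcher interface above is the contract this entire diff migrates from interface{} to any; a minimal custom matcher sketch against the new signatures (hypothetical IsEven matcher, not part of the vendored code):

```go
package example

import "fmt"

type isEvenMatcher struct{}

func (m *isEvenMatcher) Match(actual any) (bool, error) {
	n, ok := actual.(int)
	if !ok {
		return false, fmt.Errorf("IsEven matcher expects an int, got %T", actual)
	}
	return n%2 == 0, nil
}

func (m *isEvenMatcher) FailureMessage(actual any) string {
	return fmt.Sprintf("Expected\n\t%v\nto be even", actual)
}

func (m *isEvenMatcher) NegatedFailureMessage(actual any) string {
	return fmt.Sprintf("Expected\n\t%v\nnot to be even", actual)
}
```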
diff --git a/vendor/github.com/openshift/api/.ci-operator.yaml b/vendor/github.com/openshift/api/.ci-operator.yaml
index 64887a08b..7c15f83e3 100644
--- a/vendor/github.com/openshift/api/.ci-operator.yaml
+++ b/vendor/github.com/openshift/api/.ci-operator.yaml
@@ -1,4 +1,4 @@
build_root_image:
name: release
namespace: openshift
- tag: rhel-9-release-golang-1.22-openshift-4.18
+ tag: rhel-9-release-golang-1.23-openshift-4.19
diff --git a/vendor/github.com/openshift/api/.golangci.yaml b/vendor/github.com/openshift/api/.golangci.yaml
new file mode 100644
index 000000000..19746532c
--- /dev/null
+++ b/vendor/github.com/openshift/api/.golangci.yaml
@@ -0,0 +1,24 @@
+linters-settings:
+ custom:
+ kal:
+ type: "module"
+ description: KAL is the Kube-API-Linter and lints Kube-like APIs based on API conventions and best practices.
+ settings:
+ linters:
+ enable:
+ - "maxlength"
+ - "nobools"
+ - "statussubresource"
+ lintersConfig:
+ conditions:
+ isFirstField: Warn
+ useProtobuf: Ignore
+linters:
+ disable-all: true
+ enable:
+ - kal
+issues:
+ # We have a lot of existing issues.
+ # Want to make sure that those adding new fields have an
+ # opportunity to fix them when running the linter locally.
+ max-issues-per-linter: 1000
diff --git a/vendor/github.com/openshift/api/Dockerfile.ocp b/vendor/github.com/openshift/api/Dockerfile.ocp
index f815fa9cf..d485bed24 100644
--- a/vendor/github.com/openshift/api/Dockerfile.ocp
+++ b/vendor/github.com/openshift/api/Dockerfile.ocp
@@ -1,4 +1,4 @@
-FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.22-openshift-4.18 AS builder
+FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.23-openshift-4.19 AS builder
WORKDIR /go/src/github.com/openshift/api
COPY . .
ENV GO_PACKAGE github.com/openshift/api
diff --git a/vendor/github.com/openshift/api/Makefile b/vendor/github.com/openshift/api/Makefile
index 5e6a6b131..79aa36e3a 100644
--- a/vendor/github.com/openshift/api/Makefile
+++ b/vendor/github.com/openshift/api/Makefile
@@ -4,7 +4,7 @@ all: build
update: update-codegen-crds
RUNTIME ?= podman
-RUNTIME_IMAGE_NAME ?= registry.ci.openshift.org/openshift/release:rhel-9-release-golang-1.22-openshift-4.17
+RUNTIME_IMAGE_NAME ?= registry.ci.openshift.org/openshift/release:rhel-9-release-golang-1.23-openshift-4.19
EXCLUDE_DIRS := _output/ dependencymagnet/ hack/ third_party/ tls/ tools/ vendor/ tests/
GO_PACKAGES :=$(addsuffix ...,$(addprefix ./,$(filter-out $(EXCLUDE_DIRS), $(wildcard */))))
@@ -39,6 +39,24 @@ update-codegen-crds: update-scripts
#
#####################
+# When not otherwise set, diff/lint against the local master branch
+PULL_BASE_SHA ?= master
+
+.PHONY: lint
+lint:
+ hack/golangci-lint.sh run --new-from-rev=${PULL_BASE_SHA} ${EXTRA_ARGS}
+
+.PHONY: lint-fix
+lint-fix: EXTRA_ARGS=--fix
+lint-fix: lint
+
+# Ignore the exit code of the fix lint; it will always error as there are unfixed issues
+# that cannot be fixed from historic commits.
+.PHONY: verify-lint-fix
+verify-lint-fix:
+ make lint-fix 2>/dev/null || true
+ git diff --exit-code
+
.PHONY: verify-scripts
verify-scripts:
bash -x hack/verify-deepcopy.sh
@@ -56,7 +74,7 @@ verify-scripts:
hack/verify-promoted-features-pass-tests.sh
.PHONY: verify
-verify: verify-scripts verify-crd-schema verify-codegen-crds
+verify: verify-scripts lint verify-crd-schema verify-codegen-crds
.PHONY: verify-codegen-crds
verify-codegen-crds:
diff --git a/vendor/github.com/openshift/api/README.md b/vendor/github.com/openshift/api/README.md
index 2f503a88d..934bcd329 100644
--- a/vendor/github.com/openshift/api/README.md
+++ b/vendor/github.com/openshift/api/README.md
@@ -111,6 +111,25 @@ conventions](https://github.com/openshift/enhancements/blob/master/CONVENTIONS.m
and then follow the instructions below to regenerate CRDs (if necessary) and
submit a pull request with your new API definitions and generated files.
+New APIs (new CRDs) must be added first as an unstable API (v1alpha1).
+Once the feature is more developed and ready to stabilize, the API can be promoted to v1.
+
+### Why do we start with v1alpha1?
+
+By starting an API as a v1alpha1, we can iterate on the API with the ability to make breaking changes.
+We can make changes to the schema, change validations, change entire types and even serialization without worry.
+
+When changes are made to an API, any existing client code will need to be updated to match.
+If there are breaking changes (such as changing the serialization), then this requires a new version of the API.
+
+If we did not bump the API version for each breaking change, a client, generated prior to the breaking change,
+would panic when it tried to deserialize the new serialization of the API.
+
+If, during development of a feature, we need to make a breaking change, we should move the feature to v1alpha2 (or v1alpha3, etc),
+until we reach a version that we are happy to promote to v1.
+
+Do not make changes to the API when promoting the feature to v1.
+
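+As an illustrative (hypothetical) sketch of why a breaking change forces a version bump, consider a field whose
+serialized shape changes between alpha versions:
+
+```go
+// Hypothetical Widget API, version v1alpha1: replicas is a bare integer.
+package v1alpha1
+
+type WidgetSpec struct {
+	Replicas int32 `json:"replicas"`
+}
+```
+
+```go
+// Hypothetical Widget API, version v1alpha2: the same data moves into a
+// nested object. JSON produced by this shape no longer decodes into the
+// v1alpha1 type, so clients generated against v1alpha1 would break; the
+// version bump signals that.
+package v1alpha2
+
+type WidgetSpec struct {
+	Scaling ScalingSpec `json:"scaling"`
+}
+
+type ScalingSpec struct {
+	Replicas int32 `json:"replicas"`
+}
+```
+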
### Adding a new stable API (v1)
When copying, it matters which `// +foo` markers are two comments blocks up and which are one comment block up.
diff --git a/vendor/github.com/openshift/api/apiserver/v1/types_apirequestcount.go b/vendor/github.com/openshift/api/apiserver/v1/types_apirequestcount.go
index eb4918a66..645d796f7 100644
--- a/vendor/github.com/openshift/api/apiserver/v1/types_apirequestcount.go
+++ b/vendor/github.com/openshift/api/apiserver/v1/types_apirequestcount.go
@@ -35,7 +35,6 @@ type APIRequestCount struct {
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// spec defines the characteristics of the resource.
- // +kubebuilder:validation:Required
// +required
Spec APIRequestCountSpec `json:"spec"`
@@ -126,7 +125,7 @@ type PerNodeAPIRequestLog struct {
// PerUserAPIRequestCount contains logs of a user's requests.
type PerUserAPIRequestCount struct {
- // userName that made the request.
+ // username that made the request.
// +kubebuilder:validation:MaxLength=512
UserName string `json:"username"`
diff --git a/vendor/github.com/openshift/api/apiserver/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/apiserver/v1/zz_generated.swagger_doc_generated.go
index 27d74b6c1..b3d6b615f 100644
--- a/vendor/github.com/openshift/api/apiserver/v1/zz_generated.swagger_doc_generated.go
+++ b/vendor/github.com/openshift/api/apiserver/v1/zz_generated.swagger_doc_generated.go
@@ -74,7 +74,7 @@ func (PerResourceAPIRequestLog) SwaggerDoc() map[string]string {
var map_PerUserAPIRequestCount = map[string]string{
"": "PerUserAPIRequestCount contains logs of a user's requests.",
- "username": "userName that made the request.",
+ "username": "username that made the request.",
"userAgent": "userAgent that made the request. The same user often has multiple binaries which connect (pods with many containers). The different binaries will have different userAgents, but the same user. In addition, we have userAgents with version information embedded and the userName isn't likely to change.",
"requestCount": "requestCount of requests by the user across all verbs.",
"byVerb": "byVerb details by verb.",
diff --git a/vendor/github.com/openshift/api/apps/v1/generated.proto b/vendor/github.com/openshift/api/apps/v1/generated.proto
index 010f36b95..6f50fcaf9 100644
--- a/vendor/github.com/openshift/api/apps/v1/generated.proto
+++ b/vendor/github.com/openshift/api/apps/v1/generated.proto
@@ -15,39 +15,39 @@ option go_package = "github.com/openshift/api/apps/v1";
// CustomDeploymentStrategyParams are the input to the Custom deployment strategy.
message CustomDeploymentStrategyParams {
- // Image specifies a container image which can carry out a deployment.
+ // image specifies a container image which can carry out a deployment.
optional string image = 1;
- // Environment holds the environment which will be given to the container for Image.
+ // environment holds the environment which will be given to the container for Image.
repeated .k8s.io.api.core.v1.EnvVar environment = 2;
- // Command is optional and overrides CMD in the container Image.
+ // command is optional and overrides CMD in the container Image.
repeated string command = 3;
}
// DeploymentCause captures information about a particular cause of a deployment.
message DeploymentCause {
- // Type of the trigger that resulted in the creation of a new deployment
+ // type of the trigger that resulted in the creation of a new deployment
optional string type = 1;
- // ImageTrigger contains the image trigger details, if this trigger was fired based on an image change
+ // imageTrigger contains the image trigger details, if this trigger was fired based on an image change
optional DeploymentCauseImageTrigger imageTrigger = 2;
}
// DeploymentCauseImageTrigger represents details about the cause of a deployment originating
// from an image change trigger
message DeploymentCauseImageTrigger {
- // From is a reference to the changed object which triggered a deployment. The field may have
+ // from is a reference to the changed object which triggered a deployment. The field may have
// the kinds DockerImage, ImageStreamTag, or ImageStreamImage.
optional .k8s.io.api.core.v1.ObjectReference from = 1;
}
// DeploymentCondition describes the state of a deployment config at a certain point.
message DeploymentCondition {
- // Type of deployment condition.
+ // type of deployment condition.
optional string type = 1;
- // Status of the condition, one of True, False, Unknown.
+ // status of the condition, one of True, False, Unknown.
optional string status = 2;
// The last time this condition was updated.
@@ -81,10 +81,10 @@ message DeploymentConfig {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
- // Spec represents a desired deployment state and how to deploy to it.
+ // spec represents a desired deployment state and how to deploy to it.
optional DeploymentConfigSpec spec = 2;
- // Status represents the current deployment state.
+ // status represents the current deployment state.
// +optional
optional DeploymentConfigStatus status = 3;
}
@@ -98,7 +98,7 @@ message DeploymentConfigList {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
- // Items is a list of deployment configs
+ // items is a list of deployment configs
repeated DeploymentConfig items = 2;
}
@@ -107,108 +107,108 @@ message DeploymentConfigList {
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
message DeploymentConfigRollback {
- // Name of the deployment config that will be rolled back.
+ // name of the deployment config that will be rolled back.
optional string name = 1;
- // UpdatedAnnotations is a set of new annotations that will be added in the deployment config.
+ // updatedAnnotations is a set of new annotations that will be added in the deployment config.
map<string, string> updatedAnnotations = 2;
- // Spec defines the options to rollback generation.
+ // spec defines the options to rollback generation.
optional DeploymentConfigRollbackSpec spec = 3;
}
// DeploymentConfigRollbackSpec represents the options for rollback generation.
message DeploymentConfigRollbackSpec {
- // From points to a ReplicationController which is a deployment.
+ // from points to a ReplicationController which is a deployment.
optional .k8s.io.api.core.v1.ObjectReference from = 1;
- // Revision to rollback to. If set to 0, rollback to the last revision.
+ // revision to rollback to. If set to 0, rollback to the last revision.
optional int64 revision = 2;
- // IncludeTriggers specifies whether to include config Triggers.
+ // includeTriggers specifies whether to include config Triggers.
optional bool includeTriggers = 3;
- // IncludeTemplate specifies whether to include the PodTemplateSpec.
+ // includeTemplate specifies whether to include the PodTemplateSpec.
optional bool includeTemplate = 4;
- // IncludeReplicationMeta specifies whether to include the replica count and selector.
+ // includeReplicationMeta specifies whether to include the replica count and selector.
optional bool includeReplicationMeta = 5;
- // IncludeStrategy specifies whether to include the deployment Strategy.
+ // includeStrategy specifies whether to include the deployment Strategy.
optional bool includeStrategy = 6;
}
// DeploymentConfigSpec represents the desired state of the deployment.
message DeploymentConfigSpec {
- // Strategy describes how a deployment is executed.
+ // strategy describes how a deployment is executed.
// +optional
optional DeploymentStrategy strategy = 1;
- // MinReadySeconds is the minimum number of seconds for which a newly created pod should
+ // minReadySeconds is the minimum number of seconds for which a newly created pod should
// be ready without any of its container crashing, for it to be considered available.
// Defaults to 0 (pod will be considered available as soon as it is ready)
optional int32 minReadySeconds = 9;
- // Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers
+ // triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers
// are defined, a new deployment can only occur as a result of an explicit client update to the
// DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger.
// +optional
optional DeploymentTriggerPolicies triggers = 2;
- // Replicas is the number of desired replicas.
+ // replicas is the number of desired replicas.
// +optional
optional int32 replicas = 3;
- // RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks.
+ // revisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks.
// This field is a pointer to allow for differentiation between an explicit zero and not specified.
// Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.)
optional int32 revisionHistoryLimit = 4;
- // Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the
+ // test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the
// deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding
// or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action.
// +optional
optional bool test = 5;
- // Paused indicates that the deployment config is paused resulting in no new deployments on template
+ // paused indicates that the deployment config is paused resulting in no new deployments on template
// changes or changes in the template caused by other triggers.
optional bool paused = 6;
- // Selector is a label query over pods that should match the Replicas count.
+ // selector is a label query over pods that should match the Replicas count.
map<string, string> selector = 7;
- // Template is the object that describes the pod that will be created if
+ // template is the object that describes the pod that will be created if
// insufficient replicas are detected.
optional .k8s.io.api.core.v1.PodTemplateSpec template = 8;
}
// DeploymentConfigStatus represents the current deployment state.
message DeploymentConfigStatus {
- // LatestVersion is used to determine whether the current deployment associated with a deployment
+ // latestVersion is used to determine whether the current deployment associated with a deployment
// config is out of sync.
optional int64 latestVersion = 1;
- // ObservedGeneration is the most recent generation observed by the deployment config controller.
+ // observedGeneration is the most recent generation observed by the deployment config controller.
optional int64 observedGeneration = 2;
- // Replicas is the total number of pods targeted by this deployment config.
+ // replicas is the total number of pods targeted by this deployment config.
optional int32 replicas = 3;
- // UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config
+ // updatedReplicas is the total number of non-terminated pods targeted by this deployment config
// that have the desired template spec.
optional int32 updatedReplicas = 4;
- // AvailableReplicas is the total number of available pods targeted by this deployment config.
+ // availableReplicas is the total number of available pods targeted by this deployment config.
optional int32 availableReplicas = 5;
- // UnavailableReplicas is the total number of unavailable pods targeted by this deployment config.
+ // unavailableReplicas is the total number of unavailable pods targeted by this deployment config.
optional int32 unavailableReplicas = 6;
- // Details are the reasons for the update to this deployment config.
+ // details are the reasons for the update to this deployment config.
// This could be based on a change made by the user or caused by an automatic trigger
optional DeploymentDetails details = 7;
- // Conditions represents the latest available observations of a deployment config's current state.
+ // conditions represents the latest available observations of a deployment config's current state.
// +patchMergeKey=type
// +patchStrategy=merge
repeated DeploymentCondition conditions = 8;
@@ -219,10 +219,10 @@ message DeploymentConfigStatus {
// DeploymentDetails captures information about the causes of a deployment.
message DeploymentDetails {
- // Message is the user specified change message, if this deployment was triggered manually by the user
+ // message is the user specified change message, if this deployment was triggered manually by the user
optional string message = 1;
- // Causes are extended data associated with all the causes for creating a new deployment
+ // causes are extended data associated with all the causes for creating a new deployment
repeated DeploymentCause causes = 2;
}
@@ -241,7 +241,7 @@ message DeploymentLogOptions {
// The container for which to stream logs. Defaults to only container if there is one container in the pod.
optional string container = 1;
- // Follow if true indicates that the build log should be streamed until
+ // follow if true indicates that the build log should be streamed until
// the build terminates.
optional bool follow = 2;
@@ -273,12 +273,12 @@ message DeploymentLogOptions {
// slightly more or slightly less than the specified limit.
optional int64 limitBytes = 8;
- // NoWait if true causes the call to return immediately even if the deployment
+ // nowait if true causes the call to return immediately even if the deployment
// is not available yet. Otherwise the server will wait until the deployment has started.
// TODO: Fix the tag to 'noWait' in v2
optional bool nowait = 9;
- // Version of the deployment for which to view logs.
+ // version of the deployment for which to view logs.
optional int64 version = 10;
}
@@ -287,17 +287,17 @@ message DeploymentLogOptions {
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
message DeploymentRequest {
- // Name of the deployment config for requesting a new deployment.
+ // name of the deployment config for requesting a new deployment.
optional string name = 1;
- // Latest will update the deployment config with the latest state from all triggers.
+ // latest will update the deployment config with the latest state from all triggers.
optional bool latest = 2;
- // Force will try to force a new deployment to run. If the deployment config is paused,
+ // force will try to force a new deployment to run. If the deployment config is paused,
// then setting this to true will return an Invalid error.
optional bool force = 3;
- // ExcludeTriggers instructs the instantiator to avoid processing the specified triggers.
+ // excludeTriggers instructs the instantiator to avoid processing the specified triggers.
// This field overrides the triggers from latest and allows clients to control specific
// logic. This field is ignored if not specified.
repeated string excludeTriggers = 4;
@@ -305,53 +305,53 @@ message DeploymentRequest {
// DeploymentStrategy describes how to perform a deployment.
message DeploymentStrategy {
- // Type is the name of a deployment strategy.
+ // type is the name of a deployment strategy.
// +optional
optional string type = 1;
- // CustomParams are the input to the Custom deployment strategy, and may also
+ // customParams are the input to the Custom deployment strategy, and may also
// be specified for the Recreate and Rolling strategies to customize the execution
// process that runs the deployment.
optional CustomDeploymentStrategyParams customParams = 2;
- // RecreateParams are the input to the Recreate deployment strategy.
+ // recreateParams are the input to the Recreate deployment strategy.
optional RecreateDeploymentStrategyParams recreateParams = 3;
- // RollingParams are the input to the Rolling deployment strategy.
+ // rollingParams are the input to the Rolling deployment strategy.
optional RollingDeploymentStrategyParams rollingParams = 4;
- // Resources contains resource requirements to execute the deployment and any hooks.
+ // resources contains resource requirements to execute the deployment and any hooks.
optional .k8s.io.api.core.v1.ResourceRequirements resources = 5;
- // Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.
+ // labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.
map<string, string> labels = 6;
- // Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.
+ // annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.
map<string, string> annotations = 7;
- // ActiveDeadlineSeconds is the duration in seconds that the deployer pods for this deployment
+ // activeDeadlineSeconds is the duration in seconds that the deployer pods for this deployment
// config may be active on a node before the system actively tries to terminate them.
optional int64 activeDeadlineSeconds = 8;
}
// DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger.
message DeploymentTriggerImageChangeParams {
- // Automatic means that the detection of a new tag value should result in an image update
+ // automatic means that the detection of a new tag value should result in an image update
// inside the pod template.
optional bool automatic = 1;
- // ContainerNames is used to restrict tag updates to the specified set of container names in a pod.
+ // containerNames is used to restrict tag updates to the specified set of container names in a pod.
// If multiple triggers point to the same containers, the resulting behavior is undefined. Future
// API versions will make this a validation error. If ContainerNames does not point to a valid container,
// the trigger will be ignored. Future API versions will make this a validation error.
repeated string containerNames = 2;
- // From is a reference to an image stream tag to watch for changes. From.Name is the only
+ // from is a reference to an image stream tag to watch for changes. From.Name is the only
// required subfield - if From.Namespace is blank, the namespace of the current deployment
// trigger will be used.
optional .k8s.io.api.core.v1.ObjectReference from = 3;
- // LastTriggeredImage is the last image to be triggered.
+ // lastTriggeredImage is the last image to be triggered.
optional string lastTriggeredImage = 4;
}
@@ -366,10 +366,10 @@ message DeploymentTriggerPolicies {
// DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment.
message DeploymentTriggerPolicy {
- // Type of the trigger
+ // type of the trigger
optional string type = 1;
- // ImageChangeParams represents the parameters for the ImageChange trigger.
+ // imageChangeParams represents the parameters for the ImageChange trigger.
optional DeploymentTriggerImageChangeParams imageChangeParams = 2;
}
@@ -377,17 +377,17 @@ message DeploymentTriggerPolicy {
// based on the specified container which is assumed to be part of the
// deployment template.
message ExecNewPodHook {
- // Command is the action command and its arguments.
+ // command is the action command and its arguments.
repeated string command = 1;
- // Env is a set of environment variables to supply to the hook pod's container.
+ // env is a set of environment variables to supply to the hook pod's container.
repeated .k8s.io.api.core.v1.EnvVar env = 2;
- // ContainerName is the name of a container in the deployment pod template
+ // containerName is the name of a container in the deployment pod template
// whose container image will be used for the hook pod's container.
optional string containerName = 3;
- // Volumes is a list of named volumes from the pod template which should be
+ // volumes is a list of named volumes from the pod template which should be
// copied to the hook pod. Volumes names not found in pod spec are ignored.
// An empty list means no volumes will be copied.
repeated string volumes = 4;
@@ -395,32 +395,32 @@ message ExecNewPodHook {
// LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time.
message LifecycleHook {
- // FailurePolicy specifies what action to take if the hook fails.
+ // failurePolicy specifies what action to take if the hook fails.
optional string failurePolicy = 1;
- // ExecNewPod specifies the options for a lifecycle hook backed by a pod.
+ // execNewPod specifies the options for a lifecycle hook backed by a pod.
optional ExecNewPodHook execNewPod = 2;
- // TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.
+ // tagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.
repeated TagImageHook tagImages = 3;
}
// RecreateDeploymentStrategyParams are the input to the Recreate deployment
// strategy.
message RecreateDeploymentStrategyParams {
- // TimeoutSeconds is the time to wait for updates before giving up. If the
+ // timeoutSeconds is the time to wait for updates before giving up. If the
// value is nil, a default will be used.
optional int64 timeoutSeconds = 1;
- // Pre is a lifecycle hook which is executed before the strategy manipulates
+ // pre is a lifecycle hook which is executed before the strategy manipulates
// the deployment. All LifecycleHookFailurePolicy values are supported.
optional LifecycleHook pre = 2;
- // Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new
+ // mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new
// pod is created. All LifecycleHookFailurePolicy values are supported.
optional LifecycleHook mid = 3;
- // Post is a lifecycle hook which is executed after the strategy has
+ // post is a lifecycle hook which is executed after the strategy has
// finished all deployment logic. All LifecycleHookFailurePolicy values are supported.
optional LifecycleHook post = 4;
}
@@ -428,19 +428,19 @@ message RecreateDeploymentStrategyParams {
// RollingDeploymentStrategyParams are the input to the Rolling deployment
// strategy.
message RollingDeploymentStrategyParams {
- // UpdatePeriodSeconds is the time to wait between individual pod updates.
+ // updatePeriodSeconds is the time to wait between individual pod updates.
// If the value is nil, a default will be used.
optional int64 updatePeriodSeconds = 1;
- // IntervalSeconds is the time to wait between polling deployment status
+ // intervalSeconds is the time to wait between polling deployment status
// after update. If the value is nil, a default will be used.
optional int64 intervalSeconds = 2;
- // TimeoutSeconds is the time to wait for updates before giving up. If the
+ // timeoutSeconds is the time to wait for updates before giving up. If the
// value is nil, a default will be used.
optional int64 timeoutSeconds = 3;
- // MaxUnavailable is the maximum number of pods that can be unavailable
+ // maxUnavailable is the maximum number of pods that can be unavailable
// during the update. Value can be an absolute number (ex: 5) or a
// percentage of total pods at the start of update (ex: 10%). Absolute
// number is calculated from percentage by rounding down.
@@ -454,7 +454,7 @@ message RollingDeploymentStrategyParams {
// all times during the update.
optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 4;
- // MaxSurge is the maximum number of pods that can be scheduled above the
+ // maxSurge is the maximum number of pods that can be scheduled above the
// original number of pods. Value can be an absolute number (ex: 5) or a
// percentage of total pods at the start of the update (ex: 10%). Absolute
// number is calculated from percentage by rounding up.
@@ -468,11 +468,11 @@ message RollingDeploymentStrategyParams {
// pods.
optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 5;
- // Pre is a lifecycle hook which is executed before the deployment process
+ // pre is a lifecycle hook which is executed before the deployment process
// begins. All LifecycleHookFailurePolicy values are supported.
optional LifecycleHook pre = 7;
- // Post is a lifecycle hook which is executed after the strategy has
+ // post is a lifecycle hook which is executed after the strategy has
// finished all deployment logic. All LifecycleHookFailurePolicy values
// are supported.
optional LifecycleHook post = 8;
@@ -480,11 +480,11 @@ message RollingDeploymentStrategyParams {
// TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag.
message TagImageHook {
- // ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single
+ // containerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single
// container this value will be defaulted to the name of that container.
optional string containerName = 1;
- // To is the target ImageStreamTag to set the container's image onto.
+ // to is the target ImageStreamTag to set the container's image onto.
optional .k8s.io.api.core.v1.ObjectReference to = 2;
}
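The rewritten comments above now track the proto/JSON field names (image, from, maxSurge, ...) instead of the Go identifiers. A minimal sketch of populating the corresponding vendored Go types for a Rolling strategy; the name "frontend", the 25% windows, and the 600s timeout are illustrative values, not taken from this diff, and the Rolling strategy constant is assumed from the package, not shown in this hunk:

package main

import (
	"fmt"

	appsv1 "github.com/openshift/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	timeout := int64(600)
	// maxUnavailable/maxSurge accept an absolute number or a percentage,
	// per the field comments above.
	maxUnavailable := intstr.FromString("25%")
	maxSurge := intstr.FromString("25%")

	dc := appsv1.DeploymentConfig{
		ObjectMeta: metav1.ObjectMeta{Name: "frontend"},
		Spec: appsv1.DeploymentConfigSpec{
			Replicas: 3,
			Selector: map[string]string{"app": "frontend"},
			Strategy: appsv1.DeploymentStrategy{
				Type: appsv1.DeploymentStrategyTypeRolling,
				RollingParams: &appsv1.RollingDeploymentStrategyParams{
					TimeoutSeconds: &timeout,
					MaxUnavailable: &maxUnavailable,
					MaxSurge:       &maxSurge,
				},
			},
			// Template elided; a real spec needs the pod template here.
			Template: &corev1.PodTemplateSpec{},
		},
	}
	fmt.Println(dc.Name, dc.Spec.Strategy.Type)
}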
diff --git a/vendor/github.com/openshift/api/apps/v1/types.go b/vendor/github.com/openshift/api/apps/v1/types.go
index 1465aea27..619c30e82 100644
--- a/vendor/github.com/openshift/api/apps/v1/types.go
+++ b/vendor/github.com/openshift/api/apps/v1/types.go
@@ -38,81 +38,81 @@ type DeploymentConfig struct {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
- // Spec represents a desired deployment state and how to deploy to it.
+ // spec represents a desired deployment state and how to deploy to it.
Spec DeploymentConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
- // Status represents the current deployment state.
+ // status represents the current deployment state.
// +optional
Status DeploymentConfigStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// DeploymentConfigSpec represents the desired state of the deployment.
type DeploymentConfigSpec struct {
- // Strategy describes how a deployment is executed.
+ // strategy describes how a deployment is executed.
// +optional
Strategy DeploymentStrategy `json:"strategy" protobuf:"bytes,1,opt,name=strategy"`
- // MinReadySeconds is the minimum number of seconds for which a newly created pod should
+ // minReadySeconds is the minimum number of seconds for which a newly created pod should
// be ready without any of its container crashing, for it to be considered available.
// Defaults to 0 (pod will be considered available as soon as it is ready)
MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"`
- // Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers
+ // triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers
// are defined, a new deployment can only occur as a result of an explicit client update to the
// DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger.
// +optional
Triggers DeploymentTriggerPolicies `json:"triggers" protobuf:"bytes,2,rep,name=triggers"`
- // Replicas is the number of desired replicas.
+ // replicas is the number of desired replicas.
// +optional
Replicas int32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"`
- // RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks.
+ // revisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks.
// This field is a pointer to allow for differentiation between an explicit zero and not specified.
// Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.)
RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,4,opt,name=revisionHistoryLimit"`
- // Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the
+ // test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the
// deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding
// or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action.
// +optional
Test bool `json:"test" protobuf:"varint,5,opt,name=test"`
- // Paused indicates that the deployment config is paused resulting in no new deployments on template
+ // paused indicates that the deployment config is paused resulting in no new deployments on template
// changes or changes in the template caused by other triggers.
Paused bool `json:"paused,omitempty" protobuf:"varint,6,opt,name=paused"`
- // Selector is a label query over pods that should match the Replicas count.
+ // selector is a label query over pods that should match the Replicas count.
Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,7,rep,name=selector"`
- // Template is the object that describes the pod that will be created if
+ // template is the object that describes the pod that will be created if
// insufficient replicas are detected.
Template *corev1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,8,opt,name=template"`
}
// DeploymentStrategy describes how to perform a deployment.
type DeploymentStrategy struct {
- // Type is the name of a deployment strategy.
+ // type is the name of a deployment strategy.
// +optional
Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"`
- // CustomParams are the input to the Custom deployment strategy, and may also
+ // customParams are the input to the Custom deployment strategy, and may also
// be specified for the Recreate and Rolling strategies to customize the execution
// process that runs the deployment.
CustomParams *CustomDeploymentStrategyParams `json:"customParams,omitempty" protobuf:"bytes,2,opt,name=customParams"`
- // RecreateParams are the input to the Recreate deployment strategy.
+ // recreateParams are the input to the Recreate deployment strategy.
RecreateParams *RecreateDeploymentStrategyParams `json:"recreateParams,omitempty" protobuf:"bytes,3,opt,name=recreateParams"`
- // RollingParams are the input to the Rolling deployment strategy.
+ // rollingParams are the input to the Rolling deployment strategy.
RollingParams *RollingDeploymentStrategyParams `json:"rollingParams,omitempty" protobuf:"bytes,4,opt,name=rollingParams"`
- // Resources contains resource requirements to execute the deployment and any hooks.
+ // resources contains resource requirements to execute the deployment and any hooks.
Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,5,opt,name=resources"`
- // Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.
+ // labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.
Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,6,rep,name=labels"`
- // Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.
+ // annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.
Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,7,rep,name=annotations"`
- // ActiveDeadlineSeconds is the duration in seconds that the deployer pods for this deployment
+ // activeDeadlineSeconds is the duration in seconds that the deployer pods for this deployment
// config may be active on a node before the system actively tries to terminate them.
ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,8,opt,name=activeDeadlineSeconds"`
}
@@ -131,27 +131,27 @@ const (
// CustomDeploymentStrategyParams are the input to the Custom deployment strategy.
type CustomDeploymentStrategyParams struct {
- // Image specifies a container image which can carry out a deployment.
+ // image specifies a container image which can carry out a deployment.
Image string `json:"image,omitempty" protobuf:"bytes,1,opt,name=image"`
- // Environment holds the environment which will be given to the container for Image.
+ // environment holds the environment which will be given to the container for Image.
Environment []corev1.EnvVar `json:"environment,omitempty" protobuf:"bytes,2,rep,name=environment"`
- // Command is optional and overrides CMD in the container Image.
+ // command is optional and overrides CMD in the container Image.
Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
}
// RecreateDeploymentStrategyParams are the input to the Recreate deployment
// strategy.
type RecreateDeploymentStrategyParams struct {
- // TimeoutSeconds is the time to wait for updates before giving up. If the
+ // timeoutSeconds is the time to wait for updates before giving up. If the
// value is nil, a default will be used.
TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,1,opt,name=timeoutSeconds"`
- // Pre is a lifecycle hook which is executed before the strategy manipulates
+ // pre is a lifecycle hook which is executed before the strategy manipulates
// the deployment. All LifecycleHookFailurePolicy values are supported.
Pre *LifecycleHook `json:"pre,omitempty" protobuf:"bytes,2,opt,name=pre"`
- // Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new
+ // mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new
// pod is created. All LifecycleHookFailurePolicy values are supported.
Mid *LifecycleHook `json:"mid,omitempty" protobuf:"bytes,3,opt,name=mid"`
- // Post is a lifecycle hook which is executed after the strategy has
+ // post is a lifecycle hook which is executed after the strategy has
// finished all deployment logic. All LifecycleHookFailurePolicy values are supported.
Post *LifecycleHook `json:"post,omitempty" protobuf:"bytes,4,opt,name=post"`
}
@@ -159,16 +159,16 @@ type RecreateDeploymentStrategyParams struct {
// RollingDeploymentStrategyParams are the input to the Rolling deployment
// strategy.
type RollingDeploymentStrategyParams struct {
- // UpdatePeriodSeconds is the time to wait between individual pod updates.
+ // updatePeriodSeconds is the time to wait between individual pod updates.
// If the value is nil, a default will be used.
UpdatePeriodSeconds *int64 `json:"updatePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=updatePeriodSeconds"`
- // IntervalSeconds is the time to wait between polling deployment status
+ // intervalSeconds is the time to wait between polling deployment status
// after update. If the value is nil, a default will be used.
IntervalSeconds *int64 `json:"intervalSeconds,omitempty" protobuf:"varint,2,opt,name=intervalSeconds"`
- // TimeoutSeconds is the time to wait for updates before giving up. If the
+ // timeoutSeconds is the time to wait for updates before giving up. If the
// value is nil, a default will be used.
TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"`
- // MaxUnavailable is the maximum number of pods that can be unavailable
+ // maxUnavailable is the maximum number of pods that can be unavailable
// during the update. Value can be an absolute number (ex: 5) or a
// percentage of total pods at the start of update (ex: 10%). Absolute
// number is calculated from percentage by rounding down.
@@ -181,7 +181,7 @@ type RollingDeploymentStrategyParams struct {
// ensuring that at least 70% of original number of pods are available at
// all times during the update.
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,4,opt,name=maxUnavailable"`
- // MaxSurge is the maximum number of pods that can be scheduled above the
+ // maxSurge is the maximum number of pods that can be scheduled above the
// original number of pods. Value can be an absolute number (ex: 5) or a
// percentage of total pods at the start of the update (ex: 10%). Absolute
// number is calculated from percentage by rounding up.
@@ -194,10 +194,10 @@ type RollingDeploymentStrategyParams struct {
// pods running at any time during the update is atmost 130% of original
// pods.
MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,5,opt,name=maxSurge"`
- // Pre is a lifecycle hook which is executed before the deployment process
+ // pre is a lifecycle hook which is executed before the deployment process
// begins. All LifecycleHookFailurePolicy values are supported.
Pre *LifecycleHook `json:"pre,omitempty" protobuf:"bytes,7,opt,name=pre"`
- // Post is a lifecycle hook which is executed after the strategy has
+ // post is a lifecycle hook which is executed after the strategy has
// finished all deployment logic. All LifecycleHookFailurePolicy values
// are supported.
Post *LifecycleHook `json:"post,omitempty" protobuf:"bytes,8,opt,name=post"`
@@ -205,13 +205,13 @@ type RollingDeploymentStrategyParams struct {
// LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time.
type LifecycleHook struct {
- // FailurePolicy specifies what action to take if the hook fails.
+ // failurePolicy specifies what action to take if the hook fails.
FailurePolicy LifecycleHookFailurePolicy `json:"failurePolicy" protobuf:"bytes,1,opt,name=failurePolicy,casttype=LifecycleHookFailurePolicy"`
- // ExecNewPod specifies the options for a lifecycle hook backed by a pod.
+ // execNewPod specifies the options for a lifecycle hook backed by a pod.
ExecNewPod *ExecNewPodHook `json:"execNewPod,omitempty" protobuf:"bytes,2,opt,name=execNewPod"`
- // TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.
+ // tagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.
TagImages []TagImageHook `json:"tagImages,omitempty" protobuf:"bytes,3,rep,name=tagImages"`
}
@@ -231,14 +231,14 @@ const (
// based on the specified container which is assumed to be part of the
// deployment template.
type ExecNewPodHook struct {
- // Command is the action command and its arguments.
+ // command is the action command and its arguments.
Command []string `json:"command" protobuf:"bytes,1,rep,name=command"`
- // Env is a set of environment variables to supply to the hook pod's container.
+ // env is a set of environment variables to supply to the hook pod's container.
Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,2,rep,name=env"`
- // ContainerName is the name of a container in the deployment pod template
+ // containerName is the name of a container in the deployment pod template
// whose container image will be used for the hook pod's container.
ContainerName string `json:"containerName" protobuf:"bytes,3,opt,name=containerName"`
- // Volumes is a list of named volumes from the pod template which should be
+ // volumes is a list of named volumes from the pod template which should be
// copied to the hook pod. Volumes names not found in pod spec are ignored.
// An empty list means no volumes will be copied.
Volumes []string `json:"volumes,omitempty" protobuf:"bytes,4,rep,name=volumes"`
@@ -246,10 +246,10 @@ type ExecNewPodHook struct {
// TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag.
type TagImageHook struct {
- // ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single
+ // containerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single
// container this value will be defaulted to the name of that container.
ContainerName string `json:"containerName" protobuf:"bytes,1,opt,name=containerName"`
- // To is the target ImageStreamTag to set the container's image onto.
+ // to is the target ImageStreamTag to set the container's image onto.
To corev1.ObjectReference `json:"to" protobuf:"bytes,2,opt,name=to"`
}
@@ -264,9 +264,9 @@ func (t DeploymentTriggerPolicies) String() string {
// DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment.
type DeploymentTriggerPolicy struct {
- // Type of the trigger
+ // type of the trigger
Type DeploymentTriggerType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentTriggerType"`
- // ImageChangeParams represents the parameters for the ImageChange trigger.
+ // imageChangeParams represents the parameters for the ImageChange trigger.
ImageChangeParams *DeploymentTriggerImageChangeParams `json:"imageChangeParams,omitempty" protobuf:"bytes,2,opt,name=imageChangeParams"`
}
@@ -284,42 +284,42 @@ const (
// DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger.
type DeploymentTriggerImageChangeParams struct {
- // Automatic means that the detection of a new tag value should result in an image update
+ // automatic means that the detection of a new tag value should result in an image update
// inside the pod template.
Automatic bool `json:"automatic,omitempty" protobuf:"varint,1,opt,name=automatic"`
- // ContainerNames is used to restrict tag updates to the specified set of container names in a pod.
+ // containerNames is used to restrict tag updates to the specified set of container names in a pod.
// If multiple triggers point to the same containers, the resulting behavior is undefined. Future
// API versions will make this a validation error. If ContainerNames does not point to a valid container,
// the trigger will be ignored. Future API versions will make this a validation error.
ContainerNames []string `json:"containerNames,omitempty" protobuf:"bytes,2,rep,name=containerNames"`
- // From is a reference to an image stream tag to watch for changes. From.Name is the only
+ // from is a reference to an image stream tag to watch for changes. From.Name is the only
// required subfield - if From.Namespace is blank, the namespace of the current deployment
// trigger will be used.
From corev1.ObjectReference `json:"from" protobuf:"bytes,3,opt,name=from"`
- // LastTriggeredImage is the last image to be triggered.
+ // lastTriggeredImage is the last image to be triggered.
LastTriggeredImage string `json:"lastTriggeredImage,omitempty" protobuf:"bytes,4,opt,name=lastTriggeredImage"`
}
// DeploymentConfigStatus represents the current deployment state.
type DeploymentConfigStatus struct {
- // LatestVersion is used to determine whether the current deployment associated with a deployment
+ // latestVersion is used to determine whether the current deployment associated with a deployment
// config is out of sync.
LatestVersion int64 `json:"latestVersion" protobuf:"varint,1,opt,name=latestVersion"`
- // ObservedGeneration is the most recent generation observed by the deployment config controller.
+ // observedGeneration is the most recent generation observed by the deployment config controller.
ObservedGeneration int64 `json:"observedGeneration" protobuf:"varint,2,opt,name=observedGeneration"`
- // Replicas is the total number of pods targeted by this deployment config.
+ // replicas is the total number of pods targeted by this deployment config.
Replicas int32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"`
- // UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config
+ // updatedReplicas is the total number of non-terminated pods targeted by this deployment config
// that have the desired template spec.
UpdatedReplicas int32 `json:"updatedReplicas" protobuf:"varint,4,opt,name=updatedReplicas"`
- // AvailableReplicas is the total number of available pods targeted by this deployment config.
+ // availableReplicas is the total number of available pods targeted by this deployment config.
AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,5,opt,name=availableReplicas"`
- // UnavailableReplicas is the total number of unavailable pods targeted by this deployment config.
+ // unavailableReplicas is the total number of unavailable pods targeted by this deployment config.
UnavailableReplicas int32 `json:"unavailableReplicas" protobuf:"varint,6,opt,name=unavailableReplicas"`
- // Details are the reasons for the update to this deployment config.
+ // details are the reasons for the update to this deployment config.
// This could be based on a change made by the user or caused by an automatic trigger
Details *DeploymentDetails `json:"details,omitempty" protobuf:"bytes,7,opt,name=details"`
- // Conditions represents the latest available observations of a deployment config's current state.
+ // conditions represents the latest available observations of a deployment config's current state.
// +patchMergeKey=type
// +patchStrategy=merge
Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,8,rep,name=conditions"`
@@ -329,24 +329,24 @@ type DeploymentConfigStatus struct {
// DeploymentDetails captures information about the causes of a deployment.
type DeploymentDetails struct {
- // Message is the user specified change message, if this deployment was triggered manually by the user
+ // message is the user specified change message, if this deployment was triggered manually by the user
Message string `json:"message,omitempty" protobuf:"bytes,1,opt,name=message"`
- // Causes are extended data associated with all the causes for creating a new deployment
+ // causes are extended data associated with all the causes for creating a new deployment
Causes []DeploymentCause `json:"causes" protobuf:"bytes,2,rep,name=causes"`
}
// DeploymentCause captures information about a particular cause of a deployment.
type DeploymentCause struct {
- // Type of the trigger that resulted in the creation of a new deployment
+ // type of the trigger that resulted in the creation of a new deployment
Type DeploymentTriggerType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentTriggerType"`
- // ImageTrigger contains the image trigger details, if this trigger was fired based on an image change
+ // imageTrigger contains the image trigger details, if this trigger was fired based on an image change
ImageTrigger *DeploymentCauseImageTrigger `json:"imageTrigger,omitempty" protobuf:"bytes,2,opt,name=imageTrigger"`
}
// DeploymentCauseImageTrigger represents details about the cause of a deployment originating
// from an image change trigger
type DeploymentCauseImageTrigger struct {
- // From is a reference to the changed object which triggered a deployment. The field may have
+ // from is a reference to the changed object which triggered a deployment. The field may have
// the kinds DockerImage, ImageStreamTag, or ImageStreamImage.
From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
}
@@ -381,9 +381,9 @@ const (
// DeploymentCondition describes the state of a deployment config at a certain point.
type DeploymentCondition struct {
- // Type of deployment condition.
+ // type of deployment condition.
Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"`
- // Status of the condition, one of True, False, Unknown.
+ // status of the condition, one of True, False, Unknown.
Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"`
// The last time this condition was updated.
LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"`
@@ -411,7 +411,7 @@ type DeploymentConfigList struct {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
- // Items is a list of deployment configs
+ // items is a list of deployment configs
Items []DeploymentConfig `json:"items" protobuf:"bytes,2,rep,name=items"`
}
@@ -426,27 +426,27 @@ type DeploymentConfigList struct {
// +openshift:compatibility-gen:level=1
type DeploymentConfigRollback struct {
metav1.TypeMeta `json:",inline"`
- // Name of the deployment config that will be rolled back.
+ // name of the deployment config that will be rolled back.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
- // UpdatedAnnotations is a set of new annotations that will be added in the deployment config.
+ // updatedAnnotations is a set of new annotations that will be added in the deployment config.
UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty" protobuf:"bytes,2,rep,name=updatedAnnotations"`
- // Spec defines the options to rollback generation.
+ // spec defines the options to rollback generation.
Spec DeploymentConfigRollbackSpec `json:"spec" protobuf:"bytes,3,opt,name=spec"`
}
// DeploymentConfigRollbackSpec represents the options for rollback generation.
type DeploymentConfigRollbackSpec struct {
- // From points to a ReplicationController which is a deployment.
+ // from points to a ReplicationController which is a deployment.
From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
- // Revision to rollback to. If set to 0, rollback to the last revision.
+ // revision to rollback to. If set to 0, rollback to the last revision.
Revision int64 `json:"revision,omitempty" protobuf:"varint,2,opt,name=revision"`
- // IncludeTriggers specifies whether to include config Triggers.
+ // includeTriggers specifies whether to include config Triggers.
IncludeTriggers bool `json:"includeTriggers" protobuf:"varint,3,opt,name=includeTriggers"`
- // IncludeTemplate specifies whether to include the PodTemplateSpec.
+ // includeTemplate specifies whether to include the PodTemplateSpec.
IncludeTemplate bool `json:"includeTemplate" protobuf:"varint,4,opt,name=includeTemplate"`
- // IncludeReplicationMeta specifies whether to include the replica count and selector.
+ // includeReplicationMeta specifies whether to include the replica count and selector.
IncludeReplicationMeta bool `json:"includeReplicationMeta" protobuf:"varint,5,opt,name=includeReplicationMeta"`
- // IncludeStrategy specifies whether to include the deployment Strategy.
+ // includeStrategy specifies whether to include the deployment Strategy.
IncludeStrategy bool `json:"includeStrategy" protobuf:"varint,6,opt,name=includeStrategy"`
}
@@ -461,14 +461,14 @@ type DeploymentConfigRollbackSpec struct {
// +openshift:compatibility-gen:level=1
type DeploymentRequest struct {
metav1.TypeMeta `json:",inline"`
- // Name of the deployment config for requesting a new deployment.
+ // name of the deployment config for requesting a new deployment.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
- // Latest will update the deployment config with the latest state from all triggers.
+ // latest will update the deployment config with the latest state from all triggers.
Latest bool `json:"latest" protobuf:"varint,2,opt,name=latest"`
- // Force will try to force a new deployment to run. If the deployment config is paused,
+ // force will try to force a new deployment to run. If the deployment config is paused,
// then setting this to true will return an Invalid error.
Force bool `json:"force" protobuf:"varint,3,opt,name=force"`
- // ExcludeTriggers instructs the instantiator to avoid processing the specified triggers.
+ // excludeTriggers instructs the instantiator to avoid processing the specified triggers.
// This field overrides the triggers from latest and allows clients to control specific
// logic. This field is ignored if not specified.
ExcludeTriggers []DeploymentTriggerType `json:"excludeTriggers,omitempty" protobuf:"bytes,4,rep,name=excludeTriggers,casttype=DeploymentTriggerType"`
@@ -501,7 +501,7 @@ type DeploymentLogOptions struct {
// The container for which to stream logs. Defaults to only container if there is one container in the pod.
Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"`
- // Follow if true indicates that the build log should be streamed until
+ // follow if true indicates that the build log should be streamed until
// the build terminates.
Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"`
// Return previous deployment logs. Defaults to false.
@@ -527,11 +527,11 @@ type DeploymentLogOptions struct {
// slightly more or slightly less than the specified limit.
LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"`
- // NoWait if true causes the call to return immediately even if the deployment
+ // nowait if true causes the call to return immediately even if the deployment
// is not available yet. Otherwise the server will wait until the deployment has started.
// TODO: Fix the tag to 'noWait' in v2
NoWait bool `json:"nowait,omitempty" protobuf:"varint,9,opt,name=nowait"`
- // Version of the deployment for which to view logs.
+ // version of the deployment for which to view logs.
Version *int64 `json:"version,omitempty" protobuf:"varint,10,opt,name=version"`
}
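Because only comments changed in types.go, the wire format is untouched: the struct tags above still serialize to the same lowercase keys that the rewritten comments now echo. A quick check under that assumption, using hypothetical hook values ("helper", "/bin/echo") and the package's Retry failure-policy constant, which is not shown in this hunk:

package main

import (
	"encoding/json"
	"fmt"

	appsv1 "github.com/openshift/api/apps/v1"
)

func main() {
	hook := appsv1.LifecycleHook{
		FailurePolicy: appsv1.LifecycleHookFailurePolicyRetry,
		ExecNewPod: &appsv1.ExecNewPodHook{
			Command:       []string{"/bin/echo", "pre-hook"},
			ContainerName: "helper",
		},
	}
	out, err := json.Marshal(hook)
	if err != nil {
		panic(err)
	}
	// Prints keys such as "failurePolicy", "execNewPod", "command", and
	// "containerName" -- the lowercase names the comments now use.
	fmt.Println(string(out))
}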
diff --git a/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go
index ab137d59b..55b53c5da 100644
--- a/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go
+++ b/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go
@@ -13,9 +13,9 @@ package v1
// AUTO-GENERATED FUNCTIONS START HERE
var map_CustomDeploymentStrategyParams = map[string]string{
"": "CustomDeploymentStrategyParams are the input to the Custom deployment strategy.",
- "image": "Image specifies a container image which can carry out a deployment.",
- "environment": "Environment holds the environment which will be given to the container for Image.",
- "command": "Command is optional and overrides CMD in the container Image.",
+ "image": "image specifies a container image which can carry out a deployment.",
+ "environment": "environment holds the environment which will be given to the container for Image.",
+ "command": "command is optional and overrides CMD in the container Image.",
}
func (CustomDeploymentStrategyParams) SwaggerDoc() map[string]string {
@@ -24,8 +24,8 @@ func (CustomDeploymentStrategyParams) SwaggerDoc() map[string]string {
var map_DeploymentCause = map[string]string{
"": "DeploymentCause captures information about a particular cause of a deployment.",
- "type": "Type of the trigger that resulted in the creation of a new deployment",
- "imageTrigger": "ImageTrigger contains the image trigger details, if this trigger was fired based on an image change",
+ "type": "type of the trigger that resulted in the creation of a new deployment",
+ "imageTrigger": "imageTrigger contains the image trigger details, if this trigger was fired based on an image change",
}
func (DeploymentCause) SwaggerDoc() map[string]string {
@@ -34,7 +34,7 @@ func (DeploymentCause) SwaggerDoc() map[string]string {
var map_DeploymentCauseImageTrigger = map[string]string{
"": "DeploymentCauseImageTrigger represents details about the cause of a deployment originating from an image change trigger",
- "from": "From is a reference to the changed object which triggered a deployment. The field may have the kinds DockerImage, ImageStreamTag, or ImageStreamImage.",
+ "from": "from is a reference to the changed object which triggered a deployment. The field may have the kinds DockerImage, ImageStreamTag, or ImageStreamImage.",
}
func (DeploymentCauseImageTrigger) SwaggerDoc() map[string]string {
@@ -43,8 +43,8 @@ func (DeploymentCauseImageTrigger) SwaggerDoc() map[string]string {
var map_DeploymentCondition = map[string]string{
"": "DeploymentCondition describes the state of a deployment config at a certain point.",
- "type": "Type of deployment condition.",
- "status": "Status of the condition, one of True, False, Unknown.",
+ "type": "type of deployment condition.",
+ "status": "status of the condition, one of True, False, Unknown.",
"lastUpdateTime": "The last time this condition was updated.",
"lastTransitionTime": "The last time the condition transitioned from one status to another.",
"reason": "The reason for the condition's last transition.",
@@ -58,8 +58,8 @@ func (DeploymentCondition) SwaggerDoc() map[string]string {
var map_DeploymentConfig = map[string]string{
"": "Deployment Configs define the template for a pod and manages deploying new images or configuration changes. A single deployment configuration is usually analogous to a single micro-service. Can support many different deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller.\n\nA deployment is \"triggered\" when its configuration is changed or a tag in an Image Stream is changed. Triggers can be disabled to allow manual control over a deployment. The \"strategy\" determines how the deployment is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment is triggered by any means.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). Deprecated: Use deployments or other means for declarative updates for pods instead.",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
- "spec": "Spec represents a desired deployment state and how to deploy to it.",
- "status": "Status represents the current deployment state.",
+ "spec": "spec represents a desired deployment state and how to deploy to it.",
+ "status": "status represents the current deployment state.",
}
func (DeploymentConfig) SwaggerDoc() map[string]string {
@@ -69,7 +69,7 @@ func (DeploymentConfig) SwaggerDoc() map[string]string {
var map_DeploymentConfigList = map[string]string{
"": "DeploymentConfigList is a collection of deployment configs.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
- "items": "Items is a list of deployment configs",
+ "items": "items is a list of deployment configs",
}
func (DeploymentConfigList) SwaggerDoc() map[string]string {
@@ -78,9 +78,9 @@ func (DeploymentConfigList) SwaggerDoc() map[string]string {
var map_DeploymentConfigRollback = map[string]string{
"": "DeploymentConfigRollback provides the input to rollback generation.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
- "name": "Name of the deployment config that will be rolled back.",
- "updatedAnnotations": "UpdatedAnnotations is a set of new annotations that will be added in the deployment config.",
- "spec": "Spec defines the options to rollback generation.",
+ "name": "name of the deployment config that will be rolled back.",
+ "updatedAnnotations": "updatedAnnotations is a set of new annotations that will be added in the deployment config.",
+ "spec": "spec defines the options to rollback generation.",
}
func (DeploymentConfigRollback) SwaggerDoc() map[string]string {
@@ -89,12 +89,12 @@ func (DeploymentConfigRollback) SwaggerDoc() map[string]string {
var map_DeploymentConfigRollbackSpec = map[string]string{
"": "DeploymentConfigRollbackSpec represents the options for rollback generation.",
- "from": "From points to a ReplicationController which is a deployment.",
- "revision": "Revision to rollback to. If set to 0, rollback to the last revision.",
- "includeTriggers": "IncludeTriggers specifies whether to include config Triggers.",
- "includeTemplate": "IncludeTemplate specifies whether to include the PodTemplateSpec.",
- "includeReplicationMeta": "IncludeReplicationMeta specifies whether to include the replica count and selector.",
- "includeStrategy": "IncludeStrategy specifies whether to include the deployment Strategy.",
+ "from": "from points to a ReplicationController which is a deployment.",
+ "revision": "revision to rollback to. If set to 0, rollback to the last revision.",
+ "includeTriggers": "includeTriggers specifies whether to include config Triggers.",
+ "includeTemplate": "includeTemplate specifies whether to include the PodTemplateSpec.",
+ "includeReplicationMeta": "includeReplicationMeta specifies whether to include the replica count and selector.",
+ "includeStrategy": "includeStrategy specifies whether to include the deployment Strategy.",
}
func (DeploymentConfigRollbackSpec) SwaggerDoc() map[string]string {
@@ -103,15 +103,15 @@ func (DeploymentConfigRollbackSpec) SwaggerDoc() map[string]string {
var map_DeploymentConfigSpec = map[string]string{
"": "DeploymentConfigSpec represents the desired state of the deployment.",
- "strategy": "Strategy describes how a deployment is executed.",
- "minReadySeconds": "MinReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
- "triggers": "Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers are defined, a new deployment can only occur as a result of an explicit client update to the DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger.",
- "replicas": "Replicas is the number of desired replicas.",
- "revisionHistoryLimit": "RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. This field is a pointer to allow for differentiation between an explicit zero and not specified. Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.)",
- "test": "Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action.",
- "paused": "Paused indicates that the deployment config is paused resulting in no new deployments on template changes or changes in the template caused by other triggers.",
- "selector": "Selector is a label query over pods that should match the Replicas count.",
- "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected.",
+ "strategy": "strategy describes how a deployment is executed.",
+ "minReadySeconds": "minReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
+ "triggers": "triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers are defined, a new deployment can only occur as a result of an explicit client update to the DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger.",
+ "replicas": "replicas is the number of desired replicas.",
+ "revisionHistoryLimit": "revisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. This field is a pointer to allow for differentiation between an explicit zero and not specified. Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.)",
+ "test": "test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action.",
+ "paused": "paused indicates that the deployment config is paused resulting in no new deployments on template changes or changes in the template caused by other triggers.",
+ "selector": "selector is a label query over pods that should match the Replicas count.",
+ "template": "template is the object that describes the pod that will be created if insufficient replicas are detected.",
}
func (DeploymentConfigSpec) SwaggerDoc() map[string]string {
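To make the DeploymentConfigSpec fields above concrete, here is a hypothetical minimal spec in Go (names and images are illustrative, not from this diff): a Rolling strategy, a config-change trigger (also the documented default when triggers is null), and a selector/template pair the replicas count is matched against.

package main

import (
	appsv1 "github.com/openshift/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func exampleSpec() appsv1.DeploymentConfigSpec {
	return appsv1.DeploymentConfigSpec{
		Replicas: 3,
		Selector: map[string]string{"app": "demo"}, // label query over pods
		Strategy: appsv1.DeploymentStrategy{Type: appsv1.DeploymentStrategyTypeRolling},
		Triggers: appsv1.DeploymentTriggerPolicies{
			{Type: appsv1.DeploymentTriggerOnConfigChange},
		},
		Template: &corev1.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "demo"}},
			Spec: corev1.PodSpec{
				Containers: []corev1.Container{{Name: "demo", Image: "quay.io/example/demo:latest"}},
			},
		},
	}
}
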
@@ -120,14 +120,14 @@ func (DeploymentConfigSpec) SwaggerDoc() map[string]string {
var map_DeploymentConfigStatus = map[string]string{
"": "DeploymentConfigStatus represents the current deployment state.",
- "latestVersion": "LatestVersion is used to determine whether the current deployment associated with a deployment config is out of sync.",
- "observedGeneration": "ObservedGeneration is the most recent generation observed by the deployment config controller.",
- "replicas": "Replicas is the total number of pods targeted by this deployment config.",
- "updatedReplicas": "UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config that have the desired template spec.",
- "availableReplicas": "AvailableReplicas is the total number of available pods targeted by this deployment config.",
- "unavailableReplicas": "UnavailableReplicas is the total number of unavailable pods targeted by this deployment config.",
- "details": "Details are the reasons for the update to this deployment config. This could be based on a change made by the user or caused by an automatic trigger",
- "conditions": "Conditions represents the latest available observations of a deployment config's current state.",
+ "latestVersion": "latestVersion is used to determine whether the current deployment associated with a deployment config is out of sync.",
+ "observedGeneration": "observedGeneration is the most recent generation observed by the deployment config controller.",
+ "replicas": "replicas is the total number of pods targeted by this deployment config.",
+ "updatedReplicas": "updatedReplicas is the total number of non-terminated pods targeted by this deployment config that have the desired template spec.",
+ "availableReplicas": "availableReplicas is the total number of available pods targeted by this deployment config.",
+ "unavailableReplicas": "unavailableReplicas is the total number of unavailable pods targeted by this deployment config.",
+ "details": "details are the reasons for the update to this deployment config. This could be based on a change made by the user or caused by an automatic trigger",
+ "conditions": "conditions represents the latest available observations of a deployment config's current state.",
"readyReplicas": "Total number of ready pods targeted by this deployment.",
}
@@ -137,8 +137,8 @@ func (DeploymentConfigStatus) SwaggerDoc() map[string]string {
var map_DeploymentDetails = map[string]string{
"": "DeploymentDetails captures information about the causes of a deployment.",
- "message": "Message is the user specified change message, if this deployment was triggered manually by the user",
- "causes": "Causes are extended data associated with all the causes for creating a new deployment",
+ "message": "message is the user specified change message, if this deployment was triggered manually by the user",
+ "causes": "causes are extended data associated with all the causes for creating a new deployment",
}
func (DeploymentDetails) SwaggerDoc() map[string]string {
@@ -156,15 +156,15 @@ func (DeploymentLog) SwaggerDoc() map[string]string {
var map_DeploymentLogOptions = map[string]string{
"": "DeploymentLogOptions is the REST options for a deployment log\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"container": "The container for which to stream logs. Defaults to only container if there is one container in the pod.",
- "follow": "Follow if true indicates that the build log should be streamed until the build terminates.",
+ "follow": "follow if true indicates that the build log should be streamed until the build terminates.",
"previous": "Return previous deployment logs. Defaults to false.",
"sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
"sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
"timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.",
"tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime",
"limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.",
- "nowait": "NoWait if true causes the call to return immediately even if the deployment is not available yet. Otherwise the server will wait until the deployment has started.",
- "version": "Version of the deployment for which to view logs.",
+ "nowait": "nowait if true causes the call to return immediately even if the deployment is not available yet. Otherwise the server will wait until the deployment has started.",
+ "version": "version of the deployment for which to view logs.",
}
func (DeploymentLogOptions) SwaggerDoc() map[string]string {
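A small sketch of how the DeploymentLogOptions fields above combine in practice (the values are illustrative): stream the trailing lines of a specific rollout's logs without blocking on deployment availability.

package main

import appsv1 "github.com/openshift/api/apps/v1"

func exampleLogOptions() *appsv1.DeploymentLogOptions {
	tail := int64(50)
	version := int64(4)
	return &appsv1.DeploymentLogOptions{
		Follow:    true,     // keep streaming until the build terminates
		NoWait:    true,     // return immediately even if the deployment has not started
		TailLines: &tail,    // only the trailing 50 lines
		Version:   &version, // which deployment's logs to view
	}
}
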
@@ -173,10 +173,10 @@ func (DeploymentLogOptions) SwaggerDoc() map[string]string {
var map_DeploymentRequest = map[string]string{
"": "DeploymentRequest is a request to a deployment config for a new deployment.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
- "name": "Name of the deployment config for requesting a new deployment.",
- "latest": "Latest will update the deployment config with the latest state from all triggers.",
- "force": "Force will try to force a new deployment to run. If the deployment config is paused, then setting this to true will return an Invalid error.",
- "excludeTriggers": "ExcludeTriggers instructs the instantiator to avoid processing the specified triggers. This field overrides the triggers from latest and allows clients to control specific logic. This field is ignored if not specified.",
+ "name": "name of the deployment config for requesting a new deployment.",
+ "latest": "latest will update the deployment config with the latest state from all triggers.",
+ "force": "force will try to force a new deployment to run. If the deployment config is paused, then setting this to true will return an Invalid error.",
+ "excludeTriggers": "excludeTriggers instructs the instantiator to avoid processing the specified triggers. This field overrides the triggers from latest and allows clients to control specific logic. This field is ignored if not specified.",
}
func (DeploymentRequest) SwaggerDoc() map[string]string {
@@ -185,14 +185,14 @@ func (DeploymentRequest) SwaggerDoc() map[string]string {
var map_DeploymentStrategy = map[string]string{
"": "DeploymentStrategy describes how to perform a deployment.",
- "type": "Type is the name of a deployment strategy.",
- "customParams": "CustomParams are the input to the Custom deployment strategy, and may also be specified for the Recreate and Rolling strategies to customize the execution process that runs the deployment.",
- "recreateParams": "RecreateParams are the input to the Recreate deployment strategy.",
- "rollingParams": "RollingParams are the input to the Rolling deployment strategy.",
- "resources": "Resources contains resource requirements to execute the deployment and any hooks.",
- "labels": "Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.",
- "annotations": "Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.",
- "activeDeadlineSeconds": "ActiveDeadlineSeconds is the duration in seconds that the deployer pods for this deployment config may be active on a node before the system actively tries to terminate them.",
+ "type": "type is the name of a deployment strategy.",
+ "customParams": "customParams are the input to the Custom deployment strategy, and may also be specified for the Recreate and Rolling strategies to customize the execution process that runs the deployment.",
+ "recreateParams": "recreateParams are the input to the Recreate deployment strategy.",
+ "rollingParams": "rollingParams are the input to the Rolling deployment strategy.",
+ "resources": "resources contains resource requirements to execute the deployment and any hooks.",
+ "labels": "labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.",
+ "annotations": "annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.",
+ "activeDeadlineSeconds": "activeDeadlineSeconds is the duration in seconds that the deployer pods for this deployment config may be active on a node before the system actively tries to terminate them.",
}
func (DeploymentStrategy) SwaggerDoc() map[string]string {
@@ -201,10 +201,10 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string {
var map_DeploymentTriggerImageChangeParams = map[string]string{
"": "DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger.",
- "automatic": "Automatic means that the detection of a new tag value should result in an image update inside the pod template.",
- "containerNames": "ContainerNames is used to restrict tag updates to the specified set of container names in a pod. If multiple triggers point to the same containers, the resulting behavior is undefined. Future API versions will make this a validation error. If ContainerNames does not point to a valid container, the trigger will be ignored. Future API versions will make this a validation error.",
- "from": "From is a reference to an image stream tag to watch for changes. From.Name is the only required subfield - if From.Namespace is blank, the namespace of the current deployment trigger will be used.",
- "lastTriggeredImage": "LastTriggeredImage is the last image to be triggered.",
+ "automatic": "automatic means that the detection of a new tag value should result in an image update inside the pod template.",
+ "containerNames": "containerNames is used to restrict tag updates to the specified set of container names in a pod. If multiple triggers point to the same containers, the resulting behavior is undefined. Future API versions will make this a validation error. If ContainerNames does not point to a valid container, the trigger will be ignored. Future API versions will make this a validation error.",
+ "from": "from is a reference to an image stream tag to watch for changes. From.Name is the only required subfield - if From.Namespace is blank, the namespace of the current deployment trigger will be used.",
+ "lastTriggeredImage": "lastTriggeredImage is the last image to be triggered.",
}
func (DeploymentTriggerImageChangeParams) SwaggerDoc() map[string]string {
@@ -213,8 +213,8 @@ func (DeploymentTriggerImageChangeParams) SwaggerDoc() map[string]string {
var map_DeploymentTriggerPolicy = map[string]string{
"": "DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment.",
- "type": "Type of the trigger",
- "imageChangeParams": "ImageChangeParams represents the parameters for the ImageChange trigger.",
+ "type": "type of the trigger",
+ "imageChangeParams": "imageChangeParams represents the parameters for the ImageChange trigger.",
}
func (DeploymentTriggerPolicy) SwaggerDoc() map[string]string {
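Putting the two trigger types above together, a hedged Go sketch of an ImageChange trigger (the stream and container names are hypothetical): watch one image stream tag and update the matching container when the tag moves.

package main

import (
	appsv1 "github.com/openshift/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
)

func exampleTrigger() appsv1.DeploymentTriggerPolicy {
	return appsv1.DeploymentTriggerPolicy{
		Type: appsv1.DeploymentTriggerOnImageChange,
		ImageChangeParams: &appsv1.DeploymentTriggerImageChangeParams{
			Automatic:      true,             // a new tag value updates the pod template
			ContainerNames: []string{"demo"}, // restrict tag updates to this container
			From: corev1.ObjectReference{ // From.Name is the only required subfield
				Kind: "ImageStreamTag",
				Name: "demo:latest",
			},
		},
	}
}
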
@@ -223,10 +223,10 @@ func (DeploymentTriggerPolicy) SwaggerDoc() map[string]string {
var map_ExecNewPodHook = map[string]string{
"": "ExecNewPodHook is a hook implementation which runs a command in a new pod based on the specified container which is assumed to be part of the deployment template.",
- "command": "Command is the action command and its arguments.",
- "env": "Env is a set of environment variables to supply to the hook pod's container.",
- "containerName": "ContainerName is the name of a container in the deployment pod template whose container image will be used for the hook pod's container.",
- "volumes": "Volumes is a list of named volumes from the pod template which should be copied to the hook pod. Volumes names not found in pod spec are ignored. An empty list means no volumes will be copied.",
+ "command": "command is the action command and its arguments.",
+ "env": "env is a set of environment variables to supply to the hook pod's container.",
+ "containerName": "containerName is the name of a container in the deployment pod template whose container image will be used for the hook pod's container.",
+ "volumes": "volumes is a list of named volumes from the pod template which should be copied to the hook pod. Volumes names not found in pod spec are ignored. An empty list means no volumes will be copied.",
}
func (ExecNewPodHook) SwaggerDoc() map[string]string {
@@ -235,9 +235,9 @@ func (ExecNewPodHook) SwaggerDoc() map[string]string {
var map_LifecycleHook = map[string]string{
"": "LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time.",
- "failurePolicy": "FailurePolicy specifies what action to take if the hook fails.",
- "execNewPod": "ExecNewPod specifies the options for a lifecycle hook backed by a pod.",
- "tagImages": "TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.",
+ "failurePolicy": "failurePolicy specifies what action to take if the hook fails.",
+ "execNewPod": "execNewPod specifies the options for a lifecycle hook backed by a pod.",
+ "tagImages": "tagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.",
}
func (LifecycleHook) SwaggerDoc() map[string]string {
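As a sketch of the hook types above (the command and names are illustrative): a LifecycleHook carries exactly one action, here an execNewPod backed by a container from the deployment's pod template.

package main

import appsv1 "github.com/openshift/api/apps/v1"

func exampleHook() appsv1.LifecycleHook {
	return appsv1.LifecycleHook{
		FailurePolicy: appsv1.LifecycleHookFailurePolicyAbort, // abort the rollout if the hook fails
		ExecNewPod: &appsv1.ExecNewPodHook{
			ContainerName: "demo", // the hook pod borrows this container's image
			Command:       []string{"/bin/sh", "-c", "./migrate.sh"},
			Volumes:       []string{"data"}, // copied from the pod template; unknown names are ignored
		},
	}
}
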
@@ -246,10 +246,10 @@ func (LifecycleHook) SwaggerDoc() map[string]string {
var map_RecreateDeploymentStrategyParams = map[string]string{
"": "RecreateDeploymentStrategyParams are the input to the Recreate deployment strategy.",
- "timeoutSeconds": "TimeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.",
- "pre": "Pre is a lifecycle hook which is executed before the strategy manipulates the deployment. All LifecycleHookFailurePolicy values are supported.",
- "mid": "Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new pod is created. All LifecycleHookFailurePolicy values are supported.",
- "post": "Post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.",
+ "timeoutSeconds": "timeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.",
+ "pre": "pre is a lifecycle hook which is executed before the strategy manipulates the deployment. All LifecycleHookFailurePolicy values are supported.",
+ "mid": "mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new pod is created. All LifecycleHookFailurePolicy values are supported.",
+ "post": "post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.",
}
func (RecreateDeploymentStrategyParams) SwaggerDoc() map[string]string {
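The Recreate parameters above order three optional hooks around the scale-down; a minimal sketch wiring the three hook slots:

package main

import appsv1 "github.com/openshift/api/apps/v1"

func exampleRecreateParams(pre, mid, post *appsv1.LifecycleHook) *appsv1.RecreateDeploymentStrategyParams {
	return &appsv1.RecreateDeploymentStrategyParams{
		// TimeoutSeconds left nil: a platform default is used.
		Pre:  pre,  // before the strategy manipulates the deployment
		Mid:  mid,  // while the deployment is scaled down to zero
		Post: post, // after all deployment logic has finished
	}
}
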
@@ -258,13 +258,13 @@ func (RecreateDeploymentStrategyParams) SwaggerDoc() map[string]string {
var map_RollingDeploymentStrategyParams = map[string]string{
"": "RollingDeploymentStrategyParams are the input to the Rolling deployment strategy.",
- "updatePeriodSeconds": "UpdatePeriodSeconds is the time to wait between individual pod updates. If the value is nil, a default will be used.",
- "intervalSeconds": "IntervalSeconds is the time to wait between polling deployment status after update. If the value is nil, a default will be used.",
- "timeoutSeconds": "TimeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.",
- "maxUnavailable": "MaxUnavailable is the maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). Absolute number is calculated from percentage by rounding down.\n\nThis cannot be 0 if MaxSurge is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the old RC can be scaled down by 30% immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that at least 70% of original number of pods are available at all times during the update.",
- "maxSurge": "MaxSurge is the maximum number of pods that can be scheduled above the original number of pods. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up.\n\nThis cannot be 0 if MaxUnavailable is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the new RC can be scaled up by 30% immediately when the rolling update starts. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of original pods.",
- "pre": "Pre is a lifecycle hook which is executed before the deployment process begins. All LifecycleHookFailurePolicy values are supported.",
- "post": "Post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.",
+ "updatePeriodSeconds": "updatePeriodSeconds is the time to wait between individual pod updates. If the value is nil, a default will be used.",
+ "intervalSeconds": "intervalSeconds is the time to wait between polling deployment status after update. If the value is nil, a default will be used.",
+ "timeoutSeconds": "timeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.",
+ "maxUnavailable": "maxUnavailable is the maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). Absolute number is calculated from percentage by rounding down.\n\nThis cannot be 0 if MaxSurge is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the old RC can be scaled down by 30% immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that at least 70% of original number of pods are available at all times during the update.",
+ "maxSurge": "maxSurge is the maximum number of pods that can be scheduled above the original number of pods. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up.\n\nThis cannot be 0 if MaxUnavailable is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the new RC can be scaled up by 30% immediately when the rolling update starts. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of original pods.",
+ "pre": "pre is a lifecycle hook which is executed before the deployment process begins. All LifecycleHookFailurePolicy values are supported.",
+ "post": "post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.",
}
func (RollingDeploymentStrategyParams) SwaggerDoc() map[string]string {
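The maxUnavailable and maxSurge percentages above round in opposite directions; a worked example for 10 replicas at the documented 30% settings:

package main

import (
	"fmt"
	"math"
)

func main() {
	replicas := 10.0
	maxUnavailable := math.Floor(replicas * 0.30) // percentages round down -> 3
	maxSurge := math.Ceil(replicas * 0.30)        // percentages round up   -> 3
	// At least 70% of the original pods stay available, and the total never
	// exceeds 130% of the original count, matching the field docs above.
	fmt.Printf("min available: %v, max total: %v\n",
		replicas-maxUnavailable, // 7
		replicas+maxSurge)       // 13
}
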
@@ -273,8 +273,8 @@ func (RollingDeploymentStrategyParams) SwaggerDoc() map[string]string {
var map_TagImageHook = map[string]string{
"": "TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag.",
- "containerName": "ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single container this value will be defaulted to the name of that container.",
- "to": "To is the target ImageStreamTag to set the container's image onto.",
+ "containerName": "containerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single container this value will be defaulted to the name of that container.",
+ "to": "to is the target ImageStreamTag to set the container's image onto.",
}
func (TagImageHook) SwaggerDoc() map[string]string {
diff --git a/vendor/github.com/openshift/api/authorization/v1/generated.proto b/vendor/github.com/openshift/api/authorization/v1/generated.proto
index 28e4e8ce6..f7d7b772a 100644
--- a/vendor/github.com/openshift/api/authorization/v1/generated.proto
+++ b/vendor/github.com/openshift/api/authorization/v1/generated.proto
@@ -16,10 +16,10 @@ option go_package = "github.com/openshift/api/authorization/v1";
// Action describes a request to the API server
message Action {
- // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces
+ // namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces
optional string namespace = 1;
- // Verb is one of: get, list, watch, create, update, delete
+ // verb is one of: get, list, watch, create, update, delete
optional string verb = 2;
// Group is the API group of the resource
@@ -30,19 +30,19 @@ message Action {
// Serialized as resourceAPIVersion to avoid confusion with TypeMeta.apiVersion and ObjectMeta.resourceVersion when inlined
optional string resourceAPIVersion = 4;
- // Resource is one of the existing resource types
+ // resource is one of the existing resource types
optional string resource = 5;
- // ResourceName is the name of the resource being requested for a "get" or deleted for a "delete"
+ // resourceName is the name of the resource being requested for a "get" or deleted for a "delete"
optional string resourceName = 6;
- // Path is the path of a non resource URL
+ // path is the path of a non resource URL
optional string path = 8;
- // IsNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)
+ // isNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)
optional bool isNonResourceURL = 9;
- // Content is the actual content of the request for create and update
+ // content is the actual content of the request for create and update
// +kubebuilder:pruning:PreserveUnknownFields
optional .k8s.io.apimachinery.pkg.runtime.RawExtension content = 7;
}
@@ -56,10 +56,10 @@ message ClusterRole {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
- // Rules holds all the PolicyRules for this ClusterRole
+ // rules holds all the PolicyRules for this ClusterRole
repeated PolicyRule rules = 2;
- // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole.
+ // aggregationRule is an optional field that describes how to build the Rules for this ClusterRole.
// If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be
// stomped by the controller.
optional .k8s.io.api.rbac.v1.AggregationRule aggregationRule = 3;
@@ -76,28 +76,28 @@ message ClusterRoleBinding {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
- // UserNames holds all the usernames directly bound to the role.
+ // userNames holds all the usernames directly bound to the role.
// This field should only be specified when supporting legacy clients and servers.
// See Subjects for further details.
// +k8s:conversion-gen=false
// +optional
optional OptionalNames userNames = 2;
- // GroupNames holds all the groups directly bound to the role.
+ // groupNames holds all the groups directly bound to the role.
// This field should only be specified when supporting legacy clients and servers.
// See Subjects for further details.
// +k8s:conversion-gen=false
// +optional
optional OptionalNames groupNames = 3;
- // Subjects hold object references to authorize with this rule.
+ // subjects hold object references to authorize with this rule.
// This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers.
// Thus newer clients that do not need to support backwards compatibility should send
// only fully qualified Subjects and should omit the UserNames and GroupNames fields.
// Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.
repeated .k8s.io.api.core.v1.ObjectReference subjects = 4;
- // RoleRef can only reference the current namespace and the global namespace.
+ // roleRef can only reference the current namespace and the global namespace.
// If the ClusterRoleRef cannot be resolved, the Authorizer must return an error.
// Since Policy is a singleton, this is sufficient knowledge to locate a role.
optional .k8s.io.api.core.v1.ObjectReference roleRef = 5;
@@ -112,7 +112,7 @@ message ClusterRoleBindingList {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
- // Items is a list of ClusterRoleBindings
+ // items is a list of ClusterRoleBindings
repeated ClusterRoleBinding items = 2;
}
@@ -125,14 +125,14 @@ message ClusterRoleList {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
- // Items is a list of ClusterRoles
+ // items is a list of ClusterRoles
repeated ClusterRole items = 2;
}
// GroupRestriction matches a group either by a string match on the group name
// or a label selector applied to group labels.
message GroupRestriction {
- // Groups is a list of groups used to match against an individual user's
+ // groups is a list of groups used to match against an individual user's
// groups. If the user is a member of one of the whitelisted groups, the user
// is allowed to be bound to a role.
// +nullable
@@ -175,14 +175,14 @@ message LocalSubjectAccessReview {
// Action describes the action being tested. The Namespace element is FORCED to the current namespace.
optional Action Action = 1;
- // User is optional. If both User and Groups are empty, the current authenticated user is used.
+ // user is optional. If both User and Groups are empty, the current authenticated user is used.
optional string user = 2;
- // Groups is optional. Groups is the list of groups to which the User belongs.
+ // groups is optional. Groups is the list of groups to which the User belongs.
// +k8s:conversion-gen=false
repeated string groups = 3;
- // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
+ // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
// Nil for a self-SAR, means "use the scopes on this request".
// Nil for a regular SAR, means the same as empty.
// +k8s:conversion-gen=false
@@ -191,37 +191,37 @@ message LocalSubjectAccessReview {
// NamedClusterRole relates a name with a cluster role
message NamedClusterRole {
- // Name is the name of the cluster role
+ // name is the name of the cluster role
optional string name = 1;
- // Role is the cluster role being named
+ // role is the cluster role being named
optional ClusterRole role = 2;
}
// NamedClusterRoleBinding relates a name with a cluster role binding
message NamedClusterRoleBinding {
- // Name is the name of the cluster role binding
+ // name is the name of the cluster role binding
optional string name = 1;
- // RoleBinding is the cluster role binding being named
+ // roleBinding is the cluster role binding being named
optional ClusterRoleBinding roleBinding = 2;
}
// NamedRole relates a Role with a name
message NamedRole {
- // Name is the name of the role
+ // name is the name of the role
optional string name = 1;
- // Role is the role being named
+ // role is the role being named
optional Role role = 2;
}
// NamedRoleBinding relates a role binding with a name
message NamedRoleBinding {
- // Name is the name of the role binding
+ // name is the name of the role binding
optional string name = 1;
- // RoleBinding is the role binding being named
+ // roleBinding is the role binding being named
optional RoleBinding roleBinding = 2;
}
@@ -246,25 +246,25 @@ message OptionalScopes {
// PolicyRule holds information that describes a policy rule, but does not contain information
// about who the rule applies to or which namespace the rule applies to.
message PolicyRule {
- // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.
+ // verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.
repeated string verbs = 1;
- // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports.
+ // attributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports.
// If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.
// +kubebuilder:pruning:PreserveUnknownFields
optional .k8s.io.apimachinery.pkg.runtime.RawExtension attributeRestrictions = 2;
- // APIGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed.
+ // apiGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed.
// That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request
// will be allowed
// +optional
// +nullable
repeated string apiGroups = 3;
- // Resources is a list of resources this rule applies to. ResourceAll represents all resources.
+ // resources is a list of resources this rule applies to. ResourceAll represents all resources.
repeated string resources = 4;
- // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
+ // resourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
repeated string resourceNames = 5;
// NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path
@@ -291,7 +291,7 @@ message ResourceAccessReview {
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
message ResourceAccessReviewResponse {
- // Namespace is the namespace used for the access review
+ // namespace is the namespace used for the access review
optional string namespace = 1;
// UsersSlice is the list of users who can perform the action
@@ -317,7 +317,7 @@ message Role {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
- // Rules holds all the PolicyRules for this Role
+ // rules holds all the PolicyRules for this Role
repeated PolicyRule rules = 2;
}
@@ -332,28 +332,28 @@ message RoleBinding {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
- // UserNames holds all the usernames directly bound to the role.
+ // userNames holds all the usernames directly bound to the role.
// This field should only be specified when supporting legacy clients and servers.
// See Subjects for further details.
// +k8s:conversion-gen=false
// +optional
optional OptionalNames userNames = 2;
- // GroupNames holds all the groups directly bound to the role.
+ // groupNames holds all the groups directly bound to the role.
// This field should only be specified when supporting legacy clients and servers.
// See Subjects for further details.
// +k8s:conversion-gen=false
// +optional
optional OptionalNames groupNames = 3;
- // Subjects hold object references to authorize with this rule.
+ // subjects hold object references to authorize with this rule.
// This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers.
// Thus newer clients that do not need to support backwards compatibility should send
// only fully qualified Subjects and should omit the UserNames and GroupNames fields.
// Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.
repeated .k8s.io.api.core.v1.ObjectReference subjects = 4;
- // RoleRef can only reference the current namespace and the global namespace.
+ // roleRef can only reference the current namespace and the global namespace.
// If the RoleRef cannot be resolved, the Authorizer must return an error.
// Since Policy is a singleton, this is sufficient knowledge to locate a role.
optional .k8s.io.api.core.v1.ObjectReference roleRef = 5;
@@ -368,7 +368,7 @@ message RoleBindingList {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
- // Items is a list of RoleBindings
+ // items is a list of RoleBindings
repeated RoleBinding items = 2;
}
@@ -390,7 +390,7 @@ message RoleBindingRestriction {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
- // Spec defines the matcher.
+ // spec defines the matcher.
optional RoleBindingRestrictionSpec spec = 2;
}
@@ -403,22 +403,22 @@ message RoleBindingRestrictionList {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
- // Items is a list of RoleBindingRestriction objects.
+ // items is a list of RoleBindingRestriction objects.
repeated RoleBindingRestriction items = 2;
}
// RoleBindingRestrictionSpec defines a rolebinding restriction. Exactly one
// field must be non-nil.
message RoleBindingRestrictionSpec {
- // UserRestriction matches against user subjects.
+ // userrestriction matches against user subjects.
// +nullable
optional UserRestriction userrestriction = 1;
- // GroupRestriction matches against group subjects.
+ // grouprestriction matches against group subjects.
// +nullable
optional GroupRestriction grouprestriction = 2;
- // ServiceAccountRestriction matches against service-account subjects.
+ // serviceaccountrestriction matches against service-account subjects.
// +nullable
optional ServiceAccountRestriction serviceaccountrestriction = 3;
}
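Since exactly one matcher may be non-nil in RoleBindingRestrictionSpec, here is a hedged Go sketch using the corresponding vendored types (the user names are illustrative):

package main

import authorizationv1 "github.com/openshift/api/authorization/v1"

func exampleRestriction() authorizationv1.RoleBindingRestrictionSpec {
	return authorizationv1.RoleBindingRestrictionSpec{
		UserRestriction: &authorizationv1.UserRestriction{
			Users: []string{"alice", "bob"}, // literal user names
		},
		// GroupRestriction and ServiceAccountRestriction must stay nil.
	}
}
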
@@ -432,7 +432,7 @@ message RoleList {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
- // Items is a list of Roles
+ // items is a list of Roles
repeated Role items = 2;
}
@@ -445,16 +445,16 @@ message SelfSubjectRulesReview {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 3;
- // Spec adds information about how to conduct the check
+ // spec adds information about how to conduct the check
optional SelfSubjectRulesReviewSpec spec = 1;
- // Status is completed by the server to tell which permissions you have
+ // status is completed by the server to tell which permissions you have
optional SubjectRulesReviewStatus status = 2;
}
// SelfSubjectRulesReviewSpec adds information about how to conduct the check
message SelfSubjectRulesReviewSpec {
- // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
+ // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
// Nil means "use the scopes on this request".
// +k8s:conversion-gen=false
optional OptionalScopes scopes = 1;
@@ -463,10 +463,10 @@ message SelfSubjectRulesReviewSpec {
// ServiceAccountReference specifies a service account and namespace by their
// names.
message ServiceAccountReference {
- // Name is the name of the service account.
+ // name is the name of the service account.
optional string name = 1;
- // Namespace is the namespace of the service account. Service accounts from
+ // namespace is the namespace of the service account. Service accounts from
// inside the whitelisted namespaces are allowed to be bound to roles. If
// Namespace is empty, then the namespace of the RoleBindingRestriction in
// which the ServiceAccountReference is embedded is used.
@@ -477,10 +477,10 @@ message ServiceAccountReference {
// either the service-account name or the name of the service account's
// namespace.
message ServiceAccountRestriction {
- // ServiceAccounts specifies a list of literal service-account names.
+ // serviceaccounts specifies a list of literal service-account names.
repeated ServiceAccountReference serviceaccounts = 1;
- // Namespaces specifies a list of literal namespace names.
+ // namespaces specifies a list of literal namespace names.
repeated string namespaces = 2;
}
@@ -496,14 +496,14 @@ message SubjectAccessReview {
// Action describes the action being tested.
optional Action Action = 1;
- // User is optional. If both User and Groups are empty, the current authenticated user is used.
+ // user is optional. If both User and Groups are empty, the current authenticated user is used.
optional string user = 2;
// GroupsSlice is optional. Groups is the list of groups to which the User belongs.
// +k8s:conversion-gen=false
repeated string groups = 3;
- // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
+ // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
// Nil for a self-SAR, means "use the scopes on this request".
// Nil for a regular SAR, means the same as empty.
// +k8s:conversion-gen=false
@@ -515,16 +515,16 @@ message SubjectAccessReview {
// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
// +openshift:compatibility-gen:level=1
message SubjectAccessReviewResponse {
- // Namespace is the namespace used for the access review
+ // namespace is the namespace used for the access review
optional string namespace = 1;
- // Allowed is required. True if the action would be allowed, false otherwise.
+ // allowed is required. True if the action would be allowed, false otherwise.
optional bool allowed = 2;
- // Reason is optional. It indicates why a request was allowed or denied.
+ // reason is optional. It indicates why a request was allowed or denied.
optional string reason = 3;
- // EvaluationError is an indication that some error occurred during the authorization check.
+ // evaluationError is an indication that some error occurred during the authorization check.
// It is entirely possible to get an error and be able to continue to determine authorization status in spite of it. This is
// most common when a bound role is missing, but enough roles are still present and bound to reason about the request.
optional string evaluationError = 4;
@@ -539,31 +539,31 @@ message SubjectRulesReview {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 3;
- // Spec adds information about how to conduct the check
+ // spec adds information about how to conduct the check
optional SubjectRulesReviewSpec spec = 1;
- // Status is completed by the server to tell which permissions you have
+ // status is completed by the server to tell which permissions you have
optional SubjectRulesReviewStatus status = 2;
}
// SubjectRulesReviewSpec adds information about how to conduct the check
message SubjectRulesReviewSpec {
- // User is optional. At least one of User and Groups must be specified.
+ // user is optional. At least one of User and Groups must be specified.
optional string user = 1;
- // Groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified.
+ // groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified.
repeated string groups = 2;
- // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
+ // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
optional OptionalScopes scopes = 3;
}
// SubjectRulesReviewStatus contains the result of a rules check
message SubjectRulesReviewStatus {
- // Rules is the list of rules (no particular sort) that are allowed for the subject
+ // rules is the list of rules (no particular sort) that are allowed for the subject
repeated PolicyRule rules = 1;
- // EvaluationError can appear in combination with Rules. It means some error happened during evaluation
+ // evaluationError can appear in combination with Rules. It means some error happened during evaluation
// that may have prevented additional rules from being populated.
optional string evaluationError = 2;
}
@@ -572,10 +572,10 @@ message SubjectRulesReviewStatus {
// a string match on the name of a group to which the user belongs, or a label
// selector applied to the user labels.
message UserRestriction {
- // Users specifies a list of literal user names.
+ // users specifies a list of literal user names.
repeated string users = 1;
- // Groups specifies a list of literal group names.
+ // groups specifies a list of literal group names.
// +nullable
repeated string groups = 2;
diff --git a/vendor/github.com/openshift/api/authorization/v1/types.go b/vendor/github.com/openshift/api/authorization/v1/types.go
index e8dd0c29f..bf4071867 100644
--- a/vendor/github.com/openshift/api/authorization/v1/types.go
+++ b/vendor/github.com/openshift/api/authorization/v1/types.go
@@ -28,21 +28,21 @@ const (
// PolicyRule holds information that describes a policy rule, but does not contain information
// about who the rule applies to or which namespace the rule applies to.
type PolicyRule struct {
- // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.
+ // verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.
Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"`
- // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports.
+ // attributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports.
// If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.
// +kubebuilder:pruning:PreserveUnknownFields
AttributeRestrictions kruntime.RawExtension `json:"attributeRestrictions,omitempty" protobuf:"bytes,2,opt,name=attributeRestrictions"`
- // APIGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed.
+ // apiGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed.
// That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request
// will be allowed
// +optional
// +nullable
APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,3,rep,name=apiGroups"`
- // Resources is a list of resources this rule applies to. ResourceAll represents all resources.
+ // resources is a list of resources this rule applies to. ResourceAll represents all resources.
Resources []string `json:"resources" protobuf:"bytes,4,rep,name=resources"`
- // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
+ // resourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,5,rep,name=resourceNames"`
// NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path
// This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different.
@@ -73,7 +73,7 @@ type Role struct {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
- // Rules holds all the PolicyRules for this Role
+ // rules holds all the PolicyRules for this Role
Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"`
}
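A short sketch of the PolicyRule and Role shapes above (the verbs and resources are illustrative): empty apiGroups means both the kubernetes and origin groups are assumed.

package main

import authorizationv1 "github.com/openshift/api/authorization/v1"

func exampleRole() authorizationv1.Role {
	return authorizationv1.Role{
		Rules: []authorizationv1.PolicyRule{{
			Verbs:     []string{"get", "list", "watch"},
			Resources: []string{"pods"},
			// ResourceNames omitted: an empty set allows every name.
		}},
	}
}
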
@@ -102,26 +102,26 @@ type RoleBinding struct {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
- // UserNames holds all the usernames directly bound to the role.
+ // userNames holds all the usernames directly bound to the role.
// This field should only be specified when supporting legacy clients and servers.
// See Subjects for further details.
// +k8s:conversion-gen=false
// +optional
UserNames OptionalNames `json:"userNames" protobuf:"bytes,2,rep,name=userNames"`
- // GroupNames holds all the groups directly bound to the role.
+ // groupNames holds all the groups directly bound to the role.
// This field should only be specified when supporting legacy clients and servers.
// See Subjects for further details.
// +k8s:conversion-gen=false
// +optional
GroupNames OptionalNames `json:"groupNames" protobuf:"bytes,3,rep,name=groupNames"`
- // Subjects hold object references to authorize with this rule.
+ // subjects hold object references to authorize with this rule.
// This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers.
// Thus newer clients that do not need to support backwards compatibility should send
// only fully qualified Subjects and should omit the UserNames and GroupNames fields.
// Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.
Subjects []corev1.ObjectReference `json:"subjects" protobuf:"bytes,4,rep,name=subjects"`
- // RoleRef can only reference the current namespace and the global namespace.
+ // roleRef can only reference the current namespace and the global namespace.
// If the RoleRef cannot be resolved, the Authorizer must return an error.
// Since Policy is a singleton, this is sufficient knowledge to locate a role.
RoleRef corev1.ObjectReference `json:"roleRef" protobuf:"bytes,5,opt,name=roleRef"`
@@ -129,17 +129,17 @@ type RoleBinding struct {
// NamedRole relates a Role with a name
type NamedRole struct {
- // Name is the name of the role
+ // name is the name of the role
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
- // Role is the role being named
+ // role is the role being named
Role Role `json:"role" protobuf:"bytes,2,opt,name=role"`
}
// NamedRoleBinding relates a role binding with a name
type NamedRoleBinding struct {
- // Name is the name of the role binding
+ // name is the name of the role binding
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
- // RoleBinding is the role binding being named
+ // roleBinding is the role binding being named
RoleBinding RoleBinding `json:"roleBinding" protobuf:"bytes,2,opt,name=roleBinding"`
}
@@ -158,16 +158,16 @@ type SelfSubjectRulesReview struct {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,3,opt,name=metadata"`
- // Spec adds information about how to conduct the check
+ // spec adds information about how to conduct the check
Spec SelfSubjectRulesReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"`
- // Status is completed by the server to tell which permissions you have
+ // status is completed by the server to tell which permissions you have
Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
}
// SelfSubjectRulesReviewSpec adds information about how to conduct the check
type SelfSubjectRulesReviewSpec struct {
- // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
+ // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
// Nil means "use the scopes on this request".
// +k8s:conversion-gen=false
Scopes OptionalScopes `json:"scopes" protobuf:"bytes,1,rep,name=scopes"`
@@ -188,28 +188,28 @@ type SubjectRulesReview struct {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,3,opt,name=metadata"`
- // Spec adds information about how to conduct the check
+ // spec adds information about how to conduct the check
Spec SubjectRulesReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"`
- // Status is completed by the server to tell which permissions you have
+ // status is completed by the server to tell which permissions you have
Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
}
// SubjectRulesReviewSpec adds information about how to conduct the check
type SubjectRulesReviewSpec struct {
- // User is optional. At least one of User and Groups must be specified.
+ // user is optional. At least one of User and Groups must be specified.
User string `json:"user" protobuf:"bytes,1,opt,name=user"`
- // Groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified.
+ // groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified.
Groups []string `json:"groups" protobuf:"bytes,2,rep,name=groups"`
- // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
+ // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
Scopes OptionalScopes `json:"scopes" protobuf:"bytes,3,opt,name=scopes"`
}
// SubjectRulesReviewStatus contains the result of a rules check
type SubjectRulesReviewStatus struct {
- // Rules is the list of rules (no particular sort) that are allowed for the subject
+ // rules is the list of rules (no particular sort) that are allowed for the subject
Rules []PolicyRule `json:"rules" protobuf:"bytes,1,rep,name=rules"`
- // EvaluationError can appear in combination with Rules. It means some error happened during evaluation
+ // evaluationError can appear in combination with Rules. It means some error happened during evaluation
// that may have prevented additional rules from being populated.
EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,2,opt,name=evaluationError"`
}
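
As an aside for readers of this vendored API: a minimal sketch of how the rules-review types above fit together, using only fields visible in this diff. The import alias is ours, and we assume OptionalScopes is a []string alias (as its json:"scopes" usage suggests); in real use the server populates Status on create.

package main

import (
    "fmt"

    authorizationv1 "github.com/openshift/api/authorization/v1"
)

func main() {
    review := &authorizationv1.SelfSubjectRulesReview{
        Spec: authorizationv1.SelfSubjectRulesReviewSpec{
            // Empty (non-nil) scopes: "use the unscoped (full) permissions".
            Scopes: authorizationv1.OptionalScopes{},
        },
    }

    // The server completes Status; here we only show how it is read.
    for _, rule := range review.Status.Rules {
        fmt.Printf("verbs=%v resources=%v\n", rule.Verbs, rule.Resources)
    }
    if review.Status.EvaluationError != "" {
        fmt.Println("partial evaluation:", review.Status.EvaluationError)
    }
}
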
@@ -223,7 +223,7 @@ type SubjectRulesReviewStatus struct {
type ResourceAccessReviewResponse struct {
metav1.TypeMeta `json:",inline"`
- // Namespace is the namespace used for the access review
+ // namespace is the namespace used for the access review
Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"`
// UsersSlice is the list of users who can perform the action
// +k8s:conversion-gen=false
@@ -269,13 +269,13 @@ type ResourceAccessReview struct {
type SubjectAccessReviewResponse struct {
metav1.TypeMeta `json:",inline"`
- // Namespace is the namespace used for the access review
+ // namespace is the namespace used for the access review
Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"`
- // Allowed is required. True if the action would be allowed, false otherwise.
+ // allowed is required. True if the action would be allowed, false otherwise.
Allowed bool `json:"allowed" protobuf:"varint,2,opt,name=allowed"`
- // Reason is optional. It indicates why a request was allowed or denied.
+ // reason is optional. It indicates why a request was allowed or denied.
Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
- // EvaluationError is an indication that some error occurred during the authorization check.
+ // evaluationError is an indication that some error occurred during the authorization check.
// It is entirely possible to get an error and be able to continue to determine authorization status in spite of it. This is
// most common when a bound role is missing, but enough roles are still present and bound to reason about the request.
EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,4,opt,name=evaluationError"`
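
The response fields above combine into three practical outcomes: allowed, denied, and denied with incomplete evaluation. A small sketch, reusing the authorizationv1 alias from the previous example:

func describeDecision(resp authorizationv1.SubjectAccessReviewResponse) string {
    switch {
    case resp.Allowed:
        return "allowed: " + resp.Reason
    case resp.EvaluationError != "":
        // Denied, but evaluation was incomplete (e.g. a missing bound role).
        return "denied (partial evaluation): " + resp.EvaluationError
    default:
        return "denied: " + resp.Reason
    }
}
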
@@ -309,12 +309,12 @@ type SubjectAccessReview struct {
// Action describes the action being tested.
Action `json:",inline" protobuf:"bytes,1,opt,name=Action"`
- // User is optional. If both User and Groups are empty, the current authenticated user is used.
+ // user is optional. If both User and Groups are empty, the current authenticated user is used.
User string `json:"user" protobuf:"bytes,2,opt,name=user"`
// GroupsSlice is optional. Groups is the list of groups to which the User belongs.
// +k8s:conversion-gen=false
GroupsSlice []string `json:"groups" protobuf:"bytes,3,rep,name=groups"`
- // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
+ // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
// Nil for a self-SAR, means "use the scopes on this request".
// Nil for a regular SAR, means the same as empty.
// +k8s:conversion-gen=false
@@ -359,12 +359,12 @@ type LocalSubjectAccessReview struct {
// Action describes the action being tested. The Namespace element is FORCED to the current namespace.
Action `json:",inline" protobuf:"bytes,1,opt,name=Action"`
- // User is optional. If both User and Groups are empty, the current authenticated user is used.
+ // user is optional. If both User and Groups are empty, the current authenticated user is used.
User string `json:"user" protobuf:"bytes,2,opt,name=user"`
- // Groups is optional. Groups is the list of groups to which the User belongs.
+ // groups is optional. Groups is the list of groups to which the User belongs.
// +k8s:conversion-gen=false
GroupsSlice []string `json:"groups" protobuf:"bytes,3,rep,name=groups"`
- // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
+ // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
// Nil for a self-SAR, means "use the scopes on this request".
// Nil for a regular SAR, means the same as empty.
// +k8s:conversion-gen=false
@@ -373,9 +373,9 @@ type LocalSubjectAccessReview struct {
// Action describes a request to the API server
type Action struct {
- // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces
+ // namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces
Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"`
- // Verb is one of: get, list, watch, create, update, delete
+ // verb is one of: get, list, watch, create, update, delete
Verb string `json:"verb" protobuf:"bytes,2,opt,name=verb"`
// Group is the API group of the resource
// Serialized as resourceAPIGroup to avoid confusion with the 'groups' field when inlined
@@ -383,15 +383,15 @@ type Action struct {
// Version is the API version of the resource
// Serialized as resourceAPIVersion to avoid confusion with TypeMeta.apiVersion and ObjectMeta.resourceVersion when inlined
Version string `json:"resourceAPIVersion" protobuf:"bytes,4,opt,name=resourceAPIVersion"`
- // Resource is one of the existing resource types
+ // resource is one of the existing resource types
Resource string `json:"resource" protobuf:"bytes,5,opt,name=resource"`
- // ResourceName is the name of the resource being requested for a "get" or deleted for a "delete"
+ // resourceName is the name of the resource being requested for a "get" or deleted for a "delete"
ResourceName string `json:"resourceName" protobuf:"bytes,6,opt,name=resourceName"`
- // Path is the path of a non resource URL
+ // path is the path of a non resource URL
Path string `json:"path" protobuf:"bytes,8,opt,name=path"`
- // IsNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)
+ // isNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)
IsNonResourceURL bool `json:"isNonResourceURL" protobuf:"varint,9,opt,name=isNonResourceURL"`
- // Content is the actual content of the request for create and update
+ // content is the actual content of the request for create and update
// +kubebuilder:pruning:PreserveUnknownFields
Content kruntime.RawExtension `json:"content,omitempty" protobuf:"bytes,7,opt,name=content"`
}
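
A hedged sketch of a SubjectAccessReview built around the Action type above; the namespace, user, and resource names are hypothetical, and the authorizationv1 alias is as before.

func exampleSAR() *authorizationv1.SubjectAccessReview {
    return &authorizationv1.SubjectAccessReview{
        // Action is embedded into the review, per the json:",inline" tag.
        Action: authorizationv1.Action{
            Namespace:    "demo",        // hypothetical
            Verb:         "get",
            Resource:     "pods",
            ResourceName: "example-pod", // hypothetical
        },
        User:        "alice", // hypothetical
        GroupsSlice: []string{"system:authenticated"},
        // Scopes left nil: for a regular SAR this means the same as empty.
    }
}
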
@@ -409,7 +409,7 @@ type RoleBindingList struct {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
- // Items is a list of RoleBindings
+ // items is a list of RoleBindings
Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
}
@@ -426,7 +426,7 @@ type RoleList struct {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
- // Items is a list of Roles
+ // items is a list of Roles
Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"`
}
@@ -445,10 +445,10 @@ type ClusterRole struct {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
- // Rules holds all the PolicyRules for this ClusterRole
+ // rules holds all the PolicyRules for this ClusterRole
Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"`
- // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole.
+ // aggregationRule is an optional field that describes how to build the Rules for this ClusterRole.
// If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be
// stomped by the controller.
AggregationRule *rbacv1.AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"`
@@ -471,26 +471,26 @@ type ClusterRoleBinding struct {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
- // UserNames holds all the usernames directly bound to the role.
+ // userNames holds all the usernames directly bound to the role.
// This field should only be specified when supporting legacy clients and servers.
// See Subjects for further details.
// +k8s:conversion-gen=false
// +optional
UserNames OptionalNames `json:"userNames" protobuf:"bytes,2,rep,name=userNames"`
- // GroupNames holds all the groups directly bound to the role.
+ // groupNames holds all the groups directly bound to the role.
// This field should only be specified when supporting legacy clients and servers.
// See Subjects for further details.
// +k8s:conversion-gen=false
// +optional
GroupNames OptionalNames `json:"groupNames" protobuf:"bytes,3,rep,name=groupNames"`
- // Subjects hold object references to authorize with this rule.
+ // subjects hold object references to authorize with this rule.
// This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers.
// Thus newer clients that do not need to support backwards compatibility should send
// only fully qualified Subjects and should omit the UserNames and GroupNames fields.
// Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.
Subjects []corev1.ObjectReference `json:"subjects" protobuf:"bytes,4,rep,name=subjects"`
- // RoleRef can only reference the current namespace and the global namespace.
+ // roleRef can only reference the current namespace and the global namespace.
// If the ClusterRoleRef cannot be resolved, the Authorizer must return an error.
// Since Policy is a singleton, this is sufficient knowledge to locate a role.
RoleRef corev1.ObjectReference `json:"roleRef" protobuf:"bytes,5,opt,name=roleRef"`
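
Per the comments above, newer clients send fully qualified Subjects and omit the legacy name lists. A sketch, assuming metav1 is k8s.io/apimachinery/pkg/apis/meta/v1 and corev1 is k8s.io/api/core/v1; the binding and subject names are hypothetical:

func exampleCRB() *authorizationv1.ClusterRoleBinding {
    return &authorizationv1.ClusterRoleBinding{
        ObjectMeta: metav1.ObjectMeta{Name: "example-binding"}, // hypothetical
        // Legacy UserNames/GroupNames omitted; send fully qualified Subjects.
        Subjects: []corev1.ObjectReference{
            {Kind: "User", Name: "alice"}, // hypothetical subject
        },
        RoleRef: corev1.ObjectReference{Name: "cluster-admin"},
    }
}
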
@@ -498,17 +498,17 @@ type ClusterRoleBinding struct {
// NamedClusterRole relates a name with a cluster role
type NamedClusterRole struct {
- // Name is the name of the cluster role
+ // name is the name of the cluster role
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
- // Role is the cluster role being named
+ // role is the cluster role being named
Role ClusterRole `json:"role" protobuf:"bytes,2,opt,name=role"`
}
// NamedClusterRoleBinding relates a name with a cluster role binding
type NamedClusterRoleBinding struct {
- // Name is the name of the cluster role binding
+ // name is the name of the cluster role binding
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
- // RoleBinding is the cluster role binding being named
+ // roleBinding is the cluster role binding being named
RoleBinding ClusterRoleBinding `json:"roleBinding" protobuf:"bytes,2,opt,name=roleBinding"`
}
@@ -525,7 +525,7 @@ type ClusterRoleBindingList struct {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
- // Items is a list of ClusterRoleBindings
+ // items is a list of ClusterRoleBindings
Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
}
@@ -542,7 +542,7 @@ type ClusterRoleList struct {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
- // Items is a list of ClusterRoles
+ // items is a list of ClusterRoles
Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"`
}
@@ -569,22 +569,22 @@ type RoleBindingRestriction struct {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
- // Spec defines the matcher.
+ // spec defines the matcher.
Spec RoleBindingRestrictionSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
}
// RoleBindingRestrictionSpec defines a rolebinding restriction. Exactly one
// field must be non-nil.
type RoleBindingRestrictionSpec struct {
- // UserRestriction matches against user subjects.
+ // userrestriction matches against user subjects.
// +nullable
UserRestriction *UserRestriction `json:"userrestriction" protobuf:"bytes,1,opt,name=userrestriction"`
- // GroupRestriction matches against group subjects.
+ // grouprestriction matches against group subjects.
// +nullable
GroupRestriction *GroupRestriction `json:"grouprestriction" protobuf:"bytes,2,opt,name=grouprestriction"`
- // ServiceAccountRestriction matches against service-account subjects.
+ // serviceaccountrestriction matches against service-account subjects.
// +nullable
ServiceAccountRestriction *ServiceAccountRestriction `json:"serviceaccountrestriction" protobuf:"bytes,3,opt,name=serviceaccountrestriction"`
}
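
The "exactly one field must be non-nil" rule stated above is easy to check mechanically; a sketch, assuming "fmt" plus the authorizationv1 alias used earlier:

func validateRestrictionSpec(s authorizationv1.RoleBindingRestrictionSpec) error {
    set := 0
    if s.UserRestriction != nil {
        set++
    }
    if s.GroupRestriction != nil {
        set++
    }
    if s.ServiceAccountRestriction != nil {
        set++
    }
    if set != 1 {
        return fmt.Errorf("exactly one restriction must be set, got %d", set)
    }
    return nil
}
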
@@ -602,7 +602,7 @@ type RoleBindingRestrictionList struct {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
- // Items is a list of RoleBindingRestriction objects.
+ // items is a list of RoleBindingRestriction objects.
Items []RoleBindingRestriction `json:"items" protobuf:"bytes,2,rep,name=items"`
}
@@ -610,10 +610,10 @@ type RoleBindingRestrictionList struct {
// a string match on the name of a group to which the user belongs, or a label
// selector applied to the user labels.
type UserRestriction struct {
- // Users specifies a list of literal user names.
+ // users specifies a list of literal user names.
Users []string `json:"users" protobuf:"bytes,1,rep,name=users"`
- // Groups specifies a list of literal group names.
+ // groups specifies a list of literal group names.
// +nullable
Groups []string `json:"groups" protobuf:"bytes,2,rep,name=groups"`
@@ -625,7 +625,7 @@ type UserRestriction struct {
// GroupRestriction matches a group either by a string match on the group name
// or a label selector applied to group labels.
type GroupRestriction struct {
- // Groups is a list of groups used to match against an individual user's
+ // groups is a list of groups used to match against an individual user's
// groups. If the user is a member of one of the whitelisted groups, the user
// is allowed to be bound to a role.
// +nullable
@@ -640,20 +640,20 @@ type GroupRestriction struct {
// either the service-account name or the name of the service account's
// namespace.
type ServiceAccountRestriction struct {
- // ServiceAccounts specifies a list of literal service-account names.
+ // serviceaccounts specifies a list of literal service-account names.
ServiceAccounts []ServiceAccountReference `json:"serviceaccounts" protobuf:"bytes,1,rep,name=serviceaccounts"`
- // Namespaces specifies a list of literal namespace names.
+ // namespaces specifies a list of literal namespace names.
Namespaces []string `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"`
}
// ServiceAccountReference specifies a service account and namespace by their
// names.
type ServiceAccountReference struct {
- // Name is the name of the service account.
+ // name is the name of the service account.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
- // Namespace is the namespace of the service account. Service accounts from
+ // namespace is the namespace of the service account. Service accounts from
// inside the whitelisted namespaces are allowed to be bound to roles. If
// Namespace is empty, then the namespace of the RoleBindingRestriction in
// which the ServiceAccountReference is embedded is used.
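
A sketch of the namespace-defaulting rule described in the ServiceAccountReference comment; restrictionNS stands in for the namespace of the enclosing RoleBindingRestriction:

func effectiveNamespace(ref authorizationv1.ServiceAccountReference, restrictionNS string) string {
    if ref.Namespace != "" {
        return ref.Namespace
    }
    // An empty Namespace falls back to the RoleBindingRestriction's namespace.
    return restrictionNS
}
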
diff --git a/vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go
index a8f9b374e..a1c28a3ec 100644
--- a/vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go
+++ b/vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go
@@ -13,15 +13,15 @@ package v1
// AUTO-GENERATED FUNCTIONS START HERE
var map_Action = map[string]string{
"": "Action describes a request to the API server",
- "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces",
- "verb": "Verb is one of: get, list, watch, create, update, delete",
+ "namespace": "namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces",
+ "verb": "verb is one of: get, list, watch, create, update, delete",
"resourceAPIGroup": "Group is the API group of the resource Serialized as resourceAPIGroup to avoid confusion with the 'groups' field when inlined",
"resourceAPIVersion": "Version is the API version of the resource Serialized as resourceAPIVersion to avoid confusion with TypeMeta.apiVersion and ObjectMeta.resourceVersion when inlined",
- "resource": "Resource is one of the existing resource types",
- "resourceName": "ResourceName is the name of the resource being requested for a \"get\" or deleted for a \"delete\"",
- "path": "Path is the path of a non resource URL",
- "isNonResourceURL": "IsNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)",
- "content": "Content is the actual content of the request for create and update",
+ "resource": "resource is one of the existing resource types",
+ "resourceName": "resourceName is the name of the resource being requested for a \"get\" or deleted for a \"delete\"",
+ "path": "path is the path of a non resource URL",
+ "isNonResourceURL": "isNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)",
+ "content": "content is the actual content of the request for create and update",
}
func (Action) SwaggerDoc() map[string]string {
@@ -31,8 +31,8 @@ func (Action) SwaggerDoc() map[string]string {
var map_ClusterRole = map[string]string{
"": "ClusterRole is a logical grouping of PolicyRules that can be referenced as a unit by ClusterRoleBindings.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
- "rules": "Rules holds all the PolicyRules for this ClusterRole",
- "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.",
+ "rules": "rules holds all the PolicyRules for this ClusterRole",
+ "aggregationRule": "aggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.",
}
func (ClusterRole) SwaggerDoc() map[string]string {
@@ -42,10 +42,10 @@ func (ClusterRole) SwaggerDoc() map[string]string {
var map_ClusterRoleBinding = map[string]string{
"": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference any ClusterRole in the same namespace or in the global namespace. It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. ClusterRoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces).\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
- "userNames": "UserNames holds all the usernames directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.",
- "groupNames": "GroupNames holds all the groups directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.",
- "subjects": "Subjects hold object references to authorize with this rule. This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.",
- "roleRef": "RoleRef can only reference the current namespace and the global namespace. If the ClusterRoleRef cannot be resolved, the Authorizer must return an error. Since Policy is a singleton, this is sufficient knowledge to locate a role.",
+ "userNames": "userNames holds all the usernames directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.",
+ "groupNames": "groupNames holds all the groups directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.",
+ "subjects": "subjects hold object references to authorize with this rule. This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.",
+ "roleRef": "roleRef can only reference the current namespace and the global namespace. If the ClusterRoleRef cannot be resolved, the Authorizer must return an error. Since Policy is a singleton, this is sufficient knowledge to locate a role.",
}
func (ClusterRoleBinding) SwaggerDoc() map[string]string {
@@ -55,7 +55,7 @@ func (ClusterRoleBinding) SwaggerDoc() map[string]string {
var map_ClusterRoleBindingList = map[string]string{
"": "ClusterRoleBindingList is a collection of ClusterRoleBindings\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
- "items": "Items is a list of ClusterRoleBindings",
+ "items": "items is a list of ClusterRoleBindings",
}
func (ClusterRoleBindingList) SwaggerDoc() map[string]string {
@@ -65,7 +65,7 @@ func (ClusterRoleBindingList) SwaggerDoc() map[string]string {
var map_ClusterRoleList = map[string]string{
"": "ClusterRoleList is a collection of ClusterRoles\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
- "items": "Items is a list of ClusterRoles",
+ "items": "items is a list of ClusterRoles",
}
func (ClusterRoleList) SwaggerDoc() map[string]string {
@@ -74,7 +74,7 @@ func (ClusterRoleList) SwaggerDoc() map[string]string {
var map_GroupRestriction = map[string]string{
"": "GroupRestriction matches a group either by a string match on the group name or a label selector applied to group labels.",
- "groups": "Groups is a list of groups used to match against an individual user's groups. If the user is a member of one of the whitelisted groups, the user is allowed to be bound to a role.",
+ "groups": "groups is a list of groups used to match against an individual user's groups. If the user is a member of one of the whitelisted groups, the user is allowed to be bound to a role.",
"labels": "Selectors specifies a list of label selectors over group labels.",
}
@@ -102,9 +102,9 @@ func (LocalResourceAccessReview) SwaggerDoc() map[string]string {
var map_LocalSubjectAccessReview = map[string]string{
"": "LocalSubjectAccessReview is an object for requesting information about whether a user or group can perform an action in a particular namespace\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
- "user": "User is optional. If both User and Groups are empty, the current authenticated user is used.",
- "groups": "Groups is optional. Groups is the list of groups to which the User belongs.",
- "scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR, means \"use the scopes on this request\". Nil for a regular SAR, means the same as empty.",
+ "user": "user is optional. If both User and Groups are empty, the current authenticated user is used.",
+ "groups": "groups is optional. Groups is the list of groups to which the User belongs.",
+ "scopes": "scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR, means \"use the scopes on this request\". Nil for a regular SAR, means the same as empty.",
}
func (LocalSubjectAccessReview) SwaggerDoc() map[string]string {
@@ -113,8 +113,8 @@ func (LocalSubjectAccessReview) SwaggerDoc() map[string]string {
var map_NamedClusterRole = map[string]string{
"": "NamedClusterRole relates a name with a cluster role",
- "name": "Name is the name of the cluster role",
- "role": "Role is the cluster role being named",
+ "name": "name is the name of the cluster role",
+ "role": "role is the cluster role being named",
}
func (NamedClusterRole) SwaggerDoc() map[string]string {
@@ -123,8 +123,8 @@ func (NamedClusterRole) SwaggerDoc() map[string]string {
var map_NamedClusterRoleBinding = map[string]string{
"": "NamedClusterRoleBinding relates a name with a cluster role binding",
- "name": "Name is the name of the cluster role binding",
- "roleBinding": "RoleBinding is the cluster role binding being named",
+ "name": "name is the name of the cluster role binding",
+ "roleBinding": "roleBinding is the cluster role binding being named",
}
func (NamedClusterRoleBinding) SwaggerDoc() map[string]string {
@@ -133,8 +133,8 @@ func (NamedClusterRoleBinding) SwaggerDoc() map[string]string {
var map_NamedRole = map[string]string{
"": "NamedRole relates a Role with a name",
- "name": "Name is the name of the role",
- "role": "Role is the role being named",
+ "name": "name is the name of the role",
+ "role": "role is the role being named",
}
func (NamedRole) SwaggerDoc() map[string]string {
@@ -143,8 +143,8 @@ func (NamedRole) SwaggerDoc() map[string]string {
var map_NamedRoleBinding = map[string]string{
"": "NamedRoleBinding relates a role binding with a name",
- "name": "Name is the name of the role binding",
- "roleBinding": "RoleBinding is the role binding being named",
+ "name": "name is the name of the role binding",
+ "roleBinding": "roleBinding is the role binding being named",
}
func (NamedRoleBinding) SwaggerDoc() map[string]string {
@@ -153,11 +153,11 @@ func (NamedRoleBinding) SwaggerDoc() map[string]string {
var map_PolicyRule = map[string]string{
"": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.",
- "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.",
- "attributeRestrictions": "AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.",
- "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request will be allowed",
- "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.",
- "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.",
+ "verbs": "verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.",
+ "attributeRestrictions": "attributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.",
+ "apiGroups": "apiGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request will be allowed",
+ "resources": "resources is a list of resources this rule applies to. ResourceAll represents all resources.",
+ "resourceNames": "resourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.",
"nonResourceURLs": "NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different.",
}
@@ -176,7 +176,7 @@ func (ResourceAccessReview) SwaggerDoc() map[string]string {
var map_ResourceAccessReviewResponse = map[string]string{
"": "ResourceAccessReviewResponse describes who can perform the action\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
- "namespace": "Namespace is the namespace used for the access review",
+ "namespace": "namespace is the namespace used for the access review",
"users": "UsersSlice is the list of users who can perform the action",
"groups": "GroupsSlice is the list of groups who can perform the action",
"evalutionError": "EvaluationError is an indication that some error occurred during resolution, but partial results can still be returned. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is most common when a bound role is missing, but enough roles are still present and bound to reason about the request.",
@@ -189,7 +189,7 @@ func (ResourceAccessReviewResponse) SwaggerDoc() map[string]string {
var map_Role = map[string]string{
"": "Role is a logical grouping of PolicyRules that can be referenced as a unit by RoleBindings.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
- "rules": "Rules holds all the PolicyRules for this Role",
+ "rules": "rules holds all the PolicyRules for this Role",
}
func (Role) SwaggerDoc() map[string]string {
@@ -199,10 +199,10 @@ func (Role) SwaggerDoc() map[string]string {
var map_RoleBinding = map[string]string{
"": "RoleBinding references a Role, but not contain it. It can reference any Role in the same namespace or in the global namespace. It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces).\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
- "userNames": "UserNames holds all the usernames directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.",
- "groupNames": "GroupNames holds all the groups directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.",
- "subjects": "Subjects hold object references to authorize with this rule. This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.",
- "roleRef": "RoleRef can only reference the current namespace and the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. Since Policy is a singleton, this is sufficient knowledge to locate a role.",
+ "userNames": "userNames holds all the usernames directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.",
+ "groupNames": "groupNames holds all the groups directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.",
+ "subjects": "subjects hold object references to authorize with this rule. This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.",
+ "roleRef": "roleRef can only reference the current namespace and the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. Since Policy is a singleton, this is sufficient knowledge to locate a role.",
}
func (RoleBinding) SwaggerDoc() map[string]string {
@@ -212,7 +212,7 @@ func (RoleBinding) SwaggerDoc() map[string]string {
var map_RoleBindingList = map[string]string{
"": "RoleBindingList is a collection of RoleBindings\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
- "items": "Items is a list of RoleBindings",
+ "items": "items is a list of RoleBindings",
}
func (RoleBindingList) SwaggerDoc() map[string]string {
@@ -222,7 +222,7 @@ func (RoleBindingList) SwaggerDoc() map[string]string {
var map_RoleBindingRestriction = map[string]string{
"": "RoleBindingRestriction is an object that can be matched against a subject (user, group, or service account) to determine whether rolebindings on that subject are allowed in the namespace to which the RoleBindingRestriction belongs. If any one of those RoleBindingRestriction objects matches a subject, rolebindings on that subject in the namespace are allowed.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
- "spec": "Spec defines the matcher.",
+ "spec": "spec defines the matcher.",
}
func (RoleBindingRestriction) SwaggerDoc() map[string]string {
@@ -232,7 +232,7 @@ func (RoleBindingRestriction) SwaggerDoc() map[string]string {
var map_RoleBindingRestrictionList = map[string]string{
"": "RoleBindingRestrictionList is a collection of RoleBindingRestriction objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
- "items": "Items is a list of RoleBindingRestriction objects.",
+ "items": "items is a list of RoleBindingRestriction objects.",
}
func (RoleBindingRestrictionList) SwaggerDoc() map[string]string {
@@ -241,9 +241,9 @@ func (RoleBindingRestrictionList) SwaggerDoc() map[string]string {
var map_RoleBindingRestrictionSpec = map[string]string{
"": "RoleBindingRestrictionSpec defines a rolebinding restriction. Exactly one field must be non-nil.",
- "userrestriction": "UserRestriction matches against user subjects.",
- "grouprestriction": "GroupRestriction matches against group subjects.",
- "serviceaccountrestriction": "ServiceAccountRestriction matches against service-account subjects.",
+ "userrestriction": "userrestriction matches against user subjects.",
+ "grouprestriction": "grouprestriction matches against group subjects.",
+ "serviceaccountrestriction": "serviceaccountrestriction matches against service-account subjects.",
}
func (RoleBindingRestrictionSpec) SwaggerDoc() map[string]string {
@@ -253,7 +253,7 @@ func (RoleBindingRestrictionSpec) SwaggerDoc() map[string]string {
var map_RoleList = map[string]string{
"": "RoleList is a collection of Roles\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
- "items": "Items is a list of Roles",
+ "items": "items is a list of Roles",
}
func (RoleList) SwaggerDoc() map[string]string {
@@ -263,8 +263,8 @@ func (RoleList) SwaggerDoc() map[string]string {
var map_SelfSubjectRulesReview = map[string]string{
"": "SelfSubjectRulesReview is a resource you can create to determine which actions you can perform in a namespace\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
- "spec": "Spec adds information about how to conduct the check",
- "status": "Status is completed by the server to tell which permissions you have",
+ "spec": "spec adds information about how to conduct the check",
+ "status": "status is completed by the server to tell which permissions you have",
}
func (SelfSubjectRulesReview) SwaggerDoc() map[string]string {
@@ -273,7 +273,7 @@ func (SelfSubjectRulesReview) SwaggerDoc() map[string]string {
var map_SelfSubjectRulesReviewSpec = map[string]string{
"": "SelfSubjectRulesReviewSpec adds information about how to conduct the check",
- "scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil means \"use the scopes on this request\".",
+ "scopes": "scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil means \"use the scopes on this request\".",
}
func (SelfSubjectRulesReviewSpec) SwaggerDoc() map[string]string {
@@ -282,8 +282,8 @@ func (SelfSubjectRulesReviewSpec) SwaggerDoc() map[string]string {
var map_ServiceAccountReference = map[string]string{
"": "ServiceAccountReference specifies a service account and namespace by their names.",
- "name": "Name is the name of the service account.",
- "namespace": "Namespace is the namespace of the service account. Service accounts from inside the whitelisted namespaces are allowed to be bound to roles. If Namespace is empty, then the namespace of the RoleBindingRestriction in which the ServiceAccountReference is embedded is used.",
+ "name": "name is the name of the service account.",
+ "namespace": "namespace is the namespace of the service account. Service accounts from inside the whitelisted namespaces are allowed to be bound to roles. If Namespace is empty, then the namespace of the RoleBindingRestriction in which the ServiceAccountReference is embedded is used.",
}
func (ServiceAccountReference) SwaggerDoc() map[string]string {
@@ -292,8 +292,8 @@ func (ServiceAccountReference) SwaggerDoc() map[string]string {
var map_ServiceAccountRestriction = map[string]string{
"": "ServiceAccountRestriction matches a service account by a string match on either the service-account name or the name of the service account's namespace.",
- "serviceaccounts": "ServiceAccounts specifies a list of literal service-account names.",
- "namespaces": "Namespaces specifies a list of literal namespace names.",
+ "serviceaccounts": "serviceaccounts specifies a list of literal service-account names.",
+ "namespaces": "namespaces specifies a list of literal namespace names.",
}
func (ServiceAccountRestriction) SwaggerDoc() map[string]string {
@@ -303,9 +303,9 @@ func (ServiceAccountRestriction) SwaggerDoc() map[string]string {
var map_SubjectAccessReview = map[string]string{
"": "SubjectAccessReview is an object for requesting information about whether a user or group can perform an action\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
- "user": "User is optional. If both User and Groups are empty, the current authenticated user is used.",
+ "user": "user is optional. If both User and Groups are empty, the current authenticated user is used.",
"groups": "GroupsSlice is optional. Groups is the list of groups to which the User belongs.",
- "scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR, means \"use the scopes on this request\". Nil for a regular SAR, means the same as empty.",
+ "scopes": "scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR, means \"use the scopes on this request\". Nil for a regular SAR, means the same as empty.",
}
func (SubjectAccessReview) SwaggerDoc() map[string]string {
@@ -314,10 +314,10 @@ func (SubjectAccessReview) SwaggerDoc() map[string]string {
var map_SubjectAccessReviewResponse = map[string]string{
"": "SubjectAccessReviewResponse describes whether or not a user or group can perform an action\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
- "namespace": "Namespace is the namespace used for the access review",
- "allowed": "Allowed is required. True if the action would be allowed, false otherwise.",
- "reason": "Reason is optional. It indicates why a request was allowed or denied.",
- "evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is most common when a bound role is missing, but enough roles are still present and bound to reason about the request.",
+ "namespace": "namespace is the namespace used for the access review",
+ "allowed": "allowed is required. True if the action would be allowed, false otherwise.",
+ "reason": "reason is optional. It indicates why a request was allowed or denied.",
+ "evaluationError": "evaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is most common when a bound role is missing, but enough roles are still present and bound to reason about the request.",
}
func (SubjectAccessReviewResponse) SwaggerDoc() map[string]string {
@@ -327,8 +327,8 @@ func (SubjectAccessReviewResponse) SwaggerDoc() map[string]string {
var map_SubjectRulesReview = map[string]string{
"": "SubjectRulesReview is a resource you can create to determine which actions another user can perform in a namespace\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
- "spec": "Spec adds information about how to conduct the check",
- "status": "Status is completed by the server to tell which permissions you have",
+ "spec": "spec adds information about how to conduct the check",
+ "status": "status is completed by the server to tell which permissions you have",
}
func (SubjectRulesReview) SwaggerDoc() map[string]string {
@@ -337,9 +337,9 @@ func (SubjectRulesReview) SwaggerDoc() map[string]string {
var map_SubjectRulesReviewSpec = map[string]string{
"": "SubjectRulesReviewSpec adds information about how to conduct the check",
- "user": "User is optional. At least one of User and Groups must be specified.",
- "groups": "Groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified.",
- "scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\".",
+ "user": "user is optional. At least one of User and Groups must be specified.",
+ "groups": "groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified.",
+ "scopes": "scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\".",
}
func (SubjectRulesReviewSpec) SwaggerDoc() map[string]string {
@@ -348,8 +348,8 @@ func (SubjectRulesReviewSpec) SwaggerDoc() map[string]string {
var map_SubjectRulesReviewStatus = map[string]string{
"": "SubjectRulesReviewStatus is contains the result of a rules check",
- "rules": "Rules is the list of rules (no particular sort) that are allowed for the subject",
- "evaluationError": "EvaluationError can appear in combination with Rules. It means some error happened during evaluation that may have prevented additional rules from being populated.",
+ "rules": "rules is the list of rules (no particular sort) that are allowed for the subject",
+ "evaluationError": "evaluationError can appear in combination with Rules. It means some error happened during evaluation that may have prevented additional rules from being populated.",
}
func (SubjectRulesReviewStatus) SwaggerDoc() map[string]string {
@@ -358,8 +358,8 @@ func (SubjectRulesReviewStatus) SwaggerDoc() map[string]string {
var map_UserRestriction = map[string]string{
"": "UserRestriction matches a user either by a string match on the user name, a string match on the name of a group to which the user belongs, or a label selector applied to the user labels.",
- "users": "Users specifies a list of literal user names.",
- "groups": "Groups specifies a list of literal group names.",
+ "users": "users specifies a list of literal user names.",
+ "groups": "groups specifies a list of literal group names.",
"labels": "Selectors specifies a list of label selectors over user labels.",
}
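
Every generated map above is keyed by JSON field name, with the empty-string key holding the type-level description, and each type gains a SwaggerDoc() method. A sketch of consuming them generically through the implicit interface those methods satisfy:

// Satisfied by every generated type in this file, e.g. authorizationv1.Action.
type swaggerDocumented interface {
    SwaggerDoc() map[string]string
}

func fieldDoc(obj swaggerDocumented, jsonField string) string {
    // Pass "" for the type-level description.
    return obj.SwaggerDoc()[jsonField]
}

// fieldDoc(authorizationv1.Action{}, "verb") would return
// "verb is one of: get, list, watch, create, update, delete".
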
diff --git a/vendor/github.com/openshift/api/build/v1/generated.proto b/vendor/github.com/openshift/api/build/v1/generated.proto
index b71670f4e..92ae73426 100644
--- a/vendor/github.com/openshift/api/build/v1/generated.proto
+++ b/vendor/github.com/openshift/api/build/v1/generated.proto
@@ -81,10 +81,10 @@ message Build {
// BuildCondition describes the state of a build at a certain point.
message BuildCondition {
- // Type of build condition.
+ // type of build condition.
optional string type = 1;
- // Status of the condition, one of True, False, Unknown.
+ // status of the condition, one of True, False, Unknown.
optional string status = 2;
// The last time this condition was updated.
@@ -141,7 +141,7 @@ message BuildConfigSpec {
// +optional
repeated BuildTriggerPolicy triggers = 1;
- // RunPolicy describes how the new build created from this build
+ // runPolicy describes how the new build created from this build
// configuration will be scheduled for execution.
// This is optional, if not specified we default to "Serial".
optional string runPolicy = 2;
@@ -165,7 +165,7 @@ message BuildConfigStatus {
// lastVersion is used to inform about number of last triggered build.
optional int64 lastVersion = 1;
- // ImageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec,
+ // imageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec,
// including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry
// in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger.
repeated ImageChangeTriggerStatus imageChangeTriggers = 2;
@@ -231,7 +231,7 @@ message BuildLogOptions {
// slightly more or slightly less than the specified limit.
optional int64 limitBytes = 8;
- // noWait if true causes the call to return immediately even if the build
+ // nowait if true causes the call to return immediately even if the build
// is not available yet. Otherwise the server will wait until the build has started.
// TODO: Fix the tag to 'noWait' in v2
optional bool nowait = 9;
@@ -259,7 +259,7 @@ message BuildOutput {
// the build unless Namespace is specified.
optional .k8s.io.api.core.v1.ObjectReference to = 1;
- // PushSecret is the name of a Secret that would be used for setting
+ // pushSecret is the name of a Secret that would be used for setting
// up the authentication for executing the Docker push to authentication
// enabled Docker Registry (or Docker Hub).
optional .k8s.io.api.core.v1.LocalObjectReference pushSecret = 2;
@@ -392,10 +392,10 @@ message BuildRequest {
// build configuration and contains information about those triggers.
repeated BuildTriggerCause triggeredBy = 8;
- // DockerStrategyOptions contains additional docker-strategy specific options for the build
+ // dockerStrategyOptions contains additional docker-strategy specific options for the build
optional DockerStrategyOptions dockerStrategyOptions = 9;
- // SourceStrategyOptions contains additional source-strategy specific options for the build
+ // sourceStrategyOptions contains additional source-strategy specific options for the build
optional SourceStrategyOptions sourceStrategyOptions = 10;
}
@@ -510,7 +510,7 @@ message BuildStatus {
// logSnippet is the last few lines of the build log. This value is only set for builds that failed.
optional string logSnippet = 12;
- // Conditions represents the latest available observations of a build's current state.
+ // conditions represents the latest available observations of a build's current state.
// +patchMergeKey=type
// +patchStrategy=merge
repeated BuildCondition conditions = 13;
@@ -550,7 +550,7 @@ message BuildStrategy {
// customStrategy holds the parameters to the Custom build strategy
optional CustomBuildStrategy customStrategy = 4;
- // JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy.
+ // jenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy.
// Deprecated: use OpenShift Pipelines
optional JenkinsPipelineBuildStrategy jenkinsPipelineStrategy = 5;
}
@@ -567,7 +567,7 @@ message BuildTriggerCause {
// genericWebHook holds data about a builds generic webhook trigger.
optional GenericWebHookCause genericWebHook = 2;
- // gitHubWebHook represents data for a GitHub webhook that fired a
+ // githubWebHook represents data for a GitHub webhook that fired a
// specific build.
optional GitHubWebHookCause githubWebHook = 3;
@@ -575,11 +575,11 @@ message BuildTriggerCause {
// that triggered a new build.
optional ImageChangeCause imageChangeBuild = 4;
- // GitLabWebHook represents data for a GitLab webhook that fired a specific
+ // gitlabWebHook represents data for a GitLab webhook that fired a specific
// build.
optional GitLabWebHookCause gitlabWebHook = 5;
- // BitbucketWebHook represents data for a Bitbucket webhook that fired a
+ // bitbucketWebHook represents data for a Bitbucket webhook that fired a
// specific build.
optional BitbucketWebHookCause bitbucketWebHook = 6;
}
@@ -743,10 +743,10 @@ message CommonSpec {
// causes into struct so we can share it in the specific causes; it is too late for
// GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket.
message CommonWebHookCause {
- // Revision is the git source revision information of the trigger.
+ // revision is the git source revision information of the trigger.
optional SourceRevision revision = 1;
- // Secret is the obfuscated webhook secret that triggered a build.
+ // secret is the obfuscated webhook secret that triggered a build.
optional string secret = 2;
}
@@ -884,7 +884,7 @@ message GenericWebHookEvent {
// ValueFrom is not supported.
repeated .k8s.io.api.core.v1.EnvVar env = 3;
- // DockerStrategyOptions contains additional docker-strategy specific options for the build
+ // dockerStrategyOptions contains additional docker-strategy specific options for the build
optional DockerStrategyOptions dockerStrategyOptions = 4;
}
@@ -918,7 +918,7 @@ message GitInfo {
optional GitSourceRevision gitSourceRevision = 2;
- // Refs is a list of GitRefs for the provided repo - generally sent
+ // refs is a list of GitRefs for the provided repo - generally sent
// when used from a post-receive hook. This field is optional and is
// used when sending multiple refs
repeated GitRefInfo refs = 3;
@@ -1061,12 +1061,12 @@ message ImageStreamTagReference {
// JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build.
// Deprecated: use OpenShift Pipelines
message JenkinsPipelineBuildStrategy {
- // JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline
+ // jenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline
// relative to the root of the context (contextDir). If both JenkinsfilePath & Jenkinsfile are
// both not specified, this defaults to Jenkinsfile in the root of the specified contextDir.
optional string jenkinsfilePath = 1;
- // Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.
+ // jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.
optional string jenkinsfile = 2;
// env contains additional environment variables you want to pass into a build pipeline.
@@ -1115,7 +1115,7 @@ message SecretBuildSource {
// SecretLocalReference contains information that points to the local secret being used
message SecretLocalReference {
- // Name is the name of the resource in the same namespace being referenced
+ // name is the name of the resource in the same namespace being referenced
optional string name = 1;
}
@@ -1176,7 +1176,7 @@ message SourceRevision {
// +k8s:conversion-gen=false
optional string type = 1;
- // Git contains information about git-based build source
+ // git contains information about git-based build source
optional GitSourceRevision git = 2;
}
diff --git a/vendor/github.com/openshift/api/build/v1/types.go b/vendor/github.com/openshift/api/build/v1/types.go
index ba836aad8..12bf67db1 100644
--- a/vendor/github.com/openshift/api/build/v1/types.go
+++ b/vendor/github.com/openshift/api/build/v1/types.go
@@ -116,7 +116,7 @@ type BuildTriggerCause struct {
// genericWebHook holds data about a builds generic webhook trigger.
GenericWebHook *GenericWebHookCause `json:"genericWebHook,omitempty" protobuf:"bytes,2,opt,name=genericWebHook"`
- // gitHubWebHook represents data for a GitHub webhook that fired a
+ // githubWebHook represents data for a GitHub webhook that fired a
// specific build.
GitHubWebHook *GitHubWebHookCause `json:"githubWebHook,omitempty" protobuf:"bytes,3,opt,name=githubWebHook"`
@@ -124,11 +124,11 @@ type BuildTriggerCause struct {
// that triggered a new build.
ImageChangeBuild *ImageChangeCause `json:"imageChangeBuild,omitempty" protobuf:"bytes,4,opt,name=imageChangeBuild"`
- // GitLabWebHook represents data for a GitLab webhook that fired a specific
+ // gitlabWebHook represents data for a GitLab webhook that fired a specific
// build.
GitLabWebHook *GitLabWebHookCause `json:"gitlabWebHook,omitempty" protobuf:"bytes,5,opt,name=gitlabWebHook"`
- // BitbucketWebHook represents data for a Bitbucket webhook that fired a
+ // bitbucketWebHook represents data for a Bitbucket webhook that fired a
// specific build.
BitbucketWebHook *BitbucketWebHookCause `json:"bitbucketWebHook,omitempty" protobuf:"bytes,6,opt,name=bitbucketWebHook"`
}
@@ -158,10 +158,10 @@ type GitHubWebHookCause struct {
// causes into struct so we can share it in the specific causes; it is too late for
// GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket.
type CommonWebHookCause struct {
- // Revision is the git source revision information of the trigger.
+ // revision is the git source revision information of the trigger.
Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"`
- // Secret is the obfuscated webhook secret that triggered a build.
+ // secret is the obfuscated webhook secret that triggered a build.
Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"`
}
@@ -237,7 +237,7 @@ type BuildStatus struct {
// logSnippet is the last few lines of the build log. This value is only set for builds that failed.
LogSnippet string `json:"logSnippet,omitempty" protobuf:"bytes,12,opt,name=logSnippet"`
- // Conditions represents the latest available observations of a build's current state.
+ // conditions represents the latest available observations of a build's current state.
// +patchMergeKey=type
// +patchStrategy=merge
Conditions []BuildCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,13,rep,name=conditions"`
@@ -358,9 +358,9 @@ type BuildConditionType string
// BuildCondition describes the state of a build at a certain point.
type BuildCondition struct {
- // Type of build condition.
+ // type of build condition.
Type BuildConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildConditionType"`
- // Status of the condition, one of True, False, Unknown.
+ // status of the condition, one of True, False, Unknown.
Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"`
// The last time this condition was updated.
LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"`
@@ -562,7 +562,7 @@ type SourceRevision struct {
// +k8s:conversion-gen=false
Type BuildSourceType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"`
- // Git contains information about git-based build source
+ // git contains information about git-based build source
Git *GitSourceRevision `json:"git,omitempty" protobuf:"bytes,2,opt,name=git"`
}
@@ -632,7 +632,7 @@ type BuildStrategy struct {
// customStrategy holds the parameters to the Custom build strategy
CustomStrategy *CustomBuildStrategy `json:"customStrategy,omitempty" protobuf:"bytes,4,opt,name=customStrategy"`
- // JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy.
+ // jenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy.
// Deprecated: use OpenShift Pipelines
JenkinsPipelineStrategy *JenkinsPipelineBuildStrategy `json:"jenkinsPipelineStrategy,omitempty" protobuf:"bytes,5,opt,name=jenkinsPipelineStrategy"`
}
@@ -801,12 +801,12 @@ type SourceBuildStrategy struct {
// JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build.
// Deprecated: use OpenShift Pipelines
type JenkinsPipelineBuildStrategy struct {
- // JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline
+ // jenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline
// relative to the root of the context (contextDir). If both JenkinsfilePath & Jenkinsfile are
// not specified, this defaults to Jenkinsfile in the root of the specified contextDir.
JenkinsfilePath string `json:"jenkinsfilePath,omitempty" protobuf:"bytes,1,opt,name=jenkinsfilePath"`
- // Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.
+ // jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.
Jenkinsfile string `json:"jenkinsfile,omitempty" protobuf:"bytes,2,opt,name=jenkinsfile"`
// env contains additional environment variables you want to pass into a build pipeline.
@@ -911,7 +911,7 @@ type BuildOutput struct {
// the build unless Namespace is specified.
To *corev1.ObjectReference `json:"to,omitempty" protobuf:"bytes,1,opt,name=to"`
- // PushSecret is the name of a Secret that would be used for setting
+ // pushSecret is the name of a Secret that would be used for setting
// up the authentication for executing the Docker push to authentication
// enabled Docker Registry (or Docker Hub).
PushSecret *corev1.LocalObjectReference `json:"pushSecret,omitempty" protobuf:"bytes,2,opt,name=pushSecret"`
@@ -964,7 +964,7 @@ type BuildConfigSpec struct {
// +optional
Triggers []BuildTriggerPolicy `json:"triggers,omitempty" protobuf:"bytes,1,rep,name=triggers"`
- // RunPolicy describes how the new build created from this build
+ // runPolicy describes how the new build created from this build
// configuration will be scheduled for execution.
// This is optional, if not specified we default to "Serial".
RunPolicy BuildRunPolicy `json:"runPolicy,omitempty" protobuf:"bytes,2,opt,name=runPolicy,casttype=BuildRunPolicy"`
@@ -1007,7 +1007,7 @@ type BuildConfigStatus struct {
// lastVersion is used to inform about number of last triggered build.
LastVersion int64 `json:"lastVersion" protobuf:"varint,1,opt,name=lastVersion"`
- // ImageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec,
+ // imageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec,
// including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry
// in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger.
ImageChangeTriggers []ImageChangeTriggerStatus `json:"imageChangeTriggers,omitempty" protobuf:"bytes,2,rep,name=imageChangeTriggers"`
@@ -1015,7 +1015,7 @@ type BuildConfigStatus struct {
// SecretLocalReference contains information that points to the local secret being used
type SecretLocalReference struct {
- // Name is the name of the resource in the same namespace being referenced
+ // name is the name of the resource in the same namespace being referenced
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
}
@@ -1203,7 +1203,7 @@ type GenericWebHookEvent struct {
// ValueFrom is not supported.
Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
- // DockerStrategyOptions contains additional docker-strategy specific options for the build
+ // dockerStrategyOptions contains additional docker-strategy specific options for the build
DockerStrategyOptions *DockerStrategyOptions `json:"dockerStrategyOptions,omitempty" protobuf:"bytes,4,opt,name=dockerStrategyOptions"`
}
@@ -1212,7 +1212,7 @@ type GitInfo struct {
GitBuildSource `json:",inline" protobuf:"bytes,1,opt,name=gitBuildSource"`
GitSourceRevision `json:",inline" protobuf:"bytes,2,opt,name=gitSourceRevision"`
- // Refs is a list of GitRefs for the provided repo - generally sent
+ // refs is a list of GitRefs for the provided repo - generally sent
// when used from a post-receive hook. This field is optional and is
// used when sending multiple refs
Refs []GitRefInfo `json:"refs" protobuf:"bytes,3,rep,name=refs"`
@@ -1287,10 +1287,10 @@ type BuildRequest struct {
// build configuration and contains information about those triggers.
TriggeredBy []BuildTriggerCause `json:"triggeredBy,omitempty" protobuf:"bytes,8,rep,name=triggeredBy"`
- // DockerStrategyOptions contains additional docker-strategy specific options for the build
+ // dockerStrategyOptions contains additional docker-strategy specific options for the build
DockerStrategyOptions *DockerStrategyOptions `json:"dockerStrategyOptions,omitempty" protobuf:"bytes,9,opt,name=dockerStrategyOptions"`
- // SourceStrategyOptions contains additional source-strategy specific options for the build
+ // sourceStrategyOptions contains additional source-strategy specific options for the build
SourceStrategyOptions *SourceStrategyOptions `json:"sourceStrategyOptions,omitempty" protobuf:"bytes,10,opt,name=sourceStrategyOptions"`
}
@@ -1368,7 +1368,7 @@ type BuildLogOptions struct {
// slightly more or slightly less than the specified limit.
LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"`
- // noWait if true causes the call to return immediately even if the build
+ // nowait if true causes the call to return immediately even if the build
// is not available yet. Otherwise the server will wait until the build has started.
// TODO: Fix the tag to 'noWait' in v2
NoWait bool `json:"nowait,omitempty" protobuf:"varint,9,opt,name=nowait"`
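These comment-casing changes are documentation-only: the Go field names, JSON tags, and protobuf field numbers are untouched. A minimal sketch (assuming this vendored buildv1 package) showing that the serialized keys already use the lowercase names the comments now lead with:

package main

import (
	"encoding/json"
	"fmt"

	buildv1 "github.com/openshift/api/build/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// "New" is a placeholder condition type; BuildConditionType is a plain string type.
	cond := buildv1.BuildCondition{
		Type:           buildv1.BuildConditionType("New"),
		Status:         corev1.ConditionTrue,
		LastUpdateTime: metav1.Now(),
	}
	out, err := json.Marshal(cond)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // keys serialize as "type", "status", "lastUpdateTime", ...
}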
diff --git a/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go
index 72ff507b7..1da784353 100644
--- a/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go
+++ b/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go
@@ -57,8 +57,8 @@ func (Build) SwaggerDoc() map[string]string {
var map_BuildCondition = map[string]string{
"": "BuildCondition describes the state of a build at a certain point.",
- "type": "Type of build condition.",
- "status": "Status of the condition, one of True, False, Unknown.",
+ "type": "type of build condition.",
+ "status": "status of the condition, one of True, False, Unknown.",
"lastUpdateTime": "The last time this condition was updated.",
"lastTransitionTime": "The last time the condition transitioned from one status to another.",
"reason": "The reason for the condition's last transition.",
@@ -93,7 +93,7 @@ func (BuildConfigList) SwaggerDoc() map[string]string {
var map_BuildConfigSpec = map[string]string{
"": "BuildConfigSpec describes when and how builds are created",
"triggers": "triggers determine how new Builds can be launched from a BuildConfig. If no triggers are defined, a new build can only occur as a result of an explicit client build creation.",
- "runPolicy": "RunPolicy describes how the new build created from this build configuration will be scheduled for execution. This is optional, if not specified we default to \"Serial\".",
+ "runPolicy": "runPolicy describes how the new build created from this build configuration will be scheduled for execution. This is optional, if not specified we default to \"Serial\".",
"successfulBuildsHistoryLimit": "successfulBuildsHistoryLimit is the number of old successful builds to retain. When a BuildConfig is created, the 5 most recent successful builds are retained unless this value is set. If removed after the BuildConfig has been created, all successful builds are retained.",
"failedBuildsHistoryLimit": "failedBuildsHistoryLimit is the number of old failed builds to retain. When a BuildConfig is created, the 5 most recent failed builds are retained unless this value is set. If removed after the BuildConfig has been created, all failed builds are retained.",
}
@@ -105,7 +105,7 @@ func (BuildConfigSpec) SwaggerDoc() map[string]string {
var map_BuildConfigStatus = map[string]string{
"": "BuildConfigStatus contains current state of the build config object.",
"lastVersion": "lastVersion is used to inform about number of last triggered build.",
- "imageChangeTriggers": "ImageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger.",
+ "imageChangeTriggers": "imageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger.",
}
func (BuildConfigStatus) SwaggerDoc() map[string]string {
@@ -140,7 +140,7 @@ var map_BuildLogOptions = map[string]string{
"timestamps": "timestamps, If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.",
"tailLines": "tailLines, If set, is the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime",
"limitBytes": "limitBytes, If set, is the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.",
- "nowait": "noWait if true causes the call to return immediately even if the build is not available yet. Otherwise the server will wait until the build has started.",
+ "nowait": "nowait if true causes the call to return immediately even if the build is not available yet. Otherwise the server will wait until the build has started.",
"version": "version of the build for which to view logs.",
"insecureSkipTLSVerifyBackend": "insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept the actual log data coming from the real kubelet).",
}
@@ -152,7 +152,7 @@ func (BuildLogOptions) SwaggerDoc() map[string]string {
var map_BuildOutput = map[string]string{
"": "BuildOutput is input to a build strategy and describes the container image that the strategy should produce.",
"to": "to defines an optional location to push the output of this build to. Kind must be one of 'ImageStreamTag' or 'DockerImage'. This value will be used to look up a container image repository to push to. In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of the build unless Namespace is specified.",
- "pushSecret": "PushSecret is the name of a Secret that would be used for setting up the authentication for executing the Docker push to authentication enabled Docker Registry (or Docker Hub).",
+ "pushSecret": "pushSecret is the name of a Secret that would be used for setting up the authentication for executing the Docker push to authentication enabled Docker Registry (or Docker Hub).",
"imageLabels": "imageLabels define a list of labels that are applied to the resulting image. If there are multiple labels with the same name then the last one in the list is used.",
}
@@ -181,8 +181,8 @@ var map_BuildRequest = map[string]string{
"lastVersion": "lastVersion (optional) is the LastVersion of the BuildConfig that was used to generate the build. If the BuildConfig in the generator doesn't match, a build will not be generated.",
"env": "env contains additional environment variables you want to pass into a builder container.",
"triggeredBy": "triggeredBy describes which triggers started the most recent update to the build configuration and contains information about those triggers.",
- "dockerStrategyOptions": "DockerStrategyOptions contains additional docker-strategy specific options for the build",
- "sourceStrategyOptions": "SourceStrategyOptions contains additional source-strategy specific options for the build",
+ "dockerStrategyOptions": "dockerStrategyOptions contains additional docker-strategy specific options for the build",
+ "sourceStrategyOptions": "sourceStrategyOptions contains additional source-strategy specific options for the build",
}
func (BuildRequest) SwaggerDoc() map[string]string {
@@ -229,7 +229,7 @@ var map_BuildStatus = map[string]string{
"output": "output describes the container image the build has produced.",
"stages": "stages contains details about each stage that occurs during the build including start time, duration (in milliseconds), and the steps that occured within each stage.",
"logSnippet": "logSnippet is the last few lines of the build log. This value is only set for builds that failed.",
- "conditions": "Conditions represents the latest available observations of a build's current state.",
+ "conditions": "conditions represents the latest available observations of a build's current state.",
}
func (BuildStatus) SwaggerDoc() map[string]string {
@@ -260,7 +260,7 @@ var map_BuildStrategy = map[string]string{
"dockerStrategy": "dockerStrategy holds the parameters to the container image build strategy.",
"sourceStrategy": "sourceStrategy holds the parameters to the Source build strategy.",
"customStrategy": "customStrategy holds the parameters to the Custom build strategy",
- "jenkinsPipelineStrategy": "JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. Deprecated: use OpenShift Pipelines",
+ "jenkinsPipelineStrategy": "jenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. Deprecated: use OpenShift Pipelines",
}
func (BuildStrategy) SwaggerDoc() map[string]string {
@@ -271,10 +271,10 @@ var map_BuildTriggerCause = map[string]string{
"": "BuildTriggerCause holds information about a triggered build. It is used for displaying build trigger data for each build and build configuration in oc describe. It is also used to describe which triggers led to the most recent update in the build configuration.",
"message": "message is used to store a human readable message for why the build was triggered. E.g.: \"Manually triggered by user\", \"Configuration change\",etc.",
"genericWebHook": "genericWebHook holds data about a builds generic webhook trigger.",
- "githubWebHook": "gitHubWebHook represents data for a GitHub webhook that fired a specific build.",
+ "githubWebHook": "githubWebHook represents data for a GitHub webhook that fired a specific build.",
"imageChangeBuild": "imageChangeBuild stores information about an imagechange event that triggered a new build.",
- "gitlabWebHook": "GitLabWebHook represents data for a GitLab webhook that fired a specific build.",
- "bitbucketWebHook": "BitbucketWebHook represents data for a Bitbucket webhook that fired a specific build.",
+ "gitlabWebHook": "gitlabWebHook represents data for a GitLab webhook that fired a specific build.",
+ "bitbucketWebHook": "bitbucketWebHook represents data for a Bitbucket webhook that fired a specific build.",
}
func (BuildTriggerCause) SwaggerDoc() map[string]string {
@@ -347,8 +347,8 @@ func (CommonSpec) SwaggerDoc() map[string]string {
var map_CommonWebHookCause = map[string]string{
"": "CommonWebHookCause factors out the identical format of these webhook causes into struct so we can share it in the specific causes; it is too late for GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket.",
- "revision": "Revision is the git source revision information of the trigger.",
- "secret": "Secret is the obfuscated webhook secret that triggered a build.",
+ "revision": "revision is the git source revision information of the trigger.",
+ "secret": "secret is the obfuscated webhook secret that triggered a build.",
}
func (CommonWebHookCause) SwaggerDoc() map[string]string {
@@ -422,7 +422,7 @@ var map_GenericWebHookEvent = map[string]string{
"type": "type is the type of source repository",
"git": "git is the git information if the Type is BuildSourceGit",
"env": "env contains additional environment variables you want to pass into a builder container. ValueFrom is not supported.",
- "dockerStrategyOptions": "DockerStrategyOptions contains additional docker-strategy specific options for the build",
+ "dockerStrategyOptions": "dockerStrategyOptions contains additional docker-strategy specific options for the build",
}
func (GenericWebHookEvent) SwaggerDoc() map[string]string {
@@ -451,7 +451,7 @@ func (GitHubWebHookCause) SwaggerDoc() map[string]string {
var map_GitInfo = map[string]string{
"": "GitInfo is the aggregated git information for a generic webhook post",
- "refs": "Refs is a list of GitRefs for the provided repo - generally sent when used from a post-receive hook. This field is optional and is used when sending multiple refs",
+ "refs": "refs is a list of GitRefs for the provided repo - generally sent when used from a post-receive hook. This field is optional and is used when sending multiple refs",
}
func (GitInfo) SwaggerDoc() map[string]string {
@@ -562,8 +562,8 @@ func (ImageStreamTagReference) SwaggerDoc() map[string]string {
var map_JenkinsPipelineBuildStrategy = map[string]string{
"": "JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build. Deprecated: use OpenShift Pipelines",
- "jenkinsfilePath": "JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline relative to the root of the context (contextDir). If both JenkinsfilePath & Jenkinsfile are both not specified, this defaults to Jenkinsfile in the root of the specified contextDir.",
- "jenkinsfile": "Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.",
+ "jenkinsfilePath": "jenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline relative to the root of the context (contextDir). If both JenkinsfilePath & Jenkinsfile are both not specified, this defaults to Jenkinsfile in the root of the specified contextDir.",
+ "jenkinsfile": "jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.",
"env": "env contains additional environment variables you want to pass into a build pipeline.",
}
@@ -594,7 +594,7 @@ func (SecretBuildSource) SwaggerDoc() map[string]string {
var map_SecretLocalReference = map[string]string{
"": "SecretLocalReference contains information that points to the local secret being used",
- "name": "Name is the name of the resource in the same namespace being referenced",
+ "name": "name is the name of the resource in the same namespace being referenced",
}
func (SecretLocalReference) SwaggerDoc() map[string]string {
@@ -639,7 +639,7 @@ func (SourceControlUser) SwaggerDoc() map[string]string {
var map_SourceRevision = map[string]string{
"": "SourceRevision is the revision or commit information from the source for the build",
"type": "type of the build source, may be one of 'Source', 'Dockerfile', 'Binary', or 'Images'",
- "git": "Git contains information about git-based build source",
+ "git": "git contains information about git-based build source",
}
func (SourceRevision) SwaggerDoc() map[string]string {
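The regenerated SwaggerDoc() maps are keyed by the serialized field name, so each doc string now begins with the exact key it describes. A quick way to see the pairing, under the same assumptions as the sketch above:

package main

import (
	"fmt"

	buildv1 "github.com/openshift/api/build/v1"
)

func main() {
	// SwaggerDoc is generated with a value receiver, so it can be called
	// on a zero-value literal.
	docs := buildv1.BuildCondition{}.SwaggerDoc()
	fmt.Println(docs["type"])   // "type of build condition."
	fmt.Println(docs["status"]) // "status of the condition, one of True, False, Unknown."
}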
diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/generated.proto b/vendor/github.com/openshift/api/cloudnetwork/v1/generated.proto
index 085b49b25..328de7c5a 100644
--- a/vendor/github.com/openshift/api/cloudnetwork/v1/generated.proto
+++ b/vendor/github.com/openshift/api/cloudnetwork/v1/generated.proto
@@ -40,12 +40,10 @@ message CloudPrivateIPConfig {
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// spec is the definition of the desired private IP request.
- // +kubebuilder:validation:Required
// +required
optional CloudPrivateIPConfigSpec spec = 2;
// status is the observed status of the desired private IP request. Read-only.
- // +kubebuilder:validation:Optional
// +optional
optional CloudPrivateIPConfigStatus status = 3;
}
@@ -68,7 +66,6 @@ message CloudPrivateIPConfigList {
// +k8s:openapi-gen=true
message CloudPrivateIPConfigSpec {
// node is the node name, as specified by the Kubernetes field: node.metadata.name
- // +kubebuilder:validation:Optional
// +optional
optional string node = 1;
}
@@ -77,12 +74,10 @@ message CloudPrivateIPConfigSpec {
// +k8s:openapi-gen=true
message CloudPrivateIPConfigStatus {
// node is the node name, as specified by the Kubernetes field: node.metadata.name
- // +kubebuilder:validation:Optional
// +optional
optional string node = 1;
// condition is the assignment condition of the private IP and its status
- // +kubebuilder:validation:Required
// +required
repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 2;
}
diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/types.go b/vendor/github.com/openshift/api/cloudnetwork/v1/types.go
index 4c19e44c3..de27f8eb6 100644
--- a/vendor/github.com/openshift/api/cloudnetwork/v1/types.go
+++ b/vendor/github.com/openshift/api/cloudnetwork/v1/types.go
@@ -33,11 +33,9 @@ type CloudPrivateIPConfig struct {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// spec is the definition of the desired private IP request.
- // +kubebuilder:validation:Required
// +required
Spec CloudPrivateIPConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
// status is the observed status of the desired private IP request. Read-only.
- // +kubebuilder:validation:Optional
// +optional
Status CloudPrivateIPConfigStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
@@ -46,7 +44,6 @@ type CloudPrivateIPConfig struct {
// +k8s:openapi-gen=true
type CloudPrivateIPConfigSpec struct {
// node is the node name, as specified by the Kubernetes field: node.metadata.name
- // +kubebuilder:validation:Optional
// +optional
Node string `json:"node" protobuf:"bytes,1,opt,name=node"`
}
@@ -55,11 +52,9 @@ type CloudPrivateIPConfigSpec struct {
// +k8s:openapi-gen=true
type CloudPrivateIPConfigStatus struct {
// node is the node name, as specified by the Kubernetes field: node.metadata.name
- // +kubebuilder:validation:Optional
// +optional
Node string `json:"node" protobuf:"bytes,1,opt,name=node"`
// condition is the assignment condition of the private IP and its status
- // +kubebuilder:validation:Required
// +required
Conditions []metav1.Condition `json:"conditions" protobuf:"bytes,2,rep,name=conditions"`
}
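Dropping `+kubebuilder:validation:Required`/`Optional` here is marker deduplication: the plain `+required`/`+optional` markers carry the same schema intent, so construction and serialization are unaffected. A minimal sketch with hypothetical resource and node names:

package main

import (
	"fmt"

	cloudnetworkv1 "github.com/openshift/api/cloudnetwork/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// CloudPrivateIPConfig objects are conventionally named after the IP
	// they request; both values below are placeholders.
	cfg := cloudnetworkv1.CloudPrivateIPConfig{
		ObjectMeta: metav1.ObjectMeta{Name: "192.168.10.10"},
		Spec: cloudnetworkv1.CloudPrivateIPConfigSpec{
			Node: "worker-0",
		},
	}
	fmt.Printf("%s -> %s\n", cfg.Name, cfg.Spec.Node)
}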
diff --git a/vendor/github.com/openshift/api/config/v1/types.go b/vendor/github.com/openshift/api/config/v1/types.go
index d4d09e7fe..3e17ca0cc 100644
--- a/vendor/github.com/openshift/api/config/v1/types.go
+++ b/vendor/github.com/openshift/api/config/v1/types.go
@@ -9,7 +9,7 @@ import (
// The namespace must be specified at the point of use.
type ConfigMapFileReference struct {
Name string `json:"name"`
- // Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references.
+ // key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references.
Key string `json:"key,omitempty"`
}
@@ -17,7 +17,6 @@ type ConfigMapFileReference struct {
// The namespace must be specified at the point of use.
type ConfigMapNameReference struct {
// name is the metadata.name of the referenced config map
- // +kubebuilder:validation:Required
// +required
Name string `json:"name"`
}
@@ -26,7 +25,6 @@ type ConfigMapNameReference struct {
// The namespace must be specified at the point of use.
type SecretNameReference struct {
// name is the metadata.name of the referenced secret
- // +kubebuilder:validation:Required
// +required
Name string `json:"name"`
}
@@ -35,47 +33,47 @@ type SecretNameReference struct {
type HTTPServingInfo struct {
// ServingInfo is the HTTP serving information
ServingInfo `json:",inline"`
- // MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.
+ // maxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.
MaxRequestsInFlight int64 `json:"maxRequestsInFlight"`
- // RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if
+ // requestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if
// -1 there is no limit on requests.
RequestTimeoutSeconds int64 `json:"requestTimeoutSeconds"`
}
// ServingInfo holds information about serving web pages
type ServingInfo struct {
- // BindAddress is the ip:port to serve on
+ // bindAddress is the ip:port to serve on
BindAddress string `json:"bindAddress"`
- // BindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp",
+ // bindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp",
// "tcp4", and "tcp6"
BindNetwork string `json:"bindNetwork"`
// CertInfo is the TLS cert info for serving secure traffic.
// this is anonymous so that we can inline it for serialization
CertInfo `json:",inline"`
- // ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates
+ // clientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates
// +optional
ClientCA string `json:"clientCA,omitempty"`
- // NamedCertificates is a list of certificates to use to secure requests to specific hostnames
+ // namedCertificates is a list of certificates to use to secure requests to specific hostnames
NamedCertificates []NamedCertificate `json:"namedCertificates,omitempty"`
- // MinTLSVersion is the minimum TLS version supported.
+ // minTLSVersion is the minimum TLS version supported.
// Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants
MinTLSVersion string `json:"minTLSVersion,omitempty"`
- // CipherSuites contains an overridden list of ciphers for the server to support.
+ // cipherSuites contains an overridden list of ciphers for the server to support.
// Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants
CipherSuites []string `json:"cipherSuites,omitempty"`
}
// CertInfo relates a certificate with a private key
type CertInfo struct {
- // CertFile is a file containing a PEM-encoded certificate
+ // certFile is a file containing a PEM-encoded certificate
CertFile string `json:"certFile"`
- // KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile
+ // keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile
KeyFile string `json:"keyFile"`
}
// NamedCertificate specifies a certificate/key, and the names it should be served for
type NamedCertificate struct {
- // Names is a list of DNS names this certificate should be used to secure
+ // names is a list of DNS names this certificate should be used to secure
// A name can be a normal DNS name, or can contain leading wildcard segments.
Names []string `json:"names,omitempty"`
// CertInfo is the TLS cert info for serving secure traffic
@@ -121,24 +119,24 @@ type StringSource struct {
// StringSourceSpec specifies a string value, or external location
type StringSourceSpec struct {
- // Value specifies the cleartext value, or an encrypted value if keyFile is specified.
+ // value specifies the cleartext value, or an encrypted value if keyFile is specified.
Value string `json:"value"`
- // Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.
+ // env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.
Env string `json:"env"`
- // File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.
+ // file references a file containing the cleartext value, or an encrypted value if a keyFile is specified.
File string `json:"file"`
- // KeyFile references a file containing the key to use to decrypt the value.
+ // keyFile references a file containing the key to use to decrypt the value.
KeyFile string `json:"keyFile"`
}
// RemoteConnectionInfo holds information necessary for establishing a remote connection
type RemoteConnectionInfo struct {
- // URL is the remote URL to connect to
+ // url is the remote URL to connect to
URL string `json:"url"`
- // CA is the CA for verifying TLS connections
+ // ca is the CA for verifying TLS connections
CA string `json:"ca"`
// CertInfo is the TLS client cert information to present
// this is anonymous so that we can inline it for serialization
@@ -160,11 +158,11 @@ type AdmissionConfig struct {
// AdmissionPluginConfig holds the necessary configuration options for admission plugins
type AdmissionPluginConfig struct {
- // Location is the path to a configuration file that contains the plugin's
+ // location is the path to a configuration file that contains the plugin's
// configuration
Location string `json:"location"`
- // Configuration is an embedded configuration object to be used as the plugin's
+ // configuration is an embedded configuration object to be used as the plugin's
// configuration. If present, it will be used instead of the path to the configuration file.
// +nullable
// +kubebuilder:pruning:PreserveUnknownFields
@@ -205,9 +203,9 @@ type AuditConfig struct {
// Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.
MaximumFileSizeMegabytes int32 `json:"maximumFileSizeMegabytes"`
- // PolicyFile is a path to the file that defines the audit policy configuration.
+ // policyFile is a path to the file that defines the audit policy configuration.
PolicyFile string `json:"policyFile"`
- // PolicyConfiguration is an embedded policy configuration object to be used
+ // policyConfiguration is an embedded policy configuration object to be used
// as the audit policy configuration. If present, it will be used instead of
// the path to the policy file.
// +nullable
@@ -225,9 +223,9 @@ type AuditConfig struct {
// EtcdConnectionInfo holds information necessary for connecting to an etcd server
type EtcdConnectionInfo struct {
- // URLs are the URLs for etcd
+ // urls are the URLs for etcd
URLs []string `json:"urls,omitempty"`
- // CA is a file containing trusted roots for the etcd server certificates
+ // ca is a file containing trusted roots for the etcd server certificates
CA string `json:"ca"`
// CertInfo is the TLS client cert information for securing communication to etcd
// this is anonymous so that we can inline it for serialization
@@ -237,7 +235,7 @@ type EtcdConnectionInfo struct {
type EtcdStorageConfig struct {
EtcdConnectionInfo `json:",inline"`
- // StoragePrefix is the path within etcd that the OpenShift resources will
+ // storagePrefix is the path within etcd that the OpenShift resources will
// be rooted under. This value, if changed, will mean existing objects in etcd will
// no longer be located.
StoragePrefix string `json:"storagePrefix"`
@@ -287,7 +285,7 @@ type ClientConnectionOverrides struct {
// GenericControllerConfig provides information to configure a controller
type GenericControllerConfig struct {
- // ServingInfo is the HTTP serving information for the controller's endpoints
+ // servingInfo is the HTTP serving information for the controller's endpoints
ServingInfo HTTPServingInfo `json:"servingInfo"`
// leaderElection provides information to elect a leader. Only override this if you have a specific need
@@ -324,7 +322,6 @@ type RequiredHSTSPolicy struct {
// The use of wildcards is allowed like this: *.foo.com matches everything under foo.com.
// foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*.
// +kubebuilder:validation:MinItems=1
- // +kubebuilder:validation:Required
// +required
DomainPatterns []string `json:"domainPatterns"`
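As in build/v1, only the leading word of each comment changes to match the serialized name; the inline CertInfo embedding and field types stay the same. An illustrative HTTPServingInfo literal (all values are placeholders):

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	si := configv1.HTTPServingInfo{
		ServingInfo: configv1.ServingInfo{
			BindAddress: "0.0.0.0:8443",
			BindNetwork: "tcp4",
			// CertInfo is inlined, so certFile/keyFile serialize at the top level.
			CertInfo: configv1.CertInfo{
				CertFile: "/etc/serving-cert/tls.crt",
				KeyFile:  "/etc/serving-cert/tls.key",
			},
			MinTLSVersion: "VersionTLS12",
		},
		MaxRequestsInFlight:   400,  // 0 would mean no limit
		RequestTimeoutSeconds: 3600, // -1 would mean no timeout
	}
	fmt.Printf("serving on %s (min TLS %s)\n", si.BindAddress, si.MinTLSVersion)
}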
diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go
index d815556d2..75b647f74 100644
--- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go
+++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go
@@ -27,7 +27,6 @@ type APIServer struct {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
- // +kubebuilder:validation:Required
// +required
Spec APIServerSpec `json:"spec"`
// status holds observed values from the cluster. They may not be overridden.
@@ -129,7 +128,6 @@ type Audit struct {
type AuditCustomRule struct {
// group is a name of group a request user must be member of in order to this profile to apply.
//
- // +kubebuilder:validation:Required
// +kubebuilder:validation:MinLength=1
// +required
Group string `json:"group"`
@@ -146,7 +144,6 @@ type AuditCustomRule struct {
//
// If unset, the 'Default' profile is used as the default.
//
- // +kubebuilder:validation:Required
// +required
Profile AuditProfileType `json:"profile,omitempty"`
}
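A sketch of a per-group audit override under the cleaned-up markers; the group value is illustrative, and the `DefaultAuditProfileType` constant is assumed from the package's AuditProfileType enum:

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	// group keeps its MinLength=1 constraint; profile stays +required.
	rule := configv1.AuditCustomRule{
		Group:   "system:authenticated:oauth",
		Profile: configv1.DefaultAuditProfileType,
	}
	fmt.Println(rule.Group, "->", rule.Profile)
}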
diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go
index f6f0c12a3..65dffddb0 100644
--- a/vendor/github.com/openshift/api/config/v1/types_authentication.go
+++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go
@@ -26,7 +26,6 @@ type Authentication struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
- // +kubebuilder:validation:Required
// +required
Spec AuthenticationSpec `json:"spec"`
// status holds observed values from the cluster. They may not be overridden.
@@ -81,7 +80,7 @@ type AuthenticationSpec struct {
// +optional
ServiceAccountIssuer string `json:"serviceAccountIssuer"`
- // OIDCProviders are OIDC identity providers that can issue tokens
+ // oidcProviders are OIDC identity providers that can issue tokens
// for this cluster
// Can only be set if "Type" is set to "OIDC".
//
@@ -110,7 +109,7 @@ type AuthenticationStatus struct {
// The namespace for this config map is openshift-config-managed.
IntegratedOAuthMetadata ConfigMapNameReference `json:"integratedOAuthMetadata"`
- // OIDCClients is where participating operators place the current OIDC client status
+ // oidcClients is where participating operators place the current OIDC client status
// for OIDC clients that can be customized by the cluster-admin.
//
// +listType=map
@@ -181,7 +180,6 @@ type WebhookTokenAuthenticator struct {
// The key "kubeConfig" is used to locate the data.
// If the secret or expected key is not found, the webhook is not honored.
// If the specified kube config data is not valid, the webhook is not honored.
- // +kubebuilder:validation:Required
// +required
KubeConfig SecretNameReference `json:"kubeConfig"`
}
@@ -195,19 +193,17 @@ const (
)
type OIDCProvider struct {
- // Name of the OIDC provider
+ // name of the OIDC provider
//
// +kubebuilder:validation:MinLength=1
- // +kubebuilder:validation:Required
// +required
Name string `json:"name"`
- // Issuer describes atributes of the OIDC token issuer
+ // issuer describes attributes of the OIDC token issuer
//
- // +kubebuilder:validation:Required
// +required
Issuer TokenIssuer `json:"issuer"`
- // OIDCClients contains configuration for the platform's clients that
+ // oidcClients contains configuration for the platform's clients that
// need to request tokens from the issuer
//
// +listType=map
@@ -216,11 +212,11 @@ type OIDCProvider struct {
// +kubebuilder:validation:MaxItems=20
OIDCClients []OIDCClientConfig `json:"oidcClients"`
- // ClaimMappings describes rules on how to transform information from an
+ // claimMappings describes rules on how to transform information from an
// ID token into a cluster identity
ClaimMappings TokenClaimMappings `json:"claimMappings"`
- // ClaimValidationRules are rules that are applied to validate token claims to authenticate users.
+ // claimValidationRules are rules that are applied to validate token claims to authenticate users.
//
// +listType=atomic
ClaimValidationRules []TokenClaimValidationRule `json:"claimValidationRules,omitempty"`
@@ -234,17 +230,15 @@ type TokenIssuer struct {
// Must use the https:// scheme.
//
// +kubebuilder:validation:Pattern=`^https:\/\/[^\s]`
- // +kubebuilder:validation:Required
// +required
URL string `json:"issuerURL"`
- // Audiences is an array of audiences that the token was issued for.
+ // audiences is an array of audiences that the token was issued for.
// Valid tokens must include at least one of these values in their
// "aud" claim.
// Must be set to exactly one value.
//
// +listType=set
- // +kubebuilder:validation:Required
// +kubebuilder:validation:MinItems=1
// +kubebuilder:validation:MaxItems=10
// +required
@@ -258,94 +252,88 @@ type TokenIssuer struct {
}
type TokenClaimMappings struct {
- // Username is a name of the claim that should be used to construct
+ // username is a name of the claim that should be used to construct
// usernames for the cluster identity.
//
// Default value: "sub"
Username UsernameClaimMapping `json:"username,omitempty"`
- // Groups is a name of the claim that should be used to construct
+ // groups is a name of the claim that should be used to construct
// groups for the cluster identity.
// The referenced claim must use an array of string values.
Groups PrefixedClaimMapping `json:"groups,omitempty"`
}
type TokenClaimMapping struct {
- // Claim is a JWT token claim to be used in the mapping
+ // claim is a JWT token claim to be used in the mapping
//
- // +kubebuilder:validation:Required
// +required
Claim string `json:"claim"`
}
type OIDCClientConfig struct {
- // ComponentName is the name of the component that is supposed to consume this
+ // componentName is the name of the component that is supposed to consume this
// client configuration
//
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=256
- // +kubebuilder:validation:Required
// +required
ComponentName string `json:"componentName"`
- // ComponentNamespace is the namespace of the component that is supposed to consume this
+ // componentNamespace is the namespace of the component that is supposed to consume this
// client configuration
//
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=63
- // +kubebuilder:validation:Required
// +required
ComponentNamespace string `json:"componentNamespace"`
- // ClientID is the identifier of the OIDC client from the OIDC provider
+ // clientID is the identifier of the OIDC client from the OIDC provider
//
// +kubebuilder:validation:MinLength=1
- // +kubebuilder:validation:Required
// +required
ClientID string `json:"clientID"`
- // ClientSecret refers to a secret in the `openshift-config` namespace that
+ // clientSecret refers to a secret in the `openshift-config` namespace that
// contains the client secret in the `clientSecret` key of the `.data` field
ClientSecret SecretNameReference `json:"clientSecret"`
- // ExtraScopes is an optional set of scopes to request tokens with.
+ // extraScopes is an optional set of scopes to request tokens with.
//
// +listType=set
ExtraScopes []string `json:"extraScopes"`
}
type OIDCClientStatus struct {
- // ComponentName is the name of the component that will consume a client configuration.
+ // componentName is the name of the component that will consume a client configuration.
//
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=256
- // +kubebuilder:validation:Required
// +required
ComponentName string `json:"componentName"`
- // ComponentNamespace is the namespace of the component that will consume a client configuration.
+ // componentNamespace is the namespace of the component that will consume a client configuration.
//
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=63
- // +kubebuilder:validation:Required
// +required
ComponentNamespace string `json:"componentNamespace"`
- // CurrentOIDCClients is a list of clients that the component is currently using.
+ // currentOIDCClients is a list of clients that the component is currently using.
//
// +listType=map
// +listMapKey=issuerURL
// +listMapKey=clientID
CurrentOIDCClients []OIDCClientReference `json:"currentOIDCClients"`
- // ConsumingUsers is a slice of ServiceAccounts that need to have read
+ // consumingUsers is a slice of ServiceAccounts that need to have read
// permission on the `clientSecret` secret.
//
// +kubebuilder:validation:MaxItems=5
// +listType=set
ConsumingUsers []ConsumingUser `json:"consumingUsers"`
- // Conditions are used to communicate the state of the `oidcClients` entry.
+ // conditions are used to communicate the state of the `oidcClients` entry.
//
// Supported conditions include Available, Degraded and Progressing.
//
@@ -362,7 +350,6 @@ type OIDCClientReference struct {
// OIDCName refers to the `name` of the provider from `oidcProviders`
//
// +kubebuilder:validation:MinLength=1
- // +kubebuilder:validation:Required
// +required
OIDCProviderName string `json:"oidcProviderName"`
@@ -370,14 +357,12 @@ type OIDCClientReference struct {
// Must use the https:// scheme.
//
// +kubebuilder:validation:Pattern=`^https:\/\/[^\s]`
- // +kubebuilder:validation:Required
// +required
IssuerURL string `json:"issuerURL"`
- // ClientID is the identifier of the OIDC client from the OIDC provider
+ // clientID is the identifier of the OIDC client from the OIDC provider
//
// +kubebuilder:validation:MinLength=1
- // +kubebuilder:validation:Required
// +required
ClientID string `json:"clientID"`
}
@@ -386,7 +371,7 @@ type OIDCClientReference struct {
type UsernameClaimMapping struct {
TokenClaimMapping `json:",inline"`
- // PrefixPolicy specifies how a prefix should apply.
+ // prefixPolicy specifies how a prefix should apply.
//
// By default, claims other than `email` will be prefixed with the issuer URL to
// prevent naming clashes with other plugins.
@@ -427,7 +412,6 @@ var (
)
type UsernamePrefix struct {
- // +kubebuilder:validation:Required
// +kubebuilder:validation:MinLength=1
// +required
PrefixString string `json:"prefixString"`
@@ -436,7 +420,7 @@ type UsernamePrefix struct {
type PrefixedClaimMapping struct {
TokenClaimMapping `json:",inline"`
- // Prefix is a string to prefix the value from the token in the result of the
+ // prefix is a string to prefix the value from the token in the result of the
// claim mapping.
//
// By default, no prefixing occurs.
@@ -454,30 +438,28 @@ const (
)
type TokenClaimValidationRule struct {
- // Type sets the type of the validation rule
+ // type sets the type of the validation rule
//
// +kubebuilder:validation:Enum={"RequiredClaim"}
// +kubebuilder:default="RequiredClaim"
Type TokenValidationRuleType `json:"type"`
- // RequiredClaim allows configuring a required claim name and its expected
+ // requiredClaim allows configuring a required claim name and its expected
// value
RequiredClaim *TokenRequiredClaim `json:"requiredClaim"`
}
type TokenRequiredClaim struct {
- // Claim is a name of a required claim. Only claims with string values are
+ // claim is a name of a required claim. Only claims with string values are
// supported.
//
// +kubebuilder:validation:MinLength=1
- // +kubebuilder:validation:Required
// +required
Claim string `json:"claim"`
- // RequiredValue is the required value for the claim.
+ // requiredValue is the required value for the claim.
//
// +kubebuilder:validation:MinLength=1
- // +kubebuilder:validation:Required
// +required
RequiredValue string `json:"requiredValue"`
}
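A minimal, hypothetical OIDCProvider entry exercising the fields documented above; the URL is a placeholder, and the `Prefix` field name on PrefixedClaimMapping is inferred from the surrounding comments:

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	provider := configv1.OIDCProvider{
		Name: "example-oidc",
		Issuer: configv1.TokenIssuer{
			URL: "https://oidc.example.com", // must use the https:// scheme
		},
		ClaimMappings: configv1.TokenClaimMappings{
			// username defaults to the "sub" claim when unset.
			Username: configv1.UsernameClaimMapping{
				TokenClaimMapping: configv1.TokenClaimMapping{Claim: "sub"},
			},
			// groups must reference a claim whose value is an array of strings.
			Groups: configv1.PrefixedClaimMapping{
				TokenClaimMapping: configv1.TokenClaimMapping{Claim: "groups"},
				Prefix:            "oidc:",
			},
		},
	}
	fmt.Println(provider.Name, provider.Issuer.URL)
}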
diff --git a/vendor/github.com/openshift/api/config/v1/types_build.go b/vendor/github.com/openshift/api/config/v1/types_build.go
index dad47666d..dcde1fc5b 100644
--- a/vendor/github.com/openshift/api/config/v1/types_build.go
+++ b/vendor/github.com/openshift/api/config/v1/types_build.go
@@ -29,14 +29,13 @@ type Build struct {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty"`
- // Spec holds user-settable values for the build controller configuration
- // +kubebuilder:validation:Required
+ // spec holds user-settable values for the build controller configuration
// +required
Spec BuildSpec `json:"spec"`
}
type BuildSpec struct {
- // AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that
+ // additionalTrustedCA is a reference to a ConfigMap containing additional CAs that
// should be trusted for image pushes and pulls during builds.
// The namespace for this config map is openshift-config.
//
@@ -45,16 +44,16 @@ type BuildSpec struct {
//
// +optional
AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"`
- // BuildDefaults controls the default information for Builds
+ // buildDefaults controls the default information for Builds
// +optional
BuildDefaults BuildDefaults `json:"buildDefaults"`
- // BuildOverrides controls override settings for builds
+ // buildOverrides controls override settings for builds
// +optional
BuildOverrides BuildOverrides `json:"buildOverrides"`
}
type BuildDefaults struct {
- // DefaultProxy contains the default proxy settings for all build operations, including image pull/push
+ // defaultProxy contains the default proxy settings for all build operations, including image pull/push
// and source download.
//
// Values can be overridden by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables
@@ -62,55 +61,55 @@ type BuildDefaults struct {
// +optional
DefaultProxy *ProxySpec `json:"defaultProxy,omitempty"`
- // GitProxy contains the proxy settings for git operations only. If set, this will override
+ // gitProxy contains the proxy settings for git operations only. If set, this will override
// any Proxy settings for all git commands, such as git clone.
//
// Values that are not set here will be inherited from DefaultProxy.
// +optional
GitProxy *ProxySpec `json:"gitProxy,omitempty"`
- // Env is a set of default environment variables that will be applied to the
+ // env is a set of default environment variables that will be applied to the
// build if the specified variables do not exist on the build
// +optional
Env []corev1.EnvVar `json:"env,omitempty"`
- // ImageLabels is a list of docker labels that are applied to the resulting image.
+ // imageLabels is a list of docker labels that are applied to the resulting image.
// User can override a default label by providing a label with the same name in their
// Build/BuildConfig.
// +optional
ImageLabels []ImageLabel `json:"imageLabels,omitempty"`
- // Resources defines resource requirements to execute the build.
+ // resources defines resource requirements to execute the build.
// +optional
Resources corev1.ResourceRequirements `json:"resources"`
}
type ImageLabel struct {
- // Name defines the name of the label. It must have non-zero length.
+ // name defines the name of the label. It must have non-zero length.
Name string `json:"name"`
- // Value defines the literal value of the label.
+ // value defines the literal value of the label.
// +optional
Value string `json:"value,omitempty"`
}
type BuildOverrides struct {
- // ImageLabels is a list of docker labels that are applied to the resulting image.
+ // imageLabels is a list of docker labels that are applied to the resulting image.
// If user provided a label in their Build/BuildConfig with the same name as one in this
// list, the user's label will be overwritten.
// +optional
ImageLabels []ImageLabel `json:"imageLabels,omitempty"`
- // NodeSelector is a selector which must be true for the build pod to fit on a node
+ // nodeSelector is a selector which must be true for the build pod to fit on a node
// +optional
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
- // Tolerations is a list of Tolerations that will override any existing
+ // tolerations is a list of Tolerations that will override any existing
// tolerations set on a build pod.
// +optional
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
- // ForcePull overrides, if set, the equivalent value in the builds,
+ // forcePull overrides, if set, the equivalent value in the builds,
// i.e. false disables force pull for all builds,
// true enables force pull for all builds,
// independently of what each build specifies itself
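An illustrative cluster-wide build configuration using the renamed fields; label, variable, and selector values are placeholders:

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	spec := configv1.BuildSpec{
		BuildDefaults: configv1.BuildDefaults{
			// Applied only when a build does not set the variable itself.
			Env: []corev1.EnvVar{{Name: "GIT_SSL_NO_VERIFY", Value: "false"}},
			// Overridable per Build/BuildConfig; the last duplicate name wins.
			ImageLabels: []configv1.ImageLabel{{Name: "io.example.vendor", Value: "example"}},
		},
		BuildOverrides: configv1.BuildOverrides{
			// Unlike defaults, overrides win over per-build settings.
			NodeSelector: map[string]string{"node-role.kubernetes.io/worker": ""},
		},
	}
	fmt.Println(len(spec.BuildDefaults.ImageLabels), "default image label(s)")
}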
diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
index 7951762cc..4a6823640 100644
--- a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
+++ b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
@@ -34,7 +34,6 @@ type ClusterOperator struct {
metav1.ObjectMeta `json:"metadata"`
// spec holds configuration that could apply to any operator.
- // +kubebuilder:validation:Required
// +required
Spec ClusterOperatorSpec `json:"spec"`
@@ -80,14 +79,12 @@ type ClusterOperatorStatus struct {
type OperandVersion struct {
// name is the name of the particular operand this version is for. It usually matches container images, not operators.
- // +kubebuilder:validation:Required
// +required
Name string `json:"name"`
// version indicates which version of a particular operand is currently being managed. It must always match the Available
// operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to rollout
// 1.1.0
- // +kubebuilder:validation:Required
// +required
Version string `json:"version"`
}
@@ -95,18 +92,15 @@ type OperandVersion struct {
// ObjectReference contains enough information to let you inspect or modify the referred object.
type ObjectReference struct {
// group of the referent.
- // +kubebuilder:validation:Required
// +required
Group string `json:"group"`
// resource of the referent.
- // +kubebuilder:validation:Required
// +required
Resource string `json:"resource"`
// namespace of the referent.
// +optional
Namespace string `json:"namespace,omitempty"`
// name of the referent.
- // +kubebuilder:validation:Required
// +required
Name string `json:"name"`
}
@@ -128,17 +122,14 @@ const (
// +k8s:deepcopy-gen=true
type ClusterOperatorStatusCondition struct {
// type specifies the aspect reported by this condition.
- // +kubebuilder:validation:Required
// +required
Type ClusterStatusConditionType `json:"type"`
// status of the condition, one of True, False, Unknown.
- // +kubebuilder:validation:Required
// +required
Status ConditionStatus `json:"status"`
// lastTransitionTime is the time of the last update to the current status property.
- // +kubebuilder:validation:Required
// +required
LastTransitionTime metav1.Time `json:"lastTransitionTime"`
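The three condition fields above are exactly the ones whose duplicated Required markers are dropped; they remain `+required`. A sketch assuming the package's usual OperatorAvailable and ConditionTrue constants:

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	cond := configv1.ClusterOperatorStatusCondition{
		Type:               configv1.OperatorAvailable,
		Status:             configv1.ConditionTrue,
		LastTransitionTime: metav1.Now(),
	}
	fmt.Printf("%s=%s\n", cond.Type, cond.Status)
}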
diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
index 61386a72e..8994ca97c 100644
--- a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
+++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
@@ -34,7 +34,6 @@ type ClusterVersion struct {
// spec is the desired state of the cluster version - the operator will work
// to ensure that the desired version is applied to the cluster.
- // +kubebuilder:validation:Required
// +required
Spec ClusterVersionSpec `json:"spec"`
// status contains information about the available updates and any in-progress
@@ -51,7 +50,6 @@ type ClusterVersionSpec struct {
// clusterID uniquely identifies this cluster. This is expected to be
// an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in
// hexadecimal values). This is a required field.
- // +kubebuilder:validation:Required
// +required
ClusterID ClusterID `json:"clusterID"`
@@ -138,7 +136,6 @@ type ClusterVersionStatus struct {
// desired is the version that the cluster is reconciling towards.
// If the cluster is not yet fully initialized desired will be set
// with the information available, which may be an image or a tag.
- // +kubebuilder:validation:Required
// +required
Desired Release `json:"desired"`
@@ -156,14 +153,12 @@ type ClusterVersionStatus struct {
// observedGeneration reports which version of the spec is being synced.
// If this value is not equal to metadata.generation, then the desired
// and conditions fields may represent a previous version.
- // +kubebuilder:validation:Required
// +required
ObservedGeneration int64 `json:"observedGeneration"`
// versionHash is a fingerprint of the content that the cluster will be
// updated with. It is used by the operator to avoid unnecessary work
// and is for internal use only.
- // +kubebuilder:validation:Required
// +required
VersionHash string `json:"versionHash"`
@@ -190,7 +185,6 @@ type ClusterVersionStatus struct {
// may be empty if no updates are recommended, if the update service
// is unavailable, or if an invalid channel has been specified.
// +nullable
- // +kubebuilder:validation:Required
// +listType=atomic
// +required
AvailableUpdates []Release `json:"availableUpdates"`
@@ -226,12 +220,10 @@ type UpdateHistory struct {
// indicates the update is not fully applied, while the Completed state
// indicates the update was successfully rolled out at least once (all
// parts of the update successfully applied).
- // +kubebuilder:validation:Required
// +required
State UpdateState `json:"state"`
// startedTime is the time at which the update was started.
- // +kubebuilder:validation:Required
// +required
StartedTime metav1.Time `json:"startedTime"`
@@ -239,7 +231,6 @@ type UpdateHistory struct {
// that is currently being applied will have a null completion time.
// Completion time will always be set for entries that are not the current
// update (usually to the started time of the next update).
- // +kubebuilder:validation:Required
// +required
// +nullable
CompletionTime *metav1.Time `json:"completionTime"`
@@ -253,7 +244,6 @@ type UpdateHistory struct {
// image is a container image location that contains the update. This value
// is always populated.
- // +kubebuilder:validation:Required
// +required
Image string `json:"image"`
@@ -261,7 +251,6 @@ type UpdateHistory struct {
// before it was installed. If this is false the cluster may not be trusted.
// Verified does not cover upgradeable checks that depend on the cluster
// state at the time when the update target was accepted.
- // +kubebuilder:validation:Required
// +required
Verified bool `json:"verified"`
@@ -288,7 +277,7 @@ const (
)
// ClusterVersionCapability enumerates optional, core cluster components.
-// +kubebuilder:validation:Enum=openshift-samples;baremetal;marketplace;Console;Insights;Storage;CSISnapshot;NodeTuning;MachineAPI;Build;DeploymentConfig;ImageRegistry;OperatorLifecycleManager;CloudCredential;Ingress;CloudControllerManager
+// +kubebuilder:validation:Enum=openshift-samples;baremetal;marketplace;Console;Insights;Storage;CSISnapshot;NodeTuning;MachineAPI;Build;DeploymentConfig;ImageRegistry;OperatorLifecycleManager;CloudCredential;Ingress;CloudControllerManager;OperatorLifecycleManagerV1
type ClusterVersionCapability string
const (
@@ -379,10 +368,14 @@ const (
// allows distributing Docker images
ClusterVersionCapabilityImageRegistry ClusterVersionCapability = "ImageRegistry"
- // ClusterVersionCapabilityOperatorLifecycleManager manages the Operator Lifecycle Manager
+ // ClusterVersionCapabilityOperatorLifecycleManager manages the Operator Lifecycle Manager (legacy)
// which itself manages the lifecycle of operators
ClusterVersionCapabilityOperatorLifecycleManager ClusterVersionCapability = "OperatorLifecycleManager"
+ // ClusterVersionCapabilityOperatorLifecycleManagerV1 manages the Operator Lifecycle Manager (v1)
+ // which itself manages the lifecycle of operators
+ ClusterVersionCapabilityOperatorLifecycleManagerV1 ClusterVersionCapability = "OperatorLifecycleManagerV1"
+
// ClusterVersionCapabilityCloudCredential manages credentials for cloud providers
// in openshift cluster
ClusterVersionCapabilityCloudCredential ClusterVersionCapability = "CloudCredential"
@@ -422,6 +415,7 @@ var KnownClusterVersionCapabilities = []ClusterVersionCapability{
ClusterVersionCapabilityDeploymentConfig,
ClusterVersionCapabilityImageRegistry,
ClusterVersionCapabilityOperatorLifecycleManager,
+ ClusterVersionCapabilityOperatorLifecycleManagerV1,
ClusterVersionCapabilityCloudCredential,
ClusterVersionCapabilityIngress,
ClusterVersionCapabilityCloudControllerManager,
@@ -600,6 +594,7 @@ var ClusterVersionCapabilitySets = map[ClusterVersionCapabilitySet][]ClusterVers
ClusterVersionCapabilityDeploymentConfig,
ClusterVersionCapabilityImageRegistry,
ClusterVersionCapabilityOperatorLifecycleManager,
+ ClusterVersionCapabilityOperatorLifecycleManagerV1,
ClusterVersionCapabilityCloudCredential,
ClusterVersionCapabilityIngress,
ClusterVersionCapabilityCloudControllerManager,
@@ -618,6 +613,7 @@ var ClusterVersionCapabilitySets = map[ClusterVersionCapabilitySet][]ClusterVers
ClusterVersionCapabilityDeploymentConfig,
ClusterVersionCapabilityImageRegistry,
ClusterVersionCapabilityOperatorLifecycleManager,
+ ClusterVersionCapabilityOperatorLifecycleManagerV1,
ClusterVersionCapabilityCloudCredential,
ClusterVersionCapabilityIngress,
ClusterVersionCapabilityCloudControllerManager,
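Whether a cluster actually has the new capability enabled is read from the ClusterVersion status rather than from these sets. A rough sketch, assuming the EnabledCapabilities field on ClusterVersionCapabilitiesStatus:

    package example

    import configv1 "github.com/openshift/api/config/v1"

    // capabilityEnabled reports whether the given optional capability, such as
    // ClusterVersionCapabilityOperatorLifecycleManagerV1, is enabled on the cluster.
    func capabilityEnabled(cv *configv1.ClusterVersion, want configv1.ClusterVersionCapability) bool {
        for _, c := range cv.Status.Capabilities.EnabledCapabilities {
            if c == want {
                return true
            }
        }
        return false
    }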
@@ -664,28 +660,23 @@ type ClusterVersionCapabilitiesStatus struct {
// +k8s:deepcopy-gen=true
type ComponentOverride struct {
// kind identifies which object to override.
- // +kubebuilder:validation:Required
// +required
Kind string `json:"kind"`
// group identifies the API group that the kind is in.
- // +kubebuilder:validation:Required
// +required
Group string `json:"group"`
// namespace is the component's namespace. If the resource is cluster
// scoped, the namespace should be empty.
- // +kubebuilder:validation:Required
// +required
Namespace string `json:"namespace"`
// name is the component's name.
- // +kubebuilder:validation:Required
// +required
Name string `json:"name"`
// unmanaged controls if cluster version operator should stop managing the
// resources in this cluster.
// Default: false
- // +kubebuilder:validation:Required
// +required
Unmanaged bool `json:"unmanaged"`
}
@@ -694,8 +685,8 @@ type ComponentOverride struct {
type URL string
// Update represents an administrator update request.
-// +kubebuilder:validation:XValidation:rule="has(self.architecture) && has(self.image) ? (self.architecture == '' || self.image == '') : true",message="cannot set both Architecture and Image"
-// +kubebuilder:validation:XValidation:rule="has(self.architecture) && self.architecture != '' ? self.version != '' : true",message="Version must be set if Architecture is set"
+// +kubebuilder:validation:XValidation:rule="has(self.architecture) && has(self.image) ? (self.architecture == \"\" || self.image == \"\") : true",message="cannot set both Architecture and Image"
+// +kubebuilder:validation:XValidation:rule="has(self.architecture) && self.architecture != \"\" ? self.version != \"\" : true",message="Version must be set if Architecture is set"
// +k8s:deepcopy-gen=true
type Update struct {
// architecture is an optional field that indicates the desired
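The two rewritten CEL rules are enforced by the API server on admission, not by the Go compiler. A sketch of values they accept and reject, with a hypothetical version and pullspec:

    package main

    import configv1 "github.com/openshift/api/config/v1"

    func main() {
        // Accepted: architecture plus version, no image, satisfying both rules.
        _ = configv1.Update{
            Architecture: configv1.ClusterVersionArchitectureMulti,
            Version:      "4.18.5",
        }

        // Rejected by the first rule: architecture and image are mutually exclusive.
        _ = configv1.Update{
            Architecture: configv1.ClusterVersionArchitectureMulti,
            Image:        "quay.io/openshift-release-dev/ocp-release@sha256:...", // hypothetical pullspec
        }
    }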
@@ -739,6 +730,16 @@ type Update struct {
// Release represents an OpenShift release image and associated metadata.
// +k8s:deepcopy-gen=true
type Release struct {
+ // architecture is an optional field that indicates the
+ // value of the cluster architecture. In this context cluster
+ // architecture means either a single architecture or a multi
+ // architecture.
+ // Valid values are 'Multi' and empty.
+ //
+ // +openshift:enable:FeatureGate=ImageStreamImportMode
+ // +optional
+ Architecture ClusterVersionArchitecture `json:"architecture,omitempty"`
+
// version is a semantic version identifying the update version. When this
// field is part of spec, version is optional if image is specified.
// +required
@@ -776,7 +777,6 @@ const RetrievedUpdates ClusterStatusConditionType = "RetrievedUpdates"
// may not be recommended for the current cluster.
type ConditionalUpdate struct {
// release is the target of the update.
- // +kubebuilder:validation:Required
// +required
Release Release `json:"release"`
@@ -785,7 +785,6 @@ type ConditionalUpdate struct {
// operator will evaluate all entries, and only recommend the
// update if there is at least one entry and all entries
// recommend the update.
- // +kubebuilder:validation:Required
// +kubebuilder:validation:MinItems=1
// +patchMergeKey=name
// +patchStrategy=merge
@@ -809,7 +808,6 @@ type ConditionalUpdate struct {
// +k8s:deepcopy-gen=true
type ConditionalUpdateRisk struct {
// url contains information about this risk.
- // +kubebuilder:validation:Required
// +kubebuilder:validation:Format=uri
// +kubebuilder:validation:MinLength=1
// +required
@@ -818,7 +816,6 @@ type ConditionalUpdateRisk struct {
// name is the CamelCase reason for not recommending a
// conditional update, in the event that matchingRules match the
// cluster state.
- // +kubebuilder:validation:Required
// +kubebuilder:validation:MinLength=1
// +required
Name string `json:"name"`
@@ -828,7 +825,6 @@ type ConditionalUpdateRisk struct {
// state. This is only to be consumed by humans. It may
// contain Line Feed characters (U+000A), which should be
// rendered as new lines.
- // +kubebuilder:validation:Required
// +kubebuilder:validation:MinLength=1
// +required
Message string `json:"message"`
@@ -839,7 +835,6 @@ type ConditionalUpdateRisk struct {
// operator will walk the slice in order, and stop after the
// first it can successfully evaluate. If no condition can be
// successfully evaluated, the update will not be recommended.
- // +kubebuilder:validation:Required
// +kubebuilder:validation:MinItems=1
// +listType=atomic
// +required
@@ -854,24 +849,22 @@ type ConditionalUpdateRisk struct {
type ClusterCondition struct {
// type represents the cluster-condition type. This defines
// the members and semantics of any additional properties.
- // +kubebuilder:validation:Required
// +kubebuilder:validation:Enum={"Always","PromQL"}
// +required
Type string `json:"type"`
- // promQL represents a cluster condition based on PromQL.
+ // promql represents a cluster condition based on PromQL.
// +optional
PromQL *PromQLClusterCondition `json:"promql,omitempty"`
}
// PromQLClusterCondition represents a cluster condition based on PromQL.
type PromQLClusterCondition struct {
- // PromQL is a PromQL query classifying clusters. This query
+ // promql is a PromQL query classifying clusters. This query
// should return a 1 in the match case and a 0 in the
// does-not-match case. Queries which return no time
// series, or which return values besides 0 or 1, are
// evaluation failures.
- // +kubebuilder:validation:Required
// +required
PromQL string `json:"promql"`
}
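As an illustration, a matching rule built from these two types could look like the sketch below; the query itself is hypothetical and follows the return-1-or-0 contract described above:

    package main

    import configv1 "github.com/openshift/api/config/v1"

    func main() {
        _ = configv1.ClusterCondition{
            Type: "PromQL",
            PromQL: &configv1.PromQLClusterCondition{
                // Hypothetical query: 1 for AWS clusters, 0 for everything else.
                PromQL: `group(cluster_infrastructure_provider{type="AWS"}) or 0 * group(cluster_infrastructure_provider)`,
            },
        }
    }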
@@ -900,7 +893,7 @@ type SignatureStore struct {
//
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:XValidation:rule="isURL(self)",message="url must be a valid absolute URL"
- // +kubebuilder:validation:Required
+ // +required
URL string `json:"url"`
// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
diff --git a/vendor/github.com/openshift/api/config/v1/types_console.go b/vendor/github.com/openshift/api/config/v1/types_console.go
index e8f197b34..0ccc4a8f8 100644
--- a/vendor/github.com/openshift/api/config/v1/types_console.go
+++ b/vendor/github.com/openshift/api/config/v1/types_console.go
@@ -28,7 +28,6 @@ type Console struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
- // +kubebuilder:validation:Required
// +required
Spec ConsoleSpec `json:"spec"`
// status holds observed values from the cluster. They may not be overridden.
diff --git a/vendor/github.com/openshift/api/config/v1/types_dns.go b/vendor/github.com/openshift/api/config/v1/types_dns.go
index 5daa5d78d..06eb75ccf 100644
--- a/vendor/github.com/openshift/api/config/v1/types_dns.go
+++ b/vendor/github.com/openshift/api/config/v1/types_dns.go
@@ -24,7 +24,6 @@ type DNS struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
- // +kubebuilder:validation:Required
// +required
Spec DNSSpec `json:"spec"`
// status holds observed values from the cluster. They may not be overridden.
@@ -121,7 +120,7 @@ type DNSPlatformSpec struct {
// and must handle unrecognized platforms with best-effort defaults.
//
// +unionDiscriminator
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:XValidation:rule="self in ['','AWS']",message="allowed values are '' and 'AWS'"
Type PlatformType `json:"type"`
diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go
index 88d94ac52..81bc14f2c 100644
--- a/vendor/github.com/openshift/api/config/v1/types_feature.go
+++ b/vendor/github.com/openshift/api/config/v1/types_feature.go
@@ -26,7 +26,6 @@ type FeatureGate struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
- // +kubebuilder:validation:Required
// +required
// +kubebuilder:validation:XValidation:rule="has(oldSelf.featureSet) ? has(self.featureSet) : true",message=".spec.featureSet cannot be removed"
Spec FeatureGateSpec `json:"spec"`
@@ -117,7 +116,6 @@ type FeatureGateStatus struct {
type FeatureGateDetails struct {
// version matches the version provided by the ClusterVersion and in the ClusterOperator.Status.Versions field.
- // +kubebuilder:validation:Required
// +required
Version string `json:"version"`
// enabled is a list of all feature gates that are enabled in the cluster for the named version.
@@ -130,7 +128,7 @@ type FeatureGateDetails struct {
type FeatureGateAttributes struct {
// name is the name of the FeatureGate.
- // +kubebuilder:validation:Required
+ // +required
Name FeatureGateName `json:"name"`
// possible (probable?) future additions include
diff --git a/vendor/github.com/openshift/api/config/v1/types_image.go b/vendor/github.com/openshift/api/config/v1/types_image.go
index d3c694a56..3db935c7f 100644
--- a/vendor/github.com/openshift/api/config/v1/types_image.go
+++ b/vendor/github.com/openshift/api/config/v1/types_image.go
@@ -29,7 +29,6 @@ type Image struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
- // +kubebuilder:validation:Required
// +required
Spec ImageSpec `json:"spec"`
// status holds observed values from the cluster. They may not be overridden.
diff --git a/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go
index 74df4027f..0bd0d7770 100644
--- a/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go
+++ b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go
@@ -25,7 +25,6 @@ type ImageContentPolicy struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
- // +kubebuilder:validation:Required
// +required
Spec ImageContentPolicySpec `json:"spec"`
}
@@ -76,7 +75,6 @@ type ImageContentPolicyList struct {
type RepositoryDigestMirrors struct {
// source is the repository that users refer to, e.g. in image pull specifications.
// +required
- // +kubebuilder:validation:Required
// +kubebuilder:validation:Pattern=`^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$`
Source string `json:"source"`
// allowMirrorByTags if true, the mirrors can be used to pull the images that are referenced by their tags. Default is false, the mirrors only work when pulling the images that are referenced by their digests.
diff --git a/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go b/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go
index 43d748c0c..df2258d12 100644
--- a/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go
+++ b/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go
@@ -25,7 +25,6 @@ type ImageDigestMirrorSet struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
- // +kubebuilder:validation:Required
// +required
Spec ImageDigestMirrorSetSpec `json:"spec"`
// status contains the observed state of the resource.
@@ -110,7 +109,6 @@ type ImageDigestMirrors struct {
// for more information about the format, see the document about the location field:
// https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table
// +required
- // +kubebuilder:validation:Required
// +kubebuilder:validation:Pattern=`^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$`
Source string `json:"source"`
// mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified.
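A digest-mirror entry that satisfies the source pattern above might be sketched as follows; the registry names are placeholders, and the Mirrors element type is assumed to be the package's ImageMirror string alias:

    package main

    import configv1 "github.com/openshift/api/config/v1"

    func main() {
        _ = configv1.ImageDigestMirrorSet{
            Spec: configv1.ImageDigestMirrorSetSpec{
                ImageDigestMirrors: []configv1.ImageDigestMirrors{{
                    Source:  "registry.example.com/team/app",
                    Mirrors: []configv1.ImageMirror{"mirror.internal.example.com/team/app"},
                }},
            },
        }
    }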
diff --git a/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go b/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go
index ca8d35515..b7e1a6a87 100644
--- a/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go
+++ b/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go
@@ -25,7 +25,6 @@ type ImageTagMirrorSet struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
- // +kubebuilder:validation:Required
// +required
Spec ImageTagMirrorSetSpec `json:"spec"`
// status contains the observed state of the resource.
@@ -95,7 +94,6 @@ type ImageTagMirrors struct {
// for more information about the format, see the document about the location field:
// https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table
// +required
- // +kubebuilder:validation:Required
// +kubebuilder:validation:Pattern=`^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$`
Source string `json:"source"`
// mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified.
diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
index 392d128c1..0293603d7 100644
--- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
+++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
@@ -27,7 +27,6 @@ type Infrastructure struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
- // +kubebuilder:validation:Required
// +required
Spec InfrastructureSpec `json:"spec"`
// status holds observed values from the cluster. They may not be overridden.
@@ -99,7 +98,8 @@ type InfrastructureStatus struct {
// The 'External' mode indicates that the control plane is hosted externally to the cluster and that
// its components are not visible within the cluster.
// +kubebuilder:default=HighlyAvailable
- // +kubebuilder:validation:Enum=HighlyAvailable;SingleReplica;External
+ // +openshift:validation:FeatureGateAwareEnum:featureGate="",enum=HighlyAvailable;SingleReplica;External
+ // +openshift:validation:FeatureGateAwareEnum:featureGate=HighlyAvailableArbiter,enum=HighlyAvailable;HighlyAvailableArbiter;SingleReplica;External
ControlPlaneTopology TopologyMode `json:"controlPlaneTopology"`
// infrastructureTopology expresses the expectations for infrastructure services that do not run on control
@@ -136,6 +136,9 @@ const (
// "HighlyAvailable" is for operators to configure high-availability as much as possible.
HighlyAvailableTopologyMode TopologyMode = "HighlyAvailable"
+ // "HighlyAvailableArbiter" is for operators to configure for an arbiter HA deployment.
+ HighlyAvailableArbiterMode TopologyMode = "HighlyAvailableArbiter"
+
// "SingleReplica" is for operators to avoid spending resources for high-availability purpose.
SingleReplicaTopologyMode TopologyMode = "SingleReplica"
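Operators that branch on control-plane topology now have a fourth mode to handle. A sketch, assuming the pre-existing External topology constant alongside the ones shown here:

    package example

    import configv1 "github.com/openshift/api/config/v1"

    // wantsFullHA reports whether a topology should be treated as highly available.
    func wantsFullHA(t configv1.TopologyMode) bool {
        switch t {
        case configv1.HighlyAvailableTopologyMode, configv1.HighlyAvailableArbiterMode:
            // Arbiter clusters still form a quorum, so treat them as HA.
            return true
        case configv1.SingleReplicaTopologyMode, configv1.ExternalTopologyMode:
            return false
        default:
            // Unknown future topologies get best-effort HA handling.
            return true
        }
    }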
@@ -257,7 +260,7 @@ const (
// ExternalPlatformSpec holds the desired state for the generic External infrastructure provider.
type ExternalPlatformSpec struct {
- // PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time.
+ // platformName holds the arbitrary string representing the infrastructure provider name, expected to be set at installation time.
// This field is solely for informational and reporting purposes and is not expected to be used for decision-making.
// +kubebuilder:default:="Unknown"
// +default="Unknown"
@@ -283,55 +286,55 @@ type PlatformSpec struct {
// +unionDiscriminator
Type PlatformType `json:"type"`
- // AWS contains settings specific to the Amazon Web Services infrastructure provider.
+ // aws contains settings specific to the Amazon Web Services infrastructure provider.
// +optional
AWS *AWSPlatformSpec `json:"aws,omitempty"`
- // Azure contains settings specific to the Azure infrastructure provider.
+ // azure contains settings specific to the Azure infrastructure provider.
// +optional
Azure *AzurePlatformSpec `json:"azure,omitempty"`
- // GCP contains settings specific to the Google Cloud Platform infrastructure provider.
+ // gcp contains settings specific to the Google Cloud Platform infrastructure provider.
// +optional
GCP *GCPPlatformSpec `json:"gcp,omitempty"`
- // BareMetal contains settings specific to the BareMetal platform.
+ // baremetal contains settings specific to the BareMetal platform.
// +optional
BareMetal *BareMetalPlatformSpec `json:"baremetal,omitempty"`
- // OpenStack contains settings specific to the OpenStack infrastructure provider.
+ // openstack contains settings specific to the OpenStack infrastructure provider.
// +optional
OpenStack *OpenStackPlatformSpec `json:"openstack,omitempty"`
- // Ovirt contains settings specific to the oVirt infrastructure provider.
+ // ovirt contains settings specific to the oVirt infrastructure provider.
// +optional
Ovirt *OvirtPlatformSpec `json:"ovirt,omitempty"`
- // VSphere contains settings specific to the VSphere infrastructure provider.
+ // vsphere contains settings specific to the VSphere infrastructure provider.
// +optional
VSphere *VSpherePlatformSpec `json:"vsphere,omitempty"`
- // IBMCloud contains settings specific to the IBMCloud infrastructure provider.
+ // ibmcloud contains settings specific to the IBMCloud infrastructure provider.
// +optional
IBMCloud *IBMCloudPlatformSpec `json:"ibmcloud,omitempty"`
- // Kubevirt contains settings specific to the kubevirt infrastructure provider.
+ // kubevirt contains settings specific to the kubevirt infrastructure provider.
// +optional
Kubevirt *KubevirtPlatformSpec `json:"kubevirt,omitempty"`
- // EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.
+ // equinixMetal contains settings specific to the Equinix Metal infrastructure provider.
// +optional
EquinixMetal *EquinixMetalPlatformSpec `json:"equinixMetal,omitempty"`
- // PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider.
+ // powervs contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider.
// +optional
PowerVS *PowerVSPlatformSpec `json:"powervs,omitempty"`
- // AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.
+ // alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.
// +optional
AlibabaCloud *AlibabaCloudPlatformSpec `json:"alibabaCloud,omitempty"`
- // Nutanix contains settings specific to the Nutanix infrastructure provider.
+ // nutanix contains settings specific to the Nutanix infrastructure provider.
// +optional
Nutanix *NutanixPlatformSpec `json:"nutanix,omitempty"`
@@ -401,59 +404,59 @@ type PlatformStatus struct {
// Currently this value cannot be changed once set.
Type PlatformType `json:"type"`
- // AWS contains settings specific to the Amazon Web Services infrastructure provider.
+ // aws contains settings specific to the Amazon Web Services infrastructure provider.
// +optional
AWS *AWSPlatformStatus `json:"aws,omitempty"`
- // Azure contains settings specific to the Azure infrastructure provider.
+ // azure contains settings specific to the Azure infrastructure provider.
// +optional
Azure *AzurePlatformStatus `json:"azure,omitempty"`
- // GCP contains settings specific to the Google Cloud Platform infrastructure provider.
+ // gcp contains settings specific to the Google Cloud Platform infrastructure provider.
// +optional
GCP *GCPPlatformStatus `json:"gcp,omitempty"`
- // BareMetal contains settings specific to the BareMetal platform.
+ // baremetal contains settings specific to the BareMetal platform.
// +optional
BareMetal *BareMetalPlatformStatus `json:"baremetal,omitempty"`
- // OpenStack contains settings specific to the OpenStack infrastructure provider.
+ // openstack contains settings specific to the OpenStack infrastructure provider.
// +optional
OpenStack *OpenStackPlatformStatus `json:"openstack,omitempty"`
- // Ovirt contains settings specific to the oVirt infrastructure provider.
+ // ovirt contains settings specific to the oVirt infrastructure provider.
// +optional
Ovirt *OvirtPlatformStatus `json:"ovirt,omitempty"`
- // VSphere contains settings specific to the VSphere infrastructure provider.
+ // vsphere contains settings specific to the VSphere infrastructure provider.
// +optional
VSphere *VSpherePlatformStatus `json:"vsphere,omitempty"`
- // IBMCloud contains settings specific to the IBMCloud infrastructure provider.
+ // ibmcloud contains settings specific to the IBMCloud infrastructure provider.
// +optional
IBMCloud *IBMCloudPlatformStatus `json:"ibmcloud,omitempty"`
- // Kubevirt contains settings specific to the kubevirt infrastructure provider.
+ // kubevirt contains settings specific to the kubevirt infrastructure provider.
// +optional
Kubevirt *KubevirtPlatformStatus `json:"kubevirt,omitempty"`
- // EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.
+ // equinixMetal contains settings specific to the Equinix Metal infrastructure provider.
// +optional
EquinixMetal *EquinixMetalPlatformStatus `json:"equinixMetal,omitempty"`
- // PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider.
+ // powervs contains settings specific to the Power Systems Virtual Servers infrastructure provider.
// +optional
PowerVS *PowerVSPlatformStatus `json:"powervs,omitempty"`
- // AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.
+ // alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.
// +optional
AlibabaCloud *AlibabaCloudPlatformStatus `json:"alibabaCloud,omitempty"`
- // Nutanix contains settings specific to the Nutanix infrastructure provider.
+ // nutanix contains settings specific to the Nutanix infrastructure provider.
// +optional
Nutanix *NutanixPlatformStatus `json:"nutanix,omitempty"`
- // External contains settings specific to the generic External infrastructure provider.
+ // external contains settings specific to the generic External infrastructure provider.
// +optional
External *ExternalPlatformStatus `json:"external,omitempty"`
}
@@ -492,7 +495,7 @@ type AWSPlatformStatus struct {
// region holds the default AWS region for new AWS resources created by the cluster.
Region string `json:"region"`
- // ServiceEndpoints list contains custom endpoints which will override default
+ // serviceEndpoints list contains custom endpoints which will override the default
// service endpoints of AWS services.
// There must be only one ServiceEndpoint for a service.
// +listType=atomic
@@ -507,12 +510,25 @@ type AWSPlatformStatus struct {
// +listType=atomic
// +optional
ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"`
+
+ // cloudLoadBalancerConfig holds configuration related to DNS and cloud
+ // load balancers. It allows configuration of in-cluster DNS as an alternative
+ // to the platform default DNS implementation.
+ // When using the ClusterHosted DNS type, Load Balancer IP addresses
+ // must be provided for the API and internal API load balancers as well as the
+ // ingress load balancer.
+ //
+ // +default={"dnsType": "PlatformDefault"}
+ // +kubebuilder:default={"dnsType": "PlatformDefault"}
+ // +openshift:enable:FeatureGate=AWSClusterHostedDNS
+ // +optional
+ // +nullable
+ CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"`
}
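A ClusterHosted value for the new AWS field might look like the sketch below; the CloudLoadBalancerConfig and CloudLoadBalancerIPs shapes are assumed to match the existing GCP variant, and the addresses are documentation placeholders:

    package main

    import configv1 "github.com/openshift/api/config/v1"

    func main() {
        _ = configv1.AWSPlatformStatus{
            Region: "us-east-1",
            CloudLoadBalancerConfig: &configv1.CloudLoadBalancerConfig{
                DNSType: configv1.ClusterHostedDNSType,
                ClusterHosted: &configv1.CloudLoadBalancerIPs{
                    APILoadBalancerIPs:     []configv1.IP{"192.0.2.10"},
                    APIIntLoadBalancerIPs:  []configv1.IP{"192.0.2.11"},
                    IngressLoadBalancerIPs: []configv1.IP{"192.0.2.12"},
                },
            },
        }
    }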
// AWSResourceTag is a tag to apply to AWS resources created for the cluster.
type AWSResourceTag struct {
// key is the key of the tag
- // +kubebuilder:validation:Required
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=128
// +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$`
@@ -521,7 +537,6 @@ type AWSResourceTag struct {
// value is the value of the tag.
// Some AWS services do not support empty values. Since tags are added to resources in many services, the
// length of the tag value must meet the requirements of all services.
- // +kubebuilder:validation:Required
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=256
// +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$`
@@ -570,14 +585,14 @@ type AzureResourceTag struct {
// key is the key part of the tag. A tag key can have a maximum of 128 characters and cannot be empty. Key
// must begin with a letter, end with a letter, number or underscore, and must contain only alphanumeric
// characters and the following special characters `_ . -`.
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=128
// +kubebuilder:validation:Pattern=`^[a-zA-Z]([0-9A-Za-z_.-]*[0-9A-Za-z_])?$`
Key string `json:"key"`
// value is the value part of the tag. A tag value can have a maximum of 256 characters and cannot be empty. Value
// must contain only alphanumeric characters and the following special characters `_ + , - . / : ; < = > ? @`.
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=256
// +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.=+-@]+$`
@@ -647,12 +662,12 @@ type GCPPlatformStatus struct {
// Tombstone the field as a reminder.
// ClusterHostedDNS ClusterHostedDNS `json:"clusterHostedDNS,omitempty"`
- // cloudLoadBalancerConfig is a union that contains the IP addresses of API,
- // API-Int and Ingress Load Balancers created on the cloud platform. These
- // values would not be populated on on-prem platforms. These Load Balancer
- // IPs are used to configure the in-cluster DNS instances for API, API-Int
- // and Ingress services. `dnsType` is expected to be set to `ClusterHosted`
- // when these Load Balancer IP addresses are populated and used.
+ // cloudLoadBalancerConfig holds configuration related to DNS and cloud
+ // load balancers. It allows configuration of in-cluster DNS as an alternative
+ // to the platform default DNS implementation.
+ // When using the ClusterHosted DNS type, Load Balancer IP addresses
+ // must be provided for the API and internal API load balancers as well as the
+ // ingress load balancer.
//
// +default={"dnsType": "PlatformDefault"}
// +kubebuilder:default={"dnsType": "PlatformDefault"}
@@ -669,7 +684,7 @@ type GCPResourceLabel struct {
// and the following special characters `_-`. Label key must not have the reserved prefixes `kubernetes-io`
// and `openshift-io`.
// +kubebuilder:validation:XValidation:rule="!self.startsWith('openshift-io') && !self.startsWith('kubernetes-io')",message="label keys must not start with either `openshift-io` or `kubernetes-io`"
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=63
// +kubebuilder:validation:Pattern=`^[a-z][0-9a-z_-]{0,62}$`
@@ -677,7 +692,7 @@ type GCPResourceLabel struct {
// value is the value part of the label. A label value can have a maximum of 63 characters and cannot be empty.
// Value must contain only lowercase letters, numeric characters, and the following special characters `_-`.
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=63
// +kubebuilder:validation:Pattern=`^[0-9a-z_-]{1,63}$`
@@ -693,7 +708,7 @@ type GCPResourceTag struct {
// An OrganizationID must consist of decimal numbers, and cannot have leading zeroes.
// A ProjectID must be 6 to 30 characters in length, can only contain lowercase letters, numbers,
// and hyphens, and must start with a letter, and cannot end with a hyphen.
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=32
// +kubebuilder:validation:Pattern=`(^[1-9][0-9]{0,31}$)|(^[a-z][a-z0-9-]{4,28}[a-z0-9]$)`
@@ -702,7 +717,7 @@ type GCPResourceTag struct {
// key is the key part of the tag. A tag key can have a maximum of 63 characters and cannot be empty.
// Tag key must begin and end with an alphanumeric character, and must contain only uppercase, lowercase
// alphanumeric characters, and the following special characters `._-`.
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=63
// +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([0-9A-Za-z_.-]{0,61}[a-zA-Z0-9])?$`
@@ -711,7 +726,7 @@ type GCPResourceTag struct {
// value is the value part of the tag. A tag value can have a maximum of 63 characters and cannot be empty.
// Tag value must begin and end with an alphanumeric character, and must contain only uppercase, lowercase
// alphanumeric characters, and the following special characters `_-.@%=+:,*#&(){}[]` and spaces.
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=63
// +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([0-9A-Za-z_.@%=+:,*#&()\[\]{}\-\s]{0,61}[a-zA-Z0-9])?$`
@@ -1148,12 +1163,34 @@ type VSpherePlatformLoadBalancer struct {
Type PlatformLoadBalancerType `json:"type,omitempty"`
}
-// VSpherePlatformFailureDomainSpec holds the region and zone failure domain and
-// the vCenter topology of that failure domain.
+// The VSphereFailureDomainZoneType is a string representation of a failure domain
+// zone type. There are two supportable types: HostGroup and ComputeCluster.
+// +enum
+type VSphereFailureDomainZoneType string
+
+// The VSphereFailureDomainRegionType is a string representation of a failure domain
+// region type. There are two supportable types: ComputeCluster and Datacenter.
+// +enum
+type VSphereFailureDomainRegionType string
+
+const (
+ // HostGroupFailureDomainZone is a failure domain zone for a vCenter vm-host group.
+ HostGroupFailureDomainZone VSphereFailureDomainZoneType = "HostGroup"
+ // ComputeClusterFailureDomainZone is a failure domain zone for a vCenter compute cluster.
+ ComputeClusterFailureDomainZone VSphereFailureDomainZoneType = "ComputeCluster"
+ // DatacenterFailureDomainRegion is a failure domain region for a vCenter datacenter.
+ DatacenterFailureDomainRegion VSphereFailureDomainRegionType = "Datacenter"
+ // ComputeClusterFailureDomainRegion is a failure domain region for a vCenter compute cluster.
+ ComputeClusterFailureDomainRegion VSphereFailureDomainRegionType = "ComputeCluster"
+)
+
+// VSpherePlatformFailureDomainSpec holds the region and zone failure domain and the vCenter topology of that failure domain.
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=VSphereHostVMGroupZonal,rule="has(self.zoneAffinity) && self.zoneAffinity.type == 'HostGroup' ? has(self.regionAffinity) && self.regionAffinity.type == 'ComputeCluster' : true",message="when zoneAffinity type is HostGroup, regionAffinity type must be ComputeCluster"
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=VSphereHostVMGroupZonal,rule="has(self.zoneAffinity) && self.zoneAffinity.type == 'ComputeCluster' ? has(self.regionAffinity) && self.regionAffinity.type == 'Datacenter' : true",message="when zoneAffinity type is ComputeCluster, regionAffinity type must be Datacenter"
type VSpherePlatformFailureDomainSpec struct {
// name defines the arbitrary but unique name
// of a failure domain.
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=256
Name string `json:"name"`
@@ -1163,7 +1200,7 @@ type VSpherePlatformFailureDomainSpec struct {
// category in vCenter must be named openshift-region.
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=80
- // +kubebuilder:validation:Required
+ // +required
Region string `json:"region"`
// zone defines the name of a zone tag that will
@@ -1171,19 +1208,34 @@ type VSpherePlatformFailureDomainSpec struct {
// category in vCenter must be named openshift-zone.
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=80
- // +kubebuilder:validation:Required
+ // +required
Zone string `json:"zone"`
+ // regionAffinity holds the type of region, Datacenter or ComputeCluster.
+ // When set to Datacenter, this means the region is a vCenter Datacenter as defined in topology.
+ // When set to ComputeCluster, this means the region is a vCenter Cluster as defined in topology.
+ // +openshift:validation:featureGate=VSphereHostVMGroupZonal
+ // +optional
+ RegionAffinity *VSphereFailureDomainRegionAffinity `json:"regionAffinity,omitempty"`
+
+ // zoneAffinity holds the type of the zone and, for HostGroup zones, the
+ // vmGroup and hostGroup names in vCenter that correspond to a vm-host
+ // group of type Virtual Machine and Host respectively. It also
+ // contains the vmHostRule, which is an affinity vm-host rule in vCenter.
+ // +openshift:validation:featureGate=VSphereHostVMGroupZonal
+ // +optional
+ ZoneAffinity *VSphereFailureDomainZoneAffinity `json:"zoneAffinity,omitempty"`
+
// server is the fully-qualified domain name or the IP address of the vCenter server.
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=255
// ---
// + Validation is applied via a patch, we validate the format as either ipv4, ipv6 or hostname
Server string `json:"server"`
- // Topology describes a given failure domain using vSphere constructs
- // +kubebuilder:validation:Required
+ // topology describes a given failure domain using vSphere constructs
+ // +required
Topology VSpherePlatformTopology `json:"topology"`
}
@@ -1192,7 +1244,7 @@ type VSpherePlatformFailureDomainSpec struct {
type VSpherePlatformTopology struct {
// datacenter is the name of vCenter datacenter in which virtual machines will be located.
// The maximum length of the datacenter name is 80 characters.
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:MaxLength=80
Datacenter string `json:"datacenter"`
@@ -1200,7 +1252,7 @@ type VSpherePlatformTopology struct {
// in which virtual machine will be located.
// The absolute path is of the form /<datacenter>/host/<cluster>.
// The maximum length of the path is 2048 characters.
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:MaxLength=2048
// +kubebuilder:validation:Pattern=`^/.*?/host/.*?`
ComputeCluster string `json:"computeCluster"`
@@ -1213,7 +1265,7 @@ type VSpherePlatformTopology struct {
// `govc ls 'network/*'`
// Networks should be in the form of an absolute path:
// /<datacenter>/network/<portgroup>.
- // +kubebuilder:validation:Required
+ // +required
// +openshift:validation:FeatureGateAwareMaxItems:featureGate="",maxItems=1
// +openshift:validation:FeatureGateAwareMaxItems:featureGate=VSphereMultiNetworks,maxItems=10
// +kubebuilder:validation:MinItems=1
@@ -1224,7 +1276,7 @@ type VSpherePlatformTopology struct {
// virtual machine is located.
// The absolute path is of the form /<datacenter>/datastore/<datastore>
// The maximum length of the path is 2048 characters.
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:MaxLength=2048
// +kubebuilder:validation:Pattern=`^/.*?/datastore/.*?`
Datastore string `json:"datastore"`
@@ -1263,12 +1315,80 @@ type VSpherePlatformTopology struct {
Template string `json:"template,omitempty"`
}
+// VSphereFailureDomainZoneAffinity contains the vCenter cluster vm-host group (virtual machine and host types)
+// and the vm-host affinity rule that together creates an affinity configuration for vm-host based zonal.
+// This configuration within vCenter creates the required association between a failure domain, virtual machines
+// and ESXi hosts to create a vm-host based zone.
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'HostGroup' ? has(self.hostGroup) : !has(self.hostGroup)",message="hostGroup is required when type is HostGroup, and forbidden otherwise"
+// +union
+type VSphereFailureDomainZoneAffinity struct {
+ // type determines the vSphere object type for a zone within this failure domain.
+ // Available types are ComputeCluster and HostGroup.
+ // When set to ComputeCluster, this means the vCenter cluster defined is the zone.
+ // When set to HostGroup, hostGroup must be configured with vmGroup, hostGroup and vmHostRule,
+ // and the zone is defined by the grouping of those fields.
+ // +kubebuilder:validation:Enum:=HostGroup;ComputeCluster
+ // +required
+ // +unionDiscriminator
+ Type VSphereFailureDomainZoneType `json:"type"`
+
+ // hostGroup holds the vmGroup and hostGroup names in vCenter that
+ // correspond to a vm-host group of type Virtual Machine and Host respectively. It also
+ // contains the vmHostRule, which is an affinity vm-host rule in vCenter.
+ // +unionMember
+ // +optional
+ HostGroup *VSphereFailureDomainHostGroup `json:"hostGroup,omitempty"`
+}
+
+// VSphereFailureDomainRegionAffinity contains the region type which is the string representation of the
+// VSphereFailureDomainRegionType with available options of Datacenter and ComputeCluster.
+// +union
+type VSphereFailureDomainRegionAffinity struct {
+ // type determines the vSphere object type for a region within this failure domain.
+ // Available types are Datacenter and ComputeCluster.
+ // When set to Datacenter, this means the vCenter Datacenter defined is the region.
+ // When set to ComputeCluster, this means the vCenter cluster defined is the region.
+ // +kubebuilder:validation:Enum:=ComputeCluster;Datacenter
+ // +required
+ // +unionDiscriminator
+ Type VSphereFailureDomainRegionType `json:"type"`
+}
+
+// VSphereFailureDomainHostGroup holds the vmGroup and the hostGroup names in vCenter
+// that correspond to a vm-host group of type Virtual Machine and Host respectively. It also
+// contains the vmHostRule, which is an affinity vm-host rule in vCenter.
+type VSphereFailureDomainHostGroup struct {
+ // vmGroup is the name of the vm-host group of type virtual machine within vCenter for this failure domain.
+ // vmGroup is limited to 80 characters.
+ // This field is required when the VSphereFailureDomain ZoneType is HostGroup
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=80
+ // +required
+ VMGroup string `json:"vmGroup"`
+
+ // hostGroup is the name of the vm-host group of type host within vCenter for this failure domain.
+ // hostGroup is limited to 80 characters.
+ // This field is required when the VSphereFailureDomain ZoneType is HostGroup
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=80
+ // +required
+ HostGroup string `json:"hostGroup"`
+
+ // vmHostRule is the name of the affinity vm-host rule within vCenter for this failure domain.
+ // vmHostRule is limited to 80 characters.
+ // This field is required when the VSphereFailureDomain ZoneType is HostGroup
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=80
+ // +required
+ VMHostRule string `json:"vmHostRule"`
+}
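Putting the pieces together, a HostGroup-zoned failure domain that satisfies the FeatureGateAwareXValidation rules above (a HostGroup zone requires a ComputeCluster region) could be sketched as follows, with all names hypothetical:

    package main

    import configv1 "github.com/openshift/api/config/v1"

    func main() {
        _ = configv1.VSpherePlatformFailureDomainSpec{
            Name:   "zone-a",
            Region: "region-1",
            Zone:   "zone-a",
            Server: "vcenter.example.com",
            RegionAffinity: &configv1.VSphereFailureDomainRegionAffinity{
                Type: configv1.ComputeClusterFailureDomainRegion,
            },
            ZoneAffinity: &configv1.VSphereFailureDomainZoneAffinity{
                Type: configv1.HostGroupFailureDomainZone,
                HostGroup: &configv1.VSphereFailureDomainHostGroup{
                    VMGroup:    "zone-a-vms",
                    HostGroup:  "zone-a-hosts",
                    VMHostRule: "zone-a-vm-host-rule",
                },
            },
            Topology: configv1.VSpherePlatformTopology{
                Datacenter:     "dc1",
                ComputeCluster: "/dc1/host/cluster1",
                Networks:       []string{"/dc1/network/portgroup1"},
                Datastore:      "/dc1/datastore/ds1",
            },
        }
    }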
+
// VSpherePlatformVCenterSpec stores the vCenter connection fields.
// This is used by the vSphere CCM.
type VSpherePlatformVCenterSpec struct {
// server is the fully-qualified domain name or the IP address of the vCenter server.
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:MaxLength=255
// ---
// + Validation is applied via a patch, we validate the format as either ipv4, ipv6 or hostname
@@ -1289,7 +1409,7 @@ type VSpherePlatformVCenterSpec struct {
// be used by the Cloud Controller Manager.
// Each datacenter listed here should be used within
// a topology.
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:MinItems=1
// +listType=set
Datacenters []string `json:"datacenters"`
@@ -1490,14 +1610,14 @@ type IBMCloudServiceEndpoint struct {
// Whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured
// with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`
//
- // +kubebuilder:validation:Required
+ // +required
Name IBMCloudServiceName `json:"name"`
// url is a fully qualified URI with scheme https that overrides the default generated
// endpoint for a client.
// This must be provided and cannot be empty.
//
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:XValidation:rule="isURL(self)",message="url must be a valid absolute URL"
URL string `json:"url"`
@@ -1509,20 +1629,20 @@ type IBMCloudPlatformSpec struct{}
// IBMCloudPlatformStatus holds the current status of the IBMCloud infrastructure provider.
type IBMCloudPlatformStatus struct {
- // Location is where the cluster has been deployed
+ // location is where the cluster has been deployed
Location string `json:"location,omitempty"`
- // ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.
+ // resourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.
ResourceGroupName string `json:"resourceGroupName,omitempty"`
- // ProviderType indicates the type of cluster that was created
+ // providerType indicates the type of cluster that was created
ProviderType IBMCloudProviderType `json:"providerType,omitempty"`
- // CISInstanceCRN is the CRN of the Cloud Internet Services instance managing
+ // cisInstanceCRN is the CRN of the Cloud Internet Services instance managing
// the DNS zone for the cluster's base domain
CISInstanceCRN string `json:"cisInstanceCRN,omitempty"`
- // DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone
+ // dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone
// for the cluster's base domain
DNSInstanceCRN string `json:"dnsInstanceCRN,omitempty"`
@@ -1578,15 +1698,15 @@ type PowerVSServiceEndpoint struct {
// ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller
// Power Cloud - https://cloud.ibm.com/apidocs/power-cloud
//
- // +kubebuilder:validation:Required
- // +kubebuilder:validation:Pattern=`^[a-z0-9-]+$`
+ // +required
+ // +kubebuilder:validation:Enum=CIS;COS;COSConfig;DNSServices;GlobalCatalog;GlobalSearch;GlobalTagging;HyperProtect;IAM;KeyProtect;Power;ResourceController;ResourceManager;VPC
Name string `json:"name"`
// url is a fully qualified URI with scheme https that overrides the default generated
// endpoint for a client.
// This must be provided and cannot be empty.
//
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Format=uri
// +kubebuilder:validation:Pattern=`^https://`
@@ -1633,11 +1753,11 @@ type PowerVSPlatformStatus struct {
// +optional
ServiceEndpoints []PowerVSServiceEndpoint `json:"serviceEndpoints,omitempty"`
- // CISInstanceCRN is the CRN of the Cloud Internet Services instance managing
+ // cisInstanceCRN is the CRN of the Cloud Internet Services instance managing
// the DNS zone for the cluster's base domain
CISInstanceCRN string `json:"cisInstanceCRN,omitempty"`
- // DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone
+ // dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone
// for the cluster's base domain
DNSInstanceCRN string `json:"dnsInstanceCRN,omitempty"`
}
@@ -1649,7 +1769,6 @@ type AlibabaCloudPlatformSpec struct{}
// AlibabaCloudPlatformStatus holds the current status of the Alibaba Cloud infrastructure provider.
type AlibabaCloudPlatformStatus struct {
// region specifies the region for Alibaba Cloud resources created for the cluster.
- // +kubebuilder:validation:Required
// +kubebuilder:validation:Pattern=`^[0-9A-Za-z-]+$`
// +required
Region string `json:"region"`
@@ -1668,13 +1787,11 @@ type AlibabaCloudPlatformStatus struct {
// AlibabaCloudResourceTag is the set of tags to add to apply to resources.
type AlibabaCloudResourceTag struct {
// key is the key of the tag.
- // +kubebuilder:validation:Required
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=128
// +required
Key string `json:"key"`
// value is the value of the tag.
- // +kubebuilder:validation:Required
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=128
// +required
@@ -1709,7 +1826,7 @@ type NutanixPlatformSpec struct {
// When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy.
// Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the
// proxy spec.noProxy list.
- // +kubebuilder:validation:Required
+ // +required
PrismCentral NutanixPrismEndpoint `json:"prismCentral"`
// prismElements holds one or more endpoint address and port data to access the Nutanix
@@ -1717,7 +1834,7 @@ type NutanixPlatformSpec struct {
// Prism Element (cluster) for an OpenShift cluster, where all the Nutanix resources (VMs, subnets, volumes, etc.)
// used in the OpenShift cluster are located. In the future, we may support Nutanix resources (VMs, etc.)
// spread over multiple Prism Elements (clusters) of the Prism Central.
- // +kubebuilder:validation:Required
+ // +required
// +listType=map
// +listMapKey=name
PrismElements []NutanixPrismElementEndpoint `json:"prismElements"`
@@ -1725,6 +1842,7 @@ type NutanixPlatformSpec struct {
// failureDomains configures failure domains information for the Nutanix platform.
// When set, the failure domains defined here may be used to spread Machines across
// prism element clusters to improve fault tolerance of the cluster.
+ // +openshift:validation:FeatureGateAwareMaxItems:featureGate=NutanixMultiSubnets,maxItems=32
// +listType=map
// +listMapKey=name
// +optional
@@ -1738,7 +1856,7 @@ type NutanixFailureDomain struct {
// It must consist of only lower case alphanumeric characters and hyphens (-).
// It must start and end with an alphanumeric character.
// This value is arbitrary and is used to identify the failure domain within the platform.
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=64
// +kubebuilder:validation:Pattern=`[a-z0-9]([-a-z0-9]*[a-z0-9])?`
@@ -1747,17 +1865,19 @@ type NutanixFailureDomain struct {
// cluster is to identify the cluster (the Prism Element under management of the Prism Central),
// in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained
// from the Prism Central console or using the prism_central API.
- // +kubebuilder:validation:Required
+ // +required
Cluster NutanixResourceIdentifier `json:"cluster"`
// subnets holds a list of identifiers (one or more) of the cluster's network subnets
// for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be
// obtained from the Prism Central console or using the prism_central API.
+ // If the feature gate NutanixMultiSubnets is enabled, up to 32 subnets may be configured.
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:MinItems=1
- // +kubebuilder:validation:MaxItems=1
- // +listType=map
- // +listMapKey=type
+ // +openshift:validation:FeatureGateAwareMaxItems:featureGate="",maxItems=1
+ // +openshift:validation:FeatureGateAwareMaxItems:featureGate=NutanixMultiSubnets,maxItems=32
+ // +openshift:validation:FeatureGateAwareXValidation:featureGate=NutanixMultiSubnets,rule="self.all(x, self.exists_one(y, x == y))",message="each subnet must be unique"
+ // +listType=atomic
Subnets []NutanixResourceIdentifier `json:"subnets"`
}
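A failure domain carrying two subnets, as allowed behind NutanixMultiSubnets and satisfying the uniqueness rule above; the names are placeholders, and k8s.io/utils/ptr is used because the identifier's uuid and name fields are pointers:

    package main

    import (
        configv1 "github.com/openshift/api/config/v1"
        "k8s.io/utils/ptr"
    )

    func main() {
        _ = configv1.NutanixFailureDomain{
            Name: "fd-1",
            Cluster: configv1.NutanixResourceIdentifier{
                Type: configv1.NutanixIdentifierName,
                Name: ptr.To("pe-cluster-1"),
            },
            Subnets: []configv1.NutanixResourceIdentifier{
                {Type: configv1.NutanixIdentifierName, Name: ptr.To("subnet-a")},
                {Type: configv1.NutanixIdentifierName, Name: ptr.To("subnet-b")}, // must be unique
            },
        }
    }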
@@ -1780,7 +1900,7 @@ const (
type NutanixResourceIdentifier struct {
// type is the identifier type to use for this resource.
// +unionDiscriminator
- // +kubebuilder:validation:Required
+ // +required
Type NutanixIdentifierType `json:"type"`
// uuid is the UUID of the resource in the PC. It cannot be empty if the type is UUID.
@@ -1795,12 +1915,12 @@ type NutanixResourceIdentifier struct {
// NutanixPrismEndpoint holds the endpoint address and port to access the Nutanix Prism Central or Element (cluster)
type NutanixPrismEndpoint struct {
// address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster)
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:MaxLength=256
Address string `json:"address"`
// port is the port number to access the Nutanix Prism Central or Element (cluster)
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:Minimum=1
// +kubebuilder:validation:Maximum=65535
Port int32 `json:"port"`
@@ -1810,7 +1930,7 @@ type NutanixPrismEndpoint struct {
type NutanixPrismElementEndpoint struct {
// name is the name of the Prism Element (cluster). This value will correspond with
// the cluster field configured on other resources (eg Machines, PVCs, etc).
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:MaxLength=256
Name string `json:"name"`
@@ -1818,7 +1938,7 @@ type NutanixPrismElementEndpoint struct {
// When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy.
// Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the
// proxy spec.noProxy list.
- // +kubebuilder:validation:Required
+ // +required
Endpoint NutanixPrismEndpoint `json:"endpoint"`
}
diff --git a/vendor/github.com/openshift/api/config/v1/types_ingress.go b/vendor/github.com/openshift/api/config/v1/types_ingress.go
index 302913a16..9492e08a7 100644
--- a/vendor/github.com/openshift/api/config/v1/types_ingress.go
+++ b/vendor/github.com/openshift/api/config/v1/types_ingress.go
@@ -27,7 +27,6 @@ type Ingress struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
- // +kubebuilder:validation:Required
// +required
Spec IngressSpec `json:"spec"`
// status holds observed values from the cluster. They may not be overridden.
@@ -150,7 +149,7 @@ type AWSIngressSpec struct {
// https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb
// +unionDiscriminator
// +kubebuilder:validation:Enum:=NLB;Classic
- // +kubebuilder:validation:Required
+ // +required
Type AWSLBType `json:"type,omitempty"`
}
@@ -223,7 +222,6 @@ type ComponentRouteSpec struct {
// +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=63
- // +kubebuilder:validation:Required
// +required
Namespace string `json:"namespace"`
@@ -233,12 +231,10 @@ type ComponentRouteSpec struct {
// entry in the list of status.componentRoutes if the route is to be customized.
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=256
- // +kubebuilder:validation:Required
// +required
Name string `json:"name"`
// hostname is the hostname that should be used by the route.
- // +kubebuilder:validation:Required
// +required
Hostname Hostname `json:"hostname"`
@@ -260,7 +256,6 @@ type ComponentRouteStatus struct {
// +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=63
- // +kubebuilder:validation:Required
// +required
Namespace string `json:"namespace"`
@@ -271,12 +266,10 @@ type ComponentRouteStatus struct {
// entry in the list of spec.componentRoutes if the route is to be customized.
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=256
- // +kubebuilder:validation:Required
// +required
Name string `json:"name"`
// defaultHostname is the hostname of this route prior to customization.
- // +kubebuilder:validation:Required
// +required
DefaultHostname Hostname `json:"defaultHostname"`
@@ -310,7 +303,6 @@ type ComponentRouteStatus struct {
// relatedObjects is a list of resources which are useful when debugging or inspecting how spec.componentRoutes is applied.
// +kubebuilder:validation:MinItems=1
- // +kubebuilder:validation:Required
// +required
RelatedObjects []ObjectReference `json:"relatedObjects"`
}
diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go
index 1eeae69da..95e55a7ff 100644
--- a/vendor/github.com/openshift/api/config/v1/types_network.go
+++ b/vendor/github.com/openshift/api/config/v1/types_network.go
@@ -30,7 +30,6 @@ type Network struct {
// As a general rule, this SHOULD NOT be read directly. Instead, you should
// consume the NetworkStatus, as it indicates the currently deployed configuration.
// Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.
- // +kubebuilder:validation:Required
// +required
Spec NetworkSpec `json:"spec"`
// status holds observed values from the cluster. They may not be overridden.
@@ -55,7 +54,7 @@ type NetworkSpec struct {
// +listType=atomic
ServiceNetwork []string `json:"serviceNetwork"`
- // NetworkType is the plugin that is to be deployed (e.g. OVNKubernetes).
+ // networkType is the plugin that is to be deployed (e.g. OVNKubernetes).
// This should match a value that the cluster-network-operator understands,
// or else no networking will be installed.
// Currently supported values are:
@@ -101,13 +100,13 @@ type NetworkStatus struct {
// +listType=atomic
ServiceNetwork []string `json:"serviceNetwork,omitempty"`
- // NetworkType is the plugin that is deployed (e.g. OVNKubernetes).
+ // networkType is the plugin that is deployed (e.g. OVNKubernetes).
NetworkType string `json:"networkType,omitempty"`
- // ClusterNetworkMTU is the MTU for inter-pod networking.
+ // clusterNetworkMTU is the MTU for inter-pod networking.
ClusterNetworkMTU int `json:"clusterNetworkMTU,omitempty"`
- // Migration contains the cluster network migration configuration.
+ // migration contains the cluster network migration configuration.
Migration *NetworkMigration `json:"migration,omitempty"`
// conditions represents the observations of a network.config current state.
@@ -185,35 +184,35 @@ type NetworkList struct {
// NetworkMigration represents the network migration status.
type NetworkMigration struct {
- // NetworkType is the target plugin that is being deployed.
+ // networkType is the target plugin that is being deployed.
// DEPRECATED: network type migration is no longer supported,
// so this should always be unset.
// +optional
NetworkType string `json:"networkType,omitempty"`
- // MTU is the MTU configuration that is being deployed.
+ // mtu is the MTU configuration that is being deployed.
// +optional
MTU *MTUMigration `json:"mtu,omitempty"`
}
// MTUMigration contains information about MTU migration.
type MTUMigration struct {
- // Network contains MTU migration configuration for the default network.
+ // network contains MTU migration configuration for the default network.
// +optional
Network *MTUMigrationValues `json:"network,omitempty"`
- // Machine contains MTU migration configuration for the machine's uplink.
+ // machine contains MTU migration configuration for the machine's uplink.
// +optional
Machine *MTUMigrationValues `json:"machine,omitempty"`
}
// MTUMigrationValues contains the values for a MTU migration.
type MTUMigrationValues struct {
- // To is the MTU to migrate to.
+ // to is the MTU to migrate to.
// +kubebuilder:validation:Minimum=0
To *uint32 `json:"to"`
- // From is the MTU to migrate from.
+ // from is the MTU to migrate from.
// +kubebuilder:validation:Minimum=0
// +optional
From *uint32 `json:"from,omitempty"`
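To make the nesting of the migration types concrete, a small sketch under the assumption that the fields are exactly as shown in the hunks above (the `u32` helper is ours):

```go
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

// u32 takes the address of a literal, since To and From are *uint32.
func u32(v uint32) *uint32 { return &v }

func main() {
	// An in-flight MTU migration: the default (pod) network moves from 1400
	// to 9000, while the machine uplink only reports its target MTU.
	m := configv1.NetworkMigration{
		MTU: &configv1.MTUMigration{
			Network: &configv1.MTUMigrationValues{To: u32(9000), From: u32(1400)},
			Machine: &configv1.MTUMigrationValues{To: u32(9100)},
		},
	}
	fmt.Printf("pod network MTU: %d -> %d\n", *m.MTU.Network.From, *m.MTU.Network.To)
}
```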
diff --git a/vendor/github.com/openshift/api/config/v1/types_node.go b/vendor/github.com/openshift/api/config/v1/types_node.go
index b3b1b62c4..3fc7bc0c3 100644
--- a/vendor/github.com/openshift/api/config/v1/types_node.go
+++ b/vendor/github.com/openshift/api/config/v1/types_node.go
@@ -28,7 +28,6 @@ type Node struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
- // +kubebuilder:validation:Required
// +required
Spec NodeSpec `json:"spec"`
@@ -38,14 +37,33 @@ type Node struct {
}
type NodeSpec struct {
- // CgroupMode determines the cgroups version on the node
+ // cgroupMode determines the cgroups version on the node
// +optional
CgroupMode CgroupMode `json:"cgroupMode,omitempty"`
- // WorkerLatencyProfile determins the how fast the kubelet is updating
+ // workerLatencyProfile determines how fast the kubelet is updating
// the status and corresponding reaction of the cluster
// +optional
WorkerLatencyProfile WorkerLatencyProfileType `json:"workerLatencyProfile,omitempty"`
+
+ // minimumKubeletVersion is the lowest version of a kubelet that can join the cluster.
+ // Specifically, the apiserver will deny most authorization requests of kubelets that are older
+ // than the specified version, only allowing the kubelet to get and update its node object, and perform
+ // subjectaccessreviews.
+ // This means any kubelet older than the specified version that attempts to join the cluster will not be able to run any assigned workloads,
+ // and will eventually be marked as not ready.
+ // Its max length is 8, so the maximum version allowed is either "9.999.99" or "99.99.99".
+ // Since the kubelet reports the version of the Kubernetes release, not OpenShift, this field references
+ // the underlying Kubernetes version this version of OpenShift is based on.
+ // In other words: if an admin wishes to ensure no nodes run a version older than OpenShift 4.17, then
+ // they should set the minimumKubeletVersion to 1.30.0.
+ // When comparing versions, the kubelet's version is stripped of any contents outside of major.minor.patch version.
+ // Thus, a kubelet with version "1.0.0-ec.0" will be compatible with minimumKubeletVersion "1.0.0" or earlier.
+ // +kubebuilder:validation:XValidation:rule="self == \"\" || self.matches('^[0-9]*\\.[0-9]*\\.[0-9]*$')",message="minimumKubeletVersion must be in a semver compatible format of x.y.z, or empty"
+ // +kubebuilder:validation:MaxLength:=8
+ // +openshift:enable:FeatureGate=MinimumKubeletVersion
+ // +optional
+ MinimumKubeletVersion string `json:"minimumKubeletVersion"`
}
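A minimal sketch of setting the new field on the cluster-scoped Node config (the singleton is conventionally named "cluster"; serving the field also requires the MinimumKubeletVersion feature gate noted above):

```go
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Reject kubelets older than Kubernetes 1.30.0, the base of OpenShift 4.17.
	// Pre-release suffixes are stripped before comparison, so a kubelet
	// reporting "1.30.0-ec.0" still satisfies "1.30.0".
	node := &configv1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
		Spec: configv1.NodeSpec{
			MinimumKubeletVersion: "1.30.0",
		},
	}
	fmt.Println(node.Spec.MinimumKubeletVersion)
}
```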
type NodeStatus struct {
diff --git a/vendor/github.com/openshift/api/config/v1/types_oauth.go b/vendor/github.com/openshift/api/config/v1/types_oauth.go
index dce08a17f..20845e4db 100644
--- a/vendor/github.com/openshift/api/config/v1/types_oauth.go
+++ b/vendor/github.com/openshift/api/config/v1/types_oauth.go
@@ -27,7 +27,6 @@ type OAuth struct {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata"`
// spec holds user settable values for configuration
- // +kubebuilder:validation:Required
// +required
Spec OAuthSpec `json:"spec"`
// status holds observed values from the cluster. They may not be overridden.
diff --git a/vendor/github.com/openshift/api/config/v1/types_project.go b/vendor/github.com/openshift/api/config/v1/types_project.go
index 78fd3f41a..3d219862b 100644
--- a/vendor/github.com/openshift/api/config/v1/types_project.go
+++ b/vendor/github.com/openshift/api/config/v1/types_project.go
@@ -24,7 +24,6 @@ type Project struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
- // +kubebuilder:validation:Required
// +required
Spec ProjectSpec `json:"spec"`
// status holds observed values from the cluster. They may not be overridden.
diff --git a/vendor/github.com/openshift/api/config/v1/types_proxy.go b/vendor/github.com/openshift/api/config/v1/types_proxy.go
index 2dfc66b1c..ed40176ce 100644
--- a/vendor/github.com/openshift/api/config/v1/types_proxy.go
+++ b/vendor/github.com/openshift/api/config/v1/types_proxy.go
@@ -25,8 +25,7 @@ type Proxy struct {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty"`
- // Spec holds user-settable values for the proxy configuration
- // +kubebuilder:validation:Required
+ // spec holds user-settable values for the proxy configuration
// +required
Spec ProxySpec `json:"spec"`
// status holds observed values from the cluster. They may not be overridden.
diff --git a/vendor/github.com/openshift/api/config/v1/types_scheduling.go b/vendor/github.com/openshift/api/config/v1/types_scheduling.go
index 2749f4f70..c90d5633f 100644
--- a/vendor/github.com/openshift/api/config/v1/types_scheduling.go
+++ b/vendor/github.com/openshift/api/config/v1/types_scheduling.go
@@ -25,7 +25,6 @@ type Scheduler struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
- // +kubebuilder:validation:Required
// +required
Spec SchedulerSpec `json:"spec"`
// status holds observed values from the cluster. They may not be overridden.
@@ -74,7 +73,7 @@ type SchedulerSpec struct {
// would not be applied.
// +optional
DefaultNodeSelector string `json:"defaultNodeSelector,omitempty"`
- // MastersSchedulable allows masters nodes to be schedulable. When this flag is
+ // mastersSchedulable allows master nodes to be schedulable. When this flag is
// turned on, all the master nodes in the cluster will be made schedulable,
// so that workload pods can run on them. The default value for this field is false,
// meaning none of the master nodes are schedulable.
diff --git a/vendor/github.com/openshift/api/config/v1/types_testreporting.go b/vendor/github.com/openshift/api/config/v1/types_testreporting.go
index 4d642e060..00953957f 100644
--- a/vendor/github.com/openshift/api/config/v1/types_testreporting.go
+++ b/vendor/github.com/openshift/api/config/v1/types_testreporting.go
@@ -15,7 +15,6 @@ type TestReporting struct {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty"`
- // +kubebuilder:validation:Required
// +required
Spec TestReportingSpec `json:"spec"`
// status holds observed values from the cluster. They may not be overridden.
@@ -24,20 +23,20 @@ type TestReporting struct {
}
type TestReportingSpec struct {
- // TestsForFeatureGates is a list, indexed by FeatureGate and includes information about testing.
+ // testsForFeatureGates is a list, indexed by FeatureGate, that includes information about testing.
TestsForFeatureGates []FeatureGateTests `json:"testsForFeatureGates"`
}
type FeatureGateTests struct {
- // FeatureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance.
+ // featureGate is the name of the FeatureGate as it appears in the FeatureGate CR instance.
FeatureGate string `json:"featureGate"`
- // Tests contains an item for every TestName
+ // tests contains an item for every TestName
Tests []TestDetails `json:"tests"`
}
type TestDetails struct {
- // TestName is the name of the test as it appears in junit XMLs.
+ // testName is the name of the test as it appears in junit XMLs.
// It does not include the suite name since the same test can be executed in many suites.
TestName string `json:"testName"`
}
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
index 069346998..b013d4595 100644
--- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
+++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
@@ -245,6 +245,11 @@ func (in *AWSPlatformStatus) DeepCopyInto(out *AWSPlatformStatus) {
*out = make([]AWSResourceTag, len(*in))
copy(*out, *in)
}
+ if in.CloudLoadBalancerConfig != nil {
+ in, out := &in.CloudLoadBalancerConfig, &out.CloudLoadBalancerConfig
+ *out = new(CloudLoadBalancerConfig)
+ (*in).DeepCopyInto(*out)
+ }
return
}
@@ -5822,9 +5827,72 @@ func (in *UsernamePrefix) DeepCopy() *UsernamePrefix {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VSphereFailureDomainHostGroup) DeepCopyInto(out *VSphereFailureDomainHostGroup) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereFailureDomainHostGroup.
+func (in *VSphereFailureDomainHostGroup) DeepCopy() *VSphereFailureDomainHostGroup {
+ if in == nil {
+ return nil
+ }
+ out := new(VSphereFailureDomainHostGroup)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VSphereFailureDomainRegionAffinity) DeepCopyInto(out *VSphereFailureDomainRegionAffinity) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereFailureDomainRegionAffinity.
+func (in *VSphereFailureDomainRegionAffinity) DeepCopy() *VSphereFailureDomainRegionAffinity {
+ if in == nil {
+ return nil
+ }
+ out := new(VSphereFailureDomainRegionAffinity)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VSphereFailureDomainZoneAffinity) DeepCopyInto(out *VSphereFailureDomainZoneAffinity) {
+ *out = *in
+ if in.HostGroup != nil {
+ in, out := &in.HostGroup, &out.HostGroup
+ *out = new(VSphereFailureDomainHostGroup)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereFailureDomainZoneAffinity.
+func (in *VSphereFailureDomainZoneAffinity) DeepCopy() *VSphereFailureDomainZoneAffinity {
+ if in == nil {
+ return nil
+ }
+ out := new(VSphereFailureDomainZoneAffinity)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VSpherePlatformFailureDomainSpec) DeepCopyInto(out *VSpherePlatformFailureDomainSpec) {
*out = *in
+ if in.RegionAffinity != nil {
+ in, out := &in.RegionAffinity, &out.RegionAffinity
+ *out = new(VSphereFailureDomainRegionAffinity)
+ **out = **in
+ }
+ if in.ZoneAffinity != nil {
+ in, out := &in.ZoneAffinity, &out.ZoneAffinity
+ *out = new(VSphereFailureDomainZoneAffinity)
+ (*in).DeepCopyInto(*out)
+ }
in.Topology.DeepCopyInto(&out.Topology)
return
}
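What the generated functions above buy you, in one sketch: DeepCopy allocates fresh pointers for the new affinity fields instead of aliasing them (Topology is left zero-valued here):

```go
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	orig := &configv1.VSpherePlatformFailureDomainSpec{
		ZoneAffinity: &configv1.VSphereFailureDomainZoneAffinity{
			HostGroup: &configv1.VSphereFailureDomainHostGroup{},
		},
	}
	cp := orig.DeepCopy()

	// The copy owns its own pointers, so mutating it cannot leak back into orig.
	fmt.Println(cp.ZoneAffinity != orig.ZoneAffinity)                     // true
	fmt.Println(cp.ZoneAffinity.HostGroup != orig.ZoneAffinity.HostGroup) // true
}
```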
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml
index fa5dd4e31..78fd36f3f 100644
--- a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml
+++ b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml
@@ -115,6 +115,7 @@ clusterversions.config.openshift.io:
Capability: ""
Category: ""
FeatureGates:
+ - ImageStreamImportMode
- SignatureStores
FilenameOperatorName: cluster-version-operator
FilenameOperatorOrdering: "01"
@@ -310,10 +311,14 @@ infrastructures.config.openshift.io:
Capability: ""
Category: ""
FeatureGates:
+ - AWSClusterHostedDNS
- BareMetalLoadBalancer
- GCPClusterHostedDNS
- GCPLabelsTags
+ - HighlyAvailableArbiter
+ - NutanixMultiSubnets
- VSphereControlPlaneMachineSet
+ - VSphereHostVMGroupZonal
- VSphereMultiNetworks
- VSphereMultiVCenters
FilenameOperatorName: config-operator
@@ -382,7 +387,8 @@ nodes.config.openshift.io:
CRDName: nodes.config.openshift.io
Capability: ""
Category: ""
- FeatureGates: []
+ FeatureGates:
+ - MinimumKubeletVersion
FilenameOperatorName: config-operator
FilenameOperatorOrdering: "01"
FilenameRunLevel: "0000_10"
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
index c580bd834..0ac9c7ccd 100644
--- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
+++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
@@ -22,8 +22,8 @@ func (AdmissionConfig) SwaggerDoc() map[string]string {
var map_AdmissionPluginConfig = map[string]string{
"": "AdmissionPluginConfig holds the necessary configuration options for admission plugins",
- "location": "Location is the path to a configuration file that contains the plugin's configuration",
- "configuration": "Configuration is an embedded configuration object to be used as the plugin's configuration. If present, it will be used instead of the path to the configuration file.",
+ "location": "location is the path to a configuration file that contains the plugin's configuration",
+ "configuration": "configuration is an embedded configuration object to be used as the plugin's configuration. If present, it will be used instead of the path to the configuration file.",
}
func (AdmissionPluginConfig) SwaggerDoc() map[string]string {
@@ -37,8 +37,8 @@ var map_AuditConfig = map[string]string{
"maximumFileRetentionDays": "Maximum number of days to retain old log files based on the timestamp encoded in their filename.",
"maximumRetainedFiles": "Maximum number of old log files to retain.",
"maximumFileSizeMegabytes": "Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.",
- "policyFile": "PolicyFile is a path to the file that defines the audit policy configuration.",
- "policyConfiguration": "PolicyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. If present, it will be used instead of the path to the policy file.",
+ "policyFile": "policyFile is a path to the file that defines the audit policy configuration.",
+ "policyConfiguration": "policyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. If present, it will be used instead of the path to the policy file.",
"logFormat": "Format of saved audits (legacy or json).",
"webHookKubeConfig": "Path to a .kubeconfig formatted file that defines the audit webhook configuration.",
"webHookMode": "Strategy for sending audit events (block or batch).",
@@ -50,8 +50,8 @@ func (AuditConfig) SwaggerDoc() map[string]string {
var map_CertInfo = map[string]string{
"": "CertInfo relates a certificate with a private key",
- "certFile": "CertFile is a file containing a PEM-encoded certificate",
- "keyFile": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile",
+ "certFile": "certFile is a file containing a PEM-encoded certificate",
+ "keyFile": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile",
}
func (CertInfo) SwaggerDoc() map[string]string {
@@ -71,7 +71,7 @@ func (ClientConnectionOverrides) SwaggerDoc() map[string]string {
var map_ConfigMapFileReference = map[string]string{
"": "ConfigMapFileReference references a config map in a specific namespace. The namespace must be specified at the point of use.",
- "key": "Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references.",
+ "key": "key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references.",
}
func (ConfigMapFileReference) SwaggerDoc() map[string]string {
@@ -107,8 +107,8 @@ func (DelegatedAuthorization) SwaggerDoc() map[string]string {
var map_EtcdConnectionInfo = map[string]string{
"": "EtcdConnectionInfo holds information necessary for connecting to an etcd server",
- "urls": "URLs are the URLs for etcd",
- "ca": "CA is a file containing trusted roots for the etcd server certificates",
+ "urls": "urls are the URLs for etcd",
+ "ca": "ca is a file containing trusted roots for the etcd server certificates",
}
func (EtcdConnectionInfo) SwaggerDoc() map[string]string {
@@ -116,7 +116,7 @@ func (EtcdConnectionInfo) SwaggerDoc() map[string]string {
}
var map_EtcdStorageConfig = map[string]string{
- "storagePrefix": "StoragePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located.",
+ "storagePrefix": "storagePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located.",
}
func (EtcdStorageConfig) SwaggerDoc() map[string]string {
@@ -138,7 +138,7 @@ func (GenericAPIServerConfig) SwaggerDoc() map[string]string {
var map_GenericControllerConfig = map[string]string{
"": "GenericControllerConfig provides information to configure a controller",
- "servingInfo": "ServingInfo is the HTTP serving information for the controller's endpoints",
+ "servingInfo": "servingInfo is the HTTP serving information for the controller's endpoints",
"leaderElection": "leaderElection provides information to elect a leader. Only override this if you have a specific need",
"authentication": "authentication allows configuration of authentication for the endpoints",
"authorization": "authorization allows configuration of authentication for the endpoints",
@@ -150,8 +150,8 @@ func (GenericControllerConfig) SwaggerDoc() map[string]string {
var map_HTTPServingInfo = map[string]string{
"": "HTTPServingInfo holds configuration for serving HTTP",
- "maxRequestsInFlight": "MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.",
- "requestTimeoutSeconds": "RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if -1 there is no limit on requests.",
+ "maxRequestsInFlight": "maxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.",
+ "requestTimeoutSeconds": "requestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if -1 there is no limit on requests.",
}
func (HTTPServingInfo) SwaggerDoc() map[string]string {
@@ -193,7 +193,7 @@ func (MaxAgePolicy) SwaggerDoc() map[string]string {
var map_NamedCertificate = map[string]string{
"": "NamedCertificate specifies a certificate/key, and the names it should be served for",
- "names": "Names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, or can contain leading wildcard segments.",
+ "names": "names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, or can contain leading wildcard segments.",
}
func (NamedCertificate) SwaggerDoc() map[string]string {
@@ -202,8 +202,8 @@ func (NamedCertificate) SwaggerDoc() map[string]string {
var map_RemoteConnectionInfo = map[string]string{
"": "RemoteConnectionInfo holds information necessary for establishing a remote connection",
- "url": "URL is the remote URL to connect to",
- "ca": "CA is the CA for verifying TLS connections",
+ "url": "url is the remote URL to connect to",
+ "ca": "ca is the CA for verifying TLS connections",
}
func (RemoteConnectionInfo) SwaggerDoc() map[string]string {
@@ -233,12 +233,12 @@ func (SecretNameReference) SwaggerDoc() map[string]string {
var map_ServingInfo = map[string]string{
"": "ServingInfo holds information about serving web pages",
- "bindAddress": "BindAddress is the ip:port to serve on",
- "bindNetwork": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"",
- "clientCA": "ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates",
- "namedCertificates": "NamedCertificates is a list of certificates to use to secure requests to specific hostnames",
- "minTLSVersion": "MinTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants",
- "cipherSuites": "CipherSuites contains an overridden list of ciphers for the server to support. Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants",
+ "bindAddress": "bindAddress is the ip:port to serve on",
+ "bindNetwork": "bindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"",
+ "clientCA": "clientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates",
+ "namedCertificates": "namedCertificates is a list of certificates to use to secure requests to specific hostnames",
+ "minTLSVersion": "minTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants",
+ "cipherSuites": "cipherSuites contains an overridden list of ciphers for the server to support. Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants",
}
func (ServingInfo) SwaggerDoc() map[string]string {
@@ -255,10 +255,10 @@ func (StringSource) SwaggerDoc() map[string]string {
var map_StringSourceSpec = map[string]string{
"": "StringSourceSpec specifies a string value, or external location",
- "value": "Value specifies the cleartext value, or an encrypted value if keyFile is specified.",
- "env": "Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.",
- "file": "File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.",
- "keyFile": "KeyFile references a file containing the key to use to decrypt the value.",
+ "value": "value specifies the cleartext value, or an encrypted value if keyFile is specified.",
+ "env": "env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.",
+ "file": "file references a file containing the cleartext value, or an encrypted value if a keyFile is specified.",
+ "keyFile": "keyFile references a file containing the key to use to decrypt the value.",
}
func (StringSourceSpec) SwaggerDoc() map[string]string {
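The four fields describe alternative sources for a single string. A hypothetical resolver (ours, not part of this API; the real consumers live elsewhere) might prefer the inline value, then the env var, then the file, with keyFile-based decryption omitted:

```go
package main

import (
	"fmt"
	"os"
	"strings"

	configv1 "github.com/openshift/api/config/v1"
)

// resolve is an illustrative, cleartext-only resolver for StringSourceSpec;
// it deliberately ignores KeyFile (encrypted values).
func resolve(s configv1.StringSourceSpec) (string, error) {
	switch {
	case s.Value != "":
		return s.Value, nil
	case s.Env != "":
		return os.Getenv(s.Env), nil
	case s.File != "":
		b, err := os.ReadFile(s.File)
		if err != nil {
			return "", err
		}
		return strings.TrimSpace(string(b)), nil
	}
	return "", nil
}

func main() {
	v, _ := resolve(configv1.StringSourceSpec{Env: "HOME"})
	fmt.Println(v)
}
```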
@@ -369,7 +369,7 @@ var map_AuthenticationSpec = map[string]string{
"webhookTokenAuthenticators": "webhookTokenAuthenticators is DEPRECATED, setting it has no effect.",
"webhookTokenAuthenticator": "webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service.\n\nCan only be set if \"Type\" is set to \"None\".",
"serviceAccountIssuer": "serviceAccountIssuer is the identifier of the bound service account token issuer. The default is https://kubernetes.default.svc WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the previous issuer value. Instead, the tokens issued by previous service account issuer will continue to be trusted for a time period chosen by the platform (currently set to 24h). This time period is subject to change over time. This allows internal components to transition to use new service account issuer without service distruption.",
- "oidcProviders": "OIDCProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if \"Type\" is set to \"OIDC\".\n\nAt most one provider can be configured.",
+ "oidcProviders": "oidcProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if \"Type\" is set to \"OIDC\".\n\nAt most one provider can be configured.",
}
func (AuthenticationSpec) SwaggerDoc() map[string]string {
@@ -378,7 +378,7 @@ func (AuthenticationSpec) SwaggerDoc() map[string]string {
var map_AuthenticationStatus = map[string]string{
"integratedOAuthMetadata": "integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key \"oauthMetadata\" is used to locate the data. If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config-managed.",
- "oidcClients": "OIDCClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin.",
+ "oidcClients": "oidcClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin.",
}
func (AuthenticationStatus) SwaggerDoc() map[string]string {
@@ -395,11 +395,11 @@ func (DeprecatedWebhookTokenAuthenticator) SwaggerDoc() map[string]string {
}
var map_OIDCClientConfig = map[string]string{
- "componentName": "ComponentName is the name of the component that is supposed to consume this client configuration",
- "componentNamespace": "ComponentNamespace is the namespace of the component that is supposed to consume this client configuration",
- "clientID": "ClientID is the identifier of the OIDC client from the OIDC provider",
- "clientSecret": "ClientSecret refers to a secret in the `openshift-config` namespace that contains the client secret in the `clientSecret` key of the `.data` field",
- "extraScopes": "ExtraScopes is an optional set of scopes to request tokens with.",
+ "componentName": "componentName is the name of the component that is supposed to consume this client configuration",
+ "componentNamespace": "componentNamespace is the namespace of the component that is supposed to consume this client configuration",
+ "clientID": "clientID is the identifier of the OIDC client from the OIDC provider",
+ "clientSecret": "clientSecret refers to a secret in the `openshift-config` namespace that contains the client secret in the `clientSecret` key of the `.data` field",
+ "extraScopes": "extraScopes is an optional set of scopes to request tokens with.",
}
func (OIDCClientConfig) SwaggerDoc() map[string]string {
@@ -409,7 +409,7 @@ func (OIDCClientConfig) SwaggerDoc() map[string]string {
var map_OIDCClientReference = map[string]string{
"oidcProviderName": "OIDCName refers to the `name` of the provider from `oidcProviders`",
"issuerURL": "URL is the serving URL of the token issuer. Must use the https:// scheme.",
- "clientID": "ClientID is the identifier of the OIDC client from the OIDC provider",
+ "clientID": "clientID is the identifier of the OIDC client from the OIDC provider",
}
func (OIDCClientReference) SwaggerDoc() map[string]string {
@@ -417,11 +417,11 @@ func (OIDCClientReference) SwaggerDoc() map[string]string {
}
var map_OIDCClientStatus = map[string]string{
- "componentName": "ComponentName is the name of the component that will consume a client configuration.",
- "componentNamespace": "ComponentNamespace is the namespace of the component that will consume a client configuration.",
- "currentOIDCClients": "CurrentOIDCClients is a list of clients that the component is currently using.",
- "consumingUsers": "ConsumingUsers is a slice of ServiceAccounts that need to have read permission on the `clientSecret` secret.",
- "conditions": "Conditions are used to communicate the state of the `oidcClients` entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf Available is true, the component is successfully using the configured client. If Degraded is true, that means something has gone wrong trying to handle the client configuration. If Progressing is true, that means the component is taking some action related to the `oidcClients` entry.",
+ "componentName": "componentName is the name of the component that will consume a client configuration.",
+ "componentNamespace": "componentNamespace is the namespace of the component that will consume a client configuration.",
+ "currentOIDCClients": "currentOIDCClients is a list of clients that the component is currently using.",
+ "consumingUsers": "consumingUsers is a slice of ServiceAccounts that need to have read permission on the `clientSecret` secret.",
+ "conditions": "conditions are used to communicate the state of the `oidcClients` entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf Available is true, the component is successfully using the configured client. If Degraded is true, that means something has gone wrong trying to handle the client configuration. If Progressing is true, that means the component is taking some action related to the `oidcClients` entry.",
}
func (OIDCClientStatus) SwaggerDoc() map[string]string {
@@ -429,11 +429,11 @@ func (OIDCClientStatus) SwaggerDoc() map[string]string {
}
var map_OIDCProvider = map[string]string{
- "name": "Name of the OIDC provider",
- "issuer": "Issuer describes atributes of the OIDC token issuer",
- "oidcClients": "OIDCClients contains configuration for the platform's clients that need to request tokens from the issuer",
- "claimMappings": "ClaimMappings describes rules on how to transform information from an ID token into a cluster identity",
- "claimValidationRules": "ClaimValidationRules are rules that are applied to validate token claims to authenticate users.",
+ "name": "name of the OIDC provider",
+ "issuer": "issuer describes atributes of the OIDC token issuer",
+ "oidcClients": "oidcClients contains configuration for the platform's clients that need to request tokens from the issuer",
+ "claimMappings": "claimMappings describes rules on how to transform information from an ID token into a cluster identity",
+ "claimValidationRules": "claimValidationRules are rules that are applied to validate token claims to authenticate users.",
}
func (OIDCProvider) SwaggerDoc() map[string]string {
@@ -441,7 +441,7 @@ func (OIDCProvider) SwaggerDoc() map[string]string {
}
var map_PrefixedClaimMapping = map[string]string{
- "prefix": "Prefix is a string to prefix the value from the token in the result of the claim mapping.\n\nBy default, no prefixing occurs.\n\nExample: if `prefix` is set to \"myoidc:\"\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\".",
+ "prefix": "prefix is a string to prefix the value from the token in the result of the claim mapping.\n\nBy default, no prefixing occurs.\n\nExample: if `prefix` is set to \"myoidc:\"\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\".",
}
func (PrefixedClaimMapping) SwaggerDoc() map[string]string {
@@ -449,7 +449,7 @@ func (PrefixedClaimMapping) SwaggerDoc() map[string]string {
}
var map_TokenClaimMapping = map[string]string{
- "claim": "Claim is a JWT token claim to be used in the mapping",
+ "claim": "claim is a JWT token claim to be used in the mapping",
}
func (TokenClaimMapping) SwaggerDoc() map[string]string {
@@ -457,8 +457,8 @@ func (TokenClaimMapping) SwaggerDoc() map[string]string {
}
var map_TokenClaimMappings = map[string]string{
- "username": "Username is a name of the claim that should be used to construct usernames for the cluster identity.\n\nDefault value: \"sub\"",
- "groups": "Groups is a name of the claim that should be used to construct groups for the cluster identity. The referenced claim must use array of strings values.",
+ "username": "username is a name of the claim that should be used to construct usernames for the cluster identity.\n\nDefault value: \"sub\"",
+ "groups": "groups is a name of the claim that should be used to construct groups for the cluster identity. The referenced claim must use array of strings values.",
}
func (TokenClaimMappings) SwaggerDoc() map[string]string {
@@ -466,8 +466,8 @@ func (TokenClaimMappings) SwaggerDoc() map[string]string {
}
var map_TokenClaimValidationRule = map[string]string{
- "type": "Type sets the type of the validation rule",
- "requiredClaim": "RequiredClaim allows configuring a required claim name and its expected value",
+ "type": "type sets the type of the validation rule",
+ "requiredClaim": "requiredClaim allows configuring a required claim name and its expected value",
}
func (TokenClaimValidationRule) SwaggerDoc() map[string]string {
@@ -476,7 +476,7 @@ func (TokenClaimValidationRule) SwaggerDoc() map[string]string {
var map_TokenIssuer = map[string]string{
"issuerURL": "URL is the serving URL of the token issuer. Must use the https:// scheme.",
- "audiences": "Audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their \"aud\" claim. Must be set to exactly one value.",
+ "audiences": "audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their \"aud\" claim. Must be set to exactly one value.",
"issuerCertificateAuthority": "CertificateAuthority is a reference to a config map in the configuration namespace. The .data of the configMap must contain the \"ca-bundle.crt\" key. If unset, system trust is used instead.",
}
@@ -485,8 +485,8 @@ func (TokenIssuer) SwaggerDoc() map[string]string {
}
var map_TokenRequiredClaim = map[string]string{
- "claim": "Claim is a name of a required claim. Only claims with string values are supported.",
- "requiredValue": "RequiredValue is the required value for the claim.",
+ "claim": "claim is a name of a required claim. Only claims with string values are supported.",
+ "requiredValue": "requiredValue is the required value for the claim.",
}
func (TokenRequiredClaim) SwaggerDoc() map[string]string {
@@ -494,7 +494,7 @@ func (TokenRequiredClaim) SwaggerDoc() map[string]string {
}
var map_UsernameClaimMapping = map[string]string{
- "prefixPolicy": "PrefixPolicy specifies how a prefix should apply.\n\nBy default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins.\n\nSet to \"NoPrefix\" to disable prefixing.\n\nExample:\n (1) `prefix` is set to \"myoidc:\" and `claim` is set to \"username\".\n If the JWT claim `username` contains value `userA`, the resulting\n mapped value will be \"myoidc:userA\".\n (2) `prefix` is set to \"myoidc:\" and `claim` is set to \"email\". If the\n JWT `email` claim contains value \"userA@myoidc.tld\", the resulting\n mapped value will be \"myoidc:userA@myoidc.tld\".\n (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n (b) \"email\": the mapped value will be \"userA@myoidc.tld\"",
+ "prefixPolicy": "prefixPolicy specifies how a prefix should apply.\n\nBy default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins.\n\nSet to \"NoPrefix\" to disable prefixing.\n\nExample:\n (1) `prefix` is set to \"myoidc:\" and `claim` is set to \"username\".\n If the JWT claim `username` contains value `userA`, the resulting\n mapped value will be \"myoidc:userA\".\n (2) `prefix` is set to \"myoidc:\" and `claim` is set to \"email\". If the\n JWT `email` claim contains value \"userA@myoidc.tld\", the resulting\n mapped value will be \"myoidc:userA@myoidc.tld\".\n (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n (b) \"email\": the mapped value will be \"userA@myoidc.tld\"",
}
func (UsernameClaimMapping) SwaggerDoc() map[string]string {
@@ -513,7 +513,7 @@ func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string {
var map_Build = map[string]string{
"": "Build configures the behavior of OpenShift builds for the entire cluster. This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds.\n\nThe canonical name is \"cluster\"\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
- "spec": "Spec holds user-settable values for the build controller configuration",
+ "spec": "spec holds user-settable values for the build controller configuration",
}
func (Build) SwaggerDoc() map[string]string {
@@ -521,11 +521,11 @@ func (Build) SwaggerDoc() map[string]string {
}
var map_BuildDefaults = map[string]string{
- "defaultProxy": "DefaultProxy contains the default proxy settings for all build operations, including image pull/push and source download.\n\nValues can be overrode by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build config's strategy.",
- "gitProxy": "GitProxy contains the proxy settings for git operations only. If set, this will override any Proxy settings for all git commands, such as git clone.\n\nValues that are not set here will be inherited from DefaultProxy.",
- "env": "Env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build",
- "imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig.",
- "resources": "Resources defines resource requirements to execute the build.",
+ "defaultProxy": "defaultProxy contains the default proxy settings for all build operations, including image pull/push and source download.\n\nValues can be overrode by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build config's strategy.",
+ "gitProxy": "gitProxy contains the proxy settings for git operations only. If set, this will override any Proxy settings for all git commands, such as git clone.\n\nValues that are not set here will be inherited from DefaultProxy.",
+ "env": "env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build",
+ "imageLabels": "imageLabels is a list of docker labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig.",
+ "resources": "resources defines resource requirements to execute the build.",
}
func (BuildDefaults) SwaggerDoc() map[string]string {
@@ -542,10 +542,10 @@ func (BuildList) SwaggerDoc() map[string]string {
}
var map_BuildOverrides = map[string]string{
- "imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.",
- "nodeSelector": "NodeSelector is a selector which must be true for the build pod to fit on a node",
- "tolerations": "Tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.",
- "forcePull": "ForcePull overrides, if set, the equivalent value in the builds, i.e. false disables force pull for all builds, true enables force pull for all builds, independently of what each build specifies itself",
+ "imageLabels": "imageLabels is a list of docker labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.",
+ "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node",
+ "tolerations": "tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.",
+ "forcePull": "forcePull overrides, if set, the equivalent value in the builds, i.e. false disables force pull for all builds, true enables force pull for all builds, independently of what each build specifies itself",
}
func (BuildOverrides) SwaggerDoc() map[string]string {
@@ -553,9 +553,9 @@ func (BuildOverrides) SwaggerDoc() map[string]string {
}
var map_BuildSpec = map[string]string{
- "additionalTrustedCA": "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config.\n\nDEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead.",
- "buildDefaults": "BuildDefaults controls the default information for Builds",
- "buildOverrides": "BuildOverrides controls override settings for builds",
+ "additionalTrustedCA": "additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config.\n\nDEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead.",
+ "buildDefaults": "buildDefaults controls the default information for Builds",
+ "buildOverrides": "buildOverrides controls override settings for builds",
}
func (BuildSpec) SwaggerDoc() map[string]string {
@@ -563,8 +563,8 @@ func (BuildSpec) SwaggerDoc() map[string]string {
}
var map_ImageLabel = map[string]string{
- "name": "Name defines the name of the label. It must have non-zero length.",
- "value": "Value defines the literal value of the label.",
+ "name": "name defines the name of the label. It must have non-zero length.",
+ "value": "value defines the literal value of the label.",
}
func (ImageLabel) SwaggerDoc() map[string]string {
@@ -648,7 +648,7 @@ func (OperandVersion) SwaggerDoc() map[string]string {
var map_ClusterCondition = map[string]string{
"": "ClusterCondition is a union of typed cluster conditions. The 'type' property determines which of the type-specific properties are relevant. When evaluated on a cluster, the condition may match, not match, or fail to evaluate.",
"type": "type represents the cluster-condition type. This defines the members and semantics of any additional properties.",
- "promql": "promQL represents a cluster condition based on PromQL.",
+ "promql": "promql represents a cluster condition based on PromQL.",
}
func (ClusterCondition) SwaggerDoc() map[string]string {
@@ -764,7 +764,7 @@ func (ConditionalUpdateRisk) SwaggerDoc() map[string]string {
var map_PromQLClusterCondition = map[string]string{
"": "PromQLClusterCondition represents a cluster condition based on PromQL.",
- "promql": "PromQL is a PromQL query classifying clusters. This query query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are evaluation failures.",
+ "promql": "promql is a PromQL query classifying clusters. This query query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are evaluation failures.",
}
func (PromQLClusterCondition) SwaggerDoc() map[string]string {
@@ -772,11 +772,12 @@ func (PromQLClusterCondition) SwaggerDoc() map[string]string {
}
var map_Release = map[string]string{
- "": "Release represents an OpenShift release image and associated metadata.",
- "version": "version is a semantic version identifying the update version. When this field is part of spec, version is optional if image is specified.",
- "image": "image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version.",
- "url": "url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases.",
- "channels": "channels is the set of Cincinnati channels to which the release currently belongs.",
+ "": "Release represents an OpenShift release image and associated metadata.",
+ "architecture": "architecture is an optional field that indicates the value of the cluster architecture. In this context cluster architecture means either a single architecture or a multi architecture. Valid values are 'Multi' and empty.",
+ "version": "version is a semantic version identifying the update version. When this field is part of spec, version is optional if image is specified.",
+ "image": "image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version.",
+ "url": "url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases.",
+ "channels": "channels is the set of Cincinnati channels to which the release currently belongs.",
}
func (Release) SwaggerDoc() map[string]string {
@@ -1184,10 +1185,11 @@ func (AWSPlatformSpec) SwaggerDoc() map[string]string {
}
var map_AWSPlatformStatus = map[string]string{
- "": "AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.",
- "region": "region holds the default AWS region for new AWS resources created by the cluster.",
- "serviceEndpoints": "ServiceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service.",
- "resourceTags": "resourceTags is a list of additional tags to apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags available for the user.",
+ "": "AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.",
+ "region": "region holds the default AWS region for new AWS resources created by the cluster.",
+ "serviceEndpoints": "serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service.",
+ "resourceTags": "resourceTags is a list of additional tags to apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags available for the user.",
+ "cloudLoadBalancerConfig": "cloudLoadBalancerConfig holds configuration related to DNS and cloud load balancers. It allows configuration of in-cluster DNS as an alternative to the platform default DNS implementation. When using the ClusterHosted DNS type, Load Balancer IP addresses must be provided for the API and internal API load balancers as well as the ingress load balancer.",
}
func (AWSPlatformStatus) SwaggerDoc() map[string]string {
@@ -1359,7 +1361,7 @@ func (EquinixMetalPlatformStatus) SwaggerDoc() map[string]string {
var map_ExternalPlatformSpec = map[string]string{
"": "ExternalPlatformSpec holds the desired state for the generic External infrastructure provider.",
- "platformName": "PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. This field is solely for informational and reporting purposes and is not expected to be used for decision-making.",
+ "platformName": "platformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. This field is solely for informational and reporting purposes and is not expected to be used for decision-making.",
}
func (ExternalPlatformSpec) SwaggerDoc() map[string]string {
@@ -1389,7 +1391,7 @@ var map_GCPPlatformStatus = map[string]string{
"region": "region holds the region for new GCP resources created for the cluster.",
"resourceLabels": "resourceLabels is a list of additional labels to apply to GCP resources created for the cluster. See https://cloud.google.com/compute/docs/labeling-resources for information on labeling GCP resources. GCP supports a maximum of 64 labels per resource. OpenShift reserves 32 labels for internal use, allowing 32 labels for user configuration.",
"resourceTags": "resourceTags is a list of additional tags to apply to GCP resources created for the cluster. See https://cloud.google.com/resource-manager/docs/tags/tags-overview for information on tagging GCP resources. GCP supports a maximum of 50 tags per resource.",
- "cloudLoadBalancerConfig": "cloudLoadBalancerConfig is a union that contains the IP addresses of API, API-Int and Ingress Load Balancers created on the cloud platform. These values would not be populated on on-prem platforms. These Load Balancer IPs are used to configure the in-cluster DNS instances for API, API-Int and Ingress services. `dnsType` is expected to be set to `ClusterHosted` when these Load Balancer IP addresses are populated and used.",
+ "cloudLoadBalancerConfig": "cloudLoadBalancerConfig holds configuration related to DNS and cloud load balancers. It allows configuration of in-cluster DNS as an alternative to the platform default DNS implementation. When using the ClusterHosted DNS type, Load Balancer IP addresses must be provided for the API and internal API load balancers as well as the ingress load balancer.",
}
func (GCPPlatformStatus) SwaggerDoc() map[string]string {
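A sketch of the ClusterHosted shape that both the AWS and GCP doc strings now describe, assuming the union keeps its existing names in this package (DNSType, CloudLoadBalancerIPs, and the IP string type):

```go
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	// In-cluster DNS instead of the platform default: API, internal API, and
	// ingress load balancer IPs must all be provided for ClusterHosted.
	cfg := configv1.CloudLoadBalancerConfig{
		DNSType: configv1.ClusterHostedDNSType,
		ClusterHosted: &configv1.CloudLoadBalancerIPs{
			APILoadBalancerIPs:     []configv1.IP{"192.0.2.10"},
			APIIntLoadBalancerIPs:  []configv1.IP{"192.0.2.11"},
			IngressLoadBalancerIPs: []configv1.IP{"192.0.2.12"},
		},
	}
	fmt.Println(cfg.DNSType)
}
```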
@@ -1427,11 +1429,11 @@ func (IBMCloudPlatformSpec) SwaggerDoc() map[string]string {
var map_IBMCloudPlatformStatus = map[string]string{
"": "IBMCloudPlatformStatus holds the current status of the IBMCloud infrastructure provider.",
- "location": "Location is where the cluster has been deployed",
- "resourceGroupName": "ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.",
- "providerType": "ProviderType indicates the type of cluster that was created",
- "cisInstanceCRN": "CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain",
- "dnsInstanceCRN": "DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain",
+ "location": "location is where the cluster has been deployed",
+ "resourceGroupName": "resourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.",
+ "providerType": "providerType indicates the type of cluster that was created",
+ "cisInstanceCRN": "cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain",
+ "dnsInstanceCRN": "dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain",
"serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM Cloud service. These endpoints are consumed by components within the cluster to reach the respective IBM Cloud Services.",
}
@@ -1518,7 +1520,7 @@ var map_NutanixFailureDomain = map[string]string{
"": "NutanixFailureDomain configures failure domain information for the Nutanix platform.",
"name": "name defines the unique name of a failure domain. Name is required and must be at most 64 characters in length. It must consist of only lower case alphanumeric characters and hyphens (-). It must start and end with an alphanumeric character. This value is arbitrary and is used to identify the failure domain within the platform.",
"cluster": "cluster is to identify the cluster (the Prism Element under management of the Prism Central), in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained from the Prism Central console or using the prism_central API.",
- "subnets": "subnets holds a list of identifiers (one or more) of the cluster's network subnets for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API.",
+ "subnets": "subnets holds a list of identifiers (one or more) of the cluster's network subnets If the feature gate NutanixMultiSubnets is enabled, up to 32 subnets may be configured. for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API.",
}
func (NutanixFailureDomain) SwaggerDoc() map[string]string {
@@ -1659,19 +1661,19 @@ func (OvirtPlatformStatus) SwaggerDoc() map[string]string {
var map_PlatformSpec = map[string]string{
"": "PlatformSpec holds the desired state specific to the underlying infrastructure provider of the current cluster. Since these are used at spec-level for the underlying cluster, it is supposed that only one of the spec structs is set.",
"type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"KubeVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.",
- "aws": "AWS contains settings specific to the Amazon Web Services infrastructure provider.",
- "azure": "Azure contains settings specific to the Azure infrastructure provider.",
- "gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.",
- "baremetal": "BareMetal contains settings specific to the BareMetal platform.",
- "openstack": "OpenStack contains settings specific to the OpenStack infrastructure provider.",
- "ovirt": "Ovirt contains settings specific to the oVirt infrastructure provider.",
- "vsphere": "VSphere contains settings specific to the VSphere infrastructure provider.",
- "ibmcloud": "IBMCloud contains settings specific to the IBMCloud infrastructure provider.",
- "kubevirt": "Kubevirt contains settings specific to the kubevirt infrastructure provider.",
- "equinixMetal": "EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.",
- "powervs": "PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider.",
- "alibabaCloud": "AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.",
- "nutanix": "Nutanix contains settings specific to the Nutanix infrastructure provider.",
+ "aws": "aws contains settings specific to the Amazon Web Services infrastructure provider.",
+ "azure": "azure contains settings specific to the Azure infrastructure provider.",
+ "gcp": "gcp contains settings specific to the Google Cloud Platform infrastructure provider.",
+ "baremetal": "baremetal contains settings specific to the BareMetal platform.",
+ "openstack": "openstack contains settings specific to the OpenStack infrastructure provider.",
+ "ovirt": "ovirt contains settings specific to the oVirt infrastructure provider.",
+ "vsphere": "vsphere contains settings specific to the VSphere infrastructure provider.",
+ "ibmcloud": "ibmcloud contains settings specific to the IBMCloud infrastructure provider.",
+ "kubevirt": "kubevirt contains settings specific to the kubevirt infrastructure provider.",
+ "equinixMetal": "equinixMetal contains settings specific to the Equinix Metal infrastructure provider.",
+ "powervs": "powervs contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider.",
+ "alibabaCloud": "alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.",
+ "nutanix": "nutanix contains settings specific to the Nutanix infrastructure provider.",
"external": "ExternalPlatformType represents generic infrastructure provider. Platform-specific components should be supplemented separately.",
}
@@ -1682,20 +1684,20 @@ func (PlatformSpec) SwaggerDoc() map[string]string {
var map_PlatformStatus = map[string]string{
"": "PlatformStatus holds the current status specific to the underlying infrastructure provider of the current cluster. Since these are used at status-level for the underlying cluster, it is supposed that only one of the status structs is set.",
"type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.\n\nThis value will be synced with to the `status.platform` and `status.platformStatus.type`. Currently this value cannot be changed once set.",
- "aws": "AWS contains settings specific to the Amazon Web Services infrastructure provider.",
- "azure": "Azure contains settings specific to the Azure infrastructure provider.",
- "gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.",
- "baremetal": "BareMetal contains settings specific to the BareMetal platform.",
- "openstack": "OpenStack contains settings specific to the OpenStack infrastructure provider.",
- "ovirt": "Ovirt contains settings specific to the oVirt infrastructure provider.",
- "vsphere": "VSphere contains settings specific to the VSphere infrastructure provider.",
- "ibmcloud": "IBMCloud contains settings specific to the IBMCloud infrastructure provider.",
- "kubevirt": "Kubevirt contains settings specific to the kubevirt infrastructure provider.",
- "equinixMetal": "EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.",
- "powervs": "PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider.",
- "alibabaCloud": "AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.",
- "nutanix": "Nutanix contains settings specific to the Nutanix infrastructure provider.",
- "external": "External contains settings specific to the generic External infrastructure provider.",
+ "aws": "aws contains settings specific to the Amazon Web Services infrastructure provider.",
+ "azure": "azure contains settings specific to the Azure infrastructure provider.",
+ "gcp": "gcp contains settings specific to the Google Cloud Platform infrastructure provider.",
+ "baremetal": "baremetal contains settings specific to the BareMetal platform.",
+ "openstack": "openstack contains settings specific to the OpenStack infrastructure provider.",
+ "ovirt": "ovirt contains settings specific to the oVirt infrastructure provider.",
+ "vsphere": "vsphere contains settings specific to the VSphere infrastructure provider.",
+ "ibmcloud": "ibmcloud contains settings specific to the IBMCloud infrastructure provider.",
+ "kubevirt": "kubevirt contains settings specific to the kubevirt infrastructure provider.",
+ "equinixMetal": "equinixMetal contains settings specific to the Equinix Metal infrastructure provider.",
+ "powervs": "powervs contains settings specific to the Power Systems Virtual Servers infrastructure provider.",
+ "alibabaCloud": "alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.",
+ "nutanix": "nutanix contains settings specific to the Nutanix infrastructure provider.",
+ "external": "external contains settings specific to the generic External infrastructure provider.",
}
func (PlatformStatus) SwaggerDoc() map[string]string {
@@ -1717,8 +1719,8 @@ var map_PowerVSPlatformStatus = map[string]string{
"zone": "zone holds the default zone for the new Power VS resources created by the cluster. Note: Currently only single-zone OCP clusters are supported",
"resourceGroup": "resourceGroup is the resource group name for new IBMCloud resources created for a cluster. The resource group specified here will be used by cluster-image-registry-operator to set up a COS Instance in IBMCloud for the cluster registry. More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs. When omitted, the image registry operator won't be able to configure storage, which results in the image registry cluster operator not being in an available state.",
"serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service.",
- "cisInstanceCRN": "CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain",
- "dnsInstanceCRN": "DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain",
+ "cisInstanceCRN": "cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain",
+ "dnsInstanceCRN": "dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain",
}
func (PowerVSPlatformStatus) SwaggerDoc() map[string]string {
@@ -1735,13 +1737,45 @@ func (PowerVSServiceEndpoint) SwaggerDoc() map[string]string {
return map_PowerVSServiceEndpoint
}
+var map_VSphereFailureDomainHostGroup = map[string]string{
+ "": "VSphereFailureDomainHostGroup holds the vmGroup and the hostGroup names in vCenter corresponds to a vm-host group of type Virtual Machine and Host respectively. Is also contains the vmHostRule which is an affinity vm-host rule in vCenter.",
+ "vmGroup": "vmGroup is the name of the vm-host group of type virtual machine within vCenter for this failure domain. vmGroup is limited to 80 characters. This field is required when the VSphereFailureDomain ZoneType is HostGroup",
+ "hostGroup": "hostGroup is the name of the vm-host group of type host within vCenter for this failure domain. hostGroup is limited to 80 characters. This field is required when the VSphereFailureDomain ZoneType is HostGroup",
+ "vmHostRule": "vmHostRule is the name of the affinity vm-host rule within vCenter for this failure domain. vmHostRule is limited to 80 characters. This field is required when the VSphereFailureDomain ZoneType is HostGroup",
+}
+
+func (VSphereFailureDomainHostGroup) SwaggerDoc() map[string]string {
+ return map_VSphereFailureDomainHostGroup
+}
+
+var map_VSphereFailureDomainRegionAffinity = map[string]string{
+ "": "VSphereFailureDomainRegionAffinity contains the region type which is the string representation of the VSphereFailureDomainRegionType with available options of Datacenter and ComputeCluster.",
+ "type": "type determines the vSphere object type for a region within this failure domain. Available types are Datacenter and ComputeCluster. When set to Datacenter, this means the vCenter Datacenter defined is the region. When set to ComputeCluster, this means the vCenter cluster defined is the region.",
+}
+
+func (VSphereFailureDomainRegionAffinity) SwaggerDoc() map[string]string {
+ return map_VSphereFailureDomainRegionAffinity
+}
+
+var map_VSphereFailureDomainZoneAffinity = map[string]string{
+ "": "VSphereFailureDomainZoneAffinity contains the vCenter cluster vm-host group (virtual machine and host types) and the vm-host affinity rule that together creates an affinity configuration for vm-host based zonal. This configuration within vCenter creates the required association between a failure domain, virtual machines and ESXi hosts to create a vm-host based zone.",
+ "type": "type determines the vSphere object type for a zone within this failure domain. Available types are ComputeCluster and HostGroup. When set to ComputeCluster, this means the vCenter cluster defined is the zone. When set to HostGroup, hostGroup must be configured with hostGroup, vmGroup and vmHostRule and this means the zone is defined by the grouping of those fields.",
+ "hostGroup": "hostGroup holds the vmGroup and the hostGroup names in vCenter corresponds to a vm-host group of type Virtual Machine and Host respectively. Is also contains the vmHostRule which is an affinity vm-host rule in vCenter.",
+}
+
+func (VSphereFailureDomainZoneAffinity) SwaggerDoc() map[string]string {
+ return map_VSphereFailureDomainZoneAffinity
+}
+
var map_VSpherePlatformFailureDomainSpec = map[string]string{
- "": "VSpherePlatformFailureDomainSpec holds the region and zone failure domain and the vCenter topology of that failure domain.",
- "name": "name defines the arbitrary but unique name of a failure domain.",
- "region": "region defines the name of a region tag that will be attached to a vCenter datacenter. The tag category in vCenter must be named openshift-region.",
- "zone": "zone defines the name of a zone tag that will be attached to a vCenter cluster. The tag category in vCenter must be named openshift-zone.",
- "server": "server is the fully-qualified domain name or the IP address of the vCenter server.",
- "topology": "Topology describes a given failure domain using vSphere constructs",
+ "": "VSpherePlatformFailureDomainSpec holds the region and zone failure domain and the vCenter topology of that failure domain.",
+ "name": "name defines the arbitrary but unique name of a failure domain.",
+ "region": "region defines the name of a region tag that will be attached to a vCenter datacenter. The tag category in vCenter must be named openshift-region.",
+ "zone": "zone defines the name of a zone tag that will be attached to a vCenter cluster. The tag category in vCenter must be named openshift-zone.",
+ "regionAffinity": "regionAffinity holds the type of region, Datacenter or ComputeCluster. When set to Datacenter, this means the region is a vCenter Datacenter as defined in topology. When set to ComputeCluster, this means the region is a vCenter Cluster as defined in topology.",
+ "zoneAffinity": "zoneAffinity holds the type of the zone and the hostGroup which vmGroup and the hostGroup names in vCenter corresponds to a vm-host group of type Virtual Machine and Host respectively. Is also contains the vmHostRule which is an affinity vm-host rule in vCenter.",
+ "server": "server is the fully-qualified domain name or the IP address of the vCenter server.",
+ "topology": "topology describes a given failure domain using vSphere constructs",
}
func (VSpherePlatformFailureDomainSpec) SwaggerDoc() map[string]string {
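For orientation, the JSON shape of a vm-host-group zonal failure domain implied by the field docs above looks roughly like the sketch below; all names and values are hypothetical, and the snippet only marshals a map built from the documented json field names.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Shape of a HostGroup-zoned failure domain, using the JSON field
	// names documented above; every value here is a placeholder.
	fd := map[string]any{
		"name":   "us-east-1a",
		"region": "us-east",
		"zone":   "us-east-1a",
		"server": "vcenter.example.com",
		"zoneAffinity": map[string]any{
			"type": "HostGroup",
			"hostGroup": map[string]any{
				"vmGroup":    "zone-1a-vms",
				"hostGroup":  "zone-1a-hosts",
				"vmHostRule": "zone-1a-vm-host-rule",
			},
		},
	}
	out, err := json.MarshalIndent(fd, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}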
@@ -1960,8 +1994,8 @@ func (ExternalIPPolicy) SwaggerDoc() map[string]string {
var map_MTUMigration = map[string]string{
"": "MTUMigration contains infomation about MTU migration.",
- "network": "Network contains MTU migration configuration for the default network.",
- "machine": "Machine contains MTU migration configuration for the machine's uplink.",
+ "network": "network contains MTU migration configuration for the default network.",
+ "machine": "machine contains MTU migration configuration for the machine's uplink.",
}
func (MTUMigration) SwaggerDoc() map[string]string {
@@ -1970,8 +2004,8 @@ func (MTUMigration) SwaggerDoc() map[string]string {
var map_MTUMigrationValues = map[string]string{
"": "MTUMigrationValues contains the values for a MTU migration.",
- "to": "To is the MTU to migrate to.",
- "from": "From is the MTU to migrate from.",
+ "to": "to is the MTU to migrate to.",
+ "from": "from is the MTU to migrate from.",
}
func (MTUMigrationValues) SwaggerDoc() map[string]string {
@@ -2030,8 +2064,8 @@ func (NetworkList) SwaggerDoc() map[string]string {
var map_NetworkMigration = map[string]string{
"": "NetworkMigration represents the network migration status.",
- "networkType": "NetworkType is the target plugin that is being deployed. DEPRECATED: network type migration is no longer supported, so this should always be unset.",
- "mtu": "MTU is the MTU configuration that is being deployed.",
+ "networkType": "networkType is the target plugin that is being deployed. DEPRECATED: network type migration is no longer supported, so this should always be unset.",
+ "mtu": "mtu is the MTU configuration that is being deployed.",
}
func (NetworkMigration) SwaggerDoc() map[string]string {
@@ -2042,7 +2076,7 @@ var map_NetworkSpec = map[string]string{
"": "NetworkSpec is the desired network configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.",
"clusterNetwork": "IP address pool to use for pod IPs. This field is immutable after installation.",
"serviceNetwork": "IP address pool for services. Currently, we only support a single entry here. This field is immutable after installation.",
- "networkType": "NetworkType is the plugin that is to be deployed (e.g. OVNKubernetes). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OVNKubernetes This field is immutable after installation.",
+ "networkType": "networkType is the plugin that is to be deployed (e.g. OVNKubernetes). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OVNKubernetes This field is immutable after installation.",
"externalIP": "externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set.",
"serviceNodePortRange": "The port range allowed for Services of type NodePort. If not specified, the default of 30000-32767 will be used. Such Services without a NodePort specified will have one automatically allocated from this range. This parameter can be updated after the cluster is installed.",
"networkDiagnostics": "networkDiagnostics defines network diagnostics configuration.\n\nTakes precedence over spec.disableNetworkDiagnostics in network.operator.openshift.io. If networkDiagnostics is not specified or is empty, and the spec.disableNetworkDiagnostics flag in network.operator.openshift.io is set to true, the network diagnostics feature will be disabled.",
@@ -2056,9 +2090,9 @@ var map_NetworkStatus = map[string]string{
"": "NetworkStatus is the current network configuration.",
"clusterNetwork": "IP address pool to use for pod IPs.",
"serviceNetwork": "IP address pool for services. Currently, we only support a single entry here.",
- "networkType": "NetworkType is the plugin that is deployed (e.g. OVNKubernetes).",
- "clusterNetworkMTU": "ClusterNetworkMTU is the MTU for inter-pod networking.",
- "migration": "Migration contains the cluster network migration configuration.",
+ "networkType": "networkType is the plugin that is deployed (e.g. OVNKubernetes).",
+ "clusterNetworkMTU": "clusterNetworkMTU is the MTU for inter-pod networking.",
+ "migration": "migration contains the cluster network migration configuration.",
"conditions": "conditions represents the observations of a network.config current state. Known .status.conditions.type are: \"NetworkDiagnosticsAvailable\"",
}
@@ -2087,8 +2121,9 @@ func (NodeList) SwaggerDoc() map[string]string {
}
var map_NodeSpec = map[string]string{
- "cgroupMode": "CgroupMode determines the cgroups version on the node",
- "workerLatencyProfile": "WorkerLatencyProfile determins the how fast the kubelet is updating the status and corresponding reaction of the cluster",
+ "cgroupMode": "cgroupMode determines the cgroups version on the node",
+ "workerLatencyProfile": "workerLatencyProfile determins the how fast the kubelet is updating the status and corresponding reaction of the cluster",
+ "minimumKubeletVersion": "minimumKubeletVersion is the lowest version of a kubelet that can join the cluster. Specifically, the apiserver will deny most authorization requests of kubelets that are older than the specified version, only allowing the kubelet to get and update its node object, and perform subjectaccessreviews. This means any kubelet that attempts to join the cluster will not be able to run any assigned workloads, and will eventually be marked as not ready. Its max length is 8, so maximum version allowed is either \"9.999.99\" or \"99.99.99\". Since the kubelet reports the version of the kubernetes release, not Openshift, this field references the underlying kubernetes version this version of Openshift is based off of. In other words: if an admin wishes to ensure no nodes run an older version than Openshift 4.17, then they should set the minimumKubeletVersion to 1.30.0. When comparing versions, the kubelet's version is stripped of any contents outside of major.minor.patch version. Thus, a kubelet with version \"1.0.0-ec.0\" will be compatible with minimumKubeletVersion \"1.0.0\" or earlier.",
}
func (NodeSpec) SwaggerDoc() map[string]string {
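The comparison rule spelled out in the minimumKubeletVersion text (strip anything beyond major.minor.patch, then compare numerically) can be illustrated with a small sketch; this is an approximation of the described semantics, not the apiserver's actual implementation.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parse splits "1.30.0-ec.0" into [1 30 0], dropping any pre-release
// suffix, mirroring the doc text: the kubelet's version is stripped of
// any contents outside of the major.minor.patch version.
func parse(v string) [3]int {
	v, _, _ = strings.Cut(v, "-")
	var out [3]int
	for i, p := range strings.SplitN(v, ".", 3) {
		out[i], _ = strconv.Atoi(p)
	}
	return out
}

// allowed reports whether a kubelet at version kv satisfies minimumKubeletVersion mv.
func allowed(kv, mv string) bool {
	k, m := parse(kv), parse(mv)
	for i := 0; i < 3; i++ {
		if k[i] != m[i] {
			return k[i] > m[i]
		}
	}
	return true // equal versions are allowed
}

func main() {
	fmt.Println(allowed("1.0.0-ec.0", "1.0.0")) // true, per the example in the doc text
	fmt.Println(allowed("1.29.3", "1.30.0"))    // false: kubelet too old
}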
@@ -2435,7 +2470,7 @@ func (TemplateReference) SwaggerDoc() map[string]string {
var map_Proxy = map[string]string{
"": "Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
- "spec": "Spec holds user-settable values for the proxy configuration",
+ "spec": "spec holds user-settable values for the proxy configuration",
"status": "status holds observed values from the cluster. They may not be overridden.",
}
@@ -2510,7 +2545,7 @@ var map_SchedulerSpec = map[string]string{
"profile": "profile sets which scheduling profile should be set in order to configure scheduling decisions for new pods.\n\nValid values are \"LowNodeUtilization\", \"HighNodeUtilization\", \"NoScoring\" Defaults to \"LowNodeUtilization\"",
"profileCustomizations": "profileCustomizations contains configuration for modifying the default behavior of existing scheduler profiles.",
"defaultNodeSelector": "defaultNodeSelector helps set the cluster-wide default node selector to restrict pod placement to specific nodes. This is applied to the pods created in all namespaces and creates an intersection with any existing nodeSelectors already set on a pod, additionally constraining that pod's selector. For example, defaultNodeSelector: \"type=user-node,region=east\" would set nodeSelector field in pod spec to \"type=user-node,region=east\" to all pods created in all namespaces. Namespaces having project-wide node selectors won't be impacted even if this field is set. This adds an annotation section to the namespace. For example, if a new namespace is created with node-selector='type=user-node,region=east', the annotation openshift.io/node-selector: type=user-node,region=east gets added to the project. When the openshift.io/node-selector annotation is set on the project the value is used in preference to the value we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector: \"type=user-node,region=west\" means that the default of \"type=user-node,region=east\" set in defaultNodeSelector would not be applied.",
- "mastersSchedulable": "MastersSchedulable allows masters nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. Please turn on this field after doing due diligence.",
+ "mastersSchedulable": "mastersSchedulable allows masters nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. Please turn on this field after doing due diligence.",
}
func (SchedulerSpec) SwaggerDoc() map[string]string {
@@ -2518,8 +2553,8 @@ func (SchedulerSpec) SwaggerDoc() map[string]string {
}
var map_FeatureGateTests = map[string]string{
- "featureGate": "FeatureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance.",
- "tests": "Tests contains an item for every TestName",
+ "featureGate": "featureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance.",
+ "tests": "tests contains an item for every TestName",
}
func (FeatureGateTests) SwaggerDoc() map[string]string {
@@ -2527,7 +2562,7 @@ func (FeatureGateTests) SwaggerDoc() map[string]string {
}
var map_TestDetails = map[string]string{
- "testName": "TestName is the name of the test as it appears in junit XMLs. It does not include the suite name since the same test can be executed in many suites.",
+ "testName": "testName is the name of the test as it appears in junit XMLs. It does not include the suite name since the same test can be executed in many suites.",
}
func (TestDetails) SwaggerDoc() map[string]string {
@@ -2545,7 +2580,7 @@ func (TestReporting) SwaggerDoc() map[string]string {
}
var map_TestReportingSpec = map[string]string{
- "testsForFeatureGates": "TestsForFeatureGates is a list, indexed by FeatureGate and includes information about testing.",
+ "testsForFeatureGates": "testsForFeatureGates is a list, indexed by FeatureGate and includes information about testing.",
}
func (TestReportingSpec) SwaggerDoc() map[string]string {
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/register.go b/vendor/github.com/openshift/api/config/v1alpha1/register.go
index 36432ceb8..4b30ea380 100644
--- a/vendor/github.com/openshift/api/config/v1alpha1/register.go
+++ b/vendor/github.com/openshift/api/config/v1alpha1/register.go
@@ -30,6 +30,8 @@ func Resource(resource string) schema.GroupResource {
// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(GroupVersion,
+ &ClusterMonitoring{},
+ &ClusterMonitoringList{},
&InsightsDataGather{},
&InsightsDataGatherList{},
&Backup{},
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go b/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go
index 65eb5c1f7..e52a2e5c5 100644
--- a/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go
+++ b/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go
@@ -24,18 +24,16 @@ type Backup struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
- // +kubebuilder:validation:Required
// +required
Spec BackupSpec `json:"spec"`
// status holds observed values from the cluster. They may not be overridden.
- // +kubebuilder:validation:Optional
// +optional
Status BackupStatus `json:"status"`
}
type BackupSpec struct {
// etcd specifies the configuration for periodic backups of the etcd cluster
- // +kubebuilder:validation:Required
+ // +required
EtcdBackupSpec EtcdBackupSpec `json:"etcd"`
}
@@ -45,12 +43,11 @@ type BackupStatus struct {
// EtcdBackupSpec provides configuration for automated etcd backups to the cluster-etcd-operator
type EtcdBackupSpec struct {
- // Schedule defines the recurring backup schedule in Cron format
+ // schedule defines the recurring backup schedule in Cron format
// every 2 hours: 0 */2 * * *
// every day at 3am: 0 3 * * *
// Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice.
// The current default is "no backups", but will change in the future.
- // +kubebuilder:validation:Optional
// +optional
// +kubebuilder:validation:Pattern:=`^(@(annually|yearly|monthly|weekly|daily|hourly))|(\*|(?:\*|(?:[0-9]|(?:[1-5][0-9])))\/(?:[0-9]|(?:[1-5][0-9]))|(?:[0-9]|(?:[1-5][0-9]))(?:(?:\-[0-9]|\-(?:[1-5][0-9]))?|(?:\,(?:[0-9]|(?:[1-5][0-9])))*)) (\*|(?:\*|(?:\*|(?:[0-9]|1[0-9]|2[0-3])))\/(?:[0-9]|1[0-9]|2[0-3])|(?:[0-9]|1[0-9]|2[0-3])(?:(?:\-(?:[0-9]|1[0-9]|2[0-3]))?|(?:\,(?:[0-9]|1[0-9]|2[0-3]))*)) (\*|(?:[1-9]|(?:[12][0-9])|3[01])(?:(?:\-(?:[1-9]|(?:[12][0-9])|3[01]))?|(?:\,(?:[1-9]|(?:[12][0-9])|3[01]))*)) (\*|(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC)(?:(?:\-(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))?|(?:\,(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))*)) (\*|(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT)(?:(?:\-(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))?|(?:\,(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))*))$`
Schedule string `json:"schedule"`
@@ -73,7 +70,6 @@ type EtcdBackupSpec struct {
// The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones.
// If not specified, this will default to the time zone of the kube-controller-manager process.
// See https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones
- // +kubebuilder:validation:Optional
// +optional
// +kubebuilder:validation:Pattern:=`^([A-Za-z_]+([+-]*0)*|[A-Za-z_]+(\/[A-Za-z_]+){1,2})(\/GMT[+-]\d{1,2})?$`
TimeZone string `json:"timeZone"`
@@ -84,17 +80,15 @@ type EtcdBackupSpec struct {
// [A-Za-z_]+(/[A-Za-z_]+){1,2} - One or more alphabetical characters (uppercase or lowercase) or underscores, followed by one or two occurrences of a forward slash followed by one or more alphabetical characters or underscores. This allows for matching timezone identifiers with 2 or 3 parts, e.g America/Argentina/Buenos_Aires
// (/GMT[+-]\d{1,2})? - Makes the GMT offset suffix optional. It matches "/GMT" followed by either a plus ("+") or minus ("-") sign and one or two digits (the GMT offset)
- // RetentionPolicy defines the retention policy for retaining and deleting existing backups.
- // +kubebuilder:validation:Optional
+ // retentionPolicy defines the retention policy for retaining and deleting existing backups.
// +optional
RetentionPolicy RetentionPolicy `json:"retentionPolicy"`
- // PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the
+ // pvcName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the
// etcd backup files would be saved
// The PVC itself must always be created in the "openshift-etcd" namespace
// If the PVC is left unspecified "" then the platform will choose a reasonable default location to save the backup.
// In the future this would be backups saved across the control-plane master nodes.
- // +kubebuilder:validation:Optional
// +optional
PVCName string `json:"pvcName"`
}
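Taken together, the fields above admit a spec like the following sketch; the values are illustrative only and must satisfy the kubebuilder validation patterns shown (cron schedule syntax, tz database name).

package main

import (
	"fmt"

	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
)

func main() {
	// A daily 3am backup; schedule and timeZone follow the patterns
	// documented above, and the PVC name is a hypothetical claim in
	// the openshift-etcd namespace.
	spec := configv1alpha1.EtcdBackupSpec{
		Schedule: "0 3 * * *",        // every day at 3am, per the doc comment
		TimeZone: "America/New_York", // optional; defaults to kube-controller-manager's zone
		PVCName:  "etcd-backup-pvc",  // hypothetical PVC name
	}
	fmt.Printf("%+v\n", spec)
}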
@@ -115,45 +109,40 @@ const (
// This struct is a discriminated union that allows users to select the type of retention policy from the supported types.
// +union
type RetentionPolicy struct {
- // RetentionType sets the type of retention policy.
+ // retentionType sets the type of retention policy.
// Currently, the only valid policies are retention by number of backups (RetentionNumber), by the size of backups (RetentionSize). More policies or types may be added in the future.
// Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice.
// The current default is RetentionNumber with 15 backups kept.
// +unionDiscriminator
// +required
- // +kubebuilder:validation:Required
// +kubebuilder:validation:Enum:="";"RetentionNumber";"RetentionSize"
RetentionType RetentionType `json:"retentionType"`
- // RetentionNumber configures the retention policy based on the number of backups
- // +kubebuilder:validation:Optional
+ // retentionNumber configures the retention policy based on the number of backups
// +optional
RetentionNumber *RetentionNumberConfig `json:"retentionNumber,omitempty"`
- // RetentionSize configures the retention policy based on the size of backups
- // +kubebuilder:validation:Optional
+ // retentionSize configures the retention policy based on the size of backups
// +optional
RetentionSize *RetentionSizeConfig `json:"retentionSize,omitempty"`
}
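As a discriminated union, only the arm named by retentionType should be set. A minimal sketch follows; the discriminator is constructed from its documented string value, since the RetentionType const names are not shown in this hunk.

package main

import (
	"fmt"

	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
)

func main() {
	// Select the RetentionNumber arm; the discriminator string comes
	// from the +kubebuilder:validation:Enum list above.
	policy := configv1alpha1.RetentionPolicy{
		RetentionType: configv1alpha1.RetentionType("RetentionNumber"),
		RetentionNumber: &configv1alpha1.RetentionNumberConfig{
			MaxNumberOfBackups: 15, // the documented default
		},
		// RetentionSize stays nil: only the arm named by the
		// discriminator should be populated in a union.
	}
	fmt.Printf("%+v\n", policy)
}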
// RetentionNumberConfig specifies the configuration of the retention policy on the number of backups
type RetentionNumberConfig struct {
- // MaxNumberOfBackups defines the maximum number of backups to retain.
+ // maxNumberOfBackups defines the maximum number of backups to retain.
// If the existing number of backups saved is equal to MaxNumberOfBackups then
// the oldest backup will be removed before a new backup is initiated.
// +kubebuilder:validation:Minimum=1
- // +kubebuilder:validation:Required
// +required
MaxNumberOfBackups int `json:"maxNumberOfBackups,omitempty"`
}
// RetentionSizeConfig specifies the configuration of the retention policy on the total size of backups
type RetentionSizeConfig struct {
- // MaxSizeOfBackupsGb defines the total size in GB of backups to retain.
+ // maxSizeOfBackupsGb defines the total size in GB of backups to retain.
// If the current total size of backups exceeds MaxSizeOfBackupsGb then
// the oldest backup will be removed before a new backup is initiated.
// +kubebuilder:validation:Minimum=1
- // +kubebuilder:validation:Required
// +required
MaxSizeOfBackupsGb int `json:"maxSizeOfBackupsGb,omitempty"`
}
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go
index e3670f03e..5eaeeea73 100644
--- a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go
+++ b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go
@@ -24,7 +24,7 @@ type ClusterImagePolicy struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec contains the configuration for the cluster image policy.
- // +kubebuilder:validation:Required
+ // +required
Spec ClusterImagePolicySpec `json:"spec"`
// status contains the observed state of the resource.
// +optional
@@ -41,15 +41,16 @@ type ClusterImagePolicySpec struct {
// If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored.
// In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories
// quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation.
+ // If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied.
// For additional details about the format, please refer to the document explaining the docker transport field,
// which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:MaxItems=256
// +listType=set
Scopes []ImageScope `json:"scopes"`
// policy contains configuration to allow scopes to be verified, and defines how
// images not matching the verification policy will be treated.
- // +kubebuilder:validation:Required
+ // +required
Policy Policy `json:"policy"`
}
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go
new file mode 100644
index 000000000..c276971b5
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go
@@ -0,0 +1,103 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterMonitoring is the Custom Resource object which holds the current status of Cluster Monitoring Operator. CMO is a central component of the monitoring stack.
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:internal
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1929
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=clustermonitoring,scope=Cluster
+// +kubebuilder:subresource:status
+// +kubebuilder:metadata:annotations="description=Cluster Monitoring Operators configuration API"
+// +openshift:enable:FeatureGate=ClusterMonitoringConfig
+// ClusterMonitoring is the Schema for the Cluster Monitoring Operators API
+type ClusterMonitoring struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user configuration for the Cluster Monitoring Operator
+ // +required
+ Spec ClusterMonitoringSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status ClusterMonitoringStatus `json:"status,omitempty"`
+}
+
+// ClusterMonitoringStatus defines the observed state of ClusterMonitoring
+type ClusterMonitoringStatus struct {
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:internal
+type ClusterMonitoringList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items is a list of ClusterMonitoring
+ // +optional
+ Items []ClusterMonitoring `json:"items"`
+}
+
+// ClusterMonitoringSpec defines the desired state of Cluster Monitoring Operator
+// +required
+type ClusterMonitoringSpec struct {
+ // userDefined sets the deployment mode for user-defined monitoring in addition to the default platform monitoring.
+ // +required
+ UserDefined UserDefinedMonitoring `json:"userDefined"`
+}
+
+// UserDefinedMonitoring config for user-defined projects.
+// +required
+type UserDefinedMonitoring struct {
+ // mode defines the different configurations of UserDefinedMonitoring
+ // Valid values are Disabled and NamespaceIsolated
+ // Disabled disables monitoring for user-defined projects. This restricts the default monitoring stack, installed in the openshift-monitoring project, to monitor only platform namespaces, which prevents any custom monitoring configurations or resources from being applied to user-defined namespaces.
+ // NamespaceIsolated enables monitoring for user-defined projects with namespace-scoped tenancy. This ensures that metrics, alerts, and monitoring data are isolated at the namespace level.
+ // +kubebuilder:validation:Enum:="Disabled";"NamespaceIsolated"
+ // +required
+ Mode UserDefinedMode `json:"mode"`
+}
+
+// UserDefinedMode specifies the mode for user-defined monitoring.
+// +enum
+type UserDefinedMode string
+
+const (
+ // UserDefinedDisabled disables monitoring for user-defined projects. This restricts the default monitoring stack, installed in the openshift-monitoring project, to monitor only platform namespaces, which prevents any custom monitoring configurations or resources from being applied to user-defined namespaces.
+ UserDefinedDisabled UserDefinedMode = "Disabled"
+ // UserDefinedNamespaceIsolated enables monitoring for user-defined projects with namespace-scoped tenancy. This ensures that metrics, alerts, and monitoring data are isolated at the namespace level.
+ UserDefinedNamespaceIsolated UserDefinedMode = "NamespaceIsolated"
+)
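A complete object for the new kind then looks like the sketch below; the singleton name "cluster" is an assumption for illustration, while everything else uses only names defined in this file.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
)

func main() {
	// The CRD is cluster-scoped; the name "cluster" is hypothetical.
	cm := configv1alpha1.ClusterMonitoring{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
		Spec: configv1alpha1.ClusterMonitoringSpec{
			UserDefined: configv1alpha1.UserDefinedMonitoring{
				Mode: configv1alpha1.UserDefinedNamespaceIsolated,
			},
		},
	}
	fmt.Println(cm.Spec.UserDefined.Mode)
}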
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go b/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go
index 7031110ff..7f57d88f9 100644
--- a/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go
+++ b/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go
@@ -23,7 +23,7 @@ type ImagePolicy struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
- // +kubebuilder:validation:Required
+ // +required
Spec ImagePolicySpec `json:"spec"`
// status contains the observed state of the resource.
// +optional
@@ -40,15 +40,16 @@ type ImagePolicySpec struct {
// If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored.
// In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories
// quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation.
+ // If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied.
// For additional details about the format, please refer to the document explaining the docker transport field,
// which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:MaxItems=256
// +listType=set
Scopes []ImageScope `json:"scopes"`
// policy contains configuration to allow scopes to be verified, and defines how
// images not matching the verification policy will be treated.
- // +kubebuilder:validation:Required
+ // +required
Policy Policy `json:"policy"`
}
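The subdomain-wildcard rule described for scopes (a leading `*.` matches all subdomains, never the bare domain, and wildcards are valid only at the front of the host) can be approximated as follows; this is an illustrative sketch, not the matching logic the container runtime actually uses.

package main

import (
	"fmt"
	"strings"
)

// matchesScope approximates the subdomain-wildcard rule described in the
// scopes documentation above.
func matchesScope(scope, host string) bool {
	if rest, ok := strings.CutPrefix(scope, "*."); ok {
		// "*." matches any subdomain of the remaining suffix,
		// but not the bare domain itself.
		return strings.HasSuffix(host, "."+rest)
	}
	return host == scope
}

func main() {
	fmt.Println(matchesScope("*.example.com", "registry.example.com")) // true
	fmt.Println(matchesScope("*.example.com", "example.com"))          // false
}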
@@ -61,7 +62,7 @@ type ImageScope string
// Policy defines the verification policy for the items in the scopes list.
type Policy struct {
// rootOfTrust specifies the root of trust for the policy.
- // +kubebuilder:validation:Required
+ // +required
RootOfTrust PolicyRootOfTrust `json:"rootOfTrust"`
// signedIdentity specifies what image identity the signature claims about the image. The required matchPolicy field specifies the approach used in the verification process to verify the identity in the signature and the actual image identity, the default matchPolicy is "MatchRepoDigestOrExact".
// +optional
@@ -77,7 +78,7 @@ type PolicyRootOfTrust struct {
// "PublicKey" indicates that the policy relies on a sigstore publicKey and may optionally use a Rekor verification.
// "FulcioCAWithRekor" indicates that the policy is based on the Fulcio certification and incorporates a Rekor verification.
// +unionDiscriminator
- // +kubebuilder:validation:Required
+ // +required
PolicyType PolicyType `json:"policyType"`
// publicKey defines the root of trust based on a sigstore public key.
// +optional
@@ -101,7 +102,7 @@ const (
type PublicKey struct {
// keyData contains inline base64-encoded data for the PEM format public key.
// KeyData must be at most 8192 characters.
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:MaxLength=8192
KeyData []byte `json:"keyData"`
// rekorKeyData contains inline base64-encoded data for the PEM format from the Rekor public key.
@@ -115,16 +116,16 @@ type PublicKey struct {
type FulcioCAWithRekor struct {
// fulcioCAData contains inline base64-encoded data for the PEM format fulcio CA.
// fulcioCAData must be at most 8192 characters.
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:MaxLength=8192
FulcioCAData []byte `json:"fulcioCAData"`
// rekorKeyData contains inline base64-encoded data for the PEM format from the Rekor public key.
// rekorKeyData must be at most 8192 characters.
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:MaxLength=8192
RekorKeyData []byte `json:"rekorKeyData"`
// fulcioSubject specifies OIDC issuer and the email of the Fulcio authentication configuration.
- // +kubebuilder:validation:Required
+ // +required
FulcioSubject PolicyFulcioSubject `json:"fulcioSubject,omitempty"`
}
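Selecting the PublicKey arm of the root-of-trust union then looks like the sketch below; the PublicKey pointer field on PolicyRootOfTrust is inferred from the optional publicKey field documented above, and the PEM bytes are a placeholder, not a real key.

package main

import (
	"fmt"

	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
)

func main() {
	// Only the arm named by the policyType discriminator is populated;
	// the discriminator string comes from the doc text above.
	root := configv1alpha1.PolicyRootOfTrust{
		PolicyType: configv1alpha1.PolicyType("PublicKey"),
		PublicKey: &configv1alpha1.PublicKey{
			// Placeholder PEM bytes for illustration only.
			KeyData: []byte("-----BEGIN PUBLIC KEY-----\n...\n-----END PUBLIC KEY-----"),
		},
	}
	fmt.Println(root.PolicyType)
}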
@@ -132,12 +133,12 @@ type FulcioCAWithRekor struct {
type PolicyFulcioSubject struct {
// oidcIssuer contains the expected OIDC issuer. It will be verified that the Fulcio-issued certificate contains a (Fulcio-defined) certificate extension pointing at this OIDC issuer URL. When Fulcio issues certificates, it includes a value based on a URL inside the client-provided ID token.
// Example: "https://expected.OIDC.issuer/"
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:XValidation:rule="isURL(self)",message="oidcIssuer must be a valid URL"
OIDCIssuer string `json:"oidcIssuer"`
// signedEmail holds the email address the Fulcio certificate is issued for.
// Example: "expected-signing-user@example.com"
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:XValidation:rule=`self.matches('^\\S+@\\S+$')`,message="invalid email address"
SignedEmail string `json:"signedEmail"`
}
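The signedEmail XValidation rule reduces to the RE2 pattern ^\S+@\S+$ (one or more non-space characters, an @, then one or more non-space characters); a quick check of what it accepts:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The same pattern used in the +kubebuilder:validation:XValidation
	// rule above, written as a Go regular expression.
	emailRE := regexp.MustCompile(`^\S+@\S+$`)
	fmt.Println(emailRE.MatchString("expected-signing-user@example.com")) // true
	fmt.Println(emailRE.MatchString("not an email"))                      // false
}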
@@ -156,7 +157,7 @@ type PolicyIdentity struct {
// "ExactRepository" means that the identity in the signature must be in the same repository as a specific identity specified by "repository".
// "RemapIdentity" means that the signature must be in the same as the remapped image identity. Remapped image identity is obtained by replacing the "prefix" with the specified “signedPrefix” if the the image identity matches the specified remapPrefix.
// +unionDiscriminator
- // +kubebuilder:validation:Required
+ // +required
MatchPolicy IdentityMatchPolicy `json:"matchPolicy"`
// exactRepository is required if matchPolicy is set to "ExactRepository".
// +optional
@@ -174,7 +175,7 @@ type IdentityRepositoryPrefix string
type PolicyMatchExactRepository struct {
// repository is the reference of the image identity to be matched.
// The value should be a repository name (by omitting the tag or digest) in a registry implementing the "Docker Registry HTTP API V2". For example, docker.io/library/busybox
- // +kubebuilder:validation:Required
+ // +required
Repository IdentityRepositoryPrefix `json:"repository"`
}
@@ -185,12 +186,12 @@ type PolicyMatchRemapIdentity struct {
// The prefix and signedPrefix values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces,
// or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form.
// For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox.
- // +kubebuilder:validation:Required
+ // +required
Prefix IdentityRepositoryPrefix `json:"prefix"`
// signedPrefix is the prefix of the image identity to be matched in the signature. The format is the same as "prefix". The values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces,
// or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form.
// For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox.
- // +kubebuilder:validation:Required
+ // +required
SignedPrefix IdentityRepositoryPrefix `json:"signedPrefix"`
}
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go b/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go
index 171e96d5b..3ae4de157 100644
--- a/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go
+++ b/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go
@@ -24,7 +24,7 @@ type InsightsDataGather struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
- // +kubebuilder:validation:Required
+ // +required
Spec InsightsDataGatherSpec `json:"spec"`
// status holds observed values from the cluster. They may not be overridden.
// +optional
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go
index ab39b5b91..8e22e2d27 100644
--- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go
+++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go
@@ -210,6 +210,100 @@ func (in *ClusterImagePolicyStatus) DeepCopy() *ClusterImagePolicyStatus {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterMonitoring) DeepCopyInto(out *ClusterMonitoring) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMonitoring.
+func (in *ClusterMonitoring) DeepCopy() *ClusterMonitoring {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterMonitoring)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterMonitoring) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterMonitoringList) DeepCopyInto(out *ClusterMonitoringList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ClusterMonitoring, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMonitoringList.
+func (in *ClusterMonitoringList) DeepCopy() *ClusterMonitoringList {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterMonitoringList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterMonitoringList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterMonitoringSpec) DeepCopyInto(out *ClusterMonitoringSpec) {
+ *out = *in
+ out.UserDefined = in.UserDefined
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMonitoringSpec.
+func (in *ClusterMonitoringSpec) DeepCopy() *ClusterMonitoringSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterMonitoringSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterMonitoringStatus) DeepCopyInto(out *ClusterMonitoringStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMonitoringStatus.
+func (in *ClusterMonitoringStatus) DeepCopy() *ClusterMonitoringStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterMonitoringStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EtcdBackupSpec) DeepCopyInto(out *EtcdBackupSpec) {
*out = *in
@@ -676,3 +770,19 @@ func (in *RetentionSizeConfig) DeepCopy() *RetentionSizeConfig {
in.DeepCopyInto(out)
return out
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserDefinedMonitoring) DeepCopyInto(out *UserDefinedMonitoring) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserDefinedMonitoring.
+func (in *UserDefinedMonitoring) DeepCopy() *UserDefinedMonitoring {
+ if in == nil {
+ return nil
+ }
+ out := new(UserDefinedMonitoring)
+ in.DeepCopyInto(out)
+ return out
+}
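The generated deep-copy helpers guarantee value independence; a minimal sketch exercising the new UserDefinedMonitoring copier added above.

package main

import (
	"fmt"

	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
)

func main() {
	orig := &configv1alpha1.UserDefinedMonitoring{Mode: configv1alpha1.UserDefinedDisabled}
	clone := orig.DeepCopy()
	clone.Mode = configv1alpha1.UserDefinedNamespaceIsolated
	// Mutating the clone leaves the original untouched.
	fmt.Println(orig.Mode, clone.Mode) // Disabled NamespaceIsolated
}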
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml
index 393365b41..1d4a88d50 100644
--- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml
+++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml
@@ -44,6 +44,30 @@ clusterimagepolicies.config.openshift.io:
- SigstoreImageVerification
Version: v1alpha1
+clustermonitoring.config.openshift.io:
+ Annotations:
+ description: Cluster Monitoring Operators configuration API
+ ApprovedPRNumber: https://github.com/openshift/api/pull/1929
+ CRDName: clustermonitoring.config.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates:
+ - ClusterMonitoringConfig
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: config.openshift.io
+ HasStatus: true
+ KindName: ClusterMonitoring
+ Labels: {}
+ PluralName: clustermonitoring
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates:
+ - ClusterMonitoringConfig
+ Version: v1alpha1
+
imagepolicies.config.openshift.io:
Annotations: {}
ApprovedPRNumber: https://github.com/openshift/api/pull/1457
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go
index 9da086efc..92ae6cc72 100644
--- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go
+++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go
@@ -41,10 +41,10 @@ func (BackupSpec) SwaggerDoc() map[string]string {
var map_EtcdBackupSpec = map[string]string{
"": "EtcdBackupSpec provides configuration for automated etcd backups to the cluster-etcd-operator",
- "schedule": "Schedule defines the recurring backup schedule in Cron format every 2 hours: 0 */2 * * * every day at 3am: 0 3 * * * Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. The current default is \"no backups\", but will change in the future.",
+ "schedule": "schedule defines the recurring backup schedule in Cron format every 2 hours: 0 */2 * * * every day at 3am: 0 3 * * * Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. The current default is \"no backups\", but will change in the future.",
"timeZone": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. See https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones",
- "retentionPolicy": "RetentionPolicy defines the retention policy for retaining and deleting existing backups.",
- "pvcName": "PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup files would be saved The PVC itself must always be created in the \"openshift-etcd\" namespace If the PVC is left unspecified \"\" then the platform will choose a reasonable default location to save the backup. In the future this would be backups saved across the control-plane master nodes.",
+ "retentionPolicy": "retentionPolicy defines the retention policy for retaining and deleting existing backups.",
+ "pvcName": "pvcName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup files would be saved The PVC itself must always be created in the \"openshift-etcd\" namespace If the PVC is left unspecified \"\" then the platform will choose a reasonable default location to save the backup. In the future this would be backups saved across the control-plane master nodes.",
}
func (EtcdBackupSpec) SwaggerDoc() map[string]string {
@@ -53,7 +53,7 @@ func (EtcdBackupSpec) SwaggerDoc() map[string]string {
var map_RetentionNumberConfig = map[string]string{
"": "RetentionNumberConfig specifies the configuration of the retention policy on the number of backups",
- "maxNumberOfBackups": "MaxNumberOfBackups defines the maximum number of backups to retain. If the existing number of backups saved is equal to MaxNumberOfBackups then the oldest backup will be removed before a new backup is initiated.",
+ "maxNumberOfBackups": "maxNumberOfBackups defines the maximum number of backups to retain. If the existing number of backups saved is equal to MaxNumberOfBackups then the oldest backup will be removed before a new backup is initiated.",
}
func (RetentionNumberConfig) SwaggerDoc() map[string]string {
@@ -62,9 +62,9 @@ func (RetentionNumberConfig) SwaggerDoc() map[string]string {
var map_RetentionPolicy = map[string]string{
"": "RetentionPolicy defines the retention policy for retaining and deleting existing backups. This struct is a discriminated union that allows users to select the type of retention policy from the supported types.",
- "retentionType": "RetentionType sets the type of retention policy. Currently, the only valid policies are retention by number of backups (RetentionNumber), by the size of backups (RetentionSize). More policies or types may be added in the future. Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. The current default is RetentionNumber with 15 backups kept.",
- "retentionNumber": "RetentionNumber configures the retention policy based on the number of backups",
- "retentionSize": "RetentionSize configures the retention policy based on the size of backups",
+ "retentionType": "retentionType sets the type of retention policy. Currently, the only valid policies are retention by number of backups (RetentionNumber), by the size of backups (RetentionSize). More policies or types may be added in the future. Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. The current default is RetentionNumber with 15 backups kept.",
+ "retentionNumber": "retentionNumber configures the retention policy based on the number of backups",
+ "retentionSize": "retentionSize configures the retention policy based on the size of backups",
}
func (RetentionPolicy) SwaggerDoc() map[string]string {
@@ -73,7 +73,7 @@ func (RetentionPolicy) SwaggerDoc() map[string]string {
var map_RetentionSizeConfig = map[string]string{
"": "RetentionSizeConfig specifies the configuration of the retention policy on the total size of backups",
- "maxSizeOfBackupsGb": "MaxSizeOfBackupsGb defines the total size in GB of backups to retain. If the current total size backups exceeds MaxSizeOfBackupsGb then the oldest backup will be removed before a new backup is initiated.",
+ "maxSizeOfBackupsGb": "maxSizeOfBackupsGb defines the total size in GB of backups to retain. If the current total size backups exceeds MaxSizeOfBackupsGb then the oldest backup will be removed before a new backup is initiated.",
}
func (RetentionSizeConfig) SwaggerDoc() map[string]string {
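
The retention policy documented above is a discriminated union: retentionType selects which of the two nested configs applies. As a quick illustration, here is a minimal sketch of building an EtcdBackupSpec in Go; the field and constant names (EtcdBackupSpec, RetentionPolicy, RetentionTypeNumber, RetentionNumberConfig) are inferred from the swagger docs and should be verified against the vendored config/v1alpha1 package.

```go
// Sketch only: names inferred from the swagger docs above; verify
// against the vendored github.com/openshift/api/config/v1alpha1 types.
package main

import (
	"fmt"

	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
)

func main() {
	spec := configv1alpha1.EtcdBackupSpec{
		Schedule: "0 3 * * *", // every day at 3am
		TimeZone: "UTC",
		RetentionPolicy: configv1alpha1.RetentionPolicy{
			// The discriminator: only the config matching RetentionType
			// (here, retention by number of backups) should be populated.
			RetentionType: configv1alpha1.RetentionTypeNumber,
			RetentionNumber: &configv1alpha1.RetentionNumberConfig{
				MaxNumberOfBackups: 15, // matches the documented default
			},
		},
	}
	fmt.Printf("%+v\n", spec)
}
```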
@@ -102,7 +102,7 @@ func (ClusterImagePolicyList) SwaggerDoc() map[string]string {
var map_ClusterImagePolicySpec = map[string]string{
"": "CLusterImagePolicySpec is the specification of the ClusterImagePolicy custom resource.",
- "scopes": "scopes defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker",
+ "scopes": "scopes defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker",
"policy": "policy contains configuration to allow scopes to be verified, and defines how images not matching the verification policy will be treated.",
}
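
The scope-matching semantics described above (exact references, repository/namespace/host prefixes, `*.` wildcards for subdomain matching only, and most-specific-scope-wins) can be sketched with a small standalone helper. This is a deliberate simplification for illustration, not the actual containers/image matcher, and the function names are hypothetical.

```go
// Illustrative sketch of "most specific scope wins"; not the real matcher.
package main

import (
	"fmt"
	"strings"
)

// scopeMatches reports whether scope covers image: an exact reference,
// a path prefix (repository, namespace, or host), or a `*.` wildcard
// that matches subdomains of the host only.
func scopeMatches(scope, image string) bool {
	if strings.HasPrefix(scope, "*.") {
		host := image
		if i := strings.IndexByte(image, '/'); i >= 0 {
			host = image[:i]
		}
		return strings.HasSuffix(host, scope[1:]) // e.g. ".example.com"
	}
	return image == scope ||
		strings.HasPrefix(image, scope+"/") ||
		strings.HasPrefix(image, scope+":") ||
		strings.HasPrefix(image, scope+"@")
}

// mostSpecificScope returns the longest matching scope; longer scopes are
// more specific, so only that scope's policy requirements would apply.
func mostSpecificScope(scopes []string, image string) (string, bool) {
	best, found := "", false
	for _, s := range scopes {
		if scopeMatches(s, image) && len(s) > len(best) {
			best, found = s, true
		}
	}
	return best, found
}

func main() {
	scopes := []string{"docker.io", "docker.io/library", "*.example.com"}
	fmt.Println(mostSpecificScope(scopes, "docker.io/library/busybox:latest"))
	// -> docker.io/library true
}
```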
@@ -118,6 +118,53 @@ func (ClusterImagePolicyStatus) SwaggerDoc() map[string]string {
return map_ClusterImagePolicyStatus
}
+var map_ClusterMonitoring = map[string]string{
+ "": "ClusterMonitoring is the Custom Resource object which holds the current status of Cluster Monitoring Operator. CMO is a central component of the monitoring stack.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. ClusterMonitoring is the Schema for the Cluster Monitoring Operators API",
+ "metadata": "metadata is the standard object metadata.",
+ "spec": "spec holds user configuration for the Cluster Monitoring Operator",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (ClusterMonitoring) SwaggerDoc() map[string]string {
+ return map_ClusterMonitoring
+}
+
+var map_ClusterMonitoringList = map[string]string{
+ "": "Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard list metadata.",
+ "items": "items is a list of ClusterMonitoring",
+}
+
+func (ClusterMonitoringList) SwaggerDoc() map[string]string {
+ return map_ClusterMonitoringList
+}
+
+var map_ClusterMonitoringSpec = map[string]string{
+ "": "ClusterMonitoringSpec defines the desired state of Cluster Monitoring Operator",
+ "userDefined": "userDefined set the deployment mode for user-defined monitoring in addition to the default platform monitoring.",
+}
+
+func (ClusterMonitoringSpec) SwaggerDoc() map[string]string {
+ return map_ClusterMonitoringSpec
+}
+
+var map_ClusterMonitoringStatus = map[string]string{
+ "": "MonitoringOperatorStatus defines the observed state of MonitoringOperator",
+}
+
+func (ClusterMonitoringStatus) SwaggerDoc() map[string]string {
+ return map_ClusterMonitoringStatus
+}
+
+var map_UserDefinedMonitoring = map[string]string{
+ "": "UserDefinedMonitoring config for user-defined projects.",
+ "mode": "mode defines the different configurations of UserDefinedMonitoring Valid values are Disabled and NamespaceIsolated Disabled disables monitoring for user-defined projects. This restricts the default monitoring stack, installed in the openshift-monitoring project, to monitor only platform namespaces, which prevents any custom monitoring configurations or resources from being applied to user-defined namespaces. NamespaceIsolated enables monitoring for user-defined projects with namespace-scoped tenancy. This ensures that metrics, alerts, and monitoring data are isolated at the namespace level.",
+}
+
+func (UserDefinedMonitoring) SwaggerDoc() map[string]string {
+ return map_UserDefinedMonitoring
+}
+
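+
Putting the ClusterMonitoring docs above together, a minimal sketch of a cluster-scoped ClusterMonitoring object enabling namespace-isolated user-defined monitoring could look like the following. The object name "cluster", the UserDefinedMode type name, and the string literal for the mode are assumptions to verify against the vendored types.

```go
// Sketch only: field names follow the swagger docs above; the singleton
// name "cluster" and UserDefinedMode are assumed, not confirmed.
package main

import (
	"fmt"

	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	cm := configv1alpha1.ClusterMonitoring{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, // assumed singleton name
		Spec: configv1alpha1.ClusterMonitoringSpec{
			UserDefined: configv1alpha1.UserDefinedMonitoring{
				// NamespaceIsolated: user-defined monitoring with
				// namespace-scoped tenancy, per the docs above.
				Mode: configv1alpha1.UserDefinedMode("NamespaceIsolated"),
			},
		},
	}
	fmt.Printf("%+v\n", cm.Spec)
}
```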
var map_FulcioCAWithRekor = map[string]string{
"": "FulcioCAWithRekor defines the root of trust based on the Fulcio certificate and the Rekor public key.",
"fulcioCAData": "fulcioCAData contains inline base64-encoded data for the PEM format fulcio CA. fulcioCAData must be at most 8192 characters.",
@@ -151,7 +198,7 @@ func (ImagePolicyList) SwaggerDoc() map[string]string {
var map_ImagePolicySpec = map[string]string{
"": "ImagePolicySpec is the specification of the ImagePolicy CRD.",
- "scopes": "scopes defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker",
+ "scopes": "scopes defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker",
"policy": "policy contains configuration to allow scopes to be verified, and defines how images not matching the verification policy will be treated.",
}
diff --git a/vendor/github.com/openshift/api/console/v1/types_console_link.go b/vendor/github.com/openshift/api/console/v1/types_console_link.go
index 24a5dbadc..977fcbda9 100644
--- a/vendor/github.com/openshift/api/console/v1/types_console_link.go
+++ b/vendor/github.com/openshift/api/console/v1/types_console_link.go
@@ -56,7 +56,7 @@ type ApplicationMenuSpec struct {
// This can be any text that will appear as a subheading in the application menu dropdown.
// A new section will be created if the text does not match text of an existing section.
Section string `json:"section"`
- // imageUrl is the URL for the icon used in front of the link in the application menu.
+ // imageURL is the URL for the icon used in front of the link in the application menu.
// The URL must be an HTTPS URL or a Data URI. The image should be square and will be shown at 24x24 pixels.
// +optional
ImageURL string `json:"imageURL,omitempty"`
diff --git a/vendor/github.com/openshift/api/console/v1/types_console_plugin.go b/vendor/github.com/openshift/api/console/v1/types_console_plugin.go
index 24954687d..569f20fe1 100644
--- a/vendor/github.com/openshift/api/console/v1/types_console_plugin.go
+++ b/vendor/github.com/openshift/api/console/v1/types_console_plugin.go
@@ -26,7 +26,8 @@ type ConsolePlugin struct {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata"`
- // +kubebuilder:validation:Required
+ // spec contains the desired configuration for the console plugin.
+ // +required
Spec ConsolePluginSpec `json:"spec"`
}
@@ -34,20 +35,160 @@ type ConsolePlugin struct {
type ConsolePluginSpec struct {
// displayName is the display name of the plugin.
// The displayName should be between 1 and 128 characters.
- // +kubebuilder:validation:Required
+ // +required
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=128
DisplayName string `json:"displayName"`
// backend holds the configuration of the backend which is serving the console's plugin.
- // +kubebuilder:validation:Required
+ // +required
Backend ConsolePluginBackend `json:"backend"`
// proxy is a list of proxies that describe various service types
// to which the plugin needs to connect.
+ // +listType=atomic
// +optional
Proxy []ConsolePluginProxy `json:"proxy,omitempty"`
// i18n is the configuration of plugin's localization resources.
// +optional
I18n ConsolePluginI18n `json:"i18n"`
+ // contentSecurityPolicy is a list of Content-Security-Policy (CSP) directives for the plugin.
+ // Each directive specifies a list of values, appropriate for the given directive type,
+ // for example a list of remote endpoints for fetch directives such as ScriptSrc.
+ // The console web application uses CSP to detect and mitigate certain types of attacks,
+ // such as cross-site scripting (XSS) and data injection attacks.
+ // Dynamic plugins should specify this field if they need to load assets from outside
+ // the cluster or if violation reports are observed. Dynamic plugins should always prefer
+ // loading their assets from within the cluster, either by vendoring them, or fetching
+ // from a cluster service.
+ // CSP violation reports can be viewed in the browser's console logs during development and
+ // testing of the plugin in the OpenShift web console.
+ // Available directive types are DefaultSrc, ScriptSrc, StyleSrc, ImgSrc, FontSrc, ObjectSrc and ConnectSrc.
+ // Each of the available directives may be defined only once in the list.
+ // The value 'self' is automatically included in all fetch directives by the OpenShift web
+ // console's backend.
+ // For more information about the CSP directives, see:
+ // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy
+ //
+ // The OpenShift web console server aggregates the CSP directives and values across
+ // its own default values and all enabled ConsolePlugin CRs, merging them into a single
+ // policy string that is sent to the browser via `Content-Security-Policy` HTTP response header.
+ //
+ // Example:
+ // ConsolePlugin A directives:
+ // script-src: https://script1.com/, https://script2.com/
+ // font-src: https://font1.com/
+ //
+ // ConsolePlugin B directives:
+ // script-src: https://script2.com/, https://script3.com/
+ // font-src: https://font2.com/
+ // img-src: https://img1.com/
+ //
+ // Unified set of CSP directives, passed to the OpenShift web console server:
+ // script-src: https://script1.com/, https://script2.com/, https://script3.com/
+ // font-src: https://font1.com/, https://font2.com/
+ // img-src: https://img1.com/
+ //
+ // OpenShift web console server CSP response header:
+ // Content-Security-Policy: default-src 'self'; base-uri 'self'; script-src 'self' https://script1.com/ https://script2.com/ https://script3.com/; font-src 'self' https://font1.com/ https://font2.com/; img-src 'self' https://img1.com/; style-src 'self'; frame-src 'none'; object-src 'none'
+ //
+ // +openshift:enable:FeatureGate=ConsolePluginContentSecurityPolicy
+ // +kubebuilder:validation:MaxItems=5
+ // +kubebuilder:validation:XValidation:rule="self.map(x, x.values.map(y, y.size()).sum()).sum() < 8192",message="the total combined size of values of all directives must not exceed 8192 (8kb)"
+ // +listType=map
+ // +listMapKey=directive
+ // +optional
+ ContentSecurityPolicy []ConsolePluginCSP `json:"contentSecurityPolicy"`
+}
+
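+
The aggregation behavior described in the contentSecurityPolicy comment (per-directive union across plugins, automatic 'self', one merged header) can be sketched as follows. This mirrors the documented example rather than the actual console server code, and the helper names are hypothetical; the console's own default directives (default-src, base-uri, and so on) are not modeled here.

```go
// Sketch of the documented CSP aggregation, not the console server code.
package main

import (
	"fmt"
	"sort"
	"strings"
)

// directiveNames maps the API's directive types to CSP header keywords.
var directiveNames = map[string]string{
	"DefaultSrc": "default-src", "ScriptSrc": "script-src",
	"StyleSrc": "style-src", "ImgSrc": "img-src",
	"FontSrc": "font-src", "ObjectSrc": "object-src",
	"ConnectSrc": "connect-src",
}

// aggregateCSP unions directive values across plugins, deduplicating them.
func aggregateCSP(plugins []map[string][]string) map[string][]string {
	merged := map[string]map[string]struct{}{}
	for _, p := range plugins {
		for dir, vals := range p {
			if merged[dir] == nil {
				merged[dir] = map[string]struct{}{}
			}
			for _, v := range vals {
				merged[dir][v] = struct{}{}
			}
		}
	}
	out := map[string][]string{}
	for dir, set := range merged {
		for v := range set {
			out[dir] = append(out[dir], v)
		}
		sort.Strings(out[dir])
	}
	return out
}

// renderHeader emits the merged policy, always including 'self' as the
// comment above specifies for fetch directives.
func renderHeader(merged map[string][]string) string {
	var parts []string
	for dir, vals := range merged {
		parts = append(parts, directiveNames[dir]+" 'self' "+strings.Join(vals, " "))
	}
	sort.Strings(parts)
	return strings.Join(parts, "; ")
}

func main() {
	a := map[string][]string{"ScriptSrc": {"https://script1.com/", "https://script2.com/"}}
	b := map[string][]string{"ScriptSrc": {"https://script2.com/", "https://script3.com/"}}
	fmt.Println(renderHeader(aggregateCSP([]map[string][]string{a, b})))
	// -> script-src 'self' https://script1.com/ https://script2.com/ https://script3.com/
}
```

Running it against the plugin A/B directives from the comment reproduces the documented merged script-src line.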
+// DirectiveType is an enumeration of OpenShift web console supported CSP directives.
+// +kubebuilder:validation:Enum:="DefaultSrc";"ScriptSrc";"StyleSrc";"ImgSrc";"FontSrc";"ObjectSrc";"ConnectSrc"
+// +enum
+type DirectiveType string
+
+const (
+ // DefaultSrc directive serves as a fallback for the other CSP fetch directives.
+ // For more information about the DefaultSrc directive, see:
+ // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/default-src
+ DefaultSrc DirectiveType = "DefaultSrc"
+ // ScriptSrc directive specifies valid sources for JavaScript.
+ // For more information about the ScriptSrc directive, see:
+ // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/script-src
+ ScriptSrc DirectiveType = "ScriptSrc"
+ // StyleSrc directive specifies valid sources for stylesheets.
+ // For more information about the StyleSrc directive, see:
+ // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/style-src
+ StyleSrc DirectiveType = "StyleSrc"
+ // ImgSrc directive specifies valid sources of images and favicons.
+ // For more information about the ImgSrc directive, see:
+ // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/img-src
+ ImgSrc DirectiveType = "ImgSrc"
+ // FontSrc directive specifies valid sources for fonts loaded using @font-face.
+ // For more information about the FontSrc directive, see:
+ // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/font-src
+ FontSrc DirectiveType = "FontSrc"
+ // ObjectSrc directive specifies valid sources for the