Skip to content

Commit 584e5ba

Browse files
authored
Merge branch 'main' into appProtocol
2 parents 77f7026 + 028e8c3 commit 584e5ba

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

41 files changed

+1002
-262
lines changed

clientcmd/clientcmd.go

Lines changed: 3 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,6 @@ import (
88
"k8s.io/client-go/kubernetes/scheme"
99
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
1010
restclient "k8s.io/client-go/rest"
11-
"k8s.io/client-go/tools/clientcmd"
1211
"k8s.io/client-go/tools/remotecommand"
1312
)
1413

@@ -17,29 +16,16 @@ type Client struct {
1716
restconfig *restclient.Config
1817
}
1918

20-
func NewClient() (*Client, error) {
21-
// Instantiate loader for kubeconfig file.
22-
kubeconfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
23-
clientcmd.NewDefaultClientConfigLoadingRules(),
24-
&clientcmd.ConfigOverrides{},
25-
)
26-
27-
// Get a rest.Config from the kubeconfig file. This will be passed into all
28-
// the client objects we create.
29-
restconfig, err := kubeconfig.ClientConfig()
30-
if err != nil {
31-
return nil, err
32-
}
33-
19+
func NewClient(config *restclient.Config) (*Client, error) {
3420
// Create a Kubernetes core/v1 client.
35-
cl, err := corev1client.NewForConfig(restconfig)
21+
cl, err := corev1client.NewForConfig(config)
3622
if err != nil {
3723
return nil, err
3824
}
3925

4026
return &Client{
4127
client: cl,
42-
restconfig: restconfig,
28+
restconfig: config,
4329
}, nil
4430
}
4531

deploy/cw-bundle.yaml

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18336,6 +18336,14 @@ rules:
1833618336
- update
1833718337
- patch
1833818338
- delete
18339+
- apiGroups:
18340+
- ""
18341+
resources:
18342+
- nodes
18343+
verbs:
18344+
- get
18345+
- list
18346+
- watch
1833918347
- apiGroups:
1834018348
- ""
1834118349
resources:

deploy/cw-rbac.yaml

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,14 @@ rules:
3535
- update
3636
- patch
3737
- delete
38+
- apiGroups:
39+
- ""
40+
resources:
41+
- nodes
42+
verbs:
43+
- get
44+
- list
45+
- watch
3846
- apiGroups:
3947
- ""
4048
resources:

e2e-tests/cross-site-sharded/run

Lines changed: 32 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,26 @@ unset OPERATOR_NS
1313
main_cluster="cross-site-sharded-main"
1414
replica_cluster="cross-site-sharded-replica"
1515

16+
wait_for_members() {
17+
local endpoint="$1"
18+
local rsName="$2"
19+
local nodes_amount=0
20+
until [[ ${nodes_amount} == 6 ]]; do
21+
nodes_amount=$(run_mongos 'rs.conf().members.length' "clusterAdmin:clusterAdmin123456@$endpoint" "mongodb" ":27017" \
22+
| egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' \
23+
| $sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/')
24+
25+
echo "waiting for all members to be configured in ${rsName}"
26+
let retry+=1
27+
if [ $retry -ge 15 ]; then
28+
echo "Max retry count $retry reached. something went wrong with mongo cluster. Config for endpoint $endpoint has $nodes_amount but expected 6."
29+
exit 1
30+
fi
31+
echo -n .
32+
sleep 10
33+
done
34+
}
35+
1636
desc "create main cluster"
1737
create_infra "$namespace"
1838

@@ -118,7 +138,10 @@ sleep 30
118138

119139
desc "create replica PSMDB cluster $cluster"
120140
apply_cluster "$test_dir/conf/${replica_cluster}.yml"
121-
sleep 300
141+
142+
wait_for_running $replica_cluster-rs0 3 "false"
143+
wait_for_running $replica_cluster-rs1 3 "false"
144+
wait_for_running $replica_cluster-cfg 3 "false"
122145

123146
replica_cfg_0_endpoint=$(get_service_ip cross-site-sharded-replica-cfg-0 'cfg')
124147
replica_cfg_1_endpoint=$(get_service_ip cross-site-sharded-replica-cfg-1 'cfg')
@@ -141,7 +164,10 @@ kubectl_bin patch psmdb ${main_cluster} --type=merge --patch '{
141164
}
142165
}'
143166

144-
sleep 60
167+
wait_for_members $replica_cfg_0_endpoint cfg
168+
wait_for_members $replica_rs0_0_endpoint rs0
169+
wait_for_members $replica_rs1_0_endpoint rs1
170+
145171
kubectl_bin config set-context $(kubectl_bin config current-context) --namespace="$replica_namespace"
146172

147173
desc 'check if all 3 Pods started'
@@ -165,8 +191,8 @@ compare_mongos_cmd "find" "myApp:myPass@$main_cluster-mongos.$namespace"
165191

166192
desc 'test failover'
167193
kubectl_bin config set-context $(kubectl_bin config current-context) --namespace="$namespace"
194+
168195
kubectl_bin delete psmdb $main_cluster
169-
sleep 60
170196

171197
desc 'run disaster recovery script for replset: cfg'
172198
run_script_mongos "${test_dir}/disaster_recovery.js" "clusterAdmin:clusterAdmin123456@$replica_cfg_0_endpoint" "mongodb" ":27017"
@@ -180,7 +206,9 @@ run_script_mongos "${test_dir}/disaster_recovery.js" "clusterAdmin:clusterAdmin1
180206
desc 'make replica cluster managed'
181207
kubectl_bin config set-context $(kubectl_bin config current-context) --namespace="$replica_namespace"
182208
kubectl_bin patch psmdb ${replica_cluster} --type=merge --patch '{"spec":{"unmanaged": false}}'
183-
sleep 120
209+
210+
wait_for_running $replica_cluster-rs0 3
211+
wait_for_running $replica_cluster-cfg 3
184212

185213
desc "check failover status"
186214
compare_mongos_cmd "find" "myApp:myPass@$replica_cluster-mongos.$replica_namespace"

e2e-tests/serviceless-external-nodes/run

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,8 @@ cat $tmp_dir/psmdb.yaml \
4646

4747
wait_cluster_consistency ${cluster}
4848

49+
# waiting the config will be ready.
50+
sleep 30
4951
run_mongo 'rs.status().members.forEach(function(z){printjson(z.name);printjson(z.stateStr); })' "clusterAdmin:clusterAdmin123456@${cluster}-rs0-0.${cluster}-rs0.${namespace}" "mongodb" | egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' >"$tmp_dir/rs.txt"
5052

5153
cat "${test_dir}/compare/rs.txt" \

e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl-internal.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
apiVersion: cert-manager.io/v1
22
kind: Certificate
33
metadata:
4+
annotations: {}
45
generation: 1
56
name: some-name-ssl-internal
67
ownerReferences:

e2e-tests/tls-issue-cert-manager/compare/certificate_some-name-ssl.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
apiVersion: cert-manager.io/v1
22
kind: Certificate
33
metadata:
4+
annotations: {}
45
generation: 1
56
name: some-name-ssl
67
ownerReferences:

e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-ca-issuer.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
apiVersion: cert-manager.io/v1
22
kind: Issuer
33
metadata:
4+
annotations: {}
45
generation: 1
56
name: some-name-psmdb-ca-issuer
67
ownerReferences:

e2e-tests/tls-issue-cert-manager/compare/issuer_some-name-psmdb-issuer.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
apiVersion: cert-manager.io/v1
22
kind: Issuer
33
metadata:
4+
annotations: {}
45
generation: 1
56
name: some-name-psmdb-issuer
67
ownerReferences:

e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1150-oc.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@ apiVersion: apps/v1
22
kind: StatefulSet
33
metadata:
44
annotations: {}
5-
generation: 7
5+
generation: 5
66
labels:
77
app.kubernetes.io/component: cfg
88
app.kubernetes.io/instance: some-name

e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1150.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@ apiVersion: apps/v1
22
kind: StatefulSet
33
metadata:
44
annotations: {}
5-
generation: 7
5+
generation: 5
66
labels:
77
app.kubernetes.io/component: cfg
88
app.kubernetes.io/instance: some-name

e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1160-oc.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@ apiVersion: apps/v1
22
kind: StatefulSet
33
metadata:
44
annotations: {}
5-
generation: 10
5+
generation: 8
66
labels:
77
app.kubernetes.io/component: cfg
88
app.kubernetes.io/instance: some-name

e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1160.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@ apiVersion: apps/v1
22
kind: StatefulSet
33
metadata:
44
annotations: {}
5-
generation: 10
5+
generation: 8
66
labels:
77
app.kubernetes.io/component: cfg
88
app.kubernetes.io/instance: some-name

e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1150-oc.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@ apiVersion: apps/v1
22
kind: StatefulSet
33
metadata:
44
annotations: {}
5-
generation: 8
5+
generation: 5
66
labels:
77
app.kubernetes.io/component: mongod
88
app.kubernetes.io/instance: some-name

e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1150.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@ apiVersion: apps/v1
22
kind: StatefulSet
33
metadata:
44
annotations: {}
5-
generation: 8
5+
generation: 5
66
labels:
77
app.kubernetes.io/component: mongod
88
app.kubernetes.io/instance: some-name

e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1160-oc.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@ apiVersion: apps/v1
22
kind: StatefulSet
33
metadata:
44
annotations: {}
5-
generation: 11
5+
generation: 8
66
labels:
77
app.kubernetes.io/component: mongod
88
app.kubernetes.io/instance: some-name

e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1160.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@ apiVersion: apps/v1
22
kind: StatefulSet
33
metadata:
44
annotations: {}
5-
generation: 11
5+
generation: 8
66
labels:
77
app.kubernetes.io/component: mongod
88
app.kubernetes.io/instance: some-name

e2e-tests/upgrade-consistency-sharded-tls/run

Lines changed: 16 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -33,8 +33,8 @@ main() {
3333
compare_generation "1" "statefulset" "${CLUSTER}-rs0"
3434
compare_generation "1" "statefulset" "${CLUSTER}-cfg"
3535

36-
# TODO: uncomment when 1.14.0 will be removed,
37-
# renewal doesn't work on "1.14.0" version
36+
# Renewal doesn't work on "1.14.0" version
37+
#
3838
#renew_certificate "some-name-ssl"
3939
#renew_certificate "some-name-ssl-internal"
4040
#wait_cluster
@@ -46,29 +46,16 @@ main() {
4646
compare_kubectl statefulset/${CLUSTER}-cfg "-1140"
4747

4848
desc 'test 1.15.0'
49-
# workaround to switch to updated certificate structure
50-
# more details: https://github.com/percona/percona-server-mongodb-operator/pull/1287
51-
# TODO: remove the workaround when 1.14.0 will be removed
52-
stop_cluster $CLUSTER
53-
54-
compare_generation "4" "statefulset" "${CLUSTER}-rs0"
55-
compare_generation "3" "statefulset" "${CLUSTER}-cfg"
56-
5749
kubectl_bin patch psmdb "${CLUSTER}" --type=merge --patch '{
5850
"spec": {"crVersion":"1.15.0"}
5951
}'
6052
# Wait for at least one reconciliation
6153
sleep 20
54+
desc 'check if Pod started'
55+
wait_cluster
6256

63-
compare_generation "5" "statefulset" "${CLUSTER}-rs0"
64-
compare_generation "4" "statefulset" "${CLUSTER}-cfg"
65-
66-
kubectl_bin delete certificate "$CLUSTER"-ssl "$CLUSTER"-ssl-internal
67-
kubectl_bin delete issuer "$CLUSTER-psmdb-ca"
68-
kubectl_bin delete secret "$CLUSTER"-ssl "$CLUSTER"-ssl-internal
69-
start_cluster $CLUSTER
70-
compare_generation "6" "statefulset" "${CLUSTER}-rs0"
71-
compare_generation "5" "statefulset" "${CLUSTER}-cfg"
57+
compare_generation "3" "statefulset" "${CLUSTER}-rs0"
58+
compare_generation "3" "statefulset" "${CLUSTER}-cfg"
7259

7360
# Wait for at least one reconciliation
7461
sleep 20
@@ -78,14 +65,14 @@ main() {
7865
renew_certificate "some-name-ssl"
7966
sleep 20
8067
wait_cluster
81-
compare_generation "7" "statefulset" "${CLUSTER}-rs0"
82-
compare_generation "6" "statefulset" "${CLUSTER}-cfg"
68+
compare_generation "4" "statefulset" "${CLUSTER}-rs0"
69+
compare_generation "4" "statefulset" "${CLUSTER}-cfg"
8370

8471
renew_certificate "some-name-ssl-internal"
8572
sleep 20
8673
wait_cluster
87-
compare_generation "8" "statefulset" "${CLUSTER}-rs0"
88-
compare_generation "7" "statefulset" "${CLUSTER}-cfg"
74+
compare_generation "5" "statefulset" "${CLUSTER}-rs0"
75+
compare_generation "5" "statefulset" "${CLUSTER}-cfg"
8976

9077
desc 'check if service and statefulset created with expected config'
9178
compare_kubectl service/${CLUSTER}-rs0 "-1150"
@@ -101,20 +88,20 @@ main() {
10188
sleep 20
10289
desc 'check if Pod started'
10390
wait_cluster
104-
compare_generation "9" "statefulset" "${CLUSTER}-rs0"
105-
compare_generation "8" "statefulset" "${CLUSTER}-cfg"
91+
compare_generation "6" "statefulset" "${CLUSTER}-rs0"
92+
compare_generation "6" "statefulset" "${CLUSTER}-cfg"
10693

10794
renew_certificate "some-name-ssl"
10895
sleep 20
10996
wait_cluster
110-
compare_generation "10" "statefulset" "${CLUSTER}-rs0"
111-
compare_generation "9" "statefulset" "${CLUSTER}-cfg"
97+
compare_generation "7" "statefulset" "${CLUSTER}-rs0"
98+
compare_generation "7" "statefulset" "${CLUSTER}-cfg"
11299

113100
renew_certificate "some-name-ssl-internal"
114101
sleep 20
115102
wait_cluster
116-
compare_generation "11" "statefulset" "${CLUSTER}-rs0"
117-
compare_generation "10" "statefulset" "${CLUSTER}-cfg"
103+
compare_generation "8" "statefulset" "${CLUSTER}-rs0"
104+
compare_generation "8" "statefulset" "${CLUSTER}-cfg"
118105

119106
desc 'check if service and statefulset created with expected config'
120107
compare_kubectl service/${CLUSTER}-rs0 "-1160"

pkg/apis/psmdb/v1/perconaservermongodbrestore_types.go

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -157,4 +157,8 @@ var (
157157
PITRestoreTypeLatest PITRestoreType = "latest"
158158
)
159159

160-
const AnnotationRestoreInProgress = "percona.com/restore-in-progress"
160+
const (
161+
AnnotationRestoreInProgress = "percona.com/restore-in-progress"
162+
// AnnotationUpdateMongosFirst is an annotation used to force next smart update to be applied to mongos before mongod.
163+
AnnotationUpdateMongosFirst = "percona.com/update-mongos-first"
164+
)

pkg/apis/psmdb/v1/psmdb_types.go

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -699,6 +699,14 @@ type SecretsSpec struct {
699699
LDAPSecret string `json:"ldapSecret,omitempty"`
700700
}
701701

702+
func SSLSecretName(cr *PerconaServerMongoDB) string {
703+
return cr.Spec.Secrets.SSL
704+
}
705+
706+
func SSLInternalSecretName(cr *PerconaServerMongoDB) string {
707+
return cr.Spec.Secrets.SSLInternal
708+
}
709+
702710
type MongosSpec struct {
703711
MultiAZ `json:",inline"`
704712

0 commit comments

Comments
 (0)