diff --git a/pmm_qa/README.md b/pmm_qa/README.md
new file mode 100644
index 00000000..fa9c2685
--- /dev/null
+++ b/pmm_qa/README.md
@@ -0,0 +1,16 @@
+# WIP - PMM-QA Framework Documentation
+Sets up all supported database types, either as standalone instances or with replication sets.
+
+Available flags:
+- ```--database``` Sets up the selected database. Available options:
+  - ```ps``` - Sets up Percona Server, example: ```--database ps=8.4,SETUP_TYPE=gr,QUERY_SOURCE=perfschema```
+    - parameters:
+      - SETUP_TYPE:
+        - gr - Group Replication
+      - QUERY_SOURCE - Where query analytics data is collected from. The Performance Schema provides detailed, real-time metrics on various server
+        performance aspects, while the Slow Query Log records queries that exceed a defined execution
+        time threshold, helping to identify inefficient queries.
+        - perfschema
+        - slowlog
+      - NODES_COUNT - Number of Percona Server instances created.
+    - Available versions: ```8.4```, ```8.0```, ```5.7```
\ No newline at end of file
diff --git a/pmm_qa/percona_server/init.sql.j2 b/pmm_qa/percona_server/init.sql.j2
new file mode 100644
index 00000000..19185831
--- /dev/null
+++ b/pmm_qa/percona_server/init.sql.j2
@@ -0,0 +1,15 @@
+-- Create replication user and grant necessary privileges
+SET SQL_LOG_BIN=0;
+CREATE USER '{{ replication_user }}'@'%' IDENTIFIED BY '{{ replication_password }}';
+GRANT REPLICATION SLAVE ON *.* TO '{{ replication_user }}'@'%';
+GRANT CONNECTION_ADMIN ON *.* TO '{{ replication_user }}'@'%';
+GRANT BACKUP_ADMIN ON *.* TO '{{ replication_user }}'@'%';
+GRANT GROUP_REPLICATION_STREAM ON *.* TO '{{ replication_user }}'@'%';
+-- GRANT SERVICE_CONNECTION_ADMIN ON *.* TO '{{ replication_user }}'@'%';
+-- GRANT SYSTEM_VARIABLES_ADMIN ON *.* TO '{{ replication_user }}'@'%';
+FLUSH PRIVILEGES;
+SET SQL_LOG_BIN=1;
+
+-- Configure group replication recovery credentials
+CHANGE REPLICATION SOURCE TO SOURCE_USER='{{ replication_user }}', SOURCE_PASSWORD='{{ replication_password }}' FOR CHANNEL 'group_replication_recovery';
+
diff --git a/pmm_qa/percona_server/my.cnf.j2 b/pmm_qa/percona_server/my.cnf.j2
new file mode 100644
index 00000000..951eac26
--- /dev/null
+++ b/pmm_qa/percona_server/my.cnf.j2
@@ -0,0 +1,45 @@
+[mysqld]
+# General server configuration
+server_id={{ server_id_start + item - 1 }}
+bind-address=0.0.0.0
+port={{ mysql_listen_port }}
+
+# General replication settings
+gtid_mode=ON
+enforce_gtid_consistency=ON
+binlog_checksum=NONE
+log_bin=binlog
+log_replica_updates=ON
+disabled_storage_engines="MyISAM,BLACKHOLE,FEDERATED,ARCHIVE,MEMORY"
+lower_case_table_names=2 # macOS-specific (intended for case-insensitive filesystems)
+
+# MySQL 8.4 compatibility settings
+report_host=ps_pmm_{{ps_version}}_{{ item }}
+
+# Group Replication Settings
+plugin_load_add='group_replication.so'
+loose-group_replication_group_name='aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+loose-group_replication_local_address='ps_pmm_{{ps_version}}_{{ item }}:{{ group_seeds_port }}'
+loose-group_replication_group_seeds='{% for i in range(1, nodes_count | int + 1) %}ps_pmm_{{ps_version}}_{{ i }}:{{ group_seeds_port }}{% if not loop.last %},{% endif %}{% endfor %}'
+loose-group_replication_communication_stack=XCOM
+
+# Group replication behavior
+loose-group_replication_start_on_boot=OFF
+loose-group_replication_bootstrap_group=OFF
+loose-group_replication_single_primary_mode=ON
+loose-group_replication_enforce_update_everywhere_checks=OFF
+
+# Recovery settings
+loose-group_replication_recovery_get_public_key=ON
+loose-group_replication_recovery_retry_count=10
+loose-group_replication_recovery_reconnect_interval=60
+
+# Crash-safe replication
settings +relay-log=ps_pmm_{{ps_version}}_{{ item }}-relay-bin +relay_log_recovery=ON +relay_log_purge=ON + +# Performance and connection settings +max_connections=1000 +innodb_buffer_pool_size=256M + diff --git a/pmm_qa/percona_server/percona-server-setup.yml b/pmm_qa/percona_server/percona-server-setup.yml new file mode 100644 index 00000000..896af31e --- /dev/null +++ b/pmm_qa/percona_server/percona-server-setup.yml @@ -0,0 +1,336 @@ +--- +# Percona Server 8.4 and higher single instance and also Cluster with Group Replication +- name: Setup Percona Server 8.4 and higher. Cluster with Group Replication in Docker + hosts: localhost + connection: local + gather_facts: yes + vars: + ps_version: "{{ lookup('env', 'PS_VERSION') | default('8.4', true) }}" + cluster_name: "mysql_cluster" + replication_user: "repl_user" + replication_password: "GRgrO9301RuF" + root_password: "GRgrO9301RuF" + mysql_port: 33066 + mysql_listen_port: 3306 + group_seeds_port: 34061 + nodes_count: "{{ (lookup('env', 'NODES_COUNT') | default('3', true)) | int }}" + network_name: "pmm-qa" + data_dir: "{{ lookup('env', 'HOME') }}/mysql_cluster_data" + server_id_start: 1 + pmm_server_ip: "{{ lookup('vars', 'extra_pmm_server_ip', default=lookup('env','PMM_SERVER_IP') | default('127.0.0.1', true) ) }}" + client_version: "{{ lookup('vars', 'extra_client_version', default=lookup('env','CLIENT_VERSION') | default('3-dev-latest', true) ) }}" + admin_password: "{{ lookup('vars', 'extra_admin_password', default=lookup('env','ADMIN_PASSWORD') | default('admin', true) ) }}" + query_source: "{{ lookup('env', 'QUERY_SOURCE') | default('perfschema', true) }}" + setup_type: "{{ lookup('env', 'SETUP_TYPE') }}" + + tasks: + - name: Create Docker network + community.docker.docker_network: + name: "{{ network_name }}" + state: present + + - name: "Remove old data folders" + shell: 'rm -fr {{ data_dir }}' + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + - name: Create data directories + file: + path: "{{ data_dir }}/node{{ item }}/data" + state: directory + mode: '0755' + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + - name: Generate my.cnf for each node + template: + src: my.cnf.j2 + dest: "{{ data_dir }}/node{{ item }}/my.cnf" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + when: setup_type == "gr" + + - name: Create initialization script for each node + template: + src: init.sql.j2 + dest: "{{ data_dir }}/node{{ item }}/init.sql" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + when: setup_type == "gr" + + - name: Remove old percona server containers + community.docker.docker_container: + name: "ps_pmm_{{ ps_version }}_{{ item }}" + image: "percona/percona-server:{{ ps_version }}" + restart_policy: always + state: absent + loop: "{{ range(1, nodes_count | int + 1) | list }}" + ignore_errors: yes + + - name: Recursively change ownership of a directory + shell: "sudo chown -R 1001:1001 {{ data_dir }}/node{{ item }}/data" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + - name: Start Percona Server containers with group replication + community.docker.docker_container: + name: "ps_pmm_{{ ps_version }}_{{ item }}" + image: "percona/percona-server:{{ ps_version }}" + restart_policy: always + state: started + networks: + - name: "{{ network_name }}" + env: + MYSQL_ROOT_PASSWORD: "{{ root_password }}" + ports: + - "{{ mysql_port + item - 1 }}:{{ mysql_listen_port }}" + - "{{ group_seeds_port + item - 1 }}:{{ group_seeds_port }}" + volumes: + - "{{ data_dir }}/node{{ item }}/data:/var/lib/mysql" + - 
"{{ data_dir }}/node{{ item }}/my.cnf:/etc/mysql/my.cnf" + - "{{ data_dir }}/node{{ item }}/init.sql:/docker-entrypoint-initdb.d/init.sql" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + when: setup_type == "gr" + + - name: Start Percona Server containers + community.docker.docker_container: + name: "ps_pmm_{{ ps_version }}_{{ item }}" + image: "percona/percona-server:{{ ps_version }}" + restart_policy: always + state: started + networks: + - name: "{{ network_name }}" + env: + MYSQL_ROOT_PASSWORD: "{{ root_password }}" + ports: + - "{{ mysql_port + item - 1 }}:{{ mysql_listen_port }}" + - "{{ group_seeds_port + item - 1 }}:{{ group_seeds_port }}" + volumes: + - "{{ data_dir }}/node{{ item }}/data:/var/lib/mysql" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + when: setup_type != "gr" + + - name: Wait for MySQL to be available + wait_for: + host: localhost + port: "{{ mysql_port + item - 1 }}" + delay: 10 + timeout: 300 + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + - name: Reset configuration for all nodes + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_{{ item }}" + command: > + mysql -uroot -p{{ root_password }} -e " + RESET BINARY LOGS AND GTIDS; + RESET REPLICA ALL; + SET GLOBAL gtid_purged=''; + " + loop: "{{ range(1, nodes_count | int + 1) | list }}" + ignore_errors: yes + when: setup_type == "gr" + + - name: Create slowlog configuration for mysql nodes + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_{{ item }}" + command: > + mysql -uroot -p{{ root_password }} -e " + SET GLOBAL slow_query_log='ON'; + SET GLOBAL long_query_time=0; + SET GLOBAL log_slow_admin_statements=ON; + SET GLOBAL log_slow_slave_statements=ON; + " + loop: "{{ range(1, nodes_count | int + 1) | list }}" + when: query_source == "slowlog" + + - name: Bootstrap first node in the cluster + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_1" + command: > + mysql -uroot -p{{ root_password }} -e " + SET GLOBAL group_replication_bootstrap_group=ON; + START GROUP_REPLICATION; + SET GLOBAL group_replication_bootstrap_group=OFF;" + when: setup_type == "gr" + retries: 5 + delay: 10 + + - name: Wait 5 seconds for bootstrap to complete + pause: + seconds: 5 + when: setup_type == "gr" + + - name: Start group replication on other nodes + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_{{ item }}" + command: mysql -uroot -p{{ root_password }} -e "START GROUP_REPLICATION;" + loop: "{{ range(2, nodes_count | int + 1) | list }}" + ignore_errors: yes + when: setup_type == "gr" + + - name: Wait 10 seconds for the other nodes to join + pause: + seconds: 10 + when: setup_type == "gr" + + - name: Create and seed a test database on primary + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_1" + command: > + mysql -uroot -p{{ root_password}} -e " + CREATE DATABASE testdb; + USE testdb; + CREATE TABLE testdb (id INT PRIMARY KEY, data VARCHAR(100)); + INSERT INTO testdb VALUES (1, 'Initial data from node mysql1');" + when: setup_type == "gr" + + - name: Check replication status on first node + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_1" + command: mysql -uroot -p{{ root_password }} -e "SELECT * FROM performance_schema.replication_group_members;" + register: replication_status + when: setup_type == "gr" + + - name: Display replication status + debug: + var: replication_status.stdout + when: setup_type == "gr" + + - name: Check 
replication group members count + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_1" + command: mysql -uroot -p{{ root_password }} -e "SELECT COUNT(*) AS count FROM performance_schema.replication_group_members;" + register: member_count + when: setup_type == "gr" + + - name: Display member count + debug: + var: member_count.stdout + when: setup_type == "gr" + + - name: Set verification instructions + set_fact: + verification_msg: | + MySQL Cluster setup complete! + + To verify replication is working: + 1. Connect to the first node: + docker exec -it ps_pmm_{{ ps_version }}_1 mysql -uroot -p{{ root_password }} + + 2. Insert data in the test database: + USE testdb; + INSERT INTO testdb VALUES (100, 'Test replication'); + + 3. Connect to other nodes and verify data is replicated: + docker exec -it ps_pmm_{{ ps_version }}_2 mysql -uroot -p{{ root_password }} + USE testdb; + SELECT * FROM testdb; + when: setup_type == "gr" + + - name: Display verification instructions + debug: + msg: "{{ verification_msg | split('\n') }}" + when: setup_type == "gr" + + - name: Install pmm client and connect to pmm server + include_tasks: ../tasks/add_mysql_to_pmm_server.yml + vars: + container_name: "ps_pmm_{{ ps_version }}_{{ item }}" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + + + - name: Install sysbench inside of all percona server nodes + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_{{ item }}" + user: "root" + command: > + /bin/sh -c " + wget -O epel-release.rpm -nv https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && + rpm -i epel-release.rpm && + microdnf install -y sysbench + " + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + - name: Prepare sysbench inside of all percona server nodes + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_{{ item }}" + command: > + mysql -uroot -p{{ root_password }} -e " + SET GLOBAL super_read_only = OFF; + SET GLOBAL read_only = OFF; + " + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + - name: Prepare sysbench inside of all percona server nodes + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_{{ item }}" + command: > + mysql -uroot -p{{ root_password }} -e " + CREATE DATABASE sbtest; + CREATE USER 'sbtest'@'localhost' IDENTIFIED BY 'password'; + GRANT ALL PRIVILEGES ON *.* TO 'sbtest'@'localhost'; + CREATE USER 'sbtest'@'127.0.0.1' IDENTIFIED BY 'password'; + GRANT ALL PRIVILEGES ON *.* TO 'sbtest'@'127.0.0.1'; + FLUSH PRIVILEGES; + " + loop: "{{ range(1, nodes_count | int + 1) | list }}" + when: setup_type != "gr" + + - name: Prepare sysbench inside of all percona server nodes + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_1" + command: > + mysql -uroot -p{{ root_password }} -e " + CREATE DATABASE sbtest; + CREATE USER 'sbtest'@'localhost' IDENTIFIED BY 'password'; + GRANT ALL PRIVILEGES ON *.* TO 'sbtest'@'localhost'; + CREATE USER 'sbtest'@'127.0.0.1' IDENTIFIED BY 'password'; + GRANT ALL PRIVILEGES ON *.* TO 'sbtest'@'127.0.0.1'; + FLUSH PRIVILEGES; + " + when: setup_type == "gr" + + - name: Prepare data for sysbench inside of all percona server nodes + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_{{ item }}" + command: > + sysbench /usr/share/sysbench/oltp_read_write.lua + --mysql-host=127.0.0.1 + --mysql-port=3306 + --mysql-user=sbtest + --mysql-password=password + --mysql-db=sbtest + --tables=10 + --table-size=100000 + 
prepare + when: setup_type != "gr" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + - name: Prepare data for sysbench inside of first percona server nodes + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_1" + command: > + sysbench /usr/share/sysbench/oltp_read_write.lua + --mysql-host=127.0.0.1 + --mysql-port=3306 + --mysql-user=sbtest + --mysql-password=password + --mysql-db=sbtest + --tables=10 + --table-size=100000 + prepare + when: setup_type == "gr" + + - name: Run load for sysbench inside of all percona server nodes + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_{{ item }}" + command: > + sysbench /usr/share/sysbench/oltp_read_write.lua + --mysql-host=127.0.0.1 + --mysql-port=3306 + --mysql-user=sbtest + --mysql-password=password + --mysql-db=sbtest + --tables=10 + --table-size=100000 + --threads=16 + --time=60 + run + loop: "{{ range(1, nodes_count | int + 1) | list }}" diff --git a/pmm_qa/pmm-framework.py b/pmm_qa/pmm-framework.py index 6a1d2c26..c8fe29d9 100755 --- a/pmm_qa/pmm-framework.py +++ b/pmm_qa/pmm-framework.py @@ -39,7 +39,7 @@ "PS": { "versions": ["5.7", "8.4", "8.0"], "configurations": {"QUERY_SOURCE": "perfschema", "SETUP_TYPE": "", "CLIENT_VERSION": "3-dev-latest", - "TARBALL": ""} + "TARBALL": "", "NODES_COUNT": 1} }, "SSL_MYSQL": { "versions": ["5.7", "8.4", "8.0"], @@ -184,27 +184,41 @@ def setup_ps(db_type, db_version=None, db_config=None, args=None): # Gather Version details ps_version = os.getenv('PS_VERSION') or db_version or database_configs[db_type]["versions"][-1] - # Define environment variables for playbook - env_vars = { - 'GROUP_REPLICATION': setup_type, - 'PS_NODES': no_of_nodes, - 'PS_VERSION': ps_version, - 'PMM_SERVER_IP': args.pmm_server_ip or container_name or '127.0.0.1', - 'PS_CONTAINER': 'ps_pmm_' + str(ps_version) + ('_replica' if setup_type_value in ("replication", "replica") else ''), - 'PS_PORT': 3318 if setup_type_value in ("replication", "replica") else 3317, - 'CLIENT_VERSION': get_value('CLIENT_VERSION', db_type, args, db_config), - 'QUERY_SOURCE': get_value('QUERY_SOURCE', db_type, args, db_config), - 'PS_TARBALL': get_value('TARBALL', db_type, args, db_config), - 'ADMIN_PASSWORD': os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin', - 'PMM_QA_GIT_BRANCH': os.getenv('PMM_QA_GIT_BRANCH') or 'v3' - } - - # Ansible playbook filename - playbook_filename = 'ps_pmm_setup.yml' - - # Call the function to run the Ansible playbook - run_ansible_playbook(playbook_filename, env_vars, args) - + ps_version_int = int(ps_version.replace(".", "")) + if ps_version_int >= 84: + # Define environment variables for playbook + env_vars = { + 'PMM_SERVER_IP': args.pmm_server_ip or container_name or '127.0.0.1', + 'SETUP_TYPE': setup_type_value, + 'NODES_COUNT': get_value('NODES_COUNT', db_type, args, db_config), + 'QUERY_SOURCE': get_value('QUERY_SOURCE', db_type, args, db_config), + 'PS_VERSION': ps_version, + 'CLIENT_VERSION': get_value('CLIENT_VERSION', db_type, args, db_config), + 'ADMIN_PASSWORD': os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin', + } + + run_ansible_playbook('percona_server/percona-server-setup.yml', env_vars, args) + else: + # Define environment variables for playbook + env_vars = { + 'GROUP_REPLICATION': setup_type, + 'PS_NODES': no_of_nodes, + 'PS_VERSION': ps_version, + 'PMM_SERVER_IP': args.pmm_server_ip or container_name or '127.0.0.1', + 'PS_CONTAINER': 'ps_pmm_' + str(ps_version) + ('_replica' if setup_type_value in 
("replication", "replica") else ''), + 'PS_PORT': 3318 if setup_type_value in ("replication", "replica") else 3317, + 'CLIENT_VERSION': get_value('CLIENT_VERSION', db_type, args, db_config), + 'QUERY_SOURCE': get_value('QUERY_SOURCE', db_type, args, db_config), + 'PS_TARBALL': get_value('TARBALL', db_type, args, db_config), + 'ADMIN_PASSWORD': os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin', + 'PMM_QA_GIT_BRANCH': os.getenv('PMM_QA_GIT_BRANCH') or 'v3' + } + + # Ansible playbook filename + playbook_filename = 'ps_pmm_setup.yml' + + # Call the function to run the Ansible playbook + run_ansible_playbook(playbook_filename, env_vars, args) def setup_mysql(db_type, db_version=None, db_config=None, args=None): # Check if PMM server is running diff --git a/pmm_qa/pmm3-client-setup-centos.sh b/pmm_qa/pmm3-client-setup-centos.sh new file mode 100644 index 00000000..8a9440a4 --- /dev/null +++ b/pmm_qa/pmm3-client-setup-centos.sh @@ -0,0 +1,123 @@ +#!/bin/bash + +echo "start installing pmm-agent" + +while [ $# -gt 0 ]; do + + if [[ $1 == *"--"* ]]; then + param="${1/--/}" + declare $param="$2" + fi + + shift +done + +if [ -z "$admin_password" ]; then + export admin_password=admin +fi + +if [ -z "$pmm_server_ip" ]; then + export pmm_server_ip=127.0.0.1 +fi + +if [ -z "$client_version" ]; then + export client_version=dev-latest +fi + +if [ -z "$install_client" ]; then + export install_client=yes +fi + +if [ -z "$metrics_mode" ]; then + export metrics_mode=auto +fi + +if [ -z "$use_metrics_mode" ]; then + export use_metrics_mode=yes +fi + +if [ ! -z "$upgrade" ]; then + upgrade="-u" +fi + +port=8443 +if [[ "$pmm_server_ip" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then + port=443 +fi + +microdnf install -y wget gnupg2 +wget https://repo.percona.com/yum/percona-release-latest.noarch.rpm +rpm -i ./percona-release-latest.noarch.rpm +export PMM_AGENT_SETUP_NODE_NAME=client_container_$(echo $((1 + $RANDOM % 9999))) + +if [[ "$client_version" == "3-dev-latest" ]]; then + echo "Installing 3-dev-latest pmm client" + percona-release enable-only pmm3-client experimental + microdnf install -y pmm-client +fi + +if [[ "$client_version" == "pmm3-rc" ]]; then + echo "Installing testing pmm client" + percona-release enable-only pmm3-client testing + microdnf install -y pmm-client +fi + +if [[ "$client_version" == "pmm3-latest" ]]; then + echo "Installing release pmm client" + microdnf -y install pmm-client +fi + +if [[ "$client_version" =~ ^3\.[0-9]+\.[0-9]+$ ]]; then + wget -O pmm-client.deb https://repo.percona.com/pmm3-client/yum/release/9/RPMS/x86_64/pmm-client-${client_version}-7.el9.x86_64.rpm + rpm -i pmm-client.deb +fi + +## Default Binary path +path="/usr/local/percona/pmm"; +## As export PATH is not working link the paths +ln -sf ${path}/bin/pmm-admin /usr/local/bin/pmm-admin +ln -sf ${path}/bin/pmm-agent /usr/local/bin/pmm-agent + +if [[ "$client_version" == http* ]]; then + if [[ "$install_client" == "yes" ]]; then + wget -O pmm-client.tar.gz --progress=dot:giga "${client_version}" + fi + tar -zxpf pmm-client.tar.gz + rm -r pmm-client.tar.gz + PMM_CLIENT=`ls -1td pmm-client* 2>/dev/null | grep -v ".tar" | grep -v ".sh" | head -n1` + echo ${PMM_CLIENT} + rm -rf pmm-client + mv ${PMM_CLIENT} pmm-client + rm -rf /usr/local/bin/pmm-client + mv -f pmm-client /usr/local/bin + pushd /usr/local/bin/pmm-client + ## only setting up all binaries in default path /usr/local/percona/pmm + bash -x ./install_tarball ${upgrade} + pwd + popd + pmm-admin --version +fi + +## Check if we are upgrading or 
attempting fresh install.
+if [[ -z "$upgrade" ]]; then
+    if [[ "$use_metrics_mode" == "yes" ]]; then
+        echo "setup pmm-agent when metrics mode yes"
+        pmm-agent setup --config-file=/usr/local/percona/pmm/config/pmm-agent.yaml --server-address=${pmm_server_ip}:${port} --server-insecure-tls --metrics-mode=${metrics_mode} --server-username=admin --server-password=${admin_password}
+    else
+        echo "setup pmm-agent when metrics mode no"
+        pmm-agent setup --config-file=/usr/local/percona/pmm/config/pmm-agent.yaml --server-address=${pmm_server_ip}:${port} --server-insecure-tls --server-username=admin --server-password=${admin_password}
+    fi
+    sleep 10
+    pmm-agent --config-file=/usr/local/percona/pmm/config/pmm-agent.yaml > pmm-agent.log 2>&1 &
+    sleep 10
+else
+    pid=`ps -ef | grep pmm-agent | grep -v grep | awk -F ' ' '{print $2}'`
+    if [ -n "$pid" ]; then
+        kill -9 $pid
+        echo "Killing and restarting pmm agent...."
+        pmm-agent --config-file=/usr/local/percona/pmm/config/pmm-agent.yaml >> pmm-agent.log 2>&1 &
+        sleep 10
+    fi
+fi
+echo "pmm-admin status"
+pmm-admin status
diff --git a/pmm_qa/tasks/add_mysql_to_pmm_server.yml b/pmm_qa/tasks/add_mysql_to_pmm_server.yml
new file mode 100644
index 00000000..1dd6b42d
--- /dev/null
+++ b/pmm_qa/tasks/add_mysql_to_pmm_server.yml
@@ -0,0 +1,17 @@
+- name: Install and add pmm client
+  include_tasks: ./install_pmm_client_centos.yml
+
+- name: Set unique service name
+  include_tasks: ./set_unique_service_name.yml
+
+- name: Add group replication service to pmm server
+  community.docker.docker_container_exec:
+    container: "{{ container_name }}"
+    command: pmm-admin add mysql --debug --query-source={{ query_source }} --username=root --password={{ root_password }} --environment=ps-gr-dev --cluster=ps-gr-dev-cluster --replication-set=ps-gr-replication {{ service_name }} 127.0.0.1:3306
+  when: setup_type == "gr"
+
+- name: Add service to pmm server
+  community.docker.docker_container_exec:
+    container: "{{ container_name }}"
+    command: pmm-admin add mysql --debug --query-source={{ query_source }} --username=root --password={{ root_password }} --environment=ps-dev {{ service_name }} 127.0.0.1:3306
+  when: setup_type != "gr"
diff --git a/pmm_qa/tasks/install_pmm_client_centos.yml b/pmm_qa/tasks/install_pmm_client_centos.yml
new file mode 100644
index 00000000..ea8f9f38
--- /dev/null
+++ b/pmm_qa/tasks/install_pmm_client_centos.yml
@@ -0,0 +1,29 @@
+- name: Get PMM server address
+  shell: 'docker ps -f name=-server --format "{{ "{{" }}.Names{{ "}}" }}"'
+  register: pmm_server_ip_output
+  when: pmm_server_ip == "127.0.0.1"
+
+- name: Set correct pmm server address
+  set_fact:
+    pmm_server_ip: "{{ pmm_server_ip_output.stdout }}"
+  when: pmm_server_ip == "127.0.0.1"
+
+- name: Print pmm server address
+  debug:
+    var: pmm_server_ip
+
+- name: Copy the pmm client setup script into the container
+  community.docker.docker_container_copy_into:
+    container: "{{ container_name }}"
+    path: ../pmm3-client-setup-centos.sh
+    container_path: /pmm3-client-setup.sh
+
+- name: "PMM Server IP is"
+  debug:
+    msg: "{{ pmm_server_ip }} is pmm server IP"
+
+- name: Install pmm-client
+  community.docker.docker_container_exec:
+    container: "{{ container_name }}"
+    user: "root"
+    command: bash -x /pmm3-client-setup.sh --pmm_server_ip {{ pmm_server_ip }} --client_version {{ client_version }} --admin_password {{ admin_password }} --use_metrics_mode no
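As a quick manual cross-check of what the two task files above automate, the agent state and the services added from a node can be inspected inside the container. This is a minimal sketch, assuming the default container naming from the playbook (`ps_pmm_8.4_1` is an example value); both commands are standard pmm-admin subcommands.

```bash
# Verify agent <-> server connectivity for the first node (container name is an example)
docker exec ps_pmm_8.4_1 pmm-admin status

# List the services and agents registered from this node
docker exec ps_pmm_8.4_1 pmm-admin list
```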
diff --git a/pmm_qa/tasks/set_unique_service_name.yml b/pmm_qa/tasks/set_unique_service_name.yml
new file mode 100644
index 00000000..60492f9c
--- /dev/null
+++ b/pmm_qa/tasks/set_unique_service_name.yml
@@ -0,0 +1,44 @@
+- name: Install jq via appropriate package manager
+  become: true
+  block:
+    - name: Install jq on Debian/Ubuntu
+      apt:
+        name: jq
+        state: present
+      when: ansible_facts['os_family'] == 'Debian'
+
+    - name: Install jq on RHEL/CentOS/Alma/Rocky
+      yum:
+        name: jq
+        state: present
+      when: ansible_facts['os_family'] == 'RedHat'
+
+- name: Check whether a service with the expected name is already registered in pmm server
+  shell: |
+    SERVICE_NAME="{{ container_name }}"
+    echo "$SERVICE_NAME"
+    curl -u admin:{{ admin_password }} --location 'http://{{ pmm_server_ip }}/v1/management/services' | jq -r '.services[].service_name' | grep -q "$SERVICE_NAME"
+  register: service_exists_old
+  ignore_errors: yes
+
+- name: Use the container name as the service name when it is not registered yet
+  set_fact:
+    service_name: "{{ service_exists_old.stdout }}"
+  when: service_exists_old.rc == 1
+
+- name: Generate a new service name that is not already registered in pmm server
+  shell: |
+    RANDOM_ID="_$(shuf -i 1-10000 -n 1)"
+    SERVICE_NAME="{{ container_name }}$RANDOM_ID"
+    echo "$SERVICE_NAME"
+    curl -u admin:{{ admin_password }} --location 'http://{{ pmm_server_ip }}/v1/management/services' | jq -r '.services[].service_name' | grep -q "$SERVICE_NAME"
+  register: service_exists_new
+  ignore_errors: yes
+  retries: 5
+  until: service_exists_new.rc == 1
+  when: service_exists_old.rc == 0
+
+- name: Use the newly generated unique service name
+  set_fact:
+    service_name: "{{ service_exists_new.stdout }}"
+  when: service_exists_old.rc == 0
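Finally, a hedged end-to-end usage sketch. The `--database` syntax follows the README added above and the environment variables match the playbook's `lookup('env', ...)` defaults; running from the `pmm_qa` directory, an already-running PMM server, and passing `NODES_COUNT` inside the `--database` value are assumptions.

```bash
# Via the framework wrapper (dispatches to percona_server/percona-server-setup.yml for 8.4+):
# three-node Group Replication cluster with Performance Schema as the query source
python3 pmm-framework.py --database ps=8.4,SETUP_TYPE=gr,QUERY_SOURCE=perfschema,NODES_COUNT=3

# Single 8.4 instance with the slow query log as the query source
python3 pmm-framework.py --database ps=8.4,QUERY_SOURCE=slowlog

# Or drive the playbook directly; these are the env vars it reads via lookup('env', ...)
PS_VERSION=8.4 SETUP_TYPE=gr NODES_COUNT=3 QUERY_SOURCE=perfschema \
  ansible-playbook percona_server/percona-server-setup.yml
```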