InnoDB Cluster is a modern solution that provides group replication for MySQL. In this configuration we will build a three-node cluster:

{{:

Of course, our IPs will look different because I am waaaay too lazy to care about proper pictures.

=====Installation=====
I am not a big fan of manual work (ask my grandma about it), so I automated the whole shebang using Terraform + Ansible.
I will not show the Terraform code because it is strictly environment-specific, but I will show you the Ansible:

<code>
- name: Configure MySQL Libraries
  vars:
    ansible_user:
    ansible_become:
    ansible_become_method:
    ansible_python_interpreter:
  hosts: dbservers
  tasks:
    - block:
        - name: Install the necessary packages
          yum:
            name:
              - mysql-server
              - mysql-shell
              - mysql-devel
              - mysql-common
              - mysql-libs
              - mysql
            state: present

        - name: Disable Selinux
          ansible.builtin.command:

        - name: Stop firewall
          service:
            name: firewalld
            state: stopped
</code>
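
Both plays target a dbservers group, and the configuration play below treats groups['dbservers'][0] as the primary, so the inventory only needs that single group. A minimal sketch (the hostnames and file names are just examples, not taken from the real setup):

<code>
# inventory.yml -- example hostnames, adjust to your environment
all:
  children:
    dbservers:
      hosts:
        mysql-cluster-a:
        mysql-cluster-b:
        mysql-cluster-c:
</code>

You would then run the play with something like: ansible-playbook -i inventory.yml mysql_innodb_cluster.yml (the playbook file name is whatever you saved the plays as).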

=====Configuration=====
After that, you can configure the rest using the following Ansible play:

<code>
- name: Configure MySQL Libraries
  vars:
    ansible_user:
    ansible_become:
    ansible_become_method:
  hosts: dbservers
  tasks:
    - block:
        - name: Start the MySQL Service
          ansible.builtin.service:
            name: mysqld
            state: started

        - name: Check the existence of a mysql file
          stat:
            path: /
          register: mysql_file

        - name: Obtain & Change temporal password
          shell: |
            temporal_password=`awk '/A temporary password is generated for/ {a=$0} END{ print a }' /
            mysql -uroot -p${temporal_password} -e "SET GLOBAL super_read_only = 0; SET GLOBAL read_only = 0; " --connect-expired-password
            mysql -uroot -p${temporal_password} -e "ALTER USER '
            echo "
            user=root
            password=\"
          when: not mysql_file.stat.exists

        - name: Generate random server_id
          shell: |
            echo $((0x$(echo `hostname` | md5sum | cut -f 1 -d " " | cut -c 1-3))) | cut -c 1-3
          register: mysql_server_id

        - name: Stop MySQL If running
          service:
            name: mysqld
            state: stopped

        - name: Define the primary host (e.g. first host in the group)
          set_fact:
            primary_node:

        - name: Define the secondary nodes
          set_fact:
            secondary_nodes:

        - name: Get secondary hosts
          debug:
            var: secondary_nodes

        - name: Start MySQL
          service:
            name: mysqld
            state: started

        - name: Template bootstrapping js for mysqlsh
          template:
            dest: "/
            src: templates/
            owner: "
            group: "
            mode: '

        - name: Template InnoDB Cluster
          template:
            dest: "/
            src: templates/
            owner: "
            group: "
            mode: '

        - name: Bootstrap the cluster (new deployment)
          command: mysqlsh -f /

        - name: Wait to start
          wait_for:
            timeout: 5

        - name: Add the other instances
          command: mysqlsh -f /
          when: inventory_hostname == groups['
</code>

Congrats, now we have the cluster running:

<code>
... (dba.getCluster().status() output: all three members report ONLINE and the cluster can tolerate up to one failure) ...
</code>
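
The status above is what MySQL Shell reports for the cluster. If you want to pull it on demand, something along these lines works (the cluster admin user and host are examples; use whatever you configured as cluster_user):

<code>
# One-shot status check via the command-line integration
mysqlsh icadmin@mysql-cluster-a:3306 -- cluster status

# Or as a JS one-liner
mysqlsh icadmin@mysql-cluster-a:3306 --js -e 'print(dba.getCluster().status())'
</code>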

=====Templates=====
Because we want to look professional (only to look like one, being one is boring), we use a few templates: the MySQL configuration and the mysqlsh scripts that bootstrap the cluster and add the remaining instances:

<code>
# MySQL configuration template (my.cnf)
[mysqld]
server_id={{ mysql_server_id.stdout }}
binlog_checksum=NONE
gtid_mode=ON
enforce_gtid_consistency=ON
log_bin
log_slave_updates=ON
master_info_repository=TABLE
relay_log_info_repository=TABLE
skip-host-cache
skip-name-resolve
binlog_transaction_dependency_tracking=WRITESET

# Bootstrap template for mysqlsh (configure the instance and create the cluster admin user)
var cluster_user_password = "{{ cluster_user_password }}";
var clusterName = "{{ cluster_name }}";
var secondary_nodes = "{{ secondary_nodes }}";
var root_password = "{{ root_password }}";
var cluster_user = "{{ cluster_user }}";

var mysql = require('mysql');

try{

        password: `${root_password}`,
        restart: true,
        interactive:
        clusterAdmin:
        clusterAdminPassword:
    });
} catch(e){
    print('
}

#Adding the instances
var cluster_user = "{{ cluster_user }}";
var cluster_user_password = "{{ cluster_user_password }}";
var clusterName = "{{ cluster_name }}";
var secondary_nodes = "{{ secondary_nodes }}";
var secondary_nodes = secondary_nodes.replaceAll("

var mysql = require('mysql');

try {
    print('
    shell.connect('
    var cluster = dba.createCluster(clusterName);
    for(let i=0; i < {{ secondary_nodes|length }}; i++) {
        cluster.addInstance(`${cluster_user}@${secondary_nodes[i]}`,
    }
} catch(e){
    print('
}
</code>
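
The Jinja2 variables in these templates (cluster_name, cluster_user, cluster_user_password, root_password) have to be defined somewhere; mysql_server_id and secondary_nodes are set as facts inside the play itself. A minimal sketch of the rest as group variables (the names match the templates, the values are obviously placeholders and belong in Ansible Vault):

<code>
# group_vars/dbservers.yml -- placeholder values only
cluster_name: "prodCluster"
cluster_user: "icadmin"
cluster_user_password: "ChangeMe_123"
root_password: "ChangeMeToo_456"
</code>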


=====Management=====
It is all cool and stuff, but let's go through several scenarios with our cluster:

1. Loss of a member
2. Backup
3. Recovery


First things first: what do we do if the connection between the primary and a secondary fails? Well, we have to rejoin the lost member, so let's see how.


====Loss of a member====
To do that, we will simulate a stop of the service: someone stopped our service :-).

<code>
[root@mysql-cluster-b ~]# service mysqld stop
Redirecting to /bin/systemctl stop mysqld.service
[root@mysql-cluster-b ~]#
</code>

Since our cluster can only tolerate one failure, after this it cannot tolerate any more:

<code>
... (dba.getCluster().status() output: mysql-cluster-b is reported as "(MISSING)" and the cluster can no longer tolerate any failures) ...
</code>

We see that the node is down. So let's simulate a restoration of the connection:

<code>
[root@mysql-cluster-b ~]# service mysqld start
Redirecting to /bin/systemctl start mysqld.service
[root@mysql-cluster-b ~]#
</code>

Now we can check again:

<code>
... (dba.getCluster().status() output: all three members are back ONLINE and the cluster can again tolerate one failure) ...
</code>

Now we see that it is working again. If for some reason the instance does not rejoin automatically, we can add it back manually:
<code>
Cluster.rejoinInstance(instance)
cluster.rescan()
</code>
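
In practice that means connecting to a healthy member with mysqlsh and calling those methods on the cluster object. A minimal sketch (cluster admin user and hostnames are examples):

<code>
// mysqlsh in JS mode, connected to a surviving member
\connect icadmin@mysql-cluster-a:3306
var cluster = dba.getCluster();
cluster.rejoinInstance('icadmin@mysql-cluster-b:3306');  // bring the lost member back
cluster.rescan();                                        // reconcile the cluster metadata
cluster.status();                                        // confirm everything is ONLINE again
</code>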

It is important to note that if the instance's UUID has changed, we have to remove it from the cluster metadata before adding it back:

<code>
cluster.removeInstance("
cluster.rescan()
</code>


====Backup====
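
One straightforward way to take a consistent logical backup of a member is MySQL Shell's dump utility. A minimal sketch, run against one of the secondaries (user, host and target path are examples):

<code>
# Dump the whole instance into a date-stamped directory
mysqlsh icadmin@mysql-cluster-c:3306 --js \
  -e 'util.dumpInstance("/backups/" + new Date().toISOString().slice(0, 10), {threads: 4})'
</code>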


====Restore====
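
The matching restore loads that dump back with MySQL Shell's load utility. The target instance needs local_infile enabled, and for a full cluster rebuild you would load into a clean instance first and then re-create the cluster around it. A minimal sketch (credentials and path are examples):

<code>
# Load the dump into a freshly prepared instance
mysqlsh root@mysql-cluster-a:3306 --js \
  -e 'util.loadDump("/backups/2024-06-25", {loadUsers: true, threads: 4})'
</code>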