====== Overview ======
In this module, we will configure Aurora PostgreSQL with one node (reader and writer). Furthermore, we will enable logical replication via the pglogical extension and replicate data to a plain RDS PostgreSQL instance, which will act as a subscriber.
So let's get started.


====== Configure the publisher / provider ======
Firstly, we will start with the creation of the Aurora PostgreSQL cluster, which will act as a Publisher / Provider. Note that most names, tag filters and versions below are demo values:

<code>
# NOTE: names, tag filters, engine versions and parameter values below are
# example settings for this demo - adjust them to your own environment.
resource "aws_rds_cluster_parameter_group" "pg14logical" {
  name        = "pg14logical"
  family      = "aurora-postgresql14"
  description = "RDS cluster parameter group for logical replication"

  parameter {
    name         = "rds.logical_replication"
    value        = "1"
    apply_method = "pending-reboot"
  }

  parameter {
    name         = "shared_preload_libraries"
    value        = "pglogical"
    apply_method = "pending-reboot"
  }

  parameter {
    name         = "max_replication_slots"
    value        = "10"
    apply_method = "pending-reboot"
  }

  parameter {
    name         = "max_wal_senders"
    value        = "15"
    apply_method = "pending-reboot"
  }

  parameter {
    name         = "max_worker_processes"
    value        = "10"
    apply_method = "pending-reboot"
  }
}

locals {
  # Endpoint of the subscriber side, filled in manually for this demo
  destination_endpoint = "<subscriber-endpoint>"
  rds_cidr_blocks      = [for s in data.aws_subnet.rds_subnet_array : s.cidr_block]
  all_cidr_blocks      = concat(tolist([data.aws_subnet.ec2_subnet.cidr_block]), local.rds_cidr_blocks)
}

data "aws_subnets" "rds_subnets" {
  filter {
    name   = "tag:Name"
    values = ["*rds*"]
  }
}

data "aws_subnet" "rds_subnet_array" {
  for_each = toset(data.aws_subnets.rds_subnets.ids)
  id       = each.value
}

data "aws_subnets" "private_subnets" {
  filter {
    name   = "tag:Name"
    values = ["*private*"]
  }
}

resource "aws_key_pair" "generated_key" {
  key_name   = var.key_name
  public_key = var.public_key
}

data "aws_subnet" "ec2_subnet" {
  id = data.aws_subnets.private_subnets.ids[0]
}

data "aws_vpc" "main_vpc" {
  filter {
    name   = "tag:Name"
    values = ["*main*"]
  }
}

resource "aws_security_group" "pgsql_allow" {
  name        = "allow_ssh_postgresql"
  description = "Allow PostgreSQL & SSH access"
  vpc_id      = data.aws_vpc.main_vpc.id

  tags = {
    Name = "allow_ssh_postgresql"
  }
}

data "aws_security_group" "selected_sg" {
  vpc_id = data.aws_vpc.main_vpc.id

  filter {
    name   = "group-name"
    values = ["allow_ssh_postgresql"]
  }
  depends_on = [aws_security_group.pgsql_allow]
}

data "aws_security_group" "default_sg" {
  vpc_id = data.aws_vpc.main_vpc.id
  name   = "default"
}

resource "aws_security_group_rule" "ingress_rules" {
  count             = length(var.rules)
  type              = "ingress"
  from_port         = var.rules[count.index].from_port
  to_port           = var.rules[count.index].to_port
  protocol          = var.rules[count.index].protocol
  cidr_blocks       = local.all_cidr_blocks
  description       = var.rules[count.index].description
  security_group_id = data.aws_security_group.selected_sg.id
}

resource "aws_security_group_rule" "egress_rules" {
  count             = length(var.rules)
  type              = "egress"
  from_port         = var.rules[count.index].from_port
  to_port           = var.rules[count.index].to_port
  protocol          = var.rules[count.index].protocol
  cidr_blocks       = local.all_cidr_blocks
  description       = var.rules[count.index].description
  security_group_id = data.aws_security_group.selected_sg.id
}

resource "local_file" "setup_source_sql" {
  file_permission = "0600"
  content = templatefile(var.source_cluster_setup_path, {
    source_endpoint         = module.cluster.cluster_endpoint
    database_name           = var.database_name
    database_admin_username = var.database_admin_username
    database_admin_password = var.database_admin_password
  })
  filename = "${path.module}/setup_source.sql"
}

resource "aws_instance" "ec2_instance" {
  ami                    = var.ami
  subnet_id              = data.aws_subnet.ec2_subnet.id
  instance_type          = "t3.micro"
  key_name               = aws_key_pair.generated_key.key_name
  vpc_security_group_ids = [data.aws_security_group.selected_sg.id, data.aws_security_group.default_sg.id]

  tags = {
    Name        = "pgsql-source-client"
    Environment = var.env_name
    Terraform   = "true"
  }
}

resource "null_resource" "configure_source" {

  triggers = {
    always_run = timestamp()
  }

  provisioner "file" {
    source      = local_file.setup_source_sql.filename
    destination = "/tmp/setup_source.sql"

    connection {
      type        = "ssh"
      user        = "ec2-user"
      private_key = file(var.private_key_path)
      host        = aws_instance.ec2_instance.private_ip
    }
  }

  provisioner "remote-exec" {
    inline = ["sudo yum -y update",
      "sudo yum -y install https://download.postgresql.org/pub/repos/yum/reporpms/EL-7-x86_64/pgdg-redhat-repo-latest.noarch.rpm",
      "sudo yum install -y postgresql14",
      "sudo yum install -y telnet",
      "sudo yum install -y unzip",
      # Run the rendered template against the new cluster (example invocation)
      "PGPASSWORD='${var.database_admin_password}' psql -h ${module.cluster.cluster_endpoint} -U ${var.database_admin_username} -d postgres -f /tmp/setup_source.sql"]

    connection {
      type        = "ssh"
      user        = "ec2-user"
      private_key = file(var.private_key_path)
      host        = aws_instance.ec2_instance.private_ip
    }
  }
  depends_on = [module.cluster]
}

module "cluster" {
  source = "terraform-aws-modules/rds-aurora/aws"

  name                        = "aurora-pgsql-source"
  engine                      = "aurora-postgresql"
  engine_version              = "14.7"
  manage_master_user_password = false
  master_username             = var.database_admin_username
  master_password             = var.database_admin_password

  instances = {
    one = {
      instance_class = "db.r5.large"
    }
  }

  vpc_id                          = data.aws_vpc.main_vpc.id
  db_cluster_parameter_group_name = aws_rds_cluster_parameter_group.pg14logical.name
  vpc_security_group_ids          = [data.aws_security_group.selected_sg.id]
  db_subnet_group_name            = "rds-subnet-group"
  skip_final_snapshot             = true

  tags = {
    Environment = var.env_name
    Terraform   = "true"
  }
}
</code>

This code will create the Aurora PostgreSQL cluster with the pglogical prerequisites in place and, furthermore, it will render and execute the SQL template below against the new cluster:

<code sql>
/* Example template - node, replication set and table names are demo placeholders */

/* Setup */
\c ${database_name}
create extension pglogical;
select pglogical.drop_node('provider_node', true);

/* Create node */
select pglogical.create_node(node_name := 'provider_node', dsn := 'host=${source_endpoint} port=5432 dbname=${database_name} user=${database_admin_username} password=${database_admin_password}');

/* DR Setup */
select pglogical.drop_replication_set('dr_set', true);
select pglogical.create_replication_set(set_name := 'dr_set');
select pglogical.replication_set_add_all_tables(set_name := 'dr_set', schema_names := ARRAY['public'], synchronize_data := true);
select pglogical.replication_set_add_all_tables(set_name := 'dr_set', schema_names := ARRAY['inventory'], synchronize_data := true);


/* BI Setup */
select pglogical.drop_replication_set(set_name := 'bi_set', ifexists := true);
select pglogical.create_replication_set(set_name := 'bi_set');
select pglogical.replication_set_add_table(set_name := 'bi_set', relation := 'public.orders', synchronize_data := true);
select pglogical.replication_set_add_table('bi_set', 'public.customers', true);
select pglogical.replication_set_add_table('bi_set', 'public.products', true);


select pglogical.replicate_ddl_command('create table public.reports (id int primary key, payload text)');
select pglogical.replicate_ddl_command('create table public.audit (id int primary key, event text)');
select pglogical.replicate_ddl_command('create table public.metrics (id int primary key, value numeric)');
</code>

These are, OF COURSE, test settings, BUT they will set up the pglogical extension and register the node as a publisher/provider with a couple of replication sets. A quick way to verify the result is shown right below.
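
If you want to double-check the publisher before moving on, you can query the pglogical catalog directly. This is only a sketch and assumes the placeholder node and set names used in the template above:

<code sql>
/* Registered pglogical nodes (the provider node should appear here) */
select * from pglogical.node;

/* Replication sets and the tables assigned to them */
select * from pglogical.replication_set;
select * from pglogical.tables;
</code>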

Then, let's configure the Subscriber.

====== Configure the Subscriber ======
The following code will configure the RDS PostgreSQL instance and create a small demo structure for pglogical:

<code>
# NOTE: as in the publisher module, names, tag filters, engine versions and
# parameter values below are example settings for this demo.
resource "aws_db_parameter_group" "pg14logical" {
  name        = "pg14logical-rds"
  family      = "postgres14"
  description = "RDS parameter group for logical replication"

  parameter {
    name         = "rds.logical_replication"
    value        = "1"
    apply_method = "pending-reboot"
  }

  parameter {
    name         = "shared_preload_libraries"
    value        = "pglogical"
    apply_method = "pending-reboot"
  }

  parameter {
    name         = "max_replication_slots"
    value        = "10"
    apply_method = "pending-reboot"
  }

  parameter {
    name         = "max_wal_senders"
    value        = "15"
    apply_method = "pending-reboot"
  }

  parameter {
    name         = "max_worker_processes"
    value        = "10"
    apply_method = "pending-reboot"
  }

  lifecycle {
    create_before_destroy = true
  }
}


locals {
  destination_endpoint = module.db.db_instance_address
  rds_cidr_blocks      = [for s in data.aws_subnet.rds_subnet_array : s.cidr_block]
  all_cidr_blocks      = concat(tolist([data.aws_subnet.ec2_subnet.cidr_block]), local.rds_cidr_blocks)
}


data "aws_subnets" "rds_subnets" {
  filter {
    name   = "tag:Name"
    values = ["*rds*"]
  }
}

resource "aws_key_pair" "generated_key" {
  key_name   = var.key_name
  public_key = var.public_key
}

data "aws_subnets" "private_subnets" {
  filter {
    name   = "tag:Name"
    values = ["*private*"]
  }
}

data "aws_subnet" "ec2_subnet" {
  id = data.aws_subnets.private_subnets.ids[0]
}

data "aws_vpc" "main_vpc" {
  filter {
    name   = "tag:Name"
    values = ["*main*"]
  }
}

data "aws_subnet" "rds_subnet_array" {
  for_each = toset(data.aws_subnets.rds_subnets.ids)
  id       = each.value
}

resource "aws_security_group" "pgsql_allow" {
  name        = "allow_ssh_postgresql"
  description = "Allow PostgreSQL & SSH access"
  vpc_id      = data.aws_vpc.main_vpc.id

  tags = {
    Name = "allow_ssh_postgresql"
  }
}

data "aws_security_group" "selected_allow_ssh_postgresql" {
  vpc_id = data.aws_vpc.main_vpc.id

  filter {
    name   = "group-name"
    values = ["allow_ssh_postgresql"]
  }
  depends_on = [aws_security_group.pgsql_allow]
}

data "aws_security_group" "default_sg" {
  vpc_id = data.aws_vpc.main_vpc.id
  name   = "default"
}

resource "aws_security_group_rule" "ingress_rules" {
  count             = length(var.rules)
  type              = "ingress"
  from_port         = var.rules[count.index].from_port
  to_port           = var.rules[count.index].to_port
  protocol          = var.rules[count.index].protocol
  cidr_blocks       = local.all_cidr_blocks
  description       = var.rules[count.index].description
  security_group_id = data.aws_security_group.selected_allow_ssh_postgresql.id
}

resource "aws_security_group_rule" "egress_rules" {
  count             = length(var.rules)
  type              = "egress"
  from_port         = var.rules[count.index].from_port
  to_port           = var.rules[count.index].to_port
  protocol          = var.rules[count.index].protocol
  cidr_blocks       = local.all_cidr_blocks
  description       = var.rules[count.index].description
  security_group_id = data.aws_security_group.selected_allow_ssh_postgresql.id
}

resource "local_file" "setup_destination_sql" {
  file_permission = "0600"
  content = templatefile("${path.module}/templates/setup_destination.sql.tpl", {
    destination_endpoint    = local.destination_endpoint
    database_name           = var.database_name
    database_admin_username = var.database_admin_username
    database_admin_password = var.database_admin_password
    source_endpoint         = var.source_database_endpoint
  })
  filename = "${path.module}/setup_destination.sql"
}

resource "aws_instance" "ec2_instance" {
  ami                    = var.ami
  subnet_id              = data.aws_subnet.ec2_subnet.id
  instance_type          = "t3.micro"
  key_name               = aws_key_pair.generated_key.key_name
  vpc_security_group_ids = [data.aws_security_group.selected_allow_ssh_postgresql.id, data.aws_security_group.default_sg.id]

  tags = {
    Name        = "pgsql-destination-client"
    Environment = var.env_name
    Terraform   = "true"
  }
}

resource "null_resource" "configure_destination" {

  triggers = {
    always_run = timestamp()
  }

  provisioner "file" {
    source      = local_file.setup_destination_sql.filename
    destination = "/tmp/setup_destination.sql"

    connection {
      type        = "ssh"
      user        = "ec2-user"
      private_key = file(var.private_key_path)
      host        = aws_instance.ec2_instance.private_ip
    }
  }

  provisioner "remote-exec" {
    inline = ["sudo yum -y update",
      "sudo yum -y install https://download.postgresql.org/pub/repos/yum/reporpms/EL-7-x86_64/pgdg-redhat-repo-latest.noarch.rpm",
      "sudo yum install -y postgresql14",
      "sudo yum install -y telnet",
      "sudo yum install -y unzip",
      # Run the rendered template against the RDS instance (example invocation)
      "PGPASSWORD='${var.database_admin_password}' psql -h ${module.db.db_instance_address} -U ${var.database_admin_username} -d postgres -f /tmp/setup_destination.sql"]

    connection {
      type        = "ssh"
      user        = "ec2-user"
      private_key = file(var.private_key_path)
      host        = aws_instance.ec2_instance.private_ip
    }
  }
  depends_on = [module.db]
}

module "db" {
  source = "terraform-aws-modules/rds/aws"

  engine                      = "postgres"
  engine_version              = "14.7"
  instance_class              = "db.t3.medium"
  allocated_storage           = 20
  manage_master_user_password = false
  create_db_parameter_group   = false
  identifier                  = "rds-pgsql-destination"
  username                    = var.database_admin_username
  password                    = var.database_admin_password
  port                        = "5432"
  db_subnet_group_name        = "rds-subnet-group"
  vpc_security_group_ids      = [data.aws_security_group.selected_allow_ssh_postgresql.id]
  parameter_group_name        = aws_db_parameter_group.pg14logical.name
  major_engine_version        = "14"

  tags = {
    Environment = var.env_name
    Terraform   = "true"
  }
}
</code>

As before, this code will create the RDS PostgreSQL instance with all the options required for pglogical. It will also render and execute the SQL template below:

<code sql>
/* Example template - node, subscription and replication set names are demo placeholders */

/* Clean up & Setup */
drop database ${database_name} with (FORCE);
create database ${database_name};
\c ${database_name}
create extension pglogical;
select pglogical.drop_node('subscriber_node', true);


/* Create node */
select pglogical.create_node(node_name := 'subscriber_node', dsn := 'host=${destination_endpoint} port=5432 dbname=${database_name} user=${database_admin_username} password=${database_admin_password}');

/* DR Setup */
select pglogical.drop_subscription('dr_subscription', true);
select pglogical.create_subscription(subscription_name := 'dr_subscription', provider_dsn := 'host=${source_endpoint} port=5432 dbname=${database_name} user=${database_admin_username} password=${database_admin_password}', replication_sets := ARRAY['dr_set'], synchronize_structure := true, synchronize_data := true);
select pglogical.create_subscription(subscription_name := 'ddl_subscription', provider_dsn := 'host=${source_endpoint} port=5432 dbname=${database_name} user=${database_admin_username} password=${database_admin_password}', replication_sets := ARRAY['ddl_sql']);

/* BI Setup */
select pglogical.drop_subscription('bi_subscription', true);
select pglogical.create_subscription(subscription_name := 'bi_subscription', provider_dsn := 'host=${source_endpoint} port=5432 dbname=${database_name} user=${database_admin_username} password=${database_admin_password}', replication_sets := ARRAY['bi_set'], synchronize_structure := false, synchronize_data := true);
</code>
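
Once the template has run, the state of the subscriptions can be checked from the subscriber side with the extension's own status function; the subscription names are simply whatever you used in the template above:

<code sql>
/* Should report "replicating" for every subscription once the initial sync is done */
select subscription_name, status, provider_node, replication_sets
  from pglogical.show_subscription_status();
</code>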

Of course, all of that excludes the meta configuration which you still need for Terraform, such as the provider and state backend definitions; a minimal sketch is shown below.
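
This is only a sketch of that meta configuration; the provider version, region and S3 bucket name are placeholders, not values from this setup:

<code>
terraform {
  required_version = ">= 1.3"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
  }

  # Optional: keep the state in S3 instead of on the local disk
  backend "s3" {
    bucket = "my-terraform-state"                  # placeholder bucket name
    key    = "aurora-pglogical/terraform.tfstate"
    region = "eu-central-1"                        # placeholder region
  }
}

provider "aws" {
  region = "eu-central-1"                          # placeholder region
}
</code>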

That being said, since we need the security rules (ports and so on), here is an extract from the variables:

<code>
variable "key_name" {
  description = "Name of the EC2 key pair"
  type        = string
  default     = "pgsql-client-key"
}

variable "env_name" {
  description = "Name of the environment"
  type        = string
}

variable "database_admin_password" {
  description = "The password for the database"
  type        = string
}

variable "database_admin_username" {
  description = "The username for the database"
  type        = string
}

variable "private_key_path" {
  description = "The path of the private key"
  type        = string
}

variable "public_key" {
  description = "The public key for the EC2 key pair"
  type        = string
}

variable "ami" {
  description = "AMI for the EC2 image"
  type        = string
  default     = "ami-0abcdef1234567890" # example AMI ID, replace with a real one
}

variable "source_cluster_setup_path" {
  description = "The path for the configuration of the source cluster"
  type        = string
}

variable "database_name" {
  description = "The name of the virtual database we will replicate"
  type        = string
}

variable "source_database_endpoint" {
  description = "The endpoint of the source database, provided manually for now"
  type        = string
}


variable "rules" {
  type = list(object({
    from_port   = number
    to_port     = number
    protocol    = string
    description = string
  }))
  default = [
    {
      from_port   = 22
      to_port     = 22
      protocol    = "tcp"
      description = "ssh access"
    },
    {
      from_port   = 5432
      to_port     = 5432
      protocol    = "tcp"
      description = "postgresql access"
    },
  ]
}
</code>

The "rules" variable drives the ingress and egress security group rules above, opening SSH (port 22) and PostgreSQL (port 5432) between the EC2 client and the database subnets.
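
To tie the variables together, a terraform.tfvars for one of the two stacks could look roughly like this; every value here (credentials, paths, AMI ID, endpoint) is made up for illustration and must be replaced with your own:

<code>
env_name                  = "test"
database_admin_username   = "postgres"
database_admin_password   = "Sup3rS3cret!"
public_key                = "ssh-rsa AAAA... user@laptop"
private_key_path          = "~/.ssh/pgsql-client.pem"
ami                       = "ami-0abcdef1234567890"
database_name             = "demo"
source_cluster_setup_path = "templates/setup_source.sql.tpl"
source_database_endpoint  = "aurora-pgsql-source.cluster-xxxxxxxxxxxx.eu-central-1.rds.amazonaws.com"
</code>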