# Root Terraform configuration for the assessment-model stack.
terraform {
  # AWS provider pinned to the 4.16.x-compatible series.
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 4.16"
    }
  }

  # Remote state lives in S3 in eu-west-2.
  backend "s3" {
    bucket = "assessment-model-terraform-state"
    key    = "terraform.tfstate"
    region = "eu-west-2"
  }

  required_version = ">= 1.2.0"
}
# Default provider; region is supplied per environment via var.region.
provider "aws" {
  region = var.region
}
# Additional provider for resources that must live in us-east-1,
# specifically the SSL certificate.
provider "aws" {
  alias  = "aws_use1"
  region = "us-east-1"
}
# The DB credentials secret is managed outside Terraform; a secret named
# "<stage>/assessment_model/db_credentials" must already exist.
data "aws_secretsmanager_secret" "db_credentials" {
  name = "${var.stage}/assessment_model/db_credentials"
}

# Latest version of that secret (JSON document holding username/password).
data "aws_secretsmanager_secret_version" "db_credentials" {
  secret_id = data.aws_secretsmanager_secret.db_credentials.id
}
# Look up the account's default VPC; the DB security group is attached to it.
data "aws_vpc" "default" {
  default = true
}
# For MVP, we allow all inbound traffic to the DB port - this will need to be
# changed later; we'll likely need to re-deploy the frontend to AWS so that
# it's within the same VPC as the DB.
#
# NOTE(review): the resource label is "allow_db" but the group name is
# "allow_tls" and the description mentions TLS, while the rule actually opens
# PostgreSQL (5432). Renaming the group would force it to be replaced, so the
# mismatch is only flagged here - confirm before changing.
resource "aws_security_group" "allow_db" {
  name        = "allow_tls"
  description = "Allow TLS inbound traffic"
  vpc_id      = data.aws_vpc.default.id

  # PostgreSQL from anywhere - deliberately open pre-MVP (see note above).
  ingress {
    from_port   = 5432
    to_port     = 5432
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # Unrestricted egress on all protocols.
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}
# Decode the credentials secret once; username and password are both read
# from the same JSON document (previously jsondecode() was evaluated twice).
locals {
  db_credentials = jsondecode(data.aws_secretsmanager_secret_version.db_credentials.secret_string)
}

# PostgreSQL RDS instance backing the assessment model.
resource "aws_db_instance" "default" {
  allocated_storage = var.allocated_storage
  engine            = "postgres"
  engine_version    = "14.17"
  instance_class    = var.instance_class
  db_name           = var.database_name

  username = local.db_credentials["db_assessment_model_username"]
  password = local.db_credentials["db_assessment_model_password"]

  parameter_group_name   = "default.postgres14"
  skip_final_snapshot    = true
  vpc_security_group_ids = [aws_security_group.allow_db.id]

  # For the moment, we make the database publicly accessible so that we can
  # connect to it from the frontend. We will look to change this in the future
  # but as we are pre-MVP at the time of setting this, we don't have major
  # security demand and don't want to set this up now.
  publicly_accessible = true

  # Pin the CA certificate to the default RDS CA certificate.
  ca_cert_identifier = "rds-ca-rsa2048-g1"

  # Temporary to enforce immediate change.
  apply_immediately = true

  # gp3 storage type for better performance.
  storage_type = "gp3"

  # Guard against accidental destruction of the database.
  lifecycle {
    prevent_destroy = true
  }
}
# Bucket that receives the CSV uploads of EPCs to be retrofitted.
module "s3_presignable_bucket" {
  source          = "../modules/s3_presignable_bucket"
  bucketname      = "retrofit-plan-inputs-${var.stage}"
  environment     = var.stage
  allowed_origins = var.allowed_origins
}

module "s3_due_considerations_bucket" {
  source          = "../modules/s3_presignable_bucket"
  bucketname      = "retrofit-due-considerations-${var.stage}"
  environment     = var.stage
  allowed_origins = var.allowed_origins
}

# NOTE(review): module label has a typo ("spreadseet"); left as-is because
# renaming the label would detach the module's existing state.
module "s3_eco_spreadseet_bucket" {
  source          = "../modules/s3_presignable_bucket"
  bucketname      = "retrofit-eco-spreadsheet-${var.stage}"
  environment     = var.stage
  allowed_origins = var.allowed_origins
}
# Datalake bucket (also fronted by the CloudFront distribution below).
module "s3" {
  source          = "../modules/s3"
  bucketname      = "retrofit-datalake-${var.stage}"
  allowed_origins = var.allowed_origins
}

module "model_directory" {
  source          = "../modules/s3"
  bucketname      = "retrofit-model-directory-${var.stage}"
  allowed_origins = var.allowed_origins
}

module "retrofit_sap_predictions" {
  source          = "../modules/s3"
  bucketname      = "retrofit-sap-predictions-${var.stage}"
  allowed_origins = var.allowed_origins
}

# Bucket name is "retrofit-data-<stage>" (not "retrofit-sap-data"); the IAM
# policies further down reference this ARN, so keep the two in sync.
module "retrofit_sap_data" {
  source          = "../modules/s3"
  bucketname      = "retrofit-data-${var.stage}"
  allowed_origins = var.allowed_origins
}
output "retrofit_sap_data_bucket_name" {
  description = "Name of the retrofit SAP data bucket"
  value       = module.retrofit_sap_data.bucket_name
}
# One plain S3 bucket per prediction model output.
module "retrofit_carbon_predictions" {
  source          = "../modules/s3"
  bucketname      = "retrofit-carbon-predictions-${var.stage}"
  allowed_origins = var.allowed_origins
}

module "retrofit_heat_predictions" {
  source          = "../modules/s3"
  bucketname      = "retrofit-heat-predictions-${var.stage}"
  allowed_origins = var.allowed_origins
}

module "retrofit_lighting_cost_predictions" {
  source          = "../modules/s3"
  bucketname      = "retrofit-lighting-cost-predictions-${var.stage}"
  allowed_origins = var.allowed_origins
}

module "retrofit_heating_cost_predictions" {
  source          = "../modules/s3"
  bucketname      = "retrofit-heating-cost-predictions-${var.stage}"
  allowed_origins = var.allowed_origins
}

module "retrofit_hot_water_cost_predictions" {
  source          = "../modules/s3"
  bucketname      = "retrofit-hot-water-cost-predictions-${var.stage}"
  allowed_origins = var.allowed_origins
}

module "retrofit_heating_kwh_predictions" {
  source          = "../modules/s3"
  bucketname      = "retrofit-heating-kwh-predictions-${var.stage}"
  allowed_origins = var.allowed_origins
}

module "retrofit_hotwater_kwh_predictions" {
  source          = "../modules/s3"
  bucketname      = "retrofit-hotwater-kwh-predictions-${var.stage}"
  allowed_origins = var.allowed_origins
}

module "retrofit_sap_baseline_predictions" {
  source          = "../modules/s3"
  bucketname      = "retrofit-sap-baseline-predictions-${var.stage}"
  allowed_origins = var.allowed_origins
}
# This bucket is presignable because we want to generate download links
# for the frontend.
module "retrofit_energy_assessments" {
  source          = "../modules/s3_presignable_bucket"
  bucketname      = "retrofit-energy-assessments-${var.stage}"
  environment     = var.stage
  allowed_origins = var.allowed_origins
}
# Route53 record for the API. The us-east-1 aliased provider is passed
# through because the SSL certificate must live in us-east-1 (see the
# provider declaration above).
module "route53" {
  source         = "../modules/route53"
  domain_name    = var.domain_name
  api_url_prefix = var.api_url_prefix

  providers = {
    aws.aws_use1 = aws.aws_use1
  }
}
# ECR repositories storing the Lambdas' Docker images.
module "ecr" {
  source   = "../modules/ecr"
  ecr_name = "fastapi-repository-${var.stage}"
}

module "lambda_sap_prediction_ecr" {
  source   = "../modules/ecr"
  ecr_name = "lambda-sap-prediction-${var.stage}"
}

module "due_considerations_ecr" {
  source   = "../modules/ecr"
  ecr_name = "due-considerations-${var.stage}"
}

module "eco_spreadsheet_ecr" {
  source   = "../modules/ecr"
  ecr_name = "eco-spreadsheet-${var.stage}"
}
module "lambda_carbon_prediction_ecr" {
  source   = "../modules/ecr"
  ecr_name = "lambda-carbon-prediction-${var.stage}"
}

module "lambda_heat_prediction_ecr" {
  source   = "../modules/ecr"
  ecr_name = "lambda-heat-prediction-${var.stage}"
}
# ECR repos for the lighting cost, heating cost and hot water cost models.
module "lambda_lighting_cost_prediction_ecr" {
  source   = "../modules/ecr"
  ecr_name = "lighting-cost-prediction-${var.stage}"
}

module "lambda_heating_cost_prediction_ecr" {
  source   = "../modules/ecr"
  ecr_name = "heating-cost-prediction-${var.stage}"
}

module "lambda_hot_water_cost_prediction_ecr" {
  source   = "../modules/ecr"
  ecr_name = "hot-water-cost-prediction-${var.stage}"
}
# ECR repos for the heating and hot water kWh models.
module "lambda_heating_kwh_prediction_ecr" {
  source   = "../modules/ecr"
  ecr_name = "heating-kwh-prediction-${var.stage}"
}

module "lambda_hotwater_kwh_prediction_ecr" {
  source   = "../modules/ecr"
  ecr_name = "hotwater-kwh-prediction-${var.stage}"
}
# ECR repo for the baselining model.
module "sap_baseline_ecr" {
  source   = "../modules/ecr"
  ecr_name = "sap-baseline-prediction-${var.stage}"
}
##############################################
# CDN - CloudFront
##############################################
# Fronts the datalake bucket declared in module "s3" above.
module "cloudfront_distribution" {
  source = "../modules/cloudfront"
  stage  = var.stage

  bucket_name        = module.s3.bucket_name
  bucket_id          = module.s3.bucket_id
  bucket_arn         = module.s3.bucket_arn
  bucket_domain_name = module.s3.bucket_domain_name
}
################################################
# SES - Email sending
################################################
module "ses" {
  source = "../modules/ses"

  # NOTE(review): domain is hard-coded here while route53 uses
  # var.domain_name - confirm whether that difference is intentional.
  domain_name = "domna.homes"
  stage       = var.stage
}

# Expose the DNS records produced by the SES module.
output "ses_dns_records" {
  value = module.ses.dns_records
}
################################################
# Address2UPRN – Lambda ECR
################################################
module "address2uprn_state_bucket" {
  source      = "../modules/tf_state_bucket"
  bucket_name = "address2uprn-terraform-state"
}

module "address2uprn_registry" {
  source = "../modules/container_registry"
  name   = "address2uprn"
  stage  = var.stage
}

# S3 policy allowing the address2uprn Lambda to read from and write to the
# retrofit data bucket.
module "address2uprn_s3_read_and_write" {
  source = "../modules/s3_iam_policy"

  policy_name        = "Address2UPRNReadandWriteS3"
  policy_description = "Allow address2uprn Lambda to read and write from retrofit-data bucket"
  bucket_arns        = ["arn:aws:s3:::retrofit-data-${var.stage}"]
  actions            = ["s3:GetObject", "s3:ListBucket", "s3:PutObject"]
  resource_paths     = ["/*"]
}

output "address_2_uprn_s3_read_and_write_arn" {
  value = module.address2uprn_s3_read_and_write.policy_arn
}
################################################
# Condition ETL – Lambda ECR
################################################
module "condition_etl_state_bucket" {
  source      = "../modules/tf_state_bucket"
  bucket_name = "condition-etl-terraform-state"
}

module "condition_etl_registry" {
  source = "../modules/container_registry"
  name   = "condition-etl"
  stage  = var.stage
}

# Condition data S3 bucket storing the initial data.
module "condition_data_bucket" {
  source          = "../modules/s3"
  bucketname      = "condition-data-${var.stage}"
  allowed_origins = var.allowed_origins
}

# Read-only access to the condition data bucket for the ETL Lambda.
module "condition_etl_s3_read" {
  source = "../modules/s3_iam_policy"

  policy_name        = "ConditionETLReadS3"
  policy_description = "Allow Lambda to read objects from condition-data-${var.stage}"
  bucket_arns        = ["arn:aws:s3:::condition-data-${var.stage}"]
  actions            = ["s3:GetObject"]
  resource_paths     = ["/*"]
}

output "condition_etl_s3_read_arn" {
  value = module.condition_etl_s3_read.policy_arn
}
################################################
# Postcode Splitter – Lambda ECR
################################################
module "postcode_splitter_state_bucket" {
  source      = "../modules/tf_state_bucket"
  bucket_name = "postcode-splitter-terraform-state"
}

module "postcode_splitter_registry" {
  source = "../modules/container_registry"
  name   = "postcode_splitter"
  stage  = var.stage
}

# S3 policy for the postcode splitter against the retrofit data bucket.
# NOTE(review): the policy is named/described as read-only but the action list
# also grants s3:PutObject - confirm whether write access is intended.
module "postcode_splitter_s3_read" {
  source = "../modules/s3_iam_policy"

  policy_name        = "PostcodeSplitterReadS3"
  policy_description = "Allow postcode splitter Lambda to read from retrofit-data bucket"
  bucket_arns        = ["arn:aws:s3:::retrofit-data-${var.stage}"]
  actions            = ["s3:GetObject", "s3:ListBucket", "s3:PutObject"]
  resource_paths     = ["/*"]
}

output "postcode_splitter_s3_read_arn" {
  value = module.postcode_splitter_s3_read.policy_arn
}
################################################
# Categorisation – Lambda ECR
################################################
module "categorisation_state_bucket" {
  source      = "../modules/tf_state_bucket"
  bucket_name = "categorisation-terraform-state"
}

module "categorisation_registry" {
  source = "../modules/container_registry"
  name   = "categorisation"
  stage  = var.stage
}