Compare commits
36 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 1cec6bff4c | |
| | d011abda06 | |
| | 0a136a0532 | |
| | a467bb1009 | |
| | 1d96211bda | |
| | 684bdac1b6 | |
| | 8f15227287 | |
| | 038b3adde8 | |
| | 79a766aa51 | |
| | f0ce583548 | |
| | 994eab3bd3 | |
| | a12993b135 | |
| | ec4140cb22 | |
| | dd3f177982 | |
| | dc97be0765 | |
| | ca3a72e49d | |
| | d7503c7cdd | |
| | 85b4bcb924 | |
| | 0b6524db77 | |
| | 60ad0ca83b | |
| | bd7f5dbe40 | |
| | f401ae4e38 | |
| | 6974f720ee | |
| | c9beb6c4c4 | |
| | 6eea6eacda | |
| | 74f3846c63 | |
| | 1c76afe50f | |
| | 305e502957 | |
| | 58c73cae50 | |
| | 24ab131297 | |
| | 30d2d4d6e5 | |
| | 73d1b885b7 | |
| | 174ef7e840 | |
| | f78e8d0638 | |
| | 6cfe2452f6 | |
| | f9aac95f46 | |
.gitmodules (vendored, Normal file, 0 changes)
@@ -6,3 +6,12 @@ for automated testing of dahak workflows.
+See [charlesreid1.github.io/dahak-bespin](https://charlesreid1.github.io/dahak-bespin).
+
+Inspiration: [terraform-aws-consul](https://github.com/hashicorp/terraform-aws-consul)
+
+Terraform module organization:
+
+* root: This folder shows an example of Terraform code that uses a terraform module to deploy a cluster in AWS.
+* module: This folder contains the reusable code for this module.
+* examples: This folder contains examples of how to use the module.
+* test: Automated tests for the module and examples.
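For readers new to this layout: the root configuration consumes the reusable code with a `module` block whose `source` points at the module folder. A minimal sketch (the `source` path and `cluster_name` input are taken from `main.tf` and `variables.tf` later in this compare; any other inputs are up to the module):

```hcl
# Root-level sketch: instantiate the reusable module from ./module
module "dahak_cluster" {
  source = "./module"

  # Example input; the default for this variable lives in variables.tf
  cluster_name = "dahak-test-cluster"
}
```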
@@ -1,18 +0,0 @@
-# New resource for the S3 bucket our application will use.
-resource "aws_s3_bucket" "example" {
-  # NOTE: S3 bucket names must be unique across _all_ AWS accounts, so
-  # this name must be changed before applying this example to avoid naming
-  # conflicts.
-  bucket = "terraform-getting-started-guide"
-  acl    = "private"
-}
-
-# Change the aws_instance we declared earlier to now include "depends_on"
-resource "aws_instance" "example" {
-  ami           = "ami-2757f631"
-  instance_type = "t2.micro"
-
-  # Tells Terraform that this EC2 instance must be created only after the
-  # S3 bucket has been created.
-  depends_on = ["aws_s3_bucket.example"]
-}
cloud_init/spy.sh (Normal file, 2 changes)
@@ -0,0 +1,2 @@
#!/bin/bash
bash <( curl https://raw.githubusercontent.com/charlesreid1/dahak-spy/master/cloud_init/cloud_init.sh )
cloud_init/yeti.sh (Normal file, 2 changes)
@@ -0,0 +1,2 @@
#!/bin/bash
bash <( curl https://raw.githubusercontent.com/charlesreid1/dahak-yeti/master/cloud_init/cloud_init.sh )
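A note on how wrapper scripts like these can be wired into Terraform: an instance's `user_data` can carry the script contents. This is only a sketch with a hypothetical AMI ID; `main.tf` in this compare instead templates the submodules' `cloud_init.sh` files directly via `template_file`:

```hcl
# Sketch: pass the two-line wrapper script to an EC2 instance as user data.
resource "aws_instance" "spy" {
  ami           = "ami-abcd1234"  # hypothetical AMI ID
  instance_type = "t2.micro"

  # file() reads cloud_init/spy.sh, which curls the dahak-spy cloud init script
  user_data = "${file("cloud_init/spy.sh")}"
}
```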
docs/_static/bootstrap.min.css (vendored, Normal file, 6 changes)
File diff suppressed because one or more lines are too long
docs/_static/custom.css (vendored, Normal file, 6 changes)
@@ -0,0 +1,6 @@
body {
    background-color: #efefef;
}
div.body {
    background-color: #efefef;
}
docs/conf.py (41 changes)
@@ -19,9 +19,9 @@

 # -- Project information -----------------------------------------------------

-project = 'bespin'
-copyright = '2018, charles reid'
-author = 'charles reid'
+project = 'dahak-bespin'
+copyright = '2018'
+author = 'DIB Lab'

 # The short X.Y version
 version = ''

@@ -47,8 +47,6 @@ extensions = [

 templates_path = ['_templates']

 # The suffix(es) of source filenames.
 # You can specify multiple suffix as a list of string:
 #
-source_parsers = {
-    '.md': 'recommonmark.parser.CommonMarkParser'
-}

@@ -86,6 +84,20 @@ html_theme = 'alabaster'

 #
 # html_theme_options = {}

+# wow:
+# https://alabaster.readthedocs.io/en/latest/customization.html
+
+html_theme_options = {
+    'github_user': 'charlesreid1',
+    'github_repo': 'dahak-bespin',
+    'github_button' : 'true',
+    #'analytics_id' : '???',
+    'fixed_sidebar' : 'true',
+    'github_banner' : 'true',
+    'pre_bg' : '#fff'
+}

 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".

@@ -102,14 +114,15 @@ html_static_path = ['_static']

 # html_sidebars = {}

 html_context = {
-    "github_project" : 'dahak-bespin',
+    # "google_analytics_id" : 'UA-00000000-1',
+    "github_base_account" : 'charlesreid1',
+    "github_project" : 'dahak-taco',
 }

 # -- Options for HTMLHelp output ---------------------------------------------

 # Output file base name for HTML help builder.
-htmlhelp_basename = 'bespindoc'
+htmlhelp_basename = 'dahak-bespindoc'

@@ -136,7 +149,7 @@ latex_elements = {

 # (source start file, target name, title,
 # author, documentclass [howto, manual, or own class]).
 latex_documents = [
-    (master_doc, 'bespin.tex', 'bespin Documentation',
+    (master_doc, 'dahak-bespin.tex', 'bespin Documentation',
      'charles reid', 'manual'),
 ]

@@ -146,7 +159,7 @@ latex_documents = [

 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
 man_pages = [
-    (master_doc, 'bespin', 'bespin Documentation',
+    (master_doc, 'dahak-bespin', 'dahak-bespin Documentation',
      [author], 1)
 ]

@@ -157,10 +170,14 @@ man_pages = [

 # (source start file, target name, title, author,
 # dir menu entry, description, category)
 texinfo_documents = [
-    (master_doc, 'bespin', 'bespin Documentation',
-     author, 'bespin', 'One line description of project.',
+    (master_doc, 'dahak-bespin', 'dahak-bespin Documentation',
+     author, 'dahak-bespin', 'One line description of project.',
      'Miscellaneous'),
 ]

 # -- Extension configuration -------------------------------------------------

+def setup(app):
+    app.add_stylesheet('bootstrap.min.css')
@@ -18,21 +18,58 @@ we use the following architecture:

[ASCII architecture diagram: the spy node and the yeti nodes (yeti3 et al.) linked inside the VPC; the box-drawing alignment did not survive extraction, so the diagram is omitted here]

## Dahak Infrastructure

Dahak workflows will require:

* VPC to connect nodes
* 1 spy node to monitor and log
* 1+ yeti nodes to run workflows

## Dahak Terraform Files

### VPC

The VPC will allocate an IP address space 10.X.0.0/16.

The VPC subnet will allocate an IP address space 10.X.0.0/24.

The VPC will require AWS-provided DNS/DHCP.

The VPC will require an internet gateway.

The VPC will require a routing table pointing to the gateway.

### Spy Node

The spy node will need to run the cloud init scripts
contained in [dahak-spy](https://github.com/charlesreid1/dahak-spy).

### Yeti Node

The yeti node will need to run the cloud init scripts
contained in [dahak-yeti](https://github.com/charlesreid1/dahak-yeti).
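The VPC requirements above map directly onto the resources added in `main.tf` later in this compare; condensed from that diff:

```hcl
# VPC with AWS-provided DNS support (the 10.X.0.0/16 space; X=99 in main.tf)
resource "aws_vpc" "dahakvpc" {
  cidr_block           = "10.99.0.0/16"
  enable_dns_support   = true
  enable_dns_hostnames = true
}

# Subnet carving out the 10.X.0.0/24 space
resource "aws_subnet" "dahaksubnet" {
  vpc_id                  = "${aws_vpc.dahakvpc.id}"
  cidr_block              = "10.99.0.0/24"
  map_public_ip_on_launch = true
}

# Internet gateway, plus a route pointing the VPC's route table at it
resource "aws_internet_gateway" "dahakgw" {
  vpc_id = "${aws_vpc.dahakvpc.id}"
}

resource "aws_route" "internet_access" {
  route_table_id         = "${aws_vpc.dahakvpc.main_route_table_id}"
  destination_cidr_block = "0.0.0.0/0"
  gateway_id             = "${aws_internet_gateway.dahakgw.id}"
}
```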
@@ -1,11 +1,23 @@
-.. bespin documentation master file, created by
-   sphinx-quickstart on Fri Mar 23 00:08:44 2018.
-   You can adapt this file completely to your liking, but it should at least
-   contain the root `toctree` directive.
+.. _index:
+
+================
+dahak-bespin
+================
+
+dahak-bespin is a framework for allocating
+cloud infrastructure to run dahak workflows.
+
+* See source code at `charlesreid1/dahak-bespin on github
+  <https://github.com/charlesreid1/dahak-bespin>`_.
+
+* Sphinx documentation hosted at `charlesreid1.github.io/dahak-bespin
+  <https://charlesreid1.github.io/dahak-bespin>`_.
+
+* This package uses `terraform <https://www.terraform.io/>`_.
+
+Using Bespin via Terraform
+===========================

 .. toctree::
    :maxdepth: 2
    :caption: Contents:
@@ -114,3 +114,61 @@ use the destroy command:
 $ terraform destroy
 ```
+
+## Using Variables in Terraform
+
+### Input Variables
+
+You can define input variables in a file `variables.tf`
+and use them to set up infrastructure.
+
+**`variables.tf`:**
+
+```
+variable "region" {
+  default = "us-west-1"
+}
+```
+
+Now you can use this variable by
+inserting the expression `${var.region}`:
+
+```
+provider "aws" {
+  region = "${var.region}"
+}
+```
+
+This can also be set on the command line:
+
+```
+$ terraform apply \
+    -var 'region=us-east-1'
+```
+
+If you set values in a separate varfile rather than a `.tf` file,
+pass it with the `-var-file` command line argument:
+
+```
+$ terraform apply \
+    -var-file="production.tfvars"
+```
+
+### Output Variables
+
+Output variables are defined in
+terraform `.tf` files using `output`:
+
+```
+output "ip" {
+  value = "${aws_instance.example.public_ip}"
+}
+```
+
+To see the value, check the output of `terraform apply`
+or run:
+
+```
+$ terraform output ip
+```
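To illustrate the `-var-file` pattern above: a varfile is just a list of assignments, one per variable. A hypothetical `production.tfvars` matching the `region` variable defined above:

```hcl
# production.tfvars (hypothetical values); overrides the default in variables.tf
region = "us-east-1"
```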
example.tf (10 changes)
@@ -1,10 +0,0 @@
-provider "aws" {
-  region = "us-west-1"
-}
-resource "aws_instance" "example" {
-  ami           = "ami-07585467"
-  instance_type = "t2.micro"
-  provisioner "local-exec" {
-    command = "echo ${aws_instance.example.public_ip} > ip_address.txt"
-  }
-}
main.tf (Normal file, 196 changes)
@@ -0,0 +1,196 @@
# TODO:
# - vpc?
#
# Note:
# - it is the source directive that links the module code with the module block
#
# ============================
# Dahak Workflows Cluster
# ============================
#
# Deploy a VPC and a single cluster
# consisting of a single spy node
# (monitoring and benchmarking)
# and a variable number of yeti
# nodes (worker nodes).

provider "aws" {
  region = "${var.aws_region}"
}

# see https://github.com/hashicorp/terraform/issues/14399
terraform {
  required_version = ">= 0.9.3, != 0.9.5"
}

# ============================
# Allocate Spy Node
# ============================
# Spy node is a simple micro instance.

module "spy_server" {
  # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you
  # to a specific version of the modules, such as the following example:
  #source = "git::git@github.com:hashicorp/terraform-aws-consul.git//modules/consul-cluster?ref=v0.0.1"
  source = "./module"

  cluster_name  = "${var.cluster_name}-spy"
  cluster_size  = "1"
  instance_type = "${var.spy_instance_type}"
  spot_price    = "${var.spot_price}"

  ### # The EC2 Instances will use these tags to automatically discover each other and form a cluster
  ### cluster_tag_key   = "${var.cluster_tag_key}"
  ### cluster_tag_value = "${var.cluster_name}"

  ami_id    = "${var.ami_id}"
  user_data = "${data.template_file.spy_user_data.rendered}"

  vpc_id     = "${data.aws_vpc.dahakvpc.id}"
  subnet_ids = "${data.aws_subnet_ids.default.ids}"

  # To make testing easier, we allow Consul and SSH requests from any IP address here but in a production
  # deployment, we strongly recommend you limit this to the IP address ranges of known, trusted servers inside your VPC.
  allowed_ssh_cidr_blocks = ["0.0.0.0/0"]

  allowed_inbound_cidr_blocks = ["0.0.0.0/0"]
  ssh_key_name                = "${var.ssh_key_name}"

  tags = [
    {
      key                 = "Environment"
      value               = "development"
      propagate_at_launch = true
    },
  ]
}

# ============================
# Deploy Spy Node
# ============================
# Actually deploy the infrastructure
# (apt-get scripts, Python, docker,
# containers, etc.) to spy.

data "template_file" "spy_user_data" {
  template = "${file("${path.module}/dahak-spy/cloud_init/cloud_init.sh")}"

  ### vars {
  ###   cluster_tag_key   = "${var.cluster_tag_key}"
  ###   cluster_tag_value = "${var.cluster_name}"
  ### }
}

# ============================
# Allocate Yeti Node
# ============================
# Yeti node is a beefy node.

module "yeti_server" {
  # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you
  # to a specific version of the modules, such as the following example:
  #source = "git::git@github.com:hashicorp/terraform-aws-consul.git//modules/consul-cluster?ref=v0.0.1"
  source = "./module"

  cluster_name  = "${var.cluster_name}-server"
  cluster_size  = "${var.num_yeti_servers}"
  instance_type = "${var.yeti_instance_type}"
  spot_price    = "${var.spot_price}"

  ### # The EC2 Instances will use these tags to automatically discover each other and form a cluster
  ### cluster_tag_key   = "${var.cluster_tag_key}"
  ### cluster_tag_value = "${var.cluster_name}"

  ami_id    = "${var.ami_id}"
  user_data = "${data.template_file.yeti_user_data.rendered}"

  vpc_id     = "${data.aws_vpc.dahakvpc.id}"
  subnet_ids = "${data.aws_subnet_ids.default.ids}"

  # To make testing easier, we allow Consul and SSH requests from any IP address here but in a production
  # deployment, we strongly recommend you limit this to the IP address ranges of known, trusted servers inside your VPC.
  allowed_ssh_cidr_blocks = ["0.0.0.0/0"]

  allowed_inbound_cidr_blocks = ["0.0.0.0/0"]
  ssh_key_name                = "${var.ssh_key_name}"

  tags = [
    {
      key                 = "Environment"
      value               = "development"
      propagate_at_launch = true
    },
  ]
}

# ============================
# Deploy Yeti Node
# ============================
# Actually deploy the infrastructure
# (apt-get scripts, Python, snakemake,
# singularity, etc.) to yeti.

data "template_file" "yeti_user_data" {
  template = "${file("${path.module}/dahak-yeti/cloud_init/cloud_init.sh")}"

  ### vars {
  ###   cluster_tag_key   = "${var.cluster_tag_key}"
  ###   cluster_tag_value = "${var.cluster_name}"
  ### }
}

# ============================
# Deploy VPC
# ============================
# Assemble the VPC, subnet,
# internet gateway, DNS, DHCP,
# and routing.

# VPC
resource "aws_vpc" "dahakvpc" {
  cidr_block           = "10.99.0.0/16"
  enable_dns_support   = true
  enable_dns_hostnames = true
}

# VPC subnet
resource "aws_subnet" "dahaksubnet" {
  vpc_id                  = "${aws_vpc.dahakvpc.id}"
  cidr_block              = "10.99.0.0/24"
  map_public_ip_on_launch = true
  availability_zone       = "us-west-1a"
  tags {
    Name = "namedahaksubnet"
  }
}

# Internet gateway
resource "aws_internet_gateway" "dahakgw" {
  vpc_id = "${aws_vpc.dahakvpc.id}"
  tags {
    Name = "namedahakgw"
  }
}

# Route
resource "aws_route" "internet_access" {
  route_table_id         = "${aws_vpc.dahakvpc.main_route_table_id}"
  destination_cidr_block = "0.0.0.0/0"
  gateway_id             = "${aws_internet_gateway.dahakgw.id}"
}

# Route table
resource "aws_route_table" "private_route_table" {
  vpc_id = "${aws_vpc.dahakvpc.id}"
  tags {
    Name = "Private route table"
  }
}

# Associate the subnet with the VPC's main route table.
resource "aws_route_table_association" "dahaksubnet_association" {
  subnet_id      = "${aws_subnet.dahaksubnet.id}"
  route_table_id = "${aws_vpc.dahakvpc.main_route_table_id}"
}
modules/dahak-cluster/README.md (Normal file, 46 changes)
@@ -0,0 +1,46 @@
# dahak cluster

(work in progress)

This folder contains a [Terraform module](https://www.terraform.io/docs/modules/usage.html)
to deploy a dahak cluster consisting of a VPC, a spy monitoring node, and one or more yeti worker nodes.

## using this module

This folder defines a Terraform module, which you can use in your
code by adding a `module` configuration and setting its `source` parameter
to the URL of this folder:

```hcl
module "dahak_cluster" {
  # TODO: update this
  source = "github.com/hashicorp/terraform-aws-consul//modules/consul-cluster?ref=v0.0.5"

  # TODO: update this
  # amazon image ID
  ami_id = "ami-abcd1234"

  # Configure and start the nodes
  user_data = <<-EOF
              #!/bin/bash
              /opt/consul/bin/run-consul --server --cluster-tag-key consul-cluster
              EOF

  # ... See variables.tf for the other parameters you must define for the consul-cluster module
}
```

Note the following parameters:

* `source`: Use this parameter to specify the URL of the terraform module we are using.
  The double slash (`//`) is intentional and required. Terraform uses it to specify subfolders within a Git repo.
  The `ref` parameter specifies a specific Git tag in this repo. It ensures you are using a fixed version of the repo.

* `ami_id`: Use this parameter to specify the amazon machine image to install on the nodes of the cluster.

* `user_data`: Use this parameter to specify user data (cloud init scripts).

You can find the other parameters in [variables.tf](variables.tf).

Check out the [consul-cluster example](https://github.com/hashicorp/terraform-aws-consul/tree/master/MAIN.md) for fully-working sample code.
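Once this module is published at a tagged release, the pinned form of `source` described above would look roughly like this (the repository URL and tag are assumptions, not taken from this diff):

```hcl
module "dahak_cluster" {
  # Hypothetical pinned URL; // separates the repo from the module subfolder
  source = "github.com/charlesreid1/dahak-bespin//modules/dahak-cluster?ref=v0.0.1"

  # ... module inputs as documented in variables.tf ...
}
```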
modules/dahak-cluster/main.tf (Normal file, 122 changes)
@@ -0,0 +1,122 @@
# A modern terraform version is required
terraform {
  required_version = ">= 0.9.3"
}

# This is going to be called
# each time we create a module
# and point to this directory.
#
# In other words, we are calling
# this once for spy and once for
# each yeti node.
#
# The parameters come from the main.tf
# and vars.tf in the parent directory.
#
resource "aws_launch_configuration" "launch_configuration" {
  name_prefix   = "${var.cluster_name}-"
  image_id      = "${var.ami_id}"
  instance_type = "${var.instance_type}"
  user_data     = "${var.user_data}"
  spot_price    = "${var.spot_price}"

  iam_instance_profile        = "${aws_iam_instance_profile.instance_profile.name}"
  key_name                    = "${var.ssh_key_name}"
  security_groups             = ["${aws_security_group.lc_security_group.id}"]
  placement_tenancy           = "${var.tenancy}"
  associate_public_ip_address = "${var.associate_public_ip_address}"

  ebs_optimized = "${var.root_volume_ebs_optimized}"

  root_block_device {
    volume_type           = "${var.root_volume_type}"
    volume_size           = "${var.root_volume_size}"
    delete_on_termination = "${var.root_volume_delete_on_termination}"
  }

  # Important note: whenever using a launch configuration with an auto scaling group, you must set
  # create_before_destroy = true. However, as soon as you set create_before_destroy = true in one resource, you must
  # also set it in every resource that it depends on, or you'll get an error about cyclic dependencies (especially when
  # removing resources). For more info, see:
  #
  # https://www.terraform.io/docs/providers/aws/r/launch_configuration.html
  # https://terraform.io/docs/configuration/resources.html
  lifecycle {
    create_before_destroy = true
  }
}

# Create a security group
resource "aws_security_group" "lc_security_group" {
  name_prefix = "${var.cluster_name}"
  description = "Security group for the ${var.cluster_name} launch configuration"
  vpc_id      = "${var.vpc_id}"

  # aws_launch_configuration.launch_configuration in this module sets create_before_destroy to true, which means
  # everything it depends on, including this resource, must set it as well, or you'll get cyclic dependency errors
  # when you try to do a terraform destroy.
  lifecycle {
    create_before_destroy = true
  }

  tags {
    Name = "${var.cluster_name}"
  }
}

# Security group rules:

resource "aws_security_group_rule" "allow_ssh_inbound" {
  count       = "${length(var.allowed_ssh_cidr_blocks) >= 1 ? 1 : 0}"
  type        = "ingress"
  from_port   = "${var.ssh_port}"
  to_port     = "${var.ssh_port}"
  protocol    = "tcp"
  cidr_blocks = ["${var.allowed_ssh_cidr_blocks}"]

  security_group_id = "${aws_security_group.lc_security_group.id}"
}

resource "aws_security_group_rule" "allow_ssh_inbound_from_security_group_ids" {
  count                    = "${length(var.allowed_ssh_security_group_ids)}"
  type                     = "ingress"
  from_port                = "${var.ssh_port}"
  to_port                  = "${var.ssh_port}"
  protocol                 = "tcp"
  source_security_group_id = "${element(var.allowed_ssh_security_group_ids, count.index)}"

  security_group_id = "${aws_security_group.lc_security_group.id}"
}

resource "aws_security_group_rule" "allow_all_outbound" {
  type        = "egress"
  from_port   = 0
  to_port     = 0
  protocol    = "-1"
  cidr_blocks = ["0.0.0.0/0"]

  security_group_id = "${aws_security_group.lc_security_group.id}"
}

module "security_group_rules" {
  source = "../consul-security-group-rules"

  security_group_id                  = "${aws_security_group.lc_security_group.id}"
  allowed_inbound_cidr_blocks        = ["${var.allowed_inbound_cidr_blocks}"]
  allowed_inbound_security_group_ids = ["${var.allowed_inbound_security_group_ids}"]

  server_rpc_port = "${var.server_rpc_port}"
  cli_rpc_port    = "${var.cli_rpc_port}"
  serf_lan_port   = "${var.serf_lan_port}"
  serf_wan_port   = "${var.serf_wan_port}"
  http_api_port   = "${var.http_api_port}"
  dns_port        = "${var.dns_port}"
}
modules/dahak-security-rules/README.md (Normal file, 9 changes)
@@ -0,0 +1,9 @@
# dahak cluster security rules

(work in progress)

This directory contains configuration files
that control/set rules for the security group
associated with the dahak cluster.

[also see](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/consul-security-group-rules)
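A usage sketch, modeled on the `security_group_rules` block in `modules/dahak-cluster/main.tf` above (the relative `source` path is an assumption; that file currently points at `../consul-security-group-rules`):

```hcl
module "security_group_rules" {
  source = "../dahak-security-rules"  # assumed path to this directory

  # Required inputs (see vars.tf); port inputs fall back to their defaults
  security_group_id           = "${aws_security_group.lc_security_group.id}"
  allowed_inbound_cidr_blocks = ["10.99.0.0/16"]
}
```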
modules/dahak-security-rules/main.tf (Normal file, 198 changes)
@@ -0,0 +1,198 @@
# CREATE THE SECURITY GROUP RULES THAT CONTROL WHAT TRAFFIC CAN GO IN AND OUT OF A CONSUL CLUSTER
resource "aws_security_group_rule" "allow_server_rpc_inbound" {
  count       = "${length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0}"
  type        = "ingress"
  from_port   = "${var.server_rpc_port}"
  to_port     = "${var.server_rpc_port}"
  protocol    = "tcp"
  cidr_blocks = ["${var.allowed_inbound_cidr_blocks}"]

  security_group_id = "${var.security_group_id}"
}

resource "aws_security_group_rule" "allow_cli_rpc_inbound" {
  count       = "${length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0}"
  type        = "ingress"
  from_port   = "${var.cli_rpc_port}"
  to_port     = "${var.cli_rpc_port}"
  protocol    = "tcp"
  cidr_blocks = ["${var.allowed_inbound_cidr_blocks}"]

  security_group_id = "${var.security_group_id}"
}

resource "aws_security_group_rule" "allow_serf_lan_tcp_inbound" {
  count       = "${length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0}"
  type        = "ingress"
  from_port   = "${var.serf_lan_port}"
  to_port     = "${var.serf_lan_port}"
  protocol    = "tcp"
  cidr_blocks = ["${var.allowed_inbound_cidr_blocks}"]

  security_group_id = "${var.security_group_id}"
}

resource "aws_security_group_rule" "allow_serf_lan_udp_inbound" {
  count       = "${length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0}"
  type        = "ingress"
  from_port   = "${var.serf_lan_port}"
  to_port     = "${var.serf_lan_port}"
  protocol    = "udp"
  cidr_blocks = ["${var.allowed_inbound_cidr_blocks}"]

  security_group_id = "${var.security_group_id}"
}

resource "aws_security_group_rule" "allow_serf_wan_tcp_inbound" {
  count       = "${length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0}"
  type        = "ingress"
  from_port   = "${var.serf_wan_port}"
  to_port     = "${var.serf_wan_port}"
  protocol    = "tcp"
  cidr_blocks = ["${var.allowed_inbound_cidr_blocks}"]

  security_group_id = "${var.security_group_id}"
}

resource "aws_security_group_rule" "allow_serf_wan_udp_inbound" {
  count       = "${length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0}"
  type        = "ingress"
  from_port   = "${var.serf_wan_port}"
  to_port     = "${var.serf_wan_port}"
  protocol    = "udp"
  cidr_blocks = ["${var.allowed_inbound_cidr_blocks}"]

  security_group_id = "${var.security_group_id}"
}

resource "aws_security_group_rule" "allow_http_api_inbound" {
  count       = "${length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0}"
  type        = "ingress"
  from_port   = "${var.http_api_port}"
  to_port     = "${var.http_api_port}"
  protocol    = "tcp"
  cidr_blocks = ["${var.allowed_inbound_cidr_blocks}"]

  security_group_id = "${var.security_group_id}"
}

resource "aws_security_group_rule" "allow_dns_tcp_inbound" {
  count       = "${length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0}"
  type        = "ingress"
  from_port   = "${var.dns_port}"
  to_port     = "${var.dns_port}"
  protocol    = "tcp"
  cidr_blocks = ["${var.allowed_inbound_cidr_blocks}"]

  security_group_id = "${var.security_group_id}"
}

resource "aws_security_group_rule" "allow_dns_udp_inbound" {
  count       = "${length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0}"
  type        = "ingress"
  from_port   = "${var.dns_port}"
  to_port     = "${var.dns_port}"
  protocol    = "udp"
  cidr_blocks = ["${var.allowed_inbound_cidr_blocks}"]

  security_group_id = "${var.security_group_id}"
}

resource "aws_security_group_rule" "allow_server_rpc_inbound_from_security_group_ids" {
  count                    = "${length(var.allowed_inbound_security_group_ids)}"
  type                     = "ingress"
  from_port                = "${var.server_rpc_port}"
  to_port                  = "${var.server_rpc_port}"
  protocol                 = "tcp"
  source_security_group_id = "${element(var.allowed_inbound_security_group_ids, count.index)}"

  security_group_id = "${var.security_group_id}"
}

resource "aws_security_group_rule" "allow_cli_rpc_inbound_from_security_group_ids" {
  count                    = "${length(var.allowed_inbound_security_group_ids)}"
  type                     = "ingress"
  from_port                = "${var.cli_rpc_port}"
  to_port                  = "${var.cli_rpc_port}"
  protocol                 = "tcp"
  source_security_group_id = "${element(var.allowed_inbound_security_group_ids, count.index)}"

  security_group_id = "${var.security_group_id}"
}

resource "aws_security_group_rule" "allow_serf_lan_tcp_inbound_from_security_group_ids" {
  count                    = "${length(var.allowed_inbound_security_group_ids)}"
  type                     = "ingress"
  from_port                = "${var.serf_lan_port}"
  to_port                  = "${var.serf_lan_port}"
  protocol                 = "tcp"
  source_security_group_id = "${element(var.allowed_inbound_security_group_ids, count.index)}"

  security_group_id = "${var.security_group_id}"
}

resource "aws_security_group_rule" "allow_serf_lan_udp_inbound_from_security_group_ids" {
  count                    = "${length(var.allowed_inbound_security_group_ids)}"
  type                     = "ingress"
  from_port                = "${var.serf_lan_port}"
  to_port                  = "${var.serf_lan_port}"
  protocol                 = "udp"
  source_security_group_id = "${element(var.allowed_inbound_security_group_ids, count.index)}"

  security_group_id = "${var.security_group_id}"
}

resource "aws_security_group_rule" "allow_serf_wan_tcp_inbound_from_security_group_ids" {
  count                    = "${length(var.allowed_inbound_security_group_ids)}"
  type                     = "ingress"
  from_port                = "${var.serf_wan_port}"
  to_port                  = "${var.serf_wan_port}"
  protocol                 = "tcp"
  source_security_group_id = "${element(var.allowed_inbound_security_group_ids, count.index)}"

  security_group_id = "${var.security_group_id}"
}

resource "aws_security_group_rule" "allow_serf_wan_udp_inbound_from_security_group_ids" {
  count                    = "${length(var.allowed_inbound_security_group_ids)}"
  type                     = "ingress"
  from_port                = "${var.serf_wan_port}"
  to_port                  = "${var.serf_wan_port}"
  protocol                 = "udp"
  source_security_group_id = "${element(var.allowed_inbound_security_group_ids, count.index)}"

  security_group_id = "${var.security_group_id}"
}

resource "aws_security_group_rule" "allow_http_api_inbound_from_security_group_ids" {
  count                    = "${length(var.allowed_inbound_security_group_ids)}"
  type                     = "ingress"
  from_port                = "${var.http_api_port}"
  to_port                  = "${var.http_api_port}"
  protocol                 = "tcp"
  source_security_group_id = "${element(var.allowed_inbound_security_group_ids, count.index)}"

  security_group_id = "${var.security_group_id}"
}

resource "aws_security_group_rule" "allow_dns_tcp_inbound_from_security_group_ids" {
  count                    = "${length(var.allowed_inbound_security_group_ids)}"
  type                     = "ingress"
  from_port                = "${var.dns_port}"
  to_port                  = "${var.dns_port}"
  protocol                 = "tcp"
  source_security_group_id = "${element(var.allowed_inbound_security_group_ids, count.index)}"

  security_group_id = "${var.security_group_id}"
}

resource "aws_security_group_rule" "allow_dns_udp_inbound_from_security_group_ids" {
  count                    = "${length(var.allowed_inbound_security_group_ids)}"
  type                     = "ingress"
  from_port                = "${var.dns_port}"
  to_port                  = "${var.dns_port}"
  protocol                 = "udp"
  source_security_group_id = "${element(var.allowed_inbound_security_group_ids, count.index)}"

  security_group_id = "${var.security_group_id}"
}
modules/dahak-security-rules/vars.tf (Normal file, 54 changes)
@@ -0,0 +1,54 @@
# ---------------------------------------------------------------------------------------------------------------------
# REQUIRED PARAMETERS
# You must provide a value for each of these parameters.
# ---------------------------------------------------------------------------------------------------------------------

variable "security_group_id" {
  description = "The ID of the security group to which we should add the Consul security group rules"
}

variable "allowed_inbound_cidr_blocks" {
  description = "A list of CIDR-formatted IP address ranges from which the EC2 Instances will allow connections to Consul"
  type        = "list"
}

# ---------------------------------------------------------------------------------------------------------------------
# OPTIONAL PARAMETERS
# These parameters have reasonable defaults.
# ---------------------------------------------------------------------------------------------------------------------

variable "allowed_inbound_security_group_ids" {
  description = "A list of security group IDs that will be allowed to connect to Consul"
  type        = "list"
  default     = []
}

variable "server_rpc_port" {
  description = "The port used by servers to handle incoming requests from other agents."
  default     = 8300
}

variable "cli_rpc_port" {
  description = "The port used by all agents to handle RPC from the CLI."
  default     = 8400
}

variable "serf_lan_port" {
  description = "The port used to handle gossip in the LAN. Required by all agents."
  default     = 8301
}

variable "serf_wan_port" {
  description = "The port used by servers to gossip over the WAN to other servers."
  default     = 8302
}

variable "http_api_port" {
  description = "The port used by clients to talk to the HTTP API"
  default     = 8500
}

variable "dns_port" {
  description = "The port used to resolve DNS queries."
  default     = 8600
}
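Callers can override any of the optional defaults above alongside the two required inputs; a sketch (the `source` path is assumed and the port value is hypothetical):

```hcl
module "security_group_rules" {
  source = "../dahak-security-rules"  # assumed relative path

  security_group_id           = "${aws_security_group.lc_security_group.id}"
  allowed_inbound_cidr_blocks = ["10.99.0.0/16"]

  # Override an optional default, e.g. move the Consul HTTP API off 8500
  http_api_port = 8501
}
```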
variables.tf (Normal file, 57 changes)
@@ -0,0 +1,57 @@
# ---------------------------------------------------------------------------------------------------------------------
# ENVIRONMENT VARIABLES
# Define these secrets as environment variables
# ---------------------------------------------------------------------------------------------------------------------

# AWS_ACCESS_KEY_ID
# AWS_SECRET_ACCESS_KEY

# ---------------------------------------------------------------------------------------------------------------------
# OPTIONAL PARAMETERS
# These parameters have reasonable defaults.
# ---------------------------------------------------------------------------------------------------------------------

variable "ami_id" {
  description = "The ID of the AMI to run in the cluster."
  default     = ""
}

variable "aws_region" {
  description = "The AWS region to deploy into (e.g. us-east-1)."
  default     = "us-east-1"
}

variable "cluster_name" {
  description = "What to name the dahak cluster and all of its associated resources"
  default     = "dahak-test-cluster"
}

variable "spy_instance_type" {
  description = "The type of instance to deploy for the spy node."
  default     = "t2.micro"
}

variable "num_yeti_servers" {
  description = "The number of yeti workers to deploy."
  default     = 1
}

variable "yeti_instance_type" {
  description = "The type of instance to deploy for the yeti workers."
  default     = "m5.4xlarge"
}

### variable "cluster_tag_key" {
###   description = "The tag the EC2 Instances will look for to automatically discover each other and form a cluster."
###   default     = "consul-servers"
### }

variable "ssh_key_name" {
  description = "The name of an EC2 Key Pair that can be used to SSH to the EC2 Instances in this cluster. Set to an empty string to not associate a Key Pair."
  default     = ""
}

variable "spot_price" {
  description = "The maximum hourly price to pay for EC2 Spot Instances."
  default     = "0.28"
}
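The two AWS secrets stay in the environment; everything else can be overridden per run, either with `-var` flags or a varfile. A hypothetical `terraform.tfvars` overriding some of these defaults:

```hcl
# terraform.tfvars (hypothetical values)
aws_region         = "us-west-1"
cluster_name       = "dahak-prod-cluster"
num_yeti_servers   = 4
yeti_instance_type = "m5.4xlarge"
ssh_key_name       = "my-ec2-keypair"
```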