36 Commits

Author SHA1 Message Date
1cec6bff4c add bootstrap to docs style 2018-03-30 15:32:17 -07:00
d011abda06 update readme 2018-03-30 15:31:35 -07:00
0a136a0532 fix source to be relative directory 2018-03-30 15:31:21 -07:00
a467bb1009 removing spy from .gitmodules 2018-03-28 15:01:57 -07:00
1d96211bda removing spy submodule 2018-03-28 15:01:42 -07:00
684bdac1b6 Removing submodules. Use cloud init and pipe-to-bash instead. 2018-03-28 14:57:58 -07:00
8f15227287 update docs styles 2018-03-28 14:57:20 -07:00
038b3adde8 add cloud init scripts 2018-03-28 14:56:33 -07:00
79a766aa51 add modules structure - one for cluster launch config, one for security rules 2018-03-27 18:16:21 -07:00
f0ce583548 fix user data block naming problem 2018-03-27 17:27:00 -07:00
994eab3bd3 updating yeti/spy pointers 2018-03-27 17:26:40 -07:00
a12993b135 add terraform module variables file 2018-03-27 17:13:35 -07:00
ec4140cb22 Adding spy and yeti submodules, adding main terraform module file 2018-03-27 16:59:22 -07:00
dd3f177982 updating readme with terraform module info 2018-03-27 16:58:31 -07:00
dc97be0765 removing terraform files - adding new terraform structure into repo next. 2018-03-27 15:26:50 -07:00
ca3a72e49d add light gray background 2018-03-26 15:25:28 -07:00
d7503c7cdd update index with bullet list 2018-03-23 12:59:01 -07:00
85b4bcb924 Add dahak VPC workflow, and update documentation 2018-03-23 12:31:26 -07:00
0b6524db77 Merge branch 'master' into terraform
* master:
  add a big ass todo list
  major overhaul to vpc script, filling in the gaps
  minor updates to node scripts
  bespin cli should rely on dotfiles to determine existence of resources
  remove .py from bespin, and switch to /usr/bin/env python for shebang
  fix readme output
  add some of the command output to the readme.
  bespin now checks for stashfiles.
  add a "how it works" document to the readme
  add functions to generate random labels for networks.
  Adding base AWS class with AWS accessor objects
  Adding complete skeleton for bespin
  Adding initial versions of node/spy/yeti/vpc.
  add skeleton command-subcommand argparser object
  updating readme docs
  Update readme and add information about network architecture.
  Initial commit
2018-03-23 01:52:49 -07:00
60ad0ca83b add a big ass todo list 2018-03-22 20:59:34 -07:00
bd7f5dbe40 major overhaul to vpc script, filling in the gaps 2018-03-22 20:20:19 -07:00
f401ae4e38 minor updates to node scripts 2018-03-22 20:19:55 -07:00
6974f720ee bespin cli should rely on dotfiles to determine existence of resources 2018-03-22 20:18:48 -07:00
c9beb6c4c4 remove .py from bespin, and switch to /usr/bin/env python for shebang 2018-03-16 14:56:36 -07:00
6eea6eacda fix readme output 2018-03-16 14:52:32 -07:00
74f3846c63 add some of the command output to the readme. 2018-03-16 14:49:45 -07:00
1c76afe50f bespin now checks for stashfiles. 2018-03-15 03:03:35 -07:00
305e502957 add a "how it works" document to the readme 2018-03-15 03:03:02 -07:00
58c73cae50 add functions to generate random labels for networks. 2018-03-15 03:02:49 -07:00
24ab131297 Adding base AWS class with AWS accessor objects 2018-03-15 00:09:28 -07:00
30d2d4d6e5 Adding complete skeleton for bespin 2018-03-15 00:01:08 -07:00
73d1b885b7 Adding initial versions of node/spy/yeti/vpc.
This implements a first pass at a command line interface,
and implements a few sub-commands and sub-sub-commands
for doing stuff with a VPC.

Try it out:

./bespin vpc build
./bespin vpc destroy
./bespin vpc info
2018-03-14 16:45:03 -07:00
174ef7e840 add skeleton command-subcommand argparser object 2018-03-14 14:23:02 -07:00
f78e8d0638 updating readme docs 2018-03-14 12:15:22 -07:00
6cfe2452f6 Update readme and add information about network architecture. 2018-03-14 01:42:43 -07:00
f9aac95f46 Initial commit 2018-03-14 00:46:12 -07:00
19 changed files with 854 additions and 51 deletions

0
.gitmodules vendored Normal file
View File

View File

@@ -6,3 +6,12 @@ for automated testing of dahak workflows.
See [charlesreid1.github.io/dahak-bespin](https://charlesreid1.github.io/dahak-bespin).
Inspiration: [terraform-aws-consul](https://github.com/hashicorp/terraform-aws-consul)
Terraform module organization:
* root: This folder shows an example of Terraform code that uses a terraform module to deploy a cluster in AWS.
* module: This folder contains the reusable code for this Module
* examples: This folder contains examples of how to use the module.
* test: Automated tests for the module and examples.

View File

@@ -1,18 +0,0 @@
# New resource for the S3 bucket our application will use.
resource "aws_s3_bucket" "example" {
# NOTE: S3 bucket names must be unique across _all_ AWS accounts, so
# this name must be changed before applying this example to avoid naming
# conflicts.
bucket = "terraform-getting-started-guide"
acl = "private"
}
# Change the aws_instance we declared earlier to now include "depends_on"
resource "aws_instance" "example" {
ami = "ami-2757f631"
instance_type = "t2.micro"
# Tells Terraform that this EC2 instance must be created only after the
# S3 bucket has been created.
depends_on = ["aws_s3_bucket.example"]
}

2
cloud_init/spy.sh Normal file
View File

@@ -0,0 +1,2 @@
#!/bin/bash
# Bootstrap a spy node: fetch the dahak-spy cloud-init script from GitHub and run it.
# NOTE(review): this executes whatever is currently on master of a remote repo —
# consider pinning to a commit SHA so node provisioning is reproducible.
bash <( curl https://raw.githubusercontent.com/charlesreid1/dahak-spy/master/cloud_init/cloud_init.sh )

2
cloud_init/yeti.sh Normal file
View File

@@ -0,0 +1,2 @@
#!/bin/bash
# Bootstrap a yeti node: fetch the dahak-yeti cloud-init script from GitHub and run it.
# NOTE(review): executes unpinned remote code from master — consider pinning a commit SHA.
bash <( curl https://raw.githubusercontent.com/charlesreid1/dahak-yeti/master/cloud_init/cloud_init.sh )

6
docs/_static/bootstrap.min.css vendored Normal file

File diff suppressed because one or more lines are too long

6
docs/_static/custom.css vendored Normal file
View File

@@ -0,0 +1,6 @@
/* Light gray background for both the page and the Sphinx document body. */
body {
background-color: #efefef;
}
div.body {
background-color: #efefef;
}

View File

@@ -19,9 +19,9 @@
# -- Project information -----------------------------------------------------
project = 'bespin'
copyright = '2018, charles reid'
author = 'charles reid'
project = 'dahak-bespin'
copyright = '2018'
author = 'DIB Lab'
# The short X.Y version
version = ''
@@ -47,8 +47,6 @@ extensions = [
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser'
}
@@ -86,6 +84,20 @@ html_theme = 'alabaster'
#
# html_theme_options = {}
# wow:
# https://alabaster.readthedocs.io/en/latest/customization.html
html_theme_options = {
'github_user': 'charlesreid1',
'github_repo': 'dahak-bespin',
'github_button' : 'true',
#'analytics_id' : '???',
'fixed_sidebar' : 'true',
'github_banner' : 'true',
'pre_bg' : '#fff'
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
@@ -102,14 +114,15 @@ html_static_path = ['_static']
# html_sidebars = {}
html_context = {
"github_project" : 'dahak-bespin',
# "google_analytics_id" : 'UA-00000000-1',
"github_base_account" : 'charlesreid1',
"github_project" : 'dahak-taco',
}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'bespindoc'
htmlhelp_basename = 'dahak-bespindoc'
# -- Options for LaTeX output ------------------------------------------------
@@ -136,7 +149,7 @@ latex_elements = {
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'bespin.tex', 'bespin Documentation',
(master_doc, 'dahak-bespin.tex', 'bespin Documentation',
'charles reid', 'manual'),
]
@@ -146,7 +159,7 @@ latex_documents = [
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'bespin', 'bespin Documentation',
(master_doc, 'dahak-bespin', 'dahak-bespin Documentation',
[author], 1)
]
@@ -157,10 +170,14 @@ man_pages = [
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'bespin', 'bespin Documentation',
author, 'bespin', 'One line description of project.',
(master_doc, 'dahak-bespin', 'dahak-bespin Documentation',
author, 'dahak-bespin', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
def setup(app):
app.add_stylesheet('bootstrap.min.css')

View File

@@ -18,21 +18,58 @@ we use the following architecture:
| | | | | | +---------------+ | |
| | | | | | | yeti3 | | | | |
| | | | <--------+ | | | | | | |
| | | | | | | | | | | |
| | | | <-----------+ | | | | | |
| | | | | | | | | | | |
| | | | <---------------+ | | | | |
| | +------------+ +-----------------+ | | | |
| | | | | | | |
| | +-----------------+ | | |
| | | | | |
| | +------------+ +--|---|----------+ | | | |
| | +---|-------------+ | | |
| | +---------------+ | |
| | | |
| | | |
| +----------------------------------------------------+ |
| |
+------------------------------------------------------------+
```
## Dahak Infrastructure
Dahak workflows will require:
* VPC to connect nodes
* 1 spy node to monitor and log
* 1+ yeti nodes to run workflows
## Dahak Terraform Files
### VPC
The VPC will allocate an IP address space 10.X.0.0/16.
The VPC subnet will allocate an IP address space 10.X.0.0/24.
The VPC will require AWS-provided DNS/DHCP.
The VPC will require an internet gateway.
The VPC will require a routing table pointing to the gateway.
### Spy Node
The spy node will need to run the cloud init scripts
contained in [dahak-spy](https://github.com/charlesreid1/dahak-spy).
### Yeti Node
The yeti node will need to run the cloud init scripts
contained in [dahak-yeti](https://github.com/charlesreid1/dahak-yeti).

View File

@@ -1,11 +1,23 @@
.. bespin documentation master file, created by
sphinx-quickstart on Fri Mar 23 00:08:44 2018.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
.. _index:
================
dahak-bespin
================
dahak-bespin is a framework for allocating
cloud infrastructure to run dahak workflows.
* See source code at `charlesreid1/dahak-bespin on github
<https://github.com/charlesreid1/dahak-bespin>`_.
* Sphinx documentation hosted at `charlesreid1.github.io/dahak-bespin
<https://charlesreid1.github.io/dahak-bespin>`_.
* This package uses `terraform <https://www.terraform.io/>`_.
Using Bespin via Terraform
===========================
.. toctree::
:maxdepth: 2
:caption: Contents:

View File

@@ -114,3 +114,61 @@ use the destroy command:
$ terraform destroy
```
## Using Variables in Terraform
### Input Variables
You can define input variables in a file `variables.tf`
and use them to set up infrastructure.
**`variables.tf`:**
```
variable "region" {
default = "us-west-1"
}
```
Now you can use this variable by
inserting the expression `${var.region}`:
```
provider "aws" {
region = "${var.region}"
}
```
This can also be set on the command line:
```
$ terraform apply \
-var 'region=us-east-1'
```
If you name the varfile something other than `.tf`,
use the `-var-file` command line argument:
```
$ terraform apply \
-var-file="production.tfvars"
```
### Output Variables
Output variables are defined in
terraform `.tf` files using `output`:
```
output "ip" {
value = "${aws_instance.example.public_ip}"
}
```
To see the value, check the output of `terraform apply`
or run:
```
$ terraform output ip
```

View File

@@ -1,10 +0,0 @@
provider "aws" {
region = "us-west-1"
}
resource "aws_instance" "example" {
ami = "ami-07585467"
instance_type = "t2.micro"
provisioner "local-exec" {
command = "echo ${aws_instance.example.public_ip} > ip_address.txt"
}
}

196
main.tf Normal file
View File

@@ -0,0 +1,196 @@
# TODO:
# - vpc?
#
# Note:
# - it is the source directive that links the module code with the module block
#
# ============================
# Dahak Workflows Cluster
# ============================
#
# Deploy a VPC and a single cluster
# consisting of a single spy node
# (monitoring and benchmarking)
# and a variable number of yeti
# nodes (worker nodes).
# AWS provider; target region is supplied by var.aws_region (see variables.tf).
provider "aws" {
region = "${var.aws_region}"
}
# see https://github.com/hashicorp/terraform/issues/14399
# Pin the Terraform core version; 0.9.5 is explicitly excluded per the
# hashicorp/terraform issue linked above.
terraform {
required_version = ">= 0.9.3, != 0.9.5"
}
# ============================
# Allocate Spy Node
# ============================
# Spy node is a simple micro instance.
# Single-instance cluster for the spy (monitoring/benchmarking) node,
# built from the reusable cluster module in ./module.
module "spy_server" {
# When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you
# to a specific version of the modules, such as the following example:
#source = "git::git@github.com:hashicorp/terraform-aws-consul.git/modules/consul-cluster?ref=v0.0.1"
source = "./module"
cluster_name = "${var.cluster_name}-spy"
cluster_size = "1"
instance_type = "${var.spy_instance_type}"
spot_price = "${var.spot_price}"
### # The EC2 Instances will use these tags to automatically discover each other and form a cluster
### cluster_tag_key = "${var.cluster_tag_key}"
### cluster_tag_value = "${var.cluster_name}"
ami_id = "${var.ami_id}"
user_data = "${data.template_file.spy_user_data.rendered}"
# NOTE(review): these reference DATA sources `data.aws_vpc.dahakvpc` and
# `data.aws_subnet_ids.default`, but this file only declares a RESOURCE
# `aws_vpc.dahakvpc` below and no aws_subnet_ids data source — confirm these
# data blocks exist elsewhere, or switch to "${aws_vpc.dahakvpc.id}".
vpc_id = "${data.aws_vpc.dahakvpc.id}"
subnet_ids = "${data.aws_subnet_ids.default.ids}"
# To make testing easier, we allow Consul and SSH requests from any IP address here but in a production
# deployment, we strongly recommend you limit this to the IP address ranges of known, trusted servers inside your VPC.
allowed_ssh_cidr_blocks = ["0.0.0.0/0"]
allowed_inbound_cidr_blocks = ["0.0.0.0/0"]
ssh_key_name = "${var.ssh_key_name}"
tags = [
{
key = "Environment"
value = "development"
propagate_at_launch = true
},
]
}
# ============================
# Deploy Spy Node
# ============================
# Actually deploy the infrastructure
# (apt-get scripts, Python, docker,
# containers, etc.) to spy.
# Render the spy node's cloud-init script to pass as EC2 user data.
# NOTE(review): this reads from "${path.module}/dahak-spy/..." — the commit log
# says the dahak-spy submodule was removed in favor of cloud_init/spy.sh;
# confirm this path still exists at plan time.
data "template_file" "spy_user_data" {
template = "${file("${path.module}/dahak-spy/cloud_init/cloud_init.sh")}"
### vars {
### cluster_tag_key = "${var.cluster_tag_key}"
### cluster_tag_value = "${var.cluster_name}"
### }
}
# ============================
# Allocate Yeti Node
# ============================
# Yeti node is a beefy node.
# Worker cluster of yeti nodes (size var.num_yeti_servers), built from the
# same reusable cluster module as the spy node.
module "yeti_server" {
# When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you
# to a specific version of the modules, such as the following example:
#source = "git::git@github.com:hashicorp/terraform-aws-consul.git/modules/consul-cluster?ref=v0.0.1"
source = "./module"
cluster_name = "${var.cluster_name}-server"
cluster_size = "${var.num_yeti_servers}"
instance_type = "${var.yeti_instance_type}"
spot_price = "${var.spot_price}"
### # The EC2 Instances will use these tags to automatically discover each other and form a cluster
### cluster_tag_key = "${var.cluster_tag_key}"
### cluster_tag_value = "${var.cluster_name}"
ami_id = "${var.ami_id}"
user_data = "${data.template_file.yeti_user_data.rendered}"
# NOTE(review): same as spy_server — `data.aws_vpc.dahakvpc` and
# `data.aws_subnet_ids.default` are not declared in this file; the VPC is a
# resource below. Verify, or reference "${aws_vpc.dahakvpc.id}" directly.
vpc_id = "${data.aws_vpc.dahakvpc.id}"
subnet_ids = "${data.aws_subnet_ids.default.ids}"
# To make testing easier, we allow Consul and SSH requests from any IP address here but in a production
# deployment, we strongly recommend you limit this to the IP address ranges of known, trusted servers inside your VPC.
allowed_ssh_cidr_blocks = ["0.0.0.0/0"]
allowed_inbound_cidr_blocks = ["0.0.0.0/0"]
ssh_key_name = "${var.ssh_key_name}"
tags = [
{
key = "Environment"
value = "development"
propagate_at_launch = true
},
]
}
# ============================
# Deploy Yeti Node
# ============================
# Actually deploy the infrastructure
# (apt-get scripts, Python, snakemake,
# singularity, etc.) to yeti.
# Render the yeti node's cloud-init script to pass as EC2 user data.
# NOTE(review): reads "${path.module}/dahak-yeti/..." — the dahak-yeti submodule
# was removed per the commit log; confirm this path still exists at plan time.
data "template_file" "yeti_user_data" {
template = "${file("${path.module}/dahak-yeti/cloud_init/cloud_init.sh")}"
### vars {
### cluster_tag_key = "${var.cluster_tag_key}"
### cluster_tag_value = "${var.cluster_name}"
### }
}
# ============================
# Deploy VPC
# ============================
# Assemble the VPC, subnet,
# internet gateway, DNS, DHCP,
# VPC
# VPC spanning 10.99.0.0/16 with AWS-provided DNS support and hostnames enabled.
resource "aws_vpc" "dahakvpc" {
cidr_block = "10.99.0.0/16"
enable_dns_support = true
enable_dns_hostnames = true
}
# VPC subnet
# Public subnet (10.99.0.0/24) inside the VPC; instances get public IPs at launch.
# NOTE(review): availability zone is hard-coded to us-west-1a while
# var.aws_region defaults to us-east-1 in variables.tf — these will conflict
# unless the region is overridden; consider deriving the AZ from the region.
resource "aws_subnet" "dahaksubnet" {
vpc_id = "${aws_vpc.dahakvpc.id}"
cidr_block = "10.99.0.0/24"
map_public_ip_on_launch = true
availability_zone = "us-west-1a"
tags {
Name = "namedahaksubnet"
}
}
# Internet gateway
# Internet gateway attached to the VPC, giving the subnet a path to the internet.
resource "aws_internet_gateway" "dahakgw" {
vpc_id = "${aws_vpc.dahakvpc.id}"
tags {
Name = "namedahakgw"
}
}
# Route
# Default route (0.0.0.0/0) through the internet gateway, added to the VPC's
# MAIN route table — the same table the subnet association below uses.
resource "aws_route" "internet_access" {
route_table_id = "${aws_vpc.dahakvpc.main_route_table_id}"
destination_cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.dahakgw.id}"
}
# Route table
# NOTE(review): this route table is never referenced anywhere in this file —
# the subnet association below uses the VPC's main route table instead. It
# appears to be dead configuration; confirm intent before removing, or attach
# it via the association if a separate private table was intended.
resource "aws_route_table" "private_route_table" {
vpc_id = "${aws_vpc.dahakvpc.id}"
tags {
Name = "Private route table"
}
}
# Associate the subnet with the VPC's MAIN route table (which carries the
# internet-access route above). NOTE(review): despite the sibling
# "private_route_table" resource, this intentionally-or-not binds to the main
# table — confirm which table the subnet should use.
resource "aws_route_table_association" "dahaksubnet_association" {
subnet_id = "${aws_subnet.dahaksubnet.id}"
route_table_id = "${aws_vpc.dahakvpc.main_route_table_id}"
}

View File

@@ -0,0 +1,46 @@
# dahak cluster
(work in progress)
This folder contains a [Terraform module](https://www.terraform.io/docs/modules/usage.html)
to deploy a dahak cluster consisting of a VPC, a spy monitoring node, and one or more yeti worker nodes.
## using this module
This folder defines a Terraform module, which you can use in your
code by adding a `module` configuration and setting its `source` parameter
to URL of this folder:
```hcl
module "dahak_cluster" {
# TODO: update this
source = "github.com/hashicorp/terraform-aws-consul//modules/consul-cluster?ref=v0.0.5"
# TODO: update this
# amazon image ID
ami_id = "ami-abcd1234"
# Configure and start the nodes
user_data = <<-EOF
#!/bin/bash
/opt/consul/bin/run-consul --server --cluster-tag-key consul-cluster
EOF
# ... See variables.tf for the other parameters you must define for the consul-cluster module
}
```
Note the following parameters:
* `source`: Use this parameter to specify the URL of the terraform module we are using.
The double slash (`//`) is intentional and required. Terraform uses it to specify subfolders within a Git repo.
The `ref` parameter specifies a specific Git tag in this repo. It ensures you are using a fixed version of the repo.
* `ami_id`: Use this parameter to specify the amazon machine image to install on the nodes on the cluster.
* `user_data`: Use this parameter to specify user data (cloud init scripts).
You can find the other parameters in [variables.tf](variables.tf).
Check out the [consul-cluster example](https://github.com/hashicorp/terraform-aws-consul/tree/master/MAIN.md) for fully-working sample code.

View File

@@ -0,0 +1,122 @@
# A modern terraform version is required.
# (No upper exclusion here, unlike the root main.tf which also excludes 0.9.5.)
terraform {
required_version = ">= 0.9.3"
}
# This is going to be called
# each time we create a module
# and point to this directory.
#
# In other words, we are calling
# this once for spy and once for
# each yeti node.
#
# The parameters come from the main.tf
# and vars.tf in the parent directory.
#
# Launch configuration for one cluster (spy or yeti): AMI, instance type,
# user data, spot pricing, SSH key, security group, and root volume settings
# all come from module variables supplied by the root main.tf.
resource "aws_launch_configuration" "launch_configuration" {
name_prefix = "${var.cluster_name}-"
image_id = "${var.ami_id}"
instance_type = "${var.instance_type}"
user_data = "${var.user_data}"
spot_price = "${var.spot_price}"
# NOTE(review): aws_iam_instance_profile.instance_profile is not defined in
# this file — confirm it is declared elsewhere in the module.
iam_instance_profile = "${aws_iam_instance_profile.instance_profile.name}"
key_name = "${var.ssh_key_name}"
security_groups = ["${aws_security_group.lc_security_group.id}"]
placement_tenancy = "${var.tenancy}"
associate_public_ip_address = "${var.associate_public_ip_address}"
ebs_optimized = "${var.root_volume_ebs_optimized}"
root_block_device {
volume_type = "${var.root_volume_type}"
volume_size = "${var.root_volume_size}"
delete_on_termination = "${var.root_volume_delete_on_termination}"
}
# Important note: whenever using a launch configuration with an auto scaling group, you must set
# create_before_destroy = true. However, as soon as you set create_before_destroy = true in one resource, you must
# also set it in every resource that it depends on, or you'll get an error about cyclic dependencies (especially when
# removing resources). For more info, see:
#
# https://www.terraform.io/docs/providers/aws/r/launch_configuration.html
# https://terraform.io/docs/configuration/resources.html
lifecycle {
create_before_destroy = true
}
}
# Security group attached to the cluster's launch configuration; individual
# ingress/egress rules are added separately below so callers can extend them.
resource "aws_security_group" "lc_security_group" {
name_prefix = "${var.cluster_name}"
description = "Security group for the ${var.cluster_name} launch configuration"
vpc_id = "${var.vpc_id}"
# aws_launch_configuration.launch_configuration in this module sets create_before_destroy to true, which means
# everything it depends on, including this resource, must set it as well, or you'll get cyclic dependency errors
# when you try to do a terraform destroy.
lifecycle {
create_before_destroy = true
}
tags {
Name = "${var.cluster_name}"
}
}
# Security group rules:
# Inbound SSH from the allowed CIDR ranges. The count expression makes the
# rule conditional: it is created only when at least one CIDR was supplied.
resource "aws_security_group_rule" "allow_ssh_inbound" {
count = "${length(var.allowed_ssh_cidr_blocks) >= 1 ? 1 : 0}"
type = "ingress"
from_port = "${var.ssh_port}"
to_port = "${var.ssh_port}"
protocol = "tcp"
cidr_blocks = ["${var.allowed_ssh_cidr_blocks}"]
security_group_id = "${aws_security_group.lc_security_group.id}"
}
# Inbound SSH from specific source security groups — one rule per ID in
# var.allowed_ssh_security_group_ids.
resource "aws_security_group_rule" "allow_ssh_inbound_from_security_group_ids" {
count = "${length(var.allowed_ssh_security_group_ids)}"
type = "ingress"
from_port = "${var.ssh_port}"
to_port = "${var.ssh_port}"
protocol = "tcp"
source_security_group_id = "${element(var.allowed_ssh_security_group_ids, count.index)}"
security_group_id = "${aws_security_group.lc_security_group.id}"
}
# Unrestricted egress (all ports, all protocols, anywhere).
resource "aws_security_group_rule" "allow_all_outbound" {
type = "egress"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = "${aws_security_group.lc_security_group.id}"
}
# Attach the Consul-port ingress rules (defined in the security-rules module)
# to this cluster's security group.
# NOTE(review): the relative source "../consul-security-group-rules" does not
# obviously match the repo layout described elsewhere (module/, security-rules
# module) — verify this path resolves from this module's directory.
module "security_group_rules" {
source = "../consul-security-group-rules"
security_group_id = "${aws_security_group.lc_security_group.id}"
allowed_inbound_cidr_blocks = ["${var.allowed_inbound_cidr_blocks}"]
allowed_inbound_security_group_ids = ["${var.allowed_inbound_security_group_ids}"]
server_rpc_port = "${var.server_rpc_port}"
cli_rpc_port = "${var.cli_rpc_port}"
serf_lan_port = "${var.serf_lan_port}"
serf_wan_port = "${var.serf_wan_port}"
http_api_port = "${var.http_api_port}"
dns_port = "${var.dns_port}"
}

View File

@@ -0,0 +1,9 @@
# dahak cluster security rules
(work in progress)
This directory contains configuration files
that control/set rules for the security group
associated with the dahak cluster.
[also see](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/consul-security-group-rules)

View File

@@ -0,0 +1,198 @@
# CREATE THE SECURITY GROUP RULES THAT CONTROL WHAT TRAFFIC CAN GO IN AND OUT OF A CONSUL CLUSTER
#
# Each rule below is conditional: count is 1 only when at least one CIDR range
# was supplied in var.allowed_inbound_cidr_blocks. Ports come from the module's
# variables (see variables.tf in this directory).
# Server RPC (tcp).
resource "aws_security_group_rule" "allow_server_rpc_inbound" {
count = "${length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0}"
type = "ingress"
from_port = "${var.server_rpc_port}"
to_port = "${var.server_rpc_port}"
protocol = "tcp"
cidr_blocks = ["${var.allowed_inbound_cidr_blocks}"]
security_group_id = "${var.security_group_id}"
}
# CLI RPC (tcp).
resource "aws_security_group_rule" "allow_cli_rpc_inbound" {
count = "${length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0}"
type = "ingress"
from_port = "${var.cli_rpc_port}"
to_port = "${var.cli_rpc_port}"
protocol = "tcp"
cidr_blocks = ["${var.allowed_inbound_cidr_blocks}"]
security_group_id = "${var.security_group_id}"
}
# Serf LAN gossip (tcp).
resource "aws_security_group_rule" "allow_serf_lan_tcp_inbound" {
count = "${length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0}"
type = "ingress"
from_port = "${var.serf_lan_port}"
to_port = "${var.serf_lan_port}"
protocol = "tcp"
cidr_blocks = ["${var.allowed_inbound_cidr_blocks}"]
security_group_id = "${var.security_group_id}"
}
# Serf LAN gossip (udp).
resource "aws_security_group_rule" "allow_serf_lan_udp_inbound" {
count = "${length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0}"
type = "ingress"
from_port = "${var.serf_lan_port}"
to_port = "${var.serf_lan_port}"
protocol = "udp"
cidr_blocks = ["${var.allowed_inbound_cidr_blocks}"]
security_group_id = "${var.security_group_id}"
}
# Serf WAN gossip (tcp).
resource "aws_security_group_rule" "allow_serf_wan_tcp_inbound" {
count = "${length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0}"
type = "ingress"
from_port = "${var.serf_wan_port}"
to_port = "${var.serf_wan_port}"
protocol = "tcp"
cidr_blocks = ["${var.allowed_inbound_cidr_blocks}"]
security_group_id = "${var.security_group_id}"
}
# Serf WAN gossip (udp).
resource "aws_security_group_rule" "allow_serf_wan_udp_inbound" {
count = "${length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0}"
type = "ingress"
from_port = "${var.serf_wan_port}"
to_port = "${var.serf_wan_port}"
protocol = "udp"
cidr_blocks = ["${var.allowed_inbound_cidr_blocks}"]
security_group_id = "${var.security_group_id}"
}
# HTTP API (tcp).
resource "aws_security_group_rule" "allow_http_api_inbound" {
count = "${length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0}"
type = "ingress"
from_port = "${var.http_api_port}"
to_port = "${var.http_api_port}"
protocol = "tcp"
cidr_blocks = ["${var.allowed_inbound_cidr_blocks}"]
security_group_id = "${var.security_group_id}"
}
# DNS interface (tcp).
resource "aws_security_group_rule" "allow_dns_tcp_inbound" {
count = "${length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0}"
type = "ingress"
from_port = "${var.dns_port}"
to_port = "${var.dns_port}"
protocol = "tcp"
cidr_blocks = ["${var.allowed_inbound_cidr_blocks}"]
security_group_id = "${var.security_group_id}"
}
# DNS interface (udp).
resource "aws_security_group_rule" "allow_dns_udp_inbound" {
count = "${length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0}"
type = "ingress"
from_port = "${var.dns_port}"
to_port = "${var.dns_port}"
protocol = "udp"
cidr_blocks = ["${var.allowed_inbound_cidr_blocks}"]
security_group_id = "${var.security_group_id}"
}
# The same set of ports as above, but opened to specific SOURCE security
# groups rather than CIDR ranges: one rule instance is created per ID in
# var.allowed_inbound_security_group_ids.
# Server RPC (tcp).
resource "aws_security_group_rule" "allow_server_rpc_inbound_from_security_group_ids" {
count = "${length(var.allowed_inbound_security_group_ids)}"
type = "ingress"
from_port = "${var.server_rpc_port}"
to_port = "${var.server_rpc_port}"
protocol = "tcp"
source_security_group_id = "${element(var.allowed_inbound_security_group_ids, count.index)}"
security_group_id = "${var.security_group_id}"
}
# CLI RPC (tcp).
resource "aws_security_group_rule" "allow_cli_rpc_inbound_from_security_group_ids" {
count = "${length(var.allowed_inbound_security_group_ids)}"
type = "ingress"
from_port = "${var.cli_rpc_port}"
to_port = "${var.cli_rpc_port}"
protocol = "tcp"
source_security_group_id = "${element(var.allowed_inbound_security_group_ids, count.index)}"
security_group_id = "${var.security_group_id}"
}
# Serf LAN gossip (tcp).
resource "aws_security_group_rule" "allow_serf_lan_tcp_inbound_from_security_group_ids" {
count = "${length(var.allowed_inbound_security_group_ids)}"
type = "ingress"
from_port = "${var.serf_lan_port}"
to_port = "${var.serf_lan_port}"
protocol = "tcp"
source_security_group_id = "${element(var.allowed_inbound_security_group_ids, count.index)}"
security_group_id = "${var.security_group_id}"
}
# Serf LAN gossip (udp).
resource "aws_security_group_rule" "allow_serf_lan_udp_inbound_from_security_group_ids" {
count = "${length(var.allowed_inbound_security_group_ids)}"
type = "ingress"
from_port = "${var.serf_lan_port}"
to_port = "${var.serf_lan_port}"
protocol = "udp"
source_security_group_id = "${element(var.allowed_inbound_security_group_ids, count.index)}"
security_group_id = "${var.security_group_id}"
}
# Serf WAN gossip (tcp).
resource "aws_security_group_rule" "allow_serf_wan_tcp_inbound_from_security_group_ids" {
count = "${length(var.allowed_inbound_security_group_ids)}"
type = "ingress"
from_port = "${var.serf_wan_port}"
to_port = "${var.serf_wan_port}"
protocol = "tcp"
source_security_group_id = "${element(var.allowed_inbound_security_group_ids, count.index)}"
security_group_id = "${var.security_group_id}"
}
# Serf WAN gossip (udp).
resource "aws_security_group_rule" "allow_serf_wan_udp_inbound_from_security_group_ids" {
count = "${length(var.allowed_inbound_security_group_ids)}"
type = "ingress"
from_port = "${var.serf_wan_port}"
to_port = "${var.serf_wan_port}"
protocol = "udp"
source_security_group_id = "${element(var.allowed_inbound_security_group_ids, count.index)}"
security_group_id = "${var.security_group_id}"
}
# HTTP API (tcp).
resource "aws_security_group_rule" "allow_http_api_inbound_from_security_group_ids" {
count = "${length(var.allowed_inbound_security_group_ids)}"
type = "ingress"
from_port = "${var.http_api_port}"
to_port = "${var.http_api_port}"
protocol = "tcp"
source_security_group_id = "${element(var.allowed_inbound_security_group_ids, count.index)}"
security_group_id = "${var.security_group_id}"
}
# DNS interface (tcp).
resource "aws_security_group_rule" "allow_dns_tcp_inbound_from_security_group_ids" {
count = "${length(var.allowed_inbound_security_group_ids)}"
type = "ingress"
from_port = "${var.dns_port}"
to_port = "${var.dns_port}"
protocol = "tcp"
source_security_group_id = "${element(var.allowed_inbound_security_group_ids, count.index)}"
security_group_id = "${var.security_group_id}"
}
# DNS interface (udp).
resource "aws_security_group_rule" "allow_dns_udp_inbound_from_security_group_ids" {
count = "${length(var.allowed_inbound_security_group_ids)}"
type = "ingress"
from_port = "${var.dns_port}"
to_port = "${var.dns_port}"
protocol = "udp"
source_security_group_id = "${element(var.allowed_inbound_security_group_ids, count.index)}"
security_group_id = "${var.security_group_id}"
}

View File

@@ -0,0 +1,54 @@
# ---------------------------------------------------------------------------------------------------------------------
# REQUIRED PARAMETERS
# You must provide a value for each of these parameters.
# ---------------------------------------------------------------------------------------------------------------------
variable "security_group_id" {
description = "The ID of the security group to which we should add the Consul security group rules"
}
variable "allowed_inbound_cidr_blocks" {
description = "A list of CIDR-formatted IP address ranges from which the EC2 Instances will allow connections to Consul"
type = "list"
}
# ---------------------------------------------------------------------------------------------------------------------
# OPTIONAL PARAMETERS
# These parameters have reasonable defaults.
# ---------------------------------------------------------------------------------------------------------------------
variable "allowed_inbound_security_group_ids" {
description = "A list of security group IDs that will be allowed to connect to Consul"
type = "list"
default = []
}
# The port defaults below (8300/8400/8301/8302/8500/8600) match the
# descriptions above; NOTE(review): presumably Consul's standard port
# assignments — confirm against the Consul documentation before changing.
variable "server_rpc_port" {
description = "The port used by servers to handle incoming requests from other agents."
default = 8300
}
variable "cli_rpc_port" {
description = "The port used by all agents to handle RPC from the CLI."
default = 8400
}
variable "serf_lan_port" {
description = "The port used to handle gossip in the LAN. Required by all agents."
default = 8301
}
variable "serf_wan_port" {
description = "The port used by servers to gossip over the WAN to other servers."
default = 8302
}
variable "http_api_port" {
description = "The port used by clients to talk to the HTTP API"
default = 8500
}
variable "dns_port" {
description = "The port used to resolve DNS queries."
default = 8600
}

57
variables.tf Normal file
View File

@@ -0,0 +1,57 @@
# ---------------------------------------------------------------------------------------------------------------------
# ENVIRONMENT VARIABLES
# Define these secrets as environment variables
# ---------------------------------------------------------------------------------------------------------------------
# AWS_ACCESS_KEY_ID
# AWS_SECRET_ACCESS_KEY
# ---------------------------------------------------------------------------------------------------------------------
# OPTIONAL PARAMETERS
# These parameters have reasonable defaults.
# ---------------------------------------------------------------------------------------------------------------------
variable "ami_id" {
description = "The ID of the AMI to run in the cluster."
default = ""
}
# NOTE(review): main.tf hard-codes the subnet availability zone "us-west-1a",
# which is incompatible with this default region — verify which region is
# actually intended.
variable "aws_region" {
description = "The AWS region to deploy into (e.g. us-east-1)."
default = "us-east-1"
}
variable "cluster_name" {
description = "What to name the dahak cluster and all of its associated resources"
default = "dahak-test-cluster"
}
variable "spy_instance_type" {
description = "The type of instance to deploy for the spy node."
default = "t2.micro"
}
variable "num_yeti_servers" {
description = "The number of yeti workers to deploy."
default = 1
}
variable "yeti_instance_type" {
description = "The type of instance to deploy for the yeti workers."
default = "m5.4xlarge"
}
# Retained (commented out) pending tag-based cluster auto-discovery; see the
# matching "###" blocks in main.tf.
### variable "cluster_tag_key" {
### description = "The tag the EC2 Instances will look for to automatically discover each other and form a cluster."
### default = "consul-servers"
### }
variable "ssh_key_name" {
description = "The name of an EC2 Key Pair that can be used to SSH to the EC2 Instances in this cluster. Set to an empty string to not associate a Key Pair."
default = ""
}
variable "spot_price" {
description = "The maximum hourly price to pay for EC2 Spot Instances."
default = "0.28"
}