Unvendor tree-sitter-hcl

pull/888/head
Antonin Delpeuch 2025-09-30 20:23:53 +07:00 committed by Wilfred Hughes
parent 3a94668aee
commit 6d7b594aca
1939 changed files with 15 additions and 144772 deletions

Cargo.lock (generated): 11 changes

@@ -284,6 +284,7 @@ dependencies = [
"tree-sitter-fsharp",
"tree-sitter-go",
"tree-sitter-haskell",
"tree-sitter-hcl",
"tree-sitter-html",
"tree-sitter-java",
"tree-sitter-javascript",
@@ -1132,6 +1133,16 @@ dependencies = [
"tree-sitter-language",
]
[[package]]
name = "tree-sitter-hcl"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a7b2cc3d7121553b84309fab9d11b3ff3d420403eef9ae50f9fd1cd9d9cf012"
dependencies = [
"cc",
"tree-sitter-language",
]
[[package]]
name = "tree-sitter-html"
version = "0.23.2"

@@ -87,6 +87,7 @@ tree-sitter-erlang = "0.13.0"
tree-sitter-fsharp = "0.1.0"
tree-sitter-go = "0.23.4"
tree-sitter-haskell = "0.23.1"
tree-sitter-hcl = "1.1.0"
tree-sitter-html = "0.23.2"
tree-sitter-java = "0.23.4"
tree-sitter-javascript = "0.23.1"

@@ -122,11 +122,6 @@ fn main() {
src_dir: "vendored_parsers/tree-sitter-hare-src",
extra_files: vec![],
},
TreeSitterParser {
name: "tree-sitter-hcl",
src_dir: "vendored_parsers/tree-sitter-hcl-src",
extra_files: vec!["scanner.cc"],
},
TreeSitterParser {
name: "tree-sitter-janet-simple",
src_dir: "vendored_parsers/tree-sitter-janet-simple-src",

@@ -65,7 +65,7 @@ with `difft --list-languages`.
| Language | Parser Used |
|----------|---------------------------------------------------------------------------------------------------|
| CSS | [tree-sitter/tree-sitter-css](https://github.com/tree-sitter/tree-sitter-css) |
| HCL | [MichaHoffmann/tree-sitter-hcl](https://github.com/MichaHoffmann/tree-sitter-hcl) |
| HCL | [tree-sitter-grammars/tree-sitter-hcl](https://github.com/tree-sitter-grammars/tree-sitter-hcl) |
| HTML | [tree-sitter/tree-sitter-html](https://github.com/tree-sitter/tree-sitter-html) |
| JSON | [tree-sitter/tree-sitter-json](https://github.com/tree-sitter/tree-sitter-json) |
| LaTeX | [latex-lsp/tree-sitter-latex](https://github.com/latex-lsp/tree-sitter-latex) |

@@ -81,7 +81,6 @@ extern "C" {
fn tree_sitter_gleam() -> ts::Language;
fn tree_sitter_hare() -> ts::Language;
fn tree_sitter_hack() -> ts::Language;
fn tree_sitter_hcl() -> ts::Language;
fn tree_sitter_janet_simple() -> ts::Language;
fn tree_sitter_kotlin() -> ts::Language;
fn tree_sitter_latex() -> ts::Language;
@@ -477,7 +476,8 @@ pub(crate) fn from_language(language: guess::Language) -> TreeSitterConfig {
}
}
Hcl => {
let language = unsafe { tree_sitter_hcl() };
let language_fn = tree_sitter_hcl::LANGUAGE;
let language = tree_sitter::Language::new(language_fn);
TreeSitterConfig {
language: language.clone(),
atom_nodes: ["string_lit", "heredoc_template"].into_iter().collect(),
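The switch above replaces the `extern "C"` declaration and `unsafe` call with the `LANGUAGE` constant exported by the crates.io `tree-sitter-hcl` 1.1.0 crate. A minimal sketch of the new loading pattern in isolation (not difftastic's actual wiring, which stores the language inside `TreeSitterConfig` as shown in the hunk):

```rust
// Minimal sketch of loading the HCL grammar from the crates.io crate and
// parsing a small snippet. Assumes the versions used by this commit:
// tree-sitter-hcl 1.1.0 plus a tree-sitter runtime whose `Language::new`
// accepts a `LanguageFn`.
fn parse_hcl_example() {
    let language = tree_sitter::Language::new(tree_sitter_hcl::LANGUAGE);

    let mut parser = tree_sitter::Parser::new();
    parser
        .set_language(&language)
        .expect("HCL grammar should be compatible with the linked tree-sitter runtime");

    let source = r#"
resource "example" "demo" {
  attr = "value"
}
"#;
    let tree = parser.parse(source, None).expect("parsing returns a tree");
    println!("{}", tree.root_node().to_sexp());
}
```

Because the constant is provided directly by the crate, the caller no longer needs its own `extern "C"` block or `unsafe` call.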

@@ -1 +0,0 @@
tree-sitter-hcl/src

@@ -1,10 +0,0 @@
root = true
[*.{cc,txt,js}]
indent_style = space
indent_size = 2
tab_width = 8
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
charset = utf-8

@@ -1,26 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Write HCL
2. ???
3. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Additional context**
Add any other context about the problem here.

@@ -1,20 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.

@@ -1,16 +0,0 @@
name: acceptance
on: [pull_request]
jobs:
unittests:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: nixbuild/nix-quick-install-action@v5
- run: nix-shell --run 'tree-sitter test'
acceptance:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: nixbuild/nix-quick-install-action@v5
- run: nix-shell --run 'tree-sitter parse --quiet --stat example/real_world_stuff/*/*'

@@ -1,22 +0,0 @@
name: build
on: [pull_request]
jobs:
compile:
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, windows-latest, macos-latest]
compiler: [gcc, clang++]
name: compile
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v2
- if: matrix.os == 'windows-latest' && matrix.compiler == 'gcc'
uses: egor-tensin/setup-mingw@v2
- name: build
run: ${{ matrix.compiler }} -o scanner.o -I./src -c src/scanner.cc -Werror

@@ -1,10 +0,0 @@
node_modules
queries
package-lock.json
build
target
Cargo.lock
npm-debug.log
log.html
.env
.DS_Store

@@ -1,71 +0,0 @@
# Changelog
## 0.7.0 - 2022-06-02
housekeeping:
* bump tree-sitter dependency to 0.20.6
* regenerate parser
* fix clang+windows CI job
fix:
* allow empty string literals
## 0.6.0 - 2021-09-20
feature:
* add template for expressions
* add template if expressions
cleanup:
* move fuzzing instrumentation to different repository
## 0.5.0 - 2021-09-15
feature:
* unhide `(block|object|tuple)_(start|end)` tokens
fix:
* remove empty `template_directive` block because it causes issues for semgrep
## 0.4.0 - 2021-07-02
feature:
* add named "key" and "val" fields to left and right side of object elements
* unhide the `template_interpolation_(start|end)` and `quoted_template_(start|end)` tokens
## 0.3.2 - 2021-07-01
fix:
* add a build step to CI to make sure the scanner is compilable
* add fuzzing instrumentation
## 0.3.1 - 2021-06-30
fix:
* dont use c struct initializer syntax in src/scanner.cc
## 0.3.0 - 2021-06-28
fix:
* correct expression for identifiers
* allow empty template interpolations
* allow empty templates
* fix crash when `context_stack.size()` exceeds `CHAR_MAX`
* fix crash when `heredoc_identifier.size()` exceeds `CHAR_MAX`
quality:
* add fuzzing
## 0.2.0 - 2021-06-26
feature:
* add quoted templates and heredoc templates
* still missing template directives though
quality:
* collect corpus of real world hcl files from github
* add github actions
## 0.1.0 - 2021-06-13
* initial version

@@ -1,26 +0,0 @@
[package]
name = "tree-sitter-hcl"
description = "hcl grammar for the tree-sitter parsing library"
version = "0.0.1"
keywords = ["incremental", "parsing", "hcl"]
categories = ["parsing", "text-editors"]
repository = "https://github.com/MichaHoffmann/tree-sitter-hcl"
edition = "2018"
license = "Apache"
build = "bindings/rust/build.rs"
include = [
"bindings/rust/*",
"grammar.js",
"queries/*",
"src/*",
]
[lib]
path = "bindings/rust/lib.rs"
[dependencies]
tree-sitter = "~0.20"
[build-dependencies]
cc = "1.0"

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -1,39 +0,0 @@
# tree-sitter-hcl
tree-sitter grammar for the [HCL](https://github.com/hashicorp/hcl/blob/main/hclsyntax/spec.md) language
## Try It Out
Try the parser in the [playground](https://michahoffmann.github.io/tree-sitter-hcl/)
## Example
Highlighting `example/example.hcl`:
![Highlighting Example](https://i.imgur.com/yUJ0ybK.png)
## Developing
It is recommended to use `nix` to fulfill all development dependencies. To activate the development environment simply run `nix-shell` in the project root.
## Running Tests
To run tests simply run `nix-shell --run 'tree-sitter test'`.
## Compliance
The directory `example/real_world_stuff` contains a corpus of hcl files that I found with the github query `language:HCL` for users `coreos`, `hashicorp`, `oracle` and `terraform-community-modules`.
```bash
tree-sitter parse --quiet --stat example/real_world_stuff/*/*
Total parses: 1892; successful parses: 1892; failed parses: 0; success percentage: 100.00%
```
## Fuzzing
See the [fuzzing repo for this parser](https://github.com/MichaHoffmann/tree-sitter-hcl-fuzz)
## Attributions
Pages were copied from https://github.com/m-novikov/tree-sitter-sql

@@ -1,19 +0,0 @@
{
"targets": [
{
"target_name": "tree_sitter_hcl_binding",
"include_dirs": [
"<!(node -e \"require('nan')\")",
"src"
],
"sources": [
"bindings/node/binding.cc",
"src/parser.c",
# If your language uses an external scanner, add it here.
],
"cflags_c": [
"-std=c99",
]
}
]
}

@@ -1,28 +0,0 @@
#include "tree_sitter/parser.h"
#include <node.h>
#include "nan.h"
using namespace v8;
extern "C" TSLanguage * tree_sitter_hcl();
namespace {
NAN_METHOD(New) {}
void Init(Local<Object> exports, Local<Object> module) {
Local<FunctionTemplate> tpl = Nan::New<FunctionTemplate>(New);
tpl->SetClassName(Nan::New("Language").ToLocalChecked());
tpl->InstanceTemplate()->SetInternalFieldCount(1);
Local<Function> constructor = Nan::GetFunction(tpl).ToLocalChecked();
Local<Object> instance = constructor->NewInstance(Nan::GetCurrentContext()).ToLocalChecked();
Nan::SetInternalFieldPointer(instance, 0, tree_sitter_hcl());
Nan::Set(instance, Nan::New("name").ToLocalChecked(), Nan::New("hcl").ToLocalChecked());
Nan::Set(module, Nan::New("exports").ToLocalChecked(), instance);
}
NODE_MODULE(tree_sitter_hcl_binding, Init)
} // namespace

@@ -1,19 +0,0 @@
try {
module.exports = require("../../build/Release/tree_sitter_hcl_binding");
} catch (error1) {
if (error1.code !== 'MODULE_NOT_FOUND') {
throw error1;
}
try {
module.exports = require("../../build/Debug/tree_sitter_hcl_binding");
} catch (error2) {
if (error2.code !== 'MODULE_NOT_FOUND') {
throw error2;
}
throw error1
}
}
try {
module.exports.nodeTypeInfo = require("../../src/node-types.json");
} catch (_) {}

@@ -1,35 +0,0 @@
fn main() {
let src_dir = std::path::Path::new("src");
let mut c_config = cc::Build::new();
c_config.include(&src_dir);
c_config
.flag_if_supported("-Wno-unused-parameter")
.flag_if_supported("-Wno-unused-but-set-variable")
.flag_if_supported("-Wno-trigraphs");
let parser_path = src_dir.join("parser.c");
c_config.file(&parser_path);
// If your language uses an external scanner written in C,
// then include this block of code:
/*
let scanner_path = src_dir.join("scanner.c");
c_config.file(&scanner_path);
println!("cargo:rerun-if-changed={}", scanner_path.to_str().unwrap());
*/
c_config.compile("parser");
println!("cargo:rerun-if-changed={}", parser_path.to_str().unwrap());
let mut cpp_config = cc::Build::new();
cpp_config.cpp(true);
cpp_config.include(&src_dir);
cpp_config
.flag_if_supported("-Wno-unused-parameter")
.flag_if_supported("-Wno-unused-but-set-variable");
let scanner_path = src_dir.join("scanner.cc");
cpp_config.file(&scanner_path);
cpp_config.compile("scanner");
println!("cargo:rerun-if-changed={}", scanner_path.to_str().unwrap());
}

@@ -1,52 +0,0 @@
//! This crate provides hcl language support for the [tree-sitter][] parsing library.
//!
//! Typically, you will use the [language][language func] function to add this language to a
//! tree-sitter [Parser][], and then use the parser to parse some code:
//!
//! ```
//! let code = "";
//! let mut parser = tree_sitter::Parser::new();
//! parser.set_language(tree_sitter_hcl::language()).expect("Error loading hcl grammar");
//! let tree = parser.parse(code, None).unwrap();
//! ```
//!
//! [Language]: https://docs.rs/tree-sitter/*/tree_sitter/struct.Language.html
//! [language func]: fn.language.html
//! [Parser]: https://docs.rs/tree-sitter/*/tree_sitter/struct.Parser.html
//! [tree-sitter]: https://tree-sitter.github.io/
use tree_sitter::Language;
extern "C" {
fn tree_sitter_hcl() -> Language;
}
/// Get the tree-sitter [Language][] for this grammar.
///
/// [Language]: https://docs.rs/tree-sitter/*/tree_sitter/struct.Language.html
pub fn language() -> Language {
unsafe { tree_sitter_hcl() }
}
/// The content of the [`node-types.json`][] file for this grammar.
///
/// [`node-types.json`]: https://tree-sitter.github.io/tree-sitter/using-parsers#static-node-types
pub const NODE_TYPES: &'static str = include_str!("../../src/node-types.json");
// Uncomment these to include any queries that this grammar contains
// pub const HIGHLIGHTS_QUERY: &'static str = include_str!("../../queries/highlights.scm");
// pub const INJECTIONS_QUERY: &'static str = include_str!("../../queries/injections.scm");
// pub const LOCALS_QUERY: &'static str = include_str!("../../queries/locals.scm");
// pub const TAGS_QUERY: &'static str = include_str!("../../queries/tags.scm");
#[cfg(test)]
mod tests {
#[test]
fn test_can_load_grammar() {
let mut parser = tree_sitter::Parser::new();
parser
.set_language(super::language())
.expect("Error loading hcl language");
}
}
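The deleted bindings above expose the grammar through an `extern "C"` function and a `language()` wrapper, which only works with older `tree-sitter` runtimes. The crates.io release this commit depends on instead publishes a `LANGUAGE` constant, a `LanguageFn` from the `tree-sitter-language` crate per the Cargo.lock entry earlier in this diff. Roughly, as a hedged sketch of that newer binding style rather than the crate's exact source:

```rust
// Approximate shape of the newer generated Rust bindings (sketch only).
use tree_sitter_language::LanguageFn;

extern "C" {
    fn tree_sitter_hcl() -> *const ();
}

/// Consumed by callers as `tree_sitter::Language::new(LANGUAGE)`.
pub const LANGUAGE: LanguageFn = unsafe { LanguageFn::from_raw(tree_sitter_hcl) };
```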

@@ -1,83 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<title>Tree Sitter HCL Playground</title>
<style>
#playground-container {
max-width: 640px;
margin-left: auto;
margin-right: auto;
}
#playground-container .CodeMirror {
border: 1px solid;
}
#create-issue-btn {
padding: 0.2em;
float: right;
font-size: 1.5em;
}
#checkboxes {
padding-bottom: 1em;
}
#output-container {
border: 1px solid;
}
.highlight {
background-color: #f8f8f8;
}
</style>
</head>
<body>
<!--
This file is licensed under MIT license
Copyright (c) 2018 Max Brunsfeld
Taken from https://github.com/tree-sitter/tree-sitter/docs/section-7-playground.html
-->
<link
rel="stylesheet"
href="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.45.0/codemirror.min.css"
/>
<link
rel="stylesheet"
href="https://cdnjs.cloudflare.com/ajax/libs/clusterize.js/0.18.0/clusterize.min.css"
/>
<div id="playground-container">
<h1>Tree Sitter HCL Playground</h1>
<h4>Code</h4>
<div id="checkboxes">
<input id="logging-checkbox" type="checkbox" />
<label for="logging-checkbox">Log</label>
<input id="query-checkbox" type="checkbox" />
<label for="query-checkbox">Query</label>
</div>
<textarea id="code-input">
example "test" {
foo = "bar"
}
</textarea>
<div id="query-container" style="visibility: hidden; position: absolute">
<h4>Query</h4>
<textarea id="query-input"></textarea>
</div>
<h4>Tree</h4>
<span id="update-time"></span>
<div id="output-container-scroll">
<pre id="output-container" class="highlight"></pre>
</div>
<button id="create-issue-btn" type="button">Create Issue</button>
</div>
<script src="//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.45.0/codemirror.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/clusterize.js/0.18.0/clusterize.min.js"></script>
<script src="./vendor/tree-sitter.js"></script>
<script id="playground-script" src="./playground.js?v=3"></script>
</body>
</html>

@@ -1,498 +0,0 @@
// This file is licensed under MIT license
// Copyright (c) 2018 Max Brunsfeld
// Taken from https://github.com/tree-sitter/tree-sitter/docs/assets/playground.js
let tree;
(async () => {
const CAPTURE_REGEX = /@\s*([\w\._-]+)/g;
const COLORS_BY_INDEX = [
"blue",
"chocolate",
"darkblue",
"darkcyan",
"darkgreen",
"darkred",
"darkslategray",
"dimgray",
"green",
"indigo",
"navy",
"red",
"sienna",
];
const scriptURL = document.getElementById("playground-script").src;
const codeInput = document.getElementById("code-input");
const loggingCheckbox = document.getElementById("logging-checkbox");
const outputContainer = document.getElementById("output-container");
const outputContainerScroll = document.getElementById(
"output-container-scroll",
);
const playgroundContainer = document.getElementById("playground-container");
const queryCheckbox = document.getElementById("query-checkbox");
const createIssueBtn = document.getElementById("create-issue-btn");
const queryContainer = document.getElementById("query-container");
const queryInput = document.getElementById("query-input");
const updateTimeSpan = document.getElementById("update-time");
loadState();
await TreeSitter.init();
const parser = new TreeSitter();
const codeEditor = CodeMirror.fromTextArea(codeInput, {
lineNumbers: true,
showCursorWhenSelecting: true,
});
const queryEditor = CodeMirror.fromTextArea(queryInput, {
lineNumbers: true,
showCursorWhenSelecting: true,
});
const cluster = new Clusterize({
rows: [],
noDataText: null,
contentElem: outputContainer,
scrollElem: outputContainerScroll,
});
const renderTreeOnCodeChange = debounce(renderTree, 50);
const saveStateOnChange = debounce(saveState, 2000);
const runTreeQueryOnChange = debounce(runTreeQuery, 50);
let languageName = "hcl";
let treeRows = null;
let treeRowHighlightedIndex = -1;
let parseCount = 0;
let isRendering = 0;
let query;
codeEditor.on("changes", handleCodeChange);
codeEditor.on("viewportChange", runTreeQueryOnChange);
codeEditor.on("cursorActivity", debounce(handleCursorMovement, 150));
queryEditor.on("changes", debounce(handleQueryChange, 150));
loggingCheckbox.addEventListener("change", handleLoggingChange);
queryCheckbox.addEventListener("change", handleQueryEnableChange);
outputContainer.addEventListener("click", handleTreeClick);
createIssueBtn.addEventListener("click", handleCreateIssue);
handleQueryEnableChange();
await loadLanguage();
playgroundContainer.style.visibility = "visible";
async function loadLanguage() {
const query = new URL(scriptURL).search;
const url = `tree-sitter-hcl.wasm${query}`;
const language = await TreeSitter.Language.load(url);
tree = null;
parser.setLanguage(language);
handleCodeChange();
handleQueryChange();
}
async function handleCodeChange(editor, changes) {
const newText = codeEditor.getValue() + "\n";
const edits = tree && changes && changes.map(treeEditForEditorChange);
const start = performance.now();
if (edits) {
for (const edit of edits) {
tree.edit(edit);
}
}
const newTree = parser.parse(newText, tree);
const duration = (performance.now() - start).toFixed(1);
updateTimeSpan.innerText = `${duration} ms`;
if (tree) tree.delete();
tree = newTree;
parseCount++;
renderTreeOnCodeChange();
runTreeQueryOnChange();
saveStateOnChange();
}
async function renderTree() {
isRendering++;
const cursor = tree.walk();
let currentRenderCount = parseCount;
let row = "";
let rows = [];
let finishedRow = false;
let visitedChildren = false;
let indentLevel = 0;
for (let i = 0; ; i++) {
if (i > 0 && i % 10000 === 0) {
await new Promise(r => setTimeout(r, 0));
if (parseCount !== currentRenderCount) {
cursor.delete();
isRendering--;
return;
}
}
let displayName;
if (cursor.nodeIsMissing) {
displayName = `MISSING ${cursor.nodeType}`;
} else if (cursor.nodeIsNamed) {
displayName = cursor.nodeType;
}
if (visitedChildren) {
if (displayName) {
finishedRow = true;
}
if (cursor.gotoNextSibling()) {
visitedChildren = false;
} else if (cursor.gotoParent()) {
visitedChildren = true;
indentLevel--;
} else {
break;
}
} else {
if (displayName) {
if (finishedRow) {
row += "</div>";
rows.push(row);
finishedRow = false;
}
const start = cursor.startPosition;
const end = cursor.endPosition;
const id = cursor.nodeId;
let fieldName = cursor.currentFieldName();
if (fieldName) {
fieldName += ": ";
} else {
fieldName = "";
}
row = `<div>${" ".repeat(
indentLevel,
)}${fieldName}<a class='plain' href="#" data-id=${id} data-range="${
start.row
},${start.column},${end.row},${end.column}">${displayName}</a> [${
start.row
}, ${start.column}] - [${end.row}, ${end.column}])`;
finishedRow = true;
}
if (cursor.gotoFirstChild()) {
visitedChildren = false;
indentLevel++;
} else {
visitedChildren = true;
}
}
}
if (finishedRow) {
row += "</div>";
rows.push(row);
}
cursor.delete();
cluster.update(rows);
treeRows = rows;
isRendering--;
handleCursorMovement();
}
function runTreeQuery(_, startRow, endRow) {
if (endRow == null) {
const viewport = codeEditor.getViewport();
startRow = viewport.from;
endRow = viewport.to;
}
codeEditor.operation(() => {
const marks = codeEditor.getAllMarks();
marks.forEach(m => m.clear());
if (tree && query) {
const captures = query.captures(
tree.rootNode,
{ row: startRow, column: 0 },
{ row: endRow, column: 0 },
);
let lastNodeId;
for (const { name, node } of captures) {
if (node.id === lastNodeId) continue;
lastNodeId = node.id;
const { startPosition, endPosition } = node;
codeEditor.markText(
{ line: startPosition.row, ch: startPosition.column },
{ line: endPosition.row, ch: endPosition.column },
{
inclusiveLeft: true,
inclusiveRight: true,
css: `color: ${colorForCaptureName(name)}`,
},
);
}
}
});
}
function handleQueryChange() {
if (query) {
query.delete();
query.deleted = true;
query = null;
}
queryEditor.operation(() => {
queryEditor.getAllMarks().forEach(m => m.clear());
if (!queryCheckbox.checked) return;
const queryText = queryEditor.getValue();
try {
query = parser.getLanguage().query(queryText);
let match;
let row = 0;
queryEditor.eachLine(line => {
while ((match = CAPTURE_REGEX.exec(line.text))) {
queryEditor.markText(
{ line: row, ch: match.index },
{ line: row, ch: match.index + match[0].length },
{
inclusiveLeft: true,
inclusiveRight: true,
css: `color: ${colorForCaptureName(match[1])}`,
},
);
}
row++;
});
} catch (error) {
const startPosition = queryEditor.posFromIndex(error.index);
const endPosition = {
line: startPosition.line,
ch: startPosition.ch + (error.length || Infinity),
};
if (error.index === queryText.length) {
if (startPosition.ch > 0) {
startPosition.ch--;
} else if (startPosition.row > 0) {
startPosition.row--;
startPosition.column = Infinity;
}
}
queryEditor.markText(startPosition, endPosition, {
className: "query-error",
inclusiveLeft: true,
inclusiveRight: true,
attributes: { title: error.message },
});
}
});
runTreeQuery();
saveQueryState();
}
function handleCursorMovement() {
if (isRendering) return;
const selection = codeEditor.getDoc().listSelections()[0];
let start = { row: selection.anchor.line, column: selection.anchor.ch };
let end = { row: selection.head.line, column: selection.head.ch };
if (
start.row > end.row ||
(start.row === end.row && start.column > end.column)
) {
let swap = end;
end = start;
start = swap;
}
const node = tree.rootNode.namedDescendantForPosition(start, end);
if (treeRows) {
if (treeRowHighlightedIndex !== -1) {
const row = treeRows[treeRowHighlightedIndex];
if (row)
treeRows[treeRowHighlightedIndex] = row.replace(
"highlighted",
"plain",
);
}
treeRowHighlightedIndex = treeRows.findIndex(row =>
row.includes(`data-id=${node.id}`),
);
if (treeRowHighlightedIndex !== -1) {
const row = treeRows[treeRowHighlightedIndex];
if (row)
treeRows[treeRowHighlightedIndex] = row.replace(
"plain",
"highlighted",
);
}
cluster.update(treeRows);
const lineHeight = cluster.options.item_height;
const scrollTop = outputContainerScroll.scrollTop;
const containerHeight = outputContainerScroll.clientHeight;
const offset = treeRowHighlightedIndex * lineHeight;
if (scrollTop > offset - 20) {
$(outputContainerScroll).animate({ scrollTop: offset - 20 }, 150);
} else if (scrollTop < offset + lineHeight + 40 - containerHeight) {
$(outputContainerScroll).animate(
{ scrollTop: offset - containerHeight + 40 },
150,
);
}
}
}
function handleCreateIssue() {
const queryText = codeEditor.getValue();
const outputText = outputContainer.innerText;
const title = `Error parsing SQL`;
const body = `Error when parsing the following SQL:
\`\`\`
${queryText}
\`\`\`
Error:
\`\`\`
${outputText}
\`\`\``;
const queryParams = `title=${encodeURIComponent(
title,
)}&body=${encodeURIComponent(body)}`;
const url = `https://github.com/MichaHoffmann/tree-sitter-hcl/issues/new?${queryParams}`;
window.open(url);
}
function handleTreeClick(event) {
if (event.target.tagName === "A") {
event.preventDefault();
const [startRow, startColumn, endRow, endColumn] =
event.target.dataset.range.split(",").map(n => parseInt(n));
codeEditor.focus();
codeEditor.setSelection(
{ line: startRow, ch: startColumn },
{ line: endRow, ch: endColumn },
);
}
}
function handleLoggingChange() {
if (loggingCheckbox.checked) {
parser.setLogger((message, lexing) => {
if (lexing) {
console.log(" ", message);
} else {
console.log(message);
}
});
} else {
parser.setLogger(null);
}
}
function handleQueryEnableChange() {
if (queryCheckbox.checked) {
queryContainer.style.visibility = "";
queryContainer.style.position = "";
} else {
queryContainer.style.visibility = "hidden";
queryContainer.style.position = "absolute";
}
handleQueryChange();
}
function treeEditForEditorChange(change) {
const oldLineCount = change.removed.length;
const newLineCount = change.text.length;
const lastLineLength = change.text[newLineCount - 1].length;
const startPosition = { row: change.from.line, column: change.from.ch };
const oldEndPosition = { row: change.to.line, column: change.to.ch };
const newEndPosition = {
row: startPosition.row + newLineCount - 1,
column:
newLineCount === 1
? startPosition.column + lastLineLength
: lastLineLength,
};
const startIndex = codeEditor.indexFromPos(change.from);
let newEndIndex = startIndex + newLineCount - 1;
let oldEndIndex = startIndex + oldLineCount - 1;
for (let i = 0; i < newLineCount; i++) newEndIndex += change.text[i].length;
for (let i = 0; i < oldLineCount; i++)
oldEndIndex += change.removed[i].length;
return {
startIndex,
oldEndIndex,
newEndIndex,
startPosition,
oldEndPosition,
newEndPosition,
};
}
function colorForCaptureName(capture) {
const id = query.captureNames.indexOf(capture);
return COLORS_BY_INDEX[id % COLORS_BY_INDEX.length];
}
function storageGetItem(lookupKey) {
try {
return localStorage.getItem(lookupKey);
} catch {
return null;
}
}
function storageSetItem(lookupKey, value) {
try {
return localStorage.setItem(lookupKey, value);
} catch {}
}
function loadState() {
const language = storageGetItem("language");
const sourceCode = storageGetItem("sourceCode");
const query = storageGetItem("query");
const queryEnabled = storageGetItem("queryEnabled");
if (language != null && sourceCode != null && query != null) {
queryInput.value = query;
codeInput.value = sourceCode;
queryCheckbox.checked = queryEnabled === "true";
}
}
function saveState() {
storageSetItem("sourceCode", codeEditor.getValue());
saveQueryState();
}
function saveQueryState() {
storageSetItem("queryEnabled", queryCheckbox.checked);
storageSetItem("query", queryEditor.getValue());
}
function debounce(func, wait, immediate) {
var timeout;
return function () {
var context = this,
args = arguments;
var later = function () {
timeout = null;
if (!immediate) func.apply(context, args);
};
var callNow = immediate && !timeout;
clearTimeout(timeout);
timeout = setTimeout(later, wait);
if (callNow) func.apply(context, args);
};
}
})();

File diff suppressed because one or more lines are too long

@@ -1,95 +0,0 @@
resource "example" "literals" {
attr1 = "val1"
tupl1 = [ 1, 2, 3.4, "foo" ]
tupl2 = []
obj1 = { foo = "bar", baz = quz }
null1 = null
bool1 = true
bool2 = false
esc1 = "\" \t \UFF11FF22 \uFFFF \n"
esc2 = "$${} %%{}"
num1 = 2
num2 = 2.112
num3 = 2.112e-12
num4 = 2.112e+12
num5 = 2.112E+12
num6 = 2.112E-12
num7 = 0x21FF
}
resource "example" "comments" {
// comment
# comment
/*
comment
*/
}
resource "example" "splat_expressions" {
splat1 = foo.*.bar.baz[0]
splat2 = foo[*].bar.baz[0]
}
resource "example" "for_expressions" {
for1 = { for i, v in ["a", "a", "b"] : v => i... }
for2 = [ for k, v in x : "${k}-${v}" ]
for3 = { for k, v in x: k => v }
for4 = [ for v in x : v ]
for5 = { for v in x : v => v }
for6 = [ for v in x : v if v < 3 ]
}
resource "example" "function_expressions" {
func1 = is_number("123")
func2 = multiline(
arg1,
arg2,
arg3...
)
func3 = withobject({
"foo" : 2,
"bar" : baz,
key : val,
fizz : buzz,
})
}
resource "example" "binary_expressions" {
cond1 = (1 == 2) ? 1 : "foobar"
bin1 = ((1+2)%3)*4
}
resource "example" "template_expressions" {
tpl1 = "prefix-${var.bar}"
tpl2 = "prefix-${func("bar")}"
tpl3 = "prefix-${func("nested-${var.bar}")}"
tpl4 = <<EOF
%{ for a in f(b) ~}
${func("foo${ a }")}
%{ endfor ~}
EOF
tpl5 = <<-EOF
%{~if cond~}
"foo"
%{~else~}
4
%{~endif~}
EOF
tpl6 = <<-EOF
%{ for a in f(b) ~}
%{~if a~} "true" %{~else~} "false" %{~endif~}
%{ endfor ~}
EOF
}
resource "example" "nested_blocks" {
nested_block "first" {
attr1 = "foo"
nested_block "second" {
attr1 = "bar"
}
}
}

@@ -1,358 +0,0 @@
terraform {
required_version = ">= 0.10.7"
}
provider "archive" {
version = "1.0.0"
}
provider "external" {
version = "1.0.0"
}
provider "ignition" {
version = "1.0.0"
}
provider "local" {
version = "1.0.0"
}
provider "null" {
version = "1.0.0"
}
provider "random" {
version = "1.0.0"
}
provider "template" {
version = "1.0.0"
}
provider "tls" {
version = "1.0.1"
}
variable "tectonic_config_version" {
description = <<EOF
(internal) This declares the version of the global configuration variables.
It has no impact on generated assets but declares the version contract of the configuration.
EOF
default = "1.0"
}
variable "tectonic_image_re" {
description = <<EOF
(internal) Regular expression used to extract repo and tag components
EOF
type = "string"
default = "/^([^/]+/[^/]+):(.*)$/"
}
variable "tectonic_container_images" {
description = "(internal) Container images to use"
type = "map"
default = {
addon_resizer = "gcr.io/google_containers/addon-resizer:2.1"
awscli = "quay.io/coreos/awscli:025a357f05242fdad6a81e8a6b520098aa65a600"
gcloudsdk = "google/cloud-sdk:178.0.0-alpine"
bootkube = "quay.io/coreos/bootkube:v0.10.0"
tnc_operator = "quay.io/coreos/tectonic-node-controller-operator-dev:c3cee2bc5673011e88ac7b0ab1659c2c7243a499"
etcd_cert_signer = "quay.io/coreos/kube-etcd-signer-server:678cc8e6841e2121ebfdb6e2db568fce290b67d6"
etcd = "quay.io/coreos/etcd:v3.2.14"
hyperkube = "openshift/origin-node:latest"
kube_core_renderer = "quay.io/coreos/kube-core-renderer-dev:c3cee2bc5673011e88ac7b0ab1659c2c7243a499"
kube_core_operator = "quay.io/coreos/kube-core-operator-dev:c3cee2bc5673011e88ac7b0ab1659c2c7243a499"
tectonic_channel_operator = "quay.io/coreos/tectonic-channel-operator-dev:c3cee2bc5673011e88ac7b0ab1659c2c7243a499"
kube_addon_operator = "quay.io/coreos/kube-addon-operator-dev:c3cee2bc5673011e88ac7b0ab1659c2c7243a499"
tectonic_alm_operator = "quay.io/coreos/tectonic-alm-operator:v0.3.1"
tectonic_ingress_controller_operator = "quay.io/coreos/tectonic-ingress-controller-operator-dev:c3cee2bc5673011e88ac7b0ab1659c2c7243a499"
tectonic_utility_operator = "quay.io/coreos/tectonic-utility-operator-dev:c3cee2bc5673011e88ac7b0ab1659c2c7243a499"
tectonic_network_operator = "quay.io/coreos/tectonic-network-operator-dev:c3cee2bc5673011e88ac7b0ab1659c2c7243a499"
}
}
variable "tectonic_container_base_images" {
description = "(internal) Base images of the components to use"
type = "map"
default = {
tectonic_monitoring_auth = "quay.io/coreos/tectonic-monitoring-auth"
config_reload = "quay.io/coreos/configmap-reload"
addon_resizer = "quay.io/coreos/addon-resizer"
kube_state_metrics = "quay.io/coreos/kube-state-metrics"
grafana = "quay.io/coreos/monitoring-grafana"
grafana_watcher = "quay.io/coreos/grafana-watcher"
prometheus_operator = "quay.io/coreos/prometheus-operator"
prometheus_config_reload = "quay.io/coreos/prometheus-config-reloader"
prometheus = "quay.io/prometheus/prometheus"
alertmanager = "quay.io/prometheus/alertmanager"
node_exporter = "quay.io/prometheus/node-exporter"
kube_rbac_proxy = "quay.io/coreos/kube-rbac-proxy"
}
}
variable "tectonic_versions" {
description = "(internal) Versions of the components to use"
type = "map"
default = {
tectonic = "1.8.4-tectonic.2"
alm = "0.4.0"
}
}
variable "tectonic_service_cidr" {
type = "string"
description = <<EOF
(optional) This declares the IP range to assign Kubernetes service cluster IPs in CIDR notation.
The maximum size of this IP range is /12
EOF
}
variable "tectonic_cluster_cidr" {
type = "string"
description = "(optional) This declares the IP range to assign Kubernetes pod IPs in CIDR notation."
}
variable "tectonic_master_count" {
type = "string"
default = "1"
description = <<EOF
The number of master nodes to be created.
This applies only to cloud platforms.
EOF
}
variable "tectonic_worker_count" {
type = "string"
default = "3"
description = <<EOF
The number of worker nodes to be created.
This applies only to cloud platforms.
EOF
}
variable "tectonic_etcd_count" {
type = "string"
default = "0"
description = <<EOF
The number of etcd nodes to be created.
If set to zero, the count of etcd nodes will be determined automatically.
EOF
}
variable "tectonic_base_domain" {
type = "string"
description = <<EOF
The base DNS domain of the cluster. It must NOT contain a trailing period. Some
DNS providers will automatically add this if necessary.
Example: `openshift.example.com`.
Note: This field MUST be set manually prior to creating the cluster.
This applies only to cloud platforms.
EOF
}
variable "tectonic_cluster_name" {
type = "string"
description = <<EOF
The name of the cluster.
If used in a cloud-environment, this will be prepended to `tectonic_base_domain` resulting in the URL to the Tectonic console.
Note: This field MUST be set manually prior to creating the cluster.
EOF
}
variable "tectonic_pull_secret_path" {
type = "string"
default = ""
description = <<EOF
The path the pull secret file in JSON format.
This is known to be a "Docker pull secret" as produced by the docker login [1] command.
A sample JSON content is shown in [2].
You can download the pull secret from your Account overview page at [3].
[1] https://docs.docker.com/engine/reference/commandline/login/
[2] https://coreos.com/os/docs/latest/registry-authentication.html#manual-registry-auth-setup
[3] https://account.coreos.com/overview
EOF
}
variable "tectonic_license_path" {
type = "string"
default = ""
description = <<EOF
The path to the tectonic licence file.
You can download the Tectonic license file from your Account overview page at [1].
[1] https://account.coreos.com/overview
EOF
}
variable "tectonic_container_linux_channel" {
type = "string"
description = <<EOF
The Container Linux update channel.
Examples: `stable`, `beta`, `alpha`
EOF
}
variable "tectonic_container_linux_version" {
type = "string"
description = <<EOF
The Container Linux version to use. Set to `latest` to select the latest available version for the selected update channel.
Examples: `latest`, `1465.6.0`
EOF
}
variable "tectonic_update_server" {
type = "string"
default = "https://tectonic.update.core-os.net"
description = "(internal) The URL of the Tectonic Omaha update server"
}
variable "tectonic_update_channel" {
type = "string"
default = "tectonic-1.9-production"
description = "(internal) The Tectonic Omaha update channel"
}
variable "tectonic_update_app_id" {
type = "string"
default = "6bc7b986-4654-4a0f-94b3-84ce6feb1db4"
description = "(internal) The Tectonic Omaha update App ID"
}
variable "tectonic_admin_email" {
type = "string"
description = <<EOF
(internal) The e-mail address used to:
1. login as the admin user to the Tectonic Console.
2. generate DNS zones for some providers.
Note: This field MUST be in all lower-case e-mail address format and set manually prior to creating the cluster.
EOF
}
variable "tectonic_admin_password" {
type = "string"
description = <<EOF
(internal) The admin user password to login to the Tectonic Console.
Note: This field MUST be set manually prior to creating the cluster. Backslashes and double quotes must
also be escaped.
EOF
}
variable "tectonic_ca_cert" {
type = "string"
default = ""
description = <<EOF
(optional) The content of the PEM-encoded CA certificate, used to generate all cluster certificates.
If left blank, a CA certificate will be automatically generated.
EOF
}
variable "tectonic_ca_key" {
type = "string"
default = ""
description = <<EOF
(optional) The content of the PEM-encoded CA key, used to generate Tectonic all cluster certificates.
This field is mandatory if `tectonic_ca_cert` is set.
EOF
}
variable "tectonic_ca_key_alg" {
type = "string"
default = "RSA"
description = <<EOF
(optional) The algorithm used to generate tectonic_ca_key.
The default value is currently recommended.
This field is mandatory if `tectonic_ca_cert` is set.
EOF
}
variable "tectonic_stats_url" {
type = "string"
default = "https://stats-collector.tectonic.com"
description = "(internal) The Tectonic statistics collection URL to which to report."
}
variable "tectonic_networking" {
description = <<EOF
(optional) Configures the network to be used in Tectonic. One of the following values can be used:
- "flannel": enables overlay networking only. This is implemented by flannel using VXLAN.
- "canal": enables overlay networking including network policy. Overlay is implemented by flannel using VXLAN. Network policy is implemented by Calico.
- "calico-ipip": [ALPHA] enables BGP based networking. Routing and network policy is implemented by Calico. Note this has been tested on bare metal installations only.
- "none": disables the installation of any Pod level networking layer provided by Tectonic. By setting this value, users are expected to deploy their own solution to enable network connectivity for Pods and Services.
EOF
}
variable "tectonic_kubelet_debug_config" {
type = "string"
default = ""
description = "(internal) debug flags for the kubelet (used in CI only)"
}
variable "tectonic_ignition_master" {
type = "string"
default = ""
description = <<EOF
(internal) Ignition config file path. This is automatically generated by the installer.
EOF
}
variable "tectonic_ignition_worker" {
type = "string"
default = ""
description = <<EOF
(internal) Ignition config file path. This is automatically generated by the installer.
EOF
}
variable "tectonic_platform" {
type = "string"
description = <<EOF
(internal) The internal Terraform platform type, e.g. aws or libvirt
EOF
}
// This variable is generated by tectonic internally. Do not modify
variable "tectonic_cluster_id" {
type = "string"
description = "(internal) The Tectonic cluster id."
}

@@ -1,610 +0,0 @@
terraform {
required_version = ">= 0.10.7"
}
provider "archive" {
version = "1.0.0"
}
provider "external" {
version = "1.0.0"
}
provider "ignition" {
version = "1.0.0"
}
provider "local" {
version = "1.0.0"
}
provider "null" {
version = "1.0.0"
}
provider "random" {
version = "1.0.0"
}
provider "template" {
version = "1.0.0"
}
provider "tls" {
version = "1.0.1"
}
locals {
// The total amount of public CA certificates present in Tectonic.
// That is all custom CAs + kube CA + etcd CA + ingress CA
// This is a local constant, which needs to be dependency injected because TF cannot handle length() on computed values,
// see https://github.com/hashicorp/terraform/issues/10857#issuecomment-268289775.
tectonic_ca_count = "${length(var.tectonic_custom_ca_pem_list) + 3}"
tectonic_http_proxy_enabled = "${length(var.tectonic_http_proxy_address) > 0}"
}
variable "tectonic_config_version" {
description = <<EOF
(internal) This declares the version of the global configuration variables.
It has no impact on generated assets but declares the version contract of the configuration.
EOF
default = "1.0"
}
variable "tectonic_image_re" {
description = <<EOF
(internal) Regular expression used to extract repo and tag components
EOF
type = "string"
default = "/^([^/]+/[^/]+/[^/]+):(.*)$/"
}
variable "tectonic_container_images" {
description = "(internal) Container images to use"
type = "map"
default = {
addon_resizer = "gcr.io/google_containers/addon-resizer:2.1"
awscli = "quay.io/coreos/awscli:025a357f05242fdad6a81e8a6b520098aa65a600"
gcloudsdk = "google/cloud-sdk:178.0.0-alpine"
bootkube = "quay.io/coreos/bootkube:v0.8.1"
calico = "quay.io/calico/node:v2.6.1"
calico_cni = "quay.io/calico/cni:v1.11.0"
console = "quay.io/coreos/tectonic-console:v6.0.5"
error_server = "quay.io/coreos/tectonic-error-server:1.1"
etcd = "quay.io/coreos/etcd:v3.1.8"
etcd_operator = "quay.io/coreos/etcd-operator:v0.5.0"
flannel = "quay.io/coreos/flannel:v0.8.0-amd64"
flannel_cni = "quay.io/coreos/flannel-cni:v0.2.0"
heapster = "gcr.io/google_containers/heapster:v1.4.1"
hyperkube = "quay.io/coreos/hyperkube:v1.9.6_coreos.0"
identity = "quay.io/coreos/dex:v2.8.1"
ingress_controller = "quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0-beta.17"
kenc = "quay.io/coreos/kenc:0.0.2"
kubedns = "gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.8"
kubednsmasq = "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.8"
kubedns_sidecar = "gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.8"
kube_version = "quay.io/coreos/kube-version:0.1.0"
kube_version_operator = "quay.io/coreos/kube-version-operator:v1.9.6-kvo.4"
node_agent = "quay.io/coreos/node-agent:cd69b4a0f65b0d3a3b30edfce3bb184fd2a22c26"
pod_checkpointer = "quay.io/coreos/pod-checkpointer:e22cc0e3714378de92f45326474874eb602ca0ac"
stats_emitter = "quay.io/coreos/tectonic-stats:6e882361357fe4b773adbf279cddf48cb50164c1"
stats_extender = "quay.io/coreos/tectonic-stats-extender:487b3da4e175da96dabfb44fba65cdb8b823db2e"
tectonic_channel_operator = "quay.io/coreos/tectonic-channel-operator:0.6.4"
tectonic_etcd_operator = "quay.io/coreos/tectonic-etcd-operator:v0.0.2"
tectonic_prometheus_operator = "quay.io/coreos/tectonic-prometheus-operator:v1.9.5"
tectonic_cluo_operator = "quay.io/coreos/tectonic-cluo-operator:v0.3.2"
tectonic_torcx = "quay.io/coreos/tectonic-torcx:v0.2.0"
tectonic_alm_operator = "quay.io/coreos/tectonic-alm-operator:v0.4.0"
}
}
variable "tectonic_container_base_images" {
description = "(internal) Base images of the components to use"
type = "map"
default = {
tectonic_monitoring_auth = "quay.io/coreos/tectonic-monitoring-auth"
config_reload = "quay.io/coreos/configmap-reload"
addon_resizer = "quay.io/coreos/addon-resizer"
kube_state_metrics = "quay.io/coreos/kube-state-metrics"
grafana = "quay.io/coreos/monitoring-grafana"
grafana_watcher = "quay.io/coreos/grafana-watcher"
prometheus_operator = "quay.io/coreos/prometheus-operator"
prometheus_config_reload = "quay.io/coreos/prometheus-config-reloader"
prometheus = "quay.io/prometheus/prometheus"
alertmanager = "quay.io/prometheus/alertmanager"
node_exporter = "quay.io/prometheus/node-exporter"
kube_rbac_proxy = "quay.io/coreos/kube-rbac-proxy"
}
}
variable "tectonic_versions" {
description = "(internal) Versions of the components to use"
type = "map"
default = {
etcd = "3.1.8"
kubernetes = "1.9.6+tectonic.1"
monitoring = "1.9.5"
tectonic = "1.9.6-tectonic.1"
tectonic-etcd = "0.0.1"
cluo = "0.3.2"
alm = "0.4.0"
}
}
variable "tectonic_service_cidr" {
type = "string"
default = "10.3.0.0/16"
description = <<EOF
(optional) This declares the IP range to assign Kubernetes service cluster IPs in CIDR notation.
The maximum size of this IP range is /12
EOF
}
variable "tectonic_cluster_cidr" {
type = "string"
default = "10.2.0.0/16"
description = "(optional) This declares the IP range to assign Kubernetes pod IPs in CIDR notation."
}
variable "tectonic_master_count" {
type = "string"
default = "1"
description = <<EOF
The number of master nodes to be created.
This applies only to cloud platforms.
EOF
}
variable "tectonic_worker_count" {
type = "string"
default = "3"
description = <<EOF
The number of worker nodes to be created.
This applies only to cloud platforms.
EOF
}
variable "tectonic_etcd_count" {
type = "string"
default = "0"
description = <<EOF
The number of etcd nodes to be created.
If set to zero, the count of etcd nodes will be determined automatically.
Note: This is not supported on bare metal.
EOF
}
variable "tectonic_etcd_servers" {
description = <<EOF
(optional) List of external etcd v3 servers to connect with (hostnames/IPs only).
Needs to be set if using an external etcd cluster.
Note: If this variable is defined, the installer will not create self-signed certs.
To provide a CA certificate to trust the etcd servers, set "tectonic_etcd_ca_cert_path".
Example: `["etcd1", "etcd2", "etcd3"]`
EOF
type = "list"
default = []
}
variable "tectonic_etcd_tls_enabled" {
default = true
description = <<EOF
(optional) If set to `true`, all etcd endpoints will be configured to use the "https" scheme.
Note: If `tectonic_experimental` is set to `true` this variable has no effect, because the experimental self-hosted etcd always uses TLS.
EOF
}
variable "tectonic_etcd_ca_cert_path" {
type = "string"
default = "/dev/null"
description = <<EOF
(optional) The path of the file containing the CA certificate for TLS communication with etcd.
Note: This works only when used in conjunction with an external etcd cluster.
If set, the variable `tectonic_etcd_servers` must also be set.
EOF
}
variable "tectonic_etcd_client_cert_path" {
type = "string"
default = "/dev/null"
description = <<EOF
(optional) The path of the file containing the client certificate for TLS communication with etcd.
Note: This works only when used in conjunction with an external etcd cluster.
If set, the variables `tectonic_etcd_servers`, `tectonic_etcd_ca_cert_path`, and `tectonic_etcd_client_key_path` must also be set.
EOF
}
variable "tectonic_etcd_client_key_path" {
type = "string"
default = "/dev/null"
description = <<EOF
(optional) The path of the file containing the client key for TLS communication with etcd.
Note: This works only when used in conjunction with an external etcd cluster.
If set, the variables `tectonic_etcd_servers`, `tectonic_etcd_ca_cert_path`, and `tectonic_etcd_client_cert_path` must also be set.
EOF
}
variable "tectonic_base_domain" {
type = "string"
description = <<EOF
The base DNS domain of the cluster. It must NOT contain a trailing period. Some
DNS providers will automatically add this if necessary.
Example: `openstack.dev.coreos.systems`.
Note: This field MUST be set manually prior to creating the cluster.
This applies only to cloud platforms.
[Azure-specific NOTE]
To use Azure-provided DNS, `tectonic_base_domain` should be set to `""`
If using DNS records, ensure that `tectonic_base_domain` is set to a properly configured external DNS zone.
Instructions for configuring delegated domains for Azure DNS can be found here: https://docs.microsoft.com/en-us/azure/dns/dns-delegate-domain-azure-dns
EOF
}
variable "tectonic_cluster_name" {
type = "string"
description = <<EOF
The name of the cluster.
If used in a cloud-environment, this will be prepended to `tectonic_base_domain` resulting in the URL to the Tectonic console.
Note: This field MUST be set manually prior to creating the cluster.
Warning: Special characters in the name like '.' may cause errors on OpenStack platforms due to resource name constraints.
EOF
}
variable "tectonic_pull_secret_path" {
type = "string"
default = ""
description = <<EOF
The path the pull secret file in JSON format.
This is known to be a "Docker pull secret" as produced by the docker login [1] command.
A sample JSON content is shown in [2].
You can download the pull secret from your Account overview page at [3].
[1] https://docs.docker.com/engine/reference/commandline/login/
[2] https://coreos.com/os/docs/latest/registry-authentication.html#manual-registry-auth-setup
[3] https://account.coreos.com/overview
Note: This field MUST be set manually prior to creating the cluster unless `tectonic_vanilla_k8s` is set to `true`.
EOF
}
variable "tectonic_license_path" {
type = "string"
default = ""
description = <<EOF
The path to the Tectonic license file.
You can download the Tectonic license file from your Account overview page at [1].
[1] https://account.coreos.com/overview
Note: This field MUST be set manually prior to creating the cluster unless `tectonic_vanilla_k8s` is set to `true`.
EOF
}
variable "tectonic_container_linux_channel" {
type = "string"
default = "stable"
description = <<EOF
(optional) The Container Linux update channel.
Examples: `stable`, `beta`, `alpha`
EOF
}
variable "tectonic_container_linux_version" {
type = "string"
default = "latest"
description = <<EOF
The Container Linux version to use. Set to `latest` to select the latest available version for the selected update channel.
Examples: `latest`, `1465.6.0`
EOF
}
variable "tectonic_update_server" {
type = "string"
default = "https://tectonic.update.core-os.net"
description = "(internal) The URL of the Tectonic Omaha update server"
}
variable "tectonic_update_channel" {
type = "string"
default = "tectonic-1.8-production"
description = "(internal) The Tectonic Omaha update channel"
}
variable "tectonic_update_app_id" {
type = "string"
default = "6bc7b986-4654-4a0f-94b3-84ce6feb1db4"
description = "(internal) The Tectonic Omaha update App ID"
}
variable "tectonic_admin_email" {
type = "string"
description = <<EOF
(internal) The e-mail address used to:
1. log in as the admin user to the Tectonic Console.
2. generate DNS zones for some providers.
Note: This field MUST be in all lower-case e-mail address format and set manually prior to creating the cluster.
EOF
}
variable "tectonic_admin_password" {
type = "string"
description = <<EOF
(internal) The admin user password to login to the Tectonic Console.
Note: This field MUST be set manually prior to creating the cluster. Backslashes and double quotes must
also be escaped.
EOF
}
variable "tectonic_ca_cert" {
type = "string"
default = ""
description = <<EOF
(optional) The content of the PEM-encoded CA certificate, used to generate Tectonic Console's server certificate.
If left blank, a CA certificate will be automatically generated.
EOF
}
variable "tectonic_ca_key" {
type = "string"
default = ""
description = <<EOF
(optional) The content of the PEM-encoded CA key, used to generate Tectonic Console's server certificate.
This field is mandatory if `tectonic_ca_cert` is set.
EOF
}
variable "tectonic_ca_key_alg" {
type = "string"
default = "RSA"
description = <<EOF
(optional) The algorithm used to generate `tectonic_ca_key`.
The default value is currently recommended.
This field is mandatory if `tectonic_ca_cert` is set.
EOF
}
variable "tectonic_tls_validity_period" {
type = "string"
default = "26280"
description = <<EOF
Validity period of the self-signed certificates (in hours).
Default is 3 years.
This setting is ignored if user-provided certificates are used.
EOF
}
variable "tectonic_vanilla_k8s" {
default = false
description = <<EOF
If set to true, a vanilla Kubernetes cluster will be deployed, omitting any Tectonic assets.
EOF
}
variable "tectonic_stats_url" {
type = "string"
default = "https://stats-collector.tectonic.com"
description = "(internal) The Tectonic statistics collection URL to which to report."
}
variable "tectonic_ddns_server" {
type = "string"
default = ""
description = <<EOF
(optional) This only applies if you use the modules/dns/ddns module.
Specifies the RFC2136 Dynamic DNS server IP/host to register IP addresses to.
EOF
}
variable "tectonic_ddns_key_name" {
type = "string"
default = ""
description = <<EOF
(optional) This only applies if you use the modules/dns/ddns module.
Specifies the RFC2136 Dynamic DNS server key name.
EOF
}
variable "tectonic_ddns_key_algorithm" {
type = "string"
default = ""
description = <<EOF
(optional) This only applies if you use the modules/dns/ddns module.
Specifies the RFC2136 Dynamic DNS server key algorithm.
EOF
}
variable "tectonic_ddns_key_secret" {
type = "string"
default = ""
description = <<EOF
(optional) This only applies if you use the modules/dns/ddns module.
Specifies the RFC2136 Dynamic DNS server key secret.
EOF
}
variable "tectonic_networking" {
default = "flannel"
description = <<EOF
(optional) Configures the network to be used in Tectonic. One of the following values can be used:
- "flannel": enables overlay networking only. This is implemented by flannel using VXLAN.
- "canal": [ALPHA] enables overlay networking including network policy. Overlay is implemented by flannel using VXLAN. Network policy is implemented by Calico.
- "calico": [ALPHA] enables BGP based networking. Routing and network policy is implemented by Calico. Note this has been tested on baremetal installations only.
EOF
}
variable "tectonic_self_hosted_etcd" {
default = ""
description = <<EOF
(internal) [ALPHA] If set to one of the following values, self-hosted etcd is deployed:
- "enabled": Deploys a self-hosted etcd cluster.
- "pv_backup": Deploys a self-hosted etcd cluster including backups to Persistence Volumes.
`tectonic_etcd_backup_size` and `tectonic_etcd_backup_storage_class` must be configured when using this setting.
EOF
}
variable "tectonic_etcd_backup_size" {
type = "string"
description = "(optional) The size in MB of the PersistentVolume used for handling etcd backups."
default = "512"
}
variable "tectonic_etcd_backup_storage_class" {
type = "string"
default = ""
description = "(optional) The name of an existing Kubernetes StorageClass that will be used for handling etcd backups."
}
variable "tectonic_bootstrap_upgrade_cl" {
type = "string"
default = "true"
description = "(internal) Whether to trigger a ContainerLinux upgrade on node bootstrap."
}
variable "tectonic_kubelet_debug_config" {
type = "string"
default = ""
description = "(internal) debug flags for the kubelet (used in CI only)"
}
variable "tectonic_custom_ca_pem_list" {
type = "list"
default = []
description = <<EOF
(optional) A list of PEM encoded CA files that will be installed in /etc/ssl/certs on etcd, master, and worker nodes.
EOF
}
variable "tectonic_iscsi_enabled" {
type = "string"
default = "false"
description = "(optional) Start iscsid.service to enable iscsi volume attachment."
}
variable "tectonic_http_proxy_address" {
type = "string"
default = ""
description = <<EOF
(optional) HTTP proxy address.
Example: `http://myproxy.example.com`
EOF
}
variable "tectonic_https_proxy_address" {
type = "string"
default = ""
description = <<EOF
(optional) HTTPS proxy address.
Example: `http://myproxy.example.com`
EOF
}
variable "tectonic_no_proxy" {
type = "list"
default = []
description = <<EOF
(optional) List of local endpoints that will not use the HTTP proxy.
Example: `["127.0.0.1","localhost",".example.com","10.3.0.1"]`
EOF
}
variable "tectonic_ntp_servers" {
type = "list"
default = []
description = <<EOF
(optional) A list of NTP servers to be used for time synchronization on the cluster nodes.
If left blank, the default Container Linux NTP servers will be used.
EOF
}
variable "tectonic_nfs_config_file" {
type = "string"
default = ""
description = <<EOF
(optional) The path to an NFS configuration file to be placed at /etc/conf.d/nfs
on the Container Linux nodes.
EOF
}
# Workaround for https://github.com/hashicorp/hil/issues/50
locals {
_tectonic_nfs_config_file = "${var.tectonic_nfs_config_file != "" ? var.tectonic_nfs_config_file : "/dev/null"}"
}
variable "tectonic_proxy_exclusive_units" {
default = false
description = <<EOF
(optional) When set to true, proxy settings are applied only to processes started through systemd units;
global proxy settings are not affected in this case.
When set to false, the proxy settings apply globally, including to all processes launched by users.
EOF
}

@ -1,161 +0,0 @@
provider "aws" {
region = "${var.vpc_aws_region}"
}
# Declare the data source
data "aws_availability_zones" "available" {}
resource "aws_vpc" "vpc" {
cidr_block = "${var.vpc_cidr}"
enable_dns_support = true
enable_dns_hostnames = true
tags {
Name = "${var.vpc_name}"
}
}
data "aws_ami" "coreos_ami" {
most_recent = true
filter {
name = "name"
values = ["CoreOS-stable-*"]
}
filter {
name = "architecture"
values = ["x86_64"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
filter {
name = "owner-id"
values = ["190570271432"]
}
}
resource "aws_instance" "bastion" {
# 1st available AZ
availability_zone = "${data.aws_availability_zones.available.names[0]}"
ami = "${data.aws_ami.coreos_ami.image_id}"
instance_type = "${var.instance_type}"
subnet_id = "${aws_subnet.pub_subnet_generic.id}"
vpc_security_group_ids = ["${compact(concat(list(aws_security_group.powerdns.id), list(aws_security_group.vpn_sg.id)))}"]
source_dest_check = false
key_name = "${var.ssh_key}"
user_data = "${data.ignition_config.main.rendered}"
depends_on = ["aws_eip.ovpn_eip"]
tags {
Name = "${var.vpc_name}-server"
}
}
data "ignition_config" "main" {
files = ["${data.ignition_file.nginx_conf.id}"]
systemd = ["${compact(list(
data.ignition_systemd_unit.gateway_service.id,
data.ignition_systemd_unit.nginx_service.id,
data.ignition_systemd_unit.openvpn_service.id,
data.ignition_systemd_unit.powerdns_service.id,
data.ignition_systemd_unit.update-engine.id,
data.ignition_systemd_unit.locksmithd.id,
))}"]
}
data "ignition_systemd_unit" "update-engine" {
name = "update-engine.service"
mask = true
}
data "ignition_systemd_unit" "locksmithd" {
name = "locksmithd.service"
mask = true
}
# IGW
resource "aws_internet_gateway" "igw" {
vpc_id = "${aws_vpc.vpc.id}"
tags {
Name = "${var.vpc_name}-igw"
}
}
# General-purpose public subnet, used for OVPN access and IGW/NAT attachment.
resource "aws_subnet" "pub_subnet_generic" {
vpc_id = "${aws_vpc.vpc.id}"
# 1st available AZ
availability_zone = "${data.aws_availability_zones.available.names[0]}"
cidr_block = "10.0.255.0/24"
map_public_ip_on_launch = true
tags {
Name = "${var.vpc_name}-vpn"
}
}
resource "aws_route_table_association" "pub_subnet_generic" {
subnet_id = "${aws_subnet.pub_subnet_generic.id}"
route_table_id = "${aws_route_table.pub_rt.id}"
}
# public subnet route table
resource "aws_route_table" "pub_rt" {
vpc_id = "${aws_vpc.vpc.id}"
route {
cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.igw.id}"
}
tags {
Name = "${var.vpc_name}-public"
}
}
# private subnets
resource "aws_subnet" "priv_subnet" {
count = "${var.subnet_count}"
vpc_id = "${aws_vpc.vpc.id}"
availability_zone = "${element(data.aws_availability_zones.available.names, count.index)}"
cidr_block = "${cidrsubnet(var.vpc_cidr, 8, count.index + 100)}"
tags {
Name = "${var.vpc_name}-${count.index}"
}
}
resource "aws_route_table_association" "priv_subnet" {
count = "${var.subnet_count}"
subnet_id = "${aws_subnet.priv_subnet.*.id[count.index]}"
route_table_id = "${aws_route_table.priv_rt.id}"
}
# private subnet route table
resource "aws_route_table" "priv_rt" {
vpc_id = "${aws_vpc.vpc.id}"
propagating_vgws = ["${aws_vpn_gateway.vpg.id}"]
route {
cidr_block = "0.0.0.0/0"
instance_id = "${aws_instance.bastion.id}"
}
route {
cidr_block = "${var.local_network_cidr}"
gateway_id = "${aws_vpn_gateway.vpg.id}"
}
tags {
Name = "${var.vpc_name}-private"
}
}

@ -1,74 +0,0 @@
# Security Group
resource "aws_security_group" "powerdns" {
vpc_id = "${aws_vpc.vpc.id}"
}
resource "aws_security_group_rule" "powerdns_egress" {
type = "egress"
security_group_id = "${aws_security_group.powerdns.id}"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
resource "aws_security_group_rule" "powerdns_ingress_ssh" {
type = "ingress"
security_group_id = "${aws_security_group.powerdns.id}"
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
to_port = 22
}
resource "aws_security_group_rule" "powerdns_ingress_http" {
type = "ingress"
security_group_id = "${aws_security_group.powerdns.id}"
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 80
to_port = 80
}
resource "aws_security_group_rule" "powerdns_ingress_https" {
type = "ingress"
security_group_id = "${aws_security_group.powerdns.id}"
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 443
to_port = 443
}
resource "aws_security_group_rule" "powerdns_ingress_api" {
type = "ingress"
security_group_id = "${aws_security_group.powerdns.id}"
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 8081
to_port = 8081
}
resource "aws_security_group_rule" "powerdns_ingress_dns" {
type = "ingress"
security_group_id = "${aws_security_group.powerdns.id}"
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 53
to_port = 53
}
resource "aws_security_group_rule" "powerdns_ingress_dns_udp" {
type = "ingress"
security_group_id = "${aws_security_group.powerdns.id}"
protocol = "udp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 53
to_port = 53
}

@ -1,76 +0,0 @@
data "template_file" "gateway_service" {
template = "${file("${path.module}/resources/gateway.service")}"
vars {
private_cidr = "${cidrsubnet(var.vpc_cidr, 6, 25)}"
}
}
data "ignition_systemd_unit" "gateway_service" {
name = "gateway.service"
enabled = true
content = "${data.template_file.gateway_service.rendered}"
}
data "template_file" "nginx_service" {
template = "${file("${path.module}/resources/nginx.service")}"
vars {
username = "${var.nginx_username}"
password = "${var.nginx_password}"
nginx_image = "${var.container_images["nginx"]}"
}
}
data "ignition_systemd_unit" "nginx_service" {
name = "nginx.service"
enabled = true
content = "${data.template_file.nginx_service.rendered}"
}
data "template_file" "nginx_conf" {
template = "${file("${path.module}/resources/nginx.conf")}"
}
data "ignition_file" "nginx_conf" {
filesystem = "root"
path = "/home/core/nginx-config/default.conf"
mode = 0644
content {
content = "${data.template_file.nginx_conf.rendered}"
}
}
data "template_file" "openvpn_service" {
template = "${file("${path.module}/resources/openvpn.service")}"
vars {
ip = "${aws_eip.ovpn_eip.public_ip}"
openvpn_image = "${var.container_images["openvpn"]}"
}
}
data "ignition_systemd_unit" "openvpn_service" {
name = "openvpn.service"
enabled = true
content = "${data.template_file.openvpn_service.rendered}"
}
data "template_file" "powerdns_service" {
template = "${file("${path.module}/resources/powerdns.service")}"
vars {
dns_zone = "${var.base_domain}"
pdns_api_key = "${var.pdns_api_key}"
mysql_password = "${var.mysql_password}"
powerdns_image = "${var.container_images["powerdns"]}"
mysql_image = "${var.container_images["mysql"]}"
}
}
data "ignition_systemd_unit" "powerdns_service" {
name = "powerdns.service"
enabled = true
content = "${data.template_file.powerdns_service.rendered}"
}

@ -1,95 +0,0 @@
# placeholders for access_key / secret_key
# should be fed through env var or variable file
# https://www.terraform.io/docs/configuration/variables.html
variable "vpc_name" {
description = "The name of the VPC to identify created resources."
default = "bastion"
}
variable "instance_type" {
description = "The type of the ec2 machine."
default = "t2.micro"
}
variable "base_domain" {
default = "tectonic-ci.de"
description = "The base domain for this cluster's FQDN"
}
variable "vpc_aws_region" {
description = "The target AWS region for the cluster"
default = "us-gov-west-1"
}
variable "vpc_cidr" {
default = "10.0.0.0/16"
description = "The CIDR range used for your entire VPC"
}
variable "subnet_count" {
default = 4
description = "Number of private subnets to pre-create"
}
variable "local_network_cidr" {
default = "10.7.0.0/16"
description = "IP range in the network your laptop is on (dosn't actually matter unless your instances need to connect to the local network your laptop is on)"
}
variable "ssh_key" {
description = "Name of an SSH key located within the AWS region. Example: coreos-user."
default = ""
}
variable "nginx_username" {
description = "Used for retrieving the OpenVPN client config file."
}
variable "nginx_password" {
description = "Used for retrieving the OpenVPN client config file."
}
variable "mysql_password" {
description = "Used as PowerDNS backend."
}
variable "pdns_api_key" {
description = "Used by clients to communicate with the PowerDNS API."
}
variable "container_images" {
description = "Container images to use"
type = "map"
default = {
powerdns = "quay.io/coreos/pdns:20678f8bffc316579367d885ca4fcb229b1dbc1d"
mysql = "quay.io/coreos/mysql:5.7.21"
openvpn = "quay.io/coreos/openvpn:2.4"
nginx = "quay.io/coreos/nginx:1.13.7-alpine"
}
}
output "ovpn_url" {
value = "http://${aws_eip.ovpn_eip.public_ip}"
}
output "base_domain" {
value = "${var.base_domain}"
}
output "vpc_id" {
value = "${aws_vpc.vpc.id}"
}
output "vpc_dns_ip" {
value = "${aws_instance.bastion.private_ip}"
}
output "dns_api_url" {
value = "http://${aws_instance.bastion.private_ip}:8081"
}
output "subnets" {
value = "${aws_subnet.priv_subnet.*.id}"
}

@ -1,108 +0,0 @@
resource "aws_vpn_gateway" "vpg" {
vpc_id = "${aws_vpc.vpc.id}"
# 1st available AZ
availability_zone = "${data.aws_availability_zones.available.names[0]}"
tags {
Name = "${var.vpc_name}-vpg"
}
}
resource "aws_eip" "ovpn_eip" {
vpc = true
}
resource "aws_eip_association" "vpn_eip_assoc" {
instance_id = "${aws_instance.bastion.id}"
allocation_id = "${aws_eip.ovpn_eip.id}"
}
resource "aws_security_group" "vpn_sg" {
name = "ovpn-server-sg"
description = "Allow all inbound traffic"
vpc_id = "${aws_vpc.vpc.id}"
ingress {
from_port = 0
to_port = 65535
protocol = "tcp"
cidr_blocks = ["${cidrsubnet(var.vpc_cidr, 6, 25)}"]
}
ingress {
from_port = 943
to_port = 943
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 943
to_port = 943
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 1194
to_port = 1194
protocol = "udp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
# all
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags {
Name = "${var.vpc_name}-ovpn-sg"
}
}
resource "aws_customer_gateway" "customer_gateway" {
bgp_asn = 65000
ip_address = "${aws_eip.ovpn_eip.public_ip}"
type = "ipsec.1"
tags {
Name = "${var.vpc_name}-customer-gateway"
}
}
resource "aws_vpn_connection" "main" {
vpn_gateway_id = "${aws_vpn_gateway.vpg.id}"
customer_gateway_id = "${aws_customer_gateway.customer_gateway.id}"
type = "ipsec.1"
static_routes_only = true
tags {
Name = "${var.vpc_name}-vpn-conn"
}
}

@ -1,118 +0,0 @@
provider "aws" {
region = "${var.vpc_aws_region}"
}
# Declare the data source
data "aws_availability_zones" "available" {}
resource "aws_vpc" "vpc" {
cidr_block = "${var.vpc_cidr}"
enable_dns_support = true
enable_dns_hostnames = true
tags {
Name = "${var.vpc_name}"
}
}
# Private DNS zone
resource "aws_route53_zone" "priv_zone" {
name = "${var.base_domain}"
vpc_id = "${aws_vpc.vpc.id}"
comment = "Managed by Terraform"
tags {
Name = "${var.vpc_name}"
}
}
# IGW
resource "aws_internet_gateway" "igw" {
vpc_id = "${aws_vpc.vpc.id}"
tags {
Name = "${var.vpc_name}-igw"
}
}
# NAT Gateway
resource "aws_eip" "natgw" {
vpc = true
}
resource "aws_nat_gateway" "natgw" {
allocation_id = "${aws_eip.natgw.id}"
subnet_id = "${aws_subnet.pub_subnet_generic.id}"
depends_on = ["aws_internet_gateway.igw"]
}
# General-purpose public subnet, used for OVPN access and IGW/NAT attachment.
resource "aws_subnet" "pub_subnet_generic" {
vpc_id = "${aws_vpc.vpc.id}"
# 1st available AZ
availability_zone = "${data.aws_availability_zones.available.names[0]}"
cidr_block = "10.0.255.0/24"
map_public_ip_on_launch = true
tags {
Name = "${var.vpc_name}-vpn"
}
}
resource "aws_route_table_association" "pub_subnet_generic" {
subnet_id = "${aws_subnet.pub_subnet_generic.id}"
route_table_id = "${aws_route_table.pub_rt.id}"
}
# public subnet route table
resource "aws_route_table" "pub_rt" {
vpc_id = "${aws_vpc.vpc.id}"
route {
cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.igw.id}"
}
tags {
Name = "${var.vpc_name}-public"
}
}
# private subnets
resource "aws_subnet" "priv_subnet" {
count = "${var.subnet_count}"
vpc_id = "${aws_vpc.vpc.id}"
availability_zone = "${element(data.aws_availability_zones.available.names, count.index)}"
cidr_block = "${cidrsubnet(var.vpc_cidr, 8, count.index + 100)}"
tags {
Name = "${var.vpc_name}-${count.index}"
}
}
resource "aws_route_table_association" "priv_subnet" {
count = "${var.subnet_count}"
subnet_id = "${aws_subnet.priv_subnet.*.id[count.index]}"
route_table_id = "${aws_route_table.priv_rt.id}"
}
# private subnet route table
resource "aws_route_table" "priv_rt" {
vpc_id = "${aws_vpc.vpc.id}"
propagating_vgws = ["${aws_vpn_gateway.vpg.id}"]
route {
cidr_block = "0.0.0.0/0"
nat_gateway_id = "${aws_nat_gateway.natgw.id}"
}
route {
cidr_block = "${var.local_network_cidr}"
gateway_id = "${aws_vpn_gateway.vpg.id}"
}
tags {
Name = "${var.vpc_name}-private"
}
}

@ -1,60 +0,0 @@
# placeholders for access_key / secret_key
# should be fed through env var or variable file
# https://www.terraform.io/docs/configuration/variables.html
variable vpc_name {
description = "The name of the VPC to identify created resources."
}
variable base_domain {
default = "tectonic-ci.de"
description = "The base domain for this cluster's FQDN"
}
variable vpc_aws_region {
description = "The target AWS region for the cluster"
}
variable vpc_cidr {
default = "10.0.0.0/16"
description = "The CIDR range used for your entire VPC"
}
variable subnet_count {
default = 4
description = "Number of private subnets to pre-create"
}
variable local_network_cidr {
default = "10.7.0.0/16"
description = "IP range in the network your laptop is on (dosn't actually matter unless your instances need to connect to the local network your laptop is on)"
}
variable ovpn_password {
default = "PASSWORD"
description = "password to use when connecting"
}
output "ovpn_url" {
value = "https://${aws_eip.ovpn_eip.public_ip}:443"
}
output "base_domain" {
value = "${var.base_domain}"
}
output "private_zone_id" {
value = "${aws_route53_zone.priv_zone.id}"
}
output "vpc_id" {
value = "${aws_vpc.vpc.id}"
}
output "vpc_dns" {
value = "${cidrhost(var.vpc_cidr, 2)}"
}
output "subnets" {
value = "${aws_subnet.priv_subnet.*.id}"
}

@ -1,142 +0,0 @@
# For details see https://docs.openvpn.net/how-to-tutorialsguides/virtual-platforms/amazon-ec2-appliance-ami-quick-start-guide
data "aws_ami" "openvpn_ami" {
most_recent = true
filter {
name = "name"
values = ["OpenVPN Access Server*"]
}
filter {
name = "architecture"
values = ["x86_64"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
filter {
name = "owner-id"
values = ["679593333241"]
}
}
resource "aws_vpn_gateway" "vpg" {
vpc_id = "${aws_vpc.vpc.id}"
# 1st available AZ
availability_zone = "${data.aws_availability_zones.available.names[0]}"
tags {
Name = "${var.vpc_name}-vpg"
}
}
resource "aws_instance" "ovpn" {
# 1st available AZ
availability_zone = "${data.aws_availability_zones.available.names[0]}"
ami = "${data.aws_ami.openvpn_ami.image_id}"
instance_type = "t2.micro"
subnet_id = "${aws_subnet.pub_subnet_generic.id}"
vpc_security_group_ids = ["${aws_security_group.vpn_sg.id}"]
user_data = <<EOF
public_hostname=${aws_eip.ovpn_eip.public_ip}
admin_user=openvpn
admin_pw=${var.ovpn_password}
reroute_gw=1
reroute_dns=1
EOF
depends_on = ["aws_eip.ovpn_eip"]
tags {
Name = "${var.vpc_name}-ovpn-server"
}
}
resource "aws_eip" "ovpn_eip" {
vpc = true
}
resource "aws_eip_association" "vpn_eip_assoc" {
instance_id = "${aws_instance.ovpn.id}"
allocation_id = "${aws_eip.ovpn_eip.id}"
}
resource "aws_security_group" "vpn_sg" {
name = "ovpn-server-sg"
description = "Allow all inbound traffic"
vpc_id = "${aws_vpc.vpc.id}"
ingress {
from_port = 943
to_port = 943
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 943
to_port = 943
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 1194
to_port = 1194
protocol = "udp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
# all
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags {
Name = "${var.vpc_name}-ovpn-sg"
}
}
resource "aws_customer_gateway" "customer_gateway" {
bgp_asn = 65000
ip_address = "${aws_eip.ovpn_eip.public_ip}"
type = "ipsec.1"
tags {
Name = "${var.vpc_name}-customer-gateway"
}
}
resource "aws_vpn_connection" "main" {
vpn_gateway_id = "${aws_vpn_gateway.vpg.id}"
customer_gateway_id = "${aws_customer_gateway.customer_gateway.id}"
type = "ipsec.1"
static_routes_only = true
tags {
Name = "${var.vpc_name}-vpn-conn"
}
}

@ -1,41 +0,0 @@
{
"tectonic_admin_email": "admin@example.com",
"tectonic_admin_password": "password",
"tectonic_aws_etcd_ec2_type": "t2.medium",
"tectonic_aws_etcd_root_volume_size": 30,
"tectonic_aws_etcd_root_volume_type": "gp2",
"tectonic_aws_extra_tags": {
"test_tag": "testing"
},
"tectonic_aws_master_custom_subnets": {
"us-west-1a": "10.0.0.0/19",
"us-west-1c": "10.0.32.0/19"
},
"tectonic_aws_master_ec2_type": "t2.medium",
"tectonic_aws_master_root_volume_size": 30,
"tectonic_aws_master_root_volume_type": "gp2",
"tectonic_aws_private_endpoints": false,
"tectonic_aws_region": "us-west-1",
"tectonic_aws_ssh_key": "tectonic-jenkins",
"tectonic_aws_vpc_cidr_block": "10.0.0.0/16",
"tectonic_aws_worker_custom_subnets": {
"us-west-1a": "10.0.64.0/19",
"us-west-1c": "10.0.96.0/19"
},
"tectonic_aws_worker_ec2_type": "t2.medium",
"tectonic_aws_worker_root_volume_size": 30,
"tectonic_aws_worker_root_volume_type": "gp2",
"tectonic_base_domain": "tectonic.dev.coreos.systems",
"tectonic_cluster_cidr": "10.2.0.0/16",
"tectonic_cluster_name": "test",
"tectonic_dns_name": "test",
"tectonic_etcd_count": 3,
"tectonic_kube_apiserver_service_ip": "10.3.0.1",
"tectonic_kube_dns_service_ip": "10.3.0.10",
"tectonic_kube_etcd_service_ip": "10.3.0.15",
"tectonic_license_path": "./license.txt",
"tectonic_master_count": 3,
"tectonic_pull_secret_path": "./pull_secret.json",
"tectonic_service_cidr": "10.3.0.0/16",
"tectonic_worker_count": 3
}

@ -1,44 +0,0 @@
{
"tectonic_admin_email": "admin@example.com",
"tectonic_admin_password": "password",
"tectonic_base_domain": "unused",
"tectonic_cluster_cidr": "10.2.0.0/16",
"tectonic_cluster_name": "my-cluster",
"tectonic_container_linux_version": "1353.8.0",
"tectonic_dns_name": "",
"tectonic_kube_apiserver_service_ip": "10.3.0.1",
"tectonic_kube_dns_service_ip": "10.3.0.10",
"tectonic_kube_etcd_service_ip": "10.3.0.15",
"tectonic_license_path": "./license.txt",
"tectonic_metal_controller_domain": "cluster.example.com",
"tectonic_metal_controller_domains": [
"node1.example.com"
],
"tectonic_metal_controller_macs": [
"52:54:00:a1:9c:ae"
],
"tectonic_metal_controller_names": [
"node1"
],
"tectonic_metal_ingress_domain": "tectonic.example.com",
"tectonic_metal_matchbox_ca": "-----BEGIN CERTIFICATE-----\nMIIFDTCCAvWgAwIBAgIJAIuXq10k2OFlMA0GCSqGSIb3DQEBCwUAMBIxEDAOBgNV\nBAMMB2Zha2UtY2EwHhcNMTcwMjAxMjIxMzI0WhcNMjcwMTMwMjIxMzI0WjASMRAw\nDgYDVQQDDAdmYWtlLWNhMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA\nzzHsB56F6oZjsVBKzfpicsG+mVHQ/QzA4jqRCbQ8Zr12NtUZKnPUVwDoFf4WTfmy\nZ0u8Uv+6/B/8un3LGsIaJEugPfRboc2oZKJcqfMJSFfLb/wkmT0D/1HJR60ml/M5\nwpHeh4vQ7BhktNsK90EjdlLvr1GDfevXArnye5ksEInOSX9nXVsGPrm0AGSffhmY\nuUAjY8f9IspJa1j4vL6NI89GWO4jqME+SUnuI4SYIkuQJoSElofAIX2b5Tk3dFya\nVKmAq2L89teCMYsciPbFa/Z2HvDNZ7pC17Ow7zr1f+V5BU18h3cLk610YNPcEBw0\nf94+mePsmMSMjUM0f+NMFyDERF+pys60/3qqVWrJe/FkJM6NDCyWXXXAfTxIwLq0\nCVrlWALdTc+RMAPI2sxAdUp4BqAuek4SjIg3FuoJrBs3EAUPfybclJ7g3HJwyXM2\n3WIe10BnSk+rGzd4KMVbYw5/nM8Nc/Y20R2an/vVZn6xTxs9o6hhEHF7d5iws6Bi\n7/jv+jdZhLG8b3sG6Tj7a7YdvKWqH/mSPFlc/sevYOjR7NKYRMwGnl0d9qf+Xe5V\nxyH1llIXPs6+y1B4tRyL/tulyeVqi25+I4QVAYypxWU8CPyw7tsSdOsSTbeGTmXj\nehelY/BCjAqAcexL7oRV7dy7VZ1Ezg6zQRwMt0Tar90CAwEAAaNmMGQwHQYDVR0O\nBBYEFNGPoXTjJnHjG2zMpjSg/9vNO/trMB8GA1UdIwQYMBaAFNGPoXTjJnHjG2zM\npjSg/9vNO/trMBIGA1UdEwEB/wQIMAYBAf8CAQAwDgYDVR0PAQH/BAQDAgGGMA0G\nCSqGSIb3DQEBCwUAA4ICAQC9V/0iiEZYHz7xbezHpeGHwmecH5oylEvAeCcN10gx\nHFvUN+XMyBaPqN7iRtx/rSqyp2iN2AK1Cdn1viOSRc09lwPiuj9V4diSDyPwJWxd\n60gqd5E9F9gQXlenWoIdm7kW8Lo8HLfx8ItYKGpE51JUctTmGY5WURRmBlVKr1LA\nhbVsAWBaGQfPyW1CrFcxxc5mCABxWOxjRjLw8A8c5IXD0Q5C5pRd0BckBHKTdl40\nowm893oPEQcu/1C432T4vIddVh1Ktq1pd7O/9BPYOaPryzf7076xSwZ0bSuBUGRq\nVd3STfu5QRqpMv4dIrhqRofmIUzjOHLRX8Lx2pzgYcMgMQ8O+jM+ETrYD6rsDoLQ\nuiVSWZK0YFndKzNTA04u57arRumWKqqfS0kkDFayumyv6KaDS6YZdsqSRmaiLAOG\nF6jchpUtkDhDY0v/Y7jESUneT0hRnqNMPAKJMNhE4hS+1qkcP/ikQQgZl/OWma1z\nHUyBGT4OGP2T3JIfq12Z4vC5FGVD4aD/frTvPMlifV3i8lKlYZs271JPXUo6ASIA\nZSBpV5QilOlE25Q5Lcw0yWmN4KwxqBL9bJ5W9D1I0qhWxaMF78m+8vLIFv+dAylE\nOd27a+1We/P5ey7WRlwCfuEcFV7nYS/qMykYdQ9fxHSPgTPlrGrSwKstaaIIqOkE\nkA==\n-----END CERTIFICATE-----\n",
"tectonic_metal_matchbox_client_cert": "-----BEGIN CERTIFICATE-----\nMIIEYDCCAkigAwIBAgICEAEwDQYJKoZIhvcNAQELBQAwEjEQMA4GA1UEAwwHZmFr\nZS1jYTAeFw0xNzAyMDEyMjEzMjVaFw0xODAyMDEyMjEzMjVaMBYxFDASBgNVBAMM\nC2Zha2UtY2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr8S7\nx/tAS6W+aRW3X833OvNfxXjUJAiRkUV85Raln7tqVcTG/9iyRhWgNpUn/WU1/3qV\nobto4ZCURIwoQh0kWk8io1lafZJ+S6Znm3+0TKo7u6QMavolJyetsOQkT/bIoZ73\n09fhk4Vu9GILjtZtxV7GDb4WqR9R7z77nYTdHMio/BQVk+Xg6rkOsMRyoR+B9JHG\nn9mvXLZSi8Q+3ABtsN6flPt7mTkhFFFvTgWxtzgVbeORT/uFxIV/IMjtGseUIzvF\nGUQP6KCyCJb3Kp4rxSxIbi35mFqEWXjB7BVT/0pjx1mc5tSvGuFl7G4N/MmGe3Zq\nZCF4FalpiPGAInKrWQIDAQABo4G7MIG4MAkGA1UdEwQCMAAwEQYJYIZIAYb4QgEB\nBAQDAgeAMDMGCWCGSAGG+EIBDQQmFiRPcGVuU1NMIEdlbmVyYXRlZCBDbGllbnQg\nQ2VydGlmaWNhdGUwHQYDVR0OBBYEFNZOj+0OOvhOFEtGGriZrPVCSzc3MB8GA1Ud\nIwQYMBaAFNGPoXTjJnHjG2zMpjSg/9vNO/trMA4GA1UdDwEB/wQEAwIF4DATBgNV\nHSUEDDAKBggrBgEFBQcDAjANBgkqhkiG9w0BAQsFAAOCAgEAiiGHlmPI6RlJQq7/\nz/1i0vFArDbnc2mwBf3pqrDPyqx1EBx7V3Tsm38TNMZyHaz0IyPUDvRPn10UYXui\n2ZGseauwU/PmvFNofxVbG0Dc55lOoxl31520K0h9cWxVHcYzUxPndQ1pltYkXiMm\n/596LHkJ+unMJszDVhAIOmc0PgECtGH1VG6EoTTFlMu7VJekKInkYNow4Q6cAVcr\n11F4meOs0DMZgzfeUjSnsKG7KsLHfr5bLw6FEEzobgtI2sXVMOJi+ypd3zTY+ACq\noRt6wkRFCUoEgap7SG6B2TwHPGe15VIFZJtcnOZqHdrnfJLVROPnA4dYhJVJj1v1\n9JFH/T6EIi6nIqnrlX+10zaatpzq2+AFX8LiWpr7C7S99LgH3cnFdssfmlqoG82t\n3BshYpDrIw1f72zy8+RCkK52OdjNpDoVwubwz6i8jldzoENqmsioyetyaVfe9GGH\nUdEPrUZ4BHLeGPjHclOPVEhjVBZuofQ/GgM2gmCUdn5tcVLjnIeLAv/sQXwkMxIe\n4m9QcPrxVAKOlDr9LhB0mVPr2kfc4yI/wYWEe+CniwcuvxJiOmjsyrENxfaFY30r\nQspTSDVt8hVfVISzpuEchtLVjuRO/ESpmeOF1rRTc1qL/CjetmidkedDm64EZjyK\njyXQv9IZPMTwOndF6AVLH7l1F0E=\n-----END CERTIFICATE-----\n",
"tectonic_metal_matchbox_client_key": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpQIBAAKCAQEAr8S7x/tAS6W+aRW3X833OvNfxXjUJAiRkUV85Raln7tqVcTG\n/9iyRhWgNpUn/WU1/3qVobto4ZCURIwoQh0kWk8io1lafZJ+S6Znm3+0TKo7u6QM\navolJyetsOQkT/bIoZ7309fhk4Vu9GILjtZtxV7GDb4WqR9R7z77nYTdHMio/BQV\nk+Xg6rkOsMRyoR+B9JHGn9mvXLZSi8Q+3ABtsN6flPt7mTkhFFFvTgWxtzgVbeOR\nT/uFxIV/IMjtGseUIzvFGUQP6KCyCJb3Kp4rxSxIbi35mFqEWXjB7BVT/0pjx1mc\n5tSvGuFl7G4N/MmGe3ZqZCF4FalpiPGAInKrWQIDAQABAoIBAQCR/OQ+0JdxfWNu\nYqQhBbA/nV7BZH9GwnstXrrCiBHeXsqOHFdwruo7PcEJNM+3LnYwEP/xCfityOjt\nGkBh0VSdUbciV5fKTn9pk/ff9qypNIdSbYoG3Gc5Y0JndsYWSJIRczjCEj+AyMYE\nYt7Yr48S7ImxZl3p8GKcRQK1rWH9geg4cyCPisbaDSfjJbYh5yLk/2wsxGBRM3gg\nCyJEbkJ/v107a1iThTGBgEgnFPP+FqZ2jlnfhBPVzuYggYyiMJuNtgDl7Vi7NLBe\n2ueqq1UAT9LCpZNLJ8eYiDuyNHZtA7a2r3O/jTR4cvQy1xEjD3h4Es7olkAf/Lzu\n6wuggbllAoGBANcqZyJtVxkGwHV9CWTWniTT7BNQ2ehYErkNKggMXl2AzOqEKzqn\nIDRoBhiJKeAphdw/ccvqUEm9bUJD2QLpTJuMmUBkOwqMhATBXFrFCBX4PzGHYnC8\n6hEXjoUE6XhKdJEOgXTqrt31HDgj13GwAp/2DnsscFkC9co5+IW68sUjAoGBANEg\nQvZYdI4Me6JxLXotyirpo57xjocvlo+uffws/YwBH8nK/op6am69zzMMOgUYA5Li\n00WzfEXoyO+BdcbH28xYdBZT0CTkGlPM8IHuH+d/AwnEurxUElWZRRXSz6g17siM\nKjBodqI8h+jQiQJuJ/zBJbOm3bUbpIt1Z+ROjstTAoGAWdAdVMWHQa8Lzv7uWOUt\nBfpf5IAvNUjuJ8hS7yEakrUc1BdvZAA29Skmwj8e967dbV4eRhv8f4tOfAaOIyT3\nEUbTAYnVC0Y0JTgBMPJluaXx2t7EPILewVuv5d5zBf8uQQ5pA0Ci1YtmyBhN6eqq\nbdLroIagLseJiWxBTLEIfTkCgYEAjikXPC2fdhzoQuIbHy5Xe1p+PwNId3+TIzNk\nM3RGG9F70YqsBGj5RzTC0JnkKyhK7aRCKOS9eyymw6HG9Y1RTpVmvPLW0O07NHJh\noIHGsHD4GMDijDm+iO/7Nb2sKlYXb79Qwr2Qv/LUFSEFsmA90KVgQsMRfhc/gQob\nyOjaSz8CgYEAwr3aYp1CkKBXeUTNioLbyymhA4RqGPH/69F1NQ7froLXb152SzOV\njWcrt4ogRacgHb8thuTedrjUiJJLoWhQ3KqzSA2pI3tTLIxrJePiMMpt1Xb2z9l6\nPikk0rvNVB/vrPeVjAdGY9TJC/vpz3om92DRDmUifu8rCFxIHE0GrQ0=\n-----END RSA PRIVATE KEY-----\n",
"tectonic_metal_matchbox_http_url": "http://matchbox.example.com:8080",
"tectonic_metal_matchbox_rpc_endpoint": "matchbox.example.com:8081",
"tectonic_metal_worker_domains": [
"node2.example.com",
"node3.example.com"
],
"tectonic_metal_worker_macs": [
"52:54:00:b2:2f:86",
"52:54:00:c3:61:77"
],
"tectonic_metal_worker_names": [
"node2",
"node3"
],
"tectonic_pull_secret_path": "./pull_secret.json",
"tectonic_service_cidr": "10.3.0.0/16",
"tectonic_ssh_authorized_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCt3BebCHqnSsgpLjo4kVvyfY/z2BS8t27r/7du+O2pb4xYkr7n+KFpbOz523vMTpQ+o1jY4u4TgexglyT9nqasWgLOvo1qjD1agHme8LlTPQSk07rXqOB85Uq5p7ig2zoOejF6qXhcc3n1c7+HkxHrgpBENjLVHOBpzPBIAHkAGaZcl07OCqbsG5yxqEmSGiAlh/IiUVOZgdDMaGjCRFy0wk0mQaGD66DmnFc1H5CzcPjsxr0qO65e7lTGsE930KkO1Vc+RHCVwvhdXs+c2NhJ2/3740Kpes9n1/YullaWZUzlCPDXtRuy6JRbFbvy39JUgHWGWzB3d+3f8oJ/N4qZ cardno:000603633110"
}

@ -1,20 +0,0 @@
locals {
ignition_etcd_keys = ["ignition_etcd_0.json", "ignition_etcd_1.json", "ignition_etcd_2.json"]
}
data "ignition_config" "tnc" {
count = "${var.instance_count}"
append {
source = "${format("http://${var.cluster_name}-tnc.${var.base_domain}/config/etcd?etcd_index=%d", count.index)}"
# TODO: add verification
}
# Used for loading certificates
append {
source = "${format("s3://%s/%s", var.s3_bucket, local.ignition_etcd_keys[count.index])}"
# TODO: add verification
}
}

@ -1,38 +0,0 @@
data "ignition_config" "etcd" {
count = "${length(var.external_endpoints) == 0 ? var.instance_count : 0}"
systemd = [
"${data.ignition_systemd_unit.locksmithd.*.id[count.index]}",
"${var.ign_etcd_dropin_id_list[count.index]}",
]
files = ["${compact(list(
var.ign_profile_env_id,
var.ign_systemd_default_env_id,
))}",
"${var.ign_etcd_crt_id_list}",
"${var.ign_ntp_dropin_id}",
]
}
data "ignition_systemd_unit" "locksmithd" {
count = "${length(var.external_endpoints) == 0 ? var.instance_count : 0}"
name = "locksmithd.service"
enabled = true
dropin = [
{
name = "40-etcd-lock.conf"
content = <<EOF
[Service]
Environment=REBOOT_STRATEGY=etcd-lock
${var.tls_enabled ? "Environment=\"LOCKSMITHD_ETCD_CAFILE=/etc/ssl/etcd/ca.crt\"" : ""}
${var.tls_enabled ? "Environment=\"LOCKSMITHD_ETCD_KEYFILE=/etc/ssl/etcd/client.key\"" : ""}
${var.tls_enabled ? "Environment=\"LOCKSMITHD_ETCD_CERTFILE=/etc/ssl/etcd/client.crt\"" : ""}
Environment="LOCKSMITHD_ENDPOINT=${var.tls_enabled ? "https" : "http"}://${var.cluster_name}-etcd-${count.index}.${var.base_domain}:2379"
EOF
},
]
}

@ -1,25 +0,0 @@
resource "aws_s3_bucket_object" "ignition_etcd" {
count = "${length(var.external_endpoints) == 0 ? var.instance_count : 0}"
bucket = "${var.s3_bucket}"
key = "ignition_etcd_${count.index}.json"
content = "${data.ignition_config.etcd.*.rendered[count.index]}"
acl = "private"
server_side_encryption = "AES256"
tags = "${merge(map(
"Name", "${var.cluster_name}-ignition-etcd-${count.index}",
"KubernetesCluster", "${var.cluster_name}",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
}
data "ignition_config" "s3" {
count = "${length(var.external_endpoints) == 0 ? var.instance_count : 0}"
replace {
source = "${format("s3://%s/%s", var.s3_bucket, aws_s3_bucket_object.ignition_etcd.*.key[count.index])}"
verification = "sha512-${sha512(data.ignition_config.etcd.*.rendered[count.index])}"
}
}

@ -1,135 +0,0 @@
locals {
ami_owner = "595879546273"
arn = "aws"
}
data "aws_ami" "coreos_ami" {
filter {
name = "name"
values = ["CoreOS-${var.container_linux_channel}-${var.container_linux_version}-*"]
}
filter {
name = "architecture"
values = ["x86_64"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
filter {
name = "owner-id"
values = ["${local.ami_owner}"]
}
}
resource "aws_iam_instance_profile" "etcd" {
name = "${var.cluster_name}-etcd-profile"
role = "${var.etcd_iam_role == "" ?
join("|", aws_iam_role.etcd_role.*.name) :
join("|", data.aws_iam_role.etcd_role.*.name)
}"
}
data "aws_iam_role" "etcd_role" {
count = "${var.etcd_iam_role == "" ? 0 : 1}"
name = "${var.etcd_iam_role}"
}
resource "aws_iam_role" "etcd_role" {
count = "${var.etcd_iam_role == "" ? 1 : 0}"
name = "${var.cluster_name}-etcd-role"
path = "/"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_iam_role_policy" "etcd" {
count = "${var.etcd_iam_role == "" ? 1 : 0}"
name = "${var.cluster_name}_etcd_policy"
role = "${aws_iam_role.etcd_role.id}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "ec2:Describe*",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:AttachVolume",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:DetachVolume",
"Resource": "*"
},
{
"Action" : [
"s3:GetObject"
],
"Resource": "arn:${local.arn}:s3:::*",
"Effect": "Allow"
}
]
}
EOF
}
resource "aws_instance" "etcd_node" {
count = "${var.instance_count}"
ami = "${coalesce(var.ec2_ami, data.aws_ami.coreos_ami.image_id)}"
iam_instance_profile = "${aws_iam_instance_profile.etcd.name}"
instance_type = "${var.ec2_type}"
key_name = "${var.ssh_key}"
subnet_id = "${element(var.subnets, count.index)}"
user_data = "${data.ignition_config.tnc.*.rendered[count.index]}"
vpc_security_group_ids = ["${var.sg_ids}"]
lifecycle {
# Ignore changes in the AMI which force recreation of the resource. This
# avoids accidental deletion of nodes whenever a new CoreOS Release comes
# out.
ignore_changes = ["ami"]
}
tags = "${merge(map(
"Name", "${var.cluster_name}-etcd-${count.index}",
"kubernetes.io/cluster/${var.cluster_name}", "owned",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
root_block_device {
volume_type = "${var.root_volume_type}"
volume_size = "${var.root_volume_size}"
iops = "${var.root_volume_type == "io1" ? var.root_volume_iops : var.root_volume_type == "gp2" ? min(10000, max(100, 3 * var.root_volume_size)) : 0}"
}
volume_tags = "${merge(map(
"Name", "${var.cluster_name}-etcd-${count.index}-vol",
"kubernetes.io/cluster/${var.cluster_name}", "owned",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
}

@ -1,145 +0,0 @@
locals {
ami_owner = "595879546273"
arn = "aws"
}
data "aws_ami" "coreos_ami" {
filter {
name = "name"
values = ["CoreOS-${var.container_linux_channel}-${var.container_linux_version}-*"]
}
filter {
name = "architecture"
values = ["x86_64"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
filter {
name = "owner-id"
values = ["${local.ami_owner}"]
}
}
resource "aws_iam_instance_profile" "etcd" {
count = "${length(var.external_endpoints) == 0 ? 1 : 0}"
name = "${var.cluster_name}-etcd-profile"
role = "${var.etcd_iam_role == "" ?
join("|", aws_iam_role.etcd_role.*.name) :
join("|", data.aws_iam_role.etcd_role.*.name)
}"
}
data "aws_iam_role" "etcd_role" {
count = "${var.etcd_iam_role == "" ? 0 : 1}"
name = "${var.etcd_iam_role}"
}
resource "aws_iam_role" "etcd_role" {
count = "${length(var.external_endpoints) == 0 && var.etcd_iam_role == "" ? 1 : 0}"
name = "${var.cluster_name}-etcd-role"
path = "/"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_iam_role_policy" "etcd" {
count = "${var.etcd_iam_role == "" ? 1 : 0}"
name = "${var.cluster_name}_etcd_policy"
role = "${aws_iam_role.etcd_role.id}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "ec2:Describe*",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:AttachVolume",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:DetachVolume",
"Resource": "*"
},
{
"Action": [
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecr:BatchGetImage"
],
"Resource": "*",
"Effect": "Allow"
},
{
"Action" : [
"s3:GetObject"
],
"Resource": "arn:${local.arn}:s3:::*",
"Effect": "Allow"
}
]
}
EOF
}
resource "aws_instance" "etcd_node" {
count = "${length(var.external_endpoints) == 0 ? var.instance_count : 0}"
ami = "${coalesce(var.ec2_ami, data.aws_ami.coreos_ami.image_id)}"
iam_instance_profile = "${aws_iam_instance_profile.etcd.name}"
instance_type = "${var.ec2_type}"
key_name = "${var.ssh_key}"
subnet_id = "${element(var.subnets, count.index)}"
user_data = "${data.ignition_config.s3.*.rendered[count.index]}"
vpc_security_group_ids = ["${var.sg_ids}"]
lifecycle {
# Ignore changes in the AMI which force recreation of the resource. This
# avoids accidental deletion of nodes whenever a new CoreOS Release comes
# out.
ignore_changes = ["ami"]
}
tags = "${merge(map(
"Name", "${var.cluster_name}-etcd-${count.index}",
"kubernetes.io/cluster/${var.cluster_name}", "owned",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
root_block_device {
volume_type = "${var.root_volume_type}"
volume_size = "${var.root_volume_size}"
iops = "${var.root_volume_type == "io1" ? var.root_volume_iops : var.root_volume_type == "gp2" ? min(10000, max(100, 3 * var.root_volume_size)) : 0}"
}
volume_tags = "${merge(map(
"Name", "${var.cluster_name}-etcd-${count.index}-vol",
"kubernetes.io/cluster/${var.cluster_name}", "owned",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
}

@ -1,3 +0,0 @@
output "ip_addresses" {
value = "${aws_instance.etcd_node.*.private_ip}"
}

@ -1,86 +0,0 @@
variable "base_domain" {
type = "string"
}
variable "cluster_id" {
type = "string"
}
variable "cluster_name" {
type = "string"
}
variable "container_linux_channel" {
type = "string"
}
variable "container_linux_version" {
type = "string"
}
variable "instance_count" {
default = "3"
}
variable "ssh_key" {
type = "string"
}
variable "subnets" {
type = "list"
}
variable "container_image" {
type = "string"
}
variable "ec2_type" {
type = "string"
}
variable "ec2_ami" {
type = "string"
default = ""
}
variable "extra_tags" {
description = "Extra AWS tags to be applied to created resources."
type = "map"
default = {}
}
variable "root_volume_type" {
type = "string"
description = "The type of volume for the root block device."
}
variable "root_volume_size" {
type = "string"
description = "The size of the volume in gigabytes for the root block device."
}
variable "root_volume_iops" {
type = "string"
default = "100"
description = "The amount of provisioned IOPS for the root block device."
}
variable "sg_ids" {
type = "list"
description = "The security group IDs to be applied."
}
variable "s3_bucket" {
type = "string"
}
variable "etcd_iam_role" {
type = "string"
default = ""
description = "IAM role to use for the instance profiles of etcd nodes."
}
variable "dns_server_ip" {
type = "string"
default = ""
}

@ -1,111 +0,0 @@
variable "base_domain" {
type = "string"
}
variable "cluster_id" {
type = "string"
}
variable "cluster_name" {
type = "string"
}
variable "container_linux_channel" {
type = "string"
}
variable "container_linux_version" {
type = "string"
}
variable "instance_count" {
default = "3"
}
variable "ssh_key" {
type = "string"
}
variable "subnets" {
type = "list"
}
variable "external_endpoints" {
type = "list"
}
variable "container_image" {
type = "string"
}
variable "ec2_type" {
type = "string"
}
variable "ec2_ami" {
type = "string"
default = ""
}
variable "extra_tags" {
description = "Extra AWS tags to be applied to created resources."
type = "map"
default = {}
}
variable "root_volume_type" {
type = "string"
description = "The type of volume for the root block device."
}
variable "root_volume_size" {
type = "string"
description = "The size of the volume in gigabytes for the root block device."
}
variable "root_volume_iops" {
type = "string"
default = "100"
description = "The amount of provisioned IOPS for the root block device."
}
variable "sg_ids" {
type = "list"
description = "The security group IDs to be applied."
}
variable "tls_enabled" {
default = false
}
variable "ign_etcd_dropin_id_list" {
type = "list"
}
variable "s3_bucket" {
type = "string"
}
variable "ign_etcd_crt_id_list" {
type = "list"
}
variable "etcd_iam_role" {
type = "string"
default = ""
description = "IAM role to use for the instance profiles of etcd nodes."
}
variable "ign_profile_env_id" {
type = "string"
default = ""
}
variable "ign_systemd_default_env_id" {
type = "string"
default = ""
}
variable "ign_ntp_dropin_id" {
type = "string"
}

@ -1,91 +0,0 @@
data "ignition_config" "main" {
files = ["${compact(list(
data.ignition_file.detect_master.id,
data.ignition_file.init_assets.id,
data.ignition_file.rm_assets.id,
var.ign_installer_kubelet_env_id,
var.ign_installer_runtime_mappings_id,
var.ign_max_user_watches_id,
var.ign_nfs_config_id,
var.ign_ntp_dropin_id,
var.ign_profile_env_id,
var.ign_s3_puller_id,
var.ign_systemd_default_env_id,
))}",
"${var.ign_ca_cert_id_list}",
]
systemd = ["${compact(list(
var.ign_bootkube_path_unit_id,
var.ign_bootkube_service_id,
var.ign_docker_dropin_id,
var.ign_init_assets_service_id,
var.ign_iscsi_service_id,
var.ign_k8s_node_bootstrap_service_id,
var.ign_kubelet_service_id,
var.ign_locksmithd_service_id,
var.ign_rm_assets_path_unit_id,
var.ign_rm_assets_service_id,
var.ign_tectonic_path_unit_id,
var.ign_tectonic_service_id,
var.ign_update_ca_certificates_dropin_id,
))}"]
}
data "template_file" "detect_master" {
template = "${file("${path.module}/resources/detect-master.sh")}"
vars {
load_balancer_name = "${format("%s-%s", var.cluster_name, var.private_endpoints ? "int" : "ext")}"
}
}
data "ignition_file" "detect_master" {
filesystem = "root"
path = "/opt/detect-master.sh"
mode = 0755
content {
content = "${data.template_file.detect_master.rendered}"
}
}
data "template_file" "init_assets" {
template = "${file("${path.module}/resources/init-assets.sh")}"
vars {
assets_s3_location = "${var.assets_s3_location}"
awscli_image = "${var.container_images["awscli"]}"
cluster_name = "${var.cluster_name}"
}
}
data "ignition_file" "init_assets" {
filesystem = "root"
path = "/opt/init-assets.sh"
mode = 0755
content {
content = "${data.template_file.init_assets.rendered}"
}
}
data "template_file" "rm_assets" {
template = "${file("${path.module}/resources/rm-assets.sh")}"
vars {
assets_s3_location = "${var.assets_s3_location}"
awscli_image = "${var.container_images["awscli"]}"
cluster_name = "${var.cluster_name}"
}
}
data "ignition_file" "rm_assets" {
filesystem = "root"
path = "/opt/rm-assets.sh"
mode = 0755
content {
content = "${data.template_file.rm_assets.rendered}"
}
}

@ -1,21 +0,0 @@
resource "aws_s3_bucket_object" "ignition_master" {
bucket = "${var.s3_bucket}"
key = "ignition_master.json"
content = "${data.ignition_config.main.rendered}"
acl = "private"
server_side_encryption = "AES256"
tags = "${merge(map(
"Name", "${var.cluster_name}-ignition-master",
"KubernetesCluster", "${var.cluster_name}",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
}
data "ignition_config" "s3" {
replace {
source = "${format("s3://%s/%s", var.s3_bucket, aws_s3_bucket_object.ignition_master.key)}"
verification = "sha512-${sha512(data.ignition_config.main.rendered)}"
}
}

@ -1,164 +0,0 @@
locals {
ami_owner = "595879546273"
arn = "aws"
}
data "aws_ami" "coreos_ami" {
filter {
name = "name"
values = ["CoreOS-${var.container_linux_channel}-${var.container_linux_version}-*"]
}
filter {
name = "architecture"
values = ["x86_64"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
filter {
name = "owner-id"
values = ["${local.ami_owner}"]
}
}
resource "aws_autoscaling_group" "masters" {
name = "${var.cluster_name}-masters"
desired_capacity = "${var.instance_count}"
max_size = "${var.instance_count * 3}"
min_size = "${var.instance_count}"
launch_configuration = "${aws_launch_configuration.master_conf.id}"
vpc_zone_identifier = ["${var.subnet_ids}"]
load_balancers = ["${var.aws_lbs}"]
tags = [
{
key = "Name"
value = "${var.cluster_name}-master"
propagate_at_launch = true
},
{
key = "kubernetes.io/cluster/${var.cluster_name}"
value = "owned"
propagate_at_launch = true
},
{
key = "tectonicClusterID"
value = "${var.cluster_id}"
propagate_at_launch = true
},
"${var.autoscaling_group_extra_tags}",
]
lifecycle {
create_before_destroy = true
}
}
resource "aws_launch_configuration" "master_conf" {
instance_type = "${var.ec2_type}"
image_id = "${coalesce(var.ec2_ami, data.aws_ami.coreos_ami.image_id)}"
name_prefix = "${var.cluster_name}-master-"
key_name = "${var.ssh_key}"
security_groups = ["${var.master_sg_ids}"]
iam_instance_profile = "${aws_iam_instance_profile.master_profile.arn}"
associate_public_ip_address = "${var.public_endpoints}"
user_data = "${var.user_data_ign}"
lifecycle {
create_before_destroy = true
# Ignore changes in the AMI which force recreation of the resource. This
# avoids accidental deletion of nodes whenever a new CoreOS Release comes
# out.
ignore_changes = ["image_id"]
}
root_block_device {
volume_type = "${var.root_volume_type}"
volume_size = "${var.root_volume_size}"
iops = "${var.root_volume_type == "io1" ? var.root_volume_iops : 0}"
}
}
resource "aws_iam_instance_profile" "master_profile" {
name = "${var.cluster_name}-master-profile"
role = "${var.master_iam_role == "" ?
join("|", aws_iam_role.master_role.*.name) :
join("|", data.aws_iam_role.master_role.*.name)
}"
}
data "aws_iam_role" "master_role" {
count = "${var.master_iam_role == "" ? 0 : 1}"
name = "${var.master_iam_role}"
}
resource "aws_iam_role" "master_role" {
count = "${var.master_iam_role == "" ? 1 : 0}"
name = "${var.cluster_name}-master-role"
path = "/"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_iam_role_policy" "master_policy" {
count = "${var.master_iam_role == "" ? 1 : 0}"
name = "${var.cluster_name}_master_policy"
role = "${aws_iam_role.master_role.id}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "ec2:*",
"Resource": "*",
"Effect": "Allow"
},
{
"Action": "elasticloadbalancing:*",
"Resource": "*",
"Effect": "Allow"
},
{
"Action" : [
"s3:GetObject",
"s3:HeadObject",
"s3:ListBucket",
"s3:PutObject"
],
"Resource": "arn:${local.arn}:s3:::*",
"Effect": "Allow"
},
{
"Action" : [
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances"
],
"Resource": "*",
"Effect": "Allow"
}
]
}
EOF
}

@ -1,164 +0,0 @@
locals {
ami_owner = "595879546273"
arn = "aws"
}
data "aws_ami" "coreos_ami" {
filter {
name = "name"
values = ["CoreOS-${var.container_linux_channel}-${var.container_linux_version}-*"]
}
filter {
name = "architecture"
values = ["x86_64"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
filter {
name = "owner-id"
values = ["${local.ami_owner}"]
}
}
resource "aws_autoscaling_group" "masters" {
name = "${var.cluster_name}-masters"
desired_capacity = "${var.instance_count}"
max_size = "${var.instance_count * 3}"
min_size = "${var.instance_count}"
launch_configuration = "${aws_launch_configuration.master_conf.id}"
vpc_zone_identifier = ["${var.subnet_ids}"]
load_balancers = ["${var.aws_lbs}"]
tags = [
{
key = "Name"
value = "${var.cluster_name}-master"
propagate_at_launch = true
},
{
key = "kubernetes.io/cluster/${var.cluster_name}"
value = "owned"
propagate_at_launch = true
},
{
key = "tectonicClusterID"
value = "${var.cluster_id}"
propagate_at_launch = true
},
"${var.autoscaling_group_extra_tags}",
]
lifecycle {
create_before_destroy = true
}
}
resource "aws_launch_configuration" "master_conf" {
instance_type = "${var.ec2_type}"
image_id = "${coalesce(var.ec2_ami, data.aws_ami.coreos_ami.image_id)}"
name_prefix = "${var.cluster_name}-master-"
key_name = "${var.ssh_key}"
security_groups = ["${var.master_sg_ids}"]
iam_instance_profile = "${aws_iam_instance_profile.master_profile.arn}"
associate_public_ip_address = "${var.public_endpoints}"
user_data = "${data.ignition_config.s3.rendered}"
lifecycle {
create_before_destroy = true
# Ignore changes in the AMI which force recreation of the resource. This
# avoids accidental deletion of nodes whenever a new CoreOS Release comes
# out.
ignore_changes = ["image_id"]
}
root_block_device {
volume_type = "${var.root_volume_type}"
volume_size = "${var.root_volume_size}"
iops = "${var.root_volume_type == "io1" ? var.root_volume_iops : 0}"
}
}
resource "aws_iam_instance_profile" "master_profile" {
name = "${var.cluster_name}-master-profile"
role = "${var.master_iam_role == "" ?
join("|", aws_iam_role.master_role.*.name) :
join("|", data.aws_iam_role.master_role.*.name)
}"
}
data "aws_iam_role" "master_role" {
count = "${var.master_iam_role == "" ? 0 : 1}"
name = "${var.master_iam_role}"
}
resource "aws_iam_role" "master_role" {
count = "${var.master_iam_role == "" ? 1 : 0}"
name = "${var.cluster_name}-master-role"
path = "/"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_iam_role_policy" "master_policy" {
count = "${var.master_iam_role == "" ? 1 : 0}"
name = "${var.cluster_name}_master_policy"
role = "${aws_iam_role.master_role.id}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "ec2:*",
"Resource": "*",
"Effect": "Allow"
},
{
"Action": "elasticloadbalancing:*",
"Resource": "*",
"Effect": "Allow"
},
{
"Action" : [
"s3:GetObject",
"s3:HeadObject",
"s3:ListBucket",
"s3:PutObject"
],
"Resource": "arn:${local.arn}:s3:::*",
"Effect": "Allow"
},
{
"Action" : [
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances"
],
"Resource": "*",
"Effect": "Allow"
}
]
}
EOF
}

@ -1,115 +0,0 @@
variable "autoscaling_group_extra_tags" {
description = "Extra AWS tags to be applied to created autoscaling group resources."
type = "list"
default = []
}
variable "base_domain" {
type = "string"
description = "Domain on which the ELB records will be created"
}
variable "container_linux_channel" {
type = "string"
}
variable "container_linux_version" {
type = "string"
}
variable "cluster_id" {
type = "string"
}
variable "cluster_name" {
type = "string"
}
variable "container_images" {
description = "Container images to use"
type = "map"
}
variable "ec2_type" {
type = "string"
}
variable "extra_tags" {
description = "Extra AWS tags to be applied to created resources."
type = "map"
default = {}
}
variable "ec2_ami" {
type = "string"
default = ""
}
variable "instance_count" {
type = "string"
}
variable "master_iam_role" {
type = "string"
default = ""
description = "IAM role to use for the instance profiles of master nodes."
}
variable "master_sg_ids" {
type = "list"
description = "The security group IDs to be applied to the master nodes."
}
variable "private_endpoints" {
description = "If set to true, private-facing ingress resources are created."
default = true
}
variable "public_endpoints" {
description = "If set to true, public-facing ingress resources are created."
default = true
}
variable "aws_lbs" {
description = "List of aws_lb IDs for the Console & APIs"
type = "list"
default = []
}
variable "root_volume_iops" {
type = "string"
default = "100"
description = "The amount of provisioned IOPS for the root block device."
}
variable "root_volume_size" {
type = "string"
description = "The size of the volume in gigabytes for the root block device."
}
variable "root_volume_type" {
type = "string"
description = "The type of volume for the root block device."
}
variable "ssh_key" {
type = "string"
}
variable "subnet_ids" {
type = "list"
}
variable "dns_server_ip" {
type = "string"
default = ""
}
variable "kubeconfig_content" {
type = "string"
default = ""
}
variable "user_data_ign" {
type = "string"
}

@ -1,144 +0,0 @@
variable "assets_s3_location" {
type = "string"
description = "Location on S3 of the Bootkube/Tectonic assets to use (bucket/key)"
}
variable "autoscaling_group_extra_tags" {
description = "Extra AWS tags to be applied to created autoscaling group resources."
type = "list"
default = []
}
variable "base_domain" {
type = "string"
description = "Domain on which the ELB records will be created"
}
variable "container_linux_channel" {
type = "string"
}
variable "container_linux_version" {
type = "string"
}
variable "cluster_id" {
type = "string"
}
variable "cluster_name" {
type = "string"
}
variable "container_images" {
description = "Container images to use"
type = "map"
}
variable "ec2_type" {
type = "string"
}
variable "extra_tags" {
description = "Extra AWS tags to be applied to created resources."
type = "map"
default = {}
}
variable "ign_s3_puller_id" {
type = "string"
}
variable "ec2_ami" {
type = "string"
default = ""
}
variable "instance_count" {
type = "string"
}
variable "master_iam_role" {
type = "string"
default = ""
description = "IAM role to use for the instance profiles of master nodes."
}
variable "master_sg_ids" {
type = "list"
description = "The security group IDs to be applied to the master nodes."
}
variable "private_endpoints" {
description = "If set to true, private-facing ingress resources are created."
default = true
}
variable "public_endpoints" {
description = "If set to true, public-facing ingress resources are created."
default = true
}
variable "aws_lbs" {
description = "List of aws_lb IDs for the Console & APIs"
type = "list"
default = []
}
variable "root_volume_iops" {
type = "string"
default = "100"
description = "The amount of provisioned IOPS for the root block device."
}
variable "root_volume_size" {
type = "string"
description = "The size of the volume in gigabytes for the root block device."
}
variable "root_volume_type" {
type = "string"
description = "The type of volume for the root block device."
}
variable "ssh_key" {
type = "string"
}
variable "subnet_ids" {
type = "list"
}
variable "ign_bootkube_service_id" {
type = "string"
description = "The ID of the bootkube systemd service unit"
}
variable "ign_bootkube_path_unit_id" {
type = "string"
}
variable "ign_tectonic_service_id" {
type = "string"
description = "The ID of the tectonic installer systemd service unit"
}
variable "ign_tectonic_path_unit_id" {
type = "string"
}
variable "ign_init_assets_service_id" {
type = "string"
}
variable "ign_rm_assets_service_id" {
type = "string"
}
variable "ign_rm_assets_path_unit_id" {
type = "string"
}
variable "s3_bucket" {
type = "string"
}

@ -1,12 +0,0 @@
# These subnet data-sources import external subnets from their user-supplied subnet IDs
# whenever an external VPC is specified
#
data "aws_subnet" "external_worker" {
count = "${var.external_vpc_id == "" ? 0 : length(var.external_worker_subnets)}"
id = "${var.external_worker_subnets[count.index]}"
}
data "aws_subnet" "external_master" {
count = "${var.external_vpc_id == "" ? 0 : length(var.external_master_subnets)}"
id = "${var.external_master_subnets[count.index]}"
}
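The count guard in these data sources is the usual Terraform 0.11 way to make a block conditional; a minimal sketch with hypothetical names, assuming the same external_vpc_id convention:

data "aws_subnet" "external_example" {
  count = "${var.external_vpc_id == "" ? 0 : length(var.external_example_subnet_ids)}"
  id    = "${var.external_example_subnet_ids[count.index]}"
}

# Downstream code can then consume the (possibly empty) splat as a list:
#   ids = ["${data.aws_subnet.external_example.*.id}"]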

@ -1,135 +0,0 @@
resource "aws_elb" "tnc" {
count = "${var.private_master_endpoints ? 1 : 0}"
name = "${var.cluster_name}-tnc"
subnets = ["${local.master_subnet_ids}"]
internal = true
security_groups = ["${aws_security_group.tnc.id}"]
idle_timeout = 3600
connection_draining = true
connection_draining_timeout = 300
listener {
instance_port = 49500
instance_protocol = "tcp"
lb_port = 80
lb_protocol = "tcp"
}
health_check {
healthy_threshold = 2
unhealthy_threshold = 2
timeout = 3
target = "TCP:49500"
interval = 5
}
tags = "${merge(map(
"Name", "${var.cluster_name}-int",
"kubernetes.io/cluster/${var.cluster_name}", "owned",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
}
resource "aws_elb" "api_internal" {
count = "${var.private_master_endpoints ? 1 : 0}"
name = "${var.cluster_name}-int"
subnets = ["${local.master_subnet_ids}"]
internal = true
security_groups = ["${aws_security_group.api.id}"]
idle_timeout = 3600
connection_draining = true
connection_draining_timeout = 300
listener {
instance_port = 6443
instance_protocol = "tcp"
lb_port = 6443
lb_protocol = "tcp"
}
health_check {
healthy_threshold = 2
unhealthy_threshold = 2
timeout = 3
target = "SSL:6443"
interval = 5
}
tags = "${merge(map(
"Name", "${var.cluster_name}-int",
"kubernetes.io/cluster/${var.cluster_name}", "owned",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
}
resource "aws_elb" "api_external" {
count = "${var.public_master_endpoints ? 1 : 0}"
name = "${var.cluster_name}-ext"
subnets = ["${local.master_subnet_ids}"]
internal = false
security_groups = ["${aws_security_group.api.id}"]
idle_timeout = 3600
connection_draining = true
connection_draining_timeout = 300
listener {
instance_port = 6443
instance_protocol = "tcp"
lb_port = 6443
lb_protocol = "tcp"
}
health_check {
healthy_threshold = 2
unhealthy_threshold = 2
timeout = 3
target = "SSL:6443"
interval = 5
}
tags = "${merge(map(
"Name", "${var.cluster_name}-api-external",
"kubernetes.io/cluster/${var.cluster_name}", "owned",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
}
resource "aws_elb" "console" {
name = "${var.cluster_name}-con"
subnets = ["${local.master_subnet_ids}"]
internal = "${var.public_master_endpoints ? false : true}"
security_groups = ["${aws_security_group.console.id}"]
idle_timeout = 3600
listener {
instance_port = 32001
instance_protocol = "tcp"
lb_port = 80
lb_protocol = "tcp"
}
listener {
instance_port = 32000
instance_protocol = "tcp"
lb_port = 443
lb_protocol = "tcp"
}
health_check {
healthy_threshold = 2
unhealthy_threshold = 2
timeout = 3
target = "HTTP:32002/healthz"
interval = 5
}
tags = "${merge(map(
"Name", "${var.cluster_name}-console",
"kubernetes.io/cluster/${var.cluster_name}", "owned",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
}
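The tags expressions above compose the 0.11-era map() and merge() functions; a small sketch with hypothetical values, where keys from the right-most map win on conflict, which is why the extra/override tags go last:

locals {
  base_tags = "${map(
    "Name", "demo-int",
    "tectonicClusterID", "1234-abcd"
  )}"

  # Duplicate keys are overwritten by maps that appear later in the argument list.
  all_tags = "${merge(local.base_tags, map("Name", "demo-console", "team", "infra"))}"
  # => { Name = "demo-console", tectonicClusterID = "1234-abcd", team = "infra" }
}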

@ -1,102 +0,0 @@
resource "aws_elb" "api_internal" {
count = "${var.private_master_endpoints}"
name = "${var.cluster_name}-int"
subnets = ["${local.master_subnet_ids}"]
internal = true
security_groups = ["${aws_security_group.api.id}"]
idle_timeout = 3600
connection_draining = true
connection_draining_timeout = 300
listener {
instance_port = 443
instance_protocol = "tcp"
lb_port = 443
lb_protocol = "tcp"
}
health_check {
healthy_threshold = 2
unhealthy_threshold = 2
timeout = 3
target = "SSL:443"
interval = 5
}
tags = "${merge(map(
"Name", "${var.cluster_name}-int",
"kubernetes.io/cluster/${var.cluster_name}", "owned",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
}
resource "aws_elb" "api_external" {
count = "${var.public_master_endpoints}"
name = "${var.custom_dns_name == "" ? var.cluster_name : var.custom_dns_name}-ext"
subnets = ["${local.master_subnet_ids}"]
internal = false
security_groups = ["${aws_security_group.api.id}"]
idle_timeout = 3600
connection_draining = true
connection_draining_timeout = 300
listener {
instance_port = 443
instance_protocol = "tcp"
lb_port = 443
lb_protocol = "tcp"
}
health_check {
healthy_threshold = 2
unhealthy_threshold = 2
timeout = 3
target = "SSL:443"
interval = 5
}
tags = "${merge(map(
"Name", "${var.cluster_name}-api-external",
"kubernetes.io/cluster/${var.cluster_name}", "owned",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
}
resource "aws_elb" "console" {
name = "${var.custom_dns_name == "" ? var.cluster_name : var.custom_dns_name}-con"
subnets = ["${local.master_subnet_ids}"]
internal = "${var.public_master_endpoints ? false : true}"
security_groups = ["${aws_security_group.console.id}"]
idle_timeout = 3600
listener {
instance_port = 32001
instance_protocol = "tcp"
lb_port = 80
lb_protocol = "tcp"
}
listener {
instance_port = 32000
instance_protocol = "tcp"
lb_port = 443
lb_protocol = "tcp"
}
health_check {
healthy_threshold = 2
unhealthy_threshold = 2
timeout = 3
target = "HTTP:32002/healthz"
interval = 5
}
tags = "${merge(map(
"Name", "${var.cluster_name}-console",
"kubernetes.io/cluster/${var.cluster_name}", "owned",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
}

@ -1,81 +0,0 @@
output "vpc_id" {
value = "${data.aws_vpc.cluster_vpc.id}"
}
# We have to do this join() & split() 'trick' because null_data_source and
# the ternary operator can't output lists or maps
output "master_subnet_ids" {
value = "${local.master_subnet_ids}"
}
output "worker_subnet_ids" {
value = "${local.worker_subnet_ids}"
}
output "etcd_sg_id" {
value = "${element(concat(aws_security_group.etcd.*.id, list("")), 0)}"
}
output "master_sg_id" {
value = "${aws_security_group.master.id}"
}
output "worker_sg_id" {
value = "${aws_security_group.worker.id}"
}
output "api_sg_id" {
value = "${aws_security_group.api.id}"
}
output "console_sg_id" {
value = "${aws_security_group.console.id}"
}
output "aws_elb_api_external_id" {
value = "${aws_elb.api_external.*.id}"
}
output "aws_elb_internal_id" {
value = "${aws_elb.api_internal.*.id}"
}
output "aws_elb_console_id" {
value = "${aws_elb.console.id}"
}
output "aws_lbs" {
value = ["${compact(concat(aws_elb.api_internal.*.id, list(aws_elb.console.id), aws_elb.api_external.*.id, aws_elb.tnc.*.id))}"]
}
output "aws_api_external_dns_name" {
value = "${element(concat(aws_elb.api_external.*.dns_name, list("")), 0)}"
}
output "aws_elb_api_external_zone_id" {
value = "${element(concat(aws_elb.api_external.*.zone_id, list("")), 0)}"
}
output "aws_api_internal_dns_name" {
value = "${element(concat(aws_elb.api_internal.*.dns_name, list("")), 0)}"
}
output "aws_elb_api_internal_zone_id" {
value = "${element(concat(aws_elb.api_internal.*.zone_id, list("")), 0)}"
}
output "aws_console_dns_name" {
value = "${aws_elb.console.dns_name}"
}
output "aws_elb_console_zone_id" {
value = "${aws_elb.console.zone_id}"
}
output "aws_elb_tnc_dns_name" {
value = "${element(concat(aws_elb.tnc.*.dns_name, list("")), 0)}"
}
output "aws_elb_tnc_zone_id" {
value = "${element(concat(aws_elb.tnc.*.zone_id, list("")), 0)}"
}
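A compact sketch of the join()/split() trick mentioned in the comment above, with hypothetical resource names: the 0.11 ternary can only return strings, so each list is flattened to a comma-separated string, one branch is chosen, and the result is split back into a list.

locals {
  subnet_ids = ["${split(",", var.external_vpc_id == "" ? join(",", aws_subnet.master_subnet.*.id) : join(",", data.aws_subnet.external_master.*.id))}"]
}

# The related element(concat(splat, list("")), 0) guard used in these outputs
# returns "" instead of failing when the splat is empty (count = 0).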

@ -1,73 +0,0 @@
output "vpc_id" {
value = "${data.aws_vpc.cluster_vpc.id}"
}
# We have to do this join() & split() 'trick' because null_data_source and
# the ternary operator can't output lists or maps
output "master_subnet_ids" {
value = "${local.master_subnet_ids}"
}
output "worker_subnet_ids" {
value = "${local.worker_subnet_ids}"
}
output "etcd_sg_id" {
value = "${element(concat(aws_security_group.etcd.*.id, list("")), 0)}"
}
output "master_sg_id" {
value = "${aws_security_group.master.id}"
}
output "worker_sg_id" {
value = "${aws_security_group.worker.id}"
}
output "api_sg_id" {
value = "${aws_security_group.api.id}"
}
output "console_sg_id" {
value = "${aws_security_group.console.id}"
}
output "aws_elb_api_external_id" {
value = "${aws_elb.api_external.*.id}"
}
output "aws_elb_internal_id" {
value = "${aws_elb.api_internal.*.id}"
}
output "aws_elb_console_id" {
value = "${aws_elb.console.id}"
}
output "aws_lbs" {
value = ["${compact(concat(aws_elb.api_internal.*.id, list(aws_elb.console.id), aws_elb.api_external.*.id))}"]
}
output "aws_api_external_dns_name" {
value = "${element(concat(aws_elb.api_external.*.dns_name, list("")), 0)}"
}
output "aws_elb_api_external_zone_id" {
value = "${element(concat(aws_elb.api_external.*.zone_id, list("")), 0)}"
}
output "aws_api_internal_dns_name" {
value = "${element(concat(aws_elb.api_internal.*.dns_name, list("")), 0)}"
}
output "aws_elb_api_internal_zone_id" {
value = "${element(concat(aws_elb.api_internal.*.zone_id, list("")), 0)}"
}
output "aws_console_dns_name" {
value = "${aws_elb.console.dns_name}"
}
output "aws_elb_console_zone_id" {
value = "${aws_elb.console.zone_id}"
}

@ -1,88 +0,0 @@
resource "aws_security_group" "tnc" {
vpc_id = "${data.aws_vpc.cluster_vpc.id}"
tags = "${merge(map(
"Name", "${var.cluster_name}_console_sg",
"kubernetes.io/cluster/${var.cluster_name}", "owned",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
egress {
from_port = 0
to_port = 0
protocol = "-1"
self = true
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 80
to_port = 80
}
ingress {
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 443
to_port = 443
}
}
resource "aws_security_group" "api" {
vpc_id = "${data.aws_vpc.cluster_vpc.id}"
tags = "${merge(map(
"Name", "${var.cluster_name}_api_sg",
"kubernetes.io/cluster/${var.cluster_name}", "owned",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
egress {
from_port = 0
to_port = 0
protocol = "-1"
self = true
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 6443
to_port = 6443
}
}
resource "aws_security_group" "console" {
vpc_id = "${data.aws_vpc.cluster_vpc.id}"
tags = "${merge(map(
"Name", "${var.cluster_name}_console_sg",
"kubernetes.io/cluster/${var.cluster_name}", "owned",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
egress {
from_port = 0
to_port = 0
protocol = "-1"
self = true
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 80
to_port = 80
}
ingress {
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 443
to_port = 443
}
}

@ -1,56 +0,0 @@
resource "aws_security_group" "api" {
vpc_id = "${data.aws_vpc.cluster_vpc.id}"
tags = "${merge(map(
"Name", "${var.cluster_name}_api_sg",
"kubernetes.io/cluster/${var.cluster_name}", "owned",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
egress {
from_port = 0
to_port = 0
protocol = "-1"
self = true
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 443
to_port = 443
}
}
resource "aws_security_group" "console" {
vpc_id = "${data.aws_vpc.cluster_vpc.id}"
tags = "${merge(map(
"Name", "${var.cluster_name}_console_sg",
"kubernetes.io/cluster/${var.cluster_name}", "owned",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
egress {
from_port = 0
to_port = 0
protocol = "-1"
self = true
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 80
to_port = 80
}
ingress {
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 443
to_port = 443
}
}

@ -1,49 +0,0 @@
resource "aws_security_group" "etcd" {
vpc_id = "${data.aws_vpc.cluster_vpc.id}"
tags = "${merge(map(
"Name", "${var.cluster_name}_etcd_sg",
"kubernetes.io/cluster/${var.cluster_name}", "owned",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
egress {
from_port = 0
to_port = 0
protocol = "-1"
self = true
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
protocol = "icmp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
to_port = 0
}
ingress {
protocol = "tcp"
from_port = 22
to_port = 22
self = true
security_groups = ["${aws_security_group.master.id}"]
}
ingress {
protocol = "tcp"
from_port = 2379
to_port = 2379
self = true
security_groups = ["${aws_security_group.master.id}"]
}
ingress {
protocol = "tcp"
from_port = 2380
to_port = 2380
self = true
}
}

@ -1,50 +0,0 @@
resource "aws_security_group" "etcd" {
count = "${var.enable_etcd_sg}"
vpc_id = "${data.aws_vpc.cluster_vpc.id}"
tags = "${merge(map(
"Name", "${var.cluster_name}_etcd_sg",
"kubernetes.io/cluster/${var.cluster_name}", "owned",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
egress {
from_port = 0
to_port = 0
protocol = "-1"
self = true
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
protocol = "icmp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
to_port = 0
}
ingress {
protocol = "tcp"
from_port = 22
to_port = 22
self = true
security_groups = ["${aws_security_group.master.id}"]
}
ingress {
protocol = "tcp"
from_port = 2379
to_port = 2379
self = true
security_groups = ["${aws_security_group.master.id}"]
}
ingress {
protocol = "tcp"
from_port = 2380
to_port = 2380
self = true
}
}

@ -1,209 +0,0 @@
resource "aws_security_group" "master" {
vpc_id = "${data.aws_vpc.cluster_vpc.id}"
tags = "${merge(map(
"Name", "${var.cluster_name}_master_sg",
"kubernetes.io/cluster/${var.cluster_name}", "owned",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
}
resource "aws_security_group_rule" "master_tnc" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 49500
to_port = 49500
}
resource "aws_security_group_rule" "master_egress" {
type = "egress"
security_group_id = "${aws_security_group.master.id}"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
resource "aws_security_group_rule" "master_ingress_icmp" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
protocol = "icmp"
cidr_blocks = ["${data.aws_vpc.cluster_vpc.cidr_block}"]
from_port = 0
to_port = 0
}
resource "aws_security_group_rule" "master_ingress_ssh" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
to_port = 22
}
resource "aws_security_group_rule" "master_ingress_http" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
protocol = "tcp"
cidr_blocks = ["${data.aws_vpc.cluster_vpc.cidr_block}"]
from_port = 80
to_port = 80
}
resource "aws_security_group_rule" "master_ingress_https" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
protocol = "tcp"
cidr_blocks = ["${data.aws_vpc.cluster_vpc.cidr_block}"]
from_port = 6443
to_port = 6445
}
resource "aws_security_group_rule" "master_ingress_heapster" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
protocol = "tcp"
from_port = 4194
to_port = 4194
self = true
}
resource "aws_security_group_rule" "master_ingress_heapster_from_worker" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
source_security_group_id = "${aws_security_group.worker.id}"
protocol = "tcp"
from_port = 4194
to_port = 4194
}
resource "aws_security_group_rule" "master_ingress_flannel" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
protocol = "udp"
from_port = 4789
to_port = 4789
self = true
}
resource "aws_security_group_rule" "master_ingress_flannel_from_worker" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
source_security_group_id = "${aws_security_group.worker.id}"
protocol = "udp"
from_port = 4789
to_port = 4789
}
resource "aws_security_group_rule" "master_ingress_node_exporter" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
protocol = "tcp"
from_port = 9100
to_port = 9100
self = true
}
resource "aws_security_group_rule" "master_ingress_node_exporter_from_worker" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
source_security_group_id = "${aws_security_group.worker.id}"
protocol = "tcp"
from_port = 9100
to_port = 9100
}
resource "aws_security_group_rule" "master_ingress_kubelet_insecure" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
protocol = "tcp"
from_port = 10250
to_port = 10250
self = true
}
resource "aws_security_group_rule" "master_ingress_kubelet_insecure_from_worker" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
source_security_group_id = "${aws_security_group.worker.id}"
protocol = "tcp"
from_port = 10250
to_port = 10250
}
resource "aws_security_group_rule" "master_ingress_kubelet_secure" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
protocol = "tcp"
from_port = 10255
to_port = 10255
self = true
}
resource "aws_security_group_rule" "master_ingress_kubelet_secure_from_worker" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
source_security_group_id = "${aws_security_group.worker.id}"
protocol = "tcp"
from_port = 10255
to_port = 10255
}
resource "aws_security_group_rule" "master_ingress_etcd" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
protocol = "tcp"
from_port = 2379
to_port = 2380
self = true
}
resource "aws_security_group_rule" "master_ingress_bootstrap_etcd" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
protocol = "tcp"
from_port = 12379
to_port = 12380
self = true
}
resource "aws_security_group_rule" "master_ingress_services" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
protocol = "tcp"
from_port = 30000
to_port = 32767
self = true
}
resource "aws_security_group_rule" "master_ingress_services_from_console" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
source_security_group_id = "${aws_security_group.console.id}"
protocol = "tcp"
from_port = 30000
to_port = 32767
}

@ -1,199 +0,0 @@
resource "aws_security_group" "master" {
vpc_id = "${data.aws_vpc.cluster_vpc.id}"
tags = "${merge(map(
"Name", "${var.cluster_name}_master_sg",
"kubernetes.io/cluster/${var.cluster_name}", "owned",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
}
resource "aws_security_group_rule" "master_egress" {
type = "egress"
security_group_id = "${aws_security_group.master.id}"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
resource "aws_security_group_rule" "master_ingress_icmp" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
protocol = "icmp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
to_port = 0
}
resource "aws_security_group_rule" "master_ingress_ssh" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
to_port = 22
}
resource "aws_security_group_rule" "master_ingress_http" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 80
to_port = 80
}
resource "aws_security_group_rule" "master_ingress_https" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 443
to_port = 443
}
resource "aws_security_group_rule" "master_ingress_heapster" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
protocol = "tcp"
from_port = 4194
to_port = 4194
self = true
}
resource "aws_security_group_rule" "master_ingress_heapster_from_worker" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
source_security_group_id = "${aws_security_group.worker.id}"
protocol = "tcp"
from_port = 4194
to_port = 4194
}
resource "aws_security_group_rule" "master_ingress_flannel" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
protocol = "udp"
from_port = 4789
to_port = 4789
self = true
}
resource "aws_security_group_rule" "master_ingress_flannel_from_worker" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
source_security_group_id = "${aws_security_group.worker.id}"
protocol = "udp"
from_port = 4789
to_port = 4789
}
resource "aws_security_group_rule" "master_ingress_node_exporter" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
protocol = "tcp"
from_port = 9100
to_port = 9100
self = true
}
resource "aws_security_group_rule" "master_ingress_node_exporter_from_worker" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
source_security_group_id = "${aws_security_group.worker.id}"
protocol = "tcp"
from_port = 9100
to_port = 9100
}
resource "aws_security_group_rule" "master_ingress_kubelet_insecure" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
protocol = "tcp"
from_port = 10250
to_port = 10250
self = true
}
resource "aws_security_group_rule" "master_ingress_kubelet_insecure_from_worker" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
source_security_group_id = "${aws_security_group.worker.id}"
protocol = "tcp"
from_port = 10250
to_port = 10250
}
resource "aws_security_group_rule" "master_ingress_kubelet_secure" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
protocol = "tcp"
from_port = 10255
to_port = 10255
self = true
}
resource "aws_security_group_rule" "master_ingress_kubelet_secure_from_worker" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
source_security_group_id = "${aws_security_group.worker.id}"
protocol = "tcp"
from_port = 10255
to_port = 10255
}
resource "aws_security_group_rule" "master_ingress_etcd" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
protocol = "tcp"
from_port = 2379
to_port = 2380
self = true
}
resource "aws_security_group_rule" "master_ingress_bootstrap_etcd" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
protocol = "tcp"
from_port = 12379
to_port = 12380
self = true
}
resource "aws_security_group_rule" "master_ingress_services" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
protocol = "tcp"
from_port = 30000
to_port = 32767
self = true
}
resource "aws_security_group_rule" "master_ingress_services_from_console" {
type = "ingress"
security_group_id = "${aws_security_group.master.id}"
source_security_group_id = "${aws_security_group.console.id}"
protocol = "tcp"
from_port = 30000
to_port = 32767
}

@ -1,179 +0,0 @@
resource "aws_security_group" "worker" {
vpc_id = "${data.aws_vpc.cluster_vpc.id}"
tags = "${merge(map(
"Name", "${var.cluster_name}_worker_sg",
"kubernetes.io/cluster/${var.cluster_name}", "owned",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
}
resource "aws_security_group_rule" "worker_egress" {
type = "egress"
security_group_id = "${aws_security_group.worker.id}"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
resource "aws_security_group_rule" "worker_ingress_icmp" {
type = "ingress"
security_group_id = "${aws_security_group.worker.id}"
protocol = "icmp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
to_port = 0
}
resource "aws_security_group_rule" "worker_ingress_ssh" {
type = "ingress"
security_group_id = "${aws_security_group.worker.id}"
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
to_port = 22
}
resource "aws_security_group_rule" "worker_ingress_http" {
type = "ingress"
security_group_id = "${aws_security_group.worker.id}"
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 80
to_port = 80
}
resource "aws_security_group_rule" "worker_ingress_https" {
type = "ingress"
security_group_id = "${aws_security_group.worker.id}"
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
from_port = 443
to_port = 443
}
resource "aws_security_group_rule" "worker_ingress_heapster" {
type = "ingress"
security_group_id = "${aws_security_group.worker.id}"
protocol = "tcp"
from_port = 4194
to_port = 4194
self = true
}
resource "aws_security_group_rule" "worker_ingress_heapster_from_master" {
type = "ingress"
security_group_id = "${aws_security_group.worker.id}"
source_security_group_id = "${aws_security_group.master.id}"
protocol = "tcp"
from_port = 4194
to_port = 4194
}
resource "aws_security_group_rule" "worker_ingress_flannel" {
type = "ingress"
security_group_id = "${aws_security_group.worker.id}"
protocol = "udp"
from_port = 4789
to_port = 4789
self = true
}
resource "aws_security_group_rule" "worker_ingress_flannel_from_master" {
type = "ingress"
security_group_id = "${aws_security_group.worker.id}"
source_security_group_id = "${aws_security_group.master.id}"
protocol = "udp"
from_port = 4789
to_port = 4789
}
resource "aws_security_group_rule" "worker_ingress_node_exporter" {
type = "ingress"
security_group_id = "${aws_security_group.worker.id}"
protocol = "tcp"
from_port = 9100
to_port = 9100
self = true
}
resource "aws_security_group_rule" "worker_ingress_node_exporter_from_master" {
type = "ingress"
security_group_id = "${aws_security_group.worker.id}"
source_security_group_id = "${aws_security_group.master.id}"
protocol = "tcp"
from_port = 9100
to_port = 9100
}
resource "aws_security_group_rule" "worker_ingress_kubelet_insecure" {
type = "ingress"
security_group_id = "${aws_security_group.worker.id}"
protocol = "tcp"
from_port = 10250
to_port = 10250
self = true
}
resource "aws_security_group_rule" "worker_ingress_kubelet_insecure_from_master" {
type = "ingress"
security_group_id = "${aws_security_group.worker.id}"
source_security_group_id = "${aws_security_group.master.id}"
protocol = "tcp"
from_port = 10250
to_port = 10250
}
resource "aws_security_group_rule" "worker_ingress_kubelet_secure" {
type = "ingress"
security_group_id = "${aws_security_group.worker.id}"
protocol = "tcp"
from_port = 10255
to_port = 10255
self = true
}
resource "aws_security_group_rule" "worker_ingress_kubelet_secure_from_master" {
type = "ingress"
security_group_id = "${aws_security_group.worker.id}"
source_security_group_id = "${aws_security_group.master.id}"
protocol = "tcp"
from_port = 10255
to_port = 10255
}
resource "aws_security_group_rule" "worker_ingress_services" {
type = "ingress"
security_group_id = "${aws_security_group.worker.id}"
protocol = "tcp"
from_port = 30000
to_port = 32767
self = true
}
resource "aws_security_group_rule" "worker_ingress_services_from_console" {
type = "ingress"
security_group_id = "${aws_security_group.worker.id}"
source_security_group_id = "${aws_security_group.console.id}"
protocol = "tcp"
from_port = 30000
to_port = 32767
}

@ -1,58 +0,0 @@
variable "cidr_block" {
type = "string"
}
variable "cluster_id" {
type = "string"
}
variable "base_domain" {
type = "string"
}
variable "cluster_name" {
type = "string"
}
variable "external_vpc_id" {
type = "string"
}
variable "external_master_subnet_ids" {
type = "list"
}
variable "external_worker_subnet_ids" {
type = "list"
}
variable "extra_tags" {
description = "Extra AWS tags to be applied to created resources."
type = "map"
default = {}
}
variable "new_master_subnet_configs" {
description = "{az_name = new_subnet_cidr}: Empty map means create new subnets in all availability zones in region with generated cidrs"
type = "map"
}
variable "new_worker_subnet_configs" {
description = "{az_name = new_subnet_cidr}: Empty map means create new subnets in all availability zones in region with generated cidrs"
type = "map"
}
variable "private_master_endpoints" {
description = "If set to true, private-facing ingress resources are created."
default = true
}
variable "public_master_endpoints" {
description = "If set to true, public-facing ingress resources are created."
default = true
}
variable "depends_on" {
default = []
type = "list"
}

@ -1,78 +0,0 @@
variable "master_az_count" {
type = "string"
}
variable "worker_az_count" {
type = "string"
}
variable "cidr_block" {
type = "string"
}
variable "cluster_id" {
type = "string"
}
variable "base_domain" {
type = "string"
}
variable "cluster_name" {
type = "string"
}
variable "external_vpc_id" {
type = "string"
}
variable "external_master_subnets" {
type = "list"
}
variable "external_worker_subnets" {
type = "list"
}
variable "extra_tags" {
description = "Extra AWS tags to be applied to created resources."
type = "map"
default = {}
}
variable "enable_etcd_sg" {
description = "If set to true, security groups for etcd nodes are being created"
default = true
}
variable "master_subnets" {
type = "list"
}
variable "worker_subnets" {
type = "list"
}
variable "master_azs" {
type = "list"
}
variable "worker_azs" {
type = "list"
}
variable "private_master_endpoints" {
description = "If set to true, private-facing ingress resources are created."
default = true
}
variable "public_master_endpoints" {
description = "If set to true, public-facing ingress resources are created."
default = true
}
variable "custom_dns_name" {
type = "string"
default = ""
description = "DNS prefix used to construct the console and API server endpoints."
}

@ -1,43 +0,0 @@
resource "aws_route_table" "private_routes" {
count = "${local.new_worker_az_count}"
vpc_id = "${data.aws_vpc.cluster_vpc.id}"
tags = "${merge(map(
"Name","${var.cluster_name}-private-${local.new_worker_subnet_azs[count.index]}",
"kubernetes.io/cluster/${var.cluster_name}", "shared",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
}
resource "aws_route" "to_nat_gw" {
count = "${local.new_worker_az_count}"
route_table_id = "${aws_route_table.private_routes.*.id[count.index]}"
destination_cidr_block = "0.0.0.0/0"
nat_gateway_id = "${element(aws_nat_gateway.nat_gw.*.id, count.index)}"
depends_on = ["aws_route_table.private_routes"]
}
resource "aws_subnet" "worker_subnet" {
count = "${local.new_worker_az_count}"
vpc_id = "${data.aws_vpc.cluster_vpc.id}"
cidr_block = "${lookup(var.new_worker_subnet_configs,
local.new_worker_subnet_azs[count.index],
cidrsubnet(local.new_worker_cidr_range, 3, count.index),
)}"
tags = "${merge(map(
"Name", "${var.cluster_name}-worker-${local.new_worker_subnet_azs[count.index]}",
"kubernetes.io/cluster/${var.cluster_name}","shared",
"kubernetes.io/role/internal-elb", "",
"tectonicClusterID", "${var.cluster_id}",
),
var.extra_tags)}"
}
resource "aws_route_table_association" "worker_routing" {
count = "${local.new_worker_az_count}"
route_table_id = "${aws_route_table.private_routes.*.id[count.index]}"
subnet_id = "${aws_subnet.worker_subnet.*.id[count.index]}"
}

@ -1,46 +0,0 @@
resource "aws_route_table" "private_routes" {
count = "${var.external_vpc_id == "" ? var.worker_az_count : 0}"
vpc_id = "${data.aws_vpc.cluster_vpc.id}"
tags = "${merge(map(
"Name", "${var.cluster_name}-private-${data.aws_availability_zones.azs.names[count.index]}",
"kubernetes.io/cluster/${var.cluster_name}", "shared",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
}
resource "aws_route" "to_nat_gw" {
count = "${var.external_vpc_id == "" ? var.worker_az_count : 0}"
route_table_id = "${aws_route_table.private_routes.*.id[count.index]}"
destination_cidr_block = "0.0.0.0/0"
nat_gateway_id = "${element(aws_nat_gateway.nat_gw.*.id, count.index)}"
depends_on = ["aws_route_table.private_routes"]
}
resource "aws_subnet" "worker_subnet" {
count = "${var.external_vpc_id == "" ? var.worker_az_count : 0}"
vpc_id = "${data.aws_vpc.cluster_vpc.id}"
cidr_block = "${length(var.worker_subnets) > 1 ?
"${element(var.worker_subnets, count.index)}" :
"${cidrsubnet(data.aws_vpc.cluster_vpc.cidr_block, 4, count.index + var.worker_az_count)}"
}"
availability_zone = "${var.worker_azs[count.index]}"
tags = "${merge(map(
"Name", "${var.cluster_name}-worker-${ "${length(var.worker_azs)}" > 0 ?
"${var.worker_azs[count.index]}" :
"${data.aws_availability_zones.azs.names[count.index]}" }",
"kubernetes.io/cluster/${var.cluster_name}", "shared",
"kubernetes.io/role/internal-elb", "",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
}
resource "aws_route_table_association" "worker_routing" {
count = "${var.external_vpc_id == "" ? var.worker_az_count : 0}"
route_table_id = "${aws_route_table.private_routes.*.id[count.index]}"
subnet_id = "${aws_subnet.worker_subnet.*.id[count.index]}"
}

@ -1,74 +0,0 @@
resource "aws_internet_gateway" "igw" {
count = "${local.external_vpc_mode ? 0 : 1}"
vpc_id = "${data.aws_vpc.cluster_vpc.id}"
tags = "${merge(map(
"Name", "${var.cluster_name}-igw",
"kubernetes.io/cluster/${var.cluster_name}", "shared",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
}
resource "aws_route_table" "default" {
count = "${var.external_vpc_id == "" ? 1 : 0}"
vpc_id = "${data.aws_vpc.cluster_vpc.id}"
tags = "${merge(map(
"Name", "${var.cluster_name}-public",
"kubernetes.io/cluster/${var.cluster_name}", "shared",
"tectonicClusterID", "${var.cluster_id}",
), var.extra_tags)}"
}
resource "aws_main_route_table_association" "main_vpc_routes" {
count = "${local.external_vpc_mode ? 0 : 1}"
vpc_id = "${data.aws_vpc.cluster_vpc.id}"
route_table_id = "${aws_route_table.default.id}"
}
resource "aws_route" "igw_route" {
count = "${local.external_vpc_mode ? 0 : 1}"
destination_cidr_block = "0.0.0.0/0"
route_table_id = "${aws_route_table.default.id}"
gateway_id = "${aws_internet_gateway.igw.id}"
}
resource "aws_subnet" "master_subnet" {
count = "${local.new_master_az_count}"
vpc_id = "${data.aws_vpc.cluster_vpc.id}"
cidr_block = "${lookup(var.new_master_subnet_configs,
local.new_master_subnet_azs[count.index],
cidrsubnet(local.new_master_cidr_range, 3, count.index),
)}"
availability_zone = "${local.new_master_subnet_azs[count.index]}"
tags = "${merge(map(
"Name", "${var.cluster_name}-master-${local.new_master_subnet_azs[count.index]}",
"kubernetes.io/cluster/${var.cluster_name}", "shared",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
}
resource "aws_route_table_association" "route_net" {
count = "${local.new_master_az_count}"
route_table_id = "${aws_route_table.default.id}"
subnet_id = "${aws_subnet.master_subnet.*.id[count.index]}"
}
resource "aws_eip" "nat_eip" {
count = "${min(local.new_master_az_count,local.new_worker_az_count)}"
vpc = true
# Terraform does not declare an explicit dependency towards the internet gateway.
# This can cause the internet gateway to be deleted/detached before the EIPs.
# https://github.com/coreos/tectonic-installer/issues/1017#issuecomment-307780549
depends_on = ["aws_internet_gateway.igw"]
}
resource "aws_nat_gateway" "nat_gw" {
count = "${min(local.new_master_az_count,local.new_worker_az_count)}"
allocation_id = "${aws_eip.nat_eip.*.id[count.index]}"
subnet_id = "${aws_subnet.master_subnet.*.id[count.index]}"
}
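For reference, the cidrsubnet() and three-argument lookup() calls used above behave as follows (worked values, not taken from this module):

# cidrsubnet(prefix, newbits, netnum) carves fixed-size child blocks out of a prefix:
#   cidrsubnet("10.0.0.0/16", 3, 0)  =>  "10.0.0.0/19"
#   cidrsubnet("10.0.0.0/16", 3, 2)  =>  "10.0.64.0/19"
# lookup(map, key, default) falls back to the default when the key is absent:
#   lookup(map("us-east-1a", "10.0.0.0/19"), "us-east-1b", "10.0.32.0/19")  =>  "10.0.32.0/19"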

@ -1,77 +0,0 @@
resource "aws_internet_gateway" "igw" {
count = "${var.external_vpc_id == "" ? 1 : 0}"
vpc_id = "${data.aws_vpc.cluster_vpc.id}"
tags = "${merge(map(
"Name", "${var.cluster_name}-igw",
"kubernetes.io/cluster/${var.cluster_name}", "shared",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
}
resource "aws_route_table" "default" {
count = "${var.external_vpc_id == "" ? 1 : 0}"
vpc_id = "${data.aws_vpc.cluster_vpc.id}"
tags = "${merge(map(
"Name", "${var.cluster_name}-public",
"kubernetes.io/cluster/${var.cluster_name}", "shared",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
}
resource "aws_main_route_table_association" "main_vpc_routes" {
count = "${var.external_vpc_id == "" ? 1 : 0}"
vpc_id = "${data.aws_vpc.cluster_vpc.id}"
route_table_id = "${aws_route_table.default.id}"
}
resource "aws_route" "igw_route" {
count = "${var.external_vpc_id == "" ? 1 : 0}"
destination_cidr_block = "0.0.0.0/0"
route_table_id = "${aws_route_table.default.id}"
gateway_id = "${aws_internet_gateway.igw.id}"
}
resource "aws_subnet" "master_subnet" {
count = "${var.external_vpc_id == "" ? var.master_az_count : 0}"
vpc_id = "${data.aws_vpc.cluster_vpc.id}"
cidr_block = "${length(var.master_subnets) > 1 ?
"${element(var.master_subnets, count.index)}" :
"${cidrsubnet(data.aws_vpc.cluster_vpc.cidr_block, 4, count.index)}"
}"
availability_zone = "${var.master_azs[count.index]}"
tags = "${merge(map(
"Name", "${var.cluster_name}-master-${ "${length(var.master_azs)}" > 0 ?
"${var.master_azs[count.index]}" :
"${data.aws_availability_zones.azs.names[count.index]}" }",
"kubernetes.io/cluster/${var.cluster_name}", "shared",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
}
resource "aws_route_table_association" "route_net" {
count = "${var.external_vpc_id == "" ? var.master_az_count : 0}"
route_table_id = "${aws_route_table.default.id}"
subnet_id = "${aws_subnet.master_subnet.*.id[count.index]}"
}
resource "aws_eip" "nat_eip" {
count = "${var.external_vpc_id == "" ? min(var.master_az_count, var.worker_az_count) : 0}"
vpc = true
# Terraform does not declare an explicit dependency towards the internet gateway.
# This can cause the internet gateway to be deleted/detached before the EIPs.
# https://github.com/coreos/tectonic-installer/issues/1017#issuecomment-307780549
depends_on = ["aws_internet_gateway.igw"]
}
resource "aws_nat_gateway" "nat_gw" {
count = "${var.external_vpc_id == "" ? min(var.master_az_count, var.worker_az_count) : 0}"
allocation_id = "${aws_eip.nat_eip.*.id[count.index]}"
subnet_id = "${aws_subnet.master_subnet.*.id[count.index]}"
}

@ -1,17 +0,0 @@
locals {
new_worker_cidr_range = "${cidrsubnet(data.aws_vpc.cluster_vpc.cidr_block,1,1)}"
new_master_cidr_range = "${cidrsubnet(data.aws_vpc.cluster_vpc.cidr_block,1,0)}"
}
resource "aws_vpc" "new_vpc" {
count = "${var.external_vpc_id == "" ? 1 : 0}"
cidr_block = "${var.cidr_block}"
enable_dns_hostnames = true
enable_dns_support = true
tags = "${merge(map(
"Name", "${var.cluster_name}.${var.base_domain}",
"kubernetes.io/cluster/${var.cluster_name}", "shared",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
}

@ -1,30 +0,0 @@
data "aws_availability_zones" "azs" {}
resource "aws_vpc" "new_vpc" {
count = "${var.external_vpc_id == "" ? 1 : 0}"
cidr_block = "${var.cidr_block}"
enable_dns_hostnames = true
enable_dns_support = true
tags = "${merge(map(
"Name", "${var.cluster_name}.${var.base_domain}",
"kubernetes.io/cluster/${var.cluster_name}", "shared",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
}
data "aws_vpc" "cluster_vpc" {
# The join() hack is required because currently the ternary operator
# evaluates the expressions on both branches of the condition before
# returning a value. When providing an external VPC, the template VPC
# resource gets a count of zero which triggers an evaluation error.
#
# This is tracked upstream: https://github.com/hashicorp/hil/issues/50
#
id = "${var.external_vpc_id == "" ? join(" ", aws_vpc.new_vpc.*.id) : var.external_vpc_id }"
}
locals {
master_subnet_ids = ["${split(",", var.external_vpc_id == "" ? join(",", aws_subnet.master_subnet.*.id) : join(",", data.aws_subnet.external_master.*.id))}"]
worker_subnet_ids = ["${split(",", var.external_vpc_id == "" ? join(",", aws_subnet.worker_subnet.*.id) : join(",", data.aws_subnet.external_worker.*.id))}"]
}
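The splat-plus-join() guard described in the comment above, sketched in isolation with a hypothetical local: both ternary branches are evaluated, so the count = 0 resource is referenced through a splat that degrades to an empty string rather than an error.

locals {
  effective_vpc_id = "${var.external_vpc_id == "" ? join(" ", aws_vpc.new_vpc.*.id) : var.external_vpc_id}"
}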

@ -1,23 +0,0 @@
data "ignition_config" "main" {
files = ["${compact(list(
var.ign_installer_kubelet_env_id,
var.ign_installer_runtime_mappings_id,
var.ign_max_user_watches_id,
var.ign_nfs_config_id,
var.ign_ntp_dropin_id,
var.ign_profile_env_id,
var.ign_s3_puller_id,
var.ign_systemd_default_env_id,
))}",
"${var.ign_ca_cert_id_list}",
]
systemd = [
"${var.ign_docker_dropin_id}",
"${var.ign_iscsi_service_id}",
"${var.ign_k8s_node_bootstrap_service_id}",
"${var.ign_kubelet_service_id}",
"${var.ign_locksmithd_service_id}",
"${var.ign_update_ca_certificates_dropin_id}",
]
}

@ -1,21 +0,0 @@
resource "aws_s3_bucket_object" "ignition_worker" {
bucket = "${var.s3_bucket}"
key = "ignition_worker.json"
content = "${data.ignition_config.main.rendered}"
acl = "private"
server_side_encryption = "AES256"
tags = "${merge(map(
"Name", "${var.cluster_name}-ignition-worker",
"KubernetesCluster", "${var.cluster_name}",
"tectonicClusterID", "${var.cluster_id}"
), var.extra_tags)}"
}
data "ignition_config" "s3" {
replace {
source = "${format("s3://%s/%s", var.s3_bucket, aws_s3_bucket_object.ignition_worker.key)}"
verification = "sha512-${sha512(data.ignition_config.main.rendered)}"
}
}

@ -1,95 +0,0 @@
variable "ssh_key" {
type = "string"
}
variable "container_linux_channel" {
type = "string"
}
variable "container_linux_version" {
type = "string"
}
variable "cluster_id" {
type = "string"
}
variable "cluster_name" {
type = "string"
}
variable "ec2_type" {
type = "string"
}
variable "ec2_ami" {
type = "string"
default = ""
}
variable "instance_count" {
type = "string"
}
variable "subnet_ids" {
type = "list"
}
variable "sg_ids" {
type = "list"
description = "The security group IDs to be applied."
}
variable "load_balancers" {
description = "List of ELBs to attach all worker instances to."
type = "list"
default = []
}
variable "extra_tags" {
description = "Extra AWS tags to be applied to created resources."
type = "map"
default = {}
}
variable "autoscaling_group_extra_tags" {
description = "Extra AWS tags to be applied to created autoscaling group resources."
type = "list"
default = []
}
variable "root_volume_type" {
type = "string"
description = "The type of volume for the root block device."
}
variable "root_volume_size" {
type = "string"
description = "The size of the volume in gigabytes for the root block device."
}
variable "root_volume_iops" {
type = "string"
default = "100"
description = "The amount of provisioned IOPS for the root block device."
}
variable "worker_iam_role" {
type = "string"
default = ""
description = "IAM role to use for the instance profiles of worker nodes."
}
variable "base_domain" {
type = "string"
description = "Domain on which the ELB records will be created"
}
variable "kubeconfig_content" {
type = "string"
default = ""
}
variable "user_data_ign" {
type = "string"
}

@ -1,93 +0,0 @@
variable "ssh_key" {
type = "string"
}
variable "vpc_id" {
type = "string"
}
variable "container_linux_channel" {
type = "string"
}
variable "container_linux_version" {
type = "string"
}
variable "cluster_id" {
type = "string"
}
variable "cluster_name" {
type = "string"
}
variable "ec2_type" {
type = "string"
}
variable "ec2_ami" {
type = "string"
default = ""
}
variable "instance_count" {
type = "string"
}
variable "subnet_ids" {
type = "list"
}
variable "sg_ids" {
type = "list"
description = "The security group IDs to be applied."
}
variable "load_balancers" {
description = "List of ELBs to attach all worker instances to."
type = "list"
default = []
}
variable "extra_tags" {
description = "Extra AWS tags to be applied to created resources."
type = "map"
default = {}
}
variable "autoscaling_group_extra_tags" {
description = "Extra AWS tags to be applied to created autoscaling group resources."
type = "list"
default = []
}
variable "root_volume_type" {
type = "string"
description = "The type of volume for the root block device."
}
variable "root_volume_size" {
type = "string"
description = "The size of the volume in gigabytes for the root block device."
}
variable "root_volume_iops" {
type = "string"
default = "100"
description = "The amount of provisioned IOPS for the root block device."
}
variable "worker_iam_role" {
type = "string"
default = ""
description = "IAM role to use for the instance profiles of worker nodes."
}
variable "ign_s3_puller_id" {
type = "string"
}
variable "s3_bucket" {
type = "string"
}

@ -1,175 +0,0 @@
locals {
ami_owner = "595879546273"
arn = "aws"
}
data "aws_ami" "coreos_ami" {
filter {
name = "name"
values = ["CoreOS-${var.container_linux_channel}-${var.container_linux_version}-*"]
}
filter {
name = "architecture"
values = ["x86_64"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
filter {
name = "owner-id"
values = ["${local.ami_owner}"]
}
}
resource "aws_launch_configuration" "worker_conf" {
instance_type = "${var.ec2_type}"
image_id = "${coalesce(var.ec2_ami, data.aws_ami.coreos_ami.image_id)}"
name_prefix = "${var.cluster_name}-worker-"
key_name = "${var.ssh_key}"
security_groups = ["${var.sg_ids}"]
iam_instance_profile = "${aws_iam_instance_profile.worker_profile.arn}"
user_data = "${var.user_data_ign}"
lifecycle {
create_before_destroy = true
# Ignore changes in the AMI which force recreation of the resource. This
# avoids accidental deletion of nodes whenever a new CoreOS Release comes
# out.
ignore_changes = ["image_id"]
}
root_block_device {
volume_type = "${var.root_volume_type}"
volume_size = "${var.root_volume_size}"
iops = "${var.root_volume_type == "io1" ? var.root_volume_iops : 0}"
}
}
resource "aws_autoscaling_group" "workers" {
name = "${var.cluster_name}-workers"
desired_capacity = "${var.instance_count}"
max_size = "${var.instance_count * 3}"
min_size = "${var.instance_count}"
launch_configuration = "${aws_launch_configuration.worker_conf.id}"
vpc_zone_identifier = ["${var.subnet_ids}"]
tags = [
{
key = "Name"
value = "${var.cluster_name}-worker"
propagate_at_launch = true
},
{
key = "kubernetes.io/cluster/${var.cluster_name}"
value = "owned"
propagate_at_launch = true
},
{
key = "tectonicClusterID"
value = "${var.cluster_id}"
propagate_at_launch = true
},
"${var.autoscaling_group_extra_tags}",
]
lifecycle {
create_before_destroy = true
}
}
resource "aws_autoscaling_attachment" "workers" {
count = "${length(var.load_balancers)}"
autoscaling_group_name = "${aws_autoscaling_group.workers.name}"
elb = "${var.load_balancers[count.index]}"
}
resource "aws_iam_instance_profile" "worker_profile" {
name = "${var.cluster_name}-worker-profile"
role = "${var.worker_iam_role == "" ?
join("|", aws_iam_role.worker_role.*.name) :
join("|", data.aws_iam_role.worker_role.*.name)
}"
}
data "aws_iam_role" "worker_role" {
count = "${var.worker_iam_role == "" ? 0 : 1}"
name = "${var.worker_iam_role}"
}
resource "aws_iam_role" "worker_role" {
count = "${var.worker_iam_role == "" ? 1 : 0}"
name = "${var.cluster_name}-worker-role"
path = "/"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_iam_role_policy" "worker_policy" {
count = "${var.worker_iam_role == "" ? 1 : 0}"
name = "${var.cluster_name}_worker_policy"
role = "${aws_iam_role.worker_role.id}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "ec2:Describe*",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:AttachVolume",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:DetachVolume",
"Resource": "*"
},
{
"Action": "elasticloadbalancing:*",
"Resource": "*",
"Effect": "Allow"
},
{
"Action" : [
"s3:GetObject"
],
"Resource": "arn:${local.arn}:s3:::*",
"Effect": "Allow"
},
{
"Action" : [
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances"
],
"Resource": "*",
"Effect": "Allow"
}
]
}
EOF
}
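A minimal sketch of the launch-configuration lifecycle pattern above, with hypothetical names and values: name_prefix plus create_before_destroy lets the replacement be created before the old configuration is destroyed, and ignore_changes pins the AMI so a new CoreOS release does not force node replacement.

resource "aws_launch_configuration" "example_worker" {
  name_prefix   = "demo-worker-"
  image_id      = "${data.aws_ami.coreos_ami.image_id}"
  instance_type = "t2.medium"

  lifecycle {
    create_before_destroy = true
    ignore_changes        = ["image_id"]
  }
}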

@ -1,175 +0,0 @@
locals {
ami_owner = "595879546273"
arn = "aws"
}
data "aws_ami" "coreos_ami" {
filter {
name = "name"
values = ["CoreOS-${var.container_linux_channel}-${var.container_linux_version}-*"]
}
filter {
name = "architecture"
values = ["x86_64"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
filter {
name = "owner-id"
values = ["${local.ami_owner}"]
}
}
resource "aws_launch_configuration" "worker_conf" {
instance_type = "${var.ec2_type}"
image_id = "${coalesce(var.ec2_ami, data.aws_ami.coreos_ami.image_id)}"
name_prefix = "${var.cluster_name}-worker-"
key_name = "${var.ssh_key}"
security_groups = ["${var.sg_ids}"]
iam_instance_profile = "${aws_iam_instance_profile.worker_profile.arn}"
user_data = "${data.ignition_config.s3.rendered}"
lifecycle {
create_before_destroy = true
# Ignore changes in the AMI which force recreation of the resource. This
# avoids accidental deletion of nodes whenever a new CoreOS Release comes
# out.
ignore_changes = ["image_id"]
}
root_block_device {
volume_type = "${var.root_volume_type}"
volume_size = "${var.root_volume_size}"
iops = "${var.root_volume_type == "io1" ? var.root_volume_iops : 0}"
}
}
resource "aws_autoscaling_group" "workers" {
name = "${var.cluster_name}-workers"
desired_capacity = "${var.instance_count}"
max_size = "${var.instance_count * 3}"
min_size = "${var.instance_count}"
launch_configuration = "${aws_launch_configuration.worker_conf.id}"
vpc_zone_identifier = ["${var.subnet_ids}"]
tags = [
{
key = "Name"
value = "${var.cluster_name}-worker"
propagate_at_launch = true
},
{
key = "kubernetes.io/cluster/${var.cluster_name}"
value = "owned"
propagate_at_launch = true
},
{
key = "tectonicClusterID"
value = "${var.cluster_id}"
propagate_at_launch = true
},
"${var.autoscaling_group_extra_tags}",
]
lifecycle {
create_before_destroy = true
}
}
resource "aws_autoscaling_attachment" "workers" {
count = "${length(var.load_balancers)}"
autoscaling_group_name = "${aws_autoscaling_group.workers.name}"
elb = "${var.load_balancers[count.index]}"
}
resource "aws_iam_instance_profile" "worker_profile" {
name = "${var.cluster_name}-worker-profile"
role = "${var.worker_iam_role == "" ?
join("|", aws_iam_role.worker_role.*.name) :
join("|", data.aws_iam_role.worker_role.*.name)
}"
}
data "aws_iam_role" "worker_role" {
count = "${var.worker_iam_role == "" ? 0 : 1}"
name = "${var.worker_iam_role}"
}
resource "aws_iam_role" "worker_role" {
count = "${var.worker_iam_role == "" ? 1 : 0}"
name = "${var.cluster_name}-worker-role"
path = "/"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_iam_role_policy" "worker_policy" {
count = "${var.worker_iam_role == "" ? 1 : 0}"
name = "${var.cluster_name}_worker_policy"
role = "${aws_iam_role.worker_role.id}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "ec2:Describe*",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:AttachVolume",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "ec2:DetachVolume",
"Resource": "*"
},
{
"Action": "elasticloadbalancing:*",
"Resource": "*",
"Effect": "Allow"
},
{
"Action" : [
"s3:GetObject"
],
"Resource": "arn:${local.arn}:s3:::*",
"Effect": "Allow"
},
{
"Action" : [
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances"
],
"Resource": "*",
"Effect": "Allow"
}
]
}
EOF
}

@ -1,69 +0,0 @@
resource "azurerm_availability_set" "etcd" {
count = "${var.etcd_count > 0 ? 1 : 0}"
name = "${var.cluster_name}-etcd"
location = "${var.location}"
resource_group_name = "${var.resource_group_name}"
managed = true
platform_fault_domain_count = "${var.fault_domains}"
tags = "${merge(map(
"Name", "${var.cluster_name}-etcd",
"tectonicClusterID", "${var.cluster_id}"),
var.extra_tags)}"
}
resource "azurerm_virtual_machine" "etcd_node" {
count = "${var.etcd_count}"
name = "${var.cluster_name}-etcd-${count.index}"
location = "${var.location}"
resource_group_name = "${var.resource_group_name}"
network_interface_ids = ["${var.network_interface_ids[count.index]}"]
vm_size = "${var.vm_size}"
availability_set_id = "${azurerm_availability_set.etcd.id}"
delete_os_disk_on_termination = true
storage_image_reference {
publisher = "CoreOS"
offer = "CoreOS"
sku = "${var.container_linux_channel}"
version = "${var.container_linux_version}"
}
storage_os_disk {
name = "etcd-${count.index}-os-${var.storage_id}"
managed_disk_type = "${var.storage_type}"
create_option = "FromImage"
caching = "ReadWrite"
os_type = "linux"
disk_size_gb = "${var.root_volume_size}"
}
os_profile {
computer_name = "${var.cluster_name}-etcd-${count.index}"
admin_username = "core"
admin_password = ""
custom_data = "${base64encode("${data.ignition_config.etcd.*.rendered[count.index]}")}"
}
os_profile_linux_config {
disable_password_authentication = true
ssh_keys {
path = "/home/core/.ssh/authorized_keys"
key_data = "${file(var.public_ssh_key)}"
}
}
tags = "${merge(map(
"Name", "${var.cluster_name}-etcd-${count.index}",
"tectonicClusterID", "${var.cluster_id}"),
var.extra_tags)}"
lifecycle {
ignore_changes = [
"storage_os_disk",
"storage_data_disk",
]
}
}

@ -1,52 +0,0 @@
data "ignition_config" "etcd" {
count = "${var.etcd_count}"
systemd = [
"${data.ignition_systemd_unit.locksmithd.*.id[count.index]}",
"${var.ign_etcd_dropin_id_list[count.index]}",
]
users = [
"${data.ignition_user.core.id}",
]
files = ["${compact(list(
var.ign_profile_env_id,
var.ign_systemd_default_env_id,
))}",
"${var.ign_etcd_crt_id_list}",
"${var.ign_ntp_dropin_id}",
]
}
data "ignition_user" "core" {
count = "${var.etcd_count > 0 ? 1 : 0}"
name = "core"
ssh_authorized_keys = [
"${file(var.public_ssh_key)}",
]
}
data "ignition_systemd_unit" "locksmithd" {
count = "${var.etcd_count}"
name = "locksmithd.service"
enabled = true
dropin = [
{
name = "40-etcd-lock.conf"
content = <<EOF
[Service]
Environment=REBOOT_STRATEGY=etcd-lock
${var.tls_enabled ? "Environment=\"LOCKSMITHD_ETCD_CAFILE=/etc/ssl/etcd/ca.crt\"" : ""}
${var.tls_enabled ? "Environment=\"LOCKSMITHD_ETCD_KEYFILE=/etc/ssl/etcd/client.key\"" : ""}
${var.tls_enabled ? "Environment=\"LOCKSMITHD_ETCD_CERTFILE=/etc/ssl/etcd/client.crt\"" : ""}
Environment="LOCKSMITHD_ENDPOINT=${var.tls_enabled ? "https" : "http"}://etcd-${count.index}:2379"
EOF
},
]
}
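The conditional Environment lines in the drop-in above rely on each guarded interpolation collapsing to an empty string when TLS is disabled; a one-line sketch of the same idea, with a hypothetical local name:

locals {
  tls_ca_line = "${var.tls_enabled ? "Environment=\"LOCKSMITHD_ETCD_CAFILE=/etc/ssl/etcd/ca.crt\"" : ""}"
}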

@ -1,3 +0,0 @@
output "etcd_vm_ids" {
value = ["${azurerm_virtual_machine.etcd_node.*.id}"]
}

@ -1,109 +0,0 @@
// Location is the Azure Location (East US, West US, etc)
variable "location" {
type = "string"
}
variable "resource_group_name" {
type = "string"
}
variable "cluster_id" {
type = "string"
}
// VM Size name
variable "vm_size" {
type = "string"
}
// Storage account type
variable "storage_type" {
type = "string"
}
variable "storage_id" {
type = "string"
}
variable "root_volume_size" {
type = "string"
}
// Count of etcd nodes to be created.
variable "etcd_count" {
type = "string"
}
// The base DNS domain of the cluster.
// Example: `azure.dev.coreos.systems`
variable "base_domain" {
type = "string"
}
// The name of the cluster.
variable "cluster_name" {
type = "string"
}
variable "public_ssh_key" {
type = "string"
}
variable "network_interface_ids" {
type = "list"
}
variable "versions" {
description = "(internal) Versions of the components to use"
type = "map"
}
variable "container_linux_channel" {
type = "string"
}
variable "container_linux_version" {
type = "string"
}
variable "const_internal_node_names" {
type = "list"
default = ["etcd-0", "etcd-1", "etcd-2", "etcd-3", "etcd-4"]
description = "(internal) The list of hostnames assigned to etcd member nodes."
}
variable "tls_enabled" {
default = false
}
variable "container_image" {
type = "string"
}
variable "extra_tags" {
type = "map"
}
variable "ign_etcd_dropin_id_list" {
type = "list"
}
variable "fault_domains" {
type = "string"
}
variable "ign_etcd_crt_id_list" {
type = "list"
}
variable "ign_profile_env_id" {
type = "string"
}
variable "ign_systemd_default_env_id" {
type = "string"
}
variable "ign_ntp_dropin_id" {
type = "string"
}

@ -1,62 +0,0 @@
data "ignition_config" "master" {
files = ["${compact(list(
data.ignition_file.cloud_provider_config.id,
data.ignition_file.kubeconfig.id,
var.ign_azure_udev_rules_id,
var.ign_installer_kubelet_env_id,
var.ign_installer_runtime_mappings_id,
var.ign_max_user_watches_id,
var.ign_nfs_config_id,
var.ign_ntp_dropin_id,
var.ign_profile_env_id,
var.ign_systemd_default_env_id,
))}",
"${var.ign_ca_cert_id_list}",
]
systemd = ["${compact(list(
var.ign_docker_dropin_id,
var.ign_locksmithd_service_id,
var.ign_k8s_node_bootstrap_service_id,
var.ign_kubelet_service_id,
var.ign_tx_off_service_id,
var.ign_bootkube_service_id,
var.ign_tectonic_service_id,
var.ign_bootkube_path_unit_id,
var.ign_tectonic_path_unit_id,
var.ign_update_ca_certificates_dropin_id,
var.ign_iscsi_service_id,
))}"]
users = [
"${data.ignition_user.core.id}",
]
}
data "ignition_user" "core" {
name = "core"
ssh_authorized_keys = [
"${file(var.public_ssh_key)}",
]
}
data "ignition_file" "kubeconfig" {
filesystem = "root"
path = "/etc/kubernetes/kubeconfig"
mode = 0644
content {
content = "${var.kubeconfig_content}"
}
}
data "ignition_file" "cloud_provider_config" {
filesystem = "root"
path = "/etc/kubernetes/cloud/config"
mode = 0600
content {
content = "${var.cloud_provider_config}"
}
}

@ -1,68 +0,0 @@
resource "azurerm_availability_set" "tectonic_masters" {
name = "${var.cluster_name}-masters"
location = "${var.location}"
resource_group_name = "${var.resource_group_name}"
managed = true
platform_fault_domain_count = "${var.fault_domains}"
tags = "${merge(map(
"Name", "${var.cluster_name}-masters",
"tectonicClusterID", "${var.cluster_id}"),
var.extra_tags)}"
}
resource "azurerm_virtual_machine" "tectonic_master" {
count = "${var.master_count}"
name = "${var.cluster_name}-master-${count.index}"
location = "${var.location}"
resource_group_name = "${var.resource_group_name}"
network_interface_ids = ["${var.network_interface_ids[count.index]}"]
vm_size = "${var.vm_size}"
availability_set_id = "${azurerm_availability_set.tectonic_masters.id}"
delete_os_disk_on_termination = true
storage_image_reference {
publisher = "CoreOS"
offer = "CoreOS"
sku = "${var.container_linux_channel}"
version = "${var.container_linux_version}"
}
storage_os_disk {
name = "master-${count.index}-os-${var.storage_id}"
managed_disk_type = "${var.storage_type}"
create_option = "FromImage"
caching = "ReadWrite"
os_type = "linux"
disk_size_gb = "${var.root_volume_size}"
}
os_profile {
computer_name = "${var.cluster_name}-master-${count.index}"
admin_username = "core"
admin_password = ""
custom_data = "${base64encode("${data.ignition_config.master.rendered}")}"
}
os_profile_linux_config {
disable_password_authentication = true
ssh_keys {
path = "/home/core/.ssh/authorized_keys"
key_data = "${file(var.public_ssh_key)}"
}
}
tags = "${merge(map(
"Name", "${var.cluster_name}-master-${count.index}",
"tectonicClusterID", "${var.cluster_id}"),
var.extra_tags)}"
lifecycle {
ignore_changes = [
"storage_os_disk",
"storage_data_disk",
]
}
}

@ -1,3 +0,0 @@
output "master_vm_ids" {
value = ["${azurerm_virtual_machine.tectonic_master.*.id}"]
}

@ -1,99 +0,0 @@
variable "container_linux_channel" {
type = "string"
}
variable "container_linux_version" {
type = "string"
}
variable "cloud_provider_config" {
type = "string"
}
variable "cluster_id" {
type = "string"
}
variable "cluster_name" {
type = "string"
description = "The name of the cluster."
}
variable "extra_tags" {
type = "map"
}
variable "ign_azure_udev_rules_id" {
type = "string"
}
variable "ign_tx_off_service_id" {
type = "string"
}
variable "kubeconfig_content" {
type = "string"
}
variable "location" {
type = "string"
description = "Location is the Azure Location (East US, West US, etc)"
}
variable "master_count" {
type = "string"
description = "Count of master nodes to be created."
}
variable "network_interface_ids" {
type = "list"
description = "List of NICs to use for master VMs"
}
variable "public_ssh_key" {
type = "string"
}
variable "resource_group_name" {
type = "string"
}
variable "storage_id" {
type = "string"
}
variable "storage_type" {
type = "string"
description = "Storage account type"
}
variable "root_volume_size" {
type = "string"
}
variable "vm_size" {
type = "string"
description = "VM Size name"
}
variable "ign_bootkube_service_id" {
type = "string"
description = "The ID of the bootkube systemd service unit"
}
variable "ign_bootkube_path_unit_id" {
type = "string"
}
variable "ign_tectonic_service_id" {
type = "string"
description = "The ID of the tectonic installer systemd service unit"
}
variable "ign_tectonic_path_unit_id" {
type = "string"
}
variable "fault_domains" {
type = "string"
}

@ -1,44 +0,0 @@
variable "external_rsg_id" {
default = ""
type = "string"
}
variable "azure_location" {
type = "string"
}
variable "cluster_name" {
type = "string"
}
variable "cluster_id" {
type = "string"
}
# Storage ID
resource "random_id" "storage_id" {
byte_length = 2
}
variable "extra_tags" {
type = "map"
}
resource "azurerm_resource_group" "tectonic_cluster" {
count = "${var.external_rsg_id == "" ? 1 : 0}"
location = "${var.azure_location}"
name = "tectonic-cluster-${var.cluster_name}"
tags = "${merge(map(
"Name", "tectonic-cluster-${var.cluster_name}",
"tectonicClusterID", "${var.cluster_id}"),
var.extra_tags)}"
}
output "name" {
value = "${var.external_rsg_id == "" ? element(concat(azurerm_resource_group.tectonic_cluster.*.name, list("")), 0) : element(split("/", var.external_rsg_id), 4)}"
}
output "storage_id" {
value = "${random_id.storage_id.hex}"
}

@ -1,3 +0,0 @@
output "udev-rules_id" {
value = "${data.ignition_file.azure_udev_rules.id}"
}

@ -1,9 +0,0 @@
data "ignition_file" "azure_udev_rules" {
filesystem = "root"
path = "/etc/udev/rules.d/66-azure-storage.rules"
mode = 0644
content {
content = "${file("${path.module}/resources/66-azure-storage.rules")}"
}
}

@ -1,73 +0,0 @@
resource "azurerm_public_ip" "api_ip" {
count = "${var.private_cluster ? 0 : 1}"
name = "${var.cluster_name}_api_ip"
location = "${var.location}"
resource_group_name = "${var.resource_group_name}"
public_ip_address_allocation = "static"
domain_name_label = "${var.cluster_name}-api"
tags = "${merge(map(
"Name", "${var.cluster_name}-api",
"tectonicClusterID", "${var.cluster_id}"),
var.extra_tags)}"
}
resource "azurerm_lb_rule" "api_lb" {
count = "${var.private_cluster ? 0 : 1}"
name = "api-lb-rule-443-443"
resource_group_name = "${var.resource_group_name}"
loadbalancer_id = "${join("", azurerm_lb.tectonic_lb.*.id)}"
backend_address_pool_id = "${join("", azurerm_lb_backend_address_pool.api-lb.*.id)}"
probe_id = "${azurerm_lb_probe.api_lb.id}"
protocol = "tcp"
frontend_port = 443
backend_port = 443
frontend_ip_configuration_name = "api"
}
resource "azurerm_lb_probe" "api_lb" {
count = "${var.private_cluster ? 0 : 1}"
name = "api-lb-probe-443-up"
loadbalancer_id = "${azurerm_lb.tectonic_lb.id}"
resource_group_name = "${var.resource_group_name}"
protocol = "tcp"
port = 443
}
resource "azurerm_lb_backend_address_pool" "api-lb" {
count = "${var.private_cluster ? 0 : 1}"
name = "api-lb-pool"
resource_group_name = "${var.resource_group_name}"
loadbalancer_id = "${azurerm_lb.tectonic_lb.id}"
}
resource "azurerm_lb_rule" "ssh_lb" {
count = "${var.private_cluster ? 0 : 1}"
name = "ssh-lb"
resource_group_name = "${var.resource_group_name}"
loadbalancer_id = "${join("", azurerm_lb.tectonic_lb.*.id)}"
backend_address_pool_id = "${join("", azurerm_lb_backend_address_pool.api-lb.*.id)}"
probe_id = "${azurerm_lb_probe.ssh_lb.id}"
load_distribution = "SourceIP"
protocol = "tcp"
frontend_port = 22
backend_port = 22
frontend_ip_configuration_name = "api"
}
resource "azurerm_lb_probe" "ssh_lb" {
count = "${var.private_cluster ? 0 : 1}"
name = "ssh-lb-22-up"
loadbalancer_id = "${azurerm_lb.tectonic_lb.id}"
resource_group_name = "${var.resource_group_name}"
protocol = "tcp"
port = 22
}

@ -1,54 +0,0 @@
resource "azurerm_public_ip" "console_ip" {
count = "${var.private_cluster ? 0 : 1}"
name = "${var.cluster_name}_console_ip"
location = "${var.location}"
resource_group_name = "${var.resource_group_name}"
public_ip_address_allocation = "static"
domain_name_label = "${var.cluster_name}"
tags = "${merge(map(
"Name", "${var.cluster_name}",
"tectonicClusterID", "${var.cluster_id}"),
var.extra_tags)}"
}
resource "azurerm_lb_rule" "console_lb_https" {
count = "${var.private_cluster ? 0 : 1}"
name = "${var.cluster_name}-console-lb-rule-443-32000"
resource_group_name = "${var.resource_group_name}"
loadbalancer_id = "${azurerm_lb.tectonic_lb.id}"
backend_address_pool_id = "${join("", azurerm_lb_backend_address_pool.api-lb.*.id)}"
probe_id = "${azurerm_lb_probe.console_lb.id}"
protocol = "tcp"
frontend_port = 443
backend_port = 32000
frontend_ip_configuration_name = "console"
}
resource "azurerm_lb_rule" "console_lb_identity" {
count = "${var.private_cluster ? 0 : 1}"
name = "${var.cluster_name}-console-lb-rule-80-32001"
resource_group_name = "${var.resource_group_name}"
loadbalancer_id = "${azurerm_lb.tectonic_lb.id}"
backend_address_pool_id = "${join("", azurerm_lb_backend_address_pool.api-lb.*.id)}"
probe_id = "${azurerm_lb_probe.console_lb.id}"
protocol = "tcp"
frontend_port = 80
backend_port = 32001
frontend_ip_configuration_name = "console"
}
resource "azurerm_lb_probe" "console_lb" {
count = "${var.private_cluster ? 0 : 1}"
name = "${var.cluster_name}-console-lb-probe-443-up"
loadbalancer_id = "${azurerm_lb.tectonic_lb.id}"
resource_group_name = "${var.resource_group_name}"
protocol = "tcp"
port = 32000
}

@ -1,24 +0,0 @@
resource "azurerm_lb" "tectonic_lb" {
count = "${var.private_cluster ? 0 : 1}"
name = "${var.cluster_name}-api-lb"
location = "${var.location}"
resource_group_name = "${var.resource_group_name}"
frontend_ip_configuration {
name = "api"
public_ip_address_id = "${join("", azurerm_public_ip.api_ip.*.id)}"
private_ip_address_allocation = "dynamic"
}
frontend_ip_configuration {
name = "console"
public_ip_address_id = "${join("" , azurerm_public_ip.console_ip.*.id)}"
private_ip_address_allocation = "dynamic"
}
tags = "${merge(map(
"Name", "${var.cluster_name}-api-lb",
"tectonicClusterID", "${var.cluster_id}"),
var.extra_tags)}"
}

@ -1,12 +0,0 @@
resource "azurerm_network_interface" "etcd_nic" {
count = "${var.etcd_count}"
name = "${var.cluster_name}-etcd-nic-${count.index}"
location = "${var.location}"
resource_group_name = "${var.resource_group_name}"
ip_configuration {
name = "tectonic_etcd_configuration"
subnet_id = "${var.external_master_subnet_id == "" ? join(" ", azurerm_subnet.master_subnet.*.id) : var.external_master_subnet_id }"
private_ip_address_allocation = "dynamic"
}
}

@ -1,14 +0,0 @@
resource "azurerm_network_interface" "tectonic_master" {
count = "${var.master_count}"
name = "${var.cluster_name}-master-${count.index}"
location = "${var.location}"
resource_group_name = "${var.resource_group_name}"
enable_ip_forwarding = true
ip_configuration {
private_ip_address_allocation = "dynamic"
name = "${var.cluster_name}-MasterIPConfiguration"
subnet_id = "${var.external_master_subnet_id == "" ? join("",azurerm_subnet.master_subnet.*.id) : var.external_master_subnet_id}"
load_balancer_backend_address_pools_ids = ["${compact(azurerm_lb_backend_address_pool.api-lb.*.id)}"]
}
}

@ -1,13 +0,0 @@
resource "azurerm_network_interface" "tectonic_worker" {
count = "${var.worker_count}"
name = "${var.cluster_name}-worker-${count.index}"
location = "${var.location}"
resource_group_name = "${var.resource_group_name}"
enable_ip_forwarding = true
ip_configuration {
private_ip_address_allocation = "dynamic"
name = "${var.cluster_name}-WorkerIPConfiguration"
subnet_id = "${var.external_worker_subnet_id == "" ? join("", azurerm_subnet.worker_subnet.*.id) : var.external_worker_subnet_id}"
}
}

@ -1,125 +0,0 @@
resource "azurerm_network_security_rule" "etcd_egress" {
count = "${var.external_nsg_master_id == "" && var.etcd_count > 0 ? 1 : 0}"
name = "${var.cluster_name}-etcd-out"
description = "${var.cluster_name} etcd - Outbound"
priority = 2000
direction = "Outbound"
access = "Allow"
protocol = "*"
source_port_range = "*"
destination_port_range = "*"
# TODO: Reference subnet
source_address_prefix = "${var.vnet_cidr_block}"
destination_address_prefix = "*"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.master.name}"
}
resource "azurerm_network_security_rule" "etcd_ingress_ssh" {
count = "${var.external_nsg_master_id == "" && var.etcd_count > 0 ? 1 : 0}"
name = "${var.cluster_name}-etcd-in-ssh"
description = "${var.cluster_name} etcd - SSH"
priority = 400
direction = "Inbound"
access = "Allow"
protocol = "TCP"
source_port_range = "*"
destination_port_range = "22"
# TODO: Reference subnet
source_address_prefix = "${var.ssh_network_internal}"
destination_address_prefix = "${var.vnet_cidr_block}"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.master.name}"
}
resource "azurerm_network_security_rule" "etcd_ingress_ssh_admin" {
count = "${var.external_nsg_master_id == "" && var.etcd_count > 0 ? 1 : 0}"
name = "${var.cluster_name}-etcd-in-ssh-external"
description = "${var.cluster_name} etcd - SSH external"
priority = 405
direction = "Inbound"
access = "Allow"
protocol = "TCP"
source_port_range = "*"
destination_port_range = "22"
# TODO: Reference subnet
source_address_prefix = "${var.ssh_network_external}"
destination_address_prefix = "${var.vnet_cidr_block}"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.master.name}"
}
resource "azurerm_network_security_rule" "etcd_ingress_ssh_from_master" {
count = "${var.external_nsg_master_id == "" && var.etcd_count > 0 ? 1 : 0}"
name = "${var.cluster_name}-etcd-in-ssh-master"
description = "${var.cluster_name} etcd - SSH from master"
priority = 410
direction = "Inbound"
access = "Allow"
protocol = "TCP"
source_port_range = "*"
destination_port_range = "22"
# TODO: Reference subnet
source_address_prefix = "${var.vnet_cidr_block}"
destination_address_prefix = "${var.vnet_cidr_block}"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.master.name}"
}
resource "azurerm_network_security_rule" "etcd_ingress_client_self" {
count = "${var.external_nsg_master_id == "" && var.etcd_count > 0 ? 1 : 0}"
name = "${var.cluster_name}-etcd-in-client-self"
description = "${var.cluster_name} etcd - etcd client"
priority = 415
direction = "Inbound"
access = "Allow"
protocol = "TCP"
source_port_range = "*"
destination_port_range = "2379"
# TODO: Reference subnet
source_address_prefix = "${var.vnet_cidr_block}"
destination_address_prefix = "${var.vnet_cidr_block}"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.master.name}"
}
resource "azurerm_network_security_rule" "etcd_ingress_client_master" {
count = "${var.external_nsg_master_id == "" && var.etcd_count > 0 ? 1 : 0}"
name = "${var.cluster_name}-etcd-in-client-master"
description = "${var.cluster_name} etcd - etcd client from master"
priority = 420
direction = "Inbound"
access = "Allow"
protocol = "TCP"
source_port_range = "*"
destination_port_range = "2379"
# TODO: Reference subnet
source_address_prefix = "${var.vnet_cidr_block}"
destination_address_prefix = "${var.vnet_cidr_block}"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.master.name}"
}
resource "azurerm_network_security_rule" "etcd_ingress_peer" {
count = "${var.external_nsg_master_id == "" && var.etcd_count > 0 ? 1 : 0}"
name = "${var.cluster_name}-etcd-in-peer"
description = "${var.cluster_name} etcd - etcd peer"
priority = 425
direction = "Inbound"
access = "Allow"
protocol = "TCP"
source_port_range = "*"
destination_port_range = "2380"
# TODO: Reference subnet
source_address_prefix = "${var.vnet_cidr_block}"
destination_address_prefix = "${var.vnet_cidr_block}"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.master.name}"
}

@ -1,242 +0,0 @@
resource "azurerm_network_security_group" "master" {
count = "${var.external_nsg_master_id == "" ? 1 : 0}"
name = "${var.cluster_name}-master"
location = "${var.location}"
resource_group_name = "${var.resource_group_name}"
}
### LB rules
resource "azurerm_network_security_rule" "alb_probe" {
count = "${var.external_nsg_worker_id == "" ? 1 : 0}"
name = "${var.cluster_name}-alb-probe"
description = "${var.cluster_name} master - Azure Load Balancer probe"
priority = 295
direction = "Inbound"
access = "Allow"
protocol = "TCP"
source_port_range = "*"
destination_port_range = "*"
source_address_prefix = "AzureLoadBalancer"
destination_address_prefix = "${var.vnet_cidr_block}"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.master.name}"
}
# TODO: Fix NSG name and source
resource "azurerm_network_security_rule" "api_ingress_https" {
count = "${var.external_nsg_worker_id == "" ? 1 : 0}"
name = "${var.cluster_name}-api-in-https"
description = "${var.cluster_name} Kubernetes API"
priority = 300
direction = "Inbound"
access = "Allow"
protocol = "TCP"
source_port_range = "*"
destination_port_range = "443"
# TODO: Ternary on private implementation
source_address_prefix = "*"
destination_address_prefix = "${var.vnet_cidr_block}"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.master.name}"
}
resource "azurerm_network_security_rule" "console_ingress_https" {
count = "${var.external_nsg_worker_id == "" ? 1 : 0}"
name = "${var.cluster_name}-console-in-https"
description = "${var.cluster_name} Azure Load Balancer - Tectonic Console"
priority = 305
direction = "Inbound"
access = "Allow"
protocol = "TCP"
source_port_range = "*"
destination_port_range = "443"
# TODO: Ternary on private implementation
source_address_prefix = "*"
destination_address_prefix = "AzureLoadBalancer"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.master.name}"
}
resource "azurerm_network_security_rule" "console_ingress_http" {
count = "${var.external_nsg_worker_id == "" ? 1 : 0}"
name = "${var.cluster_name}-console-in-http"
description = "${var.cluster_name} Azure Load Balancer - Tectonic Identity"
priority = 310
direction = "Inbound"
access = "Allow"
protocol = "TCP"
source_port_range = "*"
destination_port_range = "80"
# TODO: Ternary on private implementation
source_address_prefix = "${var.vnet_cidr_block}"
destination_address_prefix = "AzureLoadBalancer"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.master.name}"
}
### Master node rules
resource "azurerm_network_security_rule" "master_egress" {
count = "${var.external_nsg_master_id == "" ? 1 : 0}"
name = "${var.cluster_name}-master-out"
description = "${var.cluster_name} master - Outbound"
priority = 2005
direction = "Outbound"
access = "Allow"
protocol = "*"
source_port_range = "*"
destination_port_range = "*"
# TODO: Reference subnet
source_address_prefix = "${var.vnet_cidr_block}"
destination_address_prefix = "*"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.master.name}"
}
resource "azurerm_network_security_rule" "master_ingress_ssh" {
count = "${var.external_nsg_master_id == "" ? 1 : 0}"
name = "${var.cluster_name}-master-in-ssh"
description = "${var.cluster_name} master - SSH"
priority = 500
direction = "Inbound"
access = "Allow"
protocol = "TCP"
source_port_range = "*"
destination_port_range = "22"
# TODO: Reference subnet
source_address_prefix = "${var.ssh_network_internal}"
destination_address_prefix = "${var.vnet_cidr_block}"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.master.name}"
}
resource "azurerm_network_security_rule" "master_ingress_ssh_admin" {
count = "${var.external_nsg_master_id == "" ? 1 : 0}"
name = "${var.cluster_name}-master-in-ssh-external"
description = "${var.cluster_name} master - SSH external"
priority = 505
direction = "Inbound"
access = "Allow"
protocol = "TCP"
source_port_range = "*"
destination_port_range = "22"
# TODO: Reference subnet
source_address_prefix = "${var.ssh_network_external}"
destination_address_prefix = "${var.vnet_cidr_block}"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.master.name}"
}
resource "azurerm_network_security_rule" "master_ingress_flannel" {
count = "${var.external_nsg_master_id == "" ? 1 : 0}"
name = "${var.cluster_name}-master-in-udp-4789"
description = "${var.cluster_name} master - flannel"
priority = 510
direction = "Inbound"
access = "Allow"
protocol = "UDP"
source_port_range = "*"
destination_port_range = "4789"
# TODO: Reference subnet
source_address_prefix = "${var.vnet_cidr_block}"
destination_address_prefix = "${var.vnet_cidr_block}"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.master.name}"
}
resource "azurerm_network_security_rule" "master_ingress_node_exporter_from_master" {
count = "${var.external_nsg_master_id == "" ? 1 : 0}"
name = "${var.cluster_name}-master-in-tcp-9100-master"
description = "${var.cluster_name} master - Prometheus node exporter from master"
priority = 515
direction = "Inbound"
access = "Allow"
protocol = "TCP"
source_port_range = "*"
destination_port_range = "9100"
source_address_prefix = "${var.vnet_cidr_block}"
destination_address_prefix = "${var.vnet_cidr_block}"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.master.name}"
}
resource "azurerm_network_security_rule" "master_ingress_node_exporter_from_worker" {
count = "${var.external_nsg_master_id == "" ? 1 : 0}"
name = "${var.cluster_name}-master-in-tcp-9100-worker"
description = "${var.cluster_name} master - Prometheus node exporter from worker"
priority = 520
direction = "Inbound"
access = "Allow"
protocol = "TCP"
source_port_range = "*"
destination_port_range = "9100"
source_address_prefix = "${var.vnet_cidr_block}"
destination_address_prefix = "${var.vnet_cidr_block}"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.master.name}"
}
# TODO: Review NSG
resource "azurerm_network_security_rule" "master_ingress_k8s_nodeport_from_alb" {
count = "${var.external_nsg_master_id == "" ? 1 : 0}"
name = "${var.cluster_name}-master-in-any-30000-32767-alb"
description = "${var.cluster_name} master - Kubernetes NodePort range from Azure Load Balancer"
priority = 525
direction = "Inbound"
access = "Allow"
protocol = "*"
source_port_range = "*"
destination_port_range = "30000-32767"
# TODO: Reference subnet
source_address_prefix = "AzureLoadBalancer"
destination_address_prefix = "${var.vnet_cidr_block}"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.master.name}"
}
# TODO: Review NSG
resource "azurerm_network_security_rule" "master_ingress_k8s_nodeport" {
count = "${var.external_nsg_master_id == "" ? 1 : 0}"
name = "${var.cluster_name}-master-in-any-30000-32767"
description = "${var.cluster_name} master - Kubernetes NodePort range"
priority = 530
direction = "Inbound"
access = "Allow"
protocol = "*"
source_port_range = "*"
destination_port_range = "30000-32767"
# TODO: Reference subnet
source_address_prefix = "*"
destination_address_prefix = "${var.vnet_cidr_block}"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.master.name}"
}
resource "azurerm_network_security_rule" "master_ingress_kubelet_secure" {
count = "${var.external_nsg_master_id == "" ? 1 : 0}"
name = "${var.cluster_name}-master-in-tcp-10255-vnet"
description = "${var.cluster_name} master - kubelet"
priority = 535
direction = "Inbound"
access = "Allow"
protocol = "TCP"
source_port_range = "*"
destination_port_range = "10255"
# TODO: CR on how open this should be
# TODO: Reference subnet
source_address_prefix = "VirtualNetwork"
destination_address_prefix = "${var.vnet_cidr_block}"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.master.name}"
}

@ -1,169 +0,0 @@
resource "azurerm_network_security_group" "worker" {
count = "${var.external_nsg_worker_id == "" ? 1 : 0}"
name = "${var.cluster_name}-worker"
location = "${var.location}"
resource_group_name = "${var.resource_group_name}"
}
resource "azurerm_network_security_rule" "worker_egress" {
count = "${var.external_nsg_worker_id == "" ? 1 : 0}"
name = "${var.cluster_name}-worker-out"
description = "${var.cluster_name} worker - Outbound"
priority = 2010
direction = "Outbound"
access = "Allow"
protocol = "*"
source_port_range = "*"
destination_port_range = "*"
# TODO: Reference subnet
source_address_prefix = "${var.vnet_cidr_block}"
destination_address_prefix = "*"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.worker.name}"
}
resource "azurerm_network_security_rule" "worker_ingress_ssh" {
count = "${var.external_nsg_worker_id == "" ? 1 : 0}"
name = "${var.cluster_name}-worker-in-ssh"
description = "${var.cluster_name} worker - SSH"
priority = 600
direction = "Inbound"
access = "Allow"
protocol = "TCP"
source_port_range = "*"
destination_port_range = "22"
# TODO: Reference subnet
source_address_prefix = "${var.ssh_network_internal}"
destination_address_prefix = "${var.vnet_cidr_block}"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.worker.name}"
}
resource "azurerm_network_security_rule" "worker_ingress_ssh_admin" {
count = "${var.external_nsg_worker_id == "" ? 1 : 0}"
name = "${var.cluster_name}-worker-in-ssh-external"
description = "${var.cluster_name} worker - SSH external"
priority = 605
direction = "Inbound"
access = "Allow"
protocol = "TCP"
source_port_range = "*"
destination_port_range = "22"
# TODO: Reference subnet
source_address_prefix = "${var.ssh_network_external}"
destination_address_prefix = "${var.vnet_cidr_block}"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.worker.name}"
}
# TODO: Determine if we need two rules for this
resource "azurerm_network_security_rule" "worker_ingress_k8s_nodeport" {
count = "${var.external_nsg_worker_id == "" ? 1 : 0}"
name = "${var.cluster_name}-worker-in-any-30000-32767"
description = "${var.cluster_name} worker - Kubernetes NodePort range"
priority = 610
direction = "Inbound"
access = "Allow"
protocol = "*"
source_port_range = "*"
destination_port_range = "30000-32767"
source_address_prefix = "VirtualNetwork"
destination_address_prefix = "*"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.worker.name}"
}
resource "azurerm_network_security_rule" "worker_ingress_flannel" {
count = "${var.external_nsg_worker_id == "" ? 1 : 0}"
name = "${var.cluster_name}-worker-in-udp-4789"
description = "${var.cluster_name} worker - flannel"
priority = 615
direction = "Inbound"
access = "Allow"
protocol = "UDP"
source_port_range = "*"
destination_port_range = "4789"
# TODO: Reference subnet
source_address_prefix = "${var.vnet_cidr_block}"
destination_address_prefix = "${var.vnet_cidr_block}"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.worker.name}"
}
resource "azurerm_network_security_rule" "worker_ingress_kubelet_secure" {
count = "${var.external_nsg_worker_id == "" ? 1 : 0}"
name = "${var.cluster_name}-worker-in-tcp-10255-vnet"
description = "${var.cluster_name} worker - kubelet"
priority = 620
direction = "Inbound"
access = "Allow"
protocol = "TCP"
source_port_range = "*"
destination_port_range = "10255"
# TODO: CR on how open this should be
# TODO: Reference subnet
source_address_prefix = "VirtualNetwork"
destination_address_prefix = "${var.vnet_cidr_block}"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.worker.name}"
}
resource "azurerm_network_security_rule" "worker_ingress_node_exporter_from_worker" {
count = "${var.external_nsg_worker_id == "" ? 1 : 0}"
name = "${var.cluster_name}-worker-in-tcp-9100-vnet"
description = "${var.cluster_name} worker - Prometheus node exporter from worker"
priority = 625
direction = "Inbound"
access = "Allow"
protocol = "TCP"
source_port_range = "*"
destination_port_range = "9100"
# TODO: Reference subnet
source_address_prefix = "${var.vnet_cidr_block}"
destination_address_prefix = "${var.vnet_cidr_block}"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.worker.name}"
}
resource "azurerm_network_security_rule" "worker_ingress_node_exporter_from_master" {
count = "${var.external_nsg_worker_id == "" ? 1 : 0}"
name = "${var.cluster_name}-worker-in-tcp-9100-master"
description = "${var.cluster_name} worker - Prometheus node exporter from master"
priority = 630
direction = "Inbound"
access = "Allow"
protocol = "TCP"
source_port_range = "*"
destination_port_range = "9100"
# TODO: Reference subnet
source_address_prefix = "${var.vnet_cidr_block}"
destination_address_prefix = "${var.vnet_cidr_block}"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.worker.name}"
}
resource "azurerm_network_security_rule" "worker_ingress_heapster_from_master" {
count = "${var.external_nsg_worker_id == "" ? 1 : 0}"
name = "${var.cluster_name}-worker-in-tcp-4194-master"
description = "${var.cluster_name} worker - Heapster from master"
priority = 635
direction = "Inbound"
access = "Allow"
protocol = "TCP"
source_port_range = "*"
destination_port_range = "4194"
# TODO: Reference subnet
source_address_prefix = "${var.vnet_cidr_block}"
destination_address_prefix = "${var.vnet_cidr_block}"
resource_group_name = "${var.resource_group_name}"
network_security_group_name = "${azurerm_network_security_group.worker.name}"
}

@ -1,84 +0,0 @@
locals {
# A regular expression that parses a Azure subnet id to extract subnet name.
const_id_to_subnet_name_regex = "/^/subscriptions/[-\\w]+/resourceGroups/[-\\w]+/providers/Microsoft.Network/virtualNetworks/[.\\w]+/subnets/([.\\w-]+)$/"
# A regular expression that parses Azure resource IDs into component identifiers
const_id_to_group_name_regex = "/^/subscriptions/[-\\w]+/resourceGroups/([\\w()-\\.]+)/providers/[.\\w]+/[.\\w]+/([.\\w-]+)$/"
}
output "vnet_id" {
value = "${var.external_vnet_id == "" ? element(concat(azurerm_virtual_network.tectonic_vnet.*.name, list("")), 0) : replace(var.external_vnet_id, local.const_id_to_group_name_regex, "$2")}"
}
output "master_subnet" {
value = "${var.external_master_subnet_id == "" ? element(concat(azurerm_subnet.master_subnet.*.id, list("")), 0) : var.external_master_subnet_id}"
}
output "worker_subnet" {
value = "${var.external_worker_subnet_id == "" ? element(concat(azurerm_subnet.worker_subnet.*.id, list("")), 0) : var.external_worker_subnet_id}"
}
output "worker_subnet_name" {
value = "${var.external_worker_subnet_id == "" ? element(concat(azurerm_subnet.worker_subnet.*.name, list("")), 0) : replace(var.external_worker_subnet_id, local.const_id_to_subnet_name_regex, "$1")}"
}
output "vnet_resource_group" {
value = "${var.external_vnet_id == "" ? "" : replace(var.external_vnet_id, local.const_id_to_group_name_regex, "$1")}"
}
# TODO: Allow user to provide their own network
output "etcd_cidr" {
value = "${element(concat(azurerm_subnet.master_subnet.*.address_prefix, list("")), 0)}"
}
output "master_cidr" {
value = "${element(concat(azurerm_subnet.master_subnet.*.address_prefix, list("")), 0)}"
}
output "worker_cidr" {
value = "${element(concat(azurerm_subnet.worker_subnet.*.address_prefix, list("")), 0)}"
}
output "worker_nsg_name" {
value = "${var.external_nsg_worker_id == "" ? element(concat(azurerm_network_security_group.worker.*.name, list("")), 0) : var.external_nsg_worker_id}"
}
output "etcd_network_interface_ids" {
value = ["${azurerm_network_interface.etcd_nic.*.id}"]
}
output "etcd_endpoints" {
value = "${azurerm_network_interface.etcd_nic.*.private_ip_address}"
}
output "master_network_interface_ids" {
value = ["${azurerm_network_interface.tectonic_master.*.id}"]
}
output "worker_network_interface_ids" {
value = ["${azurerm_network_interface.tectonic_worker.*.id}"]
}
output "master_private_ip_addresses" {
value = ["${azurerm_network_interface.tectonic_master.*.private_ip_address}"]
}
output "worker_private_ip_addresses" {
value = ["${azurerm_network_interface.tectonic_worker.*.private_ip_address}"]
}
output "api_ip_addresses" {
value = ["${split("|", var.private_cluster ? join("|", azurerm_network_interface.tectonic_master.*.private_ip_address) : join("|", azurerm_public_ip.api_ip.*.ip_address))}"]
}
output "console_ip_addresses" {
value = ["${split("|", var.private_cluster ? join("|", azurerm_network_interface.tectonic_worker.*.private_ip_address) : join("|", azurerm_public_ip.console_ip.*.ip_address))}"]
}
output "ingress_fqdn" {
value = "${var.base_domain == "" ? element(concat(azurerm_public_ip.console_ip.*.fqdn, list("")), 0) : "${var.cluster_name}.${var.base_domain}${var.private_cluster ? ":32000" : ""}"}"
}
output "api_fqdn" {
value = "${var.base_domain == "" ? element(concat(azurerm_public_ip.api_ip.*.fqdn, list("")), 0) : "${var.cluster_name}-api.${var.base_domain}"}"
}

Some files were not shown because too many files have changed in this diff.